repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5-92 | stringlengths 4-232 | stringclasses 19 values | stringlengths 4-7 | stringlengths 721-1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51-99.9 | int64 15-997 | float64 0.25-0.97 | bool (1 class)
---|---|---|---|---|---|---|---|---|---|---|
youhusky/Facebook_Prepare | 653. Two Sum IV - Input is a BST.py | 1 | 1134 | # Given a Binary Search Tree and a target number, return true if there exist two elements in the BST such that their sum is equal to the given target.
# Example 1:
# Input:
# 5
# / \
# 3 6
# / \ \
# 2 4 7
# Target = 9
# Output: True
# Example 2:
# Input:
# 5
# / \
# 3 6
# / \ \
# 2 4 7
# Target = 28
# Output: False
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Simple BFS level-order
class Solution(object):
    def findTarget(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: bool
        """
        if not root:
            return False
        queue = [root]
        s = set()
        while queue:
            node = queue.pop(0)
            # two sum thought
            if k - node.val in s:
                return True
            s.add(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        return False
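# Usage sketch (illustrative only): builds the example BST from the problem
# statement above and checks both targets. A minimal TreeNode is defined
# locally for the sketch, mirroring the commented-out definition above.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(5)
    root.left, root.right = TreeNode(3), TreeNode(6)
    root.left.left, root.left.right = TreeNode(2), TreeNode(4)
    root.right.right = TreeNode(7)
    print(Solution().findTarget(root, 9))   # expected: True
    print(Solution().findTarget(root, 28))  # expected: False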
| mit | -3,556,029,239,713,989,000 | 19.636364 | 150 | 0.485891 | false |
FedoraScientific/salome-smesh | doc/salome/examples/modifying_meshes_ex16.py | 1 | 1273 | # Diagonal Inversion
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
import salome_notebook
# create an empty mesh structure
mesh = smesh.Mesh()
# create the following mesh:
# .----.----.----.
# | /| /| /|
# | / | / | / |
# | / | / | / |
# |/ |/ |/ |
# .----.----.----.
bb = [0, 0, 0, 0]
tt = [0, 0, 0, 0]
ff = [0, 0, 0, 0, 0, 0]
bb[0] = mesh.AddNode( 0., 0., 0.)
bb[1] = mesh.AddNode(10., 0., 0.)
bb[2] = mesh.AddNode(20., 0., 0.)
bb[3] = mesh.AddNode(30., 0., 0.)
tt[0] = mesh.AddNode( 0., 15., 0.)
tt[1] = mesh.AddNode(10., 15., 0.)
tt[2] = mesh.AddNode(20., 15., 0.)
tt[3] = mesh.AddNode(30., 15., 0.)
ff[0] = mesh.AddFace([bb[0], bb[1], tt[1]])
ff[1] = mesh.AddFace([bb[0], tt[1], tt[0]])
ff[2] = mesh.AddFace([bb[1], bb[2], tt[2]])
ff[3] = mesh.AddFace([bb[1], tt[2], tt[1]])
ff[4] = mesh.AddFace([bb[2], bb[3], tt[3]])
ff[5] = mesh.AddFace([bb[2], tt[3], tt[2]])
# inverse the diagonal bb[1] - tt[2]
print "\nDiagonal inversion ... ",
res = mesh.InverseDiag(bb[1], tt[2])
if not res: print "failed!"
else: print "done."
salome.sg.updateObjBrowser(1)
| lgpl-2.1 | 6,688,399,887,949,552,000 | 22.574074 | 43 | 0.563236 | false |
iamahuman/angr | angr/state_plugins/filesystem.py | 1 | 23698 | import os
import logging
from .plugin import SimStatePlugin
from ..storage.file import SimFile
from ..errors import SimMergeError
from ..misc.ux import once
l = logging.getLogger(name=__name__)
class SimFilesystem(SimStatePlugin): # pretends links don't exist
"""
angr's emulated filesystem. Available as state.fs.
When constructing, all parameters are optional.
:param files: A mapping from filepath to SimFile
:param pathsep: The character used to separate path elements, default forward slash.
:param cwd: The path of the current working directory to use
:param mountpoints: A mapping from filepath to SimMountpoint
:ivar pathsep: The current pathsep
:ivar cwd: The current working directory
:ivar unlinks: A list of unlink operations, tuples of filename and simfile. Be careful, this list is
shallow-copied from successor to successor, so don't mutate anything in it without copying.
"""
def __init__(self, files=None, pathsep=None, cwd=None, mountpoints=None):
super(SimFilesystem, self).__init__()
if files is None: files = {}
if pathsep is None: pathsep = b'/'
if cwd is None: cwd = pathsep
if mountpoints is None: mountpoints = {}
self.pathsep = pathsep
self.cwd = cwd
self._unlinks = []
self._files = {}
self._mountpoints = {}
for fname in mountpoints:
self.mount(fname, mountpoints[fname])
for fname in files:
self.insert(fname, files[fname])
@property
def unlinks(self):
for _, f in self._unlinks:
f.set_state(self.state)
return self._unlinks
def set_state(self, state):
super(SimFilesystem, self).set_state(state)
for fname in self._files:
self._files[fname].set_state(state)
for fname in self._mountpoints:
self._mountpoints[fname].set_state(state)
@SimStatePlugin.memo
def copy(self, memo):
o = SimFilesystem(
files={k: v.copy(memo) for k, v in self._files.items()},
pathsep=self.pathsep,
cwd=self.cwd,
mountpoints={k: v.copy(memo) for k, v in self._mountpoints.items()}
)
o._unlinks = list(self._unlinks)
return o
def merge(self, others, merge_conditions, common_ancestor=None):
merging_occured = False
for o in others:
if o.cwd != self.cwd:
raise SimMergeError("Can't merge filesystems with disparate cwds")
if len(o._mountpoints) != len(self._mountpoints):
raise SimMergeError("Can't merge filesystems with disparate mountpoints")
if list(map(id, o.unlinks)) != list(map(id, self.unlinks)):
raise SimMergeError("Can't merge filesystems with disparate unlinks")
for fname in self._mountpoints:
try:
subdeck = [o._mountpoints[fname] for o in others]
except KeyError:
raise SimMergeError("Can't merge filesystems with disparate file sets")
if common_ancestor is not None and fname in common_ancestor._mountpoints:
common_mp = common_ancestor._mountpoints[fname]
else:
common_mp = None
merging_occured |= self._mountpoints[fname].merge(subdeck, merge_conditions, common_ancestor=common_mp)
# this is a little messy
deck = [self] + others
all_files = set.union(*(set(o._files.keys()) for o in deck))
for fname in all_files:
subdeck = [o._files[fname] if fname in o._files else None for o in deck]
representative = next(x for x in subdeck if x is not None)
for i, v in enumerate(subdeck):
if v is None:
subdeck[i] = representative()
if i == 0:
self._files[fname] = subdeck[i]
if common_ancestor is not None and fname in common_ancestor._files:
common_simfile = common_ancestor._files[fname]
else:
common_simfile = None
merging_occured |= subdeck[0].merge(subdeck[1:], merge_conditions, common_ancestor=common_simfile)
return merging_occured
def widen(self, others): # pylint: disable=unused-argument
if once('fs_widen_warning'):
l.warning("Filesystems can't be widened yet - beware unsoundness")
def _normalize_path(self, path):
"""
Takes a path and returns a simple absolute path as a list of directories from the root
"""
if type(path) is str:
path = path.encode()
path = path.split(b'\0')[0]
if path[0:1] != self.pathsep:
path = self.cwd + self.pathsep + path
keys = path.split(self.pathsep)
i = 0
while i < len(keys):
if keys[i] == b'':
keys.pop(i)
elif keys[i] == b'.':
keys.pop(i)
elif keys[i] == b'..':
keys.pop(i)
if i != 0:
keys.pop(i-1)
i -= 1
else:
i += 1
return keys
def _join_chunks(self, keys):
"""
Takes a list of directories from the root and joins them into a string path
"""
return self.pathsep + self.pathsep.join(keys)
def chdir(self, path):
"""
Changes the current directory to the given path
"""
self.cwd = self._join_chunks(self._normalize_path(path))
def get(self, path):
"""
Get a file from the filesystem. Returns a SimFile or None.
"""
mountpoint, chunks = self.get_mountpoint(path)
if mountpoint is None:
return self._files.get(self._join_chunks(chunks))
else:
return mountpoint.get(chunks)
def insert(self, path, simfile):
"""
Insert a file into the filesystem. Returns whether the operation was successful.
"""
if self.state is not None:
simfile.set_state(self.state)
mountpoint, chunks = self.get_mountpoint(path)
if mountpoint is None:
self._files[self._join_chunks(chunks)] = simfile
return True
else:
return mountpoint.insert(chunks, simfile)
def delete(self, path):
"""
Remove a file from the filesystem. Returns whether the operation was successful.
This will add a ``fs_unlink`` event with the path of the file and also the index into the `unlinks` list.
"""
mountpoint, chunks = self.get_mountpoint(path)
apath = self._join_chunks(chunks)
if mountpoint is None:
try:
simfile = self._files.pop(apath)
except KeyError:
return False
else:
self.state.history.add_event('fs_unlink', path=apath, unlink_idx=len(self.unlinks))
self.unlinks.append((apath, simfile))
return True
else:
return mountpoint.delete(chunks)
def mount(self, path, mount):
"""
Add a mountpoint to the filesystem.
"""
self._mountpoints[self._join_chunks(self._normalize_path(path))] = mount
def unmount(self, path):
"""
Remove a mountpoint from the filesystem.
"""
del self._mountpoints[self._join_chunks(self._normalize_path(path))]
def get_mountpoint(self, path):
"""
Look up the mountpoint servicing the given path.
:return: A tuple of the mount and a list of path elements traversing from the mountpoint to the specified file.
"""
path_chunks = self._normalize_path(path)
for i in range(len(path_chunks) - 1, -1, -1):
partial_path = self._join_chunks(path_chunks[:-i])
if partial_path in self._mountpoints:
mountpoint = self._mountpoints[partial_path]
if mountpoint is None:
break
return mountpoint, path_chunks[-i:]
return None, path_chunks
SimFilesystem.register_default('fs')
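# Illustrative sketch of the insert/get API defined above, kept as a comment so
# nothing runs at import time; in practice the plugin is reached via `state.fs`.
#
#   fs = SimFilesystem()
#   fs.insert('/tmp/flag', SimFile(name='flag', content=b'hello', size=5))
#   f = fs.get('/tmp/../tmp/./flag')   # paths are normalized, so this finds the same file
#   # delete() also records an fs_unlink history event, so it needs an attached state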
class SimMount(SimStatePlugin):
"""
This is the base class for "mount points" in angr's simulated filesystem. Subclass this class and
give it to the filesystem to intercept all file creations and opens below the mountpoint.
Since this is a SimStatePlugin you may also want to implement set_state, copy, merge, etc.
"""
def get(self, _path_elements):
"""
Implement this function to instrument file lookups.
:param path_elements: A list of path elements traversing from the mountpoint to the file
:return: A SimFile, or None
"""
raise NotImplementedError
def insert(self, _path_elements, simfile):
"""
Implement this function to instrument file creation.
:param path_elements: A list of path elements traversing from the mountpoint to the file
:param simfile: The file to insert
:return: A bool indicating whether the insert occurred
"""
raise NotImplementedError
def delete(self, _path_elements):
"""
Implement this function to instrument file deletion.
:param path_elements: A list of path elements traversing from the mountpoint to the file
:return: A bool indicating whether the delete occurred
"""
raise NotImplementedError
class SimHostFilesystem(SimMount):
"""
Simulated mount that makes some piece from the host filesystem available to the guest.
:param str host_path: The path on the host to mount
:param str pathsep: The host path separator character, default os.path.sep
"""
def __init__(self, host_path, pathsep=os.path.sep):
super(SimHostFilesystem, self).__init__()
self.host_path = host_path
self.pathsep = pathsep
self.cache = {}
self.deleted_list = set()
def get(self, path_elements):
path = self.pathsep.join(x.decode() for x in path_elements)
if path in self.deleted_list:
return None
if path not in self.cache:
host_path = os.path.join(self.host_path, path)
simfile = self._load_file(host_path)
if simfile is None:
return None
self.insert(path_elements, simfile)
return self.cache[path]
@staticmethod
def _load_file(path):
try:
with open(path, 'rb') as fp:
content = fp.read()
except OSError:
return None
else:
return SimFile(name='file://' + path, content=content, size=len(content))
def insert(self, path_elements, simfile):
path = self.pathsep.join(x.decode() for x in path_elements)
simfile.set_state(self.state)
self.cache[path] = simfile
self.deleted_list.discard(path)
return True
def delete(self, path_elements):
path = self.pathsep.join(x.decode() for x in path_elements)
self.deleted_list.add(path)
return self.cache.pop(path, None) is not None
@SimStatePlugin.memo
def copy(self, memo):
x = SimHostFilesystem(self.host_path, pathsep=self.pathsep)
x.cache = {fname: self.cache[fname].copy(memo) for fname in self.cache}
x.deleted_list = set(self.deleted_list)
return x
def set_state(self, state):
super(SimHostFilesystem, self).set_state(state)
for fname in self.cache:
self.cache[fname].set_state(state)
def merge(self, others, merge_conditions, common_ancestor=None):
merging_occured = False
for o in others:
if o.host_path != self.host_path:
raise SimMergeError("Can't merge SimHostFilesystems with disparate host paths")
if o.pathsep != self.pathsep:
raise SimMergeError("Can't merge SimHostFilesystems with disparate pathseps")
if o.deleted_list != self.deleted_list:
raise SimMergeError("Can't merge SimHostFilesystems with disparate deleted files")
deck = [self] + others
all_files = set.union(*(set(o._files.keys()) for o in deck))
for fname in all_files:
subdeck = []
basecase = None
for o in deck:
try:
subdeck.append(o.cache[fname])
except KeyError:
if basecase is None:
basecase = self._load_file(os.path.join(self.host_path, fname))
subdeck.append(basecase)
if common_ancestor is not None and fname in common_ancestor.cache:
common_simfile = common_ancestor.cache[fname]
else:
common_simfile = None
merging_occured |= subdeck[0].merge(subdeck[1:], merge_conditions, common_ancestor=common_simfile)
return merging_occured
def widen(self, others): # pylint: disable=unused-argument
if once('host_fs_widen_warning'):
l.warn("The host filesystem mount can't be widened yet - beware unsoundness")
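# Illustrative sketch (comment only): exposing part of the host filesystem to
# the guest through the mount API above; the host and guest paths are made up.
#
#   state.fs.mount('/lib', SimHostFilesystem('/path/on/host/lib'))
#   libc = state.fs.get('/lib/libc.so.6')   # loaded lazily from the host and cached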
#class SimDirectory(SimStatePlugin):
# """
# This is the base class for directories in angr's emulated filesystem. An instance of this class or a subclass will
# be found as ``state.fs``, representing the root of the filesystem.
#
# :ivar files: A mapping from filename to file that this directory contains.
# """
# def __init__(self, files=None, writable=True, parent=None, pathsep='/'):
# super(SimDirectory, self).__init__()
# self.files = files
# self.writable = writable
# self.parent = parent if parent is not None else self
# self.pathsep = pathsep
# self.files['.'] = self
# self.files['..'] = self.parent
#
# def __len__(self):
# return len(self.files)
#
# def lookup(self, path, writing=False):
# """
# Look up the file or directory at the end of the given path.
# This method should be called on the current working directory object.
#
# :param str path: The path to look up
# :param bool writing: Whether the operation desired requires write permissions
# :returns: The SimDirectory or SimFile object specified, or None if not found, or False if writing
# was requested and the target is nonwritable
# """
# if len(path) == 0:
# return None
# if path[0] == self.pathsep:
# # lookup the filesystem root
# root = self
# while root.parent is not root:
# root = root.parent
# return root._lookup(path[1:], writing)
# else:
# return self._lookup(path, writing)
#
# def _lookup(self, path, writing):
# while path.startswith(self.pathsep):
# path = path[1:]
#
# if len(path) == 0:
# if writing and not self.writable:
# return False
# return self
#
# for fname, simfile in self.files.items():
# if path.startswith(fname):
# if len(path) == len(fname):
# if writing and not simfile.writable:
# return False
# return simfile
# elif path[len(fname)] == self.pathsep:
# if isinstance(simfile, SimDirectory):
# return simfile._lookup(path[len(fname)+1:])
# else: # TODO: symlinks
# return None
#
# return None
#
# def insert(self, path, simfile):
# """
# Add a file to the filesystem.
# This method should be called on the current working directory object.
#
# :param str path: The path to insert the new file at
# :param simfile: The new file or directory
# :returns: A boolean indicating whether the operation succeeded
# """
# while len(path) > 1 and path[-1] == self.pathsep:
# path = path[:-1]
#
# if self.pathsep not in path:
# if path in self.files:
# return False
# if isinstance(simfile, SimDirectory):
# if simfile.parent is simfile:
# simfile.parent = self
# simfile.pathsep = self.pathsep
# else:
# l.error("Trying to add directory to filesystem which already has a parent")
#
# self.files[path] = simfile
# simfile.set_state(self.state)
# return True
# else:
# lastsep = path.rindex(self.pathsep) + 1
# head, tail = path[:lastsep], path[lastsep:]
# parent = self.lookup(head, True)
#
# if not parent:
# return False
# return parent.insert(tail, simfile)
#
# def remove(self, path):
# """
# Remove a file from the filesystem. If the target is a directory, the directory must be empty.
# This method should be called on the current working directory object.
#
# :param str path: The path to remove the file at
# :returns: A boolean indicating whether the operation succeeded
# """
# while len(path) > 1 and path[-1] == self.pathsep:
# # TODO: when symlinks exist this will need to be fixed to delete the target of the
# # symlink instead of the link itself
# path = path[:-1]
#
# if self.pathsep not in path:
# if path in ('.', '..'):
# return False
# if path not in self.files:
# return False
# if isinstance(self.files[path], SimDirectory) and len(self.files[path]) != 2:
# return False
#
# del self.files[path]
# return True
# else:
# lastsep = path.rindex(self.pathsep) + 1
# head, tail = path[:lastsep], path[lastsep:]
# parent = self.lookup(head, True)
#
# if not parent:
# return False
# return parent.remove(tail)
#
# @SimStatePlugin.memo
# def copy(self, memo):
# return SimDirectory(
# files={x: y.copy(memo) for x, y in self.files.items()},
# writable=self.writable,
# parent=self.parent.copy(memo),
# pathsep=self.pathsep)
#
# def merge(self, others, conditions, ancestor=None):
# new_files = {path: (simfile, [], []) for path, simfile in self.files.items() if path not in ('.', '..')}
# for other, condition in zip(others, conditions):
# if type(other) is not type(self):
# raise SimMergeError("Can't merge filesystem elements of disparate types")
# for path, simfile in other.files.items():
# if path in ('.', '..'):
# continue
# if path not in new_files:
# l.warning("Cannot represent the conditional creation of files")
# new_files[path] = (simfile, [], [])
# else:
# new_files[path][1].append(simfile)
# new_files[path][2].append(condition)
#
# for k in new_files:
# new_files[k][0].merge(new_files[k][1], new_files[k][2], ancestor)
# new_files[k] = new_files[k][0]
# new_files['.'] = self
# new_files['..'] = self.parent
# self.files = new_files
#
# def widen(self, others):
# new_files = {path: [simfile] for path, simfile in self.files.items() if path not in ('.', '..')}
# for other in others:
# if type(other) is not type(self):
# raise SimMergeError("Can't merge filesystem elements of disparate types")
# for path, simfile in other.files.items():
# if path in ('.', '..'):
# continue
# if path not in new_files:
# new_files[path] = [simfile]
# else:
# new_files[path].append(simfile)
#
# for k in new_files:
# new_files[k][0].widen(new_files[k][1:])
# new_files[k] = new_files[k][0]
# new_files['.'] = self
# new_files['..'] = self.parent
# self.files = new_files
#
#class SimDirectoryConcrete(SimDirectory):
# """
# A SimDirectory that forwards its requests to the host filesystem
#
# :param host_path: The path on the host filesystem to provide
# :param writable: Whether to allow mutation of the host filesystem by the guest
# """
# def __init__(self, host_path, writable=False, pathsep='/', host_root=None, parent=None):
# super(SimConcreteDirectory, self).__init__(files={}, writable=writable, parent=parent, pathsep=pathsep)
# self.host_path = os.path.realpath(host_path)
# self.host_root = self.host_path if host_root is None else host_root
#
# def _lookup(self, path, writing):
# partial_path = self.host_path
# for i, pathkey in enumerate(path.split(self.pathsep)):
# if partial_path == self.host_root and pathkey == '..':
# target = self.pathsep.join(path.split(self.pathsep)[i+1:])
# return self.parent._lookup(target, writing)
# if not os.path.isdir(partial_path):
# return None
#
# partial_path = os.path.realpath(partial_path + self.pathsep + pathkey)
#
# if writing and not self.writable:
# return False
#
# if os.path.isdir(partial_path):
# f = SimDirectoryConcrete(host_path=partial_path, writable=self.writable, host_root=self.host_root, parent=self.parent)
# f.set_state(self.state)
# return f
# elif os.path.isfile(partial_path):
# try:
# f = SimFileConcrete(host_path=partial_path, writable=self.writable)
# f.set_state(self.state)
# return f
# except OSError:
# return None
# else:
# raise SimFilesystemError("Can't handle something other than a file or directory in a concrete filesystem")
#
# def insert(self, path, simfile):
# if self.pathsep in path:
# return super(SimDirectoryConcrete, self).insert(path, simfile)
# else:
# fullpath = os.path.join(self.host_path, path)
# if os.path.exists(fullpath):
# return False
# with open(fullpath, 'w') as fp:
# fp.write(simfile.concretize())
# return True
#
# def remove(self, path):
# if self.pathsep in path:
# return super(SimDirectoryConcrete, self).remove(path)
# else:
# fullpath = os.path.join(self.host_path, path)
# if not os.path.exists(fullpath):
# return False
# if os.path.isdir(fullpath):
# try:
# os.rmdir(fullpath)
# except OSError:
# return False
# return True
# elif os.path.isfile(fullpath):
# try:
# os.unlink(fullpath)
# except OSError:
# return False
# return True
# else:
# raise SimFilesystemError("Can't handle anything but files and directories in concrete filesystem")
#
#SimDirectory.register_default('fs')
| bsd-2-clause | -3,923,235,753,469,859,000 | 37.408428 | 131 | 0.565027 | false |
grow/pygrow | grow/commands/subcommands/build.py | 1 | 4362 | """Command for building pods into static deployments."""
import os
import click
from grow.commands import shared
from grow.common import bulk_errors
from grow.common import rc_config
from grow.common import utils
from grow.deployments import stats
from grow.deployments.destinations import local as local_destination
from grow.performance import docs_loader
from grow.pods import pods
from grow.rendering import renderer
from grow import storage
CFG = rc_config.RC_CONFIG.prefixed('grow.build')
# pylint: disable=too-many-locals
@click.command()
@shared.pod_path_argument
@click.option('--clear-cache',
default=CFG.get('clear-cache', False), is_flag=True,
help='Clear the pod cache before building.')
@click.option('--file', '--pod-path', 'pod_paths',
help='Build only pages affected by content files.', multiple=True)
@click.option('--locate-untranslated',
default=CFG.get('locate-untranslated', False), is_flag=True,
help='Shows untranslated message information.')
@shared.locale_option(help_text='Filter build routes to specific locale.')
@shared.deployment_option(CFG)
@shared.out_dir_option(CFG)
@shared.preprocess_option(CFG)
@shared.threaded_option(CFG)
@shared.shards_option
@shared.shard_option
@shared.work_dir_option
@shared.routes_file_option()
def build(pod_path, out_dir, preprocess, clear_cache, pod_paths,
locate_untranslated, deployment, threaded, locale, shards, shard,
work_dir, routes_file):
"""Generates static files and dumps them to a local destination."""
root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
out_dir = out_dir or os.path.join(root, 'build')
pod = pods.Pod(root, storage=storage.FileStorage)
if not pod_paths or clear_cache:
# Clear the cache when building all, only force if the flag is used.
pod.podcache.reset(force=clear_cache)
deployment_obj = None
if deployment:
deployment_obj = pod.get_deployment(deployment)
pod.set_env(deployment_obj.config.env)
if preprocess:
with pod.profile.timer('grow_preprocess'):
pod.preprocess()
if locate_untranslated:
pod.enable(pod.FEATURE_TRANSLATION_STATS)
try:
with pod.profile.timer('grow_build'):
config = local_destination.Config(out_dir=out_dir)
# When using a specific deployment, its env needs to be copied over as well.
if deployment_obj:
config.env = deployment_obj.config.env
destination = local_destination.LocalDestination(config)
destination.pod = pod
repo = utils.get_git_repo(pod.root)
pod.router.use_simple()
is_partial = bool(pod_paths) or bool(locale)
if pod_paths:
pod_paths = [pod.clean_pod_path(path) for path in pod_paths]
pod.router.add_pod_paths(pod_paths)
elif routes_file:
pod.router.from_data(pod.read_json(routes_file))
else:
pod.router.add_all()
if locale:
pod.router.filter('whitelist', locales=list(locale))
# Shard the routes when using sharding.
if shards and shard:
is_partial = True
pod.router.shard(shards, shard)
# Preload the documents used by the paths after filtering.
docs_loader.DocsLoader.load_from_routes(pod, pod.router.routes)
paths = pod.router.routes.paths
content_generator = renderer.Renderer.rendered_docs(
pod, pod.router.routes, source_dir=work_dir,
use_threading=threaded)
stats_obj = stats.Stats(pod, paths=paths)
destination.deploy(
content_generator, stats=stats_obj, repo=repo, confirm=False,
test=False, is_partial=is_partial)
pod.podcache.write()
except bulk_errors.BulkErrors as err:
# Write the podcache files even when there are rendering errors.
pod.podcache.write()
bulk_errors.display_bulk_errors(err)
raise click.Abort()
except pods.Error as err:
raise click.ClickException(str(err))
if locate_untranslated:
pod.translation_stats.pretty_print()
destination.export_untranslated_catalogs()
return pod
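# Illustrative invocations (comment only) using flags declared above; option
# spellings contributed by the shared.* decorators are not spelled out here.
#
#   grow build /path/to/pod
#   grow build /path/to/pod --clear-cache --locate-untranslated
#   grow build /path/to/pod --pod-path /content/pages/index.yaml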
| mit | -689,199,156,016,179,200 | 39.388889 | 80 | 0.65039 | false |
praekelt/go-contacts-api | go_contacts/backends/utils.py | 1 | 1634 | from vumi.persist.model import VumiRiakError
from go_api.collections.errors import CollectionUsageError
from go_api.queue import PausingQueueCloseMarker
from twisted.internet.defer import inlineCallbacks, returnValue
@inlineCallbacks
def _get_page_of_keys(
model_proxy, user_account_key, max_results, cursor,
field_name='user_account'):
try:
contact_keys = yield model_proxy.index_keys_page(
field_name, user_account_key, max_results=max_results,
continuation=cursor)
except VumiRiakError:
raise CollectionUsageError(
"Riak error, possible invalid cursor: %r" % (cursor,))
cursor = contact_keys.continuation
returnValue((cursor, contact_keys))
@inlineCallbacks
def _fill_queue(q, get_page, get_dict, close_queue=True):
keys_deferred = get_page(None)
while True:
cursor, keys = yield keys_deferred
if cursor is not None:
# Get the next page of keys while we fetch the objects
keys_deferred = get_page(cursor)
for key in keys:
obj = yield get_dict(key)
yield q.put(obj)
if cursor is None:
break
if close_queue:
q.put(PausingQueueCloseMarker())
@inlineCallbacks
def _get_smart_page_of_keys(model_proxy, max_results, cursor, query):
if cursor is None:
cursor = 0
contact_keys = yield model_proxy.real_search(
query, rows=max_results, start=cursor)
if len(contact_keys) == 0:
new_cursor = None
else:
new_cursor = cursor + len(contact_keys)
returnValue((new_cursor, contact_keys))
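# Illustrative wiring (comment only) of the helpers above; `contact_proxy` and
# `contact_to_dict` are hypothetical stand-ins for the caller's objects, and a
# plain Twisted DeferredQueue stands in for the go_api queue.
#
#   from twisted.internet.defer import DeferredQueue
#   q = DeferredQueue()
#   get_page = lambda cursor: _get_page_of_keys(contact_proxy, account_key, 100, cursor)
#   d = _fill_queue(q, get_page, contact_to_dict)   # closes q with PausingQueueCloseMarker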
| bsd-3-clause | -5,106,374,650,340,650,000 | 29.259259 | 69 | 0.654223 | false |
phr4nz/dproxify | setup.py | 1 | 1331 | # setup.py : dproxify setup
# Written by Francesco Palumbo aka phranz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from distutils.core import setup
import sys
sys.path.append('dproxify')
from pkginfo import *
setup(
name = MODNAME,
version = VERSION,
author = AUTHOR,
author_email = EMAIL,
url = REPO,
license = LICENSE,
description = DESCRIPTION,
long_description= ''.join(open('README.rst').readlines()),
classifiers = CLASSIFIERS,
packages = [MODNAME],
data_files = [('/usr/share/doc/' + MODNAME + '-' + VERSION, ['LICENSE']),
('/usr/share/doc/' + MODNAME + '-' + VERSION, ['README.rst']),]
)
| gpl-3.0 | -6,153,610,981,818,929,000 | 33.128205 | 86 | 0.652893 | false |
arnaudbore/declarativeTask | src/ld_extractInfo_v2.py | 1 | 1839 | import sys
import ast
from config import dataFolder
from expyriment.misc import data_preprocessing
import numpy as np
agg = data_preprocessing.Aggregator(data_folder=dataFolder,
file_name=sys.argv[1])
print 'Variable computed: '
data = {}
for variable in agg.variables:
data[variable] = agg.get_variable_data(variable)
indexBlocks = np.unique(data['NBlock'])
for block in indexBlocks:
print 'Block {}'.format(block)
correctAnswers = np.logical_and(data['Picture']==data['Answers'], data['NBlock']==block)
wrongAnswers = np.logical_and(data['Picture']!=data['Answers'], data['NBlock']==block)
correctRT = [int(i) for i in data['RT'][correctAnswers]]
print 'Correct answers: {}'.format(len(correctRT))
print 'Mean correct RT: {} ms'.format(np.mean(correctRT))
# Read Header
header = data_preprocessing.read_datafile(dataFolder + sys.argv[1], only_header_and_variable_names=True)
header = header[3].split('\n#e ')
# Get Matrix
matrixPictures = ast.literal_eval(header[header.index('Positions pictures:')+1].split('\n')[0].split('\n')[0])
print '############################'
print 'Last Block'
print ''
print 'Pictures found:'
print 'Name Position'
names = []
for idx, val in enumerate(correctAnswers):
if val:
print data['Answers'][idx][0], matrixPictures.index(data['Answers'][idx])
names.append(data['Answers'][idx][0])
aList = [ word for word in names if word[0]=='a']
cList = [ word for word in names if word[0]=='c']
fList = [ word for word in names if word[0]=='f']
vList = [ word for word in names if word[0]=='v']
print '############################'
print 'Category animals: ' + str(len(aList))
print 'Category clothes: ' + str(len(cList))
print 'Category fruits: ' + str(len(fList))
print 'Category vehicules: ' + str(len(vList))
| mit | -8,856,084,287,288,248,000 | 31.839286 | 110 | 0.653072 | false |
foobarbazblarg/stayclean | stayclean-2018-october/display-final-after-month-is-over.py | 1 | 3056 | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/9t9kh3/stay_clean_november_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| mit | -5,068,722,511,293,555,000 | 43.289855 | 199 | 0.682264 | false |
willettk/decals | python/decals_analysis.py | 1 | 23642 | # Do some preliminary analysis on the results of the DECaLS-Galaxy Zoo data.
from astropy.io import fits
from astropy.cosmology import WMAP9
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
from collections import Counter
import numpy as np
import re
import pandas as pd
from gz_class import plurality
gzpath = '/Users/willettk/Astronomy/Research/GalaxyZoo'
decals_path = '{0}/decals'.format(gzpath)
plot_path = '{0}/plots'.format(decals_path)
"""
decals_analysis
=========
Make plots and start analysis on the collated Galaxy Zoo-DECaLS data.
Kyle Willett (UMN) - 10 Dec 2015
"""
def load_data():
mgs = fits.getdata('{0}/matched/gz2_main.fits'.format(decals_path),1)
s82 = fits.getdata('{0}/matched/gz2_s82_coadd1.fits'.format(decals_path),1)
decals = fits.getdata('{0}/matched/decals_dr1.fits'.format(decals_path),1)
return mgs,s82,decals
def color_mag_plots(mgs,s82,decals,savefig=False):
# Make paneled histograms of the color distribution for several magnitude bins of Galaxy Zoo data.
"""
SDSS main sample (GZ2)
Stripe 82 coadded data (GZ2)
DECaLS
"""
redshifts = (0.12,0.08,0.05)
appmag_lim = 17.0
# Work out the magnitude limit from cosmology
fig,axarr = plt.subplots(num=1,nrows=3,ncols=3,figsize=(12,10))
for z,ax in zip(redshifts,axarr.ravel()):
absmag_lim = appmag_lim - WMAP9.distmod(z).value
maglim = (mgs['PETROMAG_MR'] < absmag_lim) & (mgs['REDSHIFT'] <= z)
spiral = mgs['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'] >= 0.8
elliptical = mgs['t01_smooth_or_features_a01_smooth_weighted_fraction'] >= 0.8
ax.hist(mgs[maglim & spiral]['PETROMAG_U'] - mgs[maglim & spiral]['PETROMAG_R'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral')
ax.hist(mgs[maglim & elliptical]['PETROMAG_U'] - mgs[maglim & elliptical]['PETROMAG_R'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical')
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16)
ax.text(0.95,0.95,'MGS',ha='right',va='top',transform=ax.transAxes)
if ax == axarr.ravel()[0]:
ax.legend(loc='upper left',fontsize=10)
s82_lim = 17.77
for z,ax in zip(redshifts,axarr.ravel()[3:6]):
absmag_lim = s82_lim - WMAP9.distmod(z).value
maglim = (s82['PETROMAG_MR'] < absmag_lim) & (s82['REDSHIFT'] <= z)
spiral = s82['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'] >= 0.8
elliptical = s82['t01_smooth_or_features_a01_smooth_weighted_fraction'] >= 0.8
ax.hist(s82[maglim & spiral]['PETROMAG_U'] - s82[maglim & spiral]['PETROMAG_R'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral')
ax.hist(s82[maglim & elliptical]['PETROMAG_U'] - s82[maglim & elliptical]['PETROMAG_R'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical')
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16)
ax.text(0.95,0.95,'Stripe 82',ha='right',va='top',transform=ax.transAxes)
decals_lim = 17.77
for z,ax in zip(redshifts,axarr.ravel()[6:]):
absmag_lim = decals_lim - WMAP9.distmod(z).value
maglim = (decals['metadata.mag.abs_r'] < absmag_lim) & (decals['metadata.redshift'] <= z)
spiral = decals['t00_smooth_or_features_a1_features_frac'] >= 0.8
elliptical = decals['t00_smooth_or_features_a0_smooth_frac'] >= 0.8
ax.hist(decals[maglim & spiral]['metadata.mag.u'] - decals[maglim & spiral]['metadata.mag.r'],range=(0,4),bins=25,color='blue',histtype='step',label='spiral')
ax.hist(decals[maglim & elliptical]['metadata.mag.u'] - decals[maglim & elliptical]['metadata.mag.r'],range=(0,4),bins=25,color='red',histtype='step',label='elliptical')
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_title(r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z),fontsize=16)
ax.text(0.95,0.95,'DECaLS',ha='right',va='top',transform=ax.transAxes)
fig.tight_layout()
if savefig:
plt.savefig('{0}/color_hist.pdf'.format(plot_path))
else:
plt.show()
return None
def color_mag_ratio(mgs,s82,decal,savefig=False):
# Plot the spiral to elliptical ratio as a function of optical color.
redshifts = (0.12,0.08,0.05)
linestyles = ('solid','dashed','dashdot')
datasets = ({'data':mgs,
'title':'MGS',
'appmag':17.0,
'sp':'t01_smooth_or_features_a02_features_or_disk_weighted_fraction',
'el':'t01_smooth_or_features_a01_smooth_weighted_fraction',
'umag':'PETROMAG_U',
'rmag':'PETROMAG_R',
'absr':'PETROMAG_MR',
'redshift':'REDSHIFT'},
{'data':s82,
'title':'Stripe 82',
'appmag':17.77,
'sp':'t01_smooth_or_features_a02_features_or_disk_weighted_fraction',
'el':'t01_smooth_or_features_a01_smooth_weighted_fraction',
'umag':'PETROMAG_U',
'rmag':'PETROMAG_R',
'absr':'PETROMAG_MR',
'redshift':'REDSHIFT'},
{'data':decals,
'title':'DECaLS',
'appmag':17.77,
'sp':'t00_smooth_or_features_a1_features_frac',
'el':'t00_smooth_or_features_a0_smooth_frac',
'umag':'metadata.mag.u',
'rmag':'metadata.mag.r',
'absr':'metadata.mag.abs_r',
'redshift':'metadata.redshift'})
# Work out the magnitude limit from cosmology
fig,axarr = plt.subplots(num=2,nrows=1,ncols=3,figsize=(12,5))
for ax,d in zip(axarr.ravel(),datasets):
for z,ls in zip(redshifts,linestyles):
absmag_lim = d['appmag'] - WMAP9.distmod(z).value
maglim = (d['data'][d['absr']] < absmag_lim) & (d['data'][d['redshift']] <= z)
spiral = d['data'][d['sp']] >= 0.8
elliptical = d['data'][d['el']] >= 0.8
n_sp,bins_sp = np.histogram(d['data'][maglim & spiral][d['umag']] - d['data'][maglim & spiral][d['rmag']],range=(0,4),bins=25)
n_el,bins_el = np.histogram(d['data'][maglim & elliptical][d['umag']] - d['data'][maglim & elliptical][d['rmag']],range=(0,4),bins=25)
plotval = np.log10(n_sp * 1./n_el)
ax.plot(bins_sp[1:],plotval,linestyle=ls,label=r'$M_r<{0:.2f}, z<{1:.2f}$'.format(absmag_lim,z))
ax.set_xlabel(r'$(u-r)$',fontsize=16)
ax.set_ylabel(r'$\log(n_{sp}/n_{el})$',fontsize=16)
ax.set_ylim(-1.5,1.5)
ax.set_title(d['title'],fontsize=16)
if ax == axarr.ravel()[0]:
ax.legend(loc='upper left',fontsize=8)
fig.tight_layout()
if savefig:
plt.savefig('{0}/feature_ratio.pdf'.format(plot_path))
else:
plt.show()
return None
def feature_comparison(savefig=False):
# Plot the difference in vote fractions for the matched galaxies
filename = '{0}/fits/decals_gz2_union.fits'.format(decals_path)
data = fits.getdata(filename,1)
# Map the columns
matched_cols = [{'title':'smooth', 'gz2':"gz2_t01_smooth_or_features_a01_smooth_fraction", "decals":"decals_t00_smooth_or_features_a0_smooth_frac"},
{'title':'features/disk', 'gz2':"gz2_t01_smooth_or_features_a02_features_or_disk_fraction", "decals":"decals_t00_smooth_or_features_a1_features_frac"},
{'title':'star', 'gz2':"gz2_t01_smooth_or_features_a03_star_or_artifact_fraction", "decals":"decals_t00_smooth_or_features_a2_artifact_frac"},
{'title':'edge-on', 'gz2':"gz2_t02_edgeon_a04_yes_fraction", "decals":"decals_t01_disk_edge_on_a0_yes_frac"},
{'title':'not edge-on', 'gz2':"gz2_t02_edgeon_a05_no_fraction", "decals":"decals_t01_disk_edge_on_a1_no_frac"},
{'title':'bar', 'gz2':"gz2_t03_bar_a06_bar_fraction", "decals":"decals_t02_bar_a0_bar_frac"},
{'title':'no bar', 'gz2':"gz2_t03_bar_a07_no_bar_fraction", "decals":"decals_t02_bar_a1_no_bar_frac"},
{'title':'spiral', 'gz2':"gz2_t04_spiral_a08_spiral_fraction", "decals":"decals_t03_spiral_a0_spiral_frac"},
{'title':'no spiral', 'gz2':"gz2_t04_spiral_a09_no_spiral_fraction", "decals":"decals_t03_spiral_a1_no_spiral_frac"},
{'title':'no bulge', 'gz2':"gz2_t05_bulge_prominence_a10_no_bulge_fraction", "decals":"decals_t04_bulge_prominence_a0_no_bulge_frac"},
{'title':'medium bulge', 'gz2':"gz2_t05_bulge_prominence_a11_just_noticeable_fraction", "decals":"decals_t04_bulge_prominence_a1_obvious_frac"},
{'title':'obvious bulge', 'gz2':"gz2_t05_bulge_prominence_a12_obvious_fraction", "decals":"decals_t04_bulge_prominence_a2_dominant_frac"},
{'title':'completely round', 'gz2':"gz2_t07_rounded_a16_completely_round_fraction", "decals":"decals_t08_rounded_a0_completely_round_frac"},
{'title':'in between', 'gz2':"gz2_t07_rounded_a17_in_between_fraction", "decals":"decals_t08_rounded_a1_in_between_frac"},
{'title':'cigar shaped', 'gz2':"gz2_t07_rounded_a18_cigar_shaped_fraction", "decals":"decals_t08_rounded_a2_cigar_shaped_frac"},
{'title':'ring', 'gz2':"gz2_t08_odd_feature_a19_ring_fraction", "decals":"decals_t10_odd_feature_x1_ring_frac"},
{'title':'lens/arc', 'gz2':"gz2_t08_odd_feature_a20_lens_or_arc_fraction", "decals":"decals_t10_odd_feature_x2_lens_frac"},
{'title':'irregular', 'gz2':"gz2_t08_odd_feature_a22_irregular_fraction", "decals":"decals_t10_odd_feature_x4_irregular_frac"},
{'title':'other', 'gz2':"gz2_t08_odd_feature_a23_other_fraction", "decals":"decals_t10_odd_feature_x5_other_frac"},
{'title':'dust lane', 'gz2':"gz2_t08_odd_feature_a38_dust_lane_fraction", "decals":"decals_t10_odd_feature_x3_dustlane_frac"},
{'title':'rounded bulge', 'gz2':"gz2_t09_bulge_shape_a25_rounded_fraction", "decals":"decals_t07_bulge_shape_a0_rounded_frac"},
{'title':'boxy bulge', 'gz2':"gz2_t09_bulge_shape_a26_boxy_fraction", "decals":"decals_t07_bulge_shape_a1_boxy_frac"},
{'title':'no bulge', 'gz2':"gz2_t09_bulge_shape_a27_no_bulge_fraction", "decals":"decals_t07_bulge_shape_a2_no_bulge_frac"},
{'title':'tight arms', 'gz2':"gz2_t10_arms_winding_a28_tight_fraction", "decals":"decals_t05_arms_winding_a0_tight_frac"},
{'title':'medium arms', 'gz2':"gz2_t10_arms_winding_a29_medium_fraction", "decals":"decals_t05_arms_winding_a1_medium_frac"},
{'title':'loose arms', 'gz2':"gz2_t10_arms_winding_a30_loose_fraction", "decals":"decals_t05_arms_winding_a2_loose_frac"},
{'title':'1 arm', 'gz2':"gz2_t11_arms_number_a31_1_fraction", "decals":"decals_t06_arms_number_a0_1_frac"},
{'title':'2 arms', 'gz2':"gz2_t11_arms_number_a32_2_fraction", "decals":"decals_t06_arms_number_a1_2_frac"},
{'title':'3 arms', 'gz2':"gz2_t11_arms_number_a33_3_fraction", "decals":"decals_t06_arms_number_a2_3_frac"},
{'title':'4 arms', 'gz2':"gz2_t11_arms_number_a34_4_fraction", "decals":"decals_t06_arms_number_a3_4_frac"},
{'title':'5+ arms', 'gz2':"gz2_t11_arms_number_a36_more_than_4_fraction", "decals":"decals_t06_arms_number_a4_more_than_4_frac"}]
# Working, but still needs to sort for questions that are ACTUALLY ANSWERED. Lots of pileup at 0,0.
columns = data.columns
decals_fraccols,gz2_fraccols = [],[]
for c in columns:
colname = c.name
if len(colname) > 6:
if colname[-4:] == 'frac' and colname[:6] == 'decals':
decals_fraccols.append(c)
if len(colname) > 17:
if colname[-8:] == 'fraction' and colname[-17:] != "weighted_fraction" and colname[:3] == 'gz2':
gz2_fraccols.append(c)
decals_votearr = data.from_columns(decals_fraccols)
gz2_votearr = data.from_columns(gz2_fraccols)
decals_tasks,gz2_tasks = [],[]
for v in decals_votearr:
e_decals,a_decals = plurality(np.array(list(v)),'decals')
decals_tasks.append(e_decals)
for v in gz2_votearr:
e_gz2,a_gz2 = plurality(np.array(list(v)),'gz2')
gz2_tasks.append(e_gz2)
fig,axarr = plt.subplots(num=1,nrows=4,ncols=8,figsize=(16,10))
nrows = axarr.shape[0]
ncols = axarr.shape[1]
def plot_features(ax,taskno,indices):
plotind = indices.flatten()
ax.hist2d(data[matched_cols[taskno]['gz2']][plotind],data[matched_cols[taskno]['decals']][plotind],bins=(20,20),range=[[0,1],[0,1]],norm=LogNorm(),cmap = cm.viridis)
ax.plot([0,1],[0,1],linestyle='--',color='red',lw=2)
ax.set_title(matched_cols[taskno]['title'],fontsize=8)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_xlabel(r'$f_{GZ2}$',fontsize=10)
ax.set_ylabel(r'$f_{DECaLS}$',fontsize=10)
ax.set_aspect('equal')
# Smooth/features
answers_per_task = [3,2,2,2,3,3,5,3,3,5]
match_tasks = [[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3],
[ 4, 4],
[ 6, 8],
[ 7,10],
[ 8, 7],
[ 9, 5],
[10, 6]]
n = 0
for a,m in zip(answers_per_task,match_tasks):
inds = np.array(([np.array(decals_tasks)[:,m[1]] == True])) & np.array(([np.array(gz2_tasks)[:,m[0]] == True]))
for i in range(a):
plot_features(axarr.ravel()[n],n,inds)
n += 1
'''
for i in range(nrows):
ax = axarr.ravel()[i*ncols]
ax.set_ylabel(r'$f_{GZ2}$',fontsize=10)
for i in range(ncols):
ax = axarr.ravel()[(nrows - 1)*ncols + i]
ax.set_xlabel(r'$f_{DECaLS}$',fontsize=10)
'''
for di in range((nrows*ncols)-n):
fig.delaxes(axarr.ravel()[(nrows*ncols)-(di+1)])
fig.tight_layout()
if savefig:
plt.savefig('{0}/decals_gz2_feature_comparison.pdf'.format(plot_path))
else:
plt.show()
return None
def survey_dict():
# Information about the specific group settings in the project
d = {u'candels': {'name':u'CANDELS','retire_limit':80},
u'candels_2epoch': {'name':u'CANDELS 2-epoch','retire_limit':80},
u'decals': {'name':u'DECaLS','retire_limit':40},
u'ferengi': {'name':u'FERENGI','retire_limit':40},
u'goods_full': {'name':u'GOODS full-depth','retire_limit':40},
u'illustris': {'name':u'Illustris','retire_limit':40},
u'sloan_singleband':{'name':u'SDSS single-band','retire_limit':40},
u'ukidss': {'name':u'UKIDSS','retire_limit':40},
#u'sloan': {'name':u'SDSS DR8','retire_limit':60},
u'stripe82': {'name':u'Stripe 82','retire_limit':40},
u'gz2': {'name':u'SDSS DR7','retire_limit':40}}
return d
def is_number(s):
# Is a string a representation of a number?
try:
int(s)
return True
except ValueError:
return False
def morphology_distribution(survey='decals'):
# What's the plurality distribution of morphologies?
try:
collation_file = "{0}/gz_reduction_sandbox/data/decals_unweighted_classifications_00.csv".format(gzpath)
collated = pd.read_csv(collation_file)
except IOError:
print "Collation file for {0:} does not exist. Aborting.".format(survey)
return None
columns = collated.columns
fraccols,colnames = [],[]
for c in columns:
if c[-4:] == 'frac':
fraccols.append(c)
if c[0] == 't' and is_number(c[1:3]):
colnames.append(c[:3])
collist = list(set(colnames))
collist.sort()
# Plot distribution of vote fractions for each task
ntasks = len(collist)
ncols = 4 if ntasks > 9 else int(np.sqrt(ntasks))
nrows = int(ntasks / ncols) if ntasks % ncols == 0 else int(ntasks / ncols) + 1
sd = survey_dict()[survey]
survey_name = sd['name']
def f7(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
tasklabels = f7([re.split("[ax][0-9]",f)[0][11:-1] for f in fraccols])
labels = [re.split("[ax][0-9]",f)[-1][1:-5] for f in fraccols]
# Make pie charts of the plurality votes
votearr = np.array(collated[fraccols])
class_arr,task_arr,task_ans = [],[],[]
for v in votearr:
e,a = plurality(v,survey)
task_arr.append(e)
task_ans.append(a)
task_arr = np.array(task_arr)
task_ans = np.array(task_ans)
fig,axarr = plt.subplots(nrows=nrows,ncols=ncols,figsize=(15,12))
colors=[u'#377EB8', u'#E41A1C', u'#4DAF4A', u'#984EA3', u'#FF7F00',u'#A6761D',u'#1B9E77']
n = (task_arr.shape)[1]
for i in range(n):
ax = axarr.ravel()[i]
c = Counter(task_ans[:,i][task_arr[:,i] == True])
pv,pl = [],[]
for k in c:
pv.append(c[k])
pl.append(labels[k])
ax.pie(pv,labels=pl,colors=colors,autopct=lambda(p): '{:.0f}'.format(p * sum(pv) / 100))
title = '{0:} - t{1:02} {2:}'.format(survey_name,i,tasklabels[i]) if i == 0 else 't{0:02} {1:}'.format(i,tasklabels[i])
ax.set_title(title)
ax.set_aspect('equal')
# Remove empty axes from subplots
if axarr.size > ntasks:
for i in range(axarr.size - ntasks):
ax = axarr.ravel()[axarr.size-(i+1)]
ax.set_axis_off()
fig.set_tight_layout(True)
plt.savefig('{1}/decals/plots/pie_{0:}.eps'.format(survey,gzpath))
plt.close()
return None
def morph_table_gz2():
# Print LaTeX-formatted tables of the GZ vote counts and fractions, and plot as pie chart.
overlap = True
survey = 'decals'
# Get weights
try:
fitsfile = "{0}/dr10/dr10_gz2_main_specz.fits".format(gzpath)
hdr = fits.getheader(fitsfile,1)
colnames = []
for i in range(hdr['TFIELDS']):
colnames.append(hdr['TTYPE{0}'.format(i+1)])
if overlap:
if survey == 'gz2':
collation_file = "{0}/decals/csv/decals_gz2_main.csv".format(gzpath)
elif survey == 'stripe82':
collation_file = "{0}/decals/csv/decals_gz2_stripe82c1.csv".format(gzpath)
elif survey == 'decals':
collation_file = "{0}/decals/csv/decals_gz2_union.csv".format(gzpath)
collated = pd.read_csv(collation_file)
else:
if survey == 'gz2':
collation_file = "{0}/dr10/dr10_gz2_main_specz.csv".format(gzpath)
elif survey == 'stripe82':
collation_file = "{0}/dr10/dr10_gz2_stripe82_coadd1.csv".format(gzpath)
collated = pd.read_csv(collation_file,names=colnames)
except IOError:
print "Collation file for {0:} does not exist. Aborting.".format(survey)
return None
columns = collated.columns
fraccols,colnames = [],[]
if survey == 'decals':
for c in columns:
if len(c) > 10:
if c[-4:] == 'frac' and c[:6] == 'decals':
fraccols.append(c)
if c[7] == 't' and is_number(c[8:10]):
colnames.append(c[7:10])
else:
for c in columns:
if c[-17:] == 'weighted_fraction':
fraccols.append(c)
if c[0] == 't' and is_number(c[1:3]):
colnames.append(c[:3])
collist = list(set(colnames))
collist.sort()
# Plot distribution of vote fractions for each task
ntasks = len(collist)
ncols = 4 if ntasks > 9 else int(np.sqrt(ntasks))
nrows = int(ntasks / ncols) if ntasks % ncols == 0 else int(ntasks / ncols) + 1
sd = survey_dict()[survey]
survey_name = sd['name']
def f7(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
if survey == 'decals':
tasklabels = f7([re.split("[ax][0-9]",f)[0][11:-1] for f in fraccols])
labels = [re.split("[ax][0-9]",f)[-1][1:-5] for f in fraccols]
else:
tasklabels = f7([re.split("[ax][0-9]",f)[0][4:-1] for f in fraccols])
labels = [re.split("[ax][0-9]",f[4:-18])[-1][2:] for f in fraccols]
# Make pie charts of the plurality votes
votearr = np.array(collated[fraccols])
class_arr,task_arr,task_ans = [],[],[]
for v in votearr:
e,a = plurality(v,survey)
task_arr.append(e)
task_ans.append(a)
task_arr = np.array(task_arr)
task_ans = np.array(task_ans)
fig,axarr = plt.subplots(nrows=nrows,ncols=ncols,figsize=(15,12))
colors=[u'#377EB8', u'#E41A1C', u'#4DAF4A', u'#984EA3', u'#FF7F00',u'#A6761D',u'#1B9E77']
n = (task_arr.shape)[1]
for i in range(n):
ax = axarr.ravel()[i]
c = Counter(task_ans[:,i][task_arr[:,i] == True])
pv,pl = [],[]
task_total = sum(c.values())
for k in c:
pv.append(c[k])
pl.append(labels[k])
# Print to screen in LaTeX format
print "{0:20} & {1:6} & {3:.2f} & {2:.2f}".format(labels[k],c[k],c[k] * 1./task_total,c[k] * 1./len(collated))
print ""
ax.pie(pv,labels=pl,colors=colors,autopct=lambda(p): '{:.0f}'.format(p * sum(pv) / 100))
title = '{0:} - t{1:02} {2:}'.format(survey_name,i,tasklabels[i]) if i == 0 else 't{0:02} {1:}'.format(i,tasklabels[i])
ax.set_title(title)
ax.set_aspect('equal')
# Remove empty axes from subplots
if axarr.size > ntasks:
for i in range(axarr.size - ntasks):
ax = axarr.ravel()[axarr.size-(i+1)]
ax.set_axis_off()
fig.set_tight_layout(True)
suffix = '_overlap' if overlap else ''
plt.savefig('{1}/decals/plots/pie_{0}{2}.eps'.format(survey,gzpath,suffix))
plt.close()
return None
if __name__ == "__main__":
mgs,s82,decals = load_data()
#color_mag_plots(mgs,s82,decals,savefig=True)
#color_mag_ratio(mgs,s82,decals,savefig=True)
#feature_comparison(savefig=True)
| mit | -1,780,753,696,382,251,300 | 43.691871 | 184 | 0.550207 | false |
jaumemarti/l10n-spain-txerpa | l10n_es_aeat/wizard/export_to_boe.py | 1 | 7887 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2011
# Pexego Sistemas Informáticos. (http://pexego.es)
# Luis Manuel Angueira Blanco (Pexego)
#
# Copyright (C) 2013
# Ignacio Ibeas - Acysos S.L. (http://acysos.com)
# Migration to OpenERP 7.0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
class L10nEsAeatReportExportToBoe(orm.TransientModel):
_name = "l10n.es.aeat.report.export_to_boe"
_description = "Export Report to BOE Format"
_columns = {
'name': fields.char('File name', readonly=True),
'data': fields.binary('File', readonly=True),
'state': fields.selection([('open', 'open'), # open wizard
('get', 'get')]), # get file
}
_defaults = {
'state': 'open',
}
def _formatString(self, text, length, fill=' ', align='<'):
"""
Formats the string into a fixed length ASCII (iso-8859-1) record.
Note:
'Todos los campos alfanuméricos y alfabéticos se presentarán
alineados a la izquierda y rellenos de blancos por la derecha,
en mayúsculas sin caracteres especiales, y sin vocales acentuadas.
Para los caracteres específicos del idioma se utilizará la
codificación ISO-8859-1. De esta forma la letra “Ñ” tendrá el
valor ASCII 209 (Hex. D1) y la “Ç”(cedilla mayúscula) el valor
ASCII 199 (Hex. C7).'
"""
if not text:
return fill * length
# Replace accents and convert to upper
from unidecode import unidecode
text = unidecode(unicode(text))
text = text.upper()
ascii_string = text.encode('iso-8859-1')
# Cut the string if it is too long
if len(ascii_string) > length:
ascii_string = ascii_string[:length]
# Format the string
if align == '<':
ascii_string = ascii_string.ljust(length, fill)
elif align == '>':
ascii_string = ascii_string.rjust(length, fill)
else:
assert False, _('Wrong aling option. It should be < or >')
# Sanity-check
assert len(ascii_string) == length, \
_("The formated string must match the given length")
# Return string
return ascii_string
def _formatNumber(self, number, int_length, dec_length=0,
include_sign=False):
"""Formats the number into a fixed length ASCII (iso-8859-1) record.
Note:
'Todos los campos numéricos se presentarán alineados a la derecha
y rellenos a ceros por la izquierda sin signos y sin empaquetar.'
(http://www.boe.es/boe/dias/2008/10/23/pdfs/A42154-42190.pdf)
"""
# Separate the number parts (-55.23 => int_part=55, dec_part=0.23,
# sign='N')
if number == '':
number = 0.0
number = float(number)
sign = number >= 0 and ' ' or 'N'
number = abs(number)
int_part = int(number)
# Format the string
ascii_string = ''
if include_sign:
ascii_string += sign
if dec_length > 0:
ascii_string += '%0*.*f' % (int_length + dec_length + 1,
dec_length, number)
ascii_string = ascii_string.replace('.', '')
elif int_length > 0:
ascii_string += '%.*d' % (int_length, int_part)
# Sanity-check
assert len(ascii_string) == (include_sign and 1 or 0) + int_length + \
dec_length, _("The formated string must match the given length")
# Return the string
return ascii_string
def _formatBoolean(self, value, yes='X', no=' '):
"""
Formats a boolean value into a fixed length ASCII (iso-8859-1) record.
"""
return value and yes or no
def _get_formatted_declaration_record(self, cr, uid, report, context=None):
return ''
def _get_formatted_main_record(self, cr, uid, report, context=None):
return ''
def _get_formatted_other_records(self, cr, uid, report, context=None):
return ''
def _do_global_checks(self, report, contents, context=None):
return True
def action_get_file(self, cr, uid, ids, context=None):
"""Action that exports the data into a BOE formatted text file.
@return: Action dictionary for showing exported file.
"""
if not context.get('active_id') or not context.get('active_model'):
return False
report = self.pool[context['active_model']].browse(
cr, uid, context['active_id'], context=context)
contents = ''
# Add header record
contents += self._get_formatted_declaration_record(cr, uid, report,
context=context)
# Add main record
contents += self._get_formatted_main_record(cr, uid, report,
context=context)
# Adds other fields
contents += self._get_formatted_other_records(cr, uid, report,
context=context)
# Generate the file and save as attachment
res = base64.encodestring(contents)
file_name = _("%s_report_%s.txt") % (
report.number, time.strftime(_(DEFAULT_SERVER_DATE_FORMAT)))
# Delete old files
attachment_obj = self.pool['ir.attachment']
attachment_ids = attachment_obj.search(
cr, uid, [('name', '=', file_name),
('res_model', '=', report._model._name)],
context=context)
if attachment_ids:
attachment_obj.unlink(cr, uid, attachment_ids, context=context)
attachment_obj.create(cr, uid, {"name": file_name,
"datas": res,
"datas_fname": file_name,
"res_model": report._model._name,
"res_id": report.id,
}, context=context)
self.write(cr, uid, ids,
{'state': 'get', 'data': res, 'name': file_name},
context=context)
# Force view to be the parent one
data_obj = self.pool['ir.model.data']
result = data_obj._get_id(cr, uid, 'l10n_es_aeat',
'wizard_aeat_export')
view_id = data_obj.browse(cr, uid, result, context=context).res_id
        # TODO: allow inheriting the parent view if desired
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_mode': 'form',
'view_type': 'form',
'view_id': [view_id],
'res_id': ids[0],
'target': 'new',
}
| agpl-3.0 | -6,541,515,499,416,233,000 | 40.829787 | 79 | 0.544888 | false |
koalakoker/PGap | PGap.py | 1 | 17080 | '''
Created on 30/May/2015
@author: koala
'''
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
import pango
from NoteModel import NoteModel
import noteBrowserWidget
import os
PROGRAM_NAME = "PGap"
class PGapMain:
columnInfo = ('Note title', 'ID', 'Creation Time', 'Last Modify')
def getColumnInfo(self, istr):
        # returns the index of the first entry in columnInfo that matches istr
i = 0
pos = -1
for s in self.columnInfo:
if (s == istr):
if (pos == -1):
pos = i
i += 1
return pos
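    # Illustrative note (not in the original code): with the columnInfo tuple
    # defined above, self.getColumnInfo('ID') returns index 1.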
# close the window and quit
def delete_event(self, widget=None, event=None, data=None):
if (self.NoteStore.modified):
message = gtk.MessageDialog(type=gtk.MESSAGE_WARNING, buttons=gtk.BUTTONS_NONE)
message.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
message.add_button(gtk.STOCK_NO, gtk.RESPONSE_NO)
message.add_button(gtk.STOCK_YES, gtk.RESPONSE_YES)
message.set_default_response(gtk.RESPONSE_YES)
message.set_title("Save Notes")
message.set_markup("Notes have been modified. Save changes?")
res = message.run()
message.destroy()
if (res == gtk.RESPONSE_YES):
self.onSave(None)
elif ((res == gtk.RESPONSE_CANCEL) or (res == gtk.RESPONSE_DELETE_EVENT)):
return
gtk.main_quit()
return False
def __init__(self):
self.gladefile = "pgapgui.glade"
self.builder = gtk.Builder()
self.builder.add_from_file(self.gladefile)
self.window = self.builder.get_object("pgapui")
self.window.resize(1024,600)
self.window.show_all()
self.window.connect("delete_event", self.delete_event)
#create tagTable
self.tagTable = gtk.TextTagTable()
self.tag_bold = gtk.TextTag("Bold")
self.tag_bold.set_property("weight", pango.WEIGHT_BOLD)
self.tagTable.add(self.tag_bold)
self.tag_underline = gtk.TextTag("Underline")
self.tag_underline.set_property("underline", pango.UNDERLINE_SINGLE)
self.tagTable.add(self.tag_underline)
self.tag_italic = gtk.TextTag("Italic")
self.tag_italic.set_property("style", pango.STYLE_ITALIC)
self.tagTable.add(self.tag_italic)
self.tag_link = gtk.TextTag("Link")
self.tag_link.set_property("underline", pango.UNDERLINE_SINGLE)
self.tag_link.set_property("style", pango.STYLE_ITALIC)
color = gdk.Color(0,0,65535) #Link color
self.tag_link.set_property("foreground-gdk", color)
self.tag_link.connect("event", self.tagLinkEvent)
self.tagTable.add(self.tag_link)
self.tag_hidden = gtk.TextTag("Hidden")
self.tag_hidden.set_property("underline", pango.UNDERLINE_SINGLE)
self.tag_hidden.set_property("style", pango.STYLE_ITALIC)
color = gdk.Color(65535,0,0) #Link color
self.tag_hidden.set_property("foreground-gdk", color)
self.tag_hidden.set_property("invisible", True)
self.tagTable.add(self.tag_hidden)
        # create a TreeStore with one string column to use as the model
self.NoteStore = NoteModel(self.tagTable)
self.NoteStore.connect('modified_title', self.onTitleChanged)
self.NoteStore.connect('modified_text', self.onTextChanged)
#Populate
# self.NoteStore.populate()
# create the TreeView using NoteStore
self.treeview = self.builder.get_object("treeview")
self.treeview.set_model(self.NoteStore)
self.treeviewSelection = self.treeview.get_selection()
self.tvcolumn = []
self.cell = []
for i in range(len(self.columnInfo)):
# create the TreeViewColumn to display the data
self.tvcolumn.append(gtk.TreeViewColumn(self.columnInfo[i]))
# add tvcolumn to treeview
self.treeview.append_column(self.tvcolumn[i])
# create a CellRendererText to render the data
self.cell.append(gtk.CellRendererText())
# add the cell to the tvcolumn and allow it to expand
self.tvcolumn[i].pack_start(self.cell[i], True)
            # set the cell "text" attribute to column i - retrieve text
            # from that column in NoteStore
self.tvcolumn[i].add_attribute(self.cell[i], 'text', i)
            # Allow sorting on the column
self.tvcolumn[i].set_sort_column_id(i)
            # Allow resizing of the column
self.tvcolumn[i].set_resizable(True)
# make it searchable
self.treeview.set_search_column(0)
# Allow drag and drop reordering of rows
self.treeview.set_reorderable(True)
        # update column view according to the glade file
self.updateColumnView(None)
#Make first column of TreeView editable (Note name)
col = self.treeview.get_column(0)
renderer = col.get_cell_renderers()[0]
renderer.set_property('editable', True)
renderer.connect('edited', self.cell_edited_callback)
self.textview = self.builder.get_object("textview")
self.noteSelect(0)
self.keyCtrlPressed = False
self.fileSelected = None
self.updateTitle()
# Clipboard
self.clipboard = gtk.Clipboard()
# Note Browser Widget
self.noteBrowser = noteBrowserWidget.noteBrowserWidget(self.gladefile, self.NoteStore)
handlers = { "onDeleteWindow": gtk.main_quit,
"onNewButton": self.onTestClk,
"onDeleteButton": self.onTestClk,
"onTestClk": self.onTestClk,
"on_ID_toggled": self.updateColumnView,
"on_Last modify_toggled": self.updateColumnView,
"on_Creation Time_toggled": self.updateColumnView,
"keypress": self.onKeyPress,
"keyrelease": self.onKeyRelease,
"mousemove": self.onMouseMove,
"onCursorChanged": self.onNoteSelectionChange,
"onNewNote": self.onNewNote,
"on_BIU_button_clicked": self.on_BIU_button_clicked,
"onSave" : self.onSave,
"onSaveAs" : self.onSaveAs,
"onOpen" : self.onOpen,
"onQuit" : self.delete_event
}
self.builder.connect_signals(handlers)
def onTitleChanged(self, NoteModel):
self.updateTitle()
def onTextChanged(self, NoteModel):
piter = self.getNoteSelected()
path = NoteModel.get_path(piter)
self.NoteStore.emit("row-changed", path, piter)
def updateTitle(self):
fileSelected = self.fileSelected
modIndincator = ""
if (self.NoteStore.modified):
modIndincator = "*"
newTitle = PROGRAM_NAME
if (fileSelected != None):
newTitle += " - " + fileSelected + modIndincator
else:
newTitle += " - new notebook" + modIndincator
self.window.set_title(newTitle)
def updateColumnView(self, CheckMenuItem):
for i in range(len(self.columnInfo)):
itm = self.builder.get_object(self.columnInfo[i])
if (itm != None):
self.tvcolumn[i].set_visible(itm.get_active())
def on_BIU_button_clicked(self, button, tag = None):
if (tag == None):
# Select Tag from button clicked
# Note: Tag name must be set in the button label
tag = self.tagTable.lookup(button.get_label())
try:
bounds = self.textbuffer.get_selection_bounds()
if len(bounds) != 0:
start, end = bounds
                # Verify if the tag is a link (in this case the user has to select the link, e.g. a Note ID)
if (tag == self.tag_link):
if (self.textbuffer.isTagSelected(start, self.tag_link)):
self.textbuffer.removeLink(self.tag_link, self.tag_hidden, start)
else:
noteID = self.noteBrowser.run()
if (noteID != 0):
self.textbuffer.addLink(self.tag_link, self.tag_hidden, start, end, "#" + str(noteID))
else:
self.textbuffer.toggleTag(tag, start, end)
except AttributeError:
pass
def onKeyEsc(self):
try:
bounds = self.textbuffer.get_selection_bounds()
if len(bounds) != 0:
tb = self.textbuffer
tb.select_range(tb.get_end_iter(),tb.get_end_iter())
return
except:
pass
self.delete_event()
def onKeyRelease(self, widget, event):
keyPressName = gdk.keyval_name(event.keyval)
if ((keyPressName == "Control_L") or (keyPressName == "Control_R")):
self.keyCtrlPressed = False
def onKeyPress(self, widget, event):
keyPressName = gdk.keyval_name(event.keyval)
if (self.keyCtrlPressed):
if (keyPressName == "b"):
self.on_BIU_button_clicked(None, self.tag_bold)
if (keyPressName == "i"):
self.on_BIU_button_clicked(None, self.tag_italic)
if (keyPressName == "u"):
self.on_BIU_button_clicked(None, self.tag_underline)
if (keyPressName == "l"):
self.on_BIU_button_clicked(None, self.tag_link)
if (keyPressName == "z"):
self.textbuffer.undo()
if ((keyPressName == "y") or (keyPressName == "Z")):
self.textbuffer.redo()
if (keyPressName == "s"):
self.onSave()
if ((keyPressName == "Control_L") or (keyPressName == "Control_R")):
self.keyCtrlPressed = True
if (keyPressName == "Escape"):
self.onKeyEsc()
# print (gtk.gdk.keyval_name(event.keyval))
def onMouseMove(self, widget, event):
start = widget.get_iter_at_location(int(event.x), int(event.y))
if (self.textbuffer.isTagSelected(start, self.tag_link)):
cur = gtk.gdk.Cursor(gtk.gdk.HAND1)
event.window.set_cursor(cur)
widget.set_tooltip_text("ctrl + mouse left click to open the link")
self.textview.grab_focus()
else:
event.window.set_cursor(None)
widget.set_tooltip_text(None)
def tagLinkEvent(self, tag, widget, event, piter):
if (event.type == gtk.gdk.BUTTON_PRESS):
if (event.button == 1):
start = widget.get_iter_at_location(int(event.x), int(event.y))
tb = self.textbuffer
if (tb.isTagSelected(start, self.tag_link)):
if (self.keyCtrlPressed):
link = tb.getLink(self.tag_link, self.tag_hidden, start)
if (tb.isInternalLink(link)):
noteID = tb.getNoteIDFromLink(link)
noteIDPath = self.NoteStore.findNoteID(noteID)
self.noteSelect(noteIDPath)
def getNoteSelected(self):
        # Returns the selected node of self.treeview, or None
itersel = None
treeView = self.treeview
if (treeView != None):
itersel = treeView.get_selection().get_selected()[1]
return itersel
def onNoteSelectionChange(self, treeView):
itersel = self.getNoteSelected()
if (itersel == None):
itersel = self.NoteStore.get_iter_root()
if (itersel != None):
self.textbuffer = self.NoteStore.get_value(itersel, 4)
self.textview.set_buffer(self.textbuffer)
self.textview.set_sensitive(True)
self.textview.grab_focus()
else:
self.textview.set_sensitive(False)
def onNewNote(self, button = None):
# Create new node
piter = self.NoteStore.CreateNewNote(self.getNoteSelected())
path = None
if (piter != None):
path = self.NoteStore.get_path(piter)
if (path != None):
self.treeview.expand_to_path(path)
self.treeview.set_cursor(path)
def onTestClk(self, button):
# TextBuffer2HTMLConvert.toHTML(self.textbuffer)
# TextBuffer2HTMLConvert.serialize(self.textbuffer)
# self.NoteStore.populate()
# self.clipboard.request_text(self.rowChangedCallback)
## self.appoTxt = gtk.TextBuffer()
## self.clipboard.request_rich_text(self.appoTxt, self.callbackrich)
# diag = gtk.MessageDialog()
# diag.run()
# menu = gtk.Menu()
# menu_item = gtk.MenuItem("A menu item")
# menu.append(menu_item)
# menu_item.run()
# menu.popup(None, None, None, 0, 0)
pass
def callbackrich(self, clipboard, clformat, text, length, data = None):
print ("Hey")
# print (self, clipboard, clformat, text, length, data)
def rowChangedCallback(self, clipboard, text, data = None):
print (text)
def cell_edited_callback(self, cellrenderertext, path, new_text):
piter = self.NoteStore.get_iter(path)
self.NoteStore.set_value(piter, 0, new_text)
def getNoteValue(self, col):
#Get the value from the selected note in the Tree View
piter = self.treeview.get_selection().get_selected()[1]
return self.NoteStore.get_value(piter,col)
def setNoteValue(self, col, value):
        # Set the value of the selected note in the Tree View
piter = self.treeview.get_selection().get_selected()[1]
self.NoteStore.set_value(piter, col, value)
def getNoteIter(self):
        # Get the selected note iter from the Note Model
return self.treeview.get_selection().get_selected()[1]
def noteSelect(self, path):
self.treeviewSelection.select_path(path)
self.treeview.emit("cursor-changed")
def onSave(self, menuItm = None):
if (self.fileSelected == None):
self.onSaveAs(None)
else:
if (self.NoteStore.save(self.fileSelected) == True):
pass
def onSaveAs(self, menuItm):
chooser = gtk.FileChooserDialog(title="Save notes file",action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_current_folder(os.getcwd())
filefilter = gtk.FileFilter()
filefilter.set_name("PGap note file")
filefilter.add_pattern("*.xml")
chooser.add_filter(filefilter)
filefilter = gtk.FileFilter()
filefilter.set_name("All files")
filefilter.add_pattern("*")
chooser.add_filter(filefilter)
chooser.set_do_overwrite_confirmation(True)
if (chooser.run() == gtk.RESPONSE_OK):
fileSelected = chooser.get_filename()
if (self.NoteStore.save(fileSelected) == True):
self.fileSelected = fileSelected
self.updateTitle()
else:
pass
chooser.destroy()
def onOpen(self, menuItm):
chooser = gtk.FileChooserDialog(title="Open notes file",action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
chooser.set_current_folder(os.getcwd())
filefilter = gtk.FileFilter()
filefilter.set_name("PGap note file")
filefilter.add_pattern("*.xml")
chooser.add_filter(filefilter)
filefilter = gtk.FileFilter()
filefilter.set_name("All files")
filefilter.add_pattern("*")
chooser.add_filter(filefilter)
if (chooser.run() == gtk.RESPONSE_OK):
fileSelected = chooser.get_filename()
if (self.NoteStore.load(fileSelected) == False):
md = gtk.MessageDialog(self.window,
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
gtk.BUTTONS_CLOSE, "File type not supported")
md.run()
md.destroy()
else:
self.fileSelected = fileSelected
self.updateTitle()
self.noteSelect(0)
chooser.destroy()
if __name__ == '__main__':
main = PGapMain()
gtk.main()
| gpl-2.0 | -4,816,864,838,686,431,000 | 39.378251 | 114 | 0.565457 | false |
akissa/repoze.who.plugins.saml2 | repoze/who/plugins/utils.py | 1 | 2114 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# repoze.who.plugins.saml2: A SAML2 plugin for repoze.who
# Copyright (C) 2015 Andrew Colin Kissa <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
repoze.who.plugins.saml2: A SAML2 plugin for repoze.who
Copyright (C) 2015 Andrew Colin Kissa <[email protected]>
"""
from lasso import SAML2_ATTRIBUTE_NAME_FORMAT_BASIC
def get_attributes_from_assertion(assertion):
attributes = dict()
if not assertion:
return attributes
for _statement in assertion.attributeStatement:
for attribute in _statement.attribute:
name = None
nickname = None
_format = SAML2_ATTRIBUTE_NAME_FORMAT_BASIC
try:
name = attribute.name.decode('ascii')
except BaseException:
pass
else:
try:
if attribute.nameFormat:
_format = attribute.nameFormat.decode('ascii')
if attribute.friendlyName:
nickname = attribute.friendlyName
except BaseException:
pass
try:
values = attribute.attributeValue
if values:
attributes[(name, _format)] = []
if nickname:
attributes[nickname] = attributes[(name, _format)]
for value in values:
content = [_any.exportToXml() for _any in value.any]
content = ''.join(content)
attributes[
(name, _format)
].append(content.decode('utf8'))
except BaseException:
pass
attributes['__issuer'] = assertion.issuer.content
attributes['__nameid'] = assertion.subject.nameID.content
return attributes
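# Illustrative shape of the returned mapping (values and format URI below are
# assumed, not taken from a real assertion):
#   {('mail', 'urn:oasis:names:tc:SAML:2.0:attrname-format:basic'): ['[email protected]'],
#    'mail': [...],                        # friendlyName alias to the same list
#    '__issuer': 'https://idp.example.org/saml',
#    '__nameid': 'opaque-name-id'}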
| mpl-2.0 | 5,629,052,774,001,114,000 | 38.886792 | 78 | 0.542573 | false |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/pubnub/endpoints/presence/here_now.py | 2 | 2298 | from pubnub import utils
from pubnub.endpoints.endpoint import Endpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.models.consumer.presence import PNHereNowResult
class HereNow(Endpoint):
HERE_NOW_PATH = "/v2/presence/sub-key/%s/channel/%s"
HERE_NOW_GLOBAL_PATH = "/v2/presence/sub-key/%s"
def __init__(self, pubnub):
Endpoint.__init__(self, pubnub)
self._channels = []
self._channel_groups = []
self._include_state = False
self._include_uuids = True
def channels(self, channels):
utils.extend_list(self._channels, channels)
return self
def channel_groups(self, channel_groups):
utils.extend_list(self._channel_groups, channel_groups)
return self
def include_state(self, should_include_state):
self._include_state = should_include_state
return self
def include_uuids(self, include_uuids):
self._include_uuids = include_uuids
return self
def custom_params(self):
params = {}
if len(self._channel_groups) > 0:
params['channel-group'] = utils.join_items_and_encode(self._channel_groups)
if self._include_state:
params['state'] = "1"
if not self._include_uuids:
params['disable_uuids'] = "1"
return params
def build_path(self):
if len(self._channels) == 0 and len(self._channel_groups) == 0:
return HereNow.HERE_NOW_GLOBAL_PATH % self.pubnub.config.subscribe_key
else:
return HereNow.HERE_NOW_PATH % (self.pubnub.config.subscribe_key,
utils.join_channels(self._channels))
def http_method(self):
return HttpMethod.GET
def validate_params(self):
self.validate_subscribe_key()
def is_auth_required(self):
return True
def create_response(self, envelope):
return PNHereNowResult.from_json(envelope, self._channels)
def request_timeout(self):
return self.pubnub.config.non_subscribe_request_timeout
def connect_timeout(self):
return self.pubnub.config.connect_timeout
def operation_type(self):
return PNOperationType.PNHereNowOperation
def name(self):
return "HereNow"
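    # Typical use goes through the PubNub client's fluent builder rather than
    # instantiating this endpoint directly; illustrative sketch (assumed to
    # follow the SDK's usual pattern):
    #   pubnub.here_now().channels('room-1').include_uuids(True).sync()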
| gpl-2.0 | 5,272,564,497,552,531,000 | 28.844156 | 87 | 0.632724 | false |
francois-berder/PyLetMeCreate | letmecreate/click/lora.py | 1 | 4980 | #!/usr/bin/env python3
"""Python binding of Lora wrapper of LetMeCreate library."""
import ctypes
LORA_CLICK_AUTO_FREQ_BAND_250KHZ = 0
LORA_CLICK_AUTO_FREQ_BAND_125KHZ = 1
LORA_CLICK_AUTO_FREQ_BAND_62_5KHZ = 2
LORA_CLICK_AUTO_FREQ_BAND_31_3KHZ = 3
LORA_CLICK_AUTO_FREQ_BAND_15_6KHZ = 4
LORA_CLICK_AUTO_FREQ_BAND_7_8KHZ = 5
LORA_CLICK_AUTO_FREQ_BAND_3_9KHZ = 6
LORA_CLICK_AUTO_FREQ_BAND_200KHZ = 7
LORA_CLICK_AUTO_FREQ_BAND_100KHZ = 8
LORA_CLICK_AUTO_FREQ_BAND_50KHZ = 9
LORA_CLICK_AUTO_FREQ_BAND_25KHZ = 10
LORA_CLICK_AUTO_FREQ_BAND_12_5KHZ = 11
LORA_CLICK_AUTO_FREQ_BAND_6_3KHZ = 12
LORA_CLICK_AUTO_FREQ_BAND_3_1KHZ = 13
LORA_CLICK_AUTO_FREQ_BAND_166_7KHZ = 14
LORA_CLICK_AUTO_FREQ_BAND_83_3KHZ = 15
LORA_CLICK_AUTO_FREQ_BAND_41_7KHZ = 16
LORA_CLICK_AUTO_FREQ_BAND_20_8KHZ = 17
LORA_CLICK_AUTO_FREQ_BAND_10_4KHZ = 18
LORA_CLICK_AUTO_FREQ_BAND_5_2KHZ = 19
LORA_CLICK_AUTO_FREQ_BAND_2_6KHZ = 20
LORA_CLICK_AUTO_FREQ_BAND_COUNT = 21
LORA_CLICK_CODING_RATE_4_5 = 0
LORA_CLICK_CODING_RATE_4_6 = 1
LORA_CLICK_CODING_RATE_4_7 = 2
LORA_CLICK_CODING_RATE_4_8 = 3
LORA_CLICK_CODING_RATE_COUNT = 4
LORA_CLICK_BANDWIDTH_125KHZ = 0
LORA_CLICK_BANDWIDTH_250KHZ = 1
LORA_CLICK_BANDWIDTH_500KHZ = 2
class LoraClickConfig(ctypes.Structure):
"""Lora Click configuration"""
_fields_ = [
("frequency", ctypes.c_uint32),
("spreading_factor", ctypes.c_uint8),
("auto_freq_band", ctypes.c_uint),
("coding_rate", ctypes.c_uint),
("bandwidth", ctypes.c_uint),
("power", ctypes.c_int8),
("bitrate", ctypes.c_uint16),
("freq_deviation", ctypes.c_uint16),
("preamble_length", ctypes.c_uint16),
("enable_crc_header", ctypes.c_bool)]
_LIB = ctypes.CDLL('libletmecreate_click.so')
_LIB.lora_click_get_default_configuration.restype = LoraClickConfig
def get_default_configuration():
"""Returns default configuration:
frequency = 868000000
spreading_factor = 12
auto_freq_band = LORA_CLICK_AUTO_FREQ_BAND_125KHZ
coding_rate = LORA_CLICK_CODING_RATE_4_8
bandwidth = LORA_CLICK_BANDWIDTH_250KHZ
power = 14
bitrate = 5000
freq_deviation = 5000
preamble_length = 8
enable_crc_header = true
"""
return _LIB.lora_click_get_default_configuration()
def init(mikrobus_index, config):
"""Initialize the Lora Click and configure it.
mikrobus_index: 0 (MIKROBUS_1) or 1 (MIKROBUS_2)
config: Configuration of the Lora Click
Note: An exception is thrown if it fails to initialize the Lora Click.
"""
ret = _LIB.lora_click_init(mikrobus_index, config)
if ret < 0:
raise Exception("")
def configure(config):
"""Configure the Lora Click
config: Configuration of the Lora Click
Note: An exception is thrown if it fails to configure the Lora Click.
"""
ret = _LIB.lora_click_configure(config)
if ret < 0:
raise Exception("")
def send(data):
"""Send a list of bytes
data: list of bytes
This is a blocking call.
Note: An exception is thrown if it fails to send all bytes.
"""
length = len(data)
tx_buffer = (ctypes.c_uint8 * length)(*data)
ret = _LIB.lora_click_send(tx_buffer, length)
if ret < 0:
raise Exception("")
def receive(length):
"""Receive a list of bytes
length: Number of bytes to receive
This is a blocking call, it will not return until the number of requested
bytes has been received.
Note: An exception is thrown if it fails to receive all bytes.
"""
rx_buffer = (ctypes.c_uint8 * length)()
ret = _LIB.lora_click_receive(rx_buffer, length)
if ret < 0:
raise Exception("")
return [rx_buffer[i] for i in range(length)]
def write_eeprom(start_address, data):
"""Write some bytes in EEPROM
start_address: Must be in range 0x300-0x3FF
data: A list of bytes to write
Note: An exception is thrown if it fails to write bytes to the EEPROM.
"""
length = len(data)
tmp = (ctypes.c_uint8 * length)(*data)
ret = _LIB.lora_click_write_eeprom(start_address, tmp, length)
if ret < 0:
raise Exception("")
def read_eeprom(start_address, length):
"""Read a list of bytes from EEPROM
start_address: Must be in range 0x300-0x3FF
length: Number of bytes to read
Note: An exception is thrown if it fails to read bytes from the EEPROM.
"""
data = (ctypes.c_uint8 * length)()
ret = _LIB.lora_click_read_eeprom(start_address, data, length)
if ret < 0:
raise Exception("")
return [data[i] for i in range(length)]
def get_eui():
"""Read the EUI from the Lora Click
This function returns a list of 8 bytes representing the EUI of the
device.
Note: An exception is thrown if it fails to read the EUI.
"""
eui = (ctypes.c_uint8 * 8)()
ret = _LIB.lora_click_get_eui(eui)
if ret < 0:
raise Exception("")
return [eui[i] for i in range(8)]
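# Minimal usage sketch (illustrative; assumes the click board is in mikrobus slot 0):
#   config = get_default_configuration()
#   init(0, config)
#   send([0x48, 0x69])    # transmit two bytes
#   data = receive(2)     # block until two bytes have been received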
| bsd-3-clause | 2,717,723,095,195,862,500 | 27.62069 | 77 | 0.657831 | false |
oliverlee/bicycle | python/pitch_constraint.py | 1 | 3051 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Calculate the pitch needed to maintain contact between the front wheel and
ground.
"""
from sympy import simplify, symbols
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import msprint
from mpmath import findroot
from sympy.utilities import lambdify
from sympy.printing import ccode
import textwrap
## define coordinates
# phi: roll
# theta: pitch
# delta: steer
phi, theta, delta = symbols('φ θ δ')
# rR: rear radius
# rF: front radius
rR, rF = symbols('rR rF')
# cR: distance from rear wheel center to steer axis
# cF: distance from front wheel center to steer axis
# ls: steer axis separation
cR, cF, ls = symbols('cR cF ls')
benchmark_parameters = {
rR: 0.3,
rF: 0.35,
cR: 0.9534570696121847,
ls: 0.2676445084476887,
cF: 0.0320714267276193,
}
## define reference frames
# N: inertial frame
# B: rear aseembly frame
# H: front assembly frame
N = ReferenceFrame('N')
B = N.orientnew('B', 'body', [0, phi, theta], 'zxy') # yaw is ignored
H = B.orientnew('H', 'axis', [delta, B.z])
## define points
# rear wheel/ground contact point
pP = Point('P')
# define unit vectors from rear/front wheel centers to ground
# along the wheel plane
R_z = ((B.y ^ N.z) ^ B.y).normalize()
F_z = ((H.y ^ N.z) ^ H.y).normalize()
# define rear wheel center point
pRs = pP.locatenew('R*', -rR*R_z)
# "top" of steer axis, point of SA closest to R*
# orthogonal projection of rear wheel center on steer axis
pRh = pRs.locatenew('R^', cR*B.x)
# orthogonal projection of front wheel center on steer axis
pFh = pRh.locatenew('S^', ls*B.z)
# front wheel center point
pFs = pFh.locatenew('S*', cF*H.x)
# front wheel/ground contact point
pQ = pFs.locatenew('Q', rF*F_z)
# N.z component of vector to pQ from pP
# this is our configuration constraint
f = simplify(pQ.pos_from(pP) & N.z)
print("f = {}\n".format(msprint(f)))
# calculate the derivative of f for use with Newton-Raphson
df = f.diff(theta)
print("df/dθ = {}\n".format(msprint(df)))
# constraint function for zero steer/lean configuration and
# using the benchmark parameters
f0 = lambdify(theta, f.subs({phi: 0, delta: 0}).subs(benchmark_parameters))
df0 = lambdify(theta, df.subs({phi: 0, delta: 0}).subs(benchmark_parameters))
print("verifying constraint equations are correct")
print("for zero steer/lean, pitch should be pi/10")
findroot(f0, 0.3, solver="newton", tol=1e-8, verbose=True, df=df0)
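# With the benchmark parameters above, the iteration should converge to
# theta ~= 0.3141593 rad, i.e. pi/10, the expected benchmark pitch value.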
# convert to moore parameters
c_sym = symbols('x[1] pitch x[2] m_rr m_rf m_d1 m_d3 m_d2')
c_sym_dict = dict(zip([phi, theta, delta, rR, rF, cR, cF, ls], c_sym))
fc = ccode(f.subs(c_sym_dict))
dfc = ccode(df.subs(c_sym_dict))
cpp_math = {
'cos': 'std::cos',
'sin': 'std::sin',
'pow': 'std::pow',
'sqrt': 'std::sqrt',
}
fcs = fc
dfcs = dfc
for k, v in cpp_math.items():
fcs = fcs.replace(k, v)
dfcs = dfcs.replace(k, v)
print('\nf:')
print(textwrap.fill(fcs, 110, break_long_words=False))
print('\ndf:')
print(textwrap.fill(dfcs, 110, break_long_words=False))
| bsd-2-clause | 3,405,988,545,000,898,000 | 26.205357 | 77 | 0.68231 | false |
patricksanders/muse | muse/muse.py | 1 | 5263 | import jinja2
import webapp2
import os
import json
import random
from settings import *
from google.appengine.api import memcache
from apis.pyechonest import config as enconfig
from apis.pyechonest import *
#from apis.rdio import Rdio
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
NUM_SONGS = 15
random.seed()
client = memcache.Client()
# Set EchoNest API credentials (values located in settings.py)
enconfig.ECHO_NEST_API_KEY = ECHONEST_API_KEY
enconfig.ECHO_NEST_CONSUMER_KEY = ECHONEST_CONSUMER_KEY
enconfig.ECHO_NEST_SHARED_SECRET = ECHONEST_SHARED_SECRET
# Initialize Rdio connection
#rdio = Rdio((RDIO_CONSUMER_KEY, RDIO_CONSUMER_SECRET))
class MainPage(webapp2.RequestHandler):
def get(self):
memcache.add(key='hot_list',
value=artist.top_hottt(results=10), time=3600)
hot_list = client.gets('hot_list')
template_values = {
'hot_list': hot_list,
'tracking': TRACKING,
}
template = JINJA_ENVIRONMENT.get_template('templates/index.html')
self.response.write(template.render(template_values))
class AboutPage(webapp2.RequestHandler):
def get(self):
template_values = {
'tracking': TRACKING,
}
template = JINJA_ENVIRONMENT.get_template('templates/about.html')
self.response.write(template.render(template_values))
class GetArtist(webapp2.RequestHandler):
def get(self):
query = self.request.get('name')
section = self.request.get('section')
""" Retrieve artist from Echo Nest
Put artist object and song list in memcache if they're
not there already
"""
memcache.add(key=query, value=artist.Artist(query), time=3600)
en_artist = client.gets(query)
song_list_key = 'song_list-' + en_artist.name
memcache.add(key=song_list_key,
value=en_artist.get_songs(results=NUM_SONGS), time=3600)
song_list = client.gets(song_list_key)
""" Generate response
Responses are based on request contents
If no 'section' parameter is present, the generic artist page is
generated and returned (using 'name' parameter)
Subsequent AJAX calls are differentiated by the 'section' parameter
being one of the following values:
overview
song_length
blogs
"""
if not section:
images = en_artist.get_images(results=15)
image_url = images[random.randint(0,14)]['url']
template_values = {
'image_url': image_url,
'artist_name': en_artist.name,
'tracking': TRACKING,
}
template = JINJA_ENVIRONMENT.get_template('templates/artist.html')
self.response.write(template.render(template_values))
elif section == 'stats':
data = self.getStats(en_artist, song_list)
json.dump(data, self.response)
elif section == 'overview':
data = self.getOverview(en_artist)
self.response.headers['Content-Type'] = 'application/json'
json.dump(data, self.response)
elif section == 'song_length':
data = self.getSongLength(en_artist, song_list)
self.response.headers['Content-Type'] = 'application/json'
json.dump(data, self.response)
elif section == 'blogs':
data = self.getBlogs(en_artist, song_list)
self.response.headers['Content-Type'] = 'application/json'
json.dump(data, self.response)
else:
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('That section doesn\'t exist')
""" Returns dict with relevant blog entries """
def getBlogs(self, en_artist, song_list):
doc_counts = en_artist.get_doc_counts()
blog_list = en_artist.get_blogs(results=5, high_relevance=True)
blogs = []
for blog in blog_list:
blogs.append(
{
'name': blog['name'],
'url': blog['url'],
})
data = {
'blog_count': doc_counts['blogs'],
'blog_list': blogs,
}
return data
""" Returns dict including general artist info """
def getOverview(self, en_artist):
similar_artists = en_artist.get_similar(results=7)
terms = en_artist.get_terms()
similar_list = []
for item in similar_artists:
similar_list.append(item.name)
terms_list = []
for item in terms:
terms_list.append(item['name'])
data = {
'term_list': terms_list,
'similar_list': similar_list,
}
return data
""" Returns dict including avg and total song length """
def getSongLength(self, en_artist, song_list):
""" Calculate total and average song length """
total_song_length = 0
for song in song_list:
total_song_length += song.get_audio_summary()['duration']
total_song_length = total_song_length / 60
avg_song_length = total_song_length / NUM_SONGS
data = {
'total_songs': NUM_SONGS,
'total_song_length': total_song_length,
'avg_song_length': avg_song_length,
}
return data
def getStats(self, en_artist, song_list):
hotttnesss = en_artist.hotttnesss * 50
familiarity = en_artist.familiarity * 50
""" Calculate average danceability """
total_danceability = 0
for song in song_list:
total_danceability += song.get_audio_summary()['danceability']
danceability = (total_danceability / NUM_SONGS) * 100
data = {
'hotttnesss': hotttnesss,
'familiarity': familiarity,
'danceability': danceability,
}
return data
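    # Illustrative JSON produced for '?section=stats' (numbers are made up):
    #   {"hotttnesss": 41.2, "familiarity": 38.7, "danceability": 55.3}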
app = webapp2.WSGIApplication([('/', MainPage),
('/about', AboutPage),
('/artist', GetArtist)],
debug=True) | bsd-3-clause | 830,532,179,626,433,300 | 28.082873 | 69 | 0.693711 | false |
HelloLily/hellolily | lily/messaging/email/migrations/0025_migrate_public_setting.py | 1 | 1027 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from ..models.models import EmailAccount
PUBLIC = 0
def migrate_public_setting(apps, schema_editor):
EmailAccount = apps.get_model('email', 'EmailAccount')
email_accounts = EmailAccount.objects.all()
for email_account in email_accounts:
if email_account.public:
email_account.privacy = PUBLIC
email_account.save()
def migrate_public_setting_backwards(apps, schema_editor):
EmailAccount = apps.get_model('email', 'EmailAccount')
email_accounts = EmailAccount.objects.all()
for email_account in email_accounts:
if email_account.privacy == PUBLIC:
email_account.public = True
email_account.save()
class Migration(migrations.Migration):
dependencies = [
('email', '0024_emailaccount_privacy'),
]
operations = [
migrations.RunPython(migrate_public_setting, migrate_public_setting_backwards)
]
| agpl-3.0 | -7,111,744,024,382,527,000 | 24.675 | 86 | 0.673807 | false |
T2DREAM/t2dream-portal | src/encoded/get_file_uuid.py | 1 | 1886 | #!/usr/bin/env python2
import argparse
import os
import sys
from collections import OrderedDict
from pyramid.compat import bytes_
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.view import view_config
from pyramid.response import Response
from snovault import TYPES
from snovault.util import simple_path_ids
from urllib.parse import (
parse_qs,
urlencode,
)
from pyramid.config import Configurator
import pprint
import csv
import io
import json
import subprocess
import requests
import shlex
import logging
import re
log = logging.getLogger(__name__)
EPILOG = '''
%(prog)s GET file uuids associated with the experiment
Basic Usage:
sudo /srv/encoded/bin/py %(prog)s --accession TSTSR999372
accession id
'''
def get_files_uuids(result_dict):
file_uuids = []
for file in result_dict["files"]:
file_uuids.append(file["uuid"])
return list(set(file_uuids))
def file_uuid(accession):
HEADERS = {'accept': 'application/json'}
path = ('http://ec2-34-219-91-34.us-west-2.compute.amazonaws.com/experiment/' + accession)
response = requests.get(path,headers=HEADERS)
response_json_dict = response.json()
results = json.dumps(response_json_dict, indent=4, separators=(',', ': '))
uuids_in_results = get_files_uuids(response_json_dict)
fout = io.StringIO()
writer = csv.writer(fout, delimiter='\n')
writer.writerows([uuids_in_results])
return uuids_in_results
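# get_files_uuids() above only relies on the response JSON containing a
# "files" list whose entries carry a "uuid" field, roughly (illustrative):
#   {"files": [{"uuid": "4e8ef..."}, {"uuid": "91ab3..."}], ...}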
def main():
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--accession', help="accession id")
args = parser.parse_args()
accession = args.accession
response = file_uuid(accession)
print("\n".join(response))
if __name__ == "__main__":
main()
| mit | 1,046,697,810,688,850,400 | 27.575758 | 94 | 0.711559 | false |
gannetson/sportschooldeopenlucht | apps/contentplugins/migrations/0001_initial.py | 1 | 3815 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PictureItem'
db.create_table('contentitem_contentplugins_pictureitem', (
('contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100)),
('align', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('contentplugins', ['PictureItem'])
def backwards(self, orm):
# Deleting model 'PictureItem'
db.delete_table('contentitem_contentplugins_pictureitem')
models = {
'contentplugins.pictureitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'PictureItem', 'db_table': "'contentitem_contentplugins_pictureitem'", '_ormbases': ['fluent_contents.ContentItem']},
'align': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['contentplugins'] | bsd-3-clause | 5,727,575,383,363,468,000 | 63.677966 | 208 | 0.588729 | false |
jasonacox/SentryPi | sentrypi-freezer.py | 1 | 2996 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Freezer Status - JSON Output
#
# Description:
# This script reads the 1-wire freezer temperature and prints JSON
# Additionally, this script will send SNS messages if temperature
# rises above ALERTTEMP or above freezing.
#
# Jason A. Cox, @jasonacox
# https://github.com/jasonacox/SentryPI
import os
import RPi.GPIO as io
import boto3
import glob
import time
import datetime
#
# Config Settings
#
# Create an SNS client for alerts
client = boto3.client(
"sns",
aws_access_key_id="--------------------",
aws_secret_access_key="----------------------------------------",
region_name="us-east-1")
SNSTopicArn = "arn:aws:sns:us-east-1:------------:SentryPiAlerts"
## read temp from DS18B20 1-Wire Probe
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28-0516a30385ff*')[0]
device_file = device_folder + '/w1_slave'
freezer_file = "/var/www-tmp/sentrypi-freezer"
freezer_thaw_file = "/var/www-tmp/sentrypi-freezer-thaw"
# Alert values in C
ALERTTEMP = -10
THAWTEMP = 0
#
# Functions
#
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos + 2:]
temp_c = float(temp_string) / 1000.0
return temp_c
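# For reference, a DS18B20 w1_slave file normally holds two lines like the
# following (values illustrative); the loop above waits for the 'YES' CRC flag
# and then parses the millidegrees after 't=':
#   6f 01 4b 46 7f ff 01 10 57 : crc=57 YES
#   6f 01 4b 46 7f ff 01 10 57 t=22937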
#
# Main Function
#
def main():
# Main program block
watchdog = 0
while watchdog < 10:
watchdog = watchdog + 1
now = datetime.datetime.utcnow()
iso_time = now.strftime("%Y-%m-%dT%H:%M:%SZ")
ftemp = read_temp()
if ftemp != 85:
print(
"{ \"datetime\": \"%s\", \"temperature\": %f, \"humidity\": %d }"
% (iso_time, ftemp, 0))
if ftemp >= THAWTEMP:
# freezer thawing
client.publish(
Message="Freezer FAILURE (%0.1f°F)" % ((1.8 * ftemp) + 32),
TopicArn="%s" % SNSTopicArn)
# check for alert but send only 1
if ftemp > ALERTTEMP and os.path.isfile(freezer_file) == False:
# freezer too hot!
client.publish(
Message="Freezer Temp WARNING (%0.1f°F)" % (
(1.8 * ftemp) + 32),
TopicArn="%s" % SNSTopicArn)
os.system("touch %s" % freezer_file)
if ftemp <= ALERTTEMP and os.path.isfile(freezer_file) == True:
os.remove(freezer_file)
client.publish(
Message="Freezer Temp Good (%0.1f°F)" % (
(1.8 * ftemp) + 32),
TopicArn="%s" % SNSTopicArn)
break
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| mit | -3,744,901,409,135,362,600 | 25.486726 | 81 | 0.533244 | false |
matematik7/CSSQC | tests/test_indentation.py | 1 | 1775 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# test_indentation.py
#
# test for indentation rule
# ----------------------------------------------------------------
# copyright (c) 2014 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
import unittest
from cssqc.parser import CSSQC
from cssqc.qualityWarning import QualityWarning
class Test_indentation(unittest.TestCase):
def parse_indentation(self, c, i):
c.parse('''div,
span {{
{0}margin: 0;
padding: 0;
{0}{0}color: blue;
{0}.class1,
.class2,
{0}{0}.class3 {{
{0}{0}top: 0;
bottom: 0;
{0}{0}{0}width: 100%;
{0}}}
}}
@c1: #fff;
@c2: #bbb;
.border-box {{
{0}-webkit-box-sizing: border-box;
{0} -moz-box-sizing: border-box;
{0} box-sizing: border-box;
}}
.class4 {{
{0}margin: 0
}}
.class5 {{
{0}padding: 0
}}'''.format(i))
self.assertEqual(c.warnings, [
QualityWarning('indentation', 2),
QualityWarning('indentation', 4),
QualityWarning('indentation', 5),
QualityWarning('indentation', 8),
QualityWarning('indentation', 9),
QualityWarning('indentation', 12),
QualityWarning('indentation', 13),
QualityWarning('indentation', 18),
QualityWarning('indentation', 29)
])
def test_tab(self):
c = CSSQC({"indentation": "tab"})
self.parse_indentation(c, '\t')
def test_4spaces(self):
c = CSSQC({"indentation": "4"})
self.parse_indentation(c, ' ')
def test_2spaces(self):
c = CSSQC({"indentation": "2"})
self.parse_indentation(c, ' ')
| mit | 3,743,093,510,685,540,000 | 24.357143 | 66 | 0.513803 | false |
nicolasfauchereau/windspharm | lib/windspharm/tools.py | 1 | 9009 | """
Tools for managing data for use with `~windspharm.standard.VectorWind`
(or indeed `spharm.Spharmt`).
"""
# Copyright (c) 2012-2013 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import numpy as np
def __order_dims(d, inorder):
if 'x' not in inorder or 'y' not in inorder:
raise ValueError('a latitude-longitude grid is required')
lonpos = inorder.lower().find('x')
latpos = inorder.lower().find('y')
d = np.rollaxis(d, lonpos)
if latpos < lonpos:
latpos += 1
d = np.rollaxis(d, latpos)
outorder = inorder.replace('x', '')
outorder = outorder.replace('y', '')
outorder = 'yx' + outorder
return d, outorder
def __reshape(d):
out = d.reshape(d.shape[:2] + (np.prod(d.shape[2:]),))
return out, d.shape
def prep_data(data, dimorder):
"""
Prepare data for input to `~windspharm.standard.VectorWind` (or to
`spharm.Spharmt` method calls).
Returns a dictionary of intermediate information that can be passed
to `recover_data` or `get_recovery` to recover the original shape
and order of the data.
**Arguments:**
*data*
Data array. The array must be at least 2D.
*dimorder*
String specifying the order of dimensions in the data array. The
characters 'x' and 'y' represent longitude and latitude
respectively. Any other characters can be used to represent
other dimensions.
**Returns:**
*pdata*
*data* reshaped/reordered to (latitude, longitude, other).
*info*
A dictionary of information required to recover *data*.
**See also:**
`recover_data`, `get_recovery`.
**Examples:**
Prepare an array with dimensions (12, 17, 73, 144) where the
dimensions are (time, level, latitude, longitude)::
pdata, info = prep_data(data, 'tzyx')
Prepare an array with dimensions (144, 16, 73, 21) where the first
dimension is longitude and the third dimension is latitude. The
characters used to represent the other dimensions are arbitrary::
pdata, info = prep_data(data, 'xayb')
"""
# Returns the prepared data and some data info to help data recovery.
pdata, intorder = __order_dims(data, dimorder)
pdata, intshape = __reshape(pdata)
info = dict(intermediate_shape=intshape,
intermediate_order=intorder,
original_order=dimorder)
return pdata, info
def recover_data(pdata, info):
"""
Recover the shape and dimension order of an array output from
`~windspharm.standard.VectorWind` methods (or from `spharm.Spharmt`
methods).
This function performs the opposite of `prep_data`.
For recovering the shape of multiple variables, see `get_recovery`.
**Arguments:**
*pdata*
Data array with either 2 or 3 dimensions. The first two
dimensions are latitude and longitude respectively.
*info*
Information dictionary output from `prep_data`.
**Returns:**
*data*
The data reshaped/reordered.
**See also:**
`prep_data`, `get_recovery`.
**Example:**
Recover the original input shape and dimension order of an array
processed with `prep_data` or an output of
`~windspharm.standard.VectorWind` or `sparm.Spharmt` method calls on
such data::
data = recover_data(pdata, info)
"""
# Convert to intermediate shape (full dimensionality, windspharm order).
data = pdata.reshape(info['intermediate_shape'])
# Re-order dimensions correctly.
rolldims = np.array([info['intermediate_order'].index(dim)
for dim in info['original_order'][::-1]])
for i in xrange(len(rolldims)):
# Roll the axis to the front.
data = np.rollaxis(data, rolldims[i])
rolldims = np.where(rolldims < rolldims[i], rolldims + 1, rolldims)
return data
__recover_docstring_template = """Shape/dimension recovery.
Recovers variable shape/dimension according to:
{!s}
Returns a `list` of variables.
"""
def get_recovery(info):
"""
Return a function that can be used to recover the shape and
dimension order of multiple arrays output from
`~windspharm.standard.VectorWind` methods (or from `spharm.Spharmt`
methods) according to a single dictionary of recovery information.
**Argument:**
*info*
Information dictionary output from `prep_data`.
**Returns:**
*recover*
A function used to recover arrays.
**See also:**
`recover_data`, `prep_data`.
**Example:**
Generate a function to recover the original input shape and
dimension order of arrays processed with `prep_data` and outputs of
`~windspharm.standard.VectorWind` method calls on this data::
u, info = prep_data(u, 'tzyx')
v, info = prep_data(v, 'tzyx')
w = VectorWind(u, v)
sf, vp = w.sfvp()
recover = get_recovery(info)
u, v, sf, vp = recover(u, v, sf, vp)
"""
def __recover(*args):
return [recover_data(arg, info) for arg in args]
info_nice = ["'{!s}': {!s}".format(key, value)
for key, value in info.items()]
__recover.__name__ = 'recover'
__recover.__doc__ = __recover_docstring_template.format(
'\n'.join(info_nice))
return __recover
def reverse_latdim(u, v, axis=0):
"""
Reverse the order of the latitude dimension of zonal and meridional
wind components.
**Arguments:**
*u*, *v*
Zonal and meridional wind components respectively.
**Optional argument:**
*axis*
Index of the latitude dimension. This dimension will be reversed
in the input arrays. Defaults to 0 (the first dimension).
**Returns:**
*ur*, *vr*
Zonal and meridional wind components with the latitude dimensions
reversed. These are always copies of the input.
**See also:**
`order_latdim`.
**Examples:**
Reverse the dimension corresponding to latitude when it is the first
dimension of the inputs::
u, v = reverse_latdim(u, v)
Reverse the dimension corresponding to latitude when it is the third
dimension of the inputs::
u, v = reverse_latdim(u, v, axis=2)
"""
slicelist = [slice(0, None)] * u.ndim
slicelist[axis] = slice(None, None, -1)
u = u.copy()[slicelist]
v = v.copy()[slicelist]
return u, v
def order_latdim(latdim, u, v, axis=0):
"""Ensure the latitude dimension is north-to-south.
Returns copies of the latitude dimension and wind components
with the latitude dimension going from north to south. If the
latitude dimension is already in this order then the output will
just be copies of the input.
**Arguments:**
*latdim*
Array of latitude values.
*u*, *v*
Zonal and meridional wind components respectively.
**Keyword argument:**
*axis*
Index of the latitude dimension in the zonal and meridional wind
components. Defaults to 0 (the first dimension).
**Returns:**
*latdimr*
Possibly reversed *latdim*, always a copy of *latdim*.
*ur*, *vr*
Possibly reversed *u* and *v* respectively. Always copies of *u*
and *v* respectively.
**See also:**
`reverse_latdim`.
**Examples:**
Order the latitude dimension when latitude is the first dimension of
the wind components::
latdim, u, v = order_latdim(latdim, u, v)
Order the latitude dimension when latitude is the third dimension of
the wind components::
latdim, u, v = order_latdim(latdim, u, v, axis=2)
"""
latdim = latdim.copy()
if latdim[0] < latdim[-1]:
latdim = latdim[::-1]
# reverse_latdim() will make copies of u and v
u, v = reverse_latdim(u, v, axis=axis)
else:
# we return copies from this function
u, v = u.copy(), v.copy()
return latdim, u, v
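# Minimal end-to-end sketch (assumes u and v are dimensioned (time, lev, lat, lon)):
#   lats, u, v = order_latdim(lats, u, v, axis=2)
#   up, info = prep_data(u, 'tzyx')
#   vp, _ = prep_data(v, 'tzyx')
#   # ... pass the (lat, lon, other)-shaped arrays to spharm/VectorWind ...
#   u_original = recover_data(up, info)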
| mit | 9,096,566,046,557,755,000 | 27.782748 | 79 | 0.651793 | false |
shaddyx/simpleDecorators | tests/CacheTest.py | 1 | 1515 | import unittest
from simpledecorators.Cache import Cache, TimeCacheStorage
called = 0
class SafeTest(unittest.TestCase):
def test_AddToCache(self):
global called
called = 0
@Cache(cacheStorage=TimeCacheStorage(time_seconds=1, maxCount=1000))
def func1(a,b,c):
global called
called += 1
return a + b + c
a = func1(1, 2, 3)
b = func1(1, 2, 3)
c = func1(1, 2, 3)
d = func1(1, 2, 3)
self.assertEqual(a, 1 + 2 + 3)
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(c, d)
self.assertEqual(called, 1)
def test_ReturnsNone(self):
global called
called = 0
@Cache(cacheStorage=TimeCacheStorage(time_seconds=1, maxCount=1000))
def func1(a, b, c):
global called
called += 1
return None
a = func1(1, 2, 3)
b = func1(1, 2, 3)
c = func1(1, 2, 3)
d = func1(1, 2, 3)
self.assertEqual(a, None)
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(c, d)
self.assertEqual(called, 1)
def test_sameArguments(self):
@Cache()
def func1(a, b, c):
return 1
@Cache()
def func2(a, b, c):
return 2
a = func1(1, 2, 3)
b = func2(1, 2, 3)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
if __name__ == "__main__":
unittest.main()
| mit | 941,584,951,735,983,100 | 23.047619 | 76 | 0.512871 | false |
proffalken/edison | cmdb/reports.py | 1 | 2103 | # This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
try:
from geraldo import Report, landscape, ReportBand, ObjectValue, SystemField,BAND_WIDTH, Label,ReportGroup
from reportlab.lib.pagesizes import A5
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_RIGHT, TA_CENTER
class ReportCfgItem(Report):
title = 'Server Audit'
author = 'Matthew Macdonald-Wallace'
page_size = landscape(A5)
margin_left = 2*cm
margin_top = 0.5*cm
margin_right = 0.5*cm
margin_bottom = 0.5*cm
class band_detail(ReportBand):
height = 0.5*cm
elements=(
ObjectValue(attribute_name='Hostname', left=0.5*cm),
ObjectValue(attribute_name='Rack', left=3*cm),
)
class band_page_header(ReportBand):
height = 1.3*cm
elements = [
SystemField(expression='%(report_title)s', top=0.1*cm, left=0, width=BAND_WIDTH,
style={'fontName': 'Helvetica-Bold', 'fontSize': 14, 'alignment': TA_CENTER}),
Label(text="Hostname", top=0.8*cm, left=0.5*cm),
Label(text=u"Rack", top=0.8*cm, left=3*cm),
SystemField(expression=u'Page %(page_number)d of %(page_count)d', top=0.1*cm,
width=BAND_WIDTH, style={'alignment': TA_RIGHT}),
]
borders = {'bottom': True}
class band_page_footer(ReportBand):
height = 0.5*cm
elements = [
Label(text='Geraldo Reports', top=0.1*cm),
SystemField(expression=u'Printed in %(now:%Y, %b %d)s at %(now:%H:%M)s', top=0.1*cm,
width=BAND_WIDTH, style={'alignment': TA_RIGHT}),
]
borders = {'top': True}
groups = [
ReportGroup(attribute_name = 'Hostname',
band_header = ReportBand(
height = 0.7*cm,
elements = [
ObjectValue(attribute_name='Hostname', left=0, top=0.1*cm, width=20*cm,
get_value=lambda instance: 'Hostname: ' + (instance.Hostname),
style={'fontName': 'Helvetica-Bold', 'fontSize': 12})
],
borders = {'bottom': True},
)
),
]
except ImportError:
geraldo_loaded = False
| bsd-3-clause | 4,797,318,441,502,722,000 | 32.919355 | 114 | 0.642891 | false |
Petraea/jsonbot | jsb/lib/container.py | 1 | 1727 | # jsb/container.py
#
#
""" container for bot to bot communication. """
__version__ = "1"
## jsb imports
from jsb.lib.persist import Persist
from jsb.utils.name import stripname
from jsb.lib.gozerevent import GozerEvent
from jsb.imports import getjson
## basic imports
import os
import hmac
import uuid
import time
import hashlib
## defines
idattributes = ['createtime', 'origin', 'type', 'idtime', 'payload']
## functions
def getid(container):
name = ""
for attr in idattributes:
try: name += str(container[attr])
except KeyError: pass
return uuid.uuid3(uuid.NAMESPACE_URL, name).hex
## classes
class Container(GozerEvent):
""" Container for bot to bot communication. Provides a hmac id that can be checked. """
def __init__(self, origin=None, payload=None, type="event", key=None, how="direct"):
GozerEvent.__init__(self)
self.createtime = time.time()
self.origin = origin
self.type = str(type)
self.payload = payload
self.makeid()
if key: self.makehmac(key)
else: self.makehmac(self.id)
def makeid(self):
self.idtime = time.time()
self.id = getid(self)
def makehmac(self, key):
self.hash = "sha512"
self.hashkey = key
self.digest = hmac.new(key, self.payload, hashlib.sha512).hexdigest()
def save(self, attributes=[]):
target = {}
if attributes:
for key in attributes: target[key] = self[key]
else: target = cpy(self)
targetfile = getdatadir() + os.sep + "containers" + os.sep + str(self.createtime) + "_" + stripname(self.origin)
p = Persist(targetfile)
p.data = getjson().dumps(target)
p.save()
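# Note: save() additionally expects getdatadir() and cpy() to be importable;
# they are not imported in this module and are assumed to live in other jsb
# helper modules. Illustrative construction (origin, payload and key made up):
#   c = Container(origin="bot@host", payload='{"cmd": "ping"}', key="sharedsecret")
#   c.save()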
| mit | -2,278,081,894,544,858,400 | 24.028986 | 120 | 0.623046 | false |
kmichalak/rosie | rosie/cli/test_actions_InitConfigAction.py | 1 | 2740 | import os
from unittest import TestCase
from rosie.cli.actions import InitConfigAction
class TestInitConfigAction(TestCase):
config_file_path = 'test_file.py'
def tearDown(self):
if os.path.exists(self.config_file_path):
os.remove(self.config_file_path)
def test_action_gets_file_name_from_args(self):
# given
action = InitConfigAction()
# when
action.run(server_url='http://test.server.org',
username='test_user',
password='secret_pass',
params_file=self.config_file_path)
# then
self.assertTrue(os.path.exists(self.config_file_path))
def test_action_stores_given_server_url(self):
# given
server_url = 'http://test.server.org'
expected_config_part = 'JENKINS_URL = \'%s\'' % server_url
action = InitConfigAction()
# when
action.run(server_url=server_url,
username='test_user',
password='secret_pass',
params_file=self.config_file_path)
with open(self.config_file_path) as config_file:
s = config_file.read()
# then
self.assertTrue(s.find(expected_config_part) > -1,
'Could not find configuration for server URL %s'
% server_url)
def test_action_stores_given_user_name(self):
# given
user_name = 'test_user'
expected_config_part = 'USER_NAME = \'%s\'' % user_name
action = InitConfigAction()
# when
action.run(server_url='http://test.server.org',
username=user_name,
password='secret_pass',
params_file=self.config_file_path)
with open(self.config_file_path) as config_file:
s = config_file.read()
# then
self.assertTrue(s.find(expected_config_part) > -1,
'Could not find configuration part for user name %s'
% user_name)
def test_action_stores_given_password(self):
# given
password = 'secret_pass'
expected_config_part = 'PASSWORD = \'%s\'' % password
action = InitConfigAction()
# when
action.run(server_url='http://test.server.org',
username='test_user',
password=password,
params_file=self.config_file_path)
with open(self.config_file_path) as config_file:
s = config_file.read()
# then
self.assertTrue(s.find(expected_config_part) > -1,
'Could not find configuration part for password %s'
% password)
| apache-2.0 | -4,067,880,509,539,386,400 | 32.414634 | 76 | 0.54708 | false |
codyparker/channels-obstruction | channels_obstruction/urls.py | 1 | 1029 | from django.conf.urls import url, include
from django.contrib import admin
from game.views import *
from rest_framework.routers import DefaultRouter
from django.contrib.auth.views import login, logout
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^register/', CreateUserView.as_view()),
url(r'^login/$', login, {'template_name': 'login.html'}),
url(r'^logout/$', logout, {'next_page': '/'}),
url(r'^game/(?P<game_id>\d+)/$', GameView.as_view()),
url(r'^lobby/$', LobbyView.as_view()),
url(r'^$', HomeView.as_view()),
]
# urls for api - django rest framework
urlpatterns += [
url(r'^game-from-id/(?P<game_id>\d+)/$', SingleGameViewSet.as_view()),
url(r'^current-user/', CurrentUserView.as_view()),
]
router = DefaultRouter()
router.register(r'player-games', PlayerGameViewSet, 'player_games')
router.register(r'available-games', AvailableGameViewSet, 'available_games')
router.register(r'game-squares', GameSquaresViewSet, 'game_squares')
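# Note (sketch of standard DRF DefaultRouter behaviour): each register() call
# above exposes the usual list/detail routes, e.g. /player-games/ and
# /player-games/<pk>/, which are then appended to urlpatterns below.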
urlpatterns += router.urls | mit | 3,101,498,162,907,690,000 | 33.333333 | 76 | 0.683188 | false |
sveetch/boussole | tests/010_inspector/007_parents.py | 1 | 4810 | # -*- coding: utf-8 -*-
import os
def test_001_basic(settings, inspector):
"""Looking for parents of basic sample"""
sources = [
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
]
sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
def test_002_vendor(settings, inspector):
"""Looking for parents of vendor component"""
sources = [
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
os.path.join(settings.sample_path, 'main_circular_0.scss'),
os.path.join(settings.sample_path, 'main_circular_1.scss'),
os.path.join(settings.sample_path, 'main_circular_2.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
]
sourcepath = os.path.join(settings.sample_path, '_vendor.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.sample_path, '_sass_filetest.sass'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
def test_003_library(settings, inspector):
"""Looking for parents of a library component"""
sources = [
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
os.path.join(settings.sample_path, 'main_circular_0.scss'),
os.path.join(settings.sample_path, 'main_circular_1.scss'),
os.path.join(settings.sample_path, 'main_circular_2.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
]
sourcepath = os.path.join(settings.lib1_path, 'components/_panels.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.lib1_path, 'library_1_fullstack.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
| mit | -5,602,037,356,110,481,000 | 49.631579 | 79 | 0.668815 | false |
asarraf/KuduPyspark | spark1_6_0_kudu.py | 1 | 1709 | # Dated : Aug 25, 2017
# About : Sample Pyspark (1.6.0) code to count number of Rows in a Kudu Table
# Pyspark Version : 1.6.0
# Kudu Version : 1.2.0
# Coder : Ankit Sarraf
import ConfigParser
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SQLContext
# Initialize the configuration
conf = (SparkConf().setMaster("yarn-client").setAppName("KuduSpark_1_6_0"))
# Initialize the SparkContext using the above conf
sc = SparkContext(conf = conf)
# Initialize the SQLContext using the above SparkContext
sqlContext = SQLContext(sc)
# Use the ConfigParser
configParser = ConfigParser.ConfigParser()
# Load the file that contains the Configuration Parameters
configParser.read('kudu_configuration.conf')
# Determine the kudu_master as provided in the Config File
kudu_master = configParser.get('Kudu', 'KuduMaster')
# Provide the table to be read
kudu_table = configParser.get('Kudu', 'KuduTable')
# Display Data Read from Config File
print('KUDU MASTER: ' + kudu_master)
print('KUDU TABLES: ' + kudu_table)
# Load the table in the memory
kudu_events_df = sqlContext.read.format('org.apache.kudu.spark.kudu'). \
option('kudu.master', kudu_master). \
option('kudu.table', kudu_table). \
load()
# Display the count of the rows in the table
print('My Count: ' + str(kudu_events_df.count()))
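# A sketch (not part of the original script): the same data can also be
# queried through the DataFrame API before registering a temp table, using
# only standard Spark 1.6 DataFrame calls. 'event_id' is a hypothetical
# column name used purely for illustration.
# filtered_count = kudu_events_df.filter(kudu_events_df['event_id'] > 0).count()
# print('Filtered Count: ' + str(filtered_count))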
# Register this DataFrame as a temporary table
kudu_events_df.registerAsTempTable("myTab")
# Query the Temporary table using SparkSQL Queries
sqlContext.sql("SELECT count(*) FROM myTab").show()
# The query output looks like:
# +----+
# | _c0|
# +----+
# |1055|
# +----+
| apache-2.0 | 6,504,802,878,220,540,000 | 30.648148 | 87 | 0.67876 | false |
shellphish/rex | rex/exploit/techniques/call_shellcode.py | 1 | 10390 | import logging
from ... import Vulnerability
from .. import Exploit, CannotExploit
from ..technique import Technique
from ..nopsleds import NopSleds
l = logging.getLogger("rex.exploit.techniques.call_shellcode")
class CallShellcode(Technique):
name = "call_shellcode"
applicable_to = ['unix']
def check(self):
# can only exploit ip overwrites
if not self.crash.one_of([Vulnerability.IP_OVERWRITE, Vulnerability.PARTIAL_IP_OVERWRITE]):
self.check_fail_reason("Cannot control IP.")
return False
if not self._is_stack_executable:
self.check_fail_reason("Stack is not executable.")
return False
return True
def apply(self, use_nopsled=True, **kwargs):
# When ASLR is disabled, there might be a difference between the stack pointer we see in angr and the stack
# pointer in the target process. Here we calculate the difference between our SP and the real one in coredump.
sp_difference = 0
if not self.crash.aslr and \
not self.crash.state.regs.sp.symbolic and \
self.crash.core_registers:
# determine what the stack pointer register is called on this architecture
sp_reg_name = self.crash.project.arch.get_register_by_name('sp').name
if sp_reg_name in self.crash.core_registers:
sp_difference = self.crash.core_registers[sp_reg_name] - \
self.crash.state.solver.eval(self.crash.state.regs.sp)
l.debug("The difference between the stack pointer in the core dump and the stack pointer in angr's "
"final crashing state is %#x bytes.", sp_difference)
# try to write shellcode into global memory
shellcode = self.shellcode.get_default()
# try to write to some known memory address
# 1) find a w+x region we can write to
# 2) see if we can constrain its value to shellcode and the ip to that address
# 3) done
l.debug('try: shellcode in global data')
shc_addr, shc_constraint = self._write_global_data(shellcode)
if shc_addr is not None:
exp = self._attempt_jump([shc_constraint], shc_addr)
if exp is not None:
return exp
# try to see if we can jump directly to the stack
# 1) check that aslr is disabled
# 2) find all the regions on the stack that are touched by stdin
# 3) find the largest of those regions that are not concretely constrained
# 4) check that we can jump to the middle of a nopsled in one of them
# 5) done
if not self.crash.aslr:
l.debug('try: absolute address in stack')
base_stack_addrs = self.crash.stack_control(below_sp=False)
stack_addrs = {}
for addr, size in base_stack_addrs.items():
unconstrained_bufs = self._find_unconstrained_memory_buffers(addr, size)
l.debug("Found %d buffer chunks inside %#x-%#x.", len(unconstrained_bufs), addr, addr+size)
stack_addrs.update(unconstrained_bufs)
word_size = self.crash.state.arch.bits // self.crash.state.arch.byte_width
for root in sorted(stack_addrs, key=lambda a: -stack_addrs[a]):
if stack_addrs[root] < len(shellcode):
continue
# Where do we want to write the shellcode to? Note that we are not always able to write the shellcode
# from the very beginning of root. Some smart probing is necessary.
# FIXME: I'm not smart enough to do a smart probing.
for offset in range(0, stack_addrs[root] - len(shellcode), word_size):
sc_data = self.crash.state.memory.load(root + offset, len(shellcode))
sc_constraint = sc_data == shellcode
if self.crash.state.solver.satisfiable(extra_constraints=(sc_constraint,)):
break
else:
l.debug("Cannot write shellcode in region %#x(%#x bytes). Probe the next region.",
root, stack_addrs[root]
)
continue
l.debug("We may write shellcode on the stack at root={:x} offset={:x} loc={:x}".format(root, offset, root + offset))
if use_nopsled:
nopsled_size, nopsled_chunk = self._determine_nopsled_length(stack_addrs, root, offset, shellcode)
else:
nopsled_size = 0
nopsled_chunk = None
# try the addresses in a spiral pattern
addrs = list(range(nopsled_size + 1))
cur = len(addrs) // 2
for i in range(len(addrs)):
if i % 2 == 0:
cur += i
else:
cur -= i
addr = root + offset + addrs[cur]
if addr % self.crash.state.arch.instruction_alignment != 0:
continue
if nopsled_size > 0:
# update sc_constraint
nopsled_size = root + stack_addrs[root] - len(shellcode) - addr
works, sc_constraint = self._attempt_write_nopsled(self.crash.state, shellcode, root + offset,
nopsled_size, nopsled_chunk)
if not works:
continue
adjusted_addr = addr + sp_difference
exp = self._attempt_jump([sc_constraint], adjusted_addr, bypasses_aslr=False)
if exp is not None:
l.info("Got Exploit!")
return exp
# try to read shellcode into memory into one of the aforementioned addresses
l.debug("try: read shellcode into global data")
try:
shc_addr, shc_constraint = self._read_in_global_data(shellcode)
except CannotExploit as e:
raise CannotExploit("[%s] cannot call read (all other call-shellcodes failed)" % self.name) from e
exp = self._attempt_jump([shc_constraint], shc_addr)
if exp is not None:
return exp
raise CannotExploit("[%s] EVERYTHING FAILED" % self.name)
def _find_unconstrained_memory_buffers(self, addr, size):
"""
Determine if the memory buffer has enough freedom, i.e., is "unconstrained enough", to store shellcode in the
future.
:param int addr: The beginning address of the buffer.
:param int size: Maximum size of the buffer.
        :return:        A dict with {root: length} entries where each entry represents a buffer if we
                        believe the buffer starting from `root` with `length` bytes is "unconstrained enough". If no
                        such buffer can be found, an empty dict is returned.
        :rtype:         dict
"""
buffer_chunks = { }
def _record_buffer(root, new_addr):
if root is None:
root = new_addr
buffer_chunks[root] = 1
else:
buffer_chunks[root] += 1
return root
root = None
for subaddr in range(addr, addr + size):
val = self.crash.state.memory.load(subaddr, 1)
# TODO: This sucks. do a real approximation with something like DVSA.
if any('aeg_stdin' in name for name in val.variables):
if not any(c.op == '__eq__' for c in self.crash.state.solver.constraints if not
c.variables - val.variables):
# this is the best case: this byte seems entirely unconstrained
root = _record_buffer(root, subaddr)
continue
elif not any(c.args[0] is val for c in self.crash.state.solver.constraints if c.op == '__eq__'):
# this is a looser constraint: there does not exist any constraint that's like the following:
# val == N
root = _record_buffer(root, subaddr)
continue
# it is unlikely that the current byte can be part of the shellcode. reset root
root = None
return buffer_chunks
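    # Illustrative only: for a 0x40-byte stack region the probe above might
    # return something like {0x7ffefff0: 0x18, 0x7fff0010: 0x28}, i.e. two
    # contiguous runs of bytes that look unconstrained enough to later hold
    # shellcode.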
def _determine_nopsled_length(self, stack_addrs, root, offset, shellcode):
min_nopsled_size = 0
max_nopsled_size = stack_addrs[root] - offset - len(shellcode)
nopsled_chunks = NopSleds.get_nopsleds(self.crash.state.arch)
assert nopsled_chunks
nopsled_chunk = nopsled_chunks[0] # TODO: use more than one nopsleds
while min_nopsled_size < max_nopsled_size:
attempt = (min_nopsled_size + max_nopsled_size + 1) // 2
works, sc_constraint = self._attempt_write_nopsled(self.crash.state, shellcode, root + offset, attempt,
nopsled_chunk)
if not works:
# we are trying to write too many. Write less!
max_nopsled_size = attempt - 1
else:
# try to write more?
min_nopsled_size = attempt
return min_nopsled_size, nopsled_chunk
def _attempt_jump(self, constraints, addr, bypasses_nx=False, bypasses_aslr=True):
all_constraints = list(constraints) + [self.crash.state.regs.ip == addr]
if self.crash.state.solver.satisfiable(extra_constraints=all_constraints):
self.crash.state.solver.add(*all_constraints)
return Exploit(self.crash, bypasses_aslr=bypasses_aslr, bypasses_nx=bypasses_nx, target_instruction_pointer=addr)
return None
@staticmethod
def _attempt_write_nopsled(state, shellcode, start, nopsled_size, nopsled_chunk):
nopsled_count = nopsled_size // len(nopsled_chunk)
rounded_size = nopsled_count * len(nopsled_chunk)
sc_data = state.memory.load(start, len(shellcode) + rounded_size)
sc_constraint = sc_data == nopsled_chunk * nopsled_count + shellcode
return state.solver.satisfiable(extra_constraints=(sc_constraint,)), sc_constraint
| bsd-2-clause | -7,863,996,589,730,166,000 | 47.101852 | 132 | 0.570452 | false |
iafan/zing | pootle/apps/pootle_project/views.py | 1 | 6882 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from pootle.core.browser import ItemTypes
from pootle.core.decorators import get_path_obj, permission_required
from pootle.core.helpers import get_sidebar_announcements_context
from pootle.core.views import (PootleBrowseView, PootleExportView,
PootleTranslateView)
from pootle_app.models import Directory
from pootle_app.views.admin.permissions import admin_permissions
from pootle_store.models import Store
from .models import Project, ProjectResource, ProjectSet
class ProjectMixin(object):
model = Project
browse_url_path = "pootle-project-browse"
export_url_path = "pootle-project-export"
translate_url_path = "pootle-project-translate"
template_extends = 'projects/base.html'
@property
def ctx_path(self):
return "/projects/%s/" % self.project.code
@property
def permission_context(self):
return self.project.directory
@cached_property
def project(self):
project = get_object_or_404(
Project.objects.select_related("directory"),
code=self.kwargs["project_code"])
if project.disabled and not self.request.user.is_superuser:
raise Http404
return project
@property
def url_kwargs(self):
return {
"project_code": self.project.code,
"dir_path": self.kwargs["dir_path"],
"filename": self.kwargs["filename"]}
@lru_cache()
def get_object(self):
if not (self.kwargs["dir_path"] or self.kwargs["filename"]):
return self.project
project_path = (
"/%s/%s%s"
% (self.project.code,
self.kwargs['dir_path'],
self.kwargs['filename']))
regex = r"^/[^/]*%s$" % project_path
if not self.kwargs["filename"]:
dirs = Directory.objects.live()
if self.kwargs['dir_path'].count("/"):
tp_prefix = "parent__" * self.kwargs['dir_path'].count("/")
dirs = dirs.select_related(
"%stranslationproject" % tp_prefix,
"%stranslationproject__language" % tp_prefix)
resources = dirs.filter(
pootle_path__endswith=project_path,
pootle_path__regex=regex,
)
else:
resources = (
Store.objects.live()
.select_related("translation_project__language")
.filter(translation_project__project=self.project)
.filter(pootle_path__endswith=project_path)
.filter(pootle_path__regex=regex))
if resources:
return ProjectResource(
resources,
("/projects/%(project_code)s/%(dir_path)s%(filename)s"
% self.kwargs))
raise Http404
@property
def resource_path(self):
return "%(dir_path)s%(filename)s" % self.kwargs
class ProjectBrowseView(ProjectMixin, PootleBrowseView):
@property
def stats(self):
return self.object.get_stats_for_user(self.request.user)
@cached_property
def items(self):
return self.object.get_children_for_user(self.request.user)
@property
def pootle_path(self):
return self.object.pootle_path
@property
def permission_context(self):
return self.project.directory
@cached_property
def sidebar_announcements(self):
return get_sidebar_announcements_context(
self.request,
(self.project, ))
@property
def url_kwargs(self):
return self.kwargs
def get_item_type(self, path_obj):
return ItemTypes.LANGUAGE
def get_item_title(self, path_obj):
if self.kwargs['dir_path'] or self.kwargs['filename']:
return path_obj.translation_project.language.name
return path_obj.language.name
class ProjectTranslateView(ProjectMixin, PootleTranslateView):
required_permission = "administrate"
@property
def pootle_path(self):
return self.object.pootle_path
class ProjectExportView(ProjectMixin, PootleExportView):
required_permission = 'administrate'
source_language = "en"
@get_path_obj
@permission_required('administrate')
def project_admin_permissions(request, project):
ctx = {
'page': 'admin-permissions',
'browse_url': reverse('pootle-project-browse', kwargs={
'project_code': project.code,
'dir_path': '',
'filename': '',
}),
'translate_url': reverse('pootle-project-translate', kwargs={
'project_code': project.code,
'dir_path': '',
'filename': '',
}),
'project': project,
'directory': project.directory,
}
return admin_permissions(request, project.directory,
'projects/admin/permissions.html', ctx)
class ProjectsMixin(object):
template_extends = 'projects/all/base.html'
browse_url_path = "pootle-projects-browse"
export_url_path = "pootle-projects-export"
translate_url_path = "pootle-projects-translate"
@lru_cache()
def get_object(self):
user_projects = (
Project.objects.for_user(self.request.user)
.select_related('directory')
)
return ProjectSet(user_projects)
@property
def permission_context(self):
return self.get_object().directory
@property
def has_admin_access(self):
return self.request.user.is_superuser
@property
def url_kwargs(self):
return {}
class ProjectsBrowseView(ProjectsMixin, PootleBrowseView):
@property
def sidebar_announcements(self):
return {}, None
def get(self, *args, **kwargs):
response = super(ProjectsBrowseView, self).get(*args, **kwargs)
response.set_cookie('pootle-language', "projects")
return response
def get_item_type(self, path_obj):
return ItemTypes.PROJECT
def get_item_title(self, path_obj):
return path_obj.fullname
class ProjectsTranslateView(ProjectsMixin, PootleTranslateView):
required_permission = "administrate"
class ProjectsExportView(ProjectsMixin, PootleExportView):
required_permission = 'administrate'
source_language = "en"
| gpl-3.0 | -8,620,018,919,134,642,000 | 29.451327 | 79 | 0.622929 | false |
uq-eresearch/archaeology-reference-collections | apps/botanycollection/migrations/0002_auto__chg_field_woodfeatures_rays_structure.py | 1 | 17779 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'WoodFeatures.rays_structure'
db.alter_column(u'botanycollection_woodfeatures', 'rays_structure', self.gf('django.db.models.fields.CharField')(max_length=100))
def backwards(self, orm):
# Changing field 'WoodFeatures.rays_structure'
db.alter_column(u'botanycollection_woodfeatures', 'rays_structure', self.gf('django.db.models.fields.CharField')(max_length=50))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'botanycollection.accession': {
'Meta': {'object_name': 'Accession'},
'accession_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'altitude': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'biological_synonym': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'collection_date': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'collector': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'collector_serial_no': ('django.db.models.fields.CharField', [], {'max_length': '22', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contributor': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '27', 'blank': 'True'}),
'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'date_contributed': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'detdate': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}),
'detna': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'family': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_level_flag': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'lat_long': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'location_notes': ('django.db.models.fields.CharField', [], {'max_length': '162', 'blank': 'True'}),
'material': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'preservation_state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'related_accession': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'site_name': ('django.db.models.fields.CharField', [], {'max_length': '214', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '67', 'blank': 'True'}),
'source_number': ('django.db.models.fields.CharField', [], {'max_length': '26', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'species_author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sspau': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sspna': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'subfam': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'tribe': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'uq_accession': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '14', 'blank': 'True'}),
'varau': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'varna': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'weblinks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'botanycollection.accessionphoto': {
'Meta': {'object_name': 'AccessionPhoto'},
'accession': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['botanycollection.Accession']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['auth.User']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'botanycollection.seedfeatures': {
'Meta': {'object_name': 'SeedFeatures'},
'accession': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['botanycollection.Accession']", 'unique': 'True'}),
'anatomy_longitudinal_sections': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'anatomy_transverse_section': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'embryo_endosperm': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hilum_details': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'other_identification_information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'references_and_links': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'seed_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'shape_2d': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'shape_3d': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'shape_detail': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'special_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'surface_inner_texture': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'surface_outer_texture': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'testa_endocarp_thickness': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
u'botanycollection.woodfeatures': {
'Meta': {'object_name': 'WoodFeatures'},
'accession': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['botanycollection.Accession']", 'unique': 'True'}),
'aggregate_rays': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'australia': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_canals': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_arrangment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_bands': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'axial_parenchyma_present': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'cambial_variants': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'druses': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'family': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fibre_helical_thickenings': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fibre_pits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fibres_wall_thickeness': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'fusiform_parenchyma_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'helical_thickenings': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_phloem': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'indigenous_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'intervessels_pits_arrangment': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'intervessels_pits_size': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'intervessels_pits_specific_shapes': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'lactifers_tanniferous_tubes': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_caledonia': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'parenchyma_like_fibres_present': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'perforation_plates_types': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'prismatic_crystal': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'radial_secretory_canals': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'radial_tracheids_for_gymnosperms': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'rays': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays_cellular_composition': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'rays_height': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays_sheat_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays_structure': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'rays_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'rays_width': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'reference_specimens': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'silica': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'solitary_vessels_with_angular_outline': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'spetate_fibres_present': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'storied_structure': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'tile_cells': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'tracheid_diameter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'turkey': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vascularvasicentric_tracheids_present': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'vessels': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_arrangment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_deposits': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_grouping': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_porosity': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_rays_pitting': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vessels_tyloses': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'walls': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['botanycollection'] | bsd-3-clause | -265,070,471,046,577,630 | 91.124352 | 195 | 0.561505 | false |
MobileWebApps/backend-python-rest-gae | lib/markdown/extensions/wikilinks.py | 1 | 5322 | '''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> print html
<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>
Whitespace behavior:
>>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>
>>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
<p>foo bar</p>
To define custom settings the simple way:
>>> print markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
    ...                                ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
MetaData should not carry over to next document:
>>> print md.convert("No [[MetaData]] here.")
<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> print md.convert('[[foo]]')
<p><a class="wikilink" href="/bar/">foo</a></p>
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import Pattern
from ..util import etree
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
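# For example, consistent with the whitespace doctest above:
#     build_url('foo bar_baz', '/', '/') returns '/foo_bar_baz/'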
class WikiLinkExtension(Extension):
def __init__(self, configs):
# set extension defaults
self.config = {
'base_url' : ['/', 'String to append to beginning or URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
configs = dict(configs) or {}
# Override defaults with user settings
for key, value in configs.items():
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(Pattern):
def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(configs=None) :
return WikiLinkExtension(configs=configs)
| bsd-3-clause | -3,976,461,663,669,070,300 | 34.245033 | 102 | 0.559376 | false |
johnkerl/scripts-math | pythonlib/bin/qr.py | 1 | 1720 | #!/usr/bin/python -Wall
# ================================================================
# Copyright (c) John Kerl 2007
# [email protected]
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
from sackmat_m import *
# ----------------------------------------------------------------
def test_submx_premul():
A = sackmat([
[1,2,3],
[4,5,6],
[7,8,9]])
Q = sackmat([
[0,1],
[1,0]])
[sr, ar] = [1, 1]
Q.printp("Q")
A.printp("Old A")
A.premultiply_by_submatrix(Q, sr, ar)
A.printp("New A")
# ----------------------------------------------------------------
def test_tip():
A = sackmat([
[1,2,3],
[4,5,6],
[7,8,9]])
A.printp("A")
A.transpose_in_place()
A.printp("A^t")
# ----------------------------------------------------------------
def test_mk_seq():
A = make_seq_matrix(1,1); A.printp("seq")
A = make_seq_matrix(2,2); A.printp("seq")
A = make_seq_matrix(3,3); A.printp("seq")
A = make_seq_matrix(4,4); A.printp("seq")
A = make_nseq_matrix(1,1); A.printp("nseq")
A = make_nseq_matrix(2,2); A.printp("nseq")
A = make_nseq_matrix(3,4); A.printp("nseq")
A = make_nseq_matrix(4,5); A.printp("nseq")
# ----------------------------------------------------------------
def test_qr():
#A = sackmat([[1,2],[3,4]])
#A = sackmat([
# [3,0,0],
# [4,1,0],
# [0,0,1]])
A = sackmat([
[-1,2,3],
[4,-5,6],
[7,8,-9]])
[nr,nc] = A.dims()
Q = make_zero_matrix(nr,nr)
R = make_zero_matrix(nr,nc)
A.QR_decomp(Q,R)
QR = Q*R
A.printp("A")
Q.printp("Q")
R.printp("R")
QR.printp("QR")
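	# For a valid decomposition, Q is orthogonal, R is upper triangular, and
	# the printed QR product should reproduce A up to floating-point error.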
# ----------------------------------------------------------------
#test_submx_premul()
#test_tip()
#test_mk_seq()
test_qr()
| bsd-2-clause | -7,716,046,529,800,229,000 | 21.933333 | 66 | 0.402907 | false |
nedbat/coveragepy | tests/coveragetest.py | 2 | 18042 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Base test case class for coverage.py testing."""
import contextlib
import datetime
import difflib
import glob
import io
import os
import os.path
import random
import re
import shlex
import sys
import pytest
import coverage
from coverage import env
from coverage.cmdline import CoverageScript
from coverage.misc import import_local_file
from tests.helpers import arcs_to_arcz_repr, arcz_to_arcs, assert_count_equal
from tests.helpers import nice_file, run_command
from tests.mixins import PytestBase, StdStreamCapturingMixin, SysPathModulesMixin, TempDirMixin
# Status returns for the command line.
OK, ERR = 0, 1
# The coverage/tests directory, for all sorts of finding test helping things.
TESTS_DIR = os.path.dirname(__file__)
class CoverageTest(
StdStreamCapturingMixin,
SysPathModulesMixin,
TempDirMixin,
PytestBase,
):
"""A base class for coverage.py test cases."""
# Standard unittest setting: show me diffs even if they are very long.
maxDiff = None
# Tell newer unittest implementations to print long helpful messages.
longMessage = True
# Let stderr go to stderr, pytest will capture it for us.
show_stderr = True
# Temp dirs go to $TMPDIR/coverage_test/*
temp_dir_prefix = "coverage_test/"
if os.getenv('COVERAGE_ENV_ID'): # pragma: debugging
temp_dir_prefix += "{}/".format(os.getenv('COVERAGE_ENV_ID'))
# Keep the temp directories if the env says to.
# $set_env.py: COVERAGE_KEEP_TMP - Keep the temp directories made by tests.
keep_temp_dir = bool(int(os.getenv("COVERAGE_KEEP_TMP", "0")))
def setup_test(self):
super().setup_test()
# Attributes for getting info about what happened.
self.last_command_status = None
self.last_command_output = None
self.last_module_name = None
def start_import_stop(self, cov, modname, modfile=None):
"""Start coverage, import a file, then stop coverage.
`cov` is started and stopped, with an `import_local_file` of
`modname` in the middle. `modfile` is the file to import as `modname`
if it isn't in the current directory.
The imported module is returned.
"""
cov.start()
try: # pragma: nested
# Import the Python file, executing it.
mod = import_local_file(modname, modfile)
finally: # pragma: nested
# Stop coverage.py.
cov.stop()
return mod
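    # Typical use from a test method (sketch):
    #     cov = coverage.Coverage()
    #     mod = self.start_import_stop(cov, "my_module")
    # which measures the import of my_module.py created with self.make_file().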
def get_module_name(self):
"""Return a random module name to use for this test run."""
self.last_module_name = 'coverage_test_' + str(random.random())[2:]
return self.last_module_name
def _check_arcs(self, a1, a2, arc_type):
"""Check that the arc lists `a1` and `a2` are equal.
If they are equal, return empty string. If they are unequal, return
a string explaining what is different.
"""
# Make them into multi-line strings so we can see what's going wrong.
s1 = arcs_to_arcz_repr(a1)
s2 = arcs_to_arcz_repr(a2)
if s1 != s2:
lines1 = s1.splitlines(True)
lines2 = s2.splitlines(True)
diff = "".join(difflib.ndiff(lines1, lines2))
return "\n" + arc_type + " arcs differ: minus is expected, plus is actual\n" + diff
else:
return ""
def check_coverage(
self, text, lines=None, missing="", report="",
excludes=None, partials="",
arcz=None, arcz_missing=None, arcz_unpredicted=None,
arcs=None, arcs_missing=None, arcs_unpredicted=None,
):
"""Check the coverage measurement of `text`.
The source `text` is run and measured. `lines` are the line numbers
that are executable, or a list of possible line numbers, any of which
could match. `missing` are the lines not executed, `excludes` are
regexes to match against for excluding lines, and `report` is the text
of the measurement report.
For arc measurement, `arcz` is a string that can be decoded into arcs
in the code (see `arcz_to_arcs` for the encoding scheme).
`arcz_missing` are the arcs that are not executed, and
`arcz_unpredicted` are the arcs executed in the code, but not deducible
from the code. These last two default to "", meaning we explicitly
check that there are no missing or unpredicted arcs.
Returns the Coverage object, in case you want to poke at it some more.
"""
__tracebackhide__ = True # pytest, please don't show me this function.
# We write the code into a file so that we can import it.
# Coverage.py wants to deal with things as modules with file names.
modname = self.get_module_name()
self.make_file(modname + ".py", text)
if arcs is None and arcz is not None:
arcs = arcz_to_arcs(arcz)
if arcs_missing is None and arcz_missing is not None:
arcs_missing = arcz_to_arcs(arcz_missing)
if arcs_unpredicted is None and arcz_unpredicted is not None:
arcs_unpredicted = arcz_to_arcs(arcz_unpredicted)
# Start up coverage.py.
cov = coverage.Coverage(branch=True)
cov.erase()
for exc in excludes or []:
cov.exclude(exc)
for par in partials or []:
cov.exclude(par, which='partial')
mod = self.start_import_stop(cov, modname)
# Clean up our side effects
del sys.modules[modname]
# Get the analysis results, and check that they are right.
analysis = cov._analyze(mod)
statements = sorted(analysis.statements)
if lines is not None:
if isinstance(lines[0], int):
# lines is just a list of numbers, it must match the statements
# found in the code.
assert statements == lines, f"{statements!r} != {lines!r}"
else:
# lines is a list of possible line number lists, one of them
# must match.
for line_list in lines:
if statements == line_list:
break
else:
assert False, f"None of the lines choices matched {statements!r}"
missing_formatted = analysis.missing_formatted()
if isinstance(missing, str):
msg = f"{missing_formatted!r} != {missing!r}"
assert missing_formatted == missing, msg
else:
for missing_list in missing:
if missing_formatted == missing_list:
break
else:
assert False, f"None of the missing choices matched {missing_formatted!r}"
if arcs is not None:
# print("Possible arcs:")
# print(" expected:", arcs)
# print(" actual:", analysis.arc_possibilities())
# print("Executed:")
# print(" actual:", sorted(set(analysis.arcs_executed())))
# TODO: this would be nicer with pytest-check, once we can run that.
msg = (
self._check_arcs(arcs, analysis.arc_possibilities(), "Possible") +
self._check_arcs(arcs_missing, analysis.arcs_missing(), "Missing") +
self._check_arcs(arcs_unpredicted, analysis.arcs_unpredicted(), "Unpredicted")
)
if msg:
assert False, msg
if report:
frep = io.StringIO()
cov.report(mod, file=frep, show_missing=True)
rep = " ".join(frep.getvalue().split("\n")[2].split()[1:])
assert report == rep, f"{report!r} != {rep!r}"
return cov
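    # A minimal sketch of a call to this helper (the source and expectations
    # are illustrative, not taken from a real test):
    #     self.check_coverage("""\
    #         a = 1
    #         if a == 1:
    #             b = 2
    #         """,
    #         lines=[1, 2, 3], missing="")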
@contextlib.contextmanager
def assert_warnings(self, cov, warnings, not_warnings=()):
"""A context manager to check that particular warnings happened in `cov`.
`cov` is a Coverage instance. `warnings` is a list of regexes. Every
regex must match a warning that was issued by `cov`. It is OK for
extra warnings to be issued by `cov` that are not matched by any regex.
Warnings that are disabled are still considered issued by this function.
`not_warnings` is a list of regexes that must not appear in the
warnings. This is only checked if there are some positive warnings to
test for in `warnings`.
If `warnings` is empty, then `cov` is not allowed to issue any
warnings.
"""
saved_warnings = []
def capture_warning(msg, slug=None, once=False): # pylint: disable=unused-argument
"""A fake implementation of Coverage._warn, to capture warnings."""
# NOTE: we don't implement `once`.
if slug:
msg = f"{msg} ({slug})"
saved_warnings.append(msg)
original_warn = cov._warn
cov._warn = capture_warning
try:
yield
except: # pylint: disable=try-except-raise
raise
else:
if warnings:
for warning_regex in warnings:
for saved in saved_warnings:
if re.search(warning_regex, saved):
break
else:
msg = f"Didn't find warning {warning_regex!r} in {saved_warnings!r}"
assert False, msg
for warning_regex in not_warnings:
for saved in saved_warnings:
if re.search(warning_regex, saved):
msg = f"Found warning {warning_regex!r} in {saved_warnings!r}"
assert False, msg
else:
# No warnings expected. Raise if any warnings happened.
if saved_warnings:
assert False, f"Unexpected warnings: {saved_warnings!r}"
finally:
cov._warn = original_warn
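    # Sketch of intended use (the regex is illustrative):
    #     with self.assert_warnings(cov, [r"Couldn't .*"]):
    #         ... code expected to make cov issue a matching warning ...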
def assert_same_files(self, flist1, flist2):
"""Assert that `flist1` and `flist2` are the same set of file names."""
flist1_nice = [nice_file(f) for f in flist1]
flist2_nice = [nice_file(f) for f in flist2]
assert_count_equal(flist1_nice, flist2_nice)
def assert_exists(self, fname):
"""Assert that `fname` is a file that exists."""
msg = "File %r should exist" % fname
assert os.path.exists(fname), msg
def assert_doesnt_exist(self, fname):
"""Assert that `fname` is a file that doesn't exist."""
msg = "File %r shouldn't exist" % fname
assert not os.path.exists(fname), msg
def assert_file_count(self, pattern, count):
"""Assert that there are `count` files matching `pattern`."""
files = sorted(glob.glob(pattern))
msg = "There should be {} files matching {!r}, but there are these: {}"
msg = msg.format(count, pattern, files)
assert len(files) == count, msg
def assert_recent_datetime(self, dt, seconds=10, msg=None):
"""Assert that `dt` marks a time at most `seconds` seconds ago."""
age = datetime.datetime.now() - dt
assert age.total_seconds() >= 0, msg
assert age.total_seconds() <= seconds, msg
def command_line(self, args, ret=OK):
"""Run `args` through the command line.
Use this when you want to run the full coverage machinery, but in the
current process. Exceptions may be thrown from deep in the code.
Asserts that `ret` is returned by `CoverageScript.command_line`.
Compare with `run_command`.
Returns None.
"""
ret_actual = command_line(args)
assert ret_actual == ret, f"{ret_actual!r} != {ret!r}"
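    # For example (sketch), running the built-in help in-process and checking
    # that it succeeds:
    #     self.command_line("help")        # asserts a return status of OK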
# Some distros rename the coverage command, and need a way to indicate
# their new command name to the tests. This is here for them to override,
# for example:
# https://salsa.debian.org/debian/pkg-python-coverage/-/blob/master/debian/patches/02.rename-public-programs.patch
coverage_command = "coverage"
def run_command(self, cmd):
"""Run the command-line `cmd` in a sub-process.
`cmd` is the command line to invoke in a sub-process. Returns the
combined content of `stdout` and `stderr` output streams from the
sub-process.
See `run_command_status` for complete semantics.
Use this when you need to test the process behavior of coverage.
Compare with `command_line`.
"""
_, output = self.run_command_status(cmd)
return output
def run_command_status(self, cmd):
"""Run the command-line `cmd` in a sub-process, and print its output.
Use this when you need to test the process behavior of coverage.
Compare with `command_line`.
Handles the following command names specially:
* "python" is replaced with the command name of the current
Python interpreter.
* "coverage" is replaced with the command name for the main
coverage.py program.
Returns a pair: the process' exit status and its stdout/stderr text,
which are also stored as `self.last_command_status` and
`self.last_command_output`.
"""
# Make sure "python" and "coverage" mean specifically what we want
# them to mean.
split_commandline = cmd.split()
command_name = split_commandline[0]
command_args = split_commandline[1:]
if command_name == "python":
# Running a Python interpreter in a sub-processes can be tricky.
# Use the real name of our own executable. So "python foo.py" might
# get executed as "python3.3 foo.py". This is important because
# Python 3.x doesn't install as "python", so you might get a Python
# 2 executable instead if you don't use the executable's basename.
command_words = [os.path.basename(sys.executable)]
elif command_name == "coverage":
if env.JYTHON: # pragma: only jython
# Jython can't do reporting, so let's skip the test now.
if command_args and command_args[0] in ('report', 'html', 'xml', 'annotate'):
pytest.skip("Can't run reporting commands in Jython")
# Jython can't run "coverage" as a command because the shebang
# refers to another shebang'd Python script. So run them as
# modules.
command_words = "jython -m coverage".split()
else:
# The invocation requests the coverage.py program. Substitute the
# actual coverage.py main command name.
command_words = [self.coverage_command]
else:
command_words = [command_name]
cmd = " ".join([shlex.quote(w) for w in command_words] + command_args)
# Add our test modules directory to PYTHONPATH. I'm sure there's too
# much path munging here, but...
pythonpath_name = "PYTHONPATH"
if env.JYTHON:
pythonpath_name = "JYTHONPATH" # pragma: only jython
testmods = nice_file(self.working_root(), "tests/modules")
zipfile = nice_file(self.working_root(), "tests/zipmods.zip")
pypath = os.getenv(pythonpath_name, '')
if pypath:
pypath += os.pathsep
pypath += testmods + os.pathsep + zipfile
self.set_environ(pythonpath_name, pypath)
self.last_command_status, self.last_command_output = run_command(cmd)
print(self.last_command_output)
return self.last_command_status, self.last_command_output
def working_root(self):
"""Where is the root of the coverage.py working tree?"""
return os.path.dirname(nice_file(coverage.__file__, ".."))
def report_from_command(self, cmd):
"""Return the report from the `cmd`, with some convenience added."""
report = self.run_command(cmd).replace('\\', '/')
assert "error" not in report.lower()
return report
def report_lines(self, report):
"""Return the lines of the report, as a list."""
lines = report.split('\n')
assert lines[-1] == ""
return lines[:-1]
def line_count(self, report):
"""How many lines are in `report`?"""
return len(self.report_lines(report))
def squeezed_lines(self, report):
"""Return a list of the lines in report, with the spaces squeezed."""
lines = self.report_lines(report)
return [re.sub(r"\s+", " ", l.strip()) for l in lines]
def last_line_squeezed(self, report):
"""Return the last line of `report` with the spaces squeezed down."""
return self.squeezed_lines(report)[-1]
def get_measured_filenames(self, coverage_data):
"""Get paths to measured files.
Returns a dict of {filename: absolute path to file}
for given CoverageData.
"""
return {os.path.basename(filename): filename
for filename in coverage_data.measured_files()}
class UsingModulesMixin:
"""A mixin for importing modules from tests/modules and tests/moremodules."""
def setup_test(self):
super().setup_test()
# Parent class saves and restores sys.path, we can just modify it.
sys.path.append(nice_file(TESTS_DIR, "modules"))
sys.path.append(nice_file(TESTS_DIR, "moremodules"))
def command_line(args):
"""Run `args` through the CoverageScript command line.
Returns the return code from CoverageScript.command_line.
"""
script = CoverageScript()
ret = script.command_line(shlex.split(args))
return ret
| apache-2.0 | 957,895,446,708,358,500 | 37.883621 | 118 | 0.599767 | false |
jpzk/evopy | evopy/external/playdoh/asyncjobhandler.py | 2 | 15068 | """
Asynchronous Job Manager
"""
from cache import *
from gputools import *
from pool import *
from rpc import *
from resources import *
from numpy import sum
import cPickle
import os
import os.path
import time
import hashlib
import random
import traceback
__all__ = ['Job', 'JobRun', 'AsyncJobHandler', 'submit_jobs']
class Job(object):
jobdir = JOBDIR
def __init__(self, function, *args, **kwds):
"""
Constructor.
*Arguments*
`function`
            The function to evaluate; it is a native Python function. Function
serialization should take care of ensuring that this function is
correctly defined in the namespace.
`*args, **kwds`
The arguments of the function.
"""
self.function = function
self.args = args
self.kwds = kwds
self.result = None
self.status = 'queued'
def compute_id(self):
"""
Computes a unique identifier of the job.
"""
m = hashlib.sha1()
pid = os.getpid() # current process id
# t = int(time.time() * 10000) % 1000000 # time
s = str(self.function) # function name
s += cPickle.dumps(self.args, -1) # args dump
s += cPickle.dumps(self.kwds, -1) # args dump
m.update(str(random.random())) # random number
m.update(str(pid))
m.update(s)
hash = m.hexdigest()
self._id = hash
return self._id
def get_id(self):
if not hasattr(self, '_id'):
self.compute_id()
return self._id
id = property(get_id)
def get_filename(self):
return os.path.join(Job.jobdir, self.id + '.pkl')
filename = property(get_filename)
def evaluate(self):
"""
Evaluates the function on the given arguments.
"""
try:
self.status = 'processing'
self.result = self.function(*self.args, **self.kwds)
self.status = 'finished'
except Exception as inst:
# add the traceback to the exception
msg = traceback.format_exc()
inst.traceback = msg
log_warn("An exception has occurred in %s, print exc.traceback \
where exc is the Exception object returned by playdoh.map" %
self.function.__name__)
self.result = inst
self.status = 'crashed'
return self.result
def record(self):
"""
Records the job after evaluation on the disk.
"""
if not os.path.exists(self.jobdir):
log_debug("creating '%s' folder for code pickling" % self.jobdir)
os.mkdir(self.jobdir)
log_debug("writing '%s'" % self.filename)
# delete shared data before pickling
if 'shared_data' in self.kwds:
del self.kwds['shared_data']
file = open(self.filename, 'wb')
cPickle.dump(self, file, -1)
file.close()
@staticmethod
def load(id):
"""
Returns the Job object stored in the filesystem using its identifier.
"""
try:
filename = os.path.join(Job.jobdir, id + '.pkl')
log_debug("opening file '%s'" % filename)
file = open(filename, 'rb')
job = cPickle.load(file)
file.close()
# time.sleep(.005)
except IOError:
log_debug("file '%s' not found" % filename)
job = None
except EOFError:
log_debug("EOF error with '%s', trying again..." % filename)
time.sleep(.2)
file = open(filename, 'rb')
job = cPickle.load(file)
file.close()
return job
@staticmethod
def erase(id):
"""
Erases the Job object stored in the filesystem using its identifier.
"""
filename = os.path.join(Job.jobdir, id + '.pkl')
log_debug("erasing '%s'" % filename)
try:
os.remove(filename)
except:
log_warn("Unable to erase <%s>" % filename)
@staticmethod
def erase_all():
"""
Erases all Job objects stored in the filesystem.
"""
files = os.listdir(Job.jobdir)
log_debug("erasing all files in '%s'" % Job.jobdir)
[os.remove(os.path.join(Job.jobdir, filename)) for filename in files]
def eval_job(job, shared_data={}):
"""
Evaluates a job. Must be global to be passed to CustomPool.
Handles Exceptions.
"""
if len(shared_data) > 0:
job.kwds['shared_data'] = shared_data
result = job.evaluate()
job.record()
return result
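# Minimal usage sketch for Job/eval_job (the function "square" below is
# hypothetical; the snippet is illustrative only and is not executed on import):
#
#     def square(x):
#         return x * x
#
#     job = Job(square, 4)
#     eval_job(job)                    # evaluates the job and records it on disk
#     print Job.load(job.id).result    # -> 16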
class AsyncJobHandler(object):
"""
A Handler object handling asynchronous job management, on the server side.
"""
def __init__(self):
"""
max_cpu is the maximum number of CPUs dedicated to the cluster
idem for max_gpu
None = use all CPUs/GPUs available
"""
self.handlers = []
self.cpu = MAXCPU
self.gpu = 0
self.pool = None
self.cpool = None
self.jobs = {}
def add_jobs(self, jobs):
for job in jobs:
self.jobs[job.id] = job
return [job.id for job in jobs]
def initialize_cpool(self, type, units, do_redirect):
pool = self.pools[type]
# status = pool.get_status()
unitindices = pool.get_idle_units(units)
if len(unitindices) != units:
msg = "not enough %s(s) available, exiting now" % (type)
log_warn(msg)
raise Exception(msg)
log_debug("found %d %s(s) available: %s" % (units, type,
str(unitindices)))
self.cpool = CustomPool(unitindices, unittype=type,
do_redirect=do_redirect)
# links the global Pool object to the CustomPool object
self.cpool.pool = pool
def submit(self, jobs, type='CPU', units=None, shared_data={},
do_redirect=None):
"""
Submit jobs.
*Arguments*
`jobs`
A list of Job objects.
"""
job_ids = self.add_jobs(jobs)
# By default, use all resources assigned to the current client
# for this handler.
# If units is set, then use only this number of units
# if units is None:
# units = self.resources[type][self.client]
# find idle units
if units is None:
log_warn("units should not be None in submit")
if self.cpool is None:
self.initialize_cpool(type, units, do_redirect)
else:
self.cpool.set_units(units)
pool_ids = self.cpool.submit_tasks(eval_job, shared_data, jobs)
for i in xrange(len(jobs)):
id = job_ids[i]
self.jobs[id].pool_id = pool_ids[i]
return job_ids
def get_pool_ids(self, job_ids):
"""
        Converts job ids (specific to AsyncJobHandler) to pool ids
(specific to the CustomPool object)
"""
return [self.jobs[id].pool_id for id in job_ids]
def get_status(self, job_ids):
if job_ids is None:
statuss = None
raise Exception("The job identifiers must be specified")
else:
statuss = []
for id in job_ids:
job = Job.load(id)
if job is not None:
log_debug("job file '%s' found" % id)
status = job.status
elif id in self.jobs.keys():
log_debug("job file '%s' not found" % id)
status = self.jobs[id].status
else:
log_warn("job '%s' not found" % id)
status = None
statuss.append(status)
return statuss
def get_results(self, job_ids):
if job_ids is None:
results = None
raise Exception("Please specify job identifiers.")
else:
results = []
for id in job_ids:
job = Job.load(id)
if job is not None:
result = job.result
else:
# if job is None, it means that it probably has
# not finished yet
result = None
# if self.pool is not None:
log_debug("Tasks have not finished yet, waiting...")
self.cpool.join()
job = Job.load(id)
if job is not None:
result = job.result
results.append(result)
return results
def has_finished(self, job_ids):
if self.cpool is not None:
pool_ids = self.get_pool_ids(job_ids)
return self.cpool.has_finished(pool_ids)
else:
log_warn("The specified job identifiers haven't been found")
return None
def erase(self, job_ids):
log_debug("Erasing job results")
[Job.erase(id) for id in job_ids]
def close(self):
if hasattr(self, 'cpool'):
if self.cpool is not None:
self.cpool.close()
else:
log_warn("The pool object has already been closed")
def kill(self):
# TODO: jobids?
if self.cpool is not None:
self.cpool.kill()
else:
log_warn("The pool object has already been killed")
class JobRun(object):
"""
Contains information about a parallel map that has been launched
by the ``map_async`` function.
Methods:
``get_status()``
Returns the current status of the jobs.
``get_result(jobids=None)``
Returns the result. Blocks until the jobs have finished.
You can specify jobids to retrieve only some of the results,
in that case it must
be a list of job identifiers.
"""
def __init__(self, type, jobs, machines=[]):
self.type = type
self.jobs = jobs
self.machines = machines # list of Machine object
self._machines = [m.to_tuple() for m in self.machines]
self.local = None
self.jobids = None
def set_local(self, v):
self.local = v
def set_jobids(self, jobids):
self.jobids = jobids
def get_machines(self):
return self._machines
def get_machine_index(self, machine):
for i in xrange(len(self.machines)):
if (self.machines[i] == machine):
return i
def concatenate(self, lists):
lists2 = []
[lists2.extend(l) for l in lists]
return lists2
def get_status(self):
GC.set(self.get_machines(), handler_class=AsyncJobHandler)
disconnect = GC.connect()
status = GC.get_status(self.jobids)
if disconnect:
GC.disconnect()
return self.concatenate(status)
def get_results(self, ids=None):
if ids is None:
ids = self.jobids
GC.set(self.get_machines(), handler_class=AsyncJobHandler)
disconnect = GC.connect()
if not self.local:
log_info("Retrieving job results...")
results = GC.get_results(ids)
GC.erase(self.jobids)
if disconnect:
GC.disconnect()
# clients = RpcClients(self.get_machines(),
# handler_class=AsyncJobHandler)
# clients.connect()
# results = clients.get_results(self.jobids)
# clients.erase(self.jobids)
# clients.disconnect()
results = self.concatenate(results)
if self.local:
close_servers(self.get_machines())
return results
def get_result(self):
return self.get_results()
def __repr__(self):
nmachines = len(self.machines)
if nmachines > 1:
plural = 's'
else:
plural = ''
return "<Task: %d jobs on %d machine%s>" % (len(self.jobs),
nmachines, plural)
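# Sketch of how a JobRun returned by submit_jobs (defined below) is typically
# consumed; the argument values are illustrative only:
#
#     task = submit_jobs(fun, allocation, argss=[[1, 2, 3]])
#     task.get_status()              # poll the machines for job status
#     results = task.get_results()   # blocks until all jobs have finished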
def create_jobs(fun, argss, kwdss):
"""
Create Job objects
"""
jobs = []
k = len(argss) # number of non-named arguments
keys = kwdss.keys() # keyword arguments
i = 0 # task index
while True:
try:
args = [argss[l][i] for l in xrange(k)]
kwds = dict([(key, kwdss[key][i]) for key in keys])
except:
break
jobs.append(Job(fun, *args, **kwds))
i += 1
return jobs
def split_jobs(jobs, machines, allocation):
"""
Splits jobs among workers
"""
total_units = allocation.total_units
njobs = len(jobs)
# charge[i] is the number of jobs on machine #i
i = 0 # worker index
charge = []
for m in machines:
nbr_units = allocation[m] # number of workers on this machine
charge.append(nbr_units * njobs / total_units)
i += 1
charge[-1] = njobs - sum(charge[:-1], dtype=int)
sjobs = []
i = 0 # worker index
total = 0 # total jobs
for m in machines:
k = charge[i]
sjobs.append(jobs[total:(total + k)])
total += k
i += 1
if total >= njobs:
break
return sjobs
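# Worked example (illustrative): with njobs=10 and an allocation of 3 workers on
# machine A plus 1 worker on machine B (total_units=4), charge evaluates to
# [3*10/4, 10-7] = [7, 3], so machine A receives jobs[0:7] and machine B jobs[7:10].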
def submit_jobs(fun,
allocation,
unit_type='CPU',
shared_data={},
local=None,
do_redirect=None,
argss=[],
kwdss={}):
"""
Submit map jobs. Use ``map_async`` instead.
"""
machines = allocation.machines
# creates Job objects
jobs = create_jobs(fun, argss, kwdss)
# creates a JobRun object
myjobs = JobRun(unit_type, jobs, machines)
# splits jobs
sjobs = split_jobs(jobs, machines, allocation)
units = [allocation[m] for m in myjobs.get_machines()]
# are jobs running locally?
if local is None and (len(machines) == 1) and (machines[0].ip == LOCAL_IP):
myjobs.set_local(True)
if local is not None:
myjobs.set_local(local)
GC.set(myjobs.get_machines(), handler_class=AsyncJobHandler)
disconnect = GC.connect()
# Submits jobs to the machines
# clients = RpcClients(myjobs.get_machines(), handler_class=AsyncJobHandler)
jobids = GC.submit(sjobs, type=unit_type, units=units,
shared_data=shared_data,
do_redirect=do_redirect)
if disconnect:
GC.disconnect()
# Records job ids
myjobs.set_jobids(jobids)
return myjobs
| gpl-3.0 | 8,393,490,049,809,186,000 | 28.138 | 79 | 0.522564 | false |
namcap/Digital-Image-Processing-in-Python | 1.DFT_and_DCT/Swap Phase.py | 1 | 1819 | #! /usr/bin/python3
if __name__=="__main__":
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from scipy.fftpack import dct
from scipy.fftpack import idct
#
default_paths=["img1.jpg","img2.jpg"]
paths=[None]*2
images=[None]*2
DFT=[None]*2
iDFT=[None]*2
Magnitude=[None]*2
phase=[None]*2
for i in range(2):
while True:
paths[i]=input("Path to the image: (default to "+default_paths[i]+") ")
if paths[i] is "":
paths[i]=default_paths[i]
try:
images[i]=Image.open(paths[i])
break
except FileNotFoundError as e:
print(e)
print("Try again.")
#Image processing
#Get grayscale
images[i]=images[i].convert('L')
#Find DFT
DFT[i]=np.fft.fft2(images[i])
Magnitude[i]=abs(DFT[i])
phase[i]=DFT[i]/Magnitude[i]
#Swap the phases of these two images
iDFT[0]=abs(np.fft.ifft2(Magnitude[0]*phase[1]))
iDFT[1]=abs(np.fft.ifft2(Magnitude[1]*phase[0]))
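    # Rationale: each spectrum factors as F(u,v) = |F(u,v)| * exp(j*phi(u,v)), so
    # Magnitude[0]*phase[1] combines image 1's magnitude with image 2's phase;
    # inverting it shows how much of the perceived structure is carried by phase.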
#Show results
#The 'interpolation="none"' makes pyplot display the image pixel by pixel
plt.subplot(221),plt.imshow(images[0], cmap = 'gray',interpolation="none")
plt.title('Original Image#1'), plt.xticks([]), plt.yticks([])
plt.subplot(222),plt.imshow(images[1], cmap = 'gray',interpolation="none")
plt.title('Original Image#2'), plt.xticks([]), plt.yticks([])
plt.subplot(223),plt.imshow(iDFT[0], cmap = 'gray',interpolation="none")
plt.title('Magnitude#1 + Phase#2'), plt.xticks([]), plt.yticks([])
plt.subplot(224),plt.imshow(iDFT[1], cmap = 'gray',interpolation="none")
plt.title('Magnitude#2 + Phase#1'), plt.xticks([]), plt.yticks([])
plt.show()
| mit | 4,967,214,744,849,835,000 | 34.666667 | 83 | 0.582738 | false |
OpenMined/PySyft | packages/syft/src/syft/lib/python/__init__.py | 1 | 31373 | # stdlib
from typing import Optional
# syft relative
from . import collections
from ...ast import add_classes
from ...ast import add_dynamic_objects
from ...ast import add_methods
from ...ast import add_modules
from ...ast.globals import Globals
from ...core.node.abstract.node import AbstractNodeClient
from ..misc.union import UnionGenerator
from .bool import Bool
from .complex import Complex
from .dict import Dict
from .float import Float
from .int import Int
from .iterator import Iterator
from .list import List
from .none import SyNone
from .none import _SyNone
from .primitive_container import Any
from .primitive_interface import PyPrimitive
from .range import Range
from .set import Set
from .slice import Slice
from .string import String
from .tuple import Tuple
for syft_type in [
Bool,
Complex,
Dict,
Float,
Int,
SyNone,
_SyNone,
Any,
PyPrimitive,
Slice,
String,
Tuple,
]:
syft_type.__module__ = __name__
def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals:
ast = Globals(client)
modules = ["syft", "syft.lib", "syft.lib.python", "syft.lib.python.collections"]
classes = [
("syft.lib.python.Bool", "syft.lib.python.Bool", Bool),
("syft.lib.python.Complex", "syft.lib.python.Complex", Complex),
("syft.lib.python.Dict", "syft.lib.python.Dict", Dict),
("syft.lib.python.Float", "syft.lib.python.Float", Float),
("syft.lib.python.Int", "syft.lib.python.Int", Int),
("syft.lib.python.List", "syft.lib.python.List", List),
("syft.lib.python.Slice", "syft.lib.python.Slice", Slice),
("syft.lib.python.Range", "syft.lib.python.Range", Range),
("syft.lib.python.String", "syft.lib.python.String", String),
("syft.lib.python._SyNone", "syft.lib.python._SyNone", _SyNone),
("syft.lib.python.PyPrimitive", "syft.lib.python.PyPrimitive", PyPrimitive),
("syft.lib.python.Any", "syft.lib.python.Any", Any),
("syft.lib.python.Tuple", "syft.lib.python.Tuple", Tuple),
("syft.lib.python.Iterator", "syft.lib.python.Iterator", Iterator),
("syft.lib.python.Set", "syft.lib.python.Set", Set),
(
"syft.lib.python.collections.OrderedDict",
"syft.lib.python.collections.OrderedDict",
collections.OrderedDict,
),
]
methods = [
# Range methods - quite there
("syft.lib.python.Range.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__bool__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Range.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.Range.__len__", "syft.lib.python.Int"),
("syft.lib.python.Range.__iter__", "syft.lib.python.Iterator"),
("syft.lib.python.Range.__sizeof__", "syft.lib.python.Int"),
(
"syft.lib.python.Range.start",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
(
"syft.lib.python.Range.step",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
(
"syft.lib.python.Range.stop",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
(
"syft.lib.python.Range.count",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
(
"syft.lib.python.Range.index",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
# Slice methods - quite there
("syft.lib.python.Slice.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Slice.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Slice.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Slice.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Slice.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Slice.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Slice.__str__", "syft.lib.python.String"),
("syft.lib.python.Slice.indices", "syft.lib.python.Tuple"),
(
"syft.lib.python.Slice.start",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
(
"syft.lib.python.Slice.step",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
(
"syft.lib.python.Slice.stop",
UnionGenerator["syft.lib.python.Int", "syft.lib.python._SyNone"],
),
# List methods - quite there
("syft.lib.python.List.__len__", "syft.lib.python.Int"),
("syft.lib.python.List.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.List.__iter__", "syft.lib.python.Iterator"),
("syft.lib.python.List.__add__", "syft.lib.python.List"),
("syft.lib.python.List.append", "syft.lib.python._SyNone"),
("syft.lib.python.List.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.List.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.List.__le__", "syft.lib.python.Bool"),
("syft.lib.python.List.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.List.__iadd__", "syft.lib.python.List"),
("syft.lib.python.List.__imul__", "syft.lib.python.List"),
("syft.lib.python.List.__iadd__", "syft.lib.python.List"),
("syft.lib.python.List.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.List.__delattr__", "syft.lib.python.None"),
("syft.lib.python.List.__delitem__", "syft.lib.python.None"),
("syft.lib.python.List.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.List.__mul__", "syft.lib.python.List"),
("syft.lib.python.List.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.List.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.List.__len__", "syft.lib.python.Int"),
("syft.lib.python.List.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.List.__setitem__", "syft.lib.python._SyNone"),
("syft.lib.python.List.__rmul__", "syft.lib.python.List"),
("syft.lib.python.List.copy", "syft.lib.python.List"),
("syft.lib.python.List.count", "syft.lib.python.Int"),
("syft.lib.python.List.sort", "syft.lib.python._SyNone"),
("syft.lib.python.List.reverse", "syft.lib.python._SyNone"),
("syft.lib.python.List.remove", "syft.lib.python._SyNone"),
("syft.lib.python.List.pop", "syft.lib.python.Any"),
("syft.lib.python.List.index", "syft.lib.python.Any"),
("syft.lib.python.List.insert", "syft.lib.python._SyNone"),
("syft.lib.python.List.clear", "syft.lib.python._SyNone"),
("syft.lib.python.List.extend", "syft.lib.python._SyNone"),
("syft.lib.python.List.__reversed__", "syft.lib.python.Iterator"),
("syft.lib.python.List.__delitem__", "syft.lib.python._SyNone"),
# Bool methods - quite there
("syft.lib.python.Bool.__abs__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__add__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__and__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__ceil__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__divmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Bool.__floor__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__float__", "syft.lib.python.Float"),
("syft.lib.python.Bool.__floordiv__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__invert__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__lshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__mod__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__mul__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__neg__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__or__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__pos__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__pow__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__radd__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rand__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__rdivmod__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__rfloordiv__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__rlshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rmod__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rmul__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__ror__", "syft.lib.python.Bool"),
("syft.lib.python.Bool.__round__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rpow__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rrshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rshift__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rsub__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rtruediv__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__rxor__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__sub__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__truediv__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__xor__", "syft.lib.python.Int"),
("syft.lib.python.Bool.__trunc__", "syft.lib.python.Int"),
("syft.lib.python.Bool.conjugate", "syft.lib.python.Int"),
("syft.lib.python.Bool.bit_length", "syft.lib.python.Int"),
("syft.lib.python.Bool.as_integer_ratio", "syft.lib.python.Tuple"),
("syft.lib.python.Bool.numerator", "syft.lib.python.Int"),
("syft.lib.python.Bool.real", "syft.lib.python.Int"),
("syft.lib.python.Bool.imag", "syft.lib.python.Int"),
("syft.lib.python.Bool.denominator", "syft.lib.python.Int"),
# Float methods - subject to further change due
("syft.lib.python.Float.__add__", "syft.lib.python.Float"),
("syft.lib.python.Float.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__divmod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__add__", "syft.lib.python.Float"),
("syft.lib.python.Float.__abs__", "syft.lib.python.Float"),
("syft.lib.python.Float.__bool__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__sub__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rsub__", "syft.lib.python.Float"),
("syft.lib.python.Float.__mul__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rmul__", "syft.lib.python.Float"),
("syft.lib.python.Float.__divmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Float.__int__", "syft.lib.python.Int"),
("syft.lib.python.Float.__neg__", "syft.lib.python.Float"),
("syft.lib.python.Float.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Float.__floordiv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__mod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rmod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rdivmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Float.__rfloordiv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__round__", "syft.lib.python.Int"),
("syft.lib.python.Float.__rtruediv__", "syft.lib.python.Float"),
("syft.lib.python.Float.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.Float.__trunc__", "syft.lib.python.Int"),
("syft.lib.python.Float.as_integer_ratio", "syft.lib.python.Tuple"),
("syft.lib.python.Float.is_integer", "syft.lib.python.Bool"),
("syft.lib.python.Float.__pow__", "syft.lib.python.Float"),
("syft.lib.python.Float.__rpow__", "syft.lib.python.Float"),
("syft.lib.python.Float.__iadd__", "syft.lib.python.Float"),
("syft.lib.python.Float.__isub__", "syft.lib.python.Float"),
("syft.lib.python.Float.__imul__", "syft.lib.python.Float"),
("syft.lib.python.Float.__imod__", "syft.lib.python.Float"),
("syft.lib.python.Float.__ipow__", "syft.lib.python.Float"),
("syft.lib.python.Float.__pos__", "syft.lib.python.Float"),
("syft.lib.python.Float.conjugate", "syft.lib.python.Float"),
("syft.lib.python.Float.imag", "syft.lib.python.Int"),
("syft.lib.python.Float.real", "syft.lib.python.Float"),
# String Methods
("syft.lib.python.String.__add__", "syft.lib.python.String"),
("syft.lib.python.String.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.String.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.String.__float__", "syft.lib.python.Float"),
("syft.lib.python.String.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.String.__getitem__", "syft.lib.python.String"),
("syft.lib.python.String.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.String.__int__", "syft.lib.python.Int"),
("syft.lib.python.String.__iter__", "syft.lib.python.Any"),
("syft.lib.python.String.__le__", "syft.lib.python.Bool"),
("syft.lib.python.String.__len__", "syft.lib.python.Int"),
("syft.lib.python.String.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.String.__mod__", "syft.lib.python.String"),
("syft.lib.python.String.__mul__", "syft.lib.python.String"),
("syft.lib.python.String.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.String.__reversed__", "syft.lib.python.String"),
("syft.lib.python.String.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.String.__str__", "syft.lib.python.String"),
("syft.lib.python.String.capitalize", "syft.lib.python.String"),
("syft.lib.python.String.casefold", "syft.lib.python.String"),
("syft.lib.python.String.center", "syft.lib.python.String"),
("syft.lib.python.String.count", "syft.lib.python.Int"),
("syft.lib.python.String.encode", "syft.lib.python.String"),
("syft.lib.python.String.expandtabs", "syft.lib.python.String"),
("syft.lib.python.String.find", "syft.lib.python.Int"),
("syft.lib.python.String.format", "syft.lib.python.String"),
("syft.lib.python.String.format_map", "syft.lib.python.String"),
("syft.lib.python.String.index", "syft.lib.python.Int"),
("syft.lib.python.String.isalnum", "syft.lib.python.Bool"),
("syft.lib.python.String.isalpha", "syft.lib.python.Bool"),
("syft.lib.python.String.isdecimal", "syft.lib.python.Bool"),
("syft.lib.python.String.isdigit", "syft.lib.python.Bool"),
("syft.lib.python.String.isidentifier", "syft.lib.python.Bool"),
("syft.lib.python.String.islower", "syft.lib.python.Bool"),
("syft.lib.python.String.isnumeric", "syft.lib.python.Bool"),
("syft.lib.python.String.isprintable", "syft.lib.python.Bool"),
("syft.lib.python.String.isspace", "syft.lib.python.Bool"),
("syft.lib.python.String.isupper", "syft.lib.python.Bool"),
("syft.lib.python.String.join", "syft.lib.python.String"),
("syft.lib.python.String.ljust", "syft.lib.python.String"),
("syft.lib.python.String.lower", "syft.lib.python.String"),
("syft.lib.python.String.lstrip", "syft.lib.python.String"),
("syft.lib.python.String.partition", "syft.lib.python.Tuple"),
("syft.lib.python.String.replace", "syft.lib.python.String"),
("syft.lib.python.String.rfind", "syft.lib.python.Int"),
("syft.lib.python.String.rindex", "syft.lib.python.Int"),
("syft.lib.python.String.rjust", "syft.lib.python.String"),
("syft.lib.python.String.rpartition", "syft.lib.python.Tuple"),
("syft.lib.python.String.rsplit", "syft.lib.python.List"),
("syft.lib.python.String.rstrip", "syft.lib.python.String"),
("syft.lib.python.String.split", "syft.lib.python.List"),
("syft.lib.python.String.splitlines", "syft.lib.python.List"),
("syft.lib.python.String.startswith", "syft.lib.python.Bool"),
("syft.lib.python.String.strip", "syft.lib.python.String"),
("syft.lib.python.String.swapcase", "syft.lib.python.String"),
("syft.lib.python.String.title", "syft.lib.python.String"),
("syft.lib.python.String.translate", "syft.lib.python.String"),
("syft.lib.python.String.upper", "syft.lib.python.String"),
("syft.lib.python.String.zfill", "syft.lib.python.String"),
("syft.lib.python.String.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.String.__rmul__", "syft.lib.python.String"),
("syft.lib.python.String.endswith", "syft.lib.python.Bool"),
("syft.lib.python.String.isascii", "syft.lib.python.Bool"),
("syft.lib.python.String.istitle", "syft.lib.python.Bool"),
# Dict methods
("syft.lib.python.Dict.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__format__", "syft.lib.python.String"),
("syft.lib.python.Dict.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.Dict.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__iter__", "syft.lib.python.Iterator"),
("syft.lib.python.Dict.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__len__", "syft.lib.python.Int"),
("syft.lib.python.Dict.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Dict.__sizeof__", "syft.lib.python.Int"),
("syft.lib.python.Dict.__str__", "syft.lib.python.String"),
("syft.lib.python.Dict.copy", "syft.lib.python.Dict"),
("syft.lib.python.Dict.clear", "syft.lib.python._SyNone"),
("syft.lib.python.Dict.fromkeys", "syft.lib.python.Dict"),
# Rename get to dict_get because of conflict
("syft.lib.python.Dict.dict_get", "syft.lib.python.Any"),
("syft.lib.python.Dict.items", "syft.lib.python.Iterator"),
("syft.lib.python.Dict.keys", "syft.lib.python.Iterator"),
("syft.lib.python.Dict.pop", "syft.lib.python.Any"),
("syft.lib.python.Dict.popitem", "syft.lib.python.Tuple"),
("syft.lib.python.Dict.setdefault", "syft.lib.python.Any"),
("syft.lib.python.Dict.values", "syft.lib.python.Iterator"),
# Int methods - subject to further change
("syft.lib.python.Int.__add__", "syft.lib.python.Int"),
("syft.lib.python.Int.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Int.__divmod__", "syft.lib.python.Float"),
("syft.lib.python.Int.__floordiv__", "syft.lib.python.Float"),
("syft.lib.python.Int.__invert__", "syft.lib.python.Int"),
("syft.lib.python.Int.__abs__", "syft.lib.python.Int"),
("syft.lib.python.Int.__bool__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__divmod__", "syft.lib.python.Tuple"),
("syft.lib.python.Int.__rdivmod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__radd__", "syft.lib.python.Int"),
("syft.lib.python.Int.__sub__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rsub__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rtruediv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__mul__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rmul__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ceil__", "syft.lib.python.Int"),
("syft.lib.python.Int.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__float__", "syft.lib.python.Float"),
("syft.lib.python.Int.__floor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__floordiv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rfloordiv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__truediv__", "syft.lib.python.Float"),
("syft.lib.python.Int.__mod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rmod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__pow__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rpow__", "syft.lib.python.Int"),
("syft.lib.python.Int.__lshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rlshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__round__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rrshift__", "syft.lib.python.Int"),
("syft.lib.python.Int.__and__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rand__", "syft.lib.python.Int"),
("syft.lib.python.Int.__xor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__xor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__rxor__", "syft.lib.python.Int"),
("syft.lib.python.Int.__or__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ror__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__iadd__", "syft.lib.python.Int"),
("syft.lib.python.Int.__isub__", "syft.lib.python.Int"),
("syft.lib.python.Int.__imul__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ifloordiv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__itruediv__", "syft.lib.python.Int"),
("syft.lib.python.Int.__imod__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ipow__", "syft.lib.python.Int"),
("syft.lib.python.Int.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Int.__neg__", "syft.lib.python.Int"),
("syft.lib.python.Int.__pos__", "syft.lib.python.Int"),
("syft.lib.python.Int.as_integer_ratio", "syft.lib.python.Tuple"),
("syft.lib.python.Int.bit_length", "syft.lib.python.Int"),
("syft.lib.python.Int.denominator", "syft.lib.python.Int"),
("syft.lib.python.Int.from_bytes", "syft.lib.python.Int"),
("syft.lib.python.Int.real", "syft.lib.python.Int"),
("syft.lib.python.Int.imag", "syft.lib.python.Int"),
("syft.lib.python.Int.numerator", "syft.lib.python.Int"),
("syft.lib.python.Int.conjugate", "syft.lib.python.Int"),
("syft.lib.python.Int.__trunc__", "syft.lib.python.Int"),
# Tuple
("syft.lib.python.Tuple.__add__", "syft.lib.python.Tuple"),
("syft.lib.python.Tuple.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Tuple.__mul__", "syft.lib.python.Tuple"),
("syft.lib.python.Tuple.__rmul__", "syft.lib.python.Tuple"),
("syft.lib.python.Tuple.__len__", "syft.lib.python.Int"),
("syft.lib.python.Tuple.__getitem__", "syft.lib.python.Any"),
("syft.lib.python.Tuple.count", "syft.lib.python.Int"),
("syft.lib.python.Tuple.index", "syft.lib.python.Int"),
("syft.lib.python.Tuple.__iter__", "syft.lib.python.Iterator"),
# PyContainer - quite there
("syft.lib.python.Any.__add__", "syft.lib.python.Any"),
("syft.lib.python.Any.__iter__", "syft.lib.python.Iterator"),
("syft.lib.python.Any.__next__", "syft.lib.python.Any"),
("syft.lib.python.Any.__len__", "syft.lib.python.Int"),
("syft.lib.python.Any.__radd__", "syft.lib.python.Any"),
("syft.lib.python.Any.__truediv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rtruediv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__floordiv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rfloordiv__", "syft.lib.python.Any"),
("syft.lib.python.Any.__mul__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rmul__", "syft.lib.python.Any"),
("syft.lib.python.Any.__sub__", "syft.lib.python.Any"),
("syft.lib.python.Any.__rsub__", "syft.lib.python.Any"),
(
"syft.lib.python.Iterator.__next__",
UnionGenerator[
"syft.lib.python.Int",
"syft.lib.python.Float",
"syft.lib.python.String",
"torch.nn.Parameter",
"torch.Tensor",
],
), # temp until casting
("syft.lib.python.Iterator.__iter__", "syft.lib.python.Iterator"),
("syft.lib.python.Iterator.__len__", "syft.lib.python.Int"),
("syft.lib.python.Set.__and__", "syft.lib.python.Set"),
("syft.lib.python.Set.__contains__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__eq__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__ge__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__gt__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__iand__", "syft.lib.python.Set"),
("syft.lib.python.Set.__ior__", "syft.lib.python.Set"),
("syft.lib.python.Set.__isub__", "syft.lib.python.Set"),
("syft.lib.python.Set.__ixor__", "syft.lib.python.Set"),
("syft.lib.python.Set.__le__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__len__", "syft.lib.python.Int"),
("syft.lib.python.Set.__lt__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__ne__", "syft.lib.python.Bool"),
("syft.lib.python.Set.__or__", "syft.lib.python.Set"),
("syft.lib.python.Set.__sub__", "syft.lib.python.Set"),
("syft.lib.python.Set.__xor__", "syft.lib.python.Set"),
("syft.lib.python.Set.add", "syft.lib.python._SyNone"),
("syft.lib.python.Set.clear", "syft.lib.python._SyNone"),
("syft.lib.python.Set.difference", "syft.lib.python.Set"),
("syft.lib.python.Set.difference_update", "syft.lib.python._SyNone"),
("syft.lib.python.Set.discard", "syft.lib.python._SyNone"),
("syft.lib.python.Set.intersection", "syft.lib.python.Set"),
("syft.lib.python.Set.intersection_update", "syft.lib.python._SyNone"),
("syft.lib.python.Set.isdisjoint", "syft.lib.python.Bool"),
("syft.lib.python.Set.issuperset", "syft.lib.python.Bool"),
("syft.lib.python.Set.pop", "syft.lib.python._SyNone"),
("syft.lib.python.Set.remove", "syft.lib.python._SyNone"),
(
"syft.lib.python.Set.symmetric_difference_update",
"syft.lib.python._SyNone",
),
("syft.lib.python.Set.symmetric_difference", "syft.lib.python.Set"),
("syft.lib.python.Set.union", "syft.lib.python.Set"),
("syft.lib.python.Set.update", "syft.lib.python._SyNone"),
(
"syft.lib.python.collections.OrderedDict.__contains__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__delitem__",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.__eq__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__ge__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__getitem__",
"syft.lib.python.Any",
),
(
"syft.lib.python.collections.OrderedDict.__gt__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__le__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__iter__",
"syft.lib.python.Iterator",
),
("syft.lib.python.collections.OrderedDict.__len__", "syft.lib.python.Int"),
(
"syft.lib.python.collections.OrderedDict.__lt__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__ne__",
"syft.lib.python.Bool",
),
(
"syft.lib.python.collections.OrderedDict.__setitem__",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.clear",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.copy",
"syft.lib.python.collections.OrderedDict",
),
(
"syft.lib.python.collections.OrderedDict.fromkeys",
"syft.lib.python.collections.OrderedDict",
),
("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.Iterator"),
("syft.lib.python.collections.OrderedDict.keys", "syft.lib.python.Iterator"),
(
"syft.lib.python.collections.OrderedDict.move_to_end",
"syft.lib.python._SyNone",
),
("syft.lib.python.collections.OrderedDict.pop", "syft.lib.python.Any"),
("syft.lib.python.collections.OrderedDict.popitem", "syft.lib.python.Any"),
(
"syft.lib.python.collections.OrderedDict.setdefault",
"syft.lib.python.Any",
),
(
"syft.lib.python.collections.OrderedDict.update",
"syft.lib.python._SyNone",
),
(
"syft.lib.python.collections.OrderedDict.values",
"syft.lib.python.Iterator",
),
("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.List"),
(
"syft.lib.python.collections.OrderedDict.dict_get",
"syft.lib.python.Any",
),
]
dynamic_objects = [("syft.lib.python.Bool.my_field", "syft.lib.python.Int")]
add_modules(ast, modules)
add_classes(ast, classes)
add_methods(ast, methods)
add_dynamic_objects(ast, dynamic_objects)
for klass in ast.classes:
klass.create_pointer_class()
klass.create_send_method()
klass.create_storable_object_attr_convenience_methods()
return ast
| apache-2.0 | -4,590,487,544,868,598,000 | 53.091379 | 86 | 0.581041 | false |
ContributeToScience/participant-booking-app | booking/staff/views.py | 1 | 11256 | import collections
import httplib
import urllib
import urlparse
from django.conf import settings
from django.contrib.messages import error, success, warning
from django.core.urlresolvers import reverse_lazy
from django.db.models import F
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET
from paypal import PayPalConfig, PayPalInterface, PayPalAPIResponseError
from core.decorators import staff_required
from scientist.models import ParticipantResearch, Research
@staff_required
def award_participants(request, template='staff/award_participants.html', extra_context=None):
"""
Staff user: award to participants page
**Context**
``RequestContext``
**Template:**
:template:`staff/award_participants.html`
"""
payment_paypal_dict = {}
payment_amazon_dict = {}
payment_type = None
if request.method == 'POST':
payment_type = request.POST.get('payment_type', None)
paypal_type = request.POST.get('paypal_type', None)
award_participant_id_list = request.POST.getlist('award_participant_id', [])
_participant_group_by_payment_type(request, award_participant_id_list, payment_paypal_dict, payment_amazon_dict)
redirect_url = _payment_of_paypal(request, payment_paypal_dict, paypal_type)
if redirect_url:
return HttpResponseRedirect(redirect_url)
_payment_of_amazon(request, payment_amazon_dict)
award_participant_list = ParticipantResearch.objects.filter(confirmed=True, award_credit__gt=0,
scientist_award_dt__isnull=False,
participant_resp_dt__isnull=False,
superuser_award_dt__isnull=True).filter(
award_credit__gt=F('donate_credit')).order_by('-created')
if payment_type:
_participant_group_by_payment_type(request, award_participant_list, payment_paypal_dict, payment_amazon_dict,
False)
if payment_type == 'paypal':
award_participant_list = payment_paypal_dict.values()
elif payment_type == 'amazon':
award_participant_list = payment_amazon_dict.values()
context = {
'award_participant_list': award_participant_list,
'payment_type': payment_type
}
if extra_context:
context.update(extra_context)
return render_to_response(template, context, context_instance=RequestContext(request))
def _participant_group_by_payment_type(request, participant_list, payment_paypal_dict, payment_amazon_dict,
search=True):
if participant_list and len(participant_list) > 0:
for item in participant_list:
if search:
participant = ParticipantResearch.objects.get(id=item)
else:
participant = item
user_profile = participant.participant.userprofile
participant_info = user_profile.basic_info.get('participant', None)
payment_type = participant_info.get('payment_type', None) if participant_info else None
payment_account = participant_info.get('payment_account', None) if participant_info else None
if payment_type == 'paypal':
payment_paypal_dict['%s_%s' % (payment_account, participant.id)] = participant
elif payment_type == 'amazon':
payment_amazon_dict['%s_%s' % (payment_account, participant.id)] = participant
else:
username = user_profile.get_full_name()
warning(request, _(u'Please check %s user profile whether to fill in payment information' % username))
def _payment_of_paypal(request, payment_paypal_dict, paypal_type):
if payment_paypal_dict and len(payment_paypal_dict) > 0:
if paypal_type == 'MassPay':
_mass_pay(request, payment_paypal_dict)
elif paypal_type == 'AdaptivePay':
return _adaptive_pay(request, payment_paypal_dict)
else:
error(request, _(u'Please choose a payment method in MassPay or AdaptivePay'))
def _payment_of_amazon(request, payment_amazon_dict):
#TODO: Amazon payment method
print payment_amazon_dict
pass
def _mass_pay(request, payment_paypal_dict):
PAYPAL_CONFIG = PayPalConfig(API_USERNAME=settings.PAYPAL_API_USERNAME,
API_PASSWORD=settings.PAYPAL_API_PASSWORD,
API_SIGNATURE=settings.PAYPAL_API_SIGNATURE,
API_ENVIRONMENT=settings.PAYPAL_API_ENVIRONMENT,
DEBUG_LEVEL=0)
paypal_interface = PayPalInterface(config=PAYPAL_CONFIG)
payment_dict = {'RECEIVERTYPE': 'EmailAddress', }
for i, item in enumerate(payment_paypal_dict.items()):
payment_account = (item[0])[0:item[0].find('_')]
participant = item[1]
payment_credit = participant.award_credit - participant.donate_credit
if payment_credit > 0:
if payment_account:
payment_dict['L_EMAIL%s' % i] = payment_account
payment_dict['L_AMT%s' % i] = payment_credit
payment_dict['L_NOTE%s' % i] = participant.research.name
participant.superuser_award_dt = now()
participant.payment_type = 'paypal'
participant.payment_account = payment_account
try:
resp = paypal_interface._call('MassPay', **payment_dict)
if resp['ACK'] == 'Success':
for payment_account, participant in payment_paypal_dict.items():
participant.payment_status = True
participant.payment_resp = 'MassPay Success'
participant.save()
                success(request, _(u'Payment successful'))
except PayPalAPIResponseError as e:
error(request, _(u'%s') % e.message)
def _adaptive_pay(request, payment_paypal_dict):
#Set headers
headers = {
'X-PAYPAL-SECURITY-USERID': settings.PAYPAL_API_USERNAME,
'X-PAYPAL-SECURITY-PASSWORD': settings.PAYPAL_API_PASSWORD,
'X-PAYPAL-SECURITY-SIGNATURE': settings.PAYPAL_API_SIGNATURE,
'X-PAYPAL-APPLICATION-ID': settings.PAYPAL_APPLICTION_ID,
'X-PAYPAL-SERVICE-VERSION': '1.1.0',
'X-PAYPAL-REQUEST-DATA-FORMAT': 'NV',
'X-PAYPAL-RESPONSE-DATA-FORMAT': 'NV'
}
#Set POST Parameters
params = collections.OrderedDict()
params['requestEnvelope.errorLanguage'] = 'en_US'
params['requestEnvelope.detailLevel'] = 'ReturnAll'
params['reverseAllParallelPaymentsOnError'] = 'true'
params['actionType'] = 'PAY'
params['currencyCode'] = 'USD'
params['feesPayer'] = 'EACHRECEIVER'
payment_params = '?type=AdaptivePay'
for i, item in enumerate(payment_paypal_dict.items()):
payment_account = (item[0])[0:item[0].find('_')]
participant = item[1]
payment_params += '&pr=%s' % item[0]
payment_credit = participant.award_credit - participant.donate_credit
if payment_credit > 0:
if payment_account:
params['receiverList.receiver(%d).email' % i] = payment_account
params['receiverList.receiver(%d).amount' % i] = payment_credit
params['returnUrl'] = '%s/staff/payment/paypal/complete/%s' % (settings.SITE_NAME, payment_params)
params['cancelUrl'] = '%s%s' % (settings.SITE_NAME, request.path)
#params['returnUrl'] = 'http://www.baidu.com
#params['cancelUrl'] = 'http://www.baidu.com
#Set Client Details
params['clientDetails.ipAddress'] = '127.0.0.1'
params['clientDetails.deviceId'] = 'mydevice'
params['clientDetails.applicationId'] = 'PayNvpDemo'
enc_params = urllib.urlencode(params)
#Connect to sand box and POST.
conn = httplib.HTTPSConnection(settings.PAYPAL_SERVICE)
conn.request("POST", "/AdaptivePayments/Pay/", enc_params, headers)
#Check the response - should be 200 OK.
response = conn.getresponse()
if response.status == 200:
#Get the reply and print it out.
data = response.read()
result = urlparse.parse_qs(data)
if result['responseEnvelope.ack'][0] == 'Success':
return '%s?cmd=_ap-payment&paykey=%s' % (settings.PAYPAL_ACTION, result['payKey'][0])
else:
error(request, _(u'%s' % result['error(0).message'][0]))
else:
        warning(request, _(u'The request raised an exception, please try again later'))
@require_GET
@csrf_exempt
def payment_paypal_complete(request):
pr_list = request.GET.getlist('pr', [])
#payment_type = request.GET.get('type', '')
try:
for pr in pr_list:
payment_account = pr[0:pr.find('_')]
pr_id = pr[pr.find('_') + 1:len(pr)]
pr = ParticipantResearch.objects.get(id=pr_id)
pr.superuser_award_dt = now()
pr.payment_type = 'paypal'
pr.payment_account = payment_account
pr.payment_status = True
pr.payment_resp = 'AdaptivePay Success'
pr.save()
except Exception as e:
error(request, _(u'%s' % e.message))
return HttpResponseRedirect(reverse_lazy('award_participants'))
@staff_required
def award_participants_history(request, template='staff/award_participants_history.html', extra_context=None):
"""
Staff user: award to participants history page
**Context**
``RequestContext``
**Template:**
:template:`staff/award_participants_history.html`
"""
award_participant_history_list = ParticipantResearch.objects.filter(confirmed=True, award_credit__gt=0,
scientist_award_dt__isnull=False,
participant_resp_dt__isnull=False,
superuser_award_dt__isnull=False).order_by(
'-created')
context = {
'award_participant_history_list': award_participant_history_list,
}
if extra_context:
context.update(extra_context)
return render_to_response(template, context, context_instance=RequestContext(request))
@staff_required
def scientists_payment_history(request, template='staff/scientists_payment_history.html', extra_context=None):
"""
Staff user: scientists payment history page
**Context**
``RequestContext``
**Template:**
:template:`staff/scientists_payment_history.html`
"""
scientist_payment_history_list = Research.objects.filter(is_paid=True).order_by('-created')
context = {
'scientist_payment_history_list': scientist_payment_history_list,
}
if extra_context:
context.update(extra_context)
return render_to_response(template, context, context_instance=RequestContext(request)) | gpl-2.0 | -5,788,404,606,282,582,000 | 38.222997 | 120 | 0.627488 | false |
buck06191/BayesCMD | scripts/results_processing/results_processing_example.py | 1 | 3664 | """Process results from 280917."""
import os
import argparse
from bayescmd.results_handling import kde_plot
from bayescmd.results_handling import scatter_dist_plot
from bayescmd.results_handling import data_import
from bayescmd.results_handling import plot_repeated_outputs
from bayescmd.results_handling import histogram_plot
from bayescmd.results_handling import data_merge_by_date
from bayescmd.abc import import_actual_data
from bayescmd.abc import priors_creator
from bayescmd.util import findBaseDir
from distutils import dir_util
import json
import numpy as np
BASEDIR = os.path.abspath(findBaseDir('BayesCMD'))
ap = argparse.ArgumentParser('Choose results to process:')
ap.add_argument(
'parent_dir',
metavar="PARENT_DIR",
help='Parent directory holding model run folders')
ap.add_argument(
'conf',
metavar="config_file",
help='Config file used to generate model runs')
args = ap.parse_args()
######### LOAD DATA ############
# Define the location of the parameter combinations and distances -
# use `data_merge_by_date` and `data_merge_by_batch` to help with this
date = '280917'
pfile = data_merge_by_date(date, args.parent_dir)
results = data_import(pfile)
# Set input file name here for reading - this is typically the measured data
input_file_name = ""
input_path = os.path.join(BASEDIR, 'data', input_file_name)
d0 = import_actual_data(input_path)
# Add paths to any other required files e.g. OpenOpt fits
openopt_path = os.path.join(BASEDIR, 'data', 'model_run_output.csv')
######### CONFIG FILE ###########
# Extract information from config.
with open(args.conf, 'r') as conf_f:
conf = json.load(conf_f)
targets = conf['targets']
model_name = conf['model_name']
inputs = conf['inputs']
# Define priors based on config options.
params = priors_creator(conf['priors']['defaults'],
conf['priors']['variation'])
config = {
"model_name": model_name,
"targets": targets,
"inputs": inputs,
"parameters": params,
"input_path": input_path,
"openopt_path": openopt_path,
"zero_flag": {k: False for k in targets}
}
d = 'euclidean'
lim = 1000
## Set path to save figures to
figPath = "/home/buck06191/Dropbox/phd/Bayesian_fitting/{}/{}/"\
"Figures/{}".format(model_name, date, d)
dir_util.mkpath(figPath)
## Plot histograms of errors for checking
print("Plotting total histogram")
hist1 = histogram_plot(results, frac=1)
hist1.savefig(
os.path.join(figPath, 'full_histogram_real.png'), bbox_inches='tight')
print("Plotting fraction histogram")
hist2 = histogram_plot(results, limit=lim)
hist2.savefig(
os.path.join(figPath, 'fraction_histogram_real.png'), bbox_inches='tight')
## Print posteriors
print("Considering lowest {} values".format(lim))
print("Generating KDE plot")
openopt_medians = {"r_t": 0.016243,
"sigma_coll": 78.000000,
"cytox_tot_tis": 0.006449,
"Vol_mit": 0.084000,
"O2_n": 0.030000,
"r_0": 0.011808,
"v_cn": 30.000000,
"r_m": 0.021274}
g = kde_plot(results, params, limit=lim, n_ticks=4,
openopt_medians=openopt_medians)
g.fig.savefig(
os.path.join(figPath, 'kde_{}_real.png'
.format(str(lim).replace('.', '_'))),
bbox_inches='tight')
## Print posterior predictive
print("Generating averaged time series plot")
fig = plot_repeated_outputs(results, n_repeats=25, limit=lim, **config)
fig.set_size_inches(18.5, 12.5)
fig.savefig(
os.path.join(figPath, 'TS_{}_real.png'
.format(str(lim).replace('.', '_'))),
dpi=100)
| gpl-2.0 | 9,153,548,067,244,811,000 | 27.850394 | 78 | 0.665393 | false |
xupingmao/xnote | core/xmanager.py | 1 | 25975 | # encoding=utf-8
# @author xupingmao
# @since
# @modified 2021/06/12 16:51:12
"""Xnote 模块管理器
* HandlerManager HTTP请求处理器加载和注册
* CronTaskManager 定时任务注册和执行
* EventManager 事件管理器
"""
from __future__ import print_function
import os
import sys
import gc
import re
import traceback
import time
import copy
import json
import profile
import inspect
import six
import web
import xconfig
import xtemplate
import xtables
import xutils
import xauth
import threading
from collections import deque
from threading import Thread, Timer, current_thread
from xutils import Storage
from xutils import Queue, tojson, MyStdout, cacheutil, u, dbutil, fsutil
__version__ = "1.0"
__author__ = "xupingmao ([email protected])"
__copyright__ = "(C) 2016-2020 xupingmao. GNU GPL 3."
__contributors__ = []
dbutil.register_table("schedule", "任务调度表 <schedule:id>")
TASK_POOL_SIZE = 500
# Module-level singletons exposed to the outside
_manager = None
_event_manager = None
def do_wrap_handler(pattern, handler_clz):
    # In Python 2 old-style classes are not instances of `type`
    # Only classes are handled here; strings are returned untouched
if not inspect.isclass(handler_clz):
return handler_clz
def wrap_result(result):
if isinstance(result, (list, dict)):
web.header("Content-Type", "application/json")
return tojson(result)
return result
class WrappedHandler:
"""默认的handler装饰器
1. 装饰器相对于继承来说,性能略差一些,但是更加安全,父类的方法不会被子类所覆盖
2. 为什么不用Python的装饰器语法
1. 作为一个通用的封装,所有子类必须通过这层安全过滤,而不是声明才过滤
2. 子类不用引入额外的模块
"""
        # Use a float so the counter is not automatically promoted to a big integer
visited_count = 0.0
handler_class = handler_clz
def __init__(self):
self.target_class = handler_clz
self.target = handler_clz()
self.pattern = pattern
def GET(self, *args):
WrappedHandler.visited_count += 1.0
threading.current_thread().handler_class = self.target
result = wrap_result(self.target.GET(*args))
threading.current_thread().handler_class = None
return result
def POST(self, *args):
"""常用于提交HTML FORM表单、新增资源等"""
WrappedHandler.visited_count += 1.0
threading.current_thread().handler_class = self.target
result = wrap_result(self.target.POST(*args))
threading.current_thread().handler_class = None
return result
def HEAD(self, *args):
return wrap_result(self.target.HEAD(*args))
def OPTIONS(self, *args):
return wrap_result(self.target.OPTIONS(*args))
def PROPFIND(self, *args):
return wrap_result(self.target.PROPFIND(*args))
def PROPPATCH(self, *args):
return wrap_result(self.target.PROPPATCH(*args))
def PUT(self, *args):
"""更新资源,带条件时是幂等方法"""
return wrap_result(self.target.PUT(*args))
def LOCK(self, *args):
return wrap_result(self.target.LOCK(*args))
def UNLOCK(self, *args):
return wrap_result(self.target.UNLOCK(*args))
def MKCOL(self, *args):
return wrap_result(self.target.MKCOL(*args))
def COPY(self, *args):
return wrap_result(self.target.COPY(*args))
def MOVE(self, *args):
return wrap_result(self.target.MOVE(*args))
def DELETE(self, *args):
return wrap_result(self.target.DELETE(*args))
def SEARCH(self, *args):
return wrap_result(self.target.SEARCH(*args))
def CONNECT(self, *args):
"""建立tunnel隧道"""
return wrap_result(self.target.CONNECT(*args))
def __getattr__(self, name):
xutils.error("xmanager", "unknown method %s" % name)
return getattr(self.target, name)
def search_priority(self):
return 0
def search_match(self, input):
if hasattr(self.target, "search_match"):
                return self.target.search_match(input)
return False
def search(self, *args):
""" 如果子类实现了搜索接口,通过该方法调用 """
if hasattr(self.target, "search"):
                return self.target.search(*args)
return None
return WrappedHandler
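# Illustrative sketch of wiring a handler through do_wrap_handler (HelloHandler
# and the URL pattern below are hypothetical):
#
#     class HelloHandler:
#         def GET(self):
#             return dict(code="success")   # dict/list results are rendered as JSON
#
#     mapping = ["/test/hello", do_wrap_handler("/test/hello", HelloHandler)]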
def notfound():
"""404请求处理器"""
import xtemplate
raise web.notfound(xtemplate.render("common/page/notfound.html", show_aside = False))
class WebModel:
def __init__(self):
self.name = ""
self.url = ""
self.description = ""
self.searchkey = ""
def init(self):
if self.name == "":
self.name = self.searchkey
if self.name == "": # if still empty
self.name = self.url
self.searchkey = self.name + self.url + self.searchkey + self.description
self.description = "[工具]" + self.description
def log(msg):
# six.print_(time.strftime("%Y-%m-%d %H:%M:%S"), msg)
xutils.info("xmanager", msg)
def warn(msg):
# six.print_(time.strftime("%Y-%m-%d %H:%M:%S"), msg)
xutils.warn("xmanager", msg)
class HandlerManager:
"""模块管理器
启动时自动加载`handlers`目录下的处理器以及定时任务
"""
def __init__(self, app, vars, mapping = None, last_mapping=None):
self.app = app # webpy app
if mapping is None:
self.basic_mapping = [] # webpy mapping
self.mapping = []
else:
self.basic_mapping = mapping
self.mapping = copy.copy(mapping)
if last_mapping is None:
self.last_mapping = []
else:
self.last_mapping = last_mapping
self.vars = vars
self.search_dict = {}
self.task_dict = {}
self.model_list = []
self.black_list = ["__pycache__"]
self.failed_mods = []
self.debug = True
self.report_loading = False
self.report_unload = True
self.task_manager = CronTaskManager(app)
# stdout装饰器,方便读取print内容
if not isinstance(sys.stdout, MyStdout):
sys.stdout = MyStdout(sys.stdout)
def reload_module(self, name):
try:
if self.report_unload:
log("del " + name)
del sys.modules[name]
__import__(name)
if self.report_loading:
log("reimport " + name)
except Exception as e:
xutils.print_exc()
finally:
pass
def do_reload_inner_modules(self):
del sys.modules['xtemplate']
import xtemplate
xtemplate.reload()
def reload(self):
"""重启handlers目录下的所有的模块"""
self.mapping = []
self.model_list = []
self.failed_mods = []
# 移除所有的事件处理器
remove_event_handlers()
# 重新加载内部模块
self.do_reload_inner_modules()
# 重新加载HTTP处理器
self.load_model_dir(xconfig.HANDLERS_DIR)
self.mapping += self.basic_mapping
self.mapping += self.last_mapping
self.app.init_mapping(self.mapping)
# set 404 page
self.app.notfound = notfound
def get_mod(self, module, name):
namelist = name.split(".")
del namelist[0]
mod = module
for name in namelist:
mod = getattr(mod, name)
return mod
def load_model_dir(self, parent = xconfig.HANDLERS_DIR):
dirname = parent.replace(".", "/")
if not os.path.exists(dirname):
return
for filename in os.listdir(dirname):
try:
filepath = os.path.join(dirname, filename)
if os.path.isdir(filepath):
self.load_model_dir(parent + "." + filename)
continue
name, ext = os.path.splitext(filename)
if os.path.isfile(filepath) and ext == ".py":
modname = parent + "." + name
old_mod = sys.modules.get(modname)
if old_mod is not None:
if hasattr(old_mod, "unload"):
old_mod.unload()
if self.report_unload:
log("del %s" % modname)
del sys.modules[modname] # reload module
# Py3: __import__(name, globals=None, locals=None, fromlist=(), level=0)
# Py2: __import__(name, globals={}, locals={}, fromlist=[], level=-1)
# fromlist不为空(任意真值*-*)可以得到子模块,比如__import__("os.path", fromlist=1)返回<module "ntpath" ...>
# 参考Python源码import.c即可
# <code>has_from = PyObject_IsTrue(fromlist);</code>实际上是个Bool值
# level=0表示绝对路径,-1是默认的
# mod = __import__(modname, fromlist=1, level=0)
# six的这种方式也不错
mod = six._import_module(modname)
self.resolve_module(mod, modname)
except Exception as e:
self.failed_mods.append([filepath, e])
log("Fail to load module '%s'" % filepath)
log("Model traceback (most recent call last):")
xutils.print_exc()
self.report_failed()
def report_failed(self):
for info in self.failed_mods:
log("Failed info: %s" % info)
def resolve_module(self, module, modname):
name = modname
modpath = "/".join(modname.split(".")[1:-1])
if not modpath.startswith("/"):
modpath = "/" + modpath
if hasattr(module, "xurls"):
xurls = module.xurls
for i in range(0, len(xurls), 2):
url = xurls[i]
handler = xurls[i+1]
if not url.startswith(modpath):
log("WARN: pattern %r is invalid, should starts with %r" % (url, modpath))
self.add_mapping(url, handler)
# xurls拥有最高优先级,下面代码兼容旧逻辑
elif hasattr(module, "handler"):
self.resolve_module_old(module, modname)
def get_url_old(self, name):
namelist = name.split(".")
del namelist[0]
return "/" + "/".join(namelist)
def resolve_module_old(self, module, modname):
name = modname
handler = module.handler
clz = name.replace(".", "_")
self.vars[clz] = module.handler
if hasattr(module.handler, "__url__"):
url = module.handler.__url__
elif hasattr(handler, "__xurl__"):
url = handler.__xurl__
elif hasattr(handler, "xurl"):
url = handler.xurl
else:
url = self.get_url_old(name)
self.add_mapping(url, handler)
if hasattr(module, "searchable"):
if not module.searchable:
return
wm = WebModel()
wm.url = url
if hasattr(module, "searchkey"):
wm.searchkey = module.searchkey
if hasattr(module, "name"):
wm.name = module.name
if hasattr(module, "description"):
wm.description = module.description
wm.init()
self.model_list.append(wm)
def load_task(self, module, name):
if not hasattr(module, "task"):
return
task = module.task
if hasattr(task, "taskname"):
taskname = task.taskname
self.task_dict[taskname] = task()
log("Load task (%s,%s)" % (taskname, module.__name__))
def get_mapping(self):
return self.mapping
def add_mapping(self, url, handler):
self.mapping.append(url)
self.mapping.append(do_wrap_handler(url, handler))
if self.report_loading:
log("Load mapping (%s, %s)" % (url, handler))
def run_task(self):
self.task_manager.do_run_task()
def load_tasks(self):
self.task_manager.do_load_tasks()
def get_task_list(self):
return self.task_manager.get_task_list()
class CronTaskManager:
"""定时任务管理器,模拟crontab"""
def __init__(self, app):
self.task_list = []
self.app = app
self.thread_started = False
def _match(self, current, pattern):
if pattern == "mod5":
return current % 5 == 0
return str(current) == pattern or pattern == "*" or pattern == "no-repeat"
def match(self, task, tm=None):
"""是否符合运行条件"""
if tm is None:
tm = time.localtime()
if self._match(tm.tm_wday+1, task.tm_wday) \
and self._match(tm.tm_hour, task.tm_hour) \
and self._match(tm.tm_min, task.tm_min):
return True
return False
def do_run_task(self):
"""执行定时任务"""
def request_url(task):
url = task.url
if url is None: url = ""
quoted_url = xutils.quote_unicode(url)
if quoted_url.startswith(("http://", "https://")):
# 处理外部HTTP请求
response = xutils.urlopen(quoted_url).read()
xutils.log("Request %r success" % quoted_url)
return response
elif url.startswith("script://"):
name = url[len("script://"):]
return xutils.exec_script(name, False)
cookie = xauth.get_user_cookie("admin")
url = url + "?content=" + xutils.quote_unicode(str(task.message))
return self.app.request(url, headers=dict(COOKIE=cookie))
def check_and_run(task, tm):
if self.match(task, tm):
put_task_async(request_url, task)
try:
xutils.trace("RunTask", task.url)
if task.tm_wday == "no-repeat":
# 一次性任务直接删除
dbutil.delete(task.id)
self.load_tasks()
except Exception as e:
xutils.log("run task [%s] failed, %s" % (task.url, e))
def fire_cron_events(tm):
fire("cron.minute", tm)
if tm.tm_min == 0:
fire("cron.hour", tm)
def run():
while True:
# 获取时间信息
tm = time.localtime()
# 定时任务
for task in self.task_list:
check_and_run(task, tm)
# cron.* 事件
put_task_async(fire_cron_events, tm)
tm = time.localtime()
# 等待下一个分钟
sleep_sec = 60 - tm.tm_sec % 60
if sleep_sec > 0:
time.sleep(sleep_sec)
self.do_load_tasks()
if not self.thread_started:
# 任务队列处理线程,开启两个线程
WorkerThread("WorkerThread-1").start()
WorkerThread("WorkerThread-2").start()
# 定时任务调度线程
CronTaskThread(run).start()
self.thread_started = True
def add_task(self, url, interval):
if self._add_task(url, interval):
self.save_tasks()
def del_task(self, url):
self.load_tasks()
def _add_task(self, task):
url = task.url
try:
self.task_list.append(task)
return True
except Exception as e:
print("Add task %s failed, %s" % (url, e))
return False
def do_load_tasks(self):
tasks = dbutil.prefix_list("schedule")
self.task_list = list(tasks)
# 系统默认的任务
backup_task = xutils.Storage(name="[系统]备份", url="/system/backup",
tm_wday = "*", tm_hour="11", tm_min="0",
message = "", sound=0, webpage=0, id=None)
clean_task = xutils.Storage(name = "[系统]磁盘清理", url="/cron/diskclean",
tm_wday = "*", tm_hour="*", tm_min="0",
message = "", sound=0, webpage=0, id=None)
stats_task = xutils.Storage(name = "[系统]数据统计", url = "/cron/stats",
tm_wday = "*", tm_hour="10", tm_min="0",
message = "", sound=0, webpage=0, id=None)
msg_refresh_task = xutils.Storage(name = "[系统]随手记后台刷新信息", url = "/message/refresh",
tm_wday = "*", tm_hour="*", tm_min="29",
message = "", sound=0, webpage=0, id=None)
self.task_list.append(backup_task)
self.task_list.append(clean_task)
self.task_list.append(stats_task)
self.task_list.append(msg_refresh_task)
def save_tasks(self):
self.load_tasks()
def get_task_list(self):
return copy.deepcopy(self.task_list)
class CronTaskThread(Thread):
"""检查定时任务触发条件线程"""
def __init__(self, func, *args):
super(CronTaskThread, self).__init__(name="CronTaskDispatcher")
# 守护线程,防止卡死
self.setDaemon(True)
self.func = func
self.args = args
def run(self):
self.func(*self.args)
class WorkerThread(Thread):
"""执行任务队列的线程,内部有一个队列,所有线程共享
"""
# deque是线程安全的
_task_queue = deque()
def __init__(self, name="WorkerThread"):
super(WorkerThread, self).__init__()
self.setDaemon(True)
self.setName(name)
def run(self):
while True:
# queue.Queue默认是block模式
# 但是deque没有block模式,popleft可能抛出IndexError异常
try:
if self._task_queue:
func, args, kw = self._task_queue.popleft()
func(*args, **kw)
else:
time.sleep(0.01)
except Exception as e:
xutils.print_exc()
class EventHandler:
"""事件处理器"""
def __init__(self, event_type, func, is_async = True, description = ''):
self.event_type = event_type
self.key = None
self.func = func
self.is_async = is_async
self.description = description
self.profile = True
func_name = get_func_abs_name(func)
if self.description:
self.key = "%s:%s" % (func_name, self.description)
else:
self.key = func_name
def execute(self, ctx=None):
if self.is_async:
put_task(self.func, ctx)
else:
try:
if self.profile:
start = time.time()
self.func(ctx)
if self.profile:
stop = time.time()
xutils.trace("EventHandler", self.key, int((stop-start)*1000))
except:
xutils.print_exc()
def __eq__(self, other):
if self.key is not None:
return self.key == other.key
return type(self) == type(other) and self.func == other.func
def __str__(self):
if self.is_async:
return "<EventHandler %s async>" % self.key
return "<EventHandler %s>" % self.key
class SearchHandler(EventHandler):
def execute(self, ctx=None):
try:
matched = self.pattern.match(ctx.key)
if not matched:
return
start = time.time()
ctx.groups = matched.groups()
self.func(ctx)
stop = time.time()
xutils.trace("SearchHandler", self.key, int((stop-start)*1000))
except:
xutils.print_exc()
def __str__(self):
pattern = u(self.pattern.pattern)
return "<SearchHandler /%s/ %s>" % (pattern, self.key)
def get_func_abs_name(func):
module = inspect.getmodule(func)
if module is not None:
return module.__name__ + "." + func.__name__
else:
# print(dir(func))
# print(func.__qualname__)
func_globals = func.__globals__
script_name = func_globals.get("script_name", "unknown")
script_name = xutils.unquote(script_name)
return script_name + "." + func.__name__
# inspect.getfile(func)
# return "<string>." + func.__name__
class EventManager:
"""事件管理器,每个事件由一个执行器链组成,执行器之间有一定的依赖性
@since 2018/01/10
"""
_handlers = dict()
def add_handler(self, handler):
"""
注册事件处理器
事件处理器的去重,通过判断是不是同一个函数,不通过函数名,如果修改初始化脚本需要执行【重新加载模块】功能
"""
event_type = handler.event_type
handlers = self._handlers.get(event_type, [])
if handler in handlers:
warn("handler %s is already registered" % handler)
return
# XXX 使用str(handler)在Python2.7环境下报错
xutils.trace("EventRegister", "%s" % handler)
handlers.append(handler)
self._handlers[event_type] = handlers
def fire(self, event_type, ctx=None):
handlers = self._handlers.get(event_type, [])
for handler in handlers:
handler.execute(ctx)
def remove_handlers(self, event_type = None):
"""移除事件处理器"""
if event_type is None:
self._handlers = dict()
else:
self._handlers[event_type] = []
@xutils.log_init_deco("xmanager.init")
def init(app, vars, last_mapping = None):
global _manager
global _event_manager
_event_manager = EventManager()
_manager = HandlerManager(app, vars, last_mapping = last_mapping)
# 初始化
reload()
# 启动任务
_manager.run_task()
return _manager
def instance():
global _manager
return _manager
@xutils.log_init_deco("xmanager.reload")
def reload():
_event_manager.remove_handlers()
xauth.refresh_users()
# 重载处理器
_manager.reload()
# 重新加载定时任务
_manager.load_tasks()
cacheutil.clear_temp()
load_init_script()
fire("sys.reload")
def load_init_script():
if xconfig.INIT_SCRIPT is not None and os.path.exists(xconfig.INIT_SCRIPT):
try:
xutils.exec_script(xconfig.INIT_SCRIPT)
except:
xutils.print_exc()
print("Failed to execute script %s" % xconfig.INIT_SCRIPT)
def put_task(func, *args, **kw):
"""添加异步任务到队列,如果队列满了会自动降级成同步执行"""
if len(WorkerThread._task_queue) > TASK_POOL_SIZE:
# TODO 大部分是写日志的任务,日志任务单独加一个线程处理
func_name = get_func_abs_name(func)
xutils.warn_sync("xmanager", "task deque is full func_name=%s" % func_name)
try:
func(*args, **kw)
except Exception:
xutils.print_exc()
else:
put_task_async(func, *args, **kw)
def put_task_async(func, *args, **kw):
"""添加异步任务到队列"""
WorkerThread._task_queue.append([func, args, kw])
def load_tasks():
_manager.load_tasks()
def get_task_list():
return _manager.get_task_list()
def request(*args, **kw):
global _manager
# request参数如下
# localpart='/', method='GET', data=None, host="0.0.0.0:8080", headers=None, https=False, **kw
return _manager.app.request(*args, **kw)
def add_event_handler(handler):
_event_manager.add_handler(handler)
def remove_event_handlers(event_type=None):
_event_manager.remove_handlers(event_type)
def set_event_handlers0(event_type, handlers, is_async=True):
_event_manager.remove_handlers(event_type)
for handler in handlers:
_event_manager.add_handler(event_type, handler, is_async)
def fire(event_type, ctx=None):
"""发布一个事件"""
_event_manager.fire(event_type, ctx)
def listen(event_type_list, is_async = True, description = None):
"""事件监听器注解"""
def deco(func):
global _event_manager
if isinstance(event_type_list, list):
for event_type in event_type_list:
handler = EventHandler(event_type, func,
is_async = is_async,
description = description)
_event_manager.add_handler(handler)
else:
event_type = event_type_list
handler = EventHandler(event_type, func,
is_async = is_async,
description = description)
_event_manager.add_handler(handler)
return func
return deco
def searchable(pattern = r".*", description = None, event_type = "search"):
"""搜索装饰器"""
def deco(func):
handler = SearchHandler(event_type, func, description = description)
# unicode_pat = r"^%s\Z" % u(pattern)
unicode_pat = u(pattern)
handler.pattern = re.compile(unicode_pat)
_event_manager.add_handler(handler)
return func
return deco
def find_plugins(category, orderby=None):
return xutils.call("plugin.find_plugins", category, orderby=orderby)
def add_visit_log(user_name, url):
return xutils.call("plugin.add_visit_log", user_name, url)
| gpl-3.0 | -8,350,629,747,782,482,000 | 29.256188 | 108 | 0.54203 | false |
adjustive/dapp | dproject/settings.py | 1 | 3656 | """
Django settings for dproject project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
UPLOADED_DIR = BASE_DIR + '/uploads/uploaded/files/'
#UPLOADED_DIR = BASE_DIR + '/uploads/uploaded/input/'
#UPLOADED_OUTPUT_DIR = BASE_DIR + '/uploads/uploaded/input/'
CONFIG_TEMP = BASE_DIR + '/uploads/uploaded/config_temp/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1()u&f)^aq!1t1(kp%k+6%yq$_u%4_wmo-f&a6d)rq(ddubcg_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dapp'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [''],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
os.path.join(BASE_DIR, "assets"),
os.path.join(BASE_DIR, "uploads"),
os.path.join(BASE_DIR, "result")
]
STATIC_ROOT = ""
#STATIC_ROOT = "/home/vivek/code/dapp/static/"
| gpl-3.0 | -6,799,277,370,082,388,000 | 25.492754 | 91 | 0.683807 | false |
christopher-wahl/sdss_qso_python | analysis/slope_fit.py | 1 | 4730 | from typing import Callable, Union
from numpy import diag, sqrt
from scipy.optimize import curve_fit
from spectrum import Iterable, Spectrum, Tuple
def __linear_func( x: float, m: float, b: float ) -> float:
return m * x + b
def __quad_func( x: float, a: float, b: float, c: float ) -> float:
return a * (x ** 2) + b * x + c
def __log10_fit( x: float, a: float, b: float ) -> float:
from numpy import log10
return a * log10( x ) + b
def generic_fit( fit_function: Callable, x_data: Iterable[ float ], y_data: Iterable[ float ],
get_uncertainty: bool = False ) -> tuple:
"""
Attempts a fit to whatever callable fit_function is passed. The function must be of the form f( x, ... ). Returned
tuple will be in the order of constants in the method header after the first, x, value.
If get_uncertainty is passed, returns a tuple of ( fit values, uncertainty of fit values )
:param fit_function:
:type fit_function: Callable
:param x_data:
:type x_data: Iterable
:param y_data:
:type y_data: Iterable
:param get_uncertainty:
:type get_uncertainty: bool
:return:
:rtype: tuple
"""
coeff, pcov = curve_fit( fit_function, x_data, y_data )
uncert = sqrt( diag( pcov ) )
return tuple( coeff ) if not get_uncertainty else (tuple( coeff ), tuple( uncert ))
def generic_linear_fit( x_data: Iterable[ float ], y_data: Iterable[ float ], get_uncertainty: bool = False ) -> Union[
Tuple[ float, float ], Tuple[ Tuple[ float, float ], Tuple[ float, float ] ] ]:
"""
Performs a generic linear fit to x and y data.
returns a tuple of ( m, b )
If get_uncertainty is True, will return the uncertainties of the fit values as well. The returned value will be
( ( m, b ), ( uncertainty of m, uncertainty of b ) )
:param x_data:
:type x_data: Iterable
:param y_data:
:type y_data: Iterable
:param get_uncertainty:
:type get_uncertainty: bool
:return:
:rtype: tuple
"""
return generic_fit( __linear_func, x_data, y_data, get_uncertainty )
def generic_log10_fit( x_data: Iterable[ float ], y_data: Iterable[ float ], get_uncertainty: bool = False ) -> Union[
Tuple[ float, float ], Tuple[ Tuple[ float, float ], Tuple[ float, float ] ] ]:
"""
Performs a generic log10 fit to x and y data. for the form a * log10( x ) + b
returns a tuple of ( a, b )
If get_uncertainty is True, will return the uncertainties of the fit values as well. The returned value will be
( ( a, b ), ( uncertainty of a, uncertainty of b ) )
:param x_data:
:type x_data: Iterable
:param y_data:
:type y_data: Iterable
:param get_uncertainty:
:type get_uncertainty: bool
:return:
:rtype: tuple
"""
return generic_fit( __log10_fit, x_data, y_data, get_uncertainty )
def generic_quad_fit( x_data: Iterable[ float ], y_data: Iterable[ float ], get_uncertainty: bool = False ) -> Union[
Tuple[ float, float, float ], Tuple[ Tuple[ float, float, float ], Tuple[ float, float, float ] ] ]:
"""
Performs a generic quadaratic fit to x and y data. Returns a tuple of ( a, b, c ) for ax^2 + bx + c
If get_uncertainty is True, will return the uncertainties of the fit values as well. The returned value will be
( ( a, b, c ), ( uncertainty of a, uncertainty of b, uncertainty of c ) )
:param x_data:
:type x_data: Iterable
:param y_data:
:type y_data: Iterable
:param get_uncertainty:
:type get_uncertainty: bool
:return:
:rtype: tuple
"""
return generic_fit( __quad_func, x_data, y_data, get_uncertainty )
def spectrum_linear_fit( spec: Spectrum, wl_low: float = None, wl_high: float = None ) -> Tuple[ float, float ]:
"""
Applies a linear fit to a Specturm over the specified wavelength range. If no wl_ values are passed,
the entirely of the spectrum range is used.
Returns a tuple of ( m, b ) for:
Flux Density = m * Wavelength + b
:param spec: Spectrum to slope fit
:type spec: Spectrum
:param wl_low: Low limit of wavelength range. Defaults to None
:type wl_low: float
:param wl_high: Upper limit of wavelength range. Defaults to None
:type wl_high: float
:return: ( m, b )
:rtype: tuple
"""
wls = spec.getWavelengths()
if wl_low is not None:
wls = filter( lambda wl: wl >= wl_low, wls )
if wl_high is not None:
wls = filter( lambda wl: wl <= wl_high, wls )
fluxdata = [ spec.getFlux( wl ) for wl in wls ] # Can't use .getFluxlist here in clase wavelength limits used
return generic_linear_fit( wls, fluxdata )
| mit | -2,830,930,559,680,619,500 | 34.298507 | 120 | 0.628753 | false |
D-K-E/PySesh | pysesh/core/modules/TextOperations/MDCParser/lex/mdctext_sign.py | 1 | 1076 | ##################################
# lex taken from the jsesh
# ################################
# original author: Serge Rosmorduc
# python author: Kaan Eraslan
# license: GPL-3, see LICENSE
# No Warranty
#
# Note: Explanations in the docstrings are for the most part taken
# from java source files
###################################
class MDCSign(object):
"Sign object for mdc text"
def __init__(self, sign_type: int, sign_string: str):
"Constructor"
self.sign_type = sign_type
self.sign_string = sign_string
#
#
def getType(self) -> int:
"Get sign type for the instance"
return self.sign_type
#
def setType(self, s_type: int) -> None:
"Set sign type for the instance"
self.sign_type = s_type
return None
#
def getSignString(self) -> str:
"Get sign string from the instance"
return self.sign_type
#
def setSignString(self, s_string: str) -> None:
"Set sign to the instance"
self.sign_string = s_string
return None
#
| gpl-3.0 | -3,139,373,562,718,866,000 | 27.315789 | 66 | 0.552045 | false |
alok108/doghustle | doghustle/auth/forms.py | 1 | 1676 | from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django import forms
from django.contrib.auth.models import User
class SimpleUserCreation(UserCreationForm):
email = forms.EmailField(required=True, widget=forms.widgets.TextInput(attrs={'class':'form-control','placeholder': 'Email Address'}))
full_name = forms.CharField(required=True, widget=forms.widgets.TextInput(attrs={'class':'form-control','placeholder': 'Full Name'}))
username = forms.CharField(required=False, widget=forms.widgets.HiddenInput())
first_name = forms.CharField(required=False, widget=forms.widgets.HiddenInput())
last_name = forms.CharField(required=False, widget=forms.widgets.HiddenInput())
password1 = forms.CharField(widget=forms.widgets.PasswordInput(attrs={'class':'form-control','placeholder': 'Password'}))
password2 = forms.CharField(widget=forms.widgets.PasswordInput(attrs={'class':'form-control','placeholder': 'Password Confirmation'}))
class Meta:
model = User
fields = ["full_name","email","password1","password2"]
class SimpleAuthForm(AuthenticationForm):
email = forms.EmailField(widget=forms.widgets.TextInput(attrs={'class':'form-control','placeholder': 'Email Address'}))
username = forms.CharField(required=False, widget=forms.widgets.HiddenInput())
password = forms.CharField(widget=forms.widgets.PasswordInput(attrs={'class':'form-control','placeholder': 'Password'}))
class Meta:
model = User
fields = ["email","username","password"]
def __init__(self, *args, **kwargs):
super(SimpleAuthForm, self).__init__(*args, **kwargs)
self.fields.keyOrder = [
'email',
'username',
'password']
| gpl-2.0 | 3,129,969,944,355,798,500 | 51.375 | 135 | 0.74105 | false |
jggatc/pyjsdl | surface.py | 1 | 12005 | #Pyjsdl - Copyright (C) 2013 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from pyjsdl.pyjsobj import HTML5Canvas
from pyjsdl.rect import Rect, rectPool
from pyjsdl.color import Color
from __pyjamas__ import JS
import sys
if sys.version_info < (3,):
from pyjsdl.util import _range as range
__docformat__ = 'restructuredtext'
class Surface(HTML5Canvas):
"""
**pyjsdl.Surface**
* Surface.get_size
* Surface.get_width
* Surface.get_height
* Surface.get_rect
* Surface.resize
* Surface.copy
* Surface.subsurface
* Surface.getSubimage
* Surface.blit
* Surface.set_colorkey
* Surface.get_colorkey
* Surface.replace_color
* Surface.get_at
* Surface.set_at
* Surface.fill
* Surface.get_parent
* Surface.get_offset
* Surface.toDataURL
"""
def __init__(self, size, *args, **kwargs):
"""
Return Surface subclassed from a Canvas implementation.
The size argument is the dimension (w,h) of surface.
Module initialization places pyjsdl.Surface in module's namespace.
"""
self.width = int(size[0])
self.height = int(size[1])
HTML5Canvas.__init__(self, self.width, self.height)
HTML5Canvas.resize(self, self.width, self.height)
self._display = None #display surface
self._super_surface = None
self._offset = (0,0)
self._colorkey = None
self._stroke_style = None
self._fill_style = None
self._nonimplemented_methods()
def __str__(self):
s = "<%s(%dx%d)>"
return s % (self.__class__.__name__, self.width, self.height)
def __repr__(self):
return self.__str__()
def get_size(self):
"""
Return width and height of surface.
"""
return (self.width, self.height)
def get_width(self):
"""
Return width of surface.
"""
return self.width
def get_height(self):
"""
Return height of surface.
"""
return self.height
def resize(self, width, height):
"""
Resize surface.
"""
self.width = int(width)
self.height = int(height)
HTML5Canvas.resize(self, self.width, self.height)
def get_rect(self, **attr):
"""
Return rect of the surface.
An optional keyword argument of the rect position.
"""
rect = Rect(0, 0, self.width, self.height)
for key in attr:
setattr(rect, key, attr[key])
return rect
def copy(self):
"""
Return Surface that is a copy of this surface.
"""
surface = Surface((self.width,self.height))
surface.drawImage(self.canvas, 0, 0)
return surface
def subsurface(self, rect):
"""
Return Surface that represents a subsurface.
The rect argument is the area of the subsurface.
Argument can be 't'/'f' for data sync to/from subsurface.
"""
if rect in ('t', 'f'):
if not self._super_surface:
return
if rect == 't':
self.drawImage(self._super_surface.canvas, self._offset[0], self._offset[1], self.width, self.height, 0, 0, self.width, self.height)
else:
self._super_surface.drawImage(self.canvas, self._offset[0], self._offset[1])
return
if hasattr(rect, 'width'):
_rect = rect
else:
_rect = Rect(rect)
surf_rect = self.get_rect()
if not surf_rect.contains(_rect):
raise ValueError('subsurface outside surface area')
surface = self.getSubimage(_rect.x, _rect.y, _rect.width, _rect.height)
surface._super_surface = self
surface._offset = (_rect.x,_rect.y)
surface._colorkey = self._colorkey
return surface
def getSubimage(self, x, y, width, height):
"""
Return subimage of Surface.
Arguments include x, y, width, and height of the subimage.
"""
surface = Surface((width,height))
surface.drawImage(self.canvas, x, y, width, height, 0, 0, width, height)
return surface
def blit(self, surface, position, area=None):
"""
Draw given surface on this surface at position.
Optional area delimitates the region of given surface to draw.
"""
if not area:
rect = rectPool.get(position[0],position[1],surface.width,surface.height)
self.impl.canvasContext.drawImage(surface.canvas, rect.x, rect.y)
else:
rect = rectPool.get(position[0],position[1],area[2], area[3])
self.impl.canvasContext.drawImage(surface.canvas, area[0], area[1], area[2], area[3], rect.x, rect.y, area[2], area[3])
if self._display:
surface_rect = self._display._surface_rect
else:
surface_rect = self.get_rect()
changed_rect = surface_rect.clip(rect)
rectPool.append(rect)
return changed_rect
def _blits(self, surfaces):
ctx = self.impl.canvasContext
for surface, rect in surfaces:
ctx.drawImage(surface.canvas, rect.x, rect.y)
def _blit_clear(self, surface, rect_list):
ctx = self.impl.canvasContext
for r in rect_list:
ctx.drawImage(surface.canvas, r.x,r.y,r.width,r.height, r.x,r.y,r.width,r.height)
def set_colorkey(self, color, flags=None):
"""
Set surface colorkey.
"""
if self._colorkey:
self.replace_color((0,0,0,0),self._colorkey)
self._colorkey = None
if color:
self._colorkey = Color(color)
self.replace_color(self._colorkey)
return None
def get_colorkey(self):
"""
Return surface colorkey.
"""
if self._colorkey:
return ( self._colorkey.r,
self._colorkey.g,
self._colorkey.b,
self._colorkey.a )
else:
return None
def _getPixel(self, imagedata, index):
return JS("imagedata.data[@{{index}}];")
def _setPixel(self, imagedata, index, dat):
data = str(dat)
JS("imagedata.data[@{{index}}]=@{{data}};")
return
def replace_color(self, color, new_color=None):
"""
Replace color with with new_color or with alpha.
"""
pixels = self.impl.getImageData(0,0,self.width,self.height)
if hasattr(color, 'a'):
color1 = color
else:
color1 = Color(color)
if new_color is None:
alpha_zero = True
else:
if hasattr(new_color, 'a'):
color2 = new_color
else:
color2 = Color(new_color)
alpha_zero = False
if alpha_zero:
r1,g1,b1,a1 = color1.r, color1.g, color1.b, color1.a
a2 = 0
for i in range(0, len(pixels.data), 4):
if ( self._getPixel(pixels,i) == r1 and
self._getPixel(pixels,i+1) == g1 and
self._getPixel(pixels,i+2) == b1 and
self._getPixel(pixels,i+3) == a1 ):
self._setPixel(pixels, i+3, a2)
else:
r1,g1,b1,a1 = color1.r, color1.g, color1.b, color1.a
r2,g2,b2,a2 = color2.r, color2.g, color2.b, color2.a
for i in range(0, len(pixels.data), 4):
if ( self._getPixel(pixels,i) == r1 and
self._getPixel(pixels,i+1) == g1 and
self._getPixel(pixels,i+2) == b1 and
self._getPixel(pixels,i+3) == a1 ):
self._setPixel(pixels, i, r2)
self._setPixel(pixels, i+1, g2)
self._setPixel(pixels, i+2, b2)
self._setPixel(pixels, i+3, a2)
self.impl.putImageData(pixels,0,0,0,0,self.width,self.height)
return None
def get_at(self, pos):
"""
Get color of a surface pixel. The pos argument represents x,y position of pixel.
Return color (r,g,b,a) of a surface pixel.
"""
pixel = self.impl.getImageData(pos[0], pos[1], 1, 1)
return Color([self._getPixel(pixel,i) for i in (0,1,2,3)])
def set_at(self, pos, color):
"""
Set color of a surface pixel.
The arguments represent position x,y and color of pixel.
"""
if self._fill_style != color:
self._fill_style = color
if hasattr(color, 'a'):
_color = color
else:
_color = Color(color)
self.setFillStyle(_color)
self.fillRect(pos[0], pos[1], 1, 1)
return None
def fill(self, color=None, rect=None):
"""
Fill surface with color.
"""
if color is None:
HTML5Canvas.fill(self)
return
if color:
if self._fill_style != color:
self._fill_style = color
if hasattr(color, 'a'):
self.setFillStyle(color)
else:
self.setFillStyle(Color(color))
if not rect:
_rect = Rect(0, 0, self.width, self.height)
else:
if self._display:
surface_rect = self._display._surface_rect
else:
surface_rect = self.get_rect()
if hasattr(rect, 'width'):
_rect = surface_rect.clip( rect )
else:
_rect = surface_rect.clip( Rect(rect) )
if not _rect.width or not _rect.height:
return _rect
self.fillRect(_rect.x, _rect.y, _rect.width, _rect.height)
else:
_rect = Rect(0, 0, self.width, self.height)
self.clear()
return _rect
def get_parent(self):
"""
Return parent Surface of subsurface.
"""
return self._super_surface #if delete, delete subsurface...
def get_offset(self):
"""
Return offset of subsurface in surface.
"""
return self._offset
def toDataURL(self, datatype=None):
"""
Return surface data as a base64 data string.
Optional datatype to set data format, default to 'image/png'.
Implemented with HTML5 Canvas toDataURL method.
"""
if not datatype:
return self.canvas.toDataURL()
else:
return self.canvas.toDataURL(datatype)
def _nonimplemented_methods(self):
self.convert = lambda *arg: self
self.convert_alpha = lambda *arg: self
self.set_alpha = lambda *arg: None
self.get_alpha = lambda *arg: None
self.lock = lambda *arg: None
self.unlock = lambda *arg: None
self.mustlock = lambda *arg: False
self.get_locked = lambda *arg: False
self.get_locks = lambda *arg: ()
class Surf(object):
def __init__(self, image):
self.canvas = image
self.width, self.height = self.canvas.width, self.canvas.height
self._nonimplemented_methods()
def get_size(self):
return (self.width, self.height)
def get_width(self):
return self.width
def get_height(self):
return self.height
def _nonimplemented_methods(self):
self.convert = lambda *arg: self
self.convert_alpha = lambda *arg: self
self.set_alpha = lambda *arg: None
self.get_alpha = lambda *arg: None
self.lock = lambda *arg: None
self.unlock = lambda *arg: None
self.mustlock = lambda *arg: False
self.get_locked = lambda *arg: False
self.get_locks = lambda *arg: ()
class IndexSizeError(Exception):
pass
| mit | -6,440,782,024,990,421,000 | 31.622283 | 148 | 0.541608 | false |
chapmanb/metasv | metasv/process_age_alignment.py | 1 | 22283 | from __future__ import print_function
import logging
import multiprocessing
from collections import defaultdict
import pybedtools
from defaults import MIN_INV_SUBALIGN_LENGTH, MIN_DEL_SUBALIGN_LENGTH
logger = logging.getLogger(__name__)
def get_insertion_breakpoints(age_records, intervals, expected_bp_pos, window=20, start=0, dist_to_expected_bp=50):
func_logger = logging.getLogger("%s-%s" % (get_insertion_breakpoints.__name__, multiprocessing.current_process()))
bedtools_intervals = [pybedtools.Interval("1", interval[0], interval[1]) for interval in sorted(intervals)]
func_logger.info("bedtools_intervals %s" % (str(bedtools_intervals)))
if not bedtools_intervals:
return []
potential_breakpoints = sorted(list(set(
[interval.start for interval in bedtools_intervals] + [interval.end for interval in bedtools_intervals])))
breakpoints = []
for breakpoint in potential_breakpoints[1:-1]:
# Check if the breakpoint is within window distance of a validated breakpoint
if min([window + 1] + [abs(b[0] - breakpoint) for b in breakpoints]) <= window:
continue
func_logger.info("\tExamining potential breakpoint %d for support" % breakpoint)
left_support = [interval[0] for interval in intervals if abs(interval[0] - breakpoint) <= window]
right_support = [interval[1] for interval in intervals if abs(interval[1] - breakpoint) <= window]
counter_examples = [age_record for age_record in age_records if age_record.has_long_ref_flanks() and (
age_record.has_ref_deletion(window) or age_record.has_insertion(min_diff=1,
max_diff=49)) and age_record.breakpoint_match(
breakpoint, window)]
if counter_examples:
counter_example_ends = [age_record.start1_end1s for age_record in counter_examples]
func_logger.info("\t\tSkipping breakpoint %d due to %s" % (breakpoint, str(counter_example_ends)))
continue
if left_support:
func_logger.info("\t\tLeft support %s" % (str(left_support)))
if right_support:
func_logger.info("\t\tRight support %s" % (str(right_support)))
if (left_support and right_support) and min(
[window + 1] + [abs(b[0] - breakpoint) for b in breakpoints]) > window:
both_support = [age_record for age_record in age_records if
age_record.has_insertion(min_diff=50, max_diff=1000000000) and age_record.breakpoint_match(
breakpoint, window)]
if both_support:
func_logger.info("\t\tboth_support = %s" % (str(both_support)))
func_logger.info("\t\tinsertion lengths = %s" % (
str([age_record.insertion_length() for age_record in both_support])))
insertion_length = max([0] + [age_record.insertion_length() for age_record in both_support])
func_logger.info("\t\tInsertion length = %d" % insertion_length)
breakpoints.append((breakpoint, insertion_length))
func_logger.info("Nonfiltered breakpoints as %s" % (str(breakpoints)))
if len(breakpoints)>1:
breakpoints=filter(lambda x: min(abs(x[0]-expected_bp_pos[0]),abs(expected_bp_pos[1]-x[0]))<dist_to_expected_bp,breakpoints)
func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
return [(start + b[0], b[1]) for b in breakpoints]
def get_deletion_breakpoints(age_records, window=20, min_flank_length=50, start=0):
func_logger = logging.getLogger("%s-%s" % (get_deletion_breakpoints.__name__, multiprocessing.current_process()))
potential_breakpoints = sorted(
[age_record.start1_end1s[0][1] for age_record in age_records] + [age_record.start1_end1s[1][0] for age_record in
age_records])
breakpoints = []
for breakpoint in potential_breakpoints:
left_support = [age_record for age_record in age_records if
abs(age_record.start1_end1s[0][1] - breakpoint) < window]
right_support = [age_record for age_record in age_records if
abs(age_record.start1_end1s[1][0] - breakpoint) < window]
if (left_support or right_support) and min([window + 1] + [abs(b - breakpoint) for b in breakpoints]) >= window:
breakpoints.append(breakpoint)
func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
return [start + breakpoint for breakpoint in breakpoints]
def check_closeness_to_bp(pos,pad,dist_to_expected_bp,LR_bp,seq_length=0):
if LR_bp == 'L':
return abs(pos-pad)<dist_to_expected_bp
else:
return abs(pos-(seq_length-pad))<dist_to_expected_bp
def get_inversion_breakpoints(age_records, window=20, min_endpoint_dist=10, start=0, pad=500, dist_to_expected_bp=400, min_inv_subalign_len=MIN_INV_SUBALIGN_LENGTH):
func_logger = logging.getLogger("%s-%s" % (get_deletion_breakpoints.__name__, multiprocessing.current_process()))
potential_breakpoints = []
for age_record in age_records:
polarities=[abs(age_record.polarities1[i]-age_record.polarities2[i]) for i in range(age_record.nfrags)]
good_intervals=[i for i in range(age_record.nfrags) if abs(age_record.start1_end1s[i][1]-age_record.start1_end1s[i][0]) > min_inv_subalign_len and abs(age_record.start2_end2s[i][1]-age_record.start2_end2s[i][0]) > min_inv_subalign_len]
good_intervals=[i for i in good_intervals if abs(age_record.start1_end1s[i][1]-age_record.start1_end1s[i][0]) <= max(age_record.inputs[0].length-2*(pad-dist_to_expected_bp),pad+dist_to_expected_bp)]
func_logger.info('Good intervals: %s'%str(good_intervals))
if len(good_intervals)<2:
func_logger.info('Not enough good interval for this age record: %s'%str(age_record))
continue
candidate_inv_intervals=[]
inv_interval=-1
long_inversion=False
left_end_near_l_bp=filter(lambda x: check_closeness_to_bp(min(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"L"), good_intervals)
right_end_near_r_bp=filter(lambda x: check_closeness_to_bp(max(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"R",age_record.inputs[0].length), good_intervals)
right_end_near_l_bp=filter(lambda x: check_closeness_to_bp(max(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"L"), good_intervals)
left_end_near_r_bp=filter(lambda x: check_closeness_to_bp(min(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"R",age_record.inputs[0].length), good_intervals)
candidate_inv_intervals=list(set(left_end_near_l_bp)&set(right_end_near_r_bp))
candidate_norm_intervals=list(set(left_end_near_r_bp)|set(right_end_near_l_bp))
if len(candidate_inv_intervals)>1 and len(candidate_norm_intervals)<=1:
candidate_inv_intervals=list(set(candidate_inv_intervals)-set(candidate_norm_intervals))
if len(candidate_inv_intervals)>1:
dist_to_exp_bps=map(lambda x: abs(min(age_record.start1_end1s[x])-pad)+abs(max(age_record.start1_end1s[x])-(age_record.inputs[0].length-pad)),candidate_inv_intervals)
inv_interval=min(enumerate(dist_to_exp_bps),key=lambda x:x[1])[0]
elif len(candidate_inv_intervals)==1 :
inv_interval=candidate_inv_intervals[0]
if inv_interval==-1:
#Potentially long inversion
candidate_inv_intervals=[i for i in left_end_near_l_bp if ((set(candidate_norm_intervals)&set(left_end_near_r_bp))-set([i]))] + \
[i for i in right_end_near_r_bp if ((set(candidate_norm_intervals)&set(right_end_near_l_bp))-set([i]))]
if len(candidate_inv_intervals)>1:
candidate_inv_intervals=[i for i in set(candidate_inv_intervals)&set(left_end_near_l_bp) if (pad< (sum(age_record.start1_end1s[i])/2.0))] + \
[i for i in set(candidate_inv_intervals)&set(right_end_near_r_bp) if ((age_record.inputs[0].length-pad) > (sum(age_record.start1_end1s[i])/2.0))]
if candidate_inv_intervals:
func_logger.info('Potentially long-inversion interval: %s'%candidate_inv_intervals)
long_inversion=True
if len(candidate_inv_intervals)>1:
dist_to_exp_bps=map(lambda x: abs(min(age_record.start1_end1s[x])-pad) if i in left_end_near_l_bp else abs(max(age_record.start1_end1s[x])-(age_record.inputs[0].length-pad)),candidate_inv_intervals)
inv_interval=min(enumerate(dist_to_exp_bps),key=lambda x:x[1])[0]
else:
inv_interval=candidate_inv_intervals[0]
elif age_record.inputs[0].length > ((2*pad+min_inv_subalign_len)):
long_inversion=True
if inv_interval==-1:
func_logger.info('Not candidate inversion interval found for this age record: %s'%str(age_record))
continue
func_logger.info('age_record: %s'%str(age_record))
func_logger.info('inverted interval: %s'%str(inv_interval))
candidate_norm_intervals=filter(lambda x: polarities[x]!=polarities[inv_interval], set(candidate_norm_intervals)-set([inv_interval]))
if long_inversion and (inv_interval not in set(left_end_near_l_bp) & set(right_end_near_r_bp)) :
candidate_norm_intervals=list(set(candidate_norm_intervals)&set(left_end_near_r_bp if (inv_interval in left_end_near_l_bp) else right_end_near_l_bp))
if not candidate_norm_intervals:
func_logger.info('Cannot find the normal interval for this age record: %s'%str(age_record))
continue
if len(candidate_norm_intervals)>1:
candidate_norm_intervals=map(lambda x: (x,abs(age_record.start1_end1s[x][0]-age_record.start1_end1s[x][1])),set(candidate_norm_intervals))
norm_interval,norm_length=max(candidate_norm_intervals,key=lambda x:x[2])
else:
norm_interval=candidate_norm_intervals[0]
func_logger.info('norm_interval: %s'%str(norm_interval))
s_inv=sorted(age_record.start1_end1s[inv_interval])
s_norm=sorted(age_record.start1_end1s[norm_interval])
if (s_norm[0]-s_inv[0])*(s_norm[1]-s_inv[1])<=0:
func_logger.info('Bad intervals (one fully covers the other): %s'%str(age_record))
continue
if not long_inversion:
interval=age_record.start2_end2s[inv_interval]
if min([interval[0],abs(interval[0]-age_record.inputs[1].length),
interval[1],abs(interval[1]-age_record.inputs[1].length)]) < min_endpoint_dist:
func_logger.info('Inverted interval end points are too close to borders in Seq2: %s'%str(age_record))
continue
if (((s_norm[1]>s_inv[1]) and ((s_inv[1]-s_norm[0])>10)) or ((s_norm[0]<s_inv[0]) and ((s_norm[1]-s_inv[0])>10))):
func_logger.info('Bad middle bp in seq1 (covers>10): %s'%str(age_record))
continue
if (((s_norm[1]>s_inv[1]) and ((s_norm[0]-s_inv[1])>50)) or ((s_norm[0]<s_inv[0]) and ((s_inv[0]-s_norm[1])>50))):
func_logger.info('Bad middle bp in seq1 (apart>50): %s'%str(age_record))
continue
bp_idx = 0 if (s_norm[1]>s_inv[1]) else 1
bp1=s_inv[bp_idx]
bp2=s_norm[bp_idx]
bp1_seq2=age_record.start2_end2s[inv_interval][filter(lambda x:age_record.start1_end1s[inv_interval][x]==bp1,[0,1])[0]]
bp2_seq2=age_record.start2_end2s[norm_interval][filter(lambda x:age_record.start1_end1s[norm_interval][x]==bp2,[0,1])[0]]
if abs(bp1_seq2-bp2_seq2)>10:
func_logger.info('BPs do not match in seq2: %s'%str(age_record))
continue
potential_breakpoints += [bp1,bp2]
potential_breakpoints=sorted(potential_breakpoints)
breakpoints = []
for breakpoint in potential_breakpoints:
if min([window + 1] + [abs(b - breakpoint) for b in breakpoints]) >= window:
breakpoints.append(breakpoint)
func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
return [start + breakpoint for breakpoint in breakpoints]
def get_duplication_breakpoints(age_records, window=20, max_endpoint_dist=10, start=0, pad=500, dist_to_expected_bp=400):
func_logger = logging.getLogger("%s-%s" % (get_deletion_breakpoints.__name__, multiprocessing.current_process()))
potential_breakpoints = []
for age_record in age_records:
left_end_near_l_bp=filter(lambda x: check_closeness_to_bp(min(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"L"), [0,1])
right_end_near_r_bp=filter(lambda x: check_closeness_to_bp(max(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"R",age_record.inputs[0].length), [0,1])
if (not left_end_near_l_bp) or (not right_end_near_r_bp):
func_logger.info('Not close to expected BPs: %s'%str(age_record))
continue
if len(left_end_near_l_bp)==2 and len(right_end_near_r_bp)==1:
left_end_near_l_bp = list(set(left_end_near_l_bp)-set(right_end_near_r_bp))
elif len(left_end_near_l_bp)==1 and len(right_end_near_r_bp)==2:
right_end_near_r_bp = list(set(right_end_near_r_bp)-set(left_end_near_l_bp))
elif len(left_end_near_l_bp)==2 and len(right_end_near_r_bp)==2:
dist_to_exp_l_bp=map(lambda x: abs(min(age_record.start1_end1s[x])-pad),[0,1])
dist_to_exp_r_bp=map(lambda x: abs(max(age_record.start1_end1s[x])-(age_record.inputs[0].length-pad)),[0,1])
left_end_near_l_bp, right_end_near_r_bp = [[0],[1]] if (dist_to_exp_l_bp[0]+dist_to_exp_r_bp[1]) < (dist_to_exp_l_bp[1]+dist_to_exp_r_bp[0]) else [[1],[0]]
l_interval = left_end_near_l_bp[0]
r_interval = right_end_near_r_bp[0]
bp_idx_l = 0 if age_record.start1_end1s[l_interval][0]<age_record.start1_end1s[l_interval][1] else 1
bp_idx_r = 1 if age_record.start1_end1s[r_interval][0]<age_record.start1_end1s[r_interval][1] else 0
if abs(age_record.start2_end2s[l_interval][bp_idx_l]-age_record.start2_end2s[r_interval][bp_idx_r]) > 10:
func_logger.info('BPs do not match in seq2: %s'%str(age_record))
continue
end_l_seq2 = age_record.start2_end2s[l_interval][1-bp_idx_l]
end_r_seq2 = age_record.start2_end2s[r_interval][1-bp_idx_r]
if max(min(end_r_seq2,end_l_seq2),
min(end_l_seq2-age_record.inputs[1].length,
end_r_seq2-age_record.inputs[1].length)) > max_endpoint_dist:
func_logger.info('End points are too close to borders in Seq2: %s'%str(age_record))
continue
potential_breakpoints += [age_record.start1_end1s[l_interval][bp_idx_l],age_record.start1_end1s[r_interval][bp_idx_r]]
potential_breakpoints=sorted(potential_breakpoints)
breakpoints = []
for breakpoint in potential_breakpoints:
if min([window + 1] + [abs(b - breakpoint) for b in breakpoints]) >= window:
breakpoints.append(breakpoint)
func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
return [start + breakpoint for breakpoint in breakpoints]
def get_reference_intervals(age_records, start=0, min_interval_len=100):
intervals = []
for age_record in age_records:
intervals += map(lambda x: (min(x) + start - 1, max(x) + start - 1),
[interval for interval in age_record.start1_end1s if
abs(interval[0] - interval[1]) >= min_interval_len])
return intervals
def process_age_records(age_records, sv_type="INS", ins_min_unaligned=10, min_interval_len=200, pad=500,
min_deletion_len=30, min_del_subalign_len=MIN_DEL_SUBALIGN_LENGTH,
min_inv_subalign_len=MIN_INV_SUBALIGN_LENGTH, dist_to_expected_bp=400):
func_logger = logging.getLogger("%s-%s" % (process_age_records.__name__, multiprocessing.current_process()))
good_age_records = age_records
if sv_type == "INS":
good_age_records = [age_record for age_record in good_age_records if
not age_record.almost_all_bases_aligned(ins_min_unaligned)]
good_age_records = [age_record for age_record in good_age_records if not age_record.is_reference()]
elif sv_type == "DEL":
good_age_records = [age_record for age_record in good_age_records if
len(age_record.start1_end1s) == 2 and min(age_record.ref_flanking_regions) >= min_del_subalign_len]
good_age_records = [age_record for age_record in good_age_records if
abs(age_record.start1_end1s[0][1] - age_record.start1_end1s[1][0]) >= min_deletion_len]
good_age_records = [age_record for age_record in good_age_records if
float(age_record.score) / sum(age_record.ref_flanking_regions) >= 0.7]
good_age_records = [age_record for age_record in good_age_records if
abs(age_record.start2_end2s[0][1] - age_record.start2_end2s[1][0]) <= 50]
good_age_records = [age_record for age_record in good_age_records if
check_closeness_to_bp(min(age_record.start1_end1s[0][1],
age_record.start1_end1s[1][0]),
pad,dist_to_expected_bp,"L") and
check_closeness_to_bp(max(age_record.start1_end1s[0][1],
age_record.start1_end1s[1][0]),
pad,dist_to_expected_bp,"R",
age_record.inputs[0].length)]
elif sv_type == "INV":
good_age_records = [age_record for age_record in good_age_records if
len(age_record.start1_end1s) >= 2 and min(map(lambda x:abs(x[1]-x[0]),age_record.start1_end1s)) >= min_inv_subalign_len]
elif sv_type == "DUP":
good_age_records = [age_record for age_record in good_age_records if
len(age_record.start1_end1s) == 2 and min(age_record.ref_flanking_regions) >= 100]
else:
pass
# Add some features to an info dict
info = defaultdict(int)
info["BA_NUM_GOOD_REC"] = len(good_age_records)
if not good_age_records:
func_logger.warning("No good records found for getting breakpoints")
return [], dict(info)
for rec in good_age_records:
info["BA_FLANK_PERCENT"] = int(max(info["BA_FLANK_PERCENT"], rec.flank_percent))
info["BA_NFRAGS"] = int(max(info["BA_NFRAGS"], rec.nfrags))
info["BA_NUM_ALT"] = int(max(info["BA_NUM_ALT"], rec.n_alt))
info["BA_PERCENT_MATCH"] = int(max(info["BA_PERCENT_MATCH"], rec.percent))
func_logger.info("Found %d good records for getting breakpoints" % (len(good_age_records)))
func_logger.info("Good records")
for age_record in good_age_records:
func_logger.info(str(age_record))
sv_region = good_age_records[0].contig.sv_region
if sv_type == "DEL":
breakpoints = get_deletion_breakpoints(good_age_records, start=sv_region.pos1 - pad)
elif sv_type == "INS":
reference_intervals = get_reference_intervals(good_age_records, start=1, min_interval_len=min_interval_len)
func_logger.info("Gathered reference intervals as %s" % (str(reference_intervals)))
breakpoints = get_insertion_breakpoints(good_age_records, reference_intervals, expected_bp_pos=[2*pad,max((sv_region.pos2-sv_region.pos1),0)] ,start=sv_region.pos1 - pad)
elif sv_type == "INV":
breakpoints = get_inversion_breakpoints(good_age_records, start=sv_region.pos1 - pad ,pad=pad, min_inv_subalign_len=min_inv_subalign_len, dist_to_expected_bp=dist_to_expected_bp)
elif sv_type == "DUP":
breakpoints = get_duplication_breakpoints(good_age_records, start=sv_region.pos1 - pad ,pad=pad, dist_to_expected_bp=dist_to_expected_bp)
else:
return [], dict(info)
func_logger.info("Detected breakpoints as %s" % (str(breakpoints)))
# Add a few more features related to the breakpoints computed
info["BA_NUM_BP"] = len(breakpoints)
if sv_type == "DEL":
if len(breakpoints) == 2:
func_logger.info("True deletion interval %s" % (str(breakpoints)))
else:
func_logger.info("False deletion interval %s" % (str(breakpoints)))
return [], dict(info)
elif sv_type == "INS":
if len(breakpoints) == 1:
if sv_region.pos2 - sv_region.pos1 <= 20:
info["BA_BP_SCORE"] = abs(breakpoints[0][0] - sv_region.pos1)
if abs(breakpoints[0][0] - sv_region.pos1) > 20:
return [], dict(info)
else:
diff1 = breakpoints[0][0] - sv_region.pos1
diff2 = sv_region.pos2 - breakpoints[0][0]
info["BA_BP_SCORE"] = min(abs(diff1 - pad), abs(diff2 - pad))
if not (pad - 25 <= diff1 <= pad + 25 or pad - 25 <= diff2 <= pad + 25):
return [], dict(info)
func_logger.info("True insertion interval %s" % (str(breakpoints)))
else:
return [], dict(info)
elif sv_type == "INV":
if len(breakpoints) == 2:
func_logger.info("True inversion interval %s" % (str(breakpoints)))
else:
func_logger.info("False inversion interval %s" % (str(breakpoints)))
return [], dict(info)
elif sv_type == "DUP":
if len(breakpoints) == 2:
func_logger.info("True duplication interval %s" % (str(breakpoints)))
else:
func_logger.info("False duplication interval %s" % (str(breakpoints)))
return [], dict(info)
return breakpoints, dict(info)
| bsd-2-clause | 6,027,422,886,592,959,000 | 57.639474 | 243 | 0.612709 | false |
EnviDat/ckanext-oaipmh_repository | ckanext/oaipmh_repository/oaipmh_error.py | 1 | 3392 | class OAIPMHError(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
def set_message(self, message):
self.message = message
def as_xml_dict(self):
return {'error': {'@code': self.code, '#text': self.message}}
def __repr__(self):
return str(self)
def __str__(self):
return str(self).encode('utf-8')
def __unicode__(self):
return u'OAIPMHError({0}): \'{1}\' '.format(self.code, self.message)
# badArgument (all verbs)
class BadArgumentError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''The request includes illegal arguments, is missing required
arguments, includes a repeated argument, or values for arguments
have an illegal syntax.'''
OAIPMHError.__init__(self, 'badArgument', message)
# badVerb (N/A).
class BadVerbError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''Value of the verb argument is not a legal OAI-PMH verb, the
verb argument is missing, or the verb argument is repeated.'''
OAIPMHError.__init__(self, 'badVerb', message)
# badResumptionToken (ListIdentifiers, ListRecords, ListSets)
class BadResumptionTokenError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''The value of the resumptionToken argument is invalid or expired.'''
OAIPMHError.__init__(self, 'badResumptionToken', message)
# cannotDisseminateFormat (GetRecord, ListIdentifiers, ListRecords)
class CannotDisseminateFormatError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''The metadata format identified by the value given for the
metadataPrefix argument is not supported by the item or by
the repository.'''
OAIPMHError.__init__(self, 'cannotDisseminateFormat', message)
# idDoesNotExist (GetRecordList, MetadataFormats)
class IdDoesNotExistError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''The value of the identifier argument is unknown or illegal
in this repository.'''
OAIPMHError.__init__(self, 'idDoesNotExist', message)
# noRecordsMatch (ListIdentifiers, ListRecords)
class NoRecordsMatchError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''The combination of the values of the from, until, set and
metadataPrefix arguments results in an empty list.'''
OAIPMHError.__init__(self, 'noRecordsMatch', message)
# noMetadataFormats (ListMetadataFormats)
class NoMetadataFormatsError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''There are no metadata formats available for the specified item.'''
OAIPMHError.__init__(self, 'noMetadataFormats', message)
# noSetHierarchy (ListSets, ListIdentifiers, ListRecords)
class NoSetHierarchyError(OAIPMHError):
def __init__(self, message=''):
if not message:
message = '''The repository does not support sets.'''
OAIPMHError.__init__(self, 'noSetHierarchy', message)
| agpl-3.0 | -6,533,082,040,135,875,000 | 36.688889 | 92 | 0.625 | false |
LI3DS/lids-api | api_li3ds/apis/sensor.py | 1 | 2589 | # -*- coding: utf-8 -*-
from flask_restplus import fields
from api_li3ds.app import api, Resource, defaultpayload
from api_li3ds.database import Database
nssensor = api.namespace('sensors', description='sensors related operations')
sensor_model_post = nssensor.model(
'Sensor Model Post',
{
'name': fields.String,
'serial_number': fields.String,
'brand': fields.String,
'model': fields.String,
'description': fields.String,
'type': fields.String(required=True),
'specifications': fields.Raw,
})
sensor_model = nssensor.inherit(
'Sensor Model',
sensor_model_post,
{
'id': fields.Integer
})
@nssensor.route('/', endpoint='sensors')
class Sensors(Resource):
@nssensor.marshal_with(sensor_model)
def get(self):
'''List sensors'''
return Database.query_asjson("select * from li3ds.sensor")
@api.secure
@nssensor.expect(sensor_model_post)
@nssensor.marshal_with(sensor_model)
@nssensor.response(201, 'Sensor created')
def post(self):
'''Create a sensor'''
return Database.query_asdict(
"""
insert into li3ds.sensor (name, serial_number, brand,
model, description, specifications, type)
values (%(name)s, %(serial_number)s, %(brand)s,
%(model)s, %(description)s, %(specifications)s, %(type)s)
returning *
""",
defaultpayload(api.payload)
), 201
@nssensor.route('/<int:id>/', endpoint='sensor')
@nssensor.response(404, 'Sensor not found')
class OneSensor(Resource):
@nssensor.marshal_with(sensor_model)
def get(self, id):
'''Get one sensor given its identifier'''
res = Database.query_asjson(
"select * from li3ds.sensor where id=%s", (id,)
)
if not res:
nssensor.abort(404, 'sensor not found')
return res
@api.secure
@nssensor.response(410, 'Sensor deleted')
def delete(self, id):
'''Delete a sensor given its identifier'''
res = Database.rowcount("delete from li3ds.sensor where id=%s", (id,))
if not res:
nssensor.abort(404, 'Sensor not found')
return '', 410
@nssensor.route('/types/', endpoint='sensor_types')
class Sensor_types(Resource):
def get(self):
'''Sensor type list'''
return Database.query_aslist(
'''select unnest(enum_range(enum_first(null::li3ds.sensor_type),
null::li3ds.sensor_type))'''
)
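# --- Added usage note (not part of the original module) ---
# Hedged sketch of the HTTP surface defined above; the exact URL prefix depends
# on how the Flask app and this namespace are mounted, so the paths below are
# relative and illustrative only:
#   GET    /sensors/          list sensors
#   POST   /sensors/          create a sensor (JSON body shaped like 'Sensor Model Post')
#   GET    /sensors/<id>/     fetch one sensor, 404 if unknown
#   DELETE /sensors/<id>/     delete a sensor, 410 on success
#   GET    /sensors/types/    list the li3ds.sensor_type enum values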
| gpl-3.0 | 7,488,057,765,695,068,000 | 27.766667 | 79 | 0.592507 | false |
BruceDLong/CodeDog | xlator_Java.py | 1 | 47251 | #This file, along with Lib_Java.py, specifies to the CodeGenerator how to compile CodeDog source code into Java source code.
import progSpec
import codeDogParser
from progSpec import cdlog, cdErr, logLvl
from codeGenerator import codeItemRef, codeUserMesg, codeAllocater, codeParameterList, makeTagText, codeAction, getModeStateNames, codeExpr, convertType, generateGenericStructName, getGenericTypeSpec
###### Routines to track types of identifiers and to look up type based on identifier.
def getContainerType(typeSpec, actionOrField):
idxType=''
if progSpec.isNewContainerTempFunc(typeSpec):
containerTypeSpec = progSpec.getContainerSpec(typeSpec)
if 'owner' in containerTypeSpec: owner=progSpec.getOwnerFromTypeSpec(containerTypeSpec)
else: owner='me'
if 'indexType' in containerTypeSpec:
if 'IDXowner' in containerTypeSpec['indexType']:
idxOwner=containerTypeSpec['indexType']['IDXowner'][0]
idxType=containerTypeSpec['indexType']['idxBaseType'][0][0]
idxType=applyOwner(typeSpec, idxOwner, idxType, '')
else:
idxType=containerTypeSpec['indexType']['idxBaseType'][0][0]
else:
idxType = progSpec.getFieldType(typeSpec)
adjustBaseTypes(idxType, True)
if(isinstance(containerTypeSpec['datastructID'], str)):
datastructID = containerTypeSpec['datastructID']
else: # it's a parseResult
datastructID = containerTypeSpec['datastructID'][0]
elif progSpec.isOldContainerTempFunc(typeSpec): print("Deprecated container type:", typeSpec); exit(2);
else:
owner = progSpec.getOwnerFromTypeSpec(typeSpec)
datastructID = 'None'
return [datastructID, idxType, owner]
def adjustBaseTypes(fieldType, isContainer):
javaType = ''
if fieldType !="":
if isContainer:
if fieldType=='int': javaType = 'Integer'
elif fieldType=='long': javaType = 'Long'
elif fieldType=='double': javaType = 'Double'
            elif fieldType=='timeValue': javaType = 'Long' # this is a hack and should be removed ASAP
elif fieldType=='int64': javaType = 'Long'
elif fieldType=='string': javaType = 'String'
elif fieldType=='uint': javaType = 'Integer'
else:
javaType = fieldType
else:
if(fieldType=='int32'): javaType= 'int'
elif(fieldType=='uint32'or fieldType=='uint'): javaType='int' # these should be long but Java won't allow
elif(fieldType=='int64' or fieldType=='uint64'):javaType= 'long'
elif(fieldType=='uint8' or fieldType=='uint16'):javaType='uint32'
elif(fieldType=='int8' or fieldType=='int16'): javaType='int32'
elif(fieldType=='char' ): javaType= 'char'
elif(fieldType=='bool' ): javaType= 'boolean'
elif(fieldType=='string'): javaType= 'String'
else: javaType=progSpec.flattenObjectName(fieldType)
return javaType
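# Added illustrative note (not from the original source): expected mappings, e.g.
#   adjustBaseTypes('int', True)     -> 'Integer'   (boxed type for container elements)
#   adjustBaseTypes('uint32', False) -> 'int'
#   adjustBaseTypes('string', False) -> 'String'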
def isJavaPrimativeType(fieldType):
if fieldType=="int" or fieldType=="boolean" or fieldType=="float" or fieldType=="double" or fieldType=="long" or fieldType=="char": return True
return False
def applyOwner(typeSpec, owner, langType, actionOrField, varMode):
if owner=='const':
if actionOrField=="field": langType = "final static "+langType
else: langType = "final "+langType
elif owner=='me':
langType = langType
elif owner=='my':
langType = langType
elif owner=='our':
langType = langType
elif owner=='their':
langType = langType
elif owner=='itr':
itrType = progSpec.fieldTypeKeyword(progSpec.getItrTypeOfDataStruct(langType, typeSpec))
langType = itrType
if itrType=='nodeType':
print("TODO: design iterators in Java!!!!!!!!!!!!!!!!!!!!!!!!!!",itrType)
exit(2)
elif owner=='we':
langType = 'static '+langType
else:
cdErr("ERROR: Owner of type not valid '" + owner + "'")
return langType
def getUnwrappedClassOwner(classes, typeSpec, fieldType, varMode, ownerIn):
ownerOut = ownerIn
baseType = progSpec.isWrappedType(classes, fieldType)
if baseType!=None: # TODO: When this is all tested and stable, un-hardcode and optimize this!!!!!
if 'ownerMe' in baseType:
ownerOut = 'their'
else:
ownerOut=ownerIn
return ownerOut
def getReqTagString(classes, typeSpec):
reqTagString = ""
reqTagList = progSpec.getReqTagList(typeSpec)
if(reqTagList != None):
reqTagString = "<"
count = 0
for reqTag in reqTagList:
reqOwner = progSpec.getOwnerFromTemplateArg(reqTag)
varTypeKeyword = progSpec.getTypeFromTemplateArg(reqTag)
unwrappedOwner=getUnwrappedClassOwner(classes, typeSpec, varTypeKeyword, 'alloc', reqOwner)
unwrappedTypeKeyword = progSpec.getUnwrappedClassFieldTypeKeyWord(classes, varTypeKeyword)
reqType = adjustBaseTypes(unwrappedTypeKeyword, True)
if(count>0):reqTagString += ", "
reqTagString += reqType
count += 1
reqTagString += ">"
return reqTagString
def xlateLangType(classes, typeSpec, owner, fieldType, varMode, actionOrField, xlator):
# varMode is 'var' or 'arg' or 'alloc'. Large items are passed as pointers
if progSpec.isOldContainerTempFunc(typeSpec): print("Deprecated container type:", typeSpec); exit(2);
if(isinstance(fieldType, str)):
langType = adjustBaseTypes(fieldType, progSpec.isNewContainerTempFunc(typeSpec))
else: langType = progSpec.flattenObjectName(fieldType[0])
langType = applyOwner(typeSpec, owner, langType, actionOrField, varMode)
if langType=='TYPE ERROR': print(langType, owner, fieldType);
InnerLangType = langType
reqTagString = getReqTagString(classes, typeSpec)
langType += reqTagString
if progSpec.isNewContainerTempFunc(typeSpec):
return [langType, InnerLangType]
if owner =="const": InnerLangType = fieldType
return [langType, InnerLangType]
def makePtrOpt(typeSpec):
return('')
def isComparableType(typeSpec):
fTypeKW = progSpec.fieldTypeKeyword(typeSpec)
if fTypeKW == 'keyType': return True
if 'generic' in typeSpec and typeSpec['generic'] == 'keyType' and fTypeKW == 'string':
return True
return False
def codeIteratorOperation(itrCommand, fieldType):
result = ''
if itrCommand=='goNext': result='%0.next()'
elif itrCommand=='goPrev':result='%0.JAVA ERROR!'
elif itrCommand=='key': result='%0.getKey()'
elif itrCommand=='val': result='%0'
return result
def recodeStringFunctions(name, typeSpec):
if name == "size": name = "length"
elif name == "subStr":
typeSpec['codeConverter']='%0.substring(%1, %1+%2)'
typeSpec['fieldType']='String'
elif name == "append":
typeSpec['codeConverter']='%0 += %1'
return [name, typeSpec]
def langStringFormatterCommand(fmtStr, argStr):
fmtStr=fmtStr.replace(r'%i', r'%d')
fmtStr=fmtStr.replace(r'%l', r'%d')
S='String.format('+'"'+ fmtStr +'"'+ argStr +')'
return S
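# Added illustrative note (not from the original source): for example,
#   langStringFormatterCommand('x=%i', ', x')  ->  'String.format("x=%d", x)'
# i.e. CodeDog-style %i/%l specifiers are rewritten to Java's %d.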
def LanguageSpecificDecorations(classes, S, typeSpec, owner, LorRorP_Val, xlator):
return S
def checkForTypeCastNeed(lhsTypeSpec, rhsTypeSpec, RHScodeStr):
LHS_KeyType = progSpec.fieldTypeKeyword(lhsTypeSpec)
RHS_KeyType = progSpec.fieldTypeKeyword(rhsTypeSpec)
    if LHS_KeyType == 'bool' or LHS_KeyType == 'boolean':
if progSpec.typeIsPointer(rhsTypeSpec):
return '(' + RHScodeStr + ' == null)'
if (RHS_KeyType=='int' or RHS_KeyType=='flag'):
if RHScodeStr[0]=='!': return '(' + RHScodeStr[1:] + ' == 0)'
else: return '(' + RHScodeStr + ' != 0)'
if RHScodeStr == "0": return "false"
if RHScodeStr == "1": return "true"
return RHScodeStr
def getTheDerefPtrMods(itemTypeSpec):
if itemTypeSpec!=None and isinstance(itemTypeSpec, dict) and 'owner' in itemTypeSpec:
if progSpec.isOldContainerTempFunc(itemTypeSpec): print("Deprecated container type:", itemTypeSpec); exit(2);
return ['', '', False]
def derefPtr(varRef, itemTypeSpec):
[leftMod, rightMod, isDerefd] = getTheDerefPtrMods(itemTypeSpec)
S = leftMod + varRef + rightMod
return [S, isDerefd]
def ChoosePtrDecorationForSimpleCase(owner):
#print("TODO: finish ChoosePtrDecorationForSimpleCase")
return ['','', '','']
def chooseVirtualRValOwner(LVAL, RVAL):
return ['','']
def determinePtrConfigForAssignments(LVAL, RVAL, assignTag, codeStr):
return ['','', '','']
def getCodeAllocStr(varTypeStr, owner):
if(owner!='const'): S="new "+varTypeStr
else: print("ERROR: Cannot allocate a 'const' variable."); exit(1);
return S
def getCodeAllocSetStr(varTypeStr, owner, value):
S=getCodeAllocStr(varTypeStr, owner)
S+='('+value+')'
return S
def getConstIntFieldStr(fieldName, fieldValue):
S= "public static final int "+fieldName+ " = " + fieldValue+ ";\n"
return(S)
def getEnumStr(fieldName, enumList):
S = ''
count=0
for enumName in enumList:
S += " " + getConstIntFieldStr(enumName, str(count))
count=count+1
S += "\n"
# S += 'public static final String ' + fieldName+'Strings[] = {"'+('", "'.join(enumList))+'"};\n'
return(S)
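# Added illustrative note (not from the original source): getEnumStr('color',
# ['red', 'green', 'blue']) is expected to emit Java constants roughly like:
#     public static final int red = 0;
#     public static final int green = 1;
#     public static final int blue = 2;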
def getEnumStringifyFunc(className, enumList):
print("TODO: finish getEnumStringifyFunc")
def codeIdentityCheck(S, S2, retType1, retType2, opIn):
S2 = adjustQuotesForChar(retType1, retType2, S2)
if opIn == '===':
#print("TODO: finish codeIdentityCk")
return S + " == "+ S2
else:
lFType = progSpec.fieldTypeKeyword(retType1)
rFType = progSpec.fieldTypeKeyword(retType2)
if (lFType=='String' or lFType == "string") and opIn=="==" and (rFType == "String" or rFType == "string"):
return S+'.equals('+S2+')'
else:
if (opIn == '=='): opOut=' == '
elif (opIn == '!='): opOut=' != '
elif (opIn == '!=='): opOut=' != '
else: print("ERROR: '==' or '!=' or '===' or '!==' expected."); exit(2)
return S+opOut+S2
return S
def codeComparisonStr(S, S2, retType1, retType2, op):
S3 = ""
if (op == '<'):
if isComparableType(retType1):
S+='.compareTo('
S3= ") < 0"
else: S+=' < '
elif (op == '>'):
if isComparableType(retType1):
S+='.compareTo('
S3= ") > 0"
else: S+=' > '
elif (op == '<='): S+=' <= '
elif (op == '>='): S+=' >= '
else: print("ERROR: One of <, >, <= or >= expected in code generator."); exit(2)
S2 = adjustQuotesForChar(retType1, retType2, S2)
[S2, isDerefd]=derefPtr(S2, retType2)
S+=S2+S3
return S
###################################################### CONTAINERS
def getContaineCategory(containerSpec):
fTypeKW = progSpec.fieldTypeKeyword(containerSpec)
if fTypeKW=='DblLinkedList':
return 'DblLinkedList'
elif fTypeKW=='TreeMap' or fTypeKW=='Java_Map' or 'RBTreeMap' in fTypeKW or "__Map_" in fTypeKW:
return 'MAP'
elif fTypeKW=='list' or fTypeKW=='Java_ArrayList' or "__List_" in fTypeKW or "__CDList" in fTypeKW:
return 'LIST'
elif 'Multimap' in fTypeKW:
return 'MULTIMAP'
return None
def getContainerTypeInfo(classes, containerType, name, idxType, typeSpecIn, paramList, genericArgs, xlator):
convertedIdxType = ""
typeSpecOut = typeSpecIn
if progSpec.isNewContainerTempFunc(typeSpecIn): return(name, typeSpecOut, paramList, convertedIdxType)
if progSpec.isOldContainerTempFunc(typeSpecIn): print("Deprecated container type:", typeSpecIn); exit(2);
return(name, typeSpecOut, paramList, convertedIdxType)
def codeArrayIndex(idx, containerType, LorR_Val, previousSegName, idxTypeSpec):
if LorR_Val=='RVAL':
#Next line may be cause of bug with printing modes. remove 'not'?
if (previousSegName in getModeStateNames()):
S= '.get((int)' + idx + ')'
elif (containerType== 'ArrayList' or containerType== 'TreeMap' or containerType== 'Java_ArrayList' or containerType== 'Map' or containerType== 'multimap'or containerType== 'Java_Map'):
S= '.get(' + idx + ')'
elif (containerType== 'string'):
S= '.charAt(' + idx + ')' # '.substring(' + idx + ', '+ idx + '+1' +')'
else: S= '[' + idx +']'
else:
if containerType== 'ArrayList' or containerType== 'Java_Map' or containerType== 'Java_ArrayList': S = '.get('+idx+')'
else: S= '[' + idx +']'
return S
###################################################### CONTAINER REPETITIONS
def codeRangeSpec(traversalMode, ctrType, repName, S_low, S_hi, indent, xlator):
if(traversalMode=='Forward' or traversalMode==None):
S = indent + "for("+ctrType+" " + repName+'='+ S_low + "; " + repName + "!=" + S_hi +"; "+ xlator['codeIncrement'](repName) + "){\n"
elif(traversalMode=='Backward'):
S = indent + "for("+ctrType+" " + repName+'='+ S_hi + "-1; " + repName + ">=" + S_low +"; --"+ repName + "){\n"
return (S)
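# Added illustrative note (not from the original source): a Forward range such as
# codeRangeSpec('Forward', 'int', 'i', '0', 'n', '    ', xlator) should render
# roughly as:
#     for(int i=0; i!=n; ++i){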
def iterateRangeContainerStr(classes,localVarsAlloc,StartKey,EndKey,ctnrTSpec,ctnrOwner,repName,ctnrName,datastructID,idxTypeKW,indent,xlator):
    willBeModifiedDuringTraversal=True # TODO: Set this programmatically later.
actionText = ""
loopCounterName = ""
ctnrOwner = progSpec.getOwnerFromTypeSpec(ctnrTSpec)
containedType = progSpec.getFieldTypeNew(ctnrTSpec)
ctrlVarsTypeSpec = {'owner':ctnrOwner, 'fieldType':containedType}
reqTagList = progSpec.getReqTagList(ctnrTSpec)
containerCat = getContaineCategory(ctnrTSpec)
if containerCat=="MAP" or containerCat=="MULTIMAP":
valueFieldType = progSpec.fieldTypeKeyword(ctnrTSpec)
if(reqTagList != None):
ctrlVarsTypeSpec['owner'] = progSpec.getOwnerFromTemplateArg(reqTagList[1])
ctrlVarsTypeSpec['fieldType'] = progSpec.getTypeFromTemplateArg(reqTagList[1])
idxTypeKW = progSpec.getTypeFromTemplateArg(reqTagList[0])
valueFieldType = progSpec.getTypeFromTemplateArg(reqTagList[1])
keyVarSpec = {'owner':ctnrTSpec['owner'], 'fieldType':containedType}
loopCounterName = repName+'_key'
idxTypeKW = adjustBaseTypes(idxTypeKW, True)
valueFieldType = adjustBaseTypes(valueFieldType, True)
localVarsAlloc.append([loopCounterName, keyVarSpec]) # Tracking local vars for scope
localVarsAlloc.append([repName, ctrlVarsTypeSpec]) # Tracking local vars for scope
actionText += (indent + 'for(Map.Entry<'+idxTypeKW+','+valueFieldType+'> '+repName+'Entry : '+ctnrName+'.subMap('+StartKey+', '+EndKey+').entrySet()){\n' +
indent + ' '+valueFieldType+' '+ repName + ' = ' + repName+'Entry.getValue();\n' +
indent + ' ' +idxTypeKW +' '+ repName+'_rep = ' + repName+'Entry.getKey();\n' )
elif datastructID=='list' or (datastructID=='deque' and not willBeModifiedDuringTraversal): pass;
elif datastructID=='deque' and willBeModifiedDuringTraversal: pass;
else:
print("DSID iterateRangeContainerStr:",datastructID,containerCat)
exit(2)
return [actionText, loopCounterName]
def iterateContainerStr(classes,localVarsAlloc,ctnrTSpec,repName,ctnrName,isBackward,indent,genericArgs,xlator):
#TODO: handle isBackward
    willBeModifiedDuringTraversal=True # TODO: Set this programmatically later.
[datastructID, idxTypeKW, ctnrOwner]=getContainerType(ctnrTSpec, 'action')
actionText = ""
loopCounterName = repName+'_key'
owner = progSpec.getContainerFirstElementOwner(ctnrTSpec)
containedType = progSpec.getContainerFirstElementType(ctnrTSpec)
ctrlVarsTypeSpec = {'owner':ctnrTSpec['owner'], 'fieldType':containedType}
reqTagList = progSpec.getReqTagList(ctnrTSpec)
[LDeclP, RDeclP, LDeclA, RDeclA] = ChoosePtrDecorationForSimpleCase(ctnrOwner)
itrTypeSpec = progSpec.getItrTypeOfDataStruct(datastructID, ctnrTSpec)
itrOwner = progSpec.getOwnerFromTypeSpec(itrTypeSpec)
[LNodeP, RNodeP, LNodeA, RNodeA] = ChoosePtrDecorationForSimpleCase(itrOwner)
itrName = repName + "Itr"
containerCat = getContaineCategory(ctnrTSpec)
itrIncStr = ""
if containerCat=='DblLinkedList': cdErr("TODO: handle dblLinkedList")
if containerCat=='MAP':
reqTagString = getReqTagString(classes, ctnrTSpec)
if(reqTagList != None):
ctrlVarsTypeSpec['owner'] = progSpec.getOwnerFromTemplateArg(reqTagList[1])
ctrlVarsTypeSpec['fieldType'] = progSpec.getTypeFromTemplateArg(reqTagList[1])
if datastructID=='TreeMap' or datastructID=='Java_Map':
keyVarSpec = {'owner':ctnrTSpec['owner'], 'fieldType':idxTypeKW, 'codeConverter':(repName+'.getKey()')}
ctrlVarsTypeSpec['codeConverter'] = (repName+'.getValue()')
iteratorTypeStr="Map.Entry"+reqTagString
actionText += indent + "for("+iteratorTypeStr+" " + repName+' :'+ ctnrName+".entrySet()){\n"
else:
keyVarSpec = {'owner':ctnrTSpec['owner'], 'fieldType':idxTypeKW, 'codeConverter':(repName+'.node.key')}
ctrlVarsTypeSpec['codeConverter'] = (repName+'.node.value')
itrType = progSpec.fieldTypeKeyword(progSpec.getItrTypeOfDataStruct(datastructID, ctnrTSpec)) + ' '
frontItr = ctnrName+'.front()'
if not 'generic' in ctnrTSpec: itrType += reqTagString
actionText += (indent + 'for('+itrType + itrName+' ='+frontItr + '; ' + itrName + '.node!='+ctnrName+'.end().node'+'; '+repName+'.goNext()){\n')
#actionText += (indent + "for("+itrType + itrName+' ='+frontItr + "; " + itrName + " !=" + ctnrName+RDeclP+'end()' +"; ++"+itrName + " ){\n"
# + indent+" "+itrType+repName+" = *"+itrName+";\n")
elif containerCat=="LIST":
containedOwner = progSpec.getOwnerFromTypeSpec(ctnrTSpec)
keyVarSpec = {'owner':containedOwner, 'fieldType':containedType}
[iteratorTypeStr, innerType]=convertType(ctrlVarsTypeSpec, 'var', 'action', genericArgs, xlator)
loopVarName=repName+"Idx";
if(isBackward):
actionText += (indent + "for(int "+loopVarName+'='+ctnrName+'.size()-1; ' + loopVarName +' >=0; --' + loopVarName+'){\n'
+ indent + indent + iteratorTypeStr+' '+repName+" = "+ctnrName+".get("+loopVarName+");\n")
else:
actionText += (indent + "for(int "+loopVarName+"=0; " + loopVarName +' != ' + ctnrName+'.size(); ' + loopVarName+' += 1){\n'
+ indent + indent + iteratorTypeStr+' '+repName+" = "+ctnrName+".get("+loopVarName+");\n")
else: cdErr("iterateContainerStr() datastructID = " + datastructID)
localVarsAlloc.append([loopCounterName, keyVarSpec]) # Tracking local vars for scope
localVarsAlloc.append([repName, ctrlVarsTypeSpec]) # Tracking local vars for scope
return [actionText, loopCounterName, itrIncStr]
###################################################### EXPRESSION CODING
def codeFactor(item, objsRefed, returnType, expectedTypeSpec, LorRorP_Val, genericArgs, xlator):
#### ( value | ('(' + expr + ')') | ('!' + expr) | ('-' + expr) | varRef("varFunRef"))
#print(' factor: ', item)
S=''
retTypeSpec='noType'
item0 = item[0]
#print("ITEM0=", item0, ">>>>>", item)
if (isinstance(item0, str)):
if item0=='(':
[S2, retTypeSpec] = codeExpr(item[1], objsRefed, returnType, expectedTypeSpec, LorRorP_Val, genericArgs, xlator)
S+='(' + S2 +')'
elif item0=='!':
[S2, retTypeSpec] = codeExpr(item[1], objsRefed, returnType, expectedTypeSpec, LorRorP_Val, genericArgs, xlator)
if(progSpec.typeIsPointer(retTypeSpec)):
S= '('+S2+' == null)'
retTypeSpec='bool'
else: S+='!' + S2
elif item0=='-':
[S2, retTypeSpec] = codeExpr(item[1], objsRefed, returnType, expectedTypeSpec, LorRorP_Val, genericArgs, xlator)
S+='-' + S2
elif item0=='[':
count=0
tmp="(Arrays.asList("
for expr in item[1:-1]:
count+=1
[S2, exprTypeSpec] = codeExpr(expr, objsRefed, returnType, expectedTypeSpec, LorRorP_Val, genericArgs, xlator)
if not exprTypeSpec=='noType':
retTypeSpec = adjustBaseTypes(exprTypeSpec, True)
if count>1: tmp+=', '
tmp+=S2
if exprTypeSpec=='Long' or exprTypeSpec=='noType':
if '*' in S2:
numVal = S2
#print 'numVal', numVal
elif int(S2) > 2147483647:
tmp+="L"
retTypeSpec = 'Long'
tmp+="))"
retTypeKW = progSpec.fieldTypeKeyword(retTypeSpec)
if isinstance(exprTypeSpec,str):typeKeyword = exprTypeSpec
elif progSpec.isAContainer(returnType):
reqType = progSpec.getContainerFirstElementType(returnType)
typeKeyword = progSpec.fieldTypeKeyword(reqType)
typeKeyword = adjustBaseTypes(typeKeyword, True)
else: typeKeyword = retTypeKW
S+='new ArrayList<'+typeKeyword+'>'+tmp # ToDo: make this handle things other than long.
else:
expected_KeyType = progSpec.varTypeKeyWord(expectedTypeSpec)
if(item0[0]=="'"): S+=codeUserMesg(item0[1:-1], xlator); retTypeSpec='String'
elif (item0[0]=='"'):
if returnType != None and returnType["fieldType"]=="char":
retTypeSpec='char'
innerS=item0[1:-1]
if len(innerS)==1:
S+="'"+item0[1:-1] +"'"
else:
cdErr("Characters must have exactly 1 character.")
else:
S+='"'+item0[1:-1] +'"'
retTypeSpec='String'
else:
S+=item0;
if retTypeSpec == 'noType' and progSpec.typeIsInteger(expected_KeyType):
retTypeSpec=expected_KeyType
if retTypeSpec == 'noType' and progSpec.isStringNumeric(item0):
retTypeSpec={'owner': 'literal', 'fieldType': 'numeric'}
if retTypeSpec == 'noType' and progSpec.typeIsInteger(expected_KeyType):retTypeSpec=expected_KeyType
else: # CODEDOG LITERALS
if isinstance(item0[0], str):
S+=item0[0]
if '"' in S or "'" in S: retTypeSpec = 'string'
if '.' in S: retTypeSpec = 'double'
if isinstance(S, int): retTypeSpec = 'int64'
else: retTypeSpec = 'int32'
else:
[codeStr, retTypeSpec, prntType, AltIDXFormat]=codeItemRef(item0, 'RVAL', objsRefed, returnType, LorRorP_Val, genericArgs, xlator)
if(codeStr=="NULL"):
codeStr="null"
retTypeSpec={'owner':"PTR"}
typeKeyword = progSpec.fieldTypeKeyword(retTypeSpec)
if (len(item0[0]) > 1 and item0[0][0]==typeKeyword and item0[0][1] and item0[0][1]=='('):
codeStr = 'new ' + codeStr
S+=codeStr # Code variable reference or function call
return [S, retTypeSpec]
######################################################
def adjustQuotesForChar(typeSpec1, typeSpec2, S):
fieldType1 = progSpec.fieldTypeKeyword(typeSpec1)
fieldType2 = progSpec.fieldTypeKeyword(typeSpec2)
if fieldType1 == "char" and (fieldType2 == 'string' or fieldType2 == 'String') and S[0] == '"':
return("'" + S[1:-1] + "'")
return(S)
def adjustConditional(S, conditionType):
if not isinstance(conditionType, str):
if conditionType['owner']=='our' or conditionType['owner']=='their' or conditionType['owner']=='my' or progSpec.isStruct(conditionType['fieldType']):
if S[0]=='!': S = S[1:]+ " == true"
else: S+=" != null"
elif conditionType['owner']=='me' and (conditionType['fieldType']=='flag' or progSpec.typeIsInteger(conditionType['fieldType'])):
if S[0]=='!': S = '('+S[1:]+' == 0)'
else: S = '('+S+') != 0'
conditionType='bool'
return [S, conditionType]
def codeSpecialReference(segSpec, objsRefed, genericArgs, xlator):
S=''
fieldType='void' # default to void
retOwner='me' # default to 'me'
funcName=segSpec[0]
if(len(segSpec)>2): # If there are arguments...
paramList=segSpec[2]
if(funcName=='print'):
S+='System.out.print('
count = 0
for P in paramList:
if(count!=0): S+=" + "
count+=1
[S2, argTypeSpec]=codeExpr(P[0], objsRefed, None, None, 'PARAM', genericArgs, xlator)
if 'fieldType' in argTypeSpec:
fieldType = progSpec.fieldTypeKeyword(argTypeSpec)
fieldType = adjustBaseTypes(fieldType, False)
else: fieldType = argTypeSpec
if fieldType == "timeValue" or fieldType == "int" or fieldType == "double": S2 = '('+S2+')'
S+=S2
S+=")"
retOwner='me'
fieldType='string'
elif(funcName=='AllocateOrClear'):
[varName, varTypeSpec]=codeExpr(paramList[0][0], objsRefed, None, None, 'PARAM', genericArgs, xlator)
S+='if('+varName+' != null){'+varName+'.clear();} else {'+varName+" = "+codeAllocater(varTypeSpec, genericArgs, xlator)+"();}"
elif(funcName=='Allocate'):
[varName, varTypeSpec]=codeExpr(paramList[0][0], objsRefed, None, None, 'PARAM', genericArgs, xlator)
fieldType = progSpec.fieldTypeKeyword(varTypeSpec)
S+=varName+" = "+codeAllocater(varTypeSpec, genericArgs, xlator)+'('
count=0 # TODO: As needed, make this call CodeParameterList() with modelParams of the constructor.
if fieldType=='workerMsgThread':
S += '"workerMsgThread"'
else:
for P in paramList[1:]:
if(count>0): S+=', '
[S2, argTypeSpec]=codeExpr(P[0], objsRefed, None, None, 'PARAM', genericArgs, xlator)
S+=S2
count=count+1
S+=")"
elif(funcName=='break'):
if len(paramList)==0: S='break'
elif(funcName=='return'):
if len(paramList)==0: S+='return'
elif(funcName=='self'):
if len(paramList)==0: S+='this'
elif(funcName=='toStr'):
if len(paramList)==1:
            [S2, argTypeSpec]=codeExpr(paramList[0][0], objsRefed, None, None, 'PARAM', genericArgs, xlator)
[S2, isDerefd]=derefPtr(S2, argTypeSpec)
S+='String.valueOf('+S2+')'
fieldType='String'
else: # Not parameters, i.e., not a function
if(funcName=='self'):
S+='this'
return [S, retOwner, fieldType]
def checkIfSpecialAssignmentFormIsNeeded(AltIDXFormat, RHS, rhsType, LHS, LHSParentType, LHS_FieldType):
# Check for string A[x] = B; If so, render A.put(B,x)
[containerType, idxType, owner]=getContainerType(AltIDXFormat[1], "")
if LHSParentType == 'string' and LHS_FieldType == 'char':
S=AltIDXFormat[0] + '= replaceCharAt(' +AltIDXFormat[0]+', '+ AltIDXFormat[2] + ', ' + RHS + ');\n'
elif containerType == 'ArrayList':
S=AltIDXFormat[0] + '.add(' + AltIDXFormat[2] + ', ' + RHS + ');\n'
elif containerType == 'TreeMap' or containerType == 'Java_Map':
S=AltIDXFormat[0] + '.put(' + AltIDXFormat[2] + ', ' + RHS + ');\n'
elif containerType == 'RBTreeMap' or containerType[:2]=="__" and 'Map' in containerType:
S=AltIDXFormat[0] + '.insert(' + AltIDXFormat[2] + ', ' + RHS + ');\n'
else:
print("ERROR in checkIfSpecialAssignmentFormIsNeeded: containerType not found for ", containerType)
exit(1)
return S
############################################
def codeMain(classes, tags, objsRefed, xlator):
return ["", ""]
def codeArgText(argFieldName, argType, argOwner, typeSpec, makeConst, typeArgList, xlator):
return argType + " " +argFieldName
def codeStructText(classes, attrList, parentClass, classInherits, classImplements, structName, structCode, tags):
classAttrs=''
Platform = progSpec.fetchTagValue(tags, 'Platform')
if len(attrList)>0:
for attr in attrList:
if attr=='abstract': classAttrs += 'abstract '
if parentClass != "":
parentClass = parentClass.replace('::', '_')
parentClass = progSpec.getUnwrappedClassFieldTypeKeyWord(classes, structName)
parentClass=' extends ' +parentClass
elif classInherits!=None:
parentClass=' extends ' + classInherits[0][0]
if classImplements!=None:
# TODO: verify if classImplements is used
#print(structName, "Implements: " , classImplements)
parentClass+=' implements '
count =0
for item in classImplements[0]:
if count>0:
parentClass+= ', '
parentClass+= item
count += 1
if structName =="GLOBAL" and Platform == 'Android':
classAttrs = "public " + classAttrs
S= "\n"+classAttrs +"class "+structName+''+parentClass+" {\n" + structCode + '};\n'
typeArgList = progSpec.getTypeArgList(structName)
if(typeArgList != None):
templateHeader = codeTemplateHeader(structName, typeArgList)
S=templateHeader+" {\n" + structCode + '};\n'
return([S,""])
def produceTypeDefs(typeDefMap, xlator):
return ''
def addSpecialCode(filename):
S='\n\n//////////// Java specific code:\n'
return S
def addGLOBALSpecialCode(classes, tags, xlator):
filename = makeTagText(tags, 'FileName')
specialCode ='const String: filename <- "' + filename + '"\n'
GLOBAL_CODE="""
struct GLOBAL{
%s
}
""" % (specialCode)
codeDogParser.AddToObjectFromText(classes[0], classes[1], GLOBAL_CODE, 'Java special code')
def codeNewVarStr(classes, tags, lhsTypeSpec, varName, fieldDef, indent, objsRefed, actionOrField, genericArgs, localVarsAllocated, xlator):
varDeclareStr = ''
assignValue = ''
isAllocated = fieldDef['isAllocated']
owner = progSpec.getTypeSpecOwner(lhsTypeSpec)
useCtor = False
if fieldDef['paramList'] and fieldDef['paramList'][-1] == "^&useCtor//8":
del fieldDef['paramList'][-1]
useCtor = True
[convertedType, innerType] = convertType(lhsTypeSpec, 'var', actionOrField, genericArgs, xlator)
reqTagList = progSpec.getReqTagList(lhsTypeSpec)
fieldType = progSpec.fieldTypeKeyword(lhsTypeSpec)
if reqTagList and xlator['renderGenerics']=='True' and not progSpec.isWrappedType(classes, fieldType) and not progSpec.isAbstractStruct(classes[0], fieldType):
convertedType = generateGenericStructName(fieldType, reqTagList, genericArgs, xlator)
allocFieldType = convertedType
lhsTypeSpec = getGenericTypeSpec(genericArgs, lhsTypeSpec, xlator)
if 'fromImplemented' in lhsTypeSpec: lhsTypeSpec.pop('fromImplemented')
localVarsAllocated.append([varName, lhsTypeSpec]) # Tracking local vars for scope
else:
localVarsAllocated.append([varName, lhsTypeSpec]) # Tracking local vars for scope
containerTypeSpec = progSpec.getContainerSpec(lhsTypeSpec)
if progSpec.isOldContainerTempFunc(lhsTypeSpec): print("Deprecated container type:", lhsTypeSpec); exit(2);
isAContainer=progSpec.isNewContainerTempFunc(lhsTypeSpec)
fieldType = adjustBaseTypes(convertedType, isAContainer)
if isinstance(containerTypeSpec, str) and containerTypeSpec == None:
if(fieldDef['value']):
[S2, rhsTypeSpec]=codeExpr(fieldDef['value'][0], objsRefed, None, None, 'RVAL', genericArgs, xlator)
RHS = S2
assignValue=' = '+ RHS
#TODO: make test case
else: assignValue=''
elif(fieldDef['value']):
[S2, rhsTypeSpec]=codeExpr(fieldDef['value'][0], objsRefed, lhsTypeSpec, None, 'RVAL', genericArgs, xlator)
S2=checkForTypeCastNeed(convertedType, rhsTypeSpec, S2)
RHS = S2
if varTypeIsValueType(fieldType):
assignValue=' = '+ RHS
else:
#TODO: make test case
constructorExists=False # TODO: Use some logic to know if there is a constructor, or create one.
if (constructorExists):
assignValue=' = new ' + fieldType +'('+ RHS + ')'
else:
assignValue= ' = '+ RHS #' = new ' + fieldType +'();\n'+ indent + varName+' = '+RHS
else: # If no value was given:
CPL=''
if fieldDef['paramList'] != None: # call constructor # curly bracket param list
# Code the constructor's arguments
[CPL, paramTypeList] = codeParameterList(varName, fieldDef['paramList'], None, objsRefed, genericArgs, xlator)
if len(paramTypeList)==1:
if not isinstance(paramTypeList[0], dict):
print("\nPROBLEM: The return type of the parameter '", CPL, "' of "+varName+"(...) cannot be found and is needed. Try to define it.\n", paramTypeList)
exit(1)
rhsTypeSpec = paramTypeList[0]
rhsType = progSpec.getFieldType(rhsTypeSpec)
if not isinstance(rhsType, str) and fieldType==rhsType[0]:
assignValue = " = " + CPL # Act like a copy constructor
elif 'codeConverter' in paramTypeList[0]: #ktl 12.14.17
assignValue = " = " + CPL
else:
if isJavaPrimativeType(fieldType): assignValue = " = " + CPL
else: assignValue = " = new " + fieldType + CPL
if(assignValue==''): assignValue = ' = '+getCodeAllocStr(fieldType, owner)+CPL
elif varTypeIsValueType(fieldType):
if fieldType == 'long' or fieldType == 'int' or fieldType == 'float'or fieldType == 'double': assignValue=' = 0'
elif fieldType == 'string': assignValue=' = ""'
elif fieldType == 'boolean': assignValue=' = false'
elif fieldType == 'char': assignValue=" = ' '"
else: assignValue=''
else:assignValue= " = new " + fieldType + "()"
varDeclareStr= fieldType + " " + varName + assignValue
return(varDeclareStr)
def codeIncrement(varName):
return "++" + varName
def codeDecrement(varName):
return "--" + varName
def varTypeIsValueType(convertedType):
if (convertedType=='int' or convertedType=='long' or convertedType=='byte' or convertedType=='boolean' or convertedType=='char'
or convertedType=='float' or convertedType=='double' or convertedType=='short'):
return True
return False
def codeVarFieldRHS_Str(fieldName, convertedType, fieldType, typeSpec, paramList, objsRefed, isAllocated, typeArgList, genericArgs, xlator):
fieldValueText=""
fieldOwner=progSpec.getTypeSpecOwner(typeSpec)
if fieldOwner=='we':
convertedType = convertedType.replace('static ', '', 1)
if (not varTypeIsValueType(convertedType) and (fieldOwner=='me' or fieldOwner=='we' or fieldOwner=='const')):
if fieldOwner =="const": convertedType = fieldType
if paramList!=None:
#TODO: make test case
if paramList[-1] == "^&useCtor//8":
del paramList[-1]
[CPL, paramTypeList] = codeParameterList(fieldName, paramList, None, objsRefed, genericArgs, xlator)
fieldValueText=" = new " + convertedType + CPL
elif typeArgList == None:
fieldValueText=" = new " + convertedType + "()"
return fieldValueText
def codeConstField_Str(convertedType, fieldName, fieldValueText, className, indent, xlator ):
defn = indent + convertedType + ' ' + fieldName + fieldValueText +';\n';
decl = ''
return [defn, decl]
def codeVarField_Str(convertedType, typeSpec, fieldName, fieldValueText, className, tags, typeArgList, indent):
# TODO: make test case
S=""
fieldOwner=progSpec.getTypeSpecOwner(typeSpec)
Platform = progSpec.fetchTagValue(tags, 'Platform')
# TODO: make next line so it is not hard coded
if(Platform == 'Android' and (convertedType == "TextView" or convertedType == "ViewGroup" or convertedType == "CanvasView" or convertedType == "FragmentTransaction" or convertedType == "FragmentManager" or convertedType == "Menu" or convertedType == "static GLOBAL" or convertedType == "Toolbar" or convertedType == "NestedScrollView" or convertedType == "SubMenu" or convertedType == "APP" or convertedType == "AssetManager" or convertedType == "ScrollView" or convertedType == "LinearLayout" or convertedType == "GUI"or convertedType == "CheckBox" or convertedType == "HorizontalScrollView"or convertedType == "GUI_ZStack"or convertedType == "widget"or convertedType == "GLOBAL")):
S += indent + "public " + convertedType + ' ' + fieldName +';\n';
else:
S += indent + "public " + convertedType + ' ' + fieldName + fieldValueText +';\n';
return [S, '']
###################################################### CONSTRUCTORS
def codeConstructors(className, ctorArgs, ctorOvrRide, ctorInit, copyCtorArgs, funcBody, callSuper, xlator):
if callSuper:
funcBody = ' super();\n' + funcBody
withArgConstructor = ''
if ctorArgs != '':
withArgConstructor = " public " + className + "(" + ctorArgs+"){\n"+funcBody+ ctorInit+" };\n"
copyConstructor = " public " + className + "(final " + className + " fromVar" +"){\n "+ className + " toVar = new "+ className + "();\n" +copyCtorArgs+" };\n"
noArgConstructor = " public " + className + "(){\n"+funcBody+'\n };\n'
# TODO: remove hardCoding
if (className =="ourSubMenu" or className =="GUI"or className =="CanvasView"or className =="APP"or className =="GUI_ZStack"):
return ""
return withArgConstructor + copyConstructor + noArgConstructor
def codeConstructorInit(fieldName, count, defaultVal, xlator):
return " " + fieldName+"= arg_"+fieldName+";\n"
def codeConstructorArgText(argFieldName, count, argType, defaultVal, xlator):
return argType + " arg_"+ argFieldName
def codeCopyConstructor(fieldName, convertedType, isTemplateVar, xlator):
if isTemplateVar: return ""
return " toVar."+fieldName+" = fromVar."+fieldName+";\n"
def codeConstructorCall(className):
return ' INIT();\n'
def codeSuperConstructorCall(parentClassName):
return ' '+parentClassName+'();\n'
def codeFuncHeaderStr(className, fieldName, typeDefName, argListText, localArgsAllocated, inheritMode, overRideOper, isConstructor, typeArgList, typeSpec, indent):
# if fieldName == 'init':
# fieldName = fieldName+'_'+className
if inheritMode=='pure-virtual':
typeDefName = 'abstract '+typeDefName
structCode='\n'; funcDefCode=''; globalFuncs='';
if(className=='GLOBAL'):
if fieldName=='main':
structCode += indent + "public static void " + fieldName +" (String[] args)";
#localArgsAllocated.append(['args', {'owner':'me', 'fieldType':'String', 'argList':None}])
else:
structCode += indent + "public " + typeDefName + ' ' + fieldName +"("+argListText+")"
else:
structCode += indent + "public " + typeDefName +' ' + fieldName +"("+argListText+")"
if inheritMode=='pure-virtual':
structCode += ";\n"
elif inheritMode=='override': pass
return [structCode, funcDefCode, globalFuncs]
def getVirtualFuncText(field):
return ""
def codeTypeArgs(typeArgList):
print("TODO: finish codeTypeArgs")
def codeTemplateHeader(structName, typeArgList):
templateHeader = "\nclass "+structName+"<"
count = 0
for typeArg in typeArgList:
if(count>0):templateHeader+=", "
templateHeader+=typeArg
if isComparableType(typeArg):templateHeader+=" extends Comparable"
count+=1
templateHeader+=">"
return(templateHeader)
def extraCodeForTopOfFuntion(argList):
return ''
def codeSetBits(LHS_Left, LHS_FieldType, prefix, bitMask, RHS, rhsType):
if (LHS_FieldType =='flag' ):
item = LHS_Left+"flags"
mask = prefix+bitMask
if (RHS != 'true' and RHS !='false' and progSpec.fieldTypeKeyword(rhsType)!='bool' ):
RHS += '!=0'
val = '('+ RHS +')?'+mask+':0'
elif (LHS_FieldType =='mode' ):
item = LHS_Left+"flags"
mask = prefix+bitMask+"Mask"
if RHS == 'false': RHS = '0'
if RHS == 'true': RHS = '1'
val = RHS+"<<"+prefix+bitMask+"Offset"
return "{"+item+" &= ~"+mask+"; "+item+" |= ("+val+");}\n"
def codeSwitchBreak(caseAction, indent, xlator):
if not(len(caseAction) > 0 and caseAction[-1]['typeOfAction']=='funcCall' and caseAction[-1]['calledFunc'][0][0] == 'return'):
return indent+" break;\n"
else:
return ''
def applyTypecast(typeInCodeDog, itemToAlterType):
return '((int)'+itemToAlterType+')'
#######################################################
def includeDirective(libHdr):
S = 'import '+libHdr+';\n'
return S
def generateMainFunctionality(classes, tags):
# TODO: Some deInitialize items should automatically run during abort().
# TODO: System initCode should happen first in initialize, last in deinitialize.
runCode = progSpec.fetchTagValue(tags, 'runCode')
if runCode==None: runCode=""
Platform = progSpec.fetchTagValue(tags, 'Platform')
if Platform != 'Android':
mainFuncCode="""
me void: main( ) <- {
initialize(String.join(" ", args))
""" + runCode + """
deinitialize()
endFunc()
}
"""
if Platform == 'Android':
mainFuncCode="""
me void: runDogCode() <- {
""" + runCode + """
}
"""
progSpec.addObject(classes[0], classes[1], 'GLOBAL', 'struct', 'SEQ')
codeDogParser.AddToObjectFromText(classes[0], classes[1], progSpec.wrapFieldListInObjectDef('GLOBAL', mainFuncCode ), 'Java start-up code')
def fetchXlators():
xlators = {}
xlators['LanguageName'] = "Java"
xlators['BuildStrPrefix'] = "Javac "
xlators['fileExtension'] = ".java"
xlators['typeForCounterInt'] = "int"
xlators['GlobalVarPrefix'] = "GLOBAL.static_Global."
xlators['PtrConnector'] = "." # Name segment connector for pointers.
xlators['ObjConnector'] = "." # Name segment connector for classes.
xlators['NameSegConnector'] = "."
xlators['NameSegFuncConnector'] = "."
xlators['doesLangHaveGlobals'] = "False"
xlators['funcBodyIndent'] = " "
xlators['funcsDefInClass'] = "True"
xlators['MakeConstructors'] = "True"
xlators['blockPrefix'] = ""
xlators['usePrefixOnStatics'] = "False"
xlators['iteratorsUseOperators'] = "False"
xlators['renderGenerics'] = "True"
xlators['renameInitFuncs'] = "False"
xlators['codeFactor'] = codeFactor
xlators['codeComparisonStr'] = codeComparisonStr
xlators['codeIdentityCheck'] = codeIdentityCheck
xlators['derefPtr'] = derefPtr
xlators['checkForTypeCastNeed'] = checkForTypeCastNeed
xlators['adjustConditional'] = adjustConditional
xlators['includeDirective'] = includeDirective
xlators['codeMain'] = codeMain
xlators['produceTypeDefs'] = produceTypeDefs
xlators['addSpecialCode'] = addSpecialCode
xlators['applyTypecast'] = applyTypecast
xlators['codeIteratorOperation'] = codeIteratorOperation
xlators['xlateLangType'] = xlateLangType
xlators['getContainerType'] = getContainerType
xlators['recodeStringFunctions'] = recodeStringFunctions
xlators['langStringFormatterCommand'] = langStringFormatterCommand
xlators['LanguageSpecificDecorations'] = LanguageSpecificDecorations
xlators['getCodeAllocStr'] = getCodeAllocStr
xlators['getCodeAllocSetStr'] = getCodeAllocSetStr
xlators['codeSpecialReference'] = codeSpecialReference
xlators['checkIfSpecialAssignmentFormIsNeeded'] = checkIfSpecialAssignmentFormIsNeeded
xlators['getConstIntFieldStr'] = getConstIntFieldStr
xlators['codeStructText'] = codeStructText
xlators['getContainerTypeInfo'] = getContainerTypeInfo
xlators['codeNewVarStr'] = codeNewVarStr
xlators['chooseVirtualRValOwner'] = chooseVirtualRValOwner
xlators['determinePtrConfigForAssignments'] = determinePtrConfigForAssignments
xlators['iterateRangeContainerStr'] = iterateRangeContainerStr
xlators['iterateContainerStr'] = iterateContainerStr
xlators['getEnumStr'] = getEnumStr
xlators['codeVarFieldRHS_Str'] = codeVarFieldRHS_Str
xlators['codeVarField_Str'] = codeVarField_Str
xlators['codeFuncHeaderStr'] = codeFuncHeaderStr
xlators['extraCodeForTopOfFuntion'] = extraCodeForTopOfFuntion
xlators['codeArrayIndex'] = codeArrayIndex
xlators['codeSetBits'] = codeSetBits
xlators['generateMainFunctionality'] = generateMainFunctionality
xlators['addGLOBALSpecialCode'] = addGLOBALSpecialCode
xlators['codeArgText'] = codeArgText
xlators['codeConstructors'] = codeConstructors
xlators['codeConstructorInit'] = codeConstructorInit
xlators['codeIncrement'] = codeIncrement
xlators['codeDecrement'] = codeDecrement
xlators['codeConstructorArgText'] = codeConstructorArgText
xlators['codeSwitchBreak'] = codeSwitchBreak
xlators['codeCopyConstructor'] = codeCopyConstructor
xlators['codeRangeSpec'] = codeRangeSpec
xlators['codeConstField_Str'] = codeConstField_Str
xlators['checkForTypeCastNeed'] = checkForTypeCastNeed
xlators['codeConstructorCall'] = codeConstructorCall
xlators['codeSuperConstructorCall'] = codeSuperConstructorCall
xlators['getVirtualFuncText'] = getVirtualFuncText
xlators['getUnwrappedClassOwner'] = getUnwrappedClassOwner
xlators['makePtrOpt'] = makePtrOpt
return(xlators)
| gpl-2.0 | -8,974,714,541,891,909,000 | 48.895459 | 687 | 0.614569 | false |
trendels/rhino | examples/content_type_versioning.py | 1 | 1165 | import json
from rhino import Mapper, get
# Our internal representation
report = {
'title': 'foo',
'author': 'Fred',
'date': '2015-01-09',
'tags': ['a', 'b', 'c'],
}
# Base class for our representations
class report_repr(object):
@classmethod
def serialize(cls, report):
obj = dict([(k, report[k]) for k in cls.fields])
return json.dumps(obj, sort_keys=True)
# Different versions of the representation
class report_v1(report_repr):
provides = 'application/vnd.acme.report+json;v=1'
fields = ['title', 'author']
class report_v2(report_repr):
provides = 'application/vnd.acme.report+json;v=2'
fields = ['title', 'author', 'date']
class report_v3(report_repr):
provides = 'application/vnd.acme.report+json;v=3'
fields = ['title', 'author', 'date', 'tags']
# One handler can handle multiple representations.
# Here, report_v3 is the default when the client doesn't specify a preference.
@get(produces=report_v1)
@get(produces=report_v2)
@get(produces=report_v3)
def get_report(request):
return report
app = Mapper()
app.add('/', get_report)
if __name__ == '__main__':
app.start_server()
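# Added usage note (not part of the original example): once the server is
# running, a client selects a representation via the Accept header, e.g.
#   curl -H 'Accept: application/vnd.acme.report+json;v=2' http://localhost:<port>/
# which should return only the v2 fields (title, author, date); without an
# Accept preference the v3 representation is served. Host and port depend on
# rhino's start_server() defaults.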
| mit | -6,860,919,423,660,768,000 | 24.326087 | 78 | 0.657511 | false |
fsxfreak/club-suite | clubsuite/suite/migrations/0002_auto_20170219_0729.py | 1 | 3641 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 07:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('suite', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Budget',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('planned', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('used', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('start_date', models.DateField()),
('end_date', models.DateField()),
('cid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='suite.Club')),
],
),
migrations.CreateModel(
name='Division',
fields=[
('name', models.CharField(default='name for this division', max_length=50, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='EventSignIn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.BooleanField(default=False)),
('cid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='suite.Club')),
('eid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='suite.Event')),
('uid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('who_purchase', models.CharField(max_length=50)),
('status', models.CharField(choices=[('P', 'Pending'), ('R', 'Requested'), ('A', 'Accepted'), ('D', 'Denied')], default='P', max_length=1)),
('notes', models.CharField(blank=True, max_length=1000)),
('did', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='suite.Division')),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(choices=[('O', 'Owner'), ('A', 'Officer'), ('M', 'Member'), ('P', 'Passerby')], default='P', max_length=1)),
('cid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='suite.Club')),
('uid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='budget',
name='did',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='suite.Division'),
),
migrations.AddField(
model_name='event',
name='did',
field=models.ForeignKey(default=django.utils.timezone.now, on_delete=django.db.models.deletion.CASCADE, to='suite.Division'),
preserve_default=False,
),
]
| mit | 4,585,127,599,005,000,700 | 46.907895 | 156 | 0.573469 | false |
adamdempsey90/fargo3d | utils/python/indices.py | 1 | 1131 | import re
def index(i,j,k):
'''
Fargo3d index calculator.
    Input: Strings i, j, k, with the value of the desired index in each direction.
    Output: The one-dimensional fargo3d index.
'''
value = ''
print i,j,k
#Trivial option
if i == 'i' and j == 'j' and k == 'k':
value += 'l'
return value
    if i == 'i':
value += 'l'
x = re.match("\w([+-])(\d+)?",i)
y = re.match("\w([+-])(\d+)?",j)
z = re.match("\w([+-])(\d+)?",k)
if x != None:
if int(x.group(2)) >= 2:
print '\nError! The allowed displacement in i direction is up to +/- 1\n'
return
if x.group(1) == '+':
value += 'lxp'
if x.group(1) == '-':
value += 'lxm'
if y != None:
if(y.group(2) == '1'):
value += y.group(1) + 'Nx'
else:
value += y.group(1) + y.group(2) + '*Nx'
if z != None:
if(z.group(2) == '1'):
value += z.group(1) + 'Stride'
else:
value += z.group(1) + z.group(2) + '*Stride'
return value
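# Added usage sketch (not from the original source; assumes the FARGO3D macro
# conventions l, lxp/lxm, Nx and Stride referred to above):
#   index('i', 'j', 'k')    -> 'l'
#   index('i+1', 'j', 'k')  -> 'lxp'
#   index('i', 'j-1', 'k')  -> 'l-Nx'
#   index('i', 'j', 'k+2')  -> 'l+2*Stride'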
| gpl-3.0 | 4,786,915,709,673,327,000 | 24.704545 | 85 | 0.431477 | false |
fergalmoran/dss | spa/api/v1/ShowResource.py | 1 | 1087 | from tastypie import fields
from tastypie.authorization import Authorization
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.http import HttpBadRequest
from spa.api.v1.BaseResource import BaseResource
from spa.models import Show
from spa.models.show import ShowOverlapException
DATE_FORMAT = '%d/%m/%Y %H:%M:%S'
class ShowResource(BaseResource):
mix = fields.ToOneField('spa.api.v1.MixResource.MixResource',
'mix', null=False, full=False)
class Meta:
queryset = Show.objects.all()
authorization = Authorization()
resource_name = 'shows'
def obj_create(self, bundle, **kwargs):
try:
return super(ShowResource, self).obj_create(bundle, **kwargs)
except ShowOverlapException:
raise ImmediateHttpResponse(
HttpBadRequest("This event overlaps with an existing event")
)
except Exception, ex:
raise ImmediateHttpResponse(
HttpBadRequest(ex.message)
)
| bsd-2-clause | 6,732,884,751,629,679,000 | 29.970588 | 76 | 0.641214 | false |
bearstech/modoboa | modoboa/admin/templatetags/admin_tags.py | 1 | 8642 | """Admin extension tags."""
from __future__ import unicode_literals
from functools import reduce
from django import template
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core import signals as core_signals
from modoboa.lib.templatetags.lib_tags import render_link
from modoboa.lib.web_utils import render_actions
from .. import signals
register = template.Library()
genders = {
"Enabled": (ugettext_lazy("enabled_m"), ugettext_lazy("enabled_f"))
}
@register.simple_tag
def domains_menu(selection, user, ajax_mode=True):
"""Specific menu for domain related operations.
Corresponds to the menu visible on the left column when you go to
*Domains*.
:param str selection: menu entry currently selected
:param ``User`` user: connected user
:rtype: str
:return: rendered menu (as HTML)
"""
domain_list_url = (
"list/" if ajax_mode else reverse("admin:domain_list")
)
entries = [
{"name": "domains",
"label": _("List domains"),
"img": "fa fa-user",
"class": "ajaxnav navigation",
"url": domain_list_url},
]
if user.has_perm("admin.add_domain"):
extra_entries = signals.extra_domain_menu_entries.send(
sender="domains_menu", user=user)
for entry in extra_entries:
entries += entry[1]
entries += [
{"name": "import",
"label": _("Import"),
"img": "fa fa-folder-open",
"url": reverse("admin:domain_import"),
"modal": True,
"modalcb": "admin.importform_cb"},
{"name": "export",
"label": _("Export"),
"img": "fa fa-share-alt",
"url": reverse("admin:domain_export"),
"modal": True,
"modalcb": "admin.exportform_cb"}
]
return render_to_string('common/menulist.html', {
"entries": entries,
"selection": selection,
"user": user
})
@register.simple_tag
def identities_menu(user, selection=None, ajax_mode=True):
"""Menu specific to the Identities page.
    :param ``User`` user: the connected user
:rtype: str
:return: the rendered menu
"""
nav_classes = "navigation"
if ajax_mode:
identity_list_url = "list/"
quota_list_url = "quotas/"
nav_classes += " ajaxnav"
else:
identity_list_url = reverse("admin:identity_list")
quota_list_url = identity_list_url + "#quotas/"
entries = [
{"name": "identities",
"label": _("List identities"),
"img": "fa fa-user",
"class": nav_classes,
"url": identity_list_url},
{"name": "quotas",
"label": _("List quotas"),
"img": "fa fa-hdd-o",
"class": nav_classes,
"url": quota_list_url},
{"name": "import",
"label": _("Import"),
"img": "fa fa-folder-open",
"url": reverse("admin:identity_import"),
"modal": True,
"modalcb": "admin.importform_cb"},
{"name": "export",
"label": _("Export"),
"img": "fa fa-share-alt",
"url": reverse("admin:identity_export"),
"modal": True,
"modalcb": "admin.exportform_cb"}
]
return render_to_string('common/menulist.html', {
"entries": entries,
"user": user
})
@register.simple_tag
def domain_actions(user, domain):
actions = [
{"name": "listidentities",
"url": u"{0}#list/?searchquery=@{1}".format(
reverse("admin:identity_list"), domain.name),
"title": _("View the domain's identities"),
"img": "fa fa-user"}
]
if user.has_perm("admin.change_domain"):
actions.append({
"name": "editdomain",
"title": _("Edit {}").format(domain),
"url": reverse("admin:domain_change", args=[domain.pk]),
"modal": True,
"modalcb": "admin.domainform_cb",
"img": "fa fa-edit"
})
if user.has_perm("admin.delete_domain"):
actions.append({
"name": "deldomain",
"url": reverse("admin:domain_delete", args=[domain.id]),
"title": _("Delete %s?" % domain.name),
"img": "fa fa-trash"
})
responses = signals.extra_domain_actions.send(
sender=None, user=user, domain=domain)
for receiver, response in responses:
if response:
actions += response
return render_actions(actions)
@register.simple_tag
def identity_actions(user, ident):
name = ident.__class__.__name__
objid = ident.id
if name == "User":
actions = []
result = core_signals.extra_account_actions.send(
sender="identity_actions", account=ident)
for action in result:
actions += action[1]
url = (
reverse("admin:account_change", args=[objid]) +
"?active_tab=default"
)
actions += [
{"name": "changeaccount",
"url": url,
"img": "fa fa-edit",
"modal": True,
"modalcb": "admin.editaccount_cb",
"title": _("Edit {}").format(ident.username)},
{"name": "delaccount",
"url": reverse("admin:account_delete", args=[objid]),
"img": "fa fa-trash",
"title": _("Delete %s?" % ident.username)},
]
else:
actions = [
{"name": "changealias",
"url": reverse("admin:alias_change", args=[objid]),
"img": "fa fa-edit",
"modal": True,
"modalcb": "admin.aliasform_cb",
"title": _("Edit {}").format(ident)},
{"name": "delalias",
"url": "{}?selection={}".format(
reverse("admin:alias_delete"), objid),
"img": "fa fa-trash",
"title": _("Delete %s?" % ident.address)},
]
return render_actions(actions)
@register.simple_tag
def check_identity_status(identity):
"""Check if identity is enabled or not."""
if identity.__class__.__name__ == "User":
if hasattr(identity, "mailbox") \
and not identity.mailbox.domain.enabled:
return False
elif not identity.is_active:
return False
elif not identity.enabled or not identity.domain.enabled:
return False
return True
@register.simple_tag
def domain_aliases(domain):
"""Display domain aliases of this domain.
:param domain:
:rtype: str
"""
if not domain.aliases.count():
return '---'
res = ''
for alias in domain.aliases.all():
res += '%s<br/>' % alias.name
return mark_safe(res)
@register.simple_tag
def identity_modify_link(identity, active_tab='default'):
"""Return the appropriate modification link.
According to the identity type, a specific modification link (URL)
must be used.
:param identity: a ``User`` or ``Alias`` instance
:param str active_tab: the tab to display
:rtype: str
"""
linkdef = {"label": identity.identity, "modal": True}
if identity.__class__.__name__ == "User":
linkdef["url"] = reverse("admin:account_change", args=[identity.id])
linkdef["url"] += "?active_tab=%s" % active_tab
linkdef["modalcb"] = "admin.editaccount_cb"
else:
linkdef["url"] = reverse("admin:alias_change", args=[identity.id])
linkdef["modalcb"] = "admin.aliasform_cb"
return render_link(linkdef)
@register.simple_tag
def domadmin_actions(daid, domid):
actions = [{
"name": "removeperm",
"url": "{0}?domid={1}&daid={2}".format(
reverse("admin:permission_remove"), domid, daid),
"img": "fa fa-trash",
"title": _("Remove this permission")
}]
return render_actions(actions)
@register.filter
def gender(value, target):
if value in genders:
trans = target == "m" and genders[value][0] or genders[value][1]
if trans.find("_") == -1:
return trans
return value
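# Added template usage sketch (not part of the original module): tag and filter
# names match the definitions above; the surrounding markup is illustrative only.
#   {% load admin_tags %}
#   {% domains_menu "domains" user %}
#   {% domain_actions user domain %}
#   {{ "Enabled"|gender:"m" }}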
@register.simple_tag
def get_extra_admin_content(user, target, currentpage):
results = signals.extra_admin_content.send(
sender="get_extra_admin_content",
user=user, location=target, currentpage=currentpage)
if not results:
return ""
results = reduce(lambda a, b: a + b, [result[1] for result in results])
return mark_safe("".join(results))
| isc | 8,241,718,290,447,684,000 | 29.864286 | 76 | 0.560287 | false |
kastnerkyle/pylearn2 | pylearn2/tests/test_monitor.py | 1 | 19768 | import numpy as np
import warnings
from nose.tools import assert_raises
from theano.compat import exc_message
from theano.compat.python2x import OrderedDict
from theano import shared
from theano import tensor as T
from pylearn2.costs.cost import Cost
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models.model import Model
from pylearn2.models.s3c import S3C, E_Step, Grad_M_Step
from pylearn2.monitor import _err_ambig_data
from pylearn2.monitor import _err_no_data
from pylearn2.monitor import Monitor
from pylearn2.monitor import push_monitor
from pylearn2.space import VectorSpace
from pylearn2.testing.datasets import ArangeDataset
from pylearn2.training_algorithms.default import DefaultTrainingAlgorithm
from pylearn2.utils.iteration import _iteration_schemes, has_uniform_batch_size
from pylearn2.utils import py_integer_types
from pylearn2.utils.serial import from_string
from pylearn2.utils.serial import to_string
from pylearn2.utils import sharedX
from pylearn2.testing.prereqs import ReadVerifyPrereq
class DummyModel(Model):
def __init__(self, num_features):
self.input_space = VectorSpace(num_features)
def get_default_cost(self):
return DummyCost()
class DummyDataset(DenseDesignMatrix):
def __init__(self, num_examples, num_features):
rng = np.random.RandomState([4, 12, 17])
super(DummyDataset, self).__init__(
X=rng.uniform(1., 2., (num_examples, num_features))
)
def __getstate__(self):
raise AssertionError("These unit tests only test Monitor "
"functionality. If the Monitor tries to serialize a "
"Dataset, that is an error.")
class DummyCost(Cost):
def get_data_specs(self, model):
return (VectorSpace(1), 'dummy')
def expr(self, model, cost_ipt):
return cost_ipt.sum()
def test_channel_scaling_sequential():
def channel_scaling_checker(num_examples, mode, num_batches, batch_size):
num_features = 2
monitor = Monitor(DummyModel(num_features))
dataset = DummyDataset(num_examples, num_features)
monitor.add_dataset(dataset=dataset, mode=mode,
num_batches=num_batches, batch_size=batch_size)
vis_batch = T.matrix()
mean = vis_batch.mean()
data_specs = (monitor.model.get_input_space(),
monitor.model.get_input_source())
monitor.add_channel(name='mean', ipt=vis_batch, val=mean, dataset=dataset,
data_specs=data_specs)
monitor()
assert 'mean' in monitor.channels
mean = monitor.channels['mean']
assert len(mean.val_record) == 1
actual = mean.val_record[0]
X = dataset.get_design_matrix()
if batch_size is not None and num_batches is not None:
total = min(num_examples, num_batches * batch_size)
else:
total = num_examples
expected = X[:total].mean()
if not np.allclose(expected, actual):
raise AssertionError("Expected monitor to contain %f but it has "
"%f" % (expected, actual))
# Specifying num_batches; even split
yield channel_scaling_checker, 10, 'sequential', 5, None
# Specifying num_batches; even split
yield channel_scaling_checker, 10, 'sequential', 2, None
# Specifying batch_size; even split
yield channel_scaling_checker, 10, 'sequential', None, 5
# Specifying batch_size; even split
yield channel_scaling_checker, 10, 'sequential', None, 2
# Specifying num_batches; uneven split
yield channel_scaling_checker, 10, 'sequential', 4, None
# Specifying num_batches; uneven split
yield channel_scaling_checker, 10, 'sequential', 3, None
# Specifying batch_size; uneven split
yield channel_scaling_checker, 10, 'sequential', None, 3
# Specifying batch_size; uneven split
yield channel_scaling_checker, 10, 'sequential', None, 4
# Specifying both, even split
yield channel_scaling_checker, 10, 'sequential', 2, 5
# Specifying both, even split
yield channel_scaling_checker, 10, 'sequential', 5, 2
# Specifying both, uneven split, dangling batch
yield channel_scaling_checker, 10, 'sequential', 3, 4
# Specifying both, uneven split, non-exhaustive
yield channel_scaling_checker, 10, 'sequential', 3, 3
def test_counting():
BATCH_SIZE = 2
BATCHES = 3
NUM_FEATURES = 4
num_examples = BATCHES * BATCH_SIZE
dataset = DummyDataset( num_examples = num_examples,
num_features = NUM_FEATURES)
algorithm = DefaultTrainingAlgorithm( batch_size = BATCH_SIZE,
batches_per_iter = BATCHES)
model = S3C( nvis = NUM_FEATURES, nhid = 1,
irange = .01, init_bias_hid = 0., init_B = 1.,
min_B = 1., max_B = 1., init_alpha = 1.,
min_alpha = 1., max_alpha = 1., init_mu = 0.,
m_step = Grad_M_Step( learning_rate = 0.),
e_step = E_Step( h_new_coeff_schedule = [ 1. ]))
algorithm.setup(model = model, dataset = dataset)
algorithm.train(dataset = dataset)
if not ( model.monitor.get_batches_seen() == BATCHES):
raise AssertionError('Should have seen '+str(BATCHES) + \
' batches but saw '+str(model.monitor.get_batches_seen()))
assert model.monitor.get_examples_seen() == num_examples
assert isinstance(model.monitor.get_examples_seen(), py_integer_types)
assert isinstance(model.monitor.get_batches_seen(), py_integer_types)
def test_reject_empty():
# Test that Monitor raises an error if asked to iterate over 0 batches
BATCH_SIZE = 2
num_examples = BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
monitoring_dataset = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE,
num_batches = 0)
name = 'z'
monitor.add_channel(name = name,
ipt = model.input_space.make_theano_batch(),
val = 0.,
data_specs=(model.get_input_space(), model.get_input_source()))
try:
monitor()
except ValueError:
return
assert False
def test_prereqs():
# Test that prereqs get run before the monitoring channels are computed
BATCH_SIZE = 2
num_examples = BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
monitoring_dataset = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE)
prereq_counter = sharedX(0.)
def prereq(*data):
prereq_counter.set_value(
prereq_counter.get_value()+1.)
name = 'num_prereq_calls'
monitor.add_channel(name = name,
ipt = model.input_space.make_theano_batch(),
val = prereq_counter,
prereqs = [ prereq ],
data_specs=(model.get_input_space(), model.get_input_source()))
channel = monitor.channels[name]
assert len(channel.val_record) == 0
monitor()
assert channel.val_record == [1]
monitor()
assert channel.val_record == [1,2]
def test_revisit():
# Test that each call to monitor revisits exactly the same data
BATCH_SIZE = 3
MAX_BATCH_SIZE = 12
BATCH_SIZE_STRIDE = 3
NUM_BATCHES = 10
num_examples = NUM_BATCHES * BATCH_SIZE
monitoring_dataset = ArangeDataset(num_examples)
for mon_batch_size in xrange(BATCH_SIZE, MAX_BATCH_SIZE + 1,
BATCH_SIZE_STRIDE):
for num_mon_batches in [ 1, 3, num_examples / mon_batch_size, None ]:
for mode in sorted(_iteration_schemes):
if num_mon_batches is None and mode in ['random_uniform', 'random_slice']:
continue
if has_uniform_batch_size(mode) and \
num_mon_batches is not None and \
num_mon_batches * mon_batch_size > num_examples:
num_mon_batches = int(num_examples / float(mon_batch_size))
model = DummyModel(1)
monitor = Monitor.get_monitor(model)
try:
monitor.add_dataset(monitoring_dataset, mode,
batch_size=mon_batch_size, num_batches=num_mon_batches)
except TypeError:
monitor.add_dataset(monitoring_dataset, mode,
batch_size=mon_batch_size, num_batches=num_mon_batches,
seed = 0)
if has_uniform_batch_size(mode) and num_mon_batches is None:
num_mon_batches = int(num_examples / float(mon_batch_size))
elif num_mon_batches is None:
num_mon_batches = int(np.ceil(float(num_examples) /
float(mon_batch_size)))
batches = [ None ] * num_mon_batches
visited = [ False ] * num_mon_batches
batch_idx = shared(0)
class RecorderAndValidator(object):
def __init__(self):
self.validate = False
def __call__(self, *data):
""" Initially, records the batches the monitor shows it.
When set to validate mode, makes sure the batches shown
on the second monitor call match those from the first."""
X, = data
idx = batch_idx.get_value()
batch_idx.set_value(idx + 1)
# Note: if the monitor starts supporting variable batch sizes,
# take this out. Maybe move it to a new test that the iterator's
# uneven property is set accurately
warnings.warn("TODO: add unit test that iterators uneven property is set correctly.")
# assert X.shape[0] == mon_batch_size
if self.validate:
previous_batch = batches[idx]
assert not visited[idx]
visited[idx] = True
if not np.allclose(previous_batch, X):
print 'Visited different data in batch',idx
print previous_batch
print X
print 'Iteration mode', mode
assert False
else:
batches[idx] = X
# end if
# end __call__
#end class
prereq = RecorderAndValidator()
monitor.add_channel(name = 'dummy',
ipt = model.input_space.make_theano_batch(),
val = 0.,
prereqs = [ prereq ],
data_specs=(model.get_input_space(),
model.get_input_source()))
try:
monitor()
except RuntimeError:
print 'monitor raised RuntimeError for iteration mode', mode
raise
assert None not in batches
batch_idx.set_value(0)
prereq.validate = True
monitor()
assert all(visited)
def test_prereqs_batch():
# Test that prereqs get run before each monitoring batch
BATCH_SIZE = 2
num_examples = 2 * BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
monitoring_dataset = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE)
sign = sharedX(1.)
def prereq(*data):
sign.set_value(
-sign.get_value())
name = 'batches_should_cancel_to_0'
monitor.add_channel(name = name,
ipt = model.input_space.make_theano_batch(),
val = sign,
prereqs = [ prereq ],
data_specs=(model.get_input_space(), model.get_input_source()))
channel = monitor.channels[name]
assert len(channel.val_record) == 0
monitor()
assert channel.val_record == [0]
monitor()
assert channel.val_record == [0,0]
def test_dont_serialize_dataset():
# Test that serializing the monitor does not serialize the dataset
BATCH_SIZE = 2
num_examples = 2 * BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
monitoring_dataset = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
monitoring_dataset.yaml_src = ""
monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE)
monitor()
to_string(monitor)
def test_serialize_twice():
# Test that a monitor can be serialized twice
# with the same result
model = DummyModel(1)
monitor = Monitor.get_monitor(model)
x = to_string(monitor)
y = to_string(monitor)
assert x == y
def test_save_load_save():
"""
Test that a monitor can be saved, then loaded, and then the loaded
copy can be saved again.
This only tests that the serialization and deserialization processes
don't raise an exception. It doesn't test for correctness at all.
"""
model = DummyModel(1)
monitor = Monitor.get_monitor(model)
num_examples = 2
num_features = 3
num_batches = 1
batch_size = 2
dataset = DummyDataset(num_examples, num_features)
monitor.add_dataset(dataset=dataset,
num_batches=num_batches, batch_size=batch_size)
vis_batch = T.matrix()
mean = vis_batch.mean()
data_specs = (monitor.model.get_input_space(),
monitor.model.get_input_source())
monitor.add_channel(name='mean', ipt=vis_batch, val=mean, dataset=dataset,
data_specs=data_specs)
saved = to_string(monitor)
monitor = from_string(saved)
saved_again = to_string(monitor)
def test_valid_after_serialize():
# Test that serializing the monitor does not ruin it
BATCH_SIZE = 2
num_examples = 2 * BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
monitoring_dataset = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
monitoring_dataset.yaml_src = ""
monitor.add_dataset(monitoring_dataset, 'sequential', batch_size=BATCH_SIZE)
to_string(monitor)
monitor.redo_theano()
def test_deserialize():
# Test that a monitor can be deserialized
model = DummyModel(1)
monitor = Monitor.get_monitor(model)
x = to_string(monitor)
monitor = from_string(x)
y = to_string(monitor)
def test_prereqs_multidataset():
# Test that prereqs are run on the right datasets
NUM_DATASETS = 4
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
prereq_counters = []
datasets = []
for i in xrange(NUM_DATASETS):
batch_size = i + 1
num_examples = batch_size
dataset = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
dataset.X[:] = i
datasets.append(dataset)
monitor.add_dataset(dataset, 'sequential', batch_size=batch_size)
prereq_counters.append(sharedX(0.))
channels = []
for i in xrange(NUM_DATASETS):
monitor.add_channel(name = str(i),
ipt = model.input_space.make_theano_batch(),
val = prereq_counters[i],
dataset = datasets[i],
prereqs = [ ReadVerifyPrereq(i, prereq_counters[i]) ],
data_specs=(model.get_input_space(), model.get_input_source()))
channels.append(monitor.channels[str(i)])
for channel in channels:
assert len(channel.val_record) == 0
monitor()
for channel in channels:
assert channel.val_record == [1]
monitor()
for channel in channels:
assert channel.val_record == [1,2]
# check that handling all these datasets did not
# result in them getting serialized
to_string(monitor)
def test_reject_bad_add_dataset():
model = DummyModel(1)
monitor = Monitor.get_monitor(model)
dataset = DummyDataset(1,1)
try:
monitor.add_dataset([dataset],mode=['sequential', 'shuffled'])
except ValueError:
return
raise AssertionError("Monitor.add_dataset accepted bad arguments to "
"add_dataset.")
def test_no_data():
# test that the right error is raised if you
# add a channel to a monitor that has no datasets
BATCH_SIZE = 2
num_examples = BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
name = 'num_prereq_calls'
try:
monitor.add_channel(name = name,
ipt = model.input_space.make_theano_batch(),
data_specs = (model.input_space, 'features'),
val = 0.)
except ValueError, e:
assert exc_message(e) == _err_no_data
return
assert False
def test_ambig_data():
# test that the right error is raised if you
# add a channel to a monitor that has multiple datasets
# and don't specify the dataset
BATCH_SIZE = 2
num_examples = BATCH_SIZE
NUM_FEATURES = 3
model = DummyModel(NUM_FEATURES)
monitor = Monitor.get_monitor(model)
first = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
second = DummyDataset(num_examples = num_examples,
num_features = NUM_FEATURES)
monitor.add_dataset(first, 'sequential', batch_size=BATCH_SIZE)
monitor.add_dataset(second, 'sequential', batch_size=BATCH_SIZE)
name = 'num_prereq_calls'
try:
monitor.add_channel(name = name,
ipt = model.input_space.make_theano_batch(),
val = 0.,
data_specs=(model.get_input_space(), model.get_input_source()))
except ValueError, e:
assert exc_message(e) == _err_ambig_data
return
assert False
def test_transfer_experience():
# Makes sure the transfer_experience flag of push_monitor works
model = DummyModel(num_features = 3)
monitor = Monitor.get_monitor(model)
monitor.report_batch(2)
monitor.report_batch(3)
monitor.report_epoch()
model = push_monitor(model, "old_monitor", transfer_experience=True)
assert model.old_monitor is monitor
monitor = model.monitor
assert monitor.get_epochs_seen() == 1
assert monitor.get_batches_seen() == 2
    assert monitor.get_examples_seen() == 5
def test_extra_costs():
# Makes sure Monitor.setup checks type of extra_costs
num_features = 3
model = DummyModel(num_features=num_features)
dataset = DummyDataset(num_examples=2, num_features=num_features)
monitor = Monitor.get_monitor(model)
extra_costs = [model.get_default_cost()]
assert_raises(AssertionError, monitor.setup, dataset,
model.get_default_cost(), 1, extra_costs=extra_costs)
extra_costs = OrderedDict()
extra_costs['Cost'] = model.get_default_cost()
monitor.setup(dataset, model.get_default_cost(), 1,
extra_costs=extra_costs)
if __name__ == '__main__':
test_revisit()
| bsd-3-clause | 3,848,146,624,644,614,700 | 31.67438 | 109 | 0.606536 | false |
tensorflow/neural-structured-learning | research/kg_hyp_emb/models/base.py | 1 | 8413 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for Knowledge Graph embedding models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from kg_hyp_emb.learning import regularizers
import numpy as np
import tensorflow as tf
class KGModel(tf.keras.Model, abc.ABC):
"""Abstract Knowledge Graph embedding model class.
Module to define basic operations in KG embedding models, including embedding
initialization, computing embeddings and triples' scores.
"""
def __init__(self, sizes, args):
"""Initialize KG embedding model.
Args:
sizes: Tuple of size 3 containing (n_entities, n_rels, n_entities).
args: Namespace with config arguments (see config.py for detailed overview
of arguments supported).
"""
super(KGModel, self).__init__()
self.sizes = sizes
self.rank = args.rank
self.bias = args.bias
self.initializer = getattr(tf.keras.initializers, args.initializer)
self.entity_regularizer = getattr(regularizers, args.regularizer)(
args.entity_reg)
self.rel_regularizer = getattr(regularizers, args.regularizer)(args.rel_reg)
self.entity = tf.keras.layers.Embedding(
input_dim=sizes[0],
output_dim=self.rank,
embeddings_initializer=self.initializer,
embeddings_regularizer=self.entity_regularizer,
name='entity_embeddings')
self.rel = tf.keras.layers.Embedding(
input_dim=sizes[1],
output_dim=self.rank,
embeddings_initializer=self.initializer,
embeddings_regularizer=self.rel_regularizer,
name='relation_embeddings')
train_biases = self.bias == 'learn'
self.bh = tf.keras.layers.Embedding(
input_dim=sizes[0],
output_dim=1,
embeddings_initializer='zeros',
name='head_biases',
trainable=train_biases)
self.bt = tf.keras.layers.Embedding(
input_dim=sizes[0],
output_dim=1,
embeddings_initializer='zeros',
name='tail_biases',
trainable=train_biases)
self.gamma = tf.Variable(
initial_value=args.gamma * tf.keras.backend.ones(1), trainable=False)
@abc.abstractmethod
def get_queries(self, input_tensor):
"""Get query embeddings using head and relationship for an index tensor.
Args:
input_tensor: Tensor of size batch_size x 3 containing triples' indices.
Returns:
Tensor of size batch_size x embedding_dimension representing queries'
embeddings.
"""
pass
@abc.abstractmethod
def get_rhs(self, input_tensor):
"""Get right hand side (tail) embeddings for an index tensor.
Args:
input_tensor: Tensor of size batch_size x 3 containing triples' indices.
Returns:
Tensor of size batch_size x embedding_dimension representing tail
entities' embeddings.
"""
pass
@abc.abstractmethod
def get_candidates(self):
"""Get all candidate tail embeddings in a knowledge graph dataset.
Returns:
Tensor of size n_entities x embedding_dimension representing embeddings
for all enitities in the KG.
"""
pass
@abc.abstractmethod
def similarity_score(self, lhs, rhs, eval_mode):
"""Computes a similarity score between queries and tail embeddings.
Args:
lhs: Tensor of size B1 x embedding_dimension containing queries'
embeddings.
rhs: Tensor of size B2 x embedding_dimension containing tail entities'
embeddings.
eval_mode: boolean to indicate whether to compute all pairs of scores or
not. If False, B1 must be equal to B2.
Returns:
Tensor representing similarity scores. If eval_mode is False, this tensor
has size B1 x 1, otherwise it has size B1 x B2.
"""
pass
def call(self, input_tensor, eval_mode=False):
"""Forward pass of KG embedding models.
Args:
input_tensor: Tensor of size batch_size x 3 containing triples' indices.
eval_mode: boolean to indicate whether to compute scores against all
possible tail entities in the KG, or only individual triples' scores.
Returns:
Tensor containing triple scores. If eval_mode is False, this tensor
has size batch_size x 1, otherwise it has size batch_size x n_entities
where n_entities is the total number of entities in the KG.
"""
lhs = self.get_queries(input_tensor)
lhs_biases = self.bh(input_tensor[:, 0])
if eval_mode:
rhs = self.get_candidates()
rhs_biases = self.bt.embeddings
else:
rhs = self.get_rhs(input_tensor)
rhs_biases = self.bt(input_tensor[:, 2])
predictions = self.score(lhs, lhs_biases, rhs, rhs_biases, eval_mode)
return predictions
def score(self, lhs, lhs_biases, rhs, rhs_biases, eval_mode):
"""Compute triple scores using embeddings and biases."""
score = self.similarity_score(lhs, rhs, eval_mode)
if self.bias == 'constant':
return score + self.gamma
elif self.bias == 'learn':
if eval_mode:
return score + lhs_biases + tf.transpose(rhs_biases)
else:
return score + lhs_biases + rhs_biases
else:
return score
  def get_scores_targets(self, input_tensor):
    """Computes triples' scores as well as scores against all possible entities.
Args:
input_tensor: Tensor of size batch_size x 3 containing triples' indices.
Returns:
scores: Numpy array of size batch_size x n_entities containing queries'
scores against all possible entities in the KG.
targets: Numpy array of size batch_size x 1 containing triples' scores.
"""
cand = self.get_candidates()
cand_biases = self.bt.embeddings
lhs = self.get_queries(input_tensor)
lhs_biases = self.bh(input_tensor[:, 0])
rhs = self.get_rhs(input_tensor)
rhs_biases = self.bt(input_tensor[:, 2])
scores = self.score(lhs, lhs_biases, cand, cand_biases, eval_mode=True)
targets = self.score(lhs, lhs_biases, rhs, rhs_biases, eval_mode=False)
return scores.numpy(), targets.numpy()
def eval(self, examples, filters, batch_size=1000):
"""Compute ranking-based evaluation metrics.
Args:
examples: Tensor of size n_examples x 3 containing triples' indices.
filters: Dict representing entities to skip per query for evaluation in
the filtered setting.
batch_size: batch size to use to compute scores.
Returns:
      Evaluation metrics (mean rank, mean reciprocal rank and hits).
"""
mean_rank = {}
mean_reciprocal_rank = {}
hits_at = {}
total_examples = examples.cardinality().numpy()
batch_size = min(batch_size, total_examples)
for missing in ['rhs', 'lhs']:
ranks = np.ones(total_examples)
for counter, input_tensor in enumerate(examples.batch(batch_size)):
if batch_size * counter >= total_examples:
break
# reverse triple for head prediction
if missing == 'lhs':
input_tensor = tf.concat([
input_tensor[:, 2:], input_tensor[:, 1:2] + self.sizes[1] // 2,
input_tensor[:, 0:1]
],
axis=1)
scores, targets = self.get_scores_targets(input_tensor)
for i, query in enumerate(input_tensor):
query = query.numpy()
filter_out = filters[missing][(query[0], query[1])]
filter_out += [query[2]]
scores[i, filter_out] = -1e6
ranks[counter * batch_size:(counter + 1) * batch_size] += np.sum(
(scores >= targets), axis=1)
# compute ranking metrics
mean_rank[missing] = np.mean(ranks)
mean_reciprocal_rank[missing] = np.mean(1. / ranks)
hits_at[missing] = {}
for k in (1, 3, 10):
hits_at[missing][k] = np.mean(ranks <= k)
return mean_rank, mean_reciprocal_rank, hits_at
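# Editor's note: an illustrative sketch (not part of this library) of what a
# minimal concrete subclass could look like; the class name and the squared
# Euclidean similarity below are assumptions made for illustration only.
#
#     class TransEExample(KGModel):
#
#       def get_queries(self, input_tensor):
#         return self.entity(input_tensor[:, 0]) + self.rel(input_tensor[:, 1])
#
#       def get_rhs(self, input_tensor):
#         return self.entity(input_tensor[:, 2])
#
#       def get_candidates(self):
#         return self.entity.embeddings
#
#       def similarity_score(self, lhs, rhs, eval_mode):
#         if eval_mode:
#           return -tf.reduce_sum(
#               tf.square(lhs[:, None, :] - rhs[None, :, :]), axis=-1)
#         return -tf.reduce_sum(tf.square(lhs - rhs), axis=-1, keepdims=True)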
| apache-2.0 | 4,393,025,333,323,162,000 | 34.952991 | 80 | 0.66314 | false |
klichukb/django-migrate-sql | setup.py | 1 | 2009 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
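# Editor's note: with the Tox command wired into `cmdclass` below, the test
# suite can be run through setuptools; extra tox arguments are passed via the
# `--tox-args`/`-a` option defined above, e.g. (illustrative):
#
#     python setup.py test --tox-args="-e py27"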
def get_version(package):
"""
Get migrate_sql version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
with open('README.rst') as readme_file:
readme = readme_file.read()
VERSION = get_version('migrate_sql')
setup(
name='django-migrate-sql',
version=VERSION,
description='Migration support for raw SQL in Django',
long_description=readme,
author='Bogdan Klichuk',
author_email='[email protected]',
packages=find_packages(),
package_dir={'migrate_sql': 'migrate_sql'},
license='BSD',
zip_safe=False,
url='https://github.com/klichukb/django-migrate-sql',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
tests_require=['tox'],
cmdclass={'test': Tox},
install_requires=[],
)
| isc | 3,437,247,787,245,584,400 | 25.786667 | 75 | 0.603783 | false |
beeftornado/sentry | src/sentry/integrations/metric_alerts.py | 1 | 2544 | from __future__ import absolute_import
from datetime import timedelta
from django.core.urlresolvers import reverse
from sentry.incidents.logic import get_incident_aggregates
from sentry.incidents.models import IncidentStatus, IncidentTrigger, INCIDENT_STATUS
from sentry.utils.assets import get_asset_url
from sentry.utils.http import absolute_uri
QUERY_AGGREGATION_DISPLAY = {
"count()": "events",
"count_unique(tags[sentry:user])": "users affected",
}
def incident_attachment_info(incident, metric_value=None):
logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
alert_rule = incident.alert_rule
status = INCIDENT_STATUS[IncidentStatus(incident.status)]
agg_text = QUERY_AGGREGATION_DISPLAY.get(
alert_rule.snuba_query.aggregate, alert_rule.snuba_query.aggregate
)
if metric_value is None:
incident_trigger = (
IncidentTrigger.objects.filter(incident=incident).order_by("-date_modified").first()
)
if incident_trigger:
alert_rule_trigger = incident_trigger.alert_rule_trigger
# TODO: If we're relying on this and expecting possible delays between a
# trigger fired and this function running, then this could actually be
# incorrect if they changed the trigger's time window in this time period.
# Should we store it?
start = incident_trigger.date_modified - timedelta(
seconds=alert_rule_trigger.alert_rule.snuba_query.time_window
)
end = incident_trigger.date_modified
else:
start, end = None, None
metric_value = get_incident_aggregates(incident, start, end, use_alert_aggregate=True)[
"count"
]
time_window = alert_rule.snuba_query.time_window // 60
text = "{} {} in the last {} minutes".format(metric_value, agg_text, time_window)
if alert_rule.snuba_query.query != "":
text += "\nFilter: {}".format(alert_rule.snuba_query.query)
ts = incident.date_started
title = u"{}: {}".format(status, alert_rule.name)
title_link = absolute_uri(
reverse(
"sentry-metric-alert",
kwargs={
"organization_slug": incident.organization.slug,
"incident_id": incident.identifier,
},
)
)
return {
"title": title,
"text": text,
"logo_url": logo_url,
"status": status,
"ts": ts,
"title_link": title_link,
}
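# Editor's note (illustrative, not part of this module): callers in the chat
# integrations turn the returned dict into a provider-specific attachment; a
# rough, hypothetical sketch of that mapping:
#
#     info = incident_attachment_info(incident, metric_value=12)
#     attachment = {
#         'title': info['title'],            # "<status>: <alert rule name>"
#         'title_link': info['title_link'],
#         'text': info['text'],              # "12 events in the last N minutes"
#         'footer_icon': info['logo_url'],
#         'ts': info['ts'],
#     }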
| bsd-3-clause | 5,311,452,514,871,115,000 | 34.830986 | 96 | 0.634434 | false |
cortesi/qtile | libqtile/utils.py | 1 | 8226 | # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import os
import operator
import sys
import warnings
import traceback
import importlib
import six
from six.moves import reduce
from . import xcbq
from .log_utils import logger
class QtileError(Exception):
pass
def lget(o, v):
try:
return o[v]
except (IndexError, TypeError):
return None
def translate_masks(modifiers):
"""
    Translate a list of modifier names into an or-ed
    bit representation.
"""
masks = []
for i in modifiers:
try:
masks.append(xcbq.ModMasks[i])
except KeyError:
raise KeyError("Unknown modifier: %s" % i)
if masks:
return reduce(operator.or_, masks)
else:
return 0
def translate_modifiers(mask):
r = []
for k, v in xcbq.ModMasks.items():
if mask & v:
r.append(k)
return r
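# Editor's note (illustrative): translate_masks and translate_modifiers are
# inverses over the modifier names defined in xcbq.ModMasks, e.g.
#
#     mask = translate_masks(["mod4", "shift"])        # or-ed bit mask
#     assert set(translate_modifiers(mask)) == {"mod4", "shift"}
#
# and an unknown name raises KeyError("Unknown modifier: ...").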
def shuffleUp(lst):
if len(lst) > 1:
c = lst[-1]
lst.remove(c)
lst.insert(0, c)
def shuffleDown(lst):
if len(lst) > 1:
c = lst[0]
lst.remove(c)
lst.append(c)
if sys.version_info < (3, 3):
class lru_cache(object):
"""
A decorator that implements a self-expiring LRU cache for class
methods (not functions!).
Cache data is tracked as attributes on the object itself. There is
therefore a separate cache for each object instance.
"""
def __init__(self, maxsize=128, typed=False):
self.size = maxsize
def __call__(self, f):
cache_name = "_cached_{0}".format(f.__name__)
cache_list_name = "_cachelist_{0}".format(f.__name__)
size = self.size
@functools.wraps(f)
def wrap(self, *args):
if not hasattr(self, cache_name):
setattr(self, cache_name, {})
setattr(self, cache_list_name, [])
cache = getattr(self, cache_name)
cache_list = getattr(self, cache_list_name)
if args in cache:
cache_list.remove(args)
cache_list.insert(0, args)
return cache[args]
else:
ret = f(self, *args)
cache_list.insert(0, args)
cache[args] = ret
if len(cache_list) > size:
d = cache_list.pop()
cache.pop(d)
return ret
return wrap
else:
from functools import lru_cache # noqa: F401
def rgb(x):
"""
Returns a valid RGBA tuple.
    Here are some valid specifications:
#ff0000
ff0000
with alpha: ff0000.5
(255, 0, 0)
(255, 0, 0, 0.5)
"""
if isinstance(x, (tuple, list)):
if len(x) == 4:
alpha = x[3]
else:
alpha = 1
return (x[0] / 255.0, x[1] / 255.0, x[2] / 255.0, alpha)
elif isinstance(x, six.string_types):
if x.startswith("#"):
x = x[1:]
if "." in x:
x, alpha = x.split(".")
alpha = float("0." + alpha)
else:
alpha = 1
if len(x) != 6:
raise ValueError("RGB specifier must be 6 characters long.")
vals = [int(i, 16) for i in (x[0:2], x[2:4], x[4:6])]
vals.append(alpha)
return rgb(vals)
raise ValueError("Invalid RGB specifier.")
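# Editor's note (illustrative): the specifier forms listed in the docstring
# all normalise to the same RGBA tuple, e.g.
#
#     rgb("#ff0000") == rgb("ff0000") == rgb((255, 0, 0)) == (1.0, 0.0, 0.0, 1)
#     rgb("ff0000.5")   # -> (1.0, 0.0, 0.0, 0.5); the ".5" suffix is the alpha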
def hex(x):
r, g, b, _ = rgb(x)
return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
def scrub_to_utf8(text):
if not text:
return u""
elif isinstance(text, six.text_type):
return text
else:
return text.decode("utf-8", "ignore")
# WARNINGS
class UnixCommandNotFound(Warning):
pass
class UnixCommandRuntimeError(Warning):
pass
def catch_exception_and_warn(warning=Warning, return_on_exception=None,
excepts=Exception):
"""
    .. function:: catch_exception_and_warn([warning, return_on_exception,
        excepts])
        Returns a decorator that wraps func, attempts to call it, catches
        the given exception (or exception tuple) and issues a warning
        instead, returning return_on_exception when that exception is raised.
:param func: a callable to be wrapped
:param warning: the warning class to issue if an exception is
raised
:param return_on_exception: the default return value of the function
if an exception is raised
:param excepts: an exception class (or tuple of exception classes) to
catch during the execution of func
:type excepts: Exception or tuple of Exception classes
:type warning: Warning
:rtype: a callable
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return_value = return_on_exception
try:
return_value = func(*args, **kwargs)
except excepts as err:
logger.warn(err.strerror)
warnings.warn(err.strerror, warning)
return return_value
return wrapper
return decorator
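# Editor's note: an illustrative, hypothetical use of the decorator above --
# warn (instead of raising) when a file is unreadable and fall back to a
# default value:
#
#     @catch_exception_and_warn(warning=UnixCommandRuntimeError,
#                               return_on_exception="",
#                               excepts=IOError)
#     def read_release():
#         with open("/etc/os-release") as f:
#             return f.read()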
def get_cache_dir():
"""
Returns the cache directory and create if it doesn't exists
"""
cache_directory = os.path.expandvars('$XDG_CACHE_HOME')
if cache_directory == '$XDG_CACHE_HOME':
# if variable wasn't set
cache_directory = os.path.expanduser("~/.cache")
cache_directory = os.path.join(cache_directory, 'qtile')
if not os.path.exists(cache_directory):
os.makedirs(cache_directory)
return cache_directory
def describe_attributes(obj, attrs, func=lambda x: x):
"""
Helper for __repr__ functions to list attributes with truthy values only
(or values that return a truthy value by func)
"""
pairs = []
for attr in attrs:
value = getattr(obj, attr, None)
if func(value):
pairs.append('%s=%s' % (attr, value))
return ', '.join(pairs)
def safe_import(module_names, class_name, globals_, fallback=None):
"""
    Try to import a module; if it fails with an ImportError,
    log a warning and log the traceback at DEBUG level
"""
module_path = '.'.join(module_names)
if type(class_name) is list:
for name in class_name:
safe_import(module_names, name, globals_)
return
package = __package__
# TODO: remove when we really want to drop 3.2 support
# python 3.2 don't set __package__
if not package:
package = __name__
try:
module = importlib.import_module(module_path, package)
globals_[class_name] = getattr(module, class_name)
except ImportError as error:
logger.warning("Unmet dependencies for optional Widget: '%s.%s', %s",
module_path, class_name, error)
logger.debug("%s", traceback.format_exc())
if fallback:
globals_[class_name] = fallback(module_path, class_name, error)
| mit | 4,046,155,213,415,968,000 | 29.354244 | 79 | 0.587649 | false |
ktheory/ipcat | IpDbGenerate.py | 1 | 1493 | #!/usr/bin/env python
import pprint
from operator import itemgetter
from socket import inet_aton
from struct import unpack
from urllib import urlopen
pp = pprint.PrettyPrinter(indent=4, width=50)
iplist = {}
#fetch remote datacenter list and convert to searchable datastructure
external_list = 'https://raw.github.com/client9/ipcat/master/datacenters.csv'
fp = urlopen(external_list)
for line in fp:
line = line.strip()
if not line or line[0] == '#':
continue
parts = line.split(",")
newrow = {
'_ip0': unpack("!L", inet_aton(parts[0]))[0],
'_ip1': unpack("!L", inet_aton(parts[1]))[0],
'owner': parts[3],
}
iplist[newrow['_ip0']] = newrow
#return the list of entries, sorted by the lowest ip in the range
iplist = [v for (k,v) in sorted(iplist.iteritems(), key=itemgetter(0))]
#autogenerate the class to perform lookups
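#editor's note (illustrative): the class printed below binary-searches the
#sorted ranges; usage of the emitted module looks roughly like
#
#    match = IpDb.find("203.0.113.10") # hypothetical address
#    owner = match['owner'] if match else None # None => not a known datacenter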
print """
from socket import inet_aton
from struct import unpack
from math import floor
class IpDb(object):
iplist = %s
@staticmethod
def find(ipstring):
ip = unpack("!L", inet_aton(ipstring))[0]
high = len(IpDb.iplist)-1
low = 0
while high >= low:
probe = int(floor((high+low)/2))
if IpDb.iplist[probe]['_ip0'] > ip:
high = probe - 1
elif IpDb.iplist[probe]['_ip1'] < ip:
low = probe + 1
else:
return IpDb.iplist[probe]
return None
""" % (pp.pformat(iplist), ) | gpl-3.0 | 2,662,191,604,263,788,500 | 27.188679 | 77 | 0.608171 | false |
Vaidyanath/tempest | tempest/api/compute/keypairs/test_keypairs_negative.py | 1 | 4003 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test
class KeyPairsNegativeTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
super(KeyPairsNegativeTestJSON, cls).resource_setup()
cls.client = cls.keypairs_client
def _create_keypair(self, keypair_name, pub_key=None):
self.client.create_keypair(keypair_name, pub_key)
self.addCleanup(self.client.delete_keypair, keypair_name)
@test.attr(type=['negative', 'gate'])
def test_keypair_create_with_invalid_pub_key(self):
# Keypair should not be created with a non RSA public key
k_name = data_utils.rand_name('keypair-')
pub_key = "ssh-rsa JUNK nova@ubuntu"
self.assertRaises(lib_exc.BadRequest,
self._create_keypair, k_name, pub_key)
@test.attr(type=['negative', 'gate'])
def test_keypair_delete_nonexistent_key(self):
# Non-existent key deletion should throw a proper error
k_name = data_utils.rand_name("keypair-non-existent-")
self.assertRaises(lib_exc.NotFound, self.client.delete_keypair,
k_name)
@test.attr(type=['negative', 'gate'])
def test_create_keypair_with_empty_public_key(self):
# Keypair should not be created with an empty public key
k_name = data_utils.rand_name("keypair-")
pub_key = ' '
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name, pub_key)
@test.attr(type=['negative', 'gate'])
def test_create_keypair_when_public_key_bits_exceeds_maximum(self):
# Keypair should not be created when public key bits are too long
k_name = data_utils.rand_name("keypair-")
pub_key = 'ssh-rsa ' + 'A' * 2048 + ' openstack@ubuntu'
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name, pub_key)
@test.attr(type=['negative', 'gate'])
def test_create_keypair_with_duplicate_name(self):
# Keypairs with duplicate names should not be created
k_name = data_utils.rand_name('keypair-')
self.client.create_keypair(k_name)
# Now try the same keyname to create another key
self.assertRaises(lib_exc.Conflict, self._create_keypair,
k_name)
self.client.delete_keypair(k_name)
@test.attr(type=['negative', 'gate'])
def test_create_keypair_with_empty_name_string(self):
# Keypairs with name being an empty string should not be created
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
'')
@test.attr(type=['negative', 'gate'])
def test_create_keypair_with_long_keynames(self):
# Keypairs with name longer than 255 chars should not be created
k_name = 'keypair-'.ljust(260, '0')
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name)
@test.attr(type=['negative', 'gate'])
def test_create_keypair_invalid_name(self):
# Keypairs with name being an invalid name should not be created
k_name = 'key_/.\@:'
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name)
| apache-2.0 | -6,362,632,938,452,893,000 | 41.585106 | 78 | 0.648514 | false |
aviarypl/mozilla-l10n-addons-server | src/olympia/files/utils.py | 1 | 51113 | import collections
import contextlib
import errno
import hashlib
import json
import os
import io
import re
import shutil
import stat
import struct
import tarfile
import tempfile
import zipfile
from datetime import datetime, timedelta
from six import text_type
from django import forms
from django.conf import settings
from django.core.files.storage import (
File as DjangoFile, default_storage as storage)
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import force_text
from django.utils.jslex import JsLexer
from django.utils.translation import ugettext
import flufl.lock
import rdflib
import six
from xml.parsers.expat import ExpatError
from defusedxml import minidom
from defusedxml.common import DefusedXmlException
import olympia.core.logger
from olympia import amo, core
from olympia.access import acl
from olympia.addons.utils import verify_mozilla_trademark
from olympia.amo.utils import decode_json, find_language, rm_local_tmp_dir
from olympia.applications.models import AppVersion
from olympia.lib.safe_xml import lxml
from olympia.lib.crypto.signing import get_signer_organizational_unit_name
from olympia.users.utils import (
mozilla_signed_extension_submission_allowed,
system_addon_submission_allowed)
from olympia.versions.compare import version_int as vint
log = olympia.core.logger.getLogger('z.files.utils')
class ParseError(forms.ValidationError):
pass
VERSION_RE = re.compile(r'^[-+*.\w]{,32}$')
SIGNED_RE = re.compile(r'^META\-INF/(\w+)\.(rsa|sf)$')
# This is essentially what Firefox matches
# (see toolkit/components/extensions/ExtensionUtils.jsm)
MSG_RE = re.compile(r'__MSG_(?P<msgid>[a-zA-Z0-9@_]+?)__')
# The default update URL.
default = (
'https://versioncheck.addons.mozilla.org/update/VersionCheck.php?'
'reqVersion=%REQ_VERSION%&id=%ITEM_ID%&version=%ITEM_VERSION%&'
'maxAppVersion=%ITEM_MAXAPPVERSION%&status=%ITEM_STATUS%&appID=%APP_ID%&'
'appVersion=%APP_VERSION%&appOS=%APP_OS%&appABI=%APP_ABI%&'
'locale=%APP_LOCALE%¤tAppVersion=%CURRENT_APP_VERSION%&'
'updateType=%UPDATE_TYPE%'
)
# number of times this lock has been aquired and not yet released
# could be helpful to debug potential race-conditions and multiple-locking
# scenarios.
_lock_count = {}
def get_filepath(fileorpath):
"""Resolve the actual file path of `fileorpath`.
This supports various input formats, a path, a django `File` object,
`olympia.files.File`, a `FileUpload` or just a regular file-like object.
"""
if isinstance(fileorpath, six.string_types):
return fileorpath
elif isinstance(fileorpath, DjangoFile):
return fileorpath
elif hasattr(fileorpath, 'file_path'): # File
return fileorpath.file_path
elif hasattr(fileorpath, 'path'): # FileUpload
return fileorpath.path
elif hasattr(fileorpath, 'name'): # file-like object
return fileorpath.name
return fileorpath
def id_to_path(pk):
"""
Generate a path from an id, to distribute folders in the file system.
1 => 1/1/1
12 => 2/12/12
123456 => 6/56/123456
"""
pk = six.text_type(pk)
path = [pk[-1]]
if len(pk) >= 2:
path.append(pk[-2:])
else:
path.append(pk)
path.append(pk)
return os.path.join(*path)
def get_file(fileorpath):
"""Get a file-like object, whether given a FileUpload object or a path."""
if hasattr(fileorpath, 'path'): # FileUpload
return storage.open(fileorpath.path)
if hasattr(fileorpath, 'name'):
return fileorpath
return storage.open(fileorpath)
def make_xpi(files):
file_obj = six.BytesIO()
zip_file = zipfile.ZipFile(file_obj, 'w')
for path, data in files.items():
zip_file.writestr(path, data)
zip_file.close()
file_obj.seek(0)
return file_obj
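# Editor's note (illustrative): make_xpi() builds an in-memory xpi/zip from a
# mapping of archive paths to contents; handy in tests, e.g. (hypothetical
# manifest contents):
#
#     xpi = make_xpi({'manifest.json': '{"name": "example", "version": "1.0"}'})
#     zipfile.ZipFile(xpi).namelist()   # -> ['manifest.json']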
class Extractor(object):
"""Extract add-on info from a manifest file."""
App = collections.namedtuple('App', 'appdata id min max')
@classmethod
def parse(cls, xpi_fobj, minimal=False):
zip_file = SafeZip(xpi_fobj)
certificate = os.path.join('META-INF', 'mozilla.rsa')
certificate_info = None
if zip_file.exists(certificate):
certificate_info = SigningCertificateInformation(
zip_file.read(certificate))
if zip_file.exists('manifest.json'):
data = ManifestJSONExtractor(
zip_file, certinfo=certificate_info).parse(minimal=minimal)
elif zip_file.exists('install.rdf'):
# Note that RDFExtractor is a misnomer, it receives the zip_file
# object because it might need to read other files than just
# the rdf to deal with dictionaries, complete themes etc.
data = RDFExtractor(
zip_file, certinfo=certificate_info).parse(minimal=minimal)
else:
raise forms.ValidationError(
'No install.rdf or manifest.json found')
return data
def get_appversions(app, min_version, max_version):
"""Return the `AppVersion`s that correspond to the given versions."""
qs = AppVersion.objects.filter(application=app.id)
min_appver = qs.get(version=min_version)
max_appver = qs.get(version=max_version)
return min_appver, max_appver
def get_simple_version(version_string):
"""Extract the version number without the ><= requirements.
This simply extracts the version number without the ><= requirement so
it will not be accurate for version requirements that are not >=, <= or
= to a version.
>>> get_simple_version('>=33.0a1')
'33.0a1'
"""
if not version_string:
return ''
return re.sub('[<=>]', '', version_string)
class RDFExtractor(object):
"""Extract add-on info from an install.rdf."""
# https://developer.mozilla.org/en-US/Add-ons/Install_Manifests#type
TYPES = {
'2': amo.ADDON_EXTENSION,
'4': amo.ADDON_THEME,
'8': amo.ADDON_LPAPP,
'64': amo.ADDON_DICT,
'128': amo.ADDON_EXTENSION, # Telemetry Experiment
'256': amo.ADDON_EXTENSION, # WebExtension Experiment
}
# Langpacks and dictionaries, if the type is properly set, are always
# considered restartless.
ALWAYS_RESTARTLESS_TYPES = ('8', '64', '128', '256')
# Telemetry and Web Extension Experiments types.
# See: bug 1220097 and https://github.com/mozilla/addons-server/issues/3315
EXPERIMENT_TYPES = ('128', '256')
manifest = u'urn:mozilla:install-manifest'
is_experiment = False # Experiment extensions: bug 1220097.
def __init__(self, zip_file, certinfo=None):
self.zip_file = zip_file
self.certinfo = certinfo
self.rdf = rdflib.Graph().parse(
data=force_text(zip_file.read('install.rdf')))
self.package_type = None
self.find_root() # Will set self.package_type
def parse(self, minimal=False):
data = {
'guid': self.find('id'),
'type': self.find_type(),
'version': self.find('version'),
'is_webextension': False,
'name': self.find('name'),
'summary': self.find('description'),
}
# Populate certificate information (e.g signed by mozilla or not)
# early on to be able to verify compatibility based on it
if self.certinfo is not None:
data.update(self.certinfo.parse())
if not minimal:
data.update({
'homepage': self.find('homepageURL'),
'is_restart_required': (
self.find('bootstrap') != 'true' and
self.find('type') not in self.ALWAYS_RESTARTLESS_TYPES),
'apps': self.apps(),
'is_multi_package': self.package_type == '32',
})
# We used to simply use the value of 'strictCompatibility' in the
# rdf to set strict_compatibility, but now we enable it or not for
# all legacy add-ons depending on their type. This will prevent
# them from being marked as compatible with Firefox 57.
# This is not true for legacy add-ons already signed by Mozilla.
# For these add-ons we just re-use to whatever
# `strictCompatibility` is set.
if data['type'] not in amo.NO_COMPAT:
if self.certinfo and self.certinfo.is_mozilla_signed_ou:
data['strict_compatibility'] = (
self.find('strictCompatibility') == 'true')
else:
data['strict_compatibility'] = True
else:
data['strict_compatibility'] = False
# `experiment` is detected in in `find_type`.
data['is_experiment'] = self.is_experiment
return data
def find_type(self):
# If the extension declares a type that we know about, use
# that.
# https://developer.mozilla.org/en-US/Add-ons/Install_Manifests#type
self.package_type = self.find('type')
if self.package_type and self.package_type in self.TYPES:
# If it's an experiment, we need to store that for later.
self.is_experiment = self.package_type in self.EXPERIMENT_TYPES
return self.TYPES[self.package_type]
# Look for Complete Themes.
is_complete_theme = (
self.zip_file.source.name.endswith('.jar') or
self.find('internalName')
)
if is_complete_theme:
return amo.ADDON_THEME
# Look for dictionaries.
is_dictionary = (
self.zip_file.exists('dictionaries/') and
any(fname.endswith('.dic') for fname in self.zip_file.namelist())
)
if is_dictionary:
return amo.ADDON_DICT
# Consult <em:type>.
return self.TYPES.get(self.package_type, amo.ADDON_EXTENSION)
def uri(self, name):
namespace = 'http://www.mozilla.org/2004/em-rdf'
return rdflib.term.URIRef('%s#%s' % (namespace, name))
def find_root(self):
# If the install-manifest root is well-defined, it'll show up when we
# search for triples with it. If not, we have to find the context that
# defines the manifest and use that as our root.
# http://www.w3.org/TR/rdf-concepts/#section-triples
manifest = rdflib.term.URIRef(self.manifest)
if list(self.rdf.triples((manifest, None, None))):
self.root = manifest
else:
self.root = next(self.rdf.subjects(None, self.manifest))
def find(self, name, ctx=None):
"""Like $() for install.rdf, where name is the selector."""
if ctx is None:
ctx = self.root
# predicate it maps to <em:{name}>.
match = list(self.rdf.objects(ctx, predicate=self.uri(name)))
# These come back as rdflib.Literal, which subclasses unicode.
if match:
return six.text_type(match[0])
def apps(self):
rv = []
seen_apps = set()
for ctx in self.rdf.objects(None, self.uri('targetApplication')):
app = amo.APP_GUIDS.get(self.find('id', ctx))
if not app:
continue
if app.guid not in amo.APP_GUIDS or app.id in seen_apps:
continue
if app not in amo.APP_USAGE:
# Ignore non-firefoxes compatibility.
continue
seen_apps.add(app.id)
try:
min_appver_text = self.find('minVersion', ctx)
max_appver_text = self.find('maxVersion', ctx)
# Rewrite '*' as '56.*' in legacy extensions, since they
# are not compatible with higher versions.
# We don't do that for legacy add-ons that are already
# signed by Mozilla to allow them for Firefox 57 onwards.
needs_max_56_star = (
app.id in (amo.FIREFOX.id, amo.ANDROID.id) and
max_appver_text == '*' and
not (self.certinfo and self.certinfo.is_mozilla_signed_ou)
)
if needs_max_56_star:
max_appver_text = '56.*'
min_appver, max_appver = get_appversions(
app, min_appver_text, max_appver_text)
except AppVersion.DoesNotExist:
continue
rv.append(Extractor.App(
appdata=app, id=app.id, min=min_appver, max=max_appver))
return rv
class ManifestJSONExtractor(object):
def __init__(self, zip_file, data='', certinfo=None):
self.zip_file = zip_file
self.certinfo = certinfo
if not data:
data = force_text(zip_file.read('manifest.json'))
lexer = JsLexer()
json_string = ''
# Run through the JSON and remove all comments, then try to read
# the manifest file.
        # Note that Firefox and the WebExtension spec only allow for
        # line comments (starting with `//`), not block comments (starting
        # with `/*`). We strip out both here anyway; block-level comments
        # are not allowed, and the linter flags them explicitly as an error
        # so the developer can change them to line-level comments.
for name, token in lexer.lex(data):
if name not in ('blockcomment', 'linecomment'):
json_string += token
self.data = decode_json(json_string)
def get(self, key, default=None):
return self.data.get(key, default)
@property
def is_experiment(self):
"""Return whether or not the webextension uses
experiments or theme experiments API.
In legacy extensions this is a different type, but for webextensions
we just look at the manifest."""
experiment_keys = ('experiment_apis', 'theme_experiment')
return any(bool(self.get(key)) for key in experiment_keys)
@property
def gecko(self):
"""Return the "applications|browser_specific_settings["gecko"]" part
of the manifest."""
parent_block = self.get(
'browser_specific_settings', self.get('applications', {}))
return parent_block.get('gecko', {})
@property
def guid(self):
return self.gecko.get('id', None)
@property
def type(self):
return (
amo.ADDON_LPAPP if 'langpack_id' in self.data
else amo.ADDON_STATICTHEME if 'theme' in self.data
else amo.ADDON_DICT if 'dictionaries' in self.data
else amo.ADDON_EXTENSION
)
@property
def strict_max_version(self):
return get_simple_version(self.gecko.get('strict_max_version'))
@property
def strict_min_version(self):
return get_simple_version(self.gecko.get('strict_min_version'))
def apps(self):
"""Get `AppVersion`s for the application."""
type_ = self.type
if type_ == amo.ADDON_LPAPP:
            # Langpacks are only compatible with Firefox desktop at the moment.
# https://github.com/mozilla/addons-server/issues/8381
# They are all strictly compatible with a specific version, so
# the default min version here doesn't matter much.
apps = (
(amo.FIREFOX, amo.DEFAULT_WEBEXT_MIN_VERSION),
)
elif type_ == amo.ADDON_STATICTHEME:
# Static themes are only compatible with Firefox desktop >= 53
# and Firefox for Android >=65.
apps = (
(amo.FIREFOX, amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX),
(amo.ANDROID, amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID),
)
elif type_ == amo.ADDON_DICT:
# WebExt dicts are only compatible with Firefox desktop >= 61.
apps = (
(amo.FIREFOX, amo.DEFAULT_WEBEXT_DICT_MIN_VERSION_FIREFOX),
)
else:
webext_min = (
amo.DEFAULT_WEBEXT_MIN_VERSION
if self.get('browser_specific_settings', None) is None
else amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
apps = (
(amo.FIREFOX, webext_min),
(amo.ANDROID, amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID),
)
doesnt_support_no_id = (
self.strict_min_version and
(vint(self.strict_min_version) <
vint(amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID))
)
if self.guid is None and doesnt_support_no_id:
raise forms.ValidationError(
ugettext('GUID is required for Firefox 47 and below.')
)
# If a minimum strict version is specified, it needs to be higher
# than the version when Firefox started supporting WebExtensions
# (We silently ignore apps that the add-on is not compatible with
# below, but we need to be at least compatible with Firefox...)
unsupported_no_matter_what = (
self.strict_min_version and vint(self.strict_min_version) <
vint(amo.DEFAULT_WEBEXT_MIN_VERSION))
if unsupported_no_matter_what:
msg = ugettext('Lowest supported "strict_min_version" is 42.0.')
raise forms.ValidationError(msg)
for app, default_min_version in apps:
if self.guid is None and not self.strict_min_version:
strict_min_version = max(amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID,
default_min_version)
else:
strict_min_version = (
self.strict_min_version or default_min_version)
strict_max_version = (
self.strict_max_version or amo.DEFAULT_WEBEXT_MAX_VERSION)
# Don't attempt to add support for this app to the WebExtension
# if the `strict_min_version` is below the default minimum version
# that is required to run WebExtensions (48.* for Android and 42.*
# for Firefox).
skip_app = (
self.strict_min_version and vint(self.strict_min_version) <
vint(default_min_version)
)
if skip_app:
continue
try:
min_appver, max_appver = get_appversions(
app, strict_min_version, strict_max_version)
yield Extractor.App(
appdata=app, id=app.id, min=min_appver, max=max_appver)
except AppVersion.DoesNotExist:
continue
def target_locale(self):
"""Guess target_locale for a dictionary from manifest contents."""
try:
dictionaries = self.get('dictionaries', {})
key = force_text(list(dictionaries.keys())[0])
return key[:255]
except (IndexError, UnicodeDecodeError):
# This shouldn't happen: the linter should prevent it, but
# just in case, handle the error (without bothering with
# translations as users should never see this).
raise forms.ValidationError('Invalid dictionaries object.')
def parse(self, minimal=False):
data = {
'guid': self.guid,
'type': self.type,
'version': self.get('version', ''),
'is_webextension': True,
'name': self.get('name'),
'summary': self.get('description'),
'homepage': self.get('homepage_url'),
'default_locale': self.get('default_locale'),
}
# Populate certificate information (e.g signed by mozilla or not)
# early on to be able to verify compatibility based on it
if self.certinfo is not None:
data.update(self.certinfo.parse())
if self.type == amo.ADDON_STATICTHEME:
data['theme'] = self.get('theme', {})
if not minimal:
data.update({
'is_restart_required': False,
'apps': list(self.apps()),
# Langpacks have strict compatibility enabled, rest of
# webextensions don't.
'strict_compatibility': data['type'] == amo.ADDON_LPAPP,
'is_experiment': self.is_experiment,
})
if self.type == amo.ADDON_EXTENSION:
# Only extensions have permissions and content scripts
data.update({
'permissions': self.get('permissions', []),
'content_scripts': self.get('content_scripts', []),
})
elif self.type == amo.ADDON_DICT:
data['target_locale'] = self.target_locale()
return data
class SigningCertificateInformation(object):
    """Process the signature to determine if the addon is a Mozilla Signed
extension, so is signed already with a special certificate. We want to
know this so we don't write over it later, and stop unauthorised people
from submitting them to AMO."""
def __init__(self, certificate_data):
pkcs7 = certificate_data
self.cert_ou = get_signer_organizational_unit_name(pkcs7)
@property
def is_mozilla_signed_ou(self):
return self.cert_ou == 'Mozilla Extensions'
def parse(self):
return {'is_mozilla_signed_extension': self.is_mozilla_signed_ou}
def extract_search(content):
def _text(tag):
try:
return dom.getElementsByTagName(tag)[0].childNodes[0].wholeText
except (IndexError, AttributeError):
raise forms.ValidationError(
ugettext('Could not parse uploaded file, missing or empty '
'<%s> element') % tag)
# Only catch basic errors, most of that validation already happened in
# devhub.tasks:annotate_search_plugin_validation
try:
dom = minidom.parse(content)
except DefusedXmlException:
raise forms.ValidationError(
ugettext('OpenSearch: XML Security error.'))
except ExpatError:
raise forms.ValidationError(ugettext('OpenSearch: XML Parse Error.'))
return {
'name': _text('ShortName'),
'description': _text('Description')
}
def parse_search(fileorpath, addon=None):
try:
f = get_file(fileorpath)
data = extract_search(f)
except forms.ValidationError:
raise
except Exception:
log.error('OpenSearch parse error', exc_info=True)
raise forms.ValidationError(ugettext('Could not parse uploaded file.'))
return {'guid': None,
'type': amo.ADDON_SEARCH,
'name': data['name'],
'is_restart_required': False,
'is_webextension': False,
'summary': data['description'],
'version': datetime.now().strftime('%Y%m%d')}
class FSyncMixin(object):
"""Mixin that implements fsync for file extractions.
    This mixin uses the `_extract_member` interface used by `zipfile` and
    `tarfile`, so it's somewhat universal.
We need this to make sure that on EFS / NFS all data is immediately
written to avoid any data loss on the way.
"""
def _fsync_dir(self, path):
descriptor = os.open(path, os.O_DIRECTORY)
try:
os.fsync(descriptor)
except OSError as exc:
# On some filesystem doing a fsync on a directory
# raises an EINVAL error. Ignoring it is usually safe.
if exc.errno != errno.EINVAL:
raise
os.close(descriptor)
def _fsync_file(self, path):
descriptor = os.open(path, os.O_RDONLY)
os.fsync(descriptor)
os.close(descriptor)
def _extract_member(self, member, targetpath, *args, **kwargs):
"""Extends `ZipFile._extract_member` to call fsync().
        For every extracted file we are ensuring that its data has been
written to disk. We are doing this to avoid any data inconsistencies
that we have seen in the past.
        To do this correctly we are fsync()ing all directories as well;
        only that will ensure we have a durable write for that specific file.
This is inspired by https://github.com/2ndquadrant-it/barman/
(see backup.py -> backup_fsync_and_set_sizes and utils.py)
"""
super(FSyncMixin, self)._extract_member(
member, targetpath, *args, **kwargs)
parent_dir = os.path.dirname(os.path.normpath(targetpath))
if parent_dir:
self._fsync_dir(parent_dir)
self._fsync_file(targetpath)
class FSyncedZipFile(FSyncMixin, zipfile.ZipFile):
"""Subclass of ZipFile that calls `fsync` for file extractions."""
pass
class FSyncedTarFile(FSyncMixin, tarfile.TarFile):
"""Subclass of TarFile that calls `fsync` for file extractions."""
pass
def archive_member_validator(archive, member):
"""Validate a member of an archive member (TarInfo or ZipInfo)."""
filename = getattr(member, 'filename', getattr(member, 'name', None))
filesize = getattr(member, 'file_size', getattr(member, 'size', None))
if filename is None or filesize is None:
raise forms.ValidationError(ugettext('Unsupported archive type.'))
try:
force_text(filename)
except UnicodeDecodeError:
        # We can't log the filename unfortunately since its encoding
        # is obviously broken :-/
log.error('Extraction error, invalid file name encoding in '
'archive: %s' % archive)
# L10n: {0} is the name of the invalid file.
msg = ugettext(
'Invalid file name in archive. Please make sure '
'all filenames are utf-8 or latin1 encoded.')
raise forms.ValidationError(msg.format(filename))
if '..' in filename or filename.startswith('/'):
log.error('Extraction error, invalid file name (%s) in '
'archive: %s' % (filename, archive))
# L10n: {0} is the name of the invalid file.
msg = ugettext('Invalid file name in archive: {0}')
raise forms.ValidationError(msg.format(filename))
if filesize > settings.FILE_UNZIP_SIZE_LIMIT:
log.error('Extraction error, file too big (%s) for file (%s): '
'%s' % (archive, filename, filesize))
# L10n: {0} is the name of the invalid file.
raise forms.ValidationError(
ugettext(
'File exceeding size limit in archive: {0}'
).format(filename))
class SafeZip(object):
def __init__(self, source, mode='r', force_fsync=False):
self.source = source
self.info_list = None
self.mode = mode
self.force_fsync = force_fsync
self.is_valid = self.initialize_and_validate()
def initialize_and_validate(self):
"""
Runs some overall archive checks.
"""
# Shortcut to avoid expensive check over and over again
if getattr(self, 'is_valid', False):
return True
if self.force_fsync:
zip_file = FSyncedZipFile(self.source, self.mode)
else:
zip_file = zipfile.ZipFile(self.source, self.mode)
info_list = zip_file.infolist()
for info in info_list:
archive_member_validator(self.source, info)
self.info_list = info_list
self.zip_file = zip_file
return True
def is_signed(self):
"""Tells us if an addon is signed."""
finds = []
for info in self.info_list:
match = SIGNED_RE.match(info.filename)
if match:
name, ext = match.groups()
# If it's rsa or sf, just look for the opposite.
if (name, {'rsa': 'sf', 'sf': 'rsa'}[ext]) in finds:
return True
finds.append((name, ext))
def extract_from_manifest(self, manifest):
"""
Extracts a file given a manifest such as:
jar:chrome/de.jar!/locale/de/browser/
or
locale/de/browser
"""
type, path = manifest.split(':')
jar = self
if type == 'jar':
parts = path.split('!')
for part in parts[:-1]:
jar = self.__class__(six.BytesIO(jar.zip_file.read(part)))
path = parts[-1]
return jar.read(path[1:] if path.startswith('/') else path)
def extract_info_to_dest(self, info, dest):
"""Extracts the given info to a directory and checks the file size."""
self.zip_file.extract(info, dest)
dest = os.path.join(dest, info.filename)
if not os.path.isdir(dest):
# Directories consistently report their size incorrectly.
size = os.stat(dest)[stat.ST_SIZE]
if size != info.file_size:
log.error('Extraction error, uncompressed size: %s, %s not %s'
% (self.source, size, info.file_size))
raise forms.ValidationError(ugettext('Invalid archive.'))
def extract_to_dest(self, dest):
"""Extracts the zip file to a directory."""
for info in self.info_list:
self.extract_info_to_dest(info, dest)
def close(self):
self.zip_file.close()
@property
def filelist(self):
return self.zip_file.filelist
@property
def namelist(self):
return self.zip_file.namelist
def exists(self, path):
try:
return self.zip_file.getinfo(path)
except KeyError:
return False
def read(self, path):
return self.zip_file.read(path)
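# Illustrative usage sketch (added for clarity, not part of the original
# module; the paths are hypothetical): a SafeZip wraps an uploaded archive,
# validates every member up front in initialize_and_validate(), and only then
# is extracted, e.g.
#   safe = SafeZip('/tmp/upload.xpi')
#   safe.extract_to_dest('/tmp/extracted')
# A forms.ValidationError is raised before anything is written if a member
# has a broken filename encoding, escapes the archive root or exceeds the
# configured size limit.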
def extract_zip(source, remove=False, force_fsync=False):
"""Extracts the zip file. If remove is given, removes the source file."""
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
try:
zip_file = SafeZip(source, force_fsync=force_fsync)
zip_file.extract_to_dest(tempdir)
except Exception:
rm_local_tmp_dir(tempdir)
raise
if remove:
os.remove(source)
return tempdir
def extract_extension_to_dest(source, dest=None, force_fsync=False):
"""Extract `source` to `dest`.
`source` can be an extension or extension source, can be a zip, tar
(gzip, bzip) or a search provider (.xml file).
    Note that this doesn't verify the contents of `source` except
    that it requires something valid to be extracted.
:returns: Extraction target directory, if `dest` is `None` it'll be a
temporary directory.
"""
target, tempdir = None, None
if dest is None:
target = tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
else:
target = dest
try:
if source.endswith(('.zip', '.xpi')):
zip_file = SafeZip(source, force_fsync=force_fsync)
zip_file.extract_to_dest(target)
elif source.endswith(('.tar.gz', '.tar.bz2', '.tgz')):
tarfile_class = (
tarfile.TarFile
if not force_fsync else FSyncedTarFile)
with tarfile_class.open(source) as archive:
archive.extractall(target)
elif source.endswith('.xml'):
shutil.copy(source, target)
if force_fsync:
FSyncMixin()._fsync_file(target)
except (zipfile.BadZipfile, tarfile.ReadError, IOError):
if tempdir is not None:
rm_local_tmp_dir(tempdir)
raise forms.ValidationError(
ugettext('Invalid or broken archive.'))
return target
def copy_over(source, dest):
"""
Copies from the source to the destination, removing the destination
if it exists and is a directory.
"""
if os.path.exists(dest) and os.path.isdir(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
# mkdtemp will set the directory permissions to 700
# for the webserver to read them, we need 755
os.chmod(dest, stat.S_IRWXU | stat.S_IRGRP |
stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
shutil.rmtree(source)
def get_all_files(folder, strip_prefix='', prefix=None):
"""Return all files in a file/directory tree.
:param folder: The folder of which to return the file-tree.
:param strip_prefix str: A string to strip in case we're adding a custom
                             `prefix`. Doesn't have any implications if
`prefix` isn't given.
:param prefix: A custom prefix to add to all files and folders.
"""
all_files = []
# Not using os.path.walk so we get just the right order.
def iterate(path):
path_dirs, path_files = storage.listdir(path)
for dirname in sorted(path_dirs):
full = os.path.join(path, dirname)
all_files.append(full)
iterate(full)
for filename in sorted(path_files):
full = os.path.join(path, filename)
all_files.append(full)
iterate(folder)
if prefix is not None:
        # This is magic: strip the prefix, e.g. /tmp/, and prepend the prefix
all_files = [
os.path.join(prefix, fname[len(strip_prefix) + 1:])
for fname in all_files]
return all_files
def extract_xpi(xpi, path):
"""Extract all files from `xpi` to `path`.
This can be removed in favour of our already extracted git-repositories
    once we have landed and tested them in production.
"""
tempdir = extract_zip(xpi)
all_files = get_all_files(tempdir)
copy_over(tempdir, path)
return all_files
def parse_xpi(xpi, addon=None, minimal=False, user=None):
"""Extract and parse an XPI. Returns a dict with various properties
describing the xpi.
Will raise ValidationError if something went wrong while parsing.
If minimal is True, it avoids validation as much as possible (still raising
ValidationError for hard errors like I/O or invalid json/rdf) and returns
only the minimal set of properties needed to decide what to do with the
add-on: guid, version and is_webextension.
"""
try:
xpi = get_file(xpi)
xpi_info = Extractor.parse(xpi, minimal=minimal)
except forms.ValidationError:
raise
except IOError as e:
        if len(e.args) < 2:
            err, strerror = None, e.args[0]
        else:
            err, strerror = e.args
log.error('I/O error({0}): {1}'.format(err, strerror))
raise forms.ValidationError(ugettext(
'Could not parse the manifest file.'))
except Exception:
log.error('XPI parse error', exc_info=True)
raise forms.ValidationError(ugettext(
'Could not parse the manifest file.'))
if minimal:
return xpi_info
return check_xpi_info(xpi_info, addon, xpi, user=user)
def check_xpi_info(xpi_info, addon=None, xpi_file=None, user=None):
from olympia.addons.models import Addon, DeniedGuid
guid = xpi_info['guid']
is_webextension = xpi_info.get('is_webextension', False)
# If we allow the guid to be omitted we assume that one was generated
# or existed before and use that one.
    # An example is WebExtensions that don't require a guid but we generate
# one once they're uploaded. Now, if you update that WebExtension we
# just use the original guid.
if addon and not guid and is_webextension:
xpi_info['guid'] = guid = addon.guid
if not guid and not is_webextension:
raise forms.ValidationError(ugettext('Could not find an add-on ID.'))
if guid:
current_user = core.get_user()
if current_user:
deleted_guid_clashes = Addon.unfiltered.exclude(
authors__id=current_user.id).filter(guid=guid)
else:
deleted_guid_clashes = Addon.unfiltered.filter(guid=guid)
if addon and addon.guid != guid:
msg = ugettext(
'The add-on ID in your manifest.json or install.rdf (%s) '
'does not match the ID of your add-on on AMO (%s)')
raise forms.ValidationError(msg % (guid, addon.guid))
if (not addon and
# Non-deleted add-ons.
(Addon.objects.filter(guid=guid).exists() or
# DeniedGuid objects for deletions for Mozilla disabled add-ons
DeniedGuid.objects.filter(guid=guid).exists() or
# Deleted add-ons that don't belong to the uploader.
deleted_guid_clashes.exists())):
raise forms.ValidationError(ugettext('Duplicate add-on ID found.'))
if len(xpi_info['version']) > 32:
raise forms.ValidationError(
ugettext('Version numbers should have fewer than 32 characters.'))
if not VERSION_RE.match(xpi_info['version']):
raise forms.ValidationError(
ugettext('Version numbers should only contain letters, numbers, '
'and these punctuation characters: +*.-_.'))
if is_webextension and xpi_info.get('type') == amo.ADDON_STATICTHEME:
max_size = settings.MAX_STATICTHEME_SIZE
if xpi_file and os.path.getsize(xpi_file.name) > max_size:
raise forms.ValidationError(
ugettext(u'Maximum size for WebExtension themes is {0}.')
.format(filesizeformat(max_size)))
if xpi_file:
# Make sure we pass in a copy of `xpi_info` since
# `resolve_webext_translations` modifies data in-place
translations = Addon.resolve_webext_translations(
xpi_info.copy(), xpi_file)
verify_mozilla_trademark(translations['name'], core.get_user())
# Parse the file to get and validate package data with the addon.
if not acl.submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext(u'You cannot submit this type of add-on'))
if not addon and not system_addon_submission_allowed(
user, xpi_info):
guids = ' or '.join(
'"' + guid + '"' for guid in amo.SYSTEM_ADDON_GUIDS)
raise forms.ValidationError(
ugettext(u'You cannot submit an add-on with a guid ending '
u'%s' % guids))
if not mozilla_signed_extension_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext(u'You cannot submit a Mozilla Signed Extension'))
return xpi_info
def parse_addon(pkg, addon=None, user=None, minimal=False):
"""
Extract and parse a file path, UploadedFile or FileUpload. Returns a dict
with various properties describing the add-on.
Will raise ValidationError if something went wrong while parsing.
`addon` parameter is mandatory if the file being parsed is going to be
attached to an existing Addon instance.
    `user` parameter is mandatory unless the `minimal` parameter is True. It should
point to the UserProfile responsible for the upload.
If `minimal` parameter is True, it avoids validation as much as possible
(still raising ValidationError for hard errors like I/O or invalid
json/rdf) and returns only the minimal set of properties needed to decide
what to do with the add-on (the exact set depends on the add-on type, but
    it should always contain at least guid, type, version and is_webextension).
"""
name = getattr(pkg, 'name', pkg)
if name.endswith('.xml'):
parsed = parse_search(pkg, addon)
else:
parsed = parse_xpi(pkg, addon, minimal=minimal, user=user)
if not minimal:
if user is None:
# This should never happen and means there is a bug in
# addons-server itself.
raise forms.ValidationError(ugettext('Unexpected error.'))
# FIXME: do the checks depending on user here.
if addon and addon.type != parsed['type']:
msg = ugettext(
"<em:type> in your install.rdf (%s) "
"does not match the type of your add-on on AMO (%s)")
raise forms.ValidationError(msg % (parsed['type'], addon.type))
return parsed
def get_sha256(file_obj, block_size=io.DEFAULT_BUFFER_SIZE):
"""Calculate a sha256 hash for `file_obj`.
`file_obj` must be an open file descriptor. The caller needs to take
care of closing it properly.
"""
hash_ = hashlib.sha256()
for chunk in iter(lambda: file_obj.read(block_size), b''):
hash_.update(chunk)
return hash_.hexdigest()
def update_version_number(file_obj, new_version_number):
"""Update the manifest to have the new version number."""
# Create a new xpi with the updated version.
updated = u'{0}.updated_version_number'.format(file_obj.file_path)
# Copy the original XPI, with the updated install.rdf or package.json.
with zipfile.ZipFile(file_obj.file_path, 'r') as source:
file_list = source.infolist()
with zipfile.ZipFile(updated, 'w', zipfile.ZIP_DEFLATED) as dest:
for file_ in file_list:
content = source.read(file_.filename)
if file_.filename == 'install.rdf':
content = _update_version_in_install_rdf(
content, new_version_number)
if file_.filename in ['package.json', 'manifest.json']:
content = _update_version_in_json_manifest(
content, new_version_number)
dest.writestr(file_, content)
# Move the updated file to the original file.
shutil.move(updated, file_obj.file_path)
def write_crx_as_xpi(chunks, target):
"""Extract and strip the header from the CRX, convert it to a regular ZIP
archive, then write it to `target`. Read more about the CRX file format:
https://developer.chrome.com/extensions/crx
"""
# First we open the uploaded CRX so we can see how much we need
# to trim from the header of the file to make it a valid ZIP.
with tempfile.NamedTemporaryFile('rwb+', dir=settings.TMP_PATH) as tmp:
for chunk in chunks:
tmp.write(chunk)
tmp.seek(0)
header = tmp.read(16)
header_info = struct.unpack('4cHxII', header)
public_key_length = header_info[5]
signature_length = header_info[6]
# This is how far forward we need to seek to extract only a
# ZIP file from this CRX.
start_position = 16 + public_key_length + signature_length
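        # For reference (added comment, based on the public CRX2 format
        # description rather than project-specific code): the 16-byte header
        # holds the magic "Cr24", a format version and the two lengths
        # unpacked above, and is followed by the public key and signature
        # themselves, so the embedded ZIP starts at 16 + key + signature.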
hash = hashlib.sha256()
tmp.seek(start_position)
# Now we open the Django storage and write our real XPI file.
with storage.open(target, 'wb') as file_destination:
bytes = tmp.read(65536)
# Keep reading bytes and writing them to the XPI.
while bytes:
hash.update(bytes)
file_destination.write(bytes)
bytes = tmp.read(65536)
return hash
def _update_version_in_install_rdf(content, new_version_number):
"""Change the version number in the install.rdf provided."""
# We need to use an XML parser, and not a RDF parser, because our
# install.rdf files aren't really standard (they use default namespaces,
# don't namespace the "about" attribute... rdflib can parse them, and can
# now even serialize them, but the end result could be very different from
    # the format we need).
tree = lxml.etree.fromstring(content)
# There's two different formats for the install.rdf: the "standard" one
# uses nodes for each item (like <em:version>1.2</em:version>), the other
# alternate one sets attributes on the <RDF:Description
# RDF:about="urn:mozilla:install-manifest"> element.
# Get the version node, if it's the common format, or the Description node
# that has the "em:version" attribute if it's the alternate format.
namespace = 'http://www.mozilla.org/2004/em-rdf#'
version_uri = '{{{0}}}version'.format(namespace)
for node in tree.xpath('//em:version | //*[@em:version]',
namespaces={'em': namespace}):
if node.tag == version_uri: # Common format, version is a node.
node.text = new_version_number
else: # Alternate format, version is an attribute.
node.set(version_uri, new_version_number)
return lxml.etree.tostring(tree, xml_declaration=True, encoding='utf-8')
def _update_version_in_json_manifest(content, new_version_number):
"""Change the version number in the json manifest file provided."""
updated = json.loads(content)
if 'version' in updated:
updated['version'] = new_version_number
return json.dumps(updated)
def extract_translations(file_obj):
"""Extract all translation messages from `file_obj`.
    :param file_obj: the file object of the add-on to extract the messages from.
"""
xpi = get_filepath(file_obj)
messages = {}
try:
with zipfile.ZipFile(xpi, 'r') as source:
file_list = source.namelist()
# Fetch all locales the add-on supports
# see https://developer.chrome.com/extensions/i18n#overview-locales
# for more details on the format.
locales = {
name.split('/')[1] for name in file_list
if name.startswith('_locales/') and
name.endswith('/messages.json')}
for locale in locales:
corrected_locale = find_language(locale)
# Filter out languages we don't support.
if not corrected_locale:
continue
fname = '_locales/{0}/messages.json'.format(locale)
try:
data = source.read(fname)
messages[corrected_locale] = decode_json(data)
except (ValueError, KeyError):
# `ValueError` thrown by `decode_json` if the json is
# invalid and `KeyError` thrown by `source.read`
# usually means the file doesn't exist for some reason,
# we fail silently
continue
except IOError:
pass
return messages
def resolve_i18n_message(message, messages, locale, default_locale=None):
"""Resolve a translatable string in an add-on.
    This matches ``__MSG_extensionName__``-like names and returns the correct
    translation for `locale`.
    :param locale: The locale to fetch the translation for. If ``None``
                   (default) ``settings.LANGUAGE_CODE`` is used.
:param messages: A dictionary of messages, e.g the return value
of `extract_translations`.
"""
if not message or not isinstance(message, six.string_types):
# Don't even attempt to extract invalid data.
# See https://github.com/mozilla/addons-server/issues/3067
# for more details
return message
match = MSG_RE.match(message)
if match is None:
return message
locale = find_language(locale)
if default_locale:
default_locale = find_language(default_locale)
msgid = match.group('msgid')
default = {'message': message}
if locale in messages:
message = messages[locale].get(msgid, default)
elif default_locale in messages:
message = messages[default_locale].get(msgid, default)
if not isinstance(message, dict):
# Fallback for invalid message format, should be caught by
# addons-linter in the future but we'll have to handle it.
# See https://github.com/mozilla/addons-server/issues/3485
return default['message']
return message['message']
def get_background_images(file_obj, theme_data, header_only=False):
"""Extract static theme header image from `file_obj` and return in dict."""
xpi = get_filepath(file_obj)
if not theme_data:
# we might already have theme_data, but otherwise get it from the xpi.
try:
parsed_data = parse_xpi(xpi, minimal=True)
theme_data = parsed_data.get('theme', {})
except forms.ValidationError:
# If we can't parse the existing manifest safely return.
return {}
images_dict = theme_data.get('images', {})
# Get the reference in the manifest. theme_frame is the Chrome variant.
header_url = images_dict.get(
'headerURL', images_dict.get('theme_frame'))
# And any additional backgrounds too.
additional_urls = (
images_dict.get('additional_backgrounds', []) if not header_only
else [])
image_urls = [header_url] + additional_urls
images = {}
try:
with zipfile.ZipFile(xpi, 'r') as source:
for url in image_urls:
_, file_ext = os.path.splitext(text_type(url).lower())
if file_ext not in amo.THEME_BACKGROUND_EXTS:
# Just extract image files.
continue
try:
images[url] = source.read(url)
except KeyError:
pass
except IOError as ioerror:
log.debug(ioerror)
return images
@contextlib.contextmanager
def atomic_lock(lock_dir, lock_name, lifetime=60):
"""A atomic, NFS safe implementation of a file lock.
Uses `flufl.lock` under the hood. Can be used as a context manager::
with atomic_lock(settings.TMP_PATH, 'extraction-1234'):
extract_xpi(...)
    :return: `True` if the lock was attained and we now own it,
             `False` if there is an already existing lock.
"""
lock_name = lock_name + '.lock'
count = _lock_count.get(lock_name, 0)
log.debug('Acquiring lock %s, count is %d.' % (lock_name, count))
lock_name = os.path.join(lock_dir, lock_name)
lock = flufl.lock.Lock(lock_name, lifetime=timedelta(seconds=lifetime))
try:
# set `timeout=0` to avoid any process blocking but catch the
# TimeOutError raised instead.
lock.lock(timeout=timedelta(seconds=0))
except flufl.lock.AlreadyLockedError:
# This process already holds the lock
yield False
except flufl.lock.TimeOutError:
# Some other process holds the lock.
# Let's break the lock if it has expired. Unfortunately
# there's a bug in flufl.lock so let's do this manually.
# Bug: https://gitlab.com/warsaw/flufl.lock/merge_requests/1
release_time = lock._releasetime
max_release_time = release_time + flufl.lock._lockfile.CLOCK_SLOP
if (release_time != -1 and datetime.now() > max_release_time):
            # Break the lock and try to acquire again
lock._break()
lock.lock(timeout=timedelta(seconds=0))
yield lock.is_locked
else:
# Already locked
yield False
else:
        # This is usually `True`, but just in case some weird `lifetime`
        # values were set we return whether we really attained the lock.
yield lock.is_locked
if lock.is_locked:
log.debug('Releasing lock %s.' % lock.details[2])
lock.unlock()
| bsd-3-clause | 889,460,815,450,143,100 | 36.092163 | 79 | 0.610647 | false |
alexsavio/aizkolari | aizkolari_featsets.py | 1 | 3744 | #!/usr/bin/python
#from IPython.core.debugger import Tracer; debug_here = Tracer()
import os, subprocess, re
import numpy as np
import aizkolari_utils as au
#measures = ['jacs', 'modulatedgm', 'norms', 'trace', 'geodan']
measures = ['jacs', 'modulatedgm']
#measures = ['trace']
dists = ['pearson', 'bhattacharyya', 'ttest']
#dists = ['pearson']
#dists = ['bhattacharyya']
#dists = ['ttest']
studies = ['0001', '0002', '0003', '0004', '0005', '0006', '0007', '0008', '0009', '0010']
#studies = ['all']
thrs = [80, 90, 95, 99, 99.5, 99.9, 100]
#thrs = [100]
#studies = ['all']
hostname = au.get_hostname()
if hostname == 'gicmed' or hostname == 'corsair':
aizko_root = '/home/alexandre/Dropbox/Documents/phd/work/aizkolari/'
rootdir = '/media/oasis_post'
rootdatadir = '/data/oasis_jesper_features'
elif hostname == 'giclus1':
aizko_root = '/home/alexandre/work/aizkolari/'
rootdir = '/home/alexandre/work/oasis_jesper_features'
elif hostname == 'laptosh':
aizko_root = '/home/alexandre/Dropbox/Documents/phd/work/aizkolari/'
rootdir = '/media/oasis/oasis_jesper_features'
aizko_featsets = aizko_root + 'aizkolari_extract_manyfeatsets.py'
globalmask = rootdir + os.path.sep + 'MNI152_T1_1mm_brain_mask_dil.nii.gz'
#otypes = ['svmperf', 'svmperf']
otypes = ['numpybin', 'numpybin']
agesf = rootdir + os.path.sep + 'ages'
gendersf = rootdir + os.path.sep + 'genders'
scaled = True
for m in measures:
for s in studies:
for d in dists:
datadir = rootdatadir + os.path.sep + m
cvdir = rootdir + os.path.sep + 'cv_' + m
tstdir = cvdir + os.path.sep + d + '_' + s
subjsf = cvdir + os.path.sep + 'all.txt'
excluf = tstdir + os.path.sep + 'exclude'
outdir = tstdir
maskregex = 'thrP.nii.gz'
maskdir = outdir
lst = os.listdir (maskdir)
            lst = au.find(lst, maskregex)
mlst = []
for t in thrs:
regex = str(t) + maskregex
                mlst.extend(au.find(lst, regex))
maskargs = []
for k in mlst:
maskpath = maskdir + os.path.sep + k
                prefix = au.remove_ext(os.path.basename(k))
arg = ['-m', maskpath, '-p', prefix]
maskargs.extend(arg)
if s == 'all':
otype = otypes[1]
else:
otype = otypes[0]
if scaled:
comm = [aizko_featsets, '-s', subjsf, '-o', outdir, '-d', datadir, '-g', globalmask, '-t', otype, '--scale', '--scale_min', '-1', '--scale_max', '1']
else:
comm = [aizko_featsets, '-s', subjsf, '-o', outdir, '-d', datadir, '-g', globalmask, '-t', otype]
comm.extend(maskargs)
if os.path.exists(excluf):
comm.extend(['-e', excluf])
exclude = np.loadtxt(excluf, dtype=int)
ages = np.loadtxt(agesf, dtype=int)
genders = np.loadtxt(gendersf, dtype=str)
gends = np.zeros(len(genders), dtype=int)
                gends[genders == 'M'] = 1
                gends[genders == 'F'] = 0
trainages = ages [exclude == 0]
testages = ages [exclude == 1]
traingenders = gends [exclude == 0]
testgenders = gends [exclude == 1]
np.savetxt (outdir + os.path.sep + 'extra_trainset_feats_fold' + str(s) + '.txt', np.transpose(np.array([trainages,traingenders])), fmt='%i %s')
np.savetxt (outdir + os.path.sep + 'extra_testset_feats_fold' + str(s) + '.txt', np.transpose(np.array([ testages, testgenders])), fmt='%i %s')
print (comm)
#print (comm)
proc = subprocess.call(comm)
| bsd-3-clause | -1,374,853,581,680,293,000 | 31.556522 | 161 | 0.549947 | false |
muhkuh-sys/org.muhkuh.tools-flasher | regression/common/simpelTools/src/command_carrier.py | 1 | 13099 | # coding=utf-8
import datetime, copy, time
from common.simpelTools.src.logging_default import *
from common.simpelTools.src.platform_detect import *
from common.simpelTools.src.magic_strings import strip_command_arguments, helper_type_conversion
'''
This module is intended to run commands on a command line.
It is designed to make it easy for the programmer to control the
os.execute commands. In principle, just set up the structure,
pass it to the executor, and look again in the structure to see
the result of the executed command.
This version favours storing the output directly to stdout,
to a Python variable or to a file.
'''
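# Illustrative usage sketch (added for clarity, not part of the original
# module; the command and log path are made up):
#   carrier = command_carrier()
#   carrier.cmd = 'uname -a'
#   carrier.path_stdout = '/tmp/uname_stdout.txt'
#   runCommandBase(carrier)
#   print(carrier.ret_val, carrier.time_execute)
# Per stream, exactly one of the three output modes (direct console, file
# path, internal string) is used, following the priority described below in
# command_carrier.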
# todo: move to simple tools
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename
"""
import unicodedata
if type(value) is str:
value = u'%s'%value
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
value = unicode(re.sub('[-\s]+', '-', value))
return value
class command_carrier:
    # todo: add a possibility to run from any location!
ID = None # a ID used for the command to be executed.
time_start = None
time_end = None
time_execute = None
def time_calc(self):
self.time_execute = self.time_end - self.time_start
cmd = '' # command to be executed
executed = False
change_dir_to_bin = False
ret_val = None # returnvalue of executed program
str_stdout = '' # if used, will return the output string of the program
str_stderr = '' # if used, will return the output string of the program
    # (prio 1) direct log to upper console
direct_out = False # activates direct stdout/err for seeing the progress, but not capturing it.
direct_err = False
# (prio 2) path for output files
path_stdout = None # path for storing logmessages from stdout (if set will not print them)
path_stderr = None # path for storing logmessages from stderr (if set will not print them)
# (prio 3) structure internal string is filled
# info:
# direct:
# + you see the progress
# - you can't use it in the calling python script
# path:
    #     + you have a file containing the output. easier to copy, parse, universal, good for large output
    #     - you do not have the file in the program. you have to maintain the location, overwriting, old files etc.
    #   subprocess.PIPE (default)
    #     + You have the output returned in a variable usable in python
    #     + recommended way for small output (some 100 lines)
    #     - heavy for huge outputs, don't know if safe against buffer overflow
# used in function
    stream_stdout = None  # stream to be used for communication. may be the default stdout or a file to write to.
    stream_stderr = None  # stream to be used for communication. may be the default stderr or a file to write to.
def helper_gen_headder(cmd_carry, form):
tmp = "%s\n[pirate][file][%s][cmd]: %s\n\n---BEGIN LOG---\n" % (
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), form, cmd_carry.cmd)
tmp_str = str(tmp)
return tmp_str
def helper_gen_footer(cmd_carry, form):
tmp = "---END LOG---\n\n%s\n[pirate][file][%s][cmd]: %s\nReal runtime: %fs\nReturn value: %d" % (
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), form, cmd_carry.cmd,cmd_carry.time_execute,cmd_carry.ret_val)
tmp_str = str(tmp)
return tmp_str
def runCommandBase(cmd_carry):
'''
    Basic function for executing an external program.
    This does not alter a thing, just run and return all values.
    :param cmd_carry: Structure carrying several infos about the executed program
    :return: class command_carrier  Representation of command and result
    :warning: for large outputs please provide a file to pipe to.
'''
tmp_localwork_dir = os.getcwd() # backup path
if cmd_carry.change_dir_to_bin:
tmp_command_list = cmd_carry.cmd.split()
tmp_binary = os.path.dirname(tmp_command_list[0])
os.chdir(tmp_binary)
cmd_carry.executed = True
# stdout:
if cmd_carry.direct_out:
        cmd_carry.stream_stdout = sys.stdout  # send directly to console; p.communicate is empty + shell = True
elif cmd_carry.path_stdout:
# there is a path given where to redirect stdout to.
cmd_carry.stream_stdout = open(os.path.realpath(cmd_carry.path_stdout), 'wb', 0)
cmd_carry.stream_stdout.write(
str.encode(helper_gen_headder(cmd_carry, 'stdout'))) # write headline to file containing command call
else:
# will be loaded after execution is done. Stored in variable
cmd_carry.stream_stdout = subprocess.PIPE # use the popen.PIPE This is a constant
# stderr:
if cmd_carry.direct_err:
        cmd_carry.stream_stderr = sys.stderr  # send directly to console; p.communicate is empty + shell = True
elif cmd_carry.path_stderr:
# there is a path given where to redirect stdout to.
cmd_carry.stream_stderr = open(cmd_carry.path_stderr, 'wb', 0)
cmd_carry.stream_stderr.write(str.encode(helper_gen_headder(cmd_carry, 'stderr')))
# cmd_carry.stream_stderr.write(helper_gen_headder(cmd_carry, 'stderr'))
else:
# cmd_carry.stream_stderr = sys.stderr # send directly o python stdout without using communicate
cmd_carry.stream_stderr = subprocess.PIPE # use the popen.PIPE This is a constant
cmd_carry.time_start = time.time()
p = subprocess.Popen(cmd_carry.cmd,
stdout=cmd_carry.stream_stdout,
stderr=cmd_carry.stream_stderr,
                         shell=True)  # looks like shell=True is needed for the return value, also opens a new shell instead of inheriting the old window
    # the following call returns when the process has finished
    b_stdout, b_stderr = p.communicate()  # retrieve the output values, if any
cmd_carry.time_end = time.time()
cmd_carry.time_calc()
cmd_carry.executed = True # just mark the command as executed. For future use
cmd_carry.ret_val = p.returncode
# finish logfiles:
if cmd_carry.path_stderr:
# there is a path given where to redirect stdout to.
# cmd_carry.stream_stderr = open(cmd_carry.path_stderr, 'wb', 0)
cmd_carry.stream_stderr.write(str.encode(helper_gen_footer(cmd_carry, 'stderr')))
cmd_carry.stream_stderr.close()
if cmd_carry.path_stdout:
# there is a path given where to redirect stdout to.
#cmd_carry.stream_stdout = open(cmd_carry.path_stdout, 'wb', 0)
cmd_carry.stream_stdout.write(
str.encode(helper_gen_footer(cmd_carry, 'stdout'))) # write headline to file containing command call
cmd_carry.stream_stdout.close()
if b_stdout:
cmd_carry.str_stdout = helper_type_conversion(b_stdout)
if b_stderr:
cmd_carry.str_stderr = helper_type_conversion(b_stderr)
# import chardet): chardet.detect(open(in_file, 'rb').read())['encoding']
# https://stackoverflow.com/questions/3323770/character-detection-in-a-text-file-in-python-using-the-universal-encoding-detect/3323810#3323810
if cmd_carry.change_dir_to_bin:
os.chdir(tmp_localwork_dir)
return cmd_carry.ret_val
def batch_command_base(cmd_carry_default, array_command, path_logfile = '.', logfile_prefix = 'log'):
"""
    Provide a sequence of commands. Every single command will be invoked one after the other.
    The result is an array consisting of the command carriers used for running every command.
    The logfiles are named <logfile_prefix>_<index>_stdout.txt and <logfile_prefix>_<index>_stderr.txt.
    The command is reduced to a character set which is accepted by all filesystems (in theory).
    :param cmd_carry_default: The default command carrier, which must be provided. It's taken as default setting.
    :param array_command: The array containing the commands as one string per command line
    :param path_logfile: an existing folder where to store the logfiles.
    :param logfile_prefix: The prefix with which every filename starts. Say an ID, or the name of the test.
    :return: An integer error code or an array.
"""
# check
if os.path.isdir(path_logfile) is False:
l.error("Provided path for logfiles does not exist. exit cause of error!")
return 42
result = []
l.info("Representation of executed commands are reduced by the absoulte path. Only filename and binary are displayed!")
for idx, cmd in enumerate(array_command):
l.info("[% 2d]run %s"%(idx, strip_command_arguments(cmd)))
tmp_batch_cpy = copy.deepcopy(cmd_carry_default)
tmp_batch_cpy.cmd = cmd
tmp_batch_cpy.ID = idx
command_representation = strip_command_arguments (tmp_batch_cpy.cmd).replace(' ','_')
tmp_batch_cpy.path_stdout = os.path.join(path_logfile, '%s_%d_stdout.txt'%(logfile_prefix,idx))
tmp_batch_cpy.path_stderr = os.path.join(path_logfile, '%s_%d_stderr.txt'%(logfile_prefix,idx))
runCommandBase(tmp_batch_cpy)
result.append(tmp_batch_cpy)
l.info("[% 2d] return value: %s" % (idx, tmp_batch_cpy.ret_val))
return result
def eval_batch_result(array_command_carry,path_logfile=None,logfile_prefix=None,additional_info=None):
"""
    Display info from all tests in a kind of tabular form.
    :param array_command_carry: An array consisting of command carriers
    :return: 0 in case all commands had return value == 0, else the number of failed commands. -1 in case of an error.
"""
count_failed_commands = 0
h_summary = None
# open summary
if path_logfile:
if os.path.isdir(path_logfile):
summary_file = os.path.join(path_logfile, '%s_00_summary'%(logfile_prefix))
l.info("write summary to %s"%summary_file)
            # open log file
h_summary = open(summary_file, 'wb', 0)
else:
l.error("provided path for batch summary does not exist! %s"%path_logfile)
return -1
for entry in array_command_carry:
res = ''
if entry.ret_val:
count_failed_commands += 1
res = 'Err: %03d'%entry.ret_val
else:
res = ' OK '
line = "[%s][R:%s][%s](t:%f): %s"%(entry.ID,entry.ret_val,res,entry.time_execute,entry.cmd)
l.info(line)
if h_summary:
tmp = ''
if type(line) is type(u''):
tmp = line
else:
tmp = str.encode("%s" % line)
h_summary.write(u"%s\n"%tmp)# write
    # write additional info
    if h_summary and additional_info is not None:
        if type(additional_info) is type(u''):
            tmp = additional_info
        else:
            tmp = str.encode("%s" % additional_info)
        h_summary.write(u"%s\n" % tmp)  # write
# close summary
if h_summary:
h_summary.close()
return count_failed_commands
def test_batch_command_base():
cmd_carry_default = command_carrier()
log_path = '/tmp/log'
array_command = ['uname','sleep 0.001','rm hallo','ls hallo', 'touch hallo', 'ls hallo']
result = batch_command_base(cmd_carry_default, array_command,log_path)
    i = eval_batch_result(result, log_path, 'summaryPrefix')
    if i == 0:
        l.error("Upper commands should have caused an error!")
else:
l.info("simple test logging passed!")
def test_run_command_Base():
test_ret_val = 42
cmd_to_test_stdout_stderr = 'echo "stdoutmessage" && (>&2 echo "error_message") && exit %d' % test_ret_val
test_command = command_carrier()
test_command.cmd = cmd_to_test_stdout_stderr
runCommandBase(test_command)
for i in range(1):
assert i == 0
x = 0
if test_command.str_stdout.strip() != b'stdoutmessage':
print("failed 1")
x = 1
if test_command.str_stderr.strip() != b'error_message':
print("failed 2")
x = 1
if x == 1:
break
else:
print("[TB][simple IO]: everything ok!")
print("Delaytest")
'''
Delaytest:
should print following sequence in a time difference of 5sec
Delaytest
wait 5 sec
Output 1
Output 2
Output END
'''
sec_to_wait = 2
delay_test = "echo 'Following messages should appear in distance of {0} sec' && sleep {0} && echo 'Output 1' && sleep {0} && echo 'Output 2' && sleep {0} && echo 'Output END'".format(
sec_to_wait)
test_delay = command_carrier()
test_delay.cmd = delay_test
test_delay.direct_out = True
runCommandBase(test_delay)
stdout_file = 'stdout.txt'
stderr_file = 'stderr.txt'
test_file = command_carrier()
test_file.path_stdout = stdout_file
test_file.path_stderr = stderr_file
test_file.cmd = cmd_to_test_stdout_stderr
runCommandBase(test_file)
for i in range(1):
assert i == 0
x = 0
if test_file.str_stdout != None:
print("failed 1")
x = 3
if test_file.str_stderr != None:
print("failed 2")
x = 4
if test_file.ret_val != test_ret_val:
print("failed 2")
x = 5
if x == 1:
break
else:
print("[TB][file IO]: everything ok! (please check ffor file existence and content)")
'''
err.file:
2019-04-25 12:32:39
[pirate][file][stderr][cmd]: echo "stdoutmessage" && (>&2 echo "error_message") && exit 42
error_message
out.file:
2019-04-25 12:32:39
[pirate][file][stdout][cmd]: echo "stdoutmessage" && (>&2 echo "error_message") && exit 42
stdoutmessage
'''
print("Done")
if __name__ == "__main__":
# exit(test_batch_command_base())
exit(test_batch_command_base())
| gpl-2.0 | -5,850,925,678,298,282,000 | 33.290576 | 185 | 0.678143 | false |
JaapJoris/bps | numberedmodel/models.py | 1 | 1584 | from django.db import models
class NumberedModel(models.Model):
def number_with_respect_to(self):
return self.__class__.objects.all()
def _renumber(self):
'''Renumbers the queryset while preserving the instance's number'''
queryset = self.number_with_respect_to()
field_name = self.__class__._meta.ordering[-1].lstrip('-')
this_nr = getattr(self, field_name)
if this_nr is None:
this_nr = len(queryset) + 1
# The algorithm: loop over the queryset and set each object's
# number to the counter. When an object's number equals the
# number of this instance, set this instance's number to the
# counter, increment the counter by 1, and finish the loop
counter = 1
inserted = False
for other in queryset.exclude(pk=self.pk):
other_nr = getattr(other, field_name)
if counter >= this_nr and not inserted:
setattr(self, field_name, counter)
inserted = True
counter += 1
if other_nr != counter:
setattr(other, field_name, counter)
super(NumberedModel, other).save()
counter += 1
if not inserted:
setattr(self, field_name, counter)
def save(self, *args, **kwargs):
self._renumber()
super(NumberedModel, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
super(NumberedModel, self).delete(*args, **kwargs)
self._renumber()
class Meta:
abstract = True
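# Illustrative subclass (added as an example, not part of the original app;
# the model and field names are made up). A concrete model would override
# number_with_respect_to() and put the numbering field last in Meta.ordering,
# which is the field _renumber() picks up via _meta.ordering[-1]:
#
#   class Chapter(NumberedModel):
#       book = models.ForeignKey('Book', on_delete=models.CASCADE)
#       number = models.PositiveIntegerField(blank=True)
#
#       def number_with_respect_to(self):
#           return self.book.chapter_set.all()
#
#       class Meta:
#           ordering = ['book', 'number']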
| agpl-3.0 | 6,606,948,540,788,826,000 | 35 | 75 | 0.583333 | false |
tonybaloney/st2 | st2common/tests/unit/test_logger.py | 1 | 13949 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import sys
import json
import uuid
import tempfile
import logging as logbase
import mock
from oslo_config import cfg
from st2common import log as logging
from st2common.logging.formatters import ConsoleLogFormatter
from st2common.logging.formatters import GelfLogFormatter
from st2common.logging.formatters import MASKED_ATTRIBUTE_VALUE
from st2common.models.db.action import ActionDB
from st2common.models.db.execution import ActionExecutionDB
import st2tests.config as tests_config
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
RESOURCES_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '../resources'))
CONFIG_FILE_PATH = os.path.join(RESOURCES_DIR, 'logging.conf')
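# Note (added comment): judging by how setUp() formats the config text below,
# the logging.conf template is expected to contain two positional placeholders
# that are filled with the info log path and the audit log path respectively.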
MOCK_MASKED_ATTRIBUTES_BLACKLIST = [
'blacklisted_1',
'blacklisted_2',
'blacklisted_3',
]
class MockRecord(object):
levelno = 40
msg = None
exc_info = None
exc_text = None
def getMessage(self):
return self.msg
class LoggerTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def setUp(self):
super(LoggerTestCase, self).setUp()
self.config_text = open(CONFIG_FILE_PATH).read()
self.cfg_fd, self.cfg_path = tempfile.mkstemp()
self.info_log_fd, self.info_log_path = tempfile.mkstemp()
self.audit_log_fd, self.audit_log_path = tempfile.mkstemp()
with open(self.cfg_path, 'a') as f:
f.write(self.config_text.format(self.info_log_path,
self.audit_log_path))
def tearDown(self):
self._remove_tempfile(self.cfg_fd, self.cfg_path)
self._remove_tempfile(self.info_log_fd, self.info_log_path)
self._remove_tempfile(self.audit_log_fd, self.audit_log_path)
super(LoggerTestCase, self).tearDown()
def _remove_tempfile(self, fd, path):
os.close(fd)
os.unlink(path)
def test_logger_setup_failure(self):
config_file = '/tmp/abc123'
self.assertFalse(os.path.exists(config_file))
self.assertRaises(Exception, logging.setup, config_file)
def test_logger_set_level(self):
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
self.assertEqual(log.getEffectiveLevel(), logbase.DEBUG)
log.setLevel(logbase.INFO)
self.assertEqual(log.getEffectiveLevel(), logbase.INFO)
log.setLevel(logbase.WARN)
self.assertEqual(log.getEffectiveLevel(), logbase.WARN)
log.setLevel(logbase.ERROR)
self.assertEqual(log.getEffectiveLevel(), logbase.ERROR)
log.setLevel(logbase.CRITICAL)
self.assertEqual(log.getEffectiveLevel(), logbase.CRITICAL)
log.setLevel(logbase.AUDIT)
self.assertEqual(log.getEffectiveLevel(), logbase.AUDIT)
def test_log_info(self):
"""Test that INFO log entry does not go to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.info(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertNotIn(msg, audit_log_entries)
def test_log_critical(self):
"""Test that CRITICAL log entry does not go to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.critical(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertNotIn(msg, audit_log_entries)
def test_log_audit(self):
"""Test that AUDIT log entry goes to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.audit(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertIn(msg, audit_log_entries)
class ConsoleLogFormatterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_format(self):
formatter = ConsoleLogFormatter()
# No extra attributes
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
message = formatter.format(record=record)
self.assertEqual(message, mock_message)
# Some extra attributes
mock_message = 'test message 2'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._user_id = 1
record._value = 'bar'
        record.ignored = 'foo'  # this one is ignored since it doesn't have a prefix
message = formatter.format(record=record)
expected = 'test message 2 (value=\'bar\',user_id=1)'
self.assertEqual(message, expected)
@mock.patch('st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST',
MOCK_MASKED_ATTRIBUTES_BLACKLIST)
def test_format_blacklisted_attributes_are_masked(self):
formatter = ConsoleLogFormatter()
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._blacklisted_1 = 'test value 1'
record._blacklisted_2 = 'test value 2'
record._blacklisted_3 = {'key1': 'val1', 'blacklisted_1': 'val2', 'key3': 'val3'}
record._foo1 = 'bar'
message = formatter.format(record=record)
expected = ("test message 1 (blacklisted_1='********',blacklisted_2='********',"
"blacklisted_3={'key3': 'val3', 'key1': 'val1', 'blacklisted_1': '********'},"
"foo1='bar')")
self.assertEqual(message, expected)
@mock.patch('st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST',
MOCK_MASKED_ATTRIBUTES_BLACKLIST)
def test_format_custom_blacklist_attributes_are_masked(self):
cfg.CONF.set_override(group='log', name='mask_secrets_blacklist',
override=['blacklisted_4', 'blacklisted_5'])
formatter = ConsoleLogFormatter()
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._blacklisted_1 = 'test value 1'
record._blacklisted_2 = 'test value 2'
record._blacklisted_3 = {'key1': 'val1', 'blacklisted_1': 'val2', 'key3': 'val3'}
record._blacklisted_4 = 'fowa'
record._blacklisted_5 = 'fiva'
record._foo1 = 'bar'
message = formatter.format(record=record)
expected = ("test message 1 (foo1='bar',blacklisted_1='********',blacklisted_2='********',"
"blacklisted_3={'key3': 'val3', 'key1': 'val1', 'blacklisted_1': '********'},"
"blacklisted_4='********',blacklisted_5='********')")
self.assertEqual(message, expected)
@mock.patch('st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST',
MOCK_MASKED_ATTRIBUTES_BLACKLIST)
def test_format_secret_action_parameters_are_masked(self):
formatter = ConsoleLogFormatter()
mock_message = 'test message 1'
parameters = {
'parameter1': {
'type': 'string',
'required': False
},
'parameter2': {
'type': 'string',
'required': False,
'secret': True
}
}
mock_action_db = ActionDB(pack='testpack', name='test.action', parameters=parameters)
action = mock_action_db.to_serializable_dict()
parameters = {
'parameter1': 'value1',
'parameter2': 'value2'
}
mock_action_execution_db = ActionExecutionDB(action=action, parameters=parameters)
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._action_execution_db = mock_action_execution_db
expected_msg_part = "'parameters': {'parameter1': 'value1', 'parameter2': '********'}"
message = formatter.format(record=record)
self.assertTrue('test message 1' in message)
self.assertTrue(expected_msg_part in message)
class GelfLogFormatterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_format(self):
formatter = GelfLogFormatter()
expected_keys = ['version', 'host', 'short_message', 'full_message',
'timestamp', 'level']
# No extra attributes
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertTrue(key in parsed)
self.assertEqual(parsed['short_message'], mock_message)
self.assertEqual(parsed['full_message'], mock_message)
# Some extra attributes
mock_message = 'test message 2'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._user_id = 1
record._value = 'bar'
        record.ignored = 'foo'  # this one is ignored since it doesn't have a prefix
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertTrue(key in parsed)
self.assertEqual(parsed['short_message'], mock_message)
self.assertEqual(parsed['full_message'], mock_message)
self.assertEqual(parsed['_user_id'], 1)
self.assertEqual(parsed['_value'], 'bar')
self.assertTrue('ignored' not in parsed)
# Record with an exception
mock_exception = Exception('mock exception bar')
try:
raise mock_exception
except Exception:
mock_exc_info = sys.exc_info()
# Some extra attributes
mock_message = 'test message 3'
record = MockRecord()
record.msg = mock_message
record.exc_info = mock_exc_info
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertTrue(key in parsed)
self.assertEqual(parsed['short_message'], mock_message)
self.assertTrue(mock_message in parsed['full_message'])
self.assertTrue('Traceback' in parsed['full_message'])
self.assertTrue('_exception' in parsed)
self.assertTrue('_traceback' in parsed)
def test_extra_object_serialization(self):
class MyClass1(object):
def __repr__(self):
return 'repr'
class MyClass2(object):
def to_dict(self):
return 'to_dict'
class MyClass3(object):
def to_serializable_dict(self, mask_secrets=False):
return 'to_serializable_dict'
formatter = GelfLogFormatter()
record = MockRecord()
record.msg = 'message'
record._obj1 = MyClass1()
record._obj2 = MyClass2()
record._obj3 = MyClass3()
message = formatter.format(record=record)
parsed = json.loads(message)
self.assertEqual(parsed['_obj1'], 'repr')
self.assertEqual(parsed['_obj2'], 'to_dict')
self.assertEqual(parsed['_obj3'], 'to_serializable_dict')
@mock.patch('st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST',
MOCK_MASKED_ATTRIBUTES_BLACKLIST)
def test_format_blacklisted_attributes_are_masked(self):
formatter = GelfLogFormatter()
# Some extra attributes
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._blacklisted_1 = 'test value 1'
record._blacklisted_2 = 'test value 2'
record._blacklisted_3 = {'key1': 'val1', 'blacklisted_1': 'val2', 'key3': 'val3'}
record._foo1 = 'bar'
message = formatter.format(record=record)
parsed = json.loads(message)
self.assertEqual(parsed['_blacklisted_1'], MASKED_ATTRIBUTE_VALUE)
self.assertEqual(parsed['_blacklisted_2'], MASKED_ATTRIBUTE_VALUE)
self.assertEqual(parsed['_blacklisted_3']['key1'], 'val1')
self.assertEqual(parsed['_blacklisted_3']['blacklisted_1'], MASKED_ATTRIBUTE_VALUE)
self.assertEqual(parsed['_blacklisted_3']['key3'], 'val3')
self.assertEqual(parsed['_foo1'], 'bar')
# Assert that the original dict is left unmodified
self.assertEqual(record._blacklisted_1, 'test value 1')
self.assertEqual(record._blacklisted_2, 'test value 2')
self.assertEqual(record._blacklisted_3['key1'], 'val1')
self.assertEqual(record._blacklisted_3['blacklisted_1'], 'val2')
self.assertEqual(record._blacklisted_3['key3'], 'val3')
| apache-2.0 | 3,687,753,405,142,566,000 | 34.766667 | 99 | 0.623772 | false |
terrencepreilly/darglint | darglint/integrity_checker.py | 1 | 19027 | """Defines IntegrityChecker."""
import re
import concurrent.futures
from typing import ( # noqa: F401
Any,
cast,
List,
Optional,
Set,
)
from .function_description import ( # noqa: F401
FunctionDescription,
)
from .docstring.base import BaseDocstring
from .docstring.docstring import Docstring
from .docstring.sections import Sections
from .docstring.style import DocstringStyle
from .errors import ( # noqa: F401
DarglintError,
ExcessParameterError,
ExcessRaiseError,
ExcessReturnError,
ExcessVariableError,
ExcessYieldError,
MissingParameterError,
MissingRaiseError,
MissingReturnError,
MissingYieldError,
ParameterTypeMismatchError,
ParameterTypeMissingError,
ReturnTypeMismatchError,
)
from .error_report import (
ErrorReport,
)
from .config import get_config
from .strictness import Strictness
SYNTAX_NOQA = re.compile(r'#\s*noqa:\sS001')
EXPLICIT_GLOBAL_NOQA = re.compile(r'#\s*noqa:\s*\*')
BARE_NOQA = re.compile(r'#\s*noqa([^:]|$)')
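# Illustrative examples (added comment) of comments that the regexes above
# are meant to match:
#   "# noqa: S001" -> SYNTAX_NOQA
#   "# noqa: *"    -> EXPLICIT_GLOBAL_NOQA
#   "# noqa"       -> BARE_NOQA (no explicit error code given)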
class IntegrityChecker(object):
"""Checks the integrity of the docstring compared to the definition."""
def __init__(self, raise_errors=False):
# type: (bool) -> None
"""Create a new checker for the given function and docstring.
Args:
raise_errors: If true, we will allow ParserExceptions to
propagate, crashing darglint. This is mostly useful
for development.
"""
self.errors = list() # type: List[DarglintError]
self._sorted = True
self.config = get_config()
self.raise_errors = raise_errors
# TODO: Move max workers into a configuration option.
# A thread pool for handling checks. Tasks are added to the
# pool when `schedule` is executed, if it has a docstring.
# The pool is collected when `get_error_report_string` is called.
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)
def schedule(self, function):
# type: (FunctionDescription) -> None
if self._skip_checks(function):
return
self.executor.submit(self.run_checks, function)
def run_checks(self, function):
# type: (FunctionDescription) -> None
"""Run checks on the given function.
Args:
function: A function whose docstring we are verifying.
Raises:
Exception: If the docstring format isn't supported.
"""
if self._skip_checks(function):
return
function_docstring = cast(str, function.docstring)
if self.config.style == DocstringStyle.GOOGLE:
docstring = Docstring.from_google(
function_docstring,
)
elif self.config.style == DocstringStyle.SPHINX:
docstring = Docstring.from_sphinx(
function_docstring,
)
self._check_variables(docstring, function)
elif self.config.style == DocstringStyle.NUMPY:
docstring = Docstring.from_numpy(
function_docstring,
)
else:
raise Exception('Unsupported docstring format.')
if self.config.strictness != Strictness.FULL_DESCRIPTION:
if docstring.satisfies_strictness(self.config.strictness):
return
if docstring.ignore_all:
return
self._check_parameters(docstring, function)
self._check_parameter_types(docstring, function)
self._check_parameter_types_missing(docstring, function)
self._check_return(docstring, function)
self._check_return_type(docstring, function)
self._check_yield(docstring, function)
self._check_raises(docstring, function)
self._check_style(docstring, function)
self._sorted = False
def _skip_checks(self, function):
# type: (FunctionDescription) -> bool
        no_docstring = function.docstring is None
skip_by_regex = (
self.config.ignore_regex and
re.match(self.config.ignore_regex, function.name)
)
skip_property = (
self.config.ignore_properties and function.is_property
)
        return bool(no_docstring or skip_by_regex or skip_property)
def _check_parameter_types(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
error_code = ParameterTypeMismatchError.error_code
if self._ignore_error(docstring, ParameterTypeMismatchError):
return
argument_types = dict(
zip(docstring.get_items(Sections.ARGUMENTS_SECTION) or [],
docstring.get_types(Sections.ARGUMENTS_SECTION) or [])
)
doc_arg_types = list() # type: List[Optional[str]]
for name in function.argument_names:
if name not in argument_types:
doc_arg_types.append(None)
else:
doc_arg_types.append(argument_types[name])
noqa_lookup = docstring.get_noqas()
for name, expected, actual in zip(
function.argument_names,
function.argument_types,
doc_arg_types,
):
if expected is None or actual is None:
continue
noqa_exists = error_code in noqa_lookup
name_has_noqa = noqa_exists and name in noqa_lookup[error_code]
if not (expected == actual or name_has_noqa):
default_line_numbers = docstring.get_line_numbers(
'arguments-section'
)
line_numbers = docstring.get_line_numbers_for_value(
'ident',
name,
) or default_line_numbers
self.errors.append(
ParameterTypeMismatchError(
function.function,
name=name,
expected=expected,
actual=actual,
line_numbers=line_numbers,
)
)
def _check_parameter_types_missing(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
error_code = ParameterTypeMissingError.error_code
if self._ignore_error(docstring, ParameterTypeMissingError):
return
argument_types = dict(
zip(docstring.get_items(Sections.ARGUMENTS_SECTION) or [],
docstring.get_types(Sections.ARGUMENTS_SECTION) or [])
)
noqa_lookup = docstring.get_noqas()
noqa_exists = error_code in noqa_lookup
for name, argument_type in argument_types.items():
name_has_no_qa = noqa_exists and name in noqa_lookup[error_code]
if argument_type is None and not name_has_no_qa:
default_line_numbers = docstring.get_line_numbers(
'arguments-section'
)
line_numbers = docstring.get_line_numbers_for_value(
'ident',
name,
) or default_line_numbers
self.errors.append(
ParameterTypeMissingError(
function.function,
name=name,
line_numbers=line_numbers,
)
)
def _check_return_type(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
if function.is_abstract:
return
if self._ignore_error(docstring, ReturnTypeMismatchError):
return
fun_type = function.return_type
doc_type = docstring.get_types(Sections.RETURNS_SECTION)
if not doc_type or isinstance(doc_type, list):
doc_type = None
if fun_type is not None and doc_type is not None:
if fun_type != doc_type:
line_numbers = docstring.get_line_numbers(
'returns-section',
)
self.errors.append(
ReturnTypeMismatchError(
function.function,
expected=fun_type,
actual=doc_type,
line_numbers=line_numbers,
),
)
def _check_yield(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
if function.is_abstract:
return
doc_yield = docstring.get_section(Sections.YIELDS_SECTION)
fun_yield = function.has_yield
ignore_missing = self._ignore_error(docstring, MissingYieldError)
ignore_excess = self._ignore_error(docstring, ExcessYieldError)
if fun_yield and not doc_yield and not ignore_missing:
self.errors.append(
MissingYieldError(function.function)
)
elif doc_yield and not fun_yield and not ignore_excess:
line_numbers = docstring.get_line_numbers(
'yields-section',
)
self.errors.append(
ExcessYieldError(
function.function,
line_numbers=line_numbers,
)
)
def _check_return(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
if function.is_abstract:
return
# If there is an empty return, we don't want to make any
# judgement about whether it should be reported, as it is
# probably an early escape.
if function.has_empty_return:
return
doc_return = docstring.get_section(Sections.RETURNS_SECTION)
fun_return = function.has_return
ignore_missing = self._ignore_error(docstring, MissingReturnError)
ignore_excess = self._ignore_error(docstring, ExcessReturnError)
if fun_return and not doc_return and not ignore_missing:
self.errors.append(
MissingReturnError(function.function)
)
elif doc_return and not fun_return and not ignore_excess:
line_numbers = docstring.get_line_numbers(
'returns-section',
)
self.errors.append(
ExcessReturnError(
function.function,
line_numbers=line_numbers,
)
)
def _check_parameters(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
docstring_arguments = set(docstring.get_items(
Sections.ARGUMENTS_SECTION
) or [])
actual_arguments = set(function.argument_names)
missing_in_doc = actual_arguments - docstring_arguments
missing_in_doc = self._remove_ignored(
docstring,
missing_in_doc,
MissingParameterError,
)
# Get a default line number.
default_line_numbers = docstring.get_line_numbers(
'arguments-section'
)
for missing in missing_in_doc:
# See if the documented argument begins with one
# or two asterisks.
if (
(missing.startswith('**')
and missing[2:] in docstring_arguments)
or (missing.startswith('*')
and missing[1:] in docstring_arguments)
):
continue
# Don't require private arguments.
if missing.startswith('_'):
continue
# We use the default line numbers because a missing
# parameter, by definition, will not have line numbers.
self.errors.append(
MissingParameterError(
function.function,
missing,
line_numbers=default_line_numbers
)
)
missing_in_function = docstring_arguments - actual_arguments
missing_in_function = self._remove_ignored(
docstring,
missing_in_function,
ExcessParameterError,
)
for missing in missing_in_function:
# If the actual argument begins with asterisk(s),
# then check to make sure the unasterisked version
# is not missing.
if (
'*' + missing in actual_arguments or
'**' + missing in actual_arguments
):
continue
line_numbers = docstring.get_line_numbers_for_value(
'arguments-section',
missing,
) or default_line_numbers
self.errors.append(
ExcessParameterError(
function.function,
missing,
line_numbers=line_numbers,
)
)
def _check_variables(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
described_variables = set(
docstring.get_items(Sections.VARIABLES_SECTION) or []
) # type: Set[str]
actual_variables = set(function.variables)
excess_in_doc = described_variables - actual_variables
# Get a default line number.
default_line_numbers = docstring.get_line_numbers(
'variables-section',
)
for excess in excess_in_doc:
line_numbers = docstring.get_line_numbers_for_value(
'variables-section',
excess,
) or default_line_numbers
self.errors.append(
ExcessVariableError(
function.function,
excess,
line_numbers=line_numbers,
)
)
def _ignore_error(self, docstring, error):
# type: (BaseDocstring, Any) -> bool
"""Return true if we should ignore this error.
Args:
docstring: The docstring we are reporting on.
error: The error we might be ignoring.
Returns:
True if we should ignore all instances of this error,
otherwise false.
"""
error_code = error.error_code
if error_code in self.config.errors_to_ignore:
return True
noqa_lookup = docstring.get_noqas()
inline_error = error_code in noqa_lookup
if inline_error and not noqa_lookup[error_code]:
return True
return False
def _remove_ignored(self, docstring, missing, error):
# type: (BaseDocstring, Set[str], Any) -> Set[str]
"""Remove ignored from missing.
Args:
docstring: The docstring we are reporting on.
missing: A set of missing items.
error: The error being checked.
Returns:
A set of missing items without those to be ignored.
"""
error_code = error.error_code
# Ignore globally
if self._ignore_error(docstring, error):
return set()
# There are no noqa statements
noqa_lookup = docstring.get_noqas()
inline_ignore = error_code in noqa_lookup
if not inline_ignore:
return missing
# We are to ignore specific instances.
return missing - set(noqa_lookup[error_code])
def _check_style(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
for StyleError, line_numbers in docstring.get_style_errors():
if self._ignore_error(docstring, StyleError):
continue
self.errors.append(StyleError(
function.function,
line_numbers,
))
def _check_raises(self, docstring, function):
# type: (BaseDocstring, FunctionDescription) -> None
if function.is_abstract:
return
exception_types = docstring.get_items(Sections.RAISES_SECTION)
docstring_raises = set(exception_types or [])
actual_raises = function.raises
ignore_raise = set(self.config.ignore_raise)
missing_in_doc = actual_raises - docstring_raises - ignore_raise
missing_in_doc = self._remove_ignored(
docstring,
missing_in_doc,
MissingRaiseError,
)
for missing in missing_in_doc:
self.errors.append(
MissingRaiseError(function.function, missing)
)
# TODO: Disable by default.
#
# Should we even include this? It seems like the user
# would know if this function would be likely to raise
# a certain exception from underlying calls.
#
missing_in_function = docstring_raises - actual_raises
missing_in_function = self._remove_ignored(
docstring,
missing_in_function,
ExcessRaiseError,
)
# Remove AssertionError if there is an assert.
if 'AssertionError' in missing_in_function:
if function.raises_assert:
missing_in_function.remove('AssertionError')
default_line_numbers = docstring.get_line_numbers(
'raises-section',
)
for missing in missing_in_function:
line_numbers = docstring.get_line_numbers_for_value(
'raises-section',
missing,
) or default_line_numbers
self.errors.append(
ExcessRaiseError(
function.function,
missing,
line_numbers=line_numbers,
)
)
def _sort(self):
# type: () -> None
if not self._sorted:
self.errors.sort(key=lambda x: x.function.lineno)
self._sorted = True
def get_error_report(self, verbosity, filename, message_template=None):
# type: (int, str, str) -> ErrorReport
self.executor.shutdown()
return ErrorReport(
errors=self.errors,
filename=filename,
verbosity=verbosity,
message_template=message_template or self.config.message_template,
)
def get_error_report_string(self,
verbosity,
filename,
message_template=None):
# type: (int, str, str) -> str
"""Return a string representation of the errors.
Args:
verbosity: The level of verbosity. Should be an integer
in the range [1,3].
filename: The filename of where the error occurred.
message_template: A python format string for describing
how the error reports should look to the user.
Returns:
A string representation of the errors.
"""
return str(self.get_error_report(
verbosity, filename, message_template
))
| mit | -7,744,339,654,712,942,000 | 34.105166 | 78 | 0.564881 | false |
timpalpant/KaggleTSTextClassification | others/vw_to_submission.py | 1 | 4546 | import os
import subprocess
from collections import defaultdict
from datetime import datetime
import csv
from csv import DictReader
import math
from glob import glob
# Data locations
loc_train = "../data/train.csv"
loc_test = "../data/test.csv"
loc_labels = "../data/trainLabels.csv"
loc_best = "test.pred2.csv" # best submission
loc_model_prefix = "../data/"
loc_preds_prefix = "../data/"
# Will be created
loc_test_vw = "../data/test.vw"
loc_train_vw = "../data/train_y33.vw"
loc_train_vw_temp = "../data/train_yn_temp.vw" # used for relabelling
loc_kaggle_submission = "test.pred.vw.csv"
def load_data(loc_csv, nrows=0):
with open(loc_csv) as fd:
reader = csv.reader(fd)
header = reader.next()[1:]
labels = []
for row in reader:
row = map(int, row)
labels.append(row[1:])
return labels
def to_vw(loc_csv, loc_out, y, y_nr=33, train=True):
print("\nConverting %s"%loc_csv)
with open(loc_out,"wb") as outfile:
distribution = 0
for linenr, row in enumerate( DictReader(open(loc_csv,"rb")) ):
hash_features = ["x35","x91","x94","x95","x34","x4","x65","x64","x61","x3"]
yes_no_features = ["x92","x93","x101","x103","x130","x102","x10","x11","x12","x13","x14","x25","x24","x26","x32","x33","x30","x31","x141","x140","x142","x45","x44","x43","x42","x41","x2","x1","x55","x56","x57","x129","x128","x127","x126","x105","x63","x62","x87","x86","x85","x116","x117","x115","x104","x74","x75","x72","x73","x71"]
pos_features = ["x23","x22","x113","x114","x53","x54","x138","x139"]
float_features = ["x70","x77","x96","x97","x98","x99","x107","x135","x100","x137","x132","x19","x16","x29","x28","x36","x37","x38","x39","x122","x144","x145","x47","x40","x110","x119","x60","x120","x121","x123","x124","x125","x59","x52","x50","x7","x6","x8","x9","x40","x144","x145","x122","x39","x38","x37","x36"]
n_h = ""
n_b = ""
n_p = ""
n_f = ""
n_r = ""
for k in row:
if k is not "id":
if k in hash_features:
n_h += " %s_%s"%(k,row[k])
elif k in yes_no_features:
n_b += " %s_%s"%(k,row[k])
elif k in pos_features:
n_p += " %s_%s"%(k,row[k])
elif k in float_features and row[k] is not "":
n_f += " %s:%s"%(k,row[k])
elif k in float_features and row[k] is "":
n_f += " %s_%s"%(k,row[k])
else:
n_r += " %s_%s"%(k,row[k])
if train:
label = y[linenr][y_nr-1]
if label == 1:
distribution += 1
else:
label = -1
else:
label = 1
id = row["id"]
outfile.write("%s '%s |h%s |b%s |p%s |f%s |r%s\n"%(label,id,n_h,n_b,n_p,n_f,n_r) )
if linenr % 100000 == 0:
print("%s\t%s"%(linenr,distribution))
print(distribution)
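# Illustrative only: with the format string above, each emitted VW line looks
# roughly like (the id and feature values below are hypothetical)
#   1 '12345 |h x35_ab12 |b x92_1 |p x23_5 |f x70:1.25 |r x3_7
# i.e. the label, the row id as a tag, then the hashed / yes-no / positional /
# float / remaining feature namespaces.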
def relabel_vw(loc_vw, loc_out, loc_labels, y, y_i = 0):
print("Relabelling to dataset %s..."%loc_out)
start = datetime.now()
with open(loc_out,"wb") as outfile:
for e, line in enumerate( open( loc_vw, "rb") ):
if y[e][y_i-1] == 0:
new_id = -1
else:
new_id = 1
outfile.write( "%s %s\n"%(new_id," ".join(line.strip().split()[1:])) )
print("\ncompleted in :( %s\n"%(str(datetime.now()-start)))
def sigmoid(x):
return 1 / (1 + math.exp(-x))
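# Worked example: sigmoid(0.0) == 0.5, so a raw VW margin of 0 maps to an even
# probability; large positive margins approach 1.0 and large negative ones 0.0.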
def to_kaggle(loc_preds, loc_best_sub, loc_out_sub, y_nr):
preds = {}
for e, line in enumerate( open(loc_preds,"rb") ):
preds[line.strip().split()[1]] = sigmoid(float(line.strip().split()[0]))
with open(loc_out_sub,"wb") as outfile:
for e, line in enumerate( open(loc_best_sub,"rb") ):
row = line.strip().split(",")
if e == 0:
outfile.write(line)
elif "y"+str(y_nr)+"," not in line:
outfile.write(line)
else:
outfile.write("%s,%s\n"%(row[0],preds[row[0].replace("_y"+str(y_nr),"")]))
print("Finished writing Kaggle submission: %s"%loc_out_sub)
if __name__ == "__main__":
#Load labels, remove the id
#y = load_data(loc_labels)
#Create train set for label y33, and a test set with dummy labels
#to_vw(loc_train, loc_train_vw, y, y_nr=33, train=True)
#to_vw(loc_test, loc_test_vw, y, train=False)
#Train and test VW now
#Add the VW predictions to our best submission file
to_kaggle("preds_y33.p.txt", loc_best, loc_kaggle_submission, y_nr=33) | gpl-3.0 | -8,946,471,879,089,341,000 | 33.53125 | 339 | 0.527497 | false |
candidtim/vagrant-appindicator | vgapplet/machineindex.py | 1 | 4627 | # Copyright 2014, candidtim (https://github.com/candidtim)
#
# This file is part of Vagrant AppIndicator for Ubuntu.
#
# Vagrant AppIndicator for Ubuntu is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# Vagrant AppIndicator for Ubuntu is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Vagrant AppIndicator for Ubuntu. If not, see <http://www.gnu.org/licenses/>.
'''
Parsers for Vagrant machine-index file
'''
import os
import json
from gi.repository import Gio as gio
__VAGRNAT_HOME_VAR = "VAGRANT_HOME"
__MACHINE_INDEX_PATH = "data/machine-index/index"
# module's public interface
class Machine(object):
def __init__(self, id, state, directory, name):
self.id = id
self.state = state
self.directory = directory
self.name = name
def isPoweroff(self):
return self.state == "poweroff"
def isRunning(self):
return self.state == "running"
def isSaved(self):
return self.state == "saved"
def __str__(self):
return "id=%s state=%s directory=%s name=%s" % \
(self.id, self.state, self.directory, self.name)
def __eq__(self, other):
return self.id == other.id
def _changed_state_since(self, other):
assert self == other
return self.state != other.state
class MachineIndexNotFoundError(Exception):
pass
def get_machineindex():
machineindex_path = _resolve_machineindex_path()
with open(machineindex_path, 'r') as machineindex_file:
return _parse_machineindex(machineindex_file)
def diff_machineindexes(new_index, old_index):
'''Returns tuple of 3 items:
(list of new machines, list of removed machines, list of machines that changed state)
'''
new_machines = [machine for machine in new_index if machine not in old_index]
removed_machines = [machine for machine in old_index if machine not in new_index]
changed_machines = [machine for machine in new_index
if machine in old_index and machine._changed_state_since(old_index[old_index.index(machine)])]
return (new_machines, removed_machines, changed_machines)
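# Minimal usage sketch (illustrative; assumes a readable Vagrant machine index):
#
#   old_index = get_machineindex()
#   # ... later, e.g. after a `vagrant up` has run elsewhere ...
#   new_index = get_machineindex()
#   added, removed, changed = diff_machineindexes(new_index, old_index)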
active_monitors = {}
def subscribe(listener):
def on_machineindex_change(mon, f, o, event):
if event == gio.FileMonitorEvent.CHANGES_DONE_HINT:
listener(get_machineindex())
machineindex_path = _resolve_machineindex_path()
file_to_monitor = gio.File.new_for_path(machineindex_path)
monitor = file_to_monitor.monitor_file(gio.FileMonitorFlags.NONE, None)
handler_id = monitor.connect("changed", on_machineindex_change)
active_monitors[handler_id] = monitor
def unsubscribe_all():
global active_monitors
for handler_id in active_monitors:
monitor = active_monitors[handler_id]
monitor.disconnect(handler_id)
active_monitors = {}
# private implementation
def _resolve_machineindex_path():
vagrant_home = os.getenv(__VAGRNAT_HOME_VAR, "~/.vagrant.d")
machineindex_path = os.path.expanduser(os.path.join(vagrant_home, __MACHINE_INDEX_PATH))
if not os.path.isfile(machineindex_path):
raise MachineIndexNotFoundError(
"Vagrant machine index not found. Is Vagrant installed and at least one VM created?")
return machineindex_path
def _parse_machineindex(machineindex_file):
machineindex_json = json.load(machineindex_file)
version = machineindex_json["version"]
# currently, only one parser version is available:
parser = __MachineIndexParserV1()
return parser.parse(machineindex_json)
class __MachineIndexParser(object):
def parse(self, machineindex_json):
raise NotImplementedError()
class __MachineIndexParserV1(__MachineIndexParser):
def parse(self, machineindex_json):
machineindex = []
machines_json = machineindex_json["machines"]
for machine_id in machines_json:
machine_json = machines_json[machine_id]
machine = Machine(machine_id, machine_json["state"],
machine_json["vagrantfile_path"], machine_json["name"])
machineindex.append(machine)
return tuple(machineindex)
| gpl-3.0 | 2,921,425,436,328,671,700 | 33.022059 | 106 | 0.688999 | false |
sch3m4/intelmq | intelmq/bots/parsers/malwarepatrol/parser_dansguardian.py | 1 | 1085 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.message import Event
class DansParserBot(Bot):
def process(self):
report = self.receive_message()
if report is None or not report.contains("raw"):
self.acknowledge_message()
return
raw_report = utils.base64_decode(report.value("raw"))
for row in raw_report.split('\n'):
row = row.strip()
if len(row) == 0 or row.startswith('#'):
continue
event = Event(report)
splitted_row = row.split()
columns = ["source.url"]
for key, value in zip(columns, splitted_row):
event.add(key, value, sanitize=True)
event.add('classification.type', u'malware')
event.add("raw", row, sanitize=True)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = DansParserBot(sys.argv[1])
bot.start()
| agpl-3.0 | 7,067,288,390,485,369,000 | 24.232558 | 61 | 0.570507 | false |
adherrling/destiny-gotg | model.py | 1 | 1400 | #!/usr/bin/python
import os
import json, sqlite3
import initdb
import builddb
import requests, zipfile
import shutil
import discordBot
APP_PATH = "/etc/destinygotg"
DBPATH = f"{APP_PATH}/guardians.db"
def check_db():
"""Check to see if a database exists"""
return os.path.isfile(os.environ['DBPATH'])
def init_db(engine):
"""Sets up the tables for the database"""
initdb.init_db(engine)
def check_manifest():
"""Check to see if manifest file exists"""
return os.path.isfile(os.environ['MANIFEST_CONTENT'])
def get_manifest():
"""Pulls the requested definitions into the manifest database"""
manifest_url = "http://www.bungie.net/Platform/Destiny2/Manifest/"
r = requests.get(manifest_url)
manifest = r.json()
mani_url = f"http://www.bungie.net/{manifest['Response']['mobileWorldContentPaths']['en']}"
#Download the file, write it to MANZIP
r = requests.get(mani_url)
    with open(f"{APP_PATH}/MANZIP", "wb") as manifest_zip:
        manifest_zip.write(r.content)
    #Extract the file contents, and rename the extracted file
    # (use a distinct handle name so the zipfile module is not shadowed)
    with zipfile.ZipFile(f"{APP_PATH}/MANZIP") as archive:
        name = archive.namelist()
        archive.extractall()
shutil.move(name[0], os.environ['MANIFEST_CONTENT'])
def build_db():
"""Main function to build the full database"""
builddb.build_db()
def run_discord(engine):
discordBot.run_bot(engine)
| mit | 8,286,232,855,663,104,000 | 29.434783 | 95 | 0.684286 | false |
RainbowRail/rainbowrail | rrail_pkg/app.py | 1 | 2105 | #!/usr/bin/env python
import bottle, socket
import yaml
import os, io, time
import build
# Site path
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '_site')
src_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '_template')
# Method to set up site
def setup(path):
# Link site
# Static Files
# @bottle.route('/assets/<filepath:path>')
# def assets_static(filepath):
# return bottle.static_file(
# filepath,
# root = os.path.join(path, 'assets')
# )
# @bottle.route('/images/<filepath:path>')
# def assets_static(filepath):
# return bottle.static_file(
# filepath,
# root = os.path.join(path, 'images')
# )
# Main Page
@bottle.route('/')
@bottle.route('/<filepath:path>')
def index(filepath=None):
        # if os.path.splitext(filepath)[1] in ['.html', '.css', '.js', '.png', '.jpg']:
path, file_name = os.path.split(filepath if filepath else '')
if not file_name:
file_name = 'index.html'
path = os.path.join(os.path.curdir, '_site', path)
return bottle.static_file(
file_name,
root = path
)
return bottle.static_file(
file_name,
root = os.path.join('_site', path)
)
def run(path, hostname, portno):
# Magic to ensure if run as sudo, we build as no-sudo
uid = os.getuid()
if uid == 0:
os.seteuid(int(os.environ['SUDO_UID']));
setup(path)
os.seteuid(0);
else:
setup(path)
# Run the app
try:
bottle.run(host=hostname, port=portno)
except socket.error as e:
print '\033[91m\r', 'Unable to start server!', '\033[0m'
        if e.errno == 98:
            print '\033[91m\r', 'Server already running on port', portno, '\033[0m'
        elif e.errno == 13:
            print '\033[91m\r', 'You need root permissions to run on port', portno, '\033[0m'
        else:
            print e.errno
if __name__ == "__main__":
run('./site', 'localhost', 80)
| mit | 9,113,480,922,288,468,000 | 28.236111 | 93 | 0.545843 | false |
La0/mozilla-relengapi | lib/please_cli/please_cli/create.py | 1 | 2271 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import click
import cookiecutter.main
import please_cli.config
import please_cli.utils
CMD_HELP = '''
Create a new PROJECT from a TEMPLATE
\b
TEMPLATES:
{templates}
'''.format(
templates=''.join([' - ' + i + '\n' for i in please_cli.config.TEMPLATES]),
)
@click.command(
cls=please_cli.utils.ClickCustomCommand,
short_help='Create PROJECT initial structure.',
epilog='Happy hacking!',
help=CMD_HELP,
)
@click.argument(
'template',
required=True,
type=click.Choice(please_cli.config.TEMPLATES),
)
@click.argument(
'project',
required=True,
type=str,
)
@click.pass_context
def cmd(ctx, template, project):
'''
'''
template_dir = os.path.join(please_cli.config.ROOT_DIR, 'nix', 'templates', template)
    if not os.path.isdir(template_dir):
        raise click.ClickException(f'Template `{template}` does not exist.')
project_dir = os.path.join(please_cli.config.SRC_DIR, project)
    if os.path.isdir(project_dir):
        raise click.ClickException(f'Project `{project}` already exists.')
template_options = dict(project=project)
template_options['project_path'] = project.replace('-', '_')
template_options['project_url'] = 'TODO'
if project.startswith('releng-'):
template_options['project_url'] = f"{project[len('releng-'):]}.mozilla-releng.net"
if project.startswith('shipit-'):
template_options['project_url'] = f"{project[len('shipit-'):]}.shipit.mozilla-releng.net"
click.echo('=> Creating project structure ...')
cookiecutter.main.cookiecutter(
template_dir,
no_input=True,
extra_context=template_options,
output_dir=please_cli.config.SRC_DIR,
)
click.secho(f'\nProject `{project}` created sucessfully!', fg='green', bold=True)
click.echo('\nCode is located at:')
click.echo(f' src/{project}')
click.echo('\nTo enter development environemnt run:')
click.echo(f' ./please shell {project}')
click.echo(f'\nTo read more about `{template}` template visit:')
click.echo(f' https://docs.mozilla-releng.net/develop/template-{template}.html')
click.echo('')
| mpl-2.0 | 2,255,020,309,636,201 | 28.493506 | 97 | 0.654337 | false |
BrentDorsey/pipeline | jupyterhub.ml/config/jupyterhub/jupyterhub_config.py | 1 | 16882 | import os
# Configuration file for jupyterhub.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterHub configuration
#------------------------------------------------------------------------------
# An Application for starting a Multi-User Jupyter Notebook server.
# Grant admin users permission to access single-user servers.
#
# Users should be properly informed if this is enabled.
c.JupyterHub.admin_access = True
# DEPRECATED, use Authenticator.admin_users instead.
# c.JupyterHub.admin_users = set()
# Answer yes to any questions (e.g. confirm overwrite)
c.JupyterHub.answer_yes = True
# Dict of token:username to be loaded into the database.
#
# Allows ahead-of-time generation of API tokens for use by services.
# c.JupyterHub.api_tokens = {}
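# For example (token value is hypothetical):
# c.JupyterHub.api_tokens = {'generated-secret-token': 'service-admin'}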
# Class for authenticating users.
#
# This should be a class with the following form:
#
# - constructor takes one kwarg: `config`, the IPython config object.
#
# - is a tornado.gen.coroutine
# - returns username on success, None on failure
# - takes two arguments: (handler, data),
# where `handler` is the calling web.RequestHandler,
# and `data` is the POST form data from the login page.
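# A minimal authenticator following the form described above might look like the
# sketch below (illustrative only; the class name and password dict are made up,
# and this deployment instead configures the class from the environment further
# down):
#
# from tornado import gen
# from jupyterhub.auth import Authenticator
#
# class DictionaryAuthenticator(Authenticator):
#     passwords = {'demo': 'demo-password'}  # hypothetical credentials
#
#     @gen.coroutine
#     def authenticate(self, handler, data):
#         # return the username on success, None (implicitly) on failure
#         if self.passwords.get(data['username']) == data['password']:
#             return data['username']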
#c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'
c.JupyterHub.authenticator_class = os.environ['JUPYTERHUB_AUTHENTICATOR']
c.Auth0OAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
c.Auth0OAuthenticator.client_id = os.environ['OAUTH_CLIENT_ID']
c.Auth0OAuthenticator.client_secret = os.environ['OAUTH_CLIENT_SECRET']
# The base URL of the entire application
c.JupyterHub.base_url = '/'
# Whether to shutdown the proxy when the Hub shuts down.
#
# Disable if you want to be able to teardown the Hub while leaving the proxy
# running.
#
# Only valid if the proxy was starting by the Hub process.
#
# If both this and cleanup_servers are False, sending SIGINT to the Hub will
# only shutdown the Hub, leaving everything else running.
#
# The Hub should be able to resume from database state.
c.JupyterHub.cleanup_proxy = True
# Whether to shutdown single-user servers when the Hub shuts down.
#
# Disable if you want to be able to teardown the Hub while leaving the single-
# user servers running.
#
# If both this and cleanup_proxy are False, sending SIGINT to the Hub will only
# shutdown the Hub, leaving everything else running.
#
# The Hub should be able to resume from database state.
c.JupyterHub.cleanup_servers = True
# The config file to load
# c.JupyterHub.config_file = '/root/config/jupyter/jupyterhub_config.py'
# Confirm that JupyterHub should be run without SSL. This is **NOT RECOMMENDED**
# unless SSL termination is being handled by another layer.
c.JupyterHub.confirm_no_ssl = True
# Number of days for a login cookie to be valid. Default is two weeks.
# c.JupyterHub.cookie_max_age_days = 14
# The cookie secret to use to encrypt cookies.
#
# Loaded from the JPY_COOKIE_SECRET env variable by default.
# c.JupyterHub.cookie_secret = b''
# File in which to store the cookie secret.
# c.JupyterHub.cookie_secret_file = '/root/pipeline/work/jupyterhub/jupyterhub_cookie_secret'
# The location of jupyterhub data files (e.g. /usr/local/share/jupyter/hub)
# c.JupyterHub.data_files_path = '/root/pipeline/work/jupyterhub'
# Include any kwargs to pass to the database connection. See
# sqlalchemy.create_engine for details.
# c.JupyterHub.db_kwargs = {}
# url for the database. e.g. `sqlite:///jupyterhub.sqlite`
#c.JupyterHub.db_url = 'sqlite:////root/jupyterhub.sqlite'
# log all database transactions. This has A LOT of output
# c.JupyterHub.debug_db = False
# show debug output in configurable-http-proxy
# c.JupyterHub.debug_proxy = False
# Send JupyterHub's logs to this file.
#
# This will *only* include the logs of the Hub itself, not the logs of the proxy
# or any single-user servers.
c.JupyterHub.extra_log_file = '/root/logs/jupyterhub.log'
# Extra log handlers to set on JupyterHub logger
# c.JupyterHub.extra_log_handlers = []
# Generate default config file
# c.JupyterHub.generate_config = False
# The ip for this process
c.JupyterHub.hub_ip = '0.0.0.0'
# The port for this process
# c.JupyterHub.hub_port = 3081
# The prefix for the hub server. Must not be '/'
# c.JupyterHub.hub_prefix = '/hub/'
# The public facing ip of the whole application (the proxy)
c.JupyterHub.ip = '0.0.0.0'
# Supply extra arguments that will be passed to Jinja environment.
# c.JupyterHub.jinja_environment_options = {}
# Interval (in seconds) at which to update last-activity timestamps.
# c.JupyterHub.last_activity_interval = 300
# Specify path to a logo image to override the Jupyter logo in the banner.
c.JupyterHub.logo_file = 'img/logo/pipelineai-split-black-flux-128x128.png'
# File to write PID. Useful for daemonizing jupyterhub.
# c.JupyterHub.pid_file = ''
# The public facing port of the proxy
c.JupyterHub.port = 8754
# The ip for the proxy API handlers
c.JupyterHub.proxy_api_ip = '0.0.0.0'
c.Session.debug = True
# The port for the proxy API handlers
# c.JupyterHub.proxy_api_port = 0
# The Proxy Auth token.
#
# Loaded from the CONFIGPROXY_AUTH_TOKEN env variable by default.
# c.JupyterHub.proxy_auth_token = ''
# Interval (in seconds) at which to check if the proxy is running.
# c.JupyterHub.proxy_check_interval = 30
# The command to start the http proxy.
#
# Only override if configurable-http-proxy is not on your PATH
# c.JupyterHub.proxy_cmd = ['configurable-http-proxy']
# Purge and reset the database.
# c.JupyterHub.reset_db = False
# The class to use for spawning single-user servers.
#
# Should be a subclass of Spawner.
#c.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner'
#c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.JupyterHub.spawner_class = 'simplespawner.SimpleLocalProcessSpawner'
c.SimpleLocalProcessSpawner.home_path_template = '/root/'
#c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
# Don't try to cleanup servers on exit - since in general for k8s, we want
# the hub to be able to restart without losing user containers
#c.JupyterHub.cleanup_servers = False
# First pulls can be really slow, so let's give it a big timeout
#c.KubeSpawner.start_timeout = 60 * 5
# Our simplest user image! Optimized to just... start, and be small!
#c.KubeSpawner.singleuser_image_spec = 'yuvipanda/simple-singleuser:v1'
# The spawned containers need to be able to talk to the hub through the proxy!
#c.KubeSpawner.hub_connect_ip = os.environ['JUPYTERHUB_SERVICE_HOST']
#c.KubeSpawner.hub_connect_port = os.environ['JUPYTERHUB_SERVICE_PORT_JUPYTERHUB_API_PROXY']
#c.KubeSpawner.mem_limit = '2G'
#c.KubeSpawner.cpu_limit = 1
# Spawn user containers from this image
#c.DockerSpawner.container_image = 'jupyter/pyspark-notebook'
# Have the Spawner override the Docker run command
#c.DockerSpawner.extra_create_kwargs.update({
# 'command': '/usr/local/bin/start-singleuser.sh'
#})
# Path to SSL certificate file for the public facing interface of the proxy
#
# Use with ssl_key
# c.JupyterHub.ssl_cert = ''
# Path to SSL key file for the public facing interface of the proxy
#
# Use with ssl_cert
# c.JupyterHub.ssl_key = ''
# Host to send statsd metrics to
# c.JupyterHub.statsd_host = ''
# Port on which to send statsd metrics about the hub
# c.JupyterHub.statsd_port = 8125
# Prefix to use for all metrics sent by jupyterhub to statsd
# c.JupyterHub.statsd_prefix = 'jupyterhub'
# Run single-user servers on subdomains of this host.
#
# This should be the full https://hub.domain.tld[:port]
#
# Provides additional cross-site protections for javascript served by single-
# user servers.
#
# Requires <username>.hub.domain.tld to resolve to the same host as
# hub.domain.tld.
#
# In general, this is most easily achieved with wildcard DNS.
#
# When using SSL (i.e. always) this also requires a wildcard SSL certificate.
# c.JupyterHub.subdomain_host = ''
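# For example (hypothetical domain):
# c.JupyterHub.subdomain_host = 'https://hub.example.org'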
# Paths to search for jinja templates.
# c.JupyterHub.template_paths = []
# Extra settings overrides to pass to the tornado application.
# c.JupyterHub.tornado_settings = {}
#------------------------------------------------------------------------------
# Spawner configuration
#------------------------------------------------------------------------------
# Base class for spawning single-user notebook servers.
#
# Subclass this, and override the following methods:
#
# - load_state - get_state - start - stop - poll
# Extra arguments to be passed to the single-user server
c.Spawner.args = ['--allow-root']
# The command used for starting notebooks.
#c.Spawner.cmd = ['jupyterhub-singleuser']
#c.Spawner.cmd = ['jupyter labhub']
# Enable debug-logging of the single-user server
c.Spawner.debug = True
# The default URL for the single-user server.
#
# Can be used in conjunction with --notebook-dir=/ to enable full filesystem
# traversal, while preserving user's homedir as landing page for notebook
#
# `%U` will be expanded to the user's username
c.Spawner.default_url = '/lab'
# Disable per-user configuration of single-user servers.
#
# This prevents any config in users' $HOME directories from having an effect on
# their server.
c.Spawner.disable_user_config = True
# Whitelist of environment variables for the subprocess to inherit
c.Spawner.env_keep = ['CUDA_PKG_VERSION', 'CUDA_VERSION', 'CUDNN_VERSION', 'HADOOP_CONF', 'HADOOP_HDFS_HOME', 'HADOOP_CONF_DIR', 'HADOOP_HOME', 'HADOOP_OPTS', 'HADOOP_VERSION', 'HOME', 'HOSTNAME', 'JAVA_HOME', 'LD_LIBRARY_PATH', 'LIBRARY_PATH', 'PATH', 'PYSPARK_VERSION', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'SPARK_HOME', 'SPARK_VERSION', 'TENSORFLOW_VERSION', 'PYSPARK_PYTHON', 'SPARK_MASTER', 'PYSPARK_SUBMIT_ARGS', 'SPARK_SUBMIT_ARGS', 'TF_CPP_MIN_LOG_LEVEL', 'TF_XLA_FLAGS', 'TENSORFLOW_HOME', 'TENSORFLOW_SERVING_HOME', 'CLASSPATH',]
# Environment variables to load for the Spawner.
#
# Value could be a string or a callable. If it is a callable, it will be called
# with one parameter, which will be the instance of the spawner in use. It
# should quickly (without doing much blocking operations) return a string that
# will be used as the value for the environment variable.
# c.Spawner.environment = {}
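# For example, to expose a per-user value computed at spawn time (the variable
# name and URL are hypothetical):
# c.Spawner.environment = {
#     'MY_SERVICE_URL': lambda spawner: 'http://example.org/%s' % spawner.user.name,
# }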
# Timeout (in seconds) before giving up on a spawned HTTP server
#
# Once a server has successfully been spawned, this is the amount of time we
# wait before assuming that the server is unable to accept connections.
# c.Spawner.http_timeout = 30
# The IP address (or hostname) the single-user server should listen on
c.Spawner.ip = '0.0.0.0'
# The notebook directory for the single-user server
#
# `~` will be expanded to the user's home directory. `%U` will be expanded to the
# user's username.
c.Spawner.notebook_dir = 'notebooks'
# An HTML form for options a user can specify on launching their server. The
# surrounding `<form>` element and the submit button are already provided.
#
# For example:
# <br>
# Choose a letter:
# <select name="letter" multiple="true">
# <option value="A">The letter A</option>
# <option value="B">The letter B</option>
# </select>
# c.Spawner.options_form = ''
# Interval (in seconds) on which to poll the spawner.
# c.Spawner.poll_interval = 30
# Timeout (in seconds) before giving up on the spawner.
#
# This is the timeout for start to return, not the timeout for the server to
# respond. Callers of spawner.start will assume that startup has failed if it
# takes longer than this. start should return when the server process is started
# and its location is known.
# c.Spawner.start_timeout = 60
#------------------------------------------------------------------------------
# LocalProcessSpawner configuration
#------------------------------------------------------------------------------
# A Spawner that just uses Popen to start local processes as users.
#
# Requires users to exist on the local system.
#
# This is the default spawner for JupyterHub.
# Seconds to wait for process to halt after SIGINT before proceeding to SIGTERM
# c.LocalProcessSpawner.INTERRUPT_TIMEOUT = 10
# Seconds to wait for process to halt after SIGKILL before giving up
# c.LocalProcessSpawner.KILL_TIMEOUT = 5
# Seconds to wait for process to halt after SIGTERM before proceeding to SIGKILL
# c.LocalProcessSpawner.TERM_TIMEOUT = 5
#------------------------------------------------------------------------------
# Authenticator configuration
#------------------------------------------------------------------------------
# A class for authentication.
#
# The primary API is one method, `authenticate`, a tornado coroutine for
# authenticating users.
# set of usernames of admin users
#
# If unspecified, only the user that launches the server will be admin.
#c.Authenticator.admin_users = {"root"}
# Dictionary mapping authenticator usernames to JupyterHub users.
#
# Can be used to map OAuth service names to local users, for instance.
#
# Used in normalize_username.
# c.Authenticator.username_map = {}
# Regular expression pattern for validating usernames.
#
# If not defined: allow any username.
# c.Authenticator.username_pattern = ''
# Username whitelist.
#
# Use this to restrict which users can login. If empty, allow any user to
# attempt login.
#c.Authenticator.whitelist = set("")
#------------------------------------------------------------------------------
# LocalAuthenticator configuration
#------------------------------------------------------------------------------
# Base class for Authenticators that work with local Linux/UNIX users
#
# Checks for local users, and can attempt to create them if they exist.
# The command to use for creating users as a list of strings.
#
# For each element in the list, the string USERNAME will be replaced with the
# user's username. The username will also be appended as the final argument.
#
# For Linux, the default value is:
#
# ['adduser', '-q', '--gecos', '""', '--disabled-password']
#
# To specify a custom home directory, set this to:
#
# ['adduser', '-q', '--gecos', '""', '--home', '/customhome/USERNAME',
# '--disabled-password']
#
# This will run the command:
#
# adduser -q --gecos "" --home /customhome/river --disabled-password river
#
# when the user 'river' is created.
#c.LocalAuthenticator.add_user_cmd = []
# If a user is added that doesn't exist on the system, should I try to create
# the system user?
c.LocalAuthenticator.create_system_users = False
# Automatically whitelist anyone in this group.
#c.LocalAuthenticator.group_whitelist = set("root")
#------------------------------------------------------------------------------
# PAMAuthenticator configuration
#------------------------------------------------------------------------------
# Authenticate local Linux/UNIX users with PAM
# The encoding to use for PAM
# c.PAMAuthenticator.encoding = 'utf8'
# Whether to open PAM sessions when spawners are started.
#
# This may trigger things like mounting shared filsystems, loading credentials,
# etc. depending on system configuration, but it does not always work.
#
# It can be disabled with::
#
# c.PAMAuthenticator.open_sessions = False
# The PAM service to use for authentication.
# c.PAMAuthenticator.service = 'login'
#MappingKernelManager.cull_idle_timeout = 300
| apache-2.0 | 8,332,629,188,906,026,000 | 35.227468 | 584 | 0.674387 | false |
ingadhoc/odoo-saas-manager | addons/saas_manager/instance_additional.py | 1 | 1560 | # -*- coding: utf-8 -*-
##############################################################################
#
# Saas Manager
# Copyright (C) 2014 Sistemas ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class instance_additional(osv.osv):
""""""
_name = 'saas_manager.instance_additional'
_description = 'instance_additional'
_columns = {
'additional_product_id': fields.many2one('product.product', string='Sub Product', required=True),
'instance_id': fields.many2one('saas_manager.instance', string='instance_id', ondelete='cascade', required=True),
}
_defaults = {
}
_constraints = [
]
instance_additional()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,807,300,565,723,389,000 | 29.588235 | 122 | 0.621154 | false |
sgnn7/sgfc | sgfc_drone.py | 1 | 2957 | #!/usr/bin/env python2
import time
from functools import partial
import sgfc_communication
import sgfc_io
from sgfc_communication.protobufs import sgfc_pb2 as fc_proto
class SgfcDrone(object):
def __init__(self):
self._throttle = 0.0
self._yaw = 0.0
self._pitch = 0.0
def comm_callback(self, io_dev, data):
message = fc_proto.FlightMessage()
message.ParseFromString(data)
commands = message.payload
for command in commands:
if command.type == fc_proto.FLIGHT_CONTROL_COMMAND:
fc_command = command.flight_control_command
if fc_command.throttle:
throttle = fc_command.throttle
if (throttle != self._throttle):
print("Throttle: %s" % throttle)
self._throttle = throttle
io_dev.set_all_pwm(throttle)
else:
print("WARN: Redundant throttle msg!")
if fc_command.yaw:
yaw = fc_command.yaw
if (yaw != self._yaw):
print("Yaw: %s" % yaw)
self._yaw = yaw
# Do something different here
io_dev.set_all_pwm(yaw)
else:
print("WARN: Redundant yaw msg!")
if fc_command.pitch:
pitch = fc_command.pitch
if (pitch != self._pitch):
print("Pitch: %s" % pitch)
self._pitch = pitch
# Do something different here
io_dev.set_all_pwm(pitch)
else:
print("WARN: Redundant pitch msg!")
def comm_error_callback(self, error):
print(error)
def open_comms(self):
comm_device = None
io_dev = None
try:
io_dev = sgfc_io.get_device('pic18f45k50', sgfc_io.I2C)
io_dev.set_all_pwm(0.0)
time.sleep(0.2)
io_dev.set_all_pwm(1.0)
time.sleep(1)
io_dev.set_all_pwm(0.1)
io_dev.set_all_pwm_clamp(lower=0.5)
comm_device = sgfc_communication.get_device('zigbee_xbee',
'\x00\x02',
partial(self.comm_callback, io_dev),
self.comm_error_callback,
device='/dev/ttyUSB0')
except KeyboardInterrupt as kbi:
if comm_device:
comm_device.close()
if io_dev:
io_dev.set_all_pwm_clamp(lower=0.0)
io_dev.set_all_pwm(0.0)
io_dev.close()
if __name__ == '__main__':
SgfcDrone().open_comms()
| lgpl-2.1 | 1,260,848,690,594,583,000 | 32.602273 | 92 | 0.448766 | false |
neuront/eirx | src/eirx/parse.py | 1 | 3057 | import re
WHITE_RGBA = (255, 255, 255)
BLACK_RGBA = (0, 0, 0)
def _get_size(size):
for m in _SIZE_RE:
match = m[0].match(size)
if match:
return m[1](match.groupdict())
raise ValueError('Invalid size')
def _get_RGBA(opt, index):
if len(opt) > index + 6:
        # read the six hex digits that follow the 'x' marker as an (R, G, B) triple
        return tuple(int(opt[index + 1 + i * 2: index + 3 + i * 2], 16) for i in xrange(3))
raise ValueError('Invalid color format, not xRRGGBB')
def _get_options(opt):
def not_keep_aspect_ratio(opt_result, opt_string, index):
opt_result['size_adj'] = False
return 0
def crop(opt_result, opt_string, index):
opt_result['crop'] = True
return 0
def top_crop(opt_result, opt_string, index):
opt_result['top_crop'] = True
return 0
def frame(opt_result, opt_string, index):
opt_result['frame'] = True
return 0
def window(opt_result, opt_string, index):
opt_result['window'] = True
return 0
def fill_color(opt_result, opt_string, index):
if opt_string[index] == 'w':
opt_result['bgc'] = WHITE_RGBA
return 1
elif opt_string[index] == 'b':
opt_result['bgc'] = BLACK_RGBA
return 1
elif opt_string[index] == 'x':
opt_result['bgc'] = _get_RGBA(opt_string, index)
return 7
raise ValueError('Invalid color format')
def addfilter(opt_result, opt_string, index):
opt_result['filters'].append(opt_string[index: index + 4])
return 4
opt_result = dict(filters=[])
opt_map = dict(
a=not_keep_aspect_ratio,
c=crop,
t=top_crop,
f=frame,
w=window,
F=fill_color,
x=addfilter,
)
i = 0
while i < len(opt):
try:
i += opt_map[opt[i]](opt_result, opt, i + 1)
except LookupError:
raise ValueError('Invalid option')
i += 1
return opt_result
def parse(mode):
parts = mode.split('-')
args = _get_size(parts[0])
if 1 < len(parts):
for opt, value in _get_options(parts[1]).iteritems():
args[opt] = value
return args
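# Illustrative usage, derived from the size patterns and option handlers in this
# module (the values below are hypothetical):
#
#   parse('w120h80')     -> {'w': 120, 'h': 80}
#   parse('200')         -> {'w': 200, 'h': 200, 'crop': True}
#   parse('w120h80-cFw') -> {'w': 120, 'h': 80, 'crop': True,
#                            'bgc': (255, 255, 255), 'filters': []}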
def _one_dim(dim, size):
if dim in ['h', 'w', 'wma', 'hma']:
return {dim: int(size), 'size_adj': True}
raise ValueError('Invalid dimension: ' + dim)
def _two_dim(dim_a, size_a, dim_b, size_b):
if dim_a[0] == dim_b[0]:
raise ValueError('Dimension duplicated')
if dim_a in ['h', 'w', 'wma', 'hma'] and dim_b in ['h', 'w', 'wma', 'hma']:
return {dim_a: int(size_a), dim_b: int(size_b)}
raise ValueError('Invalid dimension: {}/{}'.format(dim_a, dim_b))
_SIZE_RE = (
(re.compile('^(?P<dim_a>[a-z]+)(?P<size_a>[0-9]+)(?P<dim_b>[a-z]+)' +
'(?P<size_b>[0-9]+)$'), lambda d: _two_dim(**d)),
(re.compile('^(?P<dim>[a-z]+)(?P<size>[0-9]+)$'),
lambda d: _one_dim(**d)),
(re.compile('^(?P<s>[0-9]+)$'),
lambda d: dict(w=int(d['s']), h=int(d['s']), crop=True)),
(re.compile('^o$'), lambda _: dict()),
)
| mit | -3,878,455,098,277,635,600 | 27.305556 | 79 | 0.527314 | false |
vpal/codekata04 | tests/tests.py | 1 | 1948 | import unittest
from StringIO import StringIO
from codekata04.kata04_part_three import (
Kata04WeatherTable,
Kata04FootballTable
)
class Kata04Tests(unittest.TestCase):
def common_test_record_creation(self):
for test_item in self.test_items:
ds = StringIO(test_item[0])
t = self.target_class(ds)
self.assertEqual(len(t.records), 1)
self.assertEqual(t.records[0][0], test_item[1][0])
self.assertEqual(t.records[0][1], test_item[1][1])
self.assertEqual(t.records[0][2], test_item[1][2])
def common_test_min_diff(self):
ds = StringIO('\n'.join([test_item[0] for test_item in self.test_items]))
t = self.target_class(ds)
self.assertEqual(t.min_diff()[0], self.min_value)
class TestKata04WeatherTable(Kata04Tests):
target_class = Kata04WeatherTable
test_items = [
(' 1 88 59 74 53.8 0.00 F 280 9.6 270 17 1.6 93 23 1004.5', (1, 88, 59)),
(' 9 86 32* 59 6 61.5 0.00 240 7.6 220 12 6.0 78 46 1018.6', (9, 86, 32))
]
min_value = 1
def test_record_creation(self):
super(TestKata04WeatherTable, self).common_test_record_creation()
def test_min_diff(self):
super(TestKata04WeatherTable, self).common_test_min_diff()
class TestKata04FootballTable(Kata04Tests):
target_class = Kata04FootballTable
test_items = [
(' 6. Chelsea 38 17 13 8 66 - 38 64', ('Chelsea', 66, 38)),
(' 12. Middlesbrough 38 12 9 17 35 - 47 45', ('Middlesbrough', 35, 47))
]
min_value = 'Middlesbrough'
def test_record_creation(self):
super(TestKata04FootballTable, self).common_test_record_creation()
def test_min_diff(self):
super(TestKata04FootballTable, self).common_test_min_diff()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -1,935,647,020,709,956,900 | 33.175439 | 115 | 0.592916 | false |
sloria/osf.io | api_tests/requests/views/test_request_actions_create.py | 1 | 22223 | import mock
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import NodeRequestTestMixin, PreprintRequestTestMixin
from osf.utils import permissions
@pytest.mark.django_db
@pytest.mark.enable_enqueue
@pytest.mark.enable_quickfiles_creation
class TestCreateNodeRequestAction(NodeRequestTestMixin):
@pytest.fixture()
def url(self, node_request):
return '/{}actions/requests/nodes/'.format(API_BASE)
def create_payload(self, _id=None, **attrs):
payload = {
'data': {
'attributes': attrs,
'relationships': {},
'type': 'node-request-actions'
}
}
if _id:
payload['data']['relationships']['target'] = {
'data': {
'type': 'node-requests',
'id': _id
}
}
return payload
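    # For illustration, create_payload('abc12', trigger='accept') builds a JSON API
    # document shaped like this (the id is hypothetical):
    # {'data': {'attributes': {'trigger': 'accept'},
    #           'relationships': {'target': {'data': {'type': 'node-requests',
    #                                                 'id': 'abc12'}}},
    #           'type': 'node-request-actions'}}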
def test_requester_cannot_view(self, app, requester, url):
res = app.get(url, auth=requester.auth, expect_errors=True)
assert res.status_code == 405
def test_requester_cannot_approve(self, app, requester, url, node_request):
initial_state = node_request.machine_state
payload = self.create_payload(node_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=requester.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
def test_requester_cannot_reject(self, app, requester, url, node_request):
initial_state = node_request.machine_state
payload = self.create_payload(node_request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=requester.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
def test_requester_can_edit_comment(self, app, requester, url, node_request):
initial_state = node_request.machine_state
initial_comment = node_request.comment
payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=requester.auth)
assert res.status_code == 201
node_request.reload()
assert initial_state == node_request.machine_state
assert initial_comment != node_request.comment
def test_admin_can_approve(self, app, admin, url, node_request):
initial_state = node_request.machine_state
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert initial_state != node_request.machine_state
assert node_request.creator in node_request.target.contributors
def test_admin_can_reject(self, app, admin, url, node_request):
initial_state = node_request.machine_state
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert initial_state != node_request.machine_state
assert node_request.creator not in node_request.target.contributors
def test_admin_cannot_view(self, app, admin, url):
res = app.get(url, auth=admin.auth, expect_errors=True)
assert res.status_code == 405
def test_admin_cannot_edit_comment(self, app, admin, url, node_request):
initial_state = node_request.machine_state
initial_comment = node_request.comment
payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=admin.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
assert initial_comment == node_request.comment
def test_write_contrib_cannot_approve(self, app, write_contrib, url, node_request):
initial_state = node_request.machine_state
payload = self.create_payload(node_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
def test_write_contrib_cannot_reject(self, app, write_contrib, url, node_request):
initial_state = node_request.machine_state
payload = self.create_payload(node_request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
def test_write_contrib_cannot_view(self, app, write_contrib, url):
res = app.get(url, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 405
def test_write_contrib_cannot_edit_comment(self, app, write_contrib, url, node_request):
initial_state = node_request.machine_state
initial_comment = node_request.comment
payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
assert initial_comment == node_request.comment
def test_noncontrib_cannot_approve(self, app, noncontrib, url, node_request):
initial_state = node_request.machine_state
payload = self.create_payload(node_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=noncontrib.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
def test_noncontrib_cannot_reject(self, app, noncontrib, url, node_request):
initial_state = node_request.machine_state
payload = self.create_payload(node_request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=noncontrib.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
def test_noncontrib_cannot_view(self, app, noncontrib, url):
res = app.get(url, auth=noncontrib.auth, expect_errors=True)
assert res.status_code == 405
def test_noncontrib_cannot_edit_comment(self, app, noncontrib, url, node_request):
initial_state = node_request.machine_state
initial_comment = node_request.comment
payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=noncontrib.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
assert initial_comment == node_request.comment
def test_edits_fail_with_requests_disabled(self, app, requester, url, node_request):
initial_state = node_request.machine_state
initial_comment = node_request.comment
payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
node_request.target.access_requests_enabled = False
node_request.target.save()
res = app.post_json_api(url, payload, auth=requester.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
assert initial_comment == node_request.comment
def test_approves_fail_with_requests_disabled(self, app, admin, url, node_request):
initial_state = node_request.machine_state
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='accept')
node_request.target.access_requests_enabled = False
node_request.target.save()
res = app.post_json_api(url, payload, auth=admin.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
assert node_request.creator not in node_request.target.contributors
def test_rejects_fail_with_requests_disabled(self, app, admin, url, node_request):
initial_state = node_request.machine_state
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='reject')
node_request.target.access_requests_enabled = False
node_request.target.save()
res = app.post_json_api(url, payload, auth=admin.auth, expect_errors=True)
assert res.status_code == 403
node_request.reload()
assert initial_state == node_request.machine_state
assert node_request.creator not in node_request.target.contributors
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_email_sent_on_approve(self, mock_mail, app, admin, url, node_request):
initial_state = node_request.machine_state
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert initial_state != node_request.machine_state
assert node_request.creator in node_request.target.contributors
assert mock_mail.call_count == 1
@mock.patch('website.mails.mails.send_mail')
def test_email_sent_on_reject(self, mock_mail, app, admin, url, node_request):
initial_state = node_request.machine_state
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert initial_state != node_request.machine_state
assert node_request.creator not in node_request.target.contributors
assert mock_mail.call_count == 1
@mock.patch('website.mails.mails.send_mail')
    def test_email_not_sent_on_edit_comment(self, mock_mail, app, requester, url, node_request):
initial_state = node_request.machine_state
initial_comment = node_request.comment
payload = self.create_payload(node_request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=requester.auth)
assert res.status_code == 201
node_request.reload()
assert initial_state == node_request.machine_state
assert initial_comment != node_request.comment
assert mock_mail.call_count == 0
def test_set_permissions_on_approve(self, app, admin, url, node_request):
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='accept', permissions='admin')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert node_request.target.has_permission(node_request.creator, permissions.ADMIN)
def test_set_visible_on_approve(self, app, admin, url, node_request):
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='accept', visible=False)
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert node_request.creator in node_request.target.contributors
assert not node_request.target.get_visible(node_request.creator)
def test_accept_request_defaults_to_read_and_visible(self, app, admin, url, node_request):
assert node_request.creator not in node_request.target.contributors
payload = self.create_payload(node_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
node_request.reload()
assert node_request.creator in node_request.target.contributors
assert node_request.target.has_permission(node_request.creator, permissions.READ)
assert node_request.target.get_visible(node_request.creator)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestCreatePreprintRequestAction(PreprintRequestTestMixin):
@pytest.fixture()
def url(self, pre_request, post_request, none_request):
return '/{}actions/requests/preprints/'.format(API_BASE)
def create_payload(self, _id=None, **attrs):
payload = {
'data': {
'attributes': attrs,
'relationships': {},
'type': 'preprint-request-actions'
}
}
if _id:
payload['data']['relationships']['target'] = {
'data': {
'type': 'preprint-requests',
'id': _id
}
}
return payload
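    # For example, self.create_payload(request._id, trigger='accept') builds a
    # JSON API document of the form:
    #   {'data': {'attributes': {'trigger': 'accept'},
    #             'relationships': {'target': {'data': {'type': 'preprint-requests',
    #                                                   'id': '<request _id>'}}},
    #             'type': 'preprint-request-actions'}}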
def test_no_one_can_view(self, app, admin, write_contrib, noncontrib, moderator, url):
for user in [admin, write_contrib, noncontrib, moderator]:
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 405
def test_nonmoderator_cannot_approve(self, app, admin, write_contrib, noncontrib, url, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
initial_state = request.machine_state
payload = self.create_payload(request._id, trigger='accept')
for user in [admin, write_contrib, noncontrib]:
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
request.reload()
assert initial_state == request.machine_state
def test_nonmoderator_cannot_reject(self, app, admin, write_contrib, noncontrib, url, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
initial_state = request.machine_state
payload = self.create_payload(request._id, trigger='reject')
for user in [admin, write_contrib, noncontrib]:
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
request.reload()
assert initial_state == request.machine_state
def test_submitter_can_edit_comment(self, app, admin, url, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
initial_state = request.machine_state
initial_comment = request.comment
payload = self.create_payload(request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=admin.auth)
assert res.status_code == 201
request.reload()
assert initial_state == request.machine_state
assert initial_comment != request.comment
def test_moderator_can_approve_moderated_requests(self, app, moderator, url, pre_request, post_request):
for request in [pre_request, post_request]:
initial_state = request.machine_state
assert not request.target.is_retracted
payload = self.create_payload(request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=moderator.auth)
assert res.status_code == 201
request.reload()
request.target.reload()
assert initial_state != request.machine_state
assert request.target.is_retracted
def test_moderator_cannot_approve_or_reject_or_edit_comment_nonmoderated_requests(self, app, moderator, url, none_request):
initial_state = none_request.machine_state
assert not none_request.target.is_retracted
payload = self.create_payload(none_request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=moderator.auth, expect_errors=True)
assert res.status_code == 403
none_request.reload()
assert initial_state == none_request.machine_state
assert not none_request.target.is_retracted
payload = self.create_payload(none_request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=moderator.auth, expect_errors=True)
assert res.status_code == 403
none_request.reload()
assert initial_state == none_request.machine_state
assert not none_request.target.is_retracted
initial_comment = none_request.comment
payload = self.create_payload(none_request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=moderator.auth, expect_errors=True)
assert res.status_code == 403
none_request.reload()
assert initial_state == none_request.machine_state
assert initial_comment == none_request.comment
assert not none_request.target.is_retracted
def test_moderator_can_reject_moderated_requests(self, app, moderator, url, pre_request, post_request):
for request in [pre_request, post_request]:
initial_state = request.machine_state
assert not request.target.is_retracted
payload = self.create_payload(request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=moderator.auth)
assert res.status_code == 201
request.reload()
assert initial_state != request.machine_state
assert not request.target.is_retracted
def test_moderator_cannot_edit_comment_moderated_requests(self, app, moderator, url, pre_request, post_request):
for request in [pre_request, post_request]:
initial_state = request.machine_state
initial_comment = request.comment
assert not request.target.is_retracted
payload = self.create_payload(request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=moderator.auth, expect_errors=True)
assert res.status_code == 403
request.reload()
assert initial_state == request.machine_state
assert initial_comment == request.comment
def test_write_contrib_and_noncontrib_cannot_edit_comment(self, app, write_contrib, noncontrib, url, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib]:
initial_state = request.machine_state
initial_comment = request.comment
payload = self.create_payload(request._id, trigger='edit_comment', comment='{}ASDFG'.format(user._id))
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
request.reload()
assert initial_state == request.machine_state
assert initial_comment == request.comment
@pytest.mark.skip('TODO: IN-331 -- add emails')
@mock.patch('website.reviews.listeners.mails.send_mail')
def test_email_sent_on_approve(self, mock_mail, app, moderator, url, pre_request, post_request):
for request in [pre_request, post_request]:
initial_state = request.machine_state
assert not request.target.is_retracted
payload = self.create_payload(request._id, trigger='accept')
res = app.post_json_api(url, payload, auth=moderator.auth)
assert res.status_code == 201
request.reload()
assert initial_state != request.machine_state
assert request.target.is_retracted
assert mock_mail.call_count == 2
@pytest.mark.skip('TODO: IN-331 -- add emails')
@mock.patch('website.reviews.listeners.mails.send_mail')
def test_email_sent_on_reject(self, mock_mail, app, moderator, url, pre_request, post_request):
for request in [pre_request, post_request]:
initial_state = request.machine_state
assert not request.target.is_retracted
payload = self.create_payload(request._id, trigger='reject')
res = app.post_json_api(url, payload, auth=moderator.auth)
assert res.status_code == 201
request.reload()
assert initial_state != request.machine_state
assert not request.target.is_retracted
assert mock_mail.call_count == 2
@pytest.mark.skip('TODO: IN-284/331 -- add emails')
@mock.patch('website.reviews.listeners.mails.send_mail')
def test_email_not_sent_on_edit_comment(self, mock_mail, app, moderator, url, pre_request, post_request):
for request in [pre_request, post_request]:
initial_state = request.machine_state
assert not request.target.is_retracted
payload = self.create_payload(request._id, trigger='edit_comment', comment='ASDFG')
res = app.post_json_api(url, payload, auth=moderator.auth)
assert res.status_code == 201
request.reload()
assert initial_state != request.machine_state
assert not request.target.is_retracted
assert mock_mail.call_count == 0
def test_auto_approve(self, app, auto_withdrawable_pre_mod_preprint, auto_approved_pre_request):
assert auto_withdrawable_pre_mod_preprint.is_retracted
| apache-2.0 | 6,457,259,746,807,409,000 | 50.44213 | 146 | 0.657382 | false |
yrakcaz/music-style-classifier/tests/test.py | 1 | 2101 | #!/usr/bin/env python2.7
import sys, subprocess
import json
def main():
argv = sys.argv
argc = len(argv)
if (argc != 2 or (argv[1] != "--svm" and argv[1] != "--knn")):
print("Use --knn or --svm option!")
sys.exit()
print("\033[34m\033[1m================================================\033[0m\033[0m")
print("\033[34m\033[1m TESTS Music Style Classifier \033[0m\033[0m")
print("\033[34m\033[1m================================================\033[0m\033[0m\n")
treat("training/Tracks/ground_truth.csv")
def parse_file(f):
content = []
with open(f) as f:
for l in f:
l = l.replace('\"', '').replace('\n', '')
name = ""
genre = ""
flag = 0
for c in l:
if c == ',':
flag = 1
elif flag == 0:
name += c
elif flag == 1:
genre += c
content.append([name, genre])
return content
def treat(f):
nbtests = 0
nbsucc = 0
for item in parse_file(f):
nbtests += 1
sub = subprocess.Popen(["sh", "-c", "python2.7 classify.py " + sys.argv[1] + " " + item[0]],
bufsize = 0, stdout = subprocess.PIPE, stdin = subprocess.PIPE)
out, err = sub.communicate()
val = out[:len(out) - 1]
success = False
if (val == item[1]):
success = True
print("\033[35mSong : \033[0m" + item[0])
print("\033[35mResult : \033[0m" + val)
print("\033[35mExpected : \033[0m" + item[1])
if (success):
nbsucc += 1
print("\033[32m\033[1m[SUCCESS]\033[0m\033[0m\n")
else:
print("\033[31m\033[1m[FAILURE]\033[0m\033[0m\n")
print("\033[33m\033[1mSuccess : " + str(nbsucc) + "/" + str(nbtests) + "\033[0m\033[0m")
percent = (float(nbsucc) * 100.00) / float(nbtests)
print("\033[33m\033[1m\t-> " + str(percent) + "%" + "\033[0m\033[0m")
main()
| gpl-2.0 | 6,798,761,934,914,217,000 | 34.610169 | 100 | 0.439315 | false |
dendyyangky/sgeede_b2b | sgeede_internal_transfer/wizard/wizard_stock_internal_transfer.py | 2 | 9955 | from openerp import netsvc
from openerp import models, fields, api
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from datetime import datetime
from openerp.osv import osv
import time
class wizard_stock_internal_transfer(models.TransientModel):
_name = 'wizard.stock.internal.transfer'
transfer_id = fields.Many2one('stock.internal.transfer', 'Transfer')
item_ids = fields.One2many('stock.internal.transfer.items', 'transfer_id', 'Items')
def default_get(self, cr, uid, fields, context=None):
if context is None: context = {}
res = super(wizard_stock_internal_transfer, self).default_get(cr, uid, fields, context=context)
transfer_ids = context.get('active_ids', [])
active_model = context.get('active_model')
if not transfer_ids or len(transfer_ids) != 1:
# Partial Picking Processing may only be done for one picking at a time
return res
assert active_model in ('stock.internal.transfer'), 'Bad context propagation'
transfer_id, = transfer_ids
transfers = self.pool.get('stock.internal.transfer').browse(cr, uid, transfer_id, context=context)
company_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.id
company = self.pool.get('res.company').browse(cr, uid, company_id)
items = []
if not company.transit_location_id:
raise osv.except_osv(_('Error!'), _('Please setup your stock transit location in Setting - Internal Transfer Configuration'))
if transfers.state == 'draft':
source_location_id = transfers.source_warehouse_id.lot_stock_id.id
dest_location_id = company.transit_location_id.id
elif transfers.state == 'send':
source_location_id = company.transit_location_id.id
dest_location_id = transfers.dest_warehouse_id.lot_stock_id.id
for transfer in transfers.line_ids:
item = {
'product_id': transfer.product_id.id,
'product_uom_id': transfer.product_uom_id.id,
'product_qty': transfer.product_qty,
'source_location_id': source_location_id,
# 'transit_location_id': transit_location_id,
'dest_location_id' : dest_location_id,
}
if transfer.product_id:
items.append(item)
res.update(item_ids=items)
# raise osv.except_osv(_('Warning !'),_('adsfasdfasdf'))
return res
def button_confirm(self, cr, uid, ids, context):
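		# Flow of the two branches below:
		# * state == 'draft': check the user against the source warehouse users,
		#   raise if an entered quantity exceeds the requested one, spawn a
		#   backorder transfer for any shortfall, create an outgoing picking
		#   from the source warehouse stock location to the transit location
		#   and trigger the 'action_send' workflow signal.
		# * state == 'send': check the user against the destination warehouse
		#   users, spawn a backorder for any shortfall, create an incoming
		#   picking from the transit location to the destination warehouse
		#   stock location and trigger the 'action_receive' workflow signal.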
for tf in self.browse(cr, uid, ids):
if 'active_ids' in context:
transfer = self.pool.get('stock.internal.transfer').browse(cr, uid, context.get('active_ids')[0])
if transfer.state == 'draft':
backorders = []
user_list = []
user_ids = transfer.source_warehouse_id.user_ids
if user_ids :
for user in user_ids :
user_list.append(user.id)
if uid not in user_list:
raise osv.except_osv(_('Warning !'),_('You are not authorized to send or receive products !'))
for line in tf.item_ids:
for trans in transfer.line_ids:
if line.product_id.id == trans.product_id.id:
if line.product_qty > trans.product_qty:
raise osv.except_osv(_('Error!'), _('You have exceed the available product quantity.'))
elif line.product_qty < trans.product_qty:
backorder = {
'product_id' : line.product_id.id,
'product_qty' : trans.product_qty - line.product_qty,
'product_uom_id' : line.product_uom_id.id,
'state' : 'draft',
}
backorders.append(backorder)
self.pool.get('stock.internal.transfer.line').write(cr, uid, trans.id, {
'product_qty' : line.product_qty
})
if backorders:
create_id = self.pool.get('stock.internal.transfer').create(cr, uid, {
'date' : time.strftime('%Y-%m-%d %H:%M:%S'),
'source_location_id' : transfer.source_location_id.id,
'dest_location_id' : transfer.dest_location_id.id,
'backorder_id' : context.get('active_ids')[0],
'state' : 'draft',
})
for backorder in backorders:
backorder['transfer_id'] = create_id
self.pool.get('stock.internal.transfer.line').create(cr, uid, backorder)
type_obj = self.pool.get('stock.picking.type')
type_ids = type_obj.search(cr, uid, [('default_location_src_id', '=', transfer.source_warehouse_id.lot_stock_id.id),
('code', '=', 'outgoing')])
if type_ids:
types = type_obj.browse(cr, uid, type_ids[0])
picking_obj = self.pool.get('stock.picking')
picking_id = picking_obj.create(cr, uid, {
'picking_type_id' : types.id,
'transfer_id' : context.get('active_ids')[0]
})
else:
raise osv.except_osv(_('Error!'), _('Unable to find source location in Stock Picking.'))
move_obj = self.pool.get('stock.move')
for line in tf.item_ids:
move_obj.create(cr,uid,{
'name' : 'Stock Internal Transfer',
'product_id' : line.product_id.id,
'product_uom' : line.product_uom_id.id,
'product_uom_qty' : line.product_qty,
'location_id' : line.source_location_id.id,
'location_dest_id' : line.dest_location_id.id,
'picking_id' : picking_id,
})
picking_obj = self.pool.get('stock.picking')
picking_obj.action_confirm(cr, uid, picking_id)
picking_obj.action_assign(cr, uid, picking_id)
picking_obj.do_internal_transfer_details(cr, uid, picking_id)
wkf_service = netsvc.LocalService('workflow')
wkf_service.trg_validate(uid, 'stock.internal.transfer', context.get('active_ids')[0], 'action_send', cr)
elif transfer.state == 'send':
backorders = []
user_list = []
user_ids = transfer.dest_warehouse_id.user_ids
if user_ids :
for user in user_ids :
user_list.append(user.id)
if uid not in user_list:
raise osv.except_osv(_('Warning !'),_('You are not authorized to send or receive products !'))
for line in tf.item_ids:
for trans in transfer.line_ids:
if line.product_id.id == trans.product_id.id:
if line.product_qty > trans.product_qty:
raise osv.except_osv(_('Error!'), _('You have exceed the available product quantity.'))
elif line.product_qty < trans.product_qty:
backorder = {
'product_id' : line.product_id.id,
'product_qty' : trans.product_qty - line.product_qty,
'product_uom_id' : line.product_uom_id.id,
'state' : 'draft',
}
backorders.append(backorder)
if backorders:
create_id = self.pool.get('stock.internal.transfer').create(cr, uid, {
'date' : time.strftime('%Y-%m-%d %H:%M:%S'),
'source_location_id' : transfer.source_location_id.id,
'dest_location_id' : transfer.dest_location_id.id,
'backorder_id' : context.get('active_ids')[0],
'state' : 'send',
})
for backorder in backorders:
backorder['transfer_id'] = create_id
self.pool.get('stock.internal.transfer.line').create(cr, uid, backorder)
						wkf_service = netsvc.LocalService('workflow')
						wkf_service.trg_validate(uid, 'stock.internal.transfer', create_id, 'action_send', cr)
type_obj = self.pool.get('stock.picking.type')
type_ids = type_obj.search(cr, uid, [('default_location_dest_id', '=', transfer.dest_warehouse_id.lot_stock_id.id),
('code', '=', 'incoming')])
if type_ids:
types = type_obj.browse(cr, uid, type_ids[0])
picking_obj = self.pool.get('stock.picking')
picking_id = picking_obj.create(cr, uid, {
'picking_type_id' : types.id,
'transfer_id' : context.get('active_ids')[0]
})
else:
raise osv.except_osv(_('Error!'), _('Unable to find destination location in Stock Picking.'))
move_obj = self.pool.get('stock.move')
for line in tf.item_ids:
move_obj.create(cr,uid,{
'name' : 'Stock Internal Transfer',
'product_id' : line.product_id.id,
'product_uom' : line.product_uom_id.id,
'product_uom_qty' : line.product_qty,
'location_id' : line.source_location_id.id,
'location_dest_id' : line.dest_location_id.id,
'picking_id' : picking_id,
})
picking_obj = self.pool.get('stock.picking')
picking_obj.action_confirm(cr, uid, picking_id)
picking_obj.action_assign(cr, uid, picking_id)
picking_obj.do_internal_transfer_details(cr, uid, picking_id)
wkf_service = netsvc.LocalService('workflow')
wkf_service.trg_validate(uid, 'stock.internal.transfer', context.get('active_ids')[0], 'action_receive', cr)
return True
@api.multi
def wizard_view(self):
view = self.env.ref('sgeede_internal_transfer.wizard_stock_internal_transfer_view')
return {
'name': _('Enter Transfer Details'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'wizard.stock.internal.transfer',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'res_id': self.ids[0],
'context': self.env.context,
}
class stock_internal_transfer_items(models.TransientModel):
_name = 'stock.internal.transfer.items'
transfer_id = fields.Many2one('wizard.stock.internal.transfer', 'Transfer')
product_id = fields.Many2one('product.product', 'Product')
product_qty = fields.Float('Quantity')
product_uom_id = fields.Many2one('product.uom', 'Unit of Measure')
source_location_id = fields.Many2one('stock.location', 'Source Location')
transit_location_id = fields.Many2one('stock.location', 'Transit Location')
dest_location_id = fields.Many2one('stock.location', 'Destination Location')
def product_id_change(self, cr, uid, ids, product_id, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom_id': False,
}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uom_id': product_uom_id}
return result
| unlicense | -4,496,261,562,275,443,000 | 37.288462 | 128 | 0.644701 | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.mrpiracy/resources/lib/Player.py | 1 | 8028 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import xbmcgui
import xbmc
import xbmcvfs
import time
import urllib
import urllib2
import re
import sys
import traceback
import json
from t0mm0.common.net import Net
__SITE__ = 'http://kodi.mrpiracy.xyz/'
__COOKIE_FILE__ = os.path.join(xbmc.translatePath('special://userdata/addon_data/plugin.video.mrpiracy/').decode('utf-8'), 'cookie.mrpiracy')
__HEADERS__ = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'}
#enen92 class (RatoTv) adapted for MrPiracy.xyz addon
class Player(xbmc.Player):
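	# Wraps xbmc.Player to keep a per-video progress file under
	# <addon_data>/tracker: on playback start it offers to resume from the
	# stored position, trackerTempo() rewrites the current position when
	# called, and once more than 90% of the video has been played the item is
	# marked as watched on the MrPiracy site, flagged as watched in the local
	# Kodi library, and the tracker file is removed.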
def __init__(self, url, idFilme, pastaData, temporada, episodio, nome, ano, logo, serieNome):
xbmc.Player.__init__(self)
self.url=url
self.temporada=temporada
self.episodio=episodio
self.playing = True
self.tempo = 0
self.tempoTotal = 0
self.idFilme = idFilme
self.pastaData = xbmc.translatePath(pastaData)
self.nome = nome
self.ano = ano
self.logo = logo
self.serieNome = serieNome
if not xbmcvfs.exists(os.path.join(pastaData,'tracker')):
xbmcvfs.mkdirs(os.path.join(pastaData,'tracker'))
if self.temporada != 0 and self.episodio != 0:
self.pastaVideo = os.path.join(self.pastaData,'tracker',str(self.idFilme)+'S'+str(self.temporada)+'x'+str(self.episodio)+'.mrpiracy')
self.content = 'episode'
else:
self.pastaVideo = os.path.join(self.pastaData,'tracker',str(self.idFilme)+'.mrpiracy')
self.content = 'movie'
def onPlayBackStarted(self):
#print '=======> player Start'
self.tempoTotal = self.getTotalTime()
#print '==========> total time'+str(self.tempoTotal)
if xbmcvfs.exists(self.pastaVideo):
#print "Ja existe um ficheiro do filme"
f = open(self.pastaVideo, "r")
tempo = f.read()
tempoAux = ''
minutos,segundos = divmod(float(tempo), 60)
if minutos > 60:
horas,minutos = divmod(minutos, 60)
tempoAux = "%02d:%02d:%02d" % (horas, minutos, segundos)
else:
tempoAux = "%02d:%02d" % (minutos, segundos)
dialog = xbmcgui.Dialog().yesno('MrPiracy.xyz', u'Já começaste a ver antes.', 'Continuas a partir de %s?' % (tempoAux), '', 'Não', 'Sim')
if dialog:
self.seekTime(float(tempo))
def onPlayBackStopped(self):
#print 'player Stop'
self.playing = False
tempo = int(self.tempo)
#print 'self.time/self.totalTime='+str(self.tempo/self.tempoTotal)
if (self.tempo/self.tempoTotal > 0.90):
self.adicionarVistoBiblioteca()
self.adicionarVistoSite()
try:
xbmcvfs.delete(self.pastaVideo)
except:
print "Não apagou"
pass
def adicionarVistoSite(self):
net = Net()
net.set_cookies(__COOKIE_FILE__)
codigo_fonte = net.http_GET(self.url, headers=__HEADERS__).content
if self.content == 'movie':
visto = re.compile('<a id="watched" href="(.+?)" class="watched ">Marcar como visto<span class="watch"><\/span><\/a>').findall(codigo_fonte)[0]
elif self.content == 'episode':
visto = re.compile('<div class="episode-actions">\s+<a href="(.+?)" class="marcar">Marcar como visto<\/a><a').findall(codigo_fonte)[0]
print "VISTO"
print visto
if visto != '':
marcar = net.http_GET(__SITE__+visto, headers=__HEADERS__).content
def onPlayBackEnded(self):
self.onPlayBackStopped()
def adicionarVistoBiblioteca(self):
pastaVisto=os.path.join(self.pastaData,'vistos')
try:
os.makedirs(pastaVisto)
except:
pass
if int(self.temporada) != 0 and int(self.episodio) != 0:
ficheiro = os.path.join(pastaVisto, str(self.idFilme)+'S'+str(self.temporada)+'x'+str(self.episodio)+'.mrpiracy')
else:
ficheiro = os.path.join(pastaVisto, str(self.idFilme)+'.mrpiracy')
if not os.path.exists(ficheiro):
f = open(ficheiro, 'w')
f.write('')
f.close()
try:
if int(self.temporada) != 0 and int(self.episodio) != 0:
#if xbmc.getCondVisibility('Library.HasContent(TVShows)'):
print "Check if tvshow episode exists in library when marking as watched\n\n"
titulo = re.sub('[^-a-zA-Z0-9_.()\\\/ ]+', '', self.nome)
				dados = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "year", "file"]}, "id": 1}' % (self.temporada, self.episodio))
				dados = unicode(dados, 'utf-8', errors='ignore')
dados = json.loads(dados)
dados = dados['result']['episodes']
dados = [i for i in dados if titulo in i['file']][0]
xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid" : %s, "playcount" : 1 }, "id": 1 }' % str(dados['episodeid']))
"""metaget = metahandlers.MetaData(preparezip=False)
metaget.get_meta('tvshow', self.serieNome, imdb_id=self.idFilme)
metaget.get_episode_meta(self.serieNome, self.idFilme, self.temporada, self.episodio)
metaget.change_watched(self.content, '', self.idFilme, season=self.temporada, episode=self.episodio, year='', watched=7)"""
else:
#if xbmc.getCondVisibility('Library.HasContent(Movies)'):
print "Check if movie exists in library when marking as watched\n\n"
titulo = re.sub('[^-a-zA-Z0-9_.()\\\/ ]+', '', self.nome)
				dados = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["imdbnumber", "originaltitle", "year", "file"]}, "id": 1}' % (self.ano, str(int(self.ano)+1), str(int(self.ano)-1)))
dados = unicode(dados, 'utf-8', errors='ignore')
dados = json.loads(dados)
print dados
dados = dados['result']['movies']
print dados
dados = [i for i in dados if self.idFilme in i['file']][0]
xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid" : %s, "playcount" : 1 }, "id": 1 }' % str(dados['movieid']))
"""metaget = metahandlers.MetaData(preparezip=False)
metaget.get_meta('movie', self.nome ,year=self.ano)
metaget.change_watched(self.content, '', self.idFilme, season='', episode='', year='', watched=7)"""
except:
pass
xbmc.executebuiltin("XBMC.Notification(MrPiracy.xyz,"+"Marcado como visto"+","+"6000"+","+ self.logo+")")
xbmc.executebuiltin("XBMC.Container.Refresh")
else:
print "Já foi colocado antes"
def trackerTempo(self):
try:
self.tempo = self.getTime()
f = open(self.pastaVideo, mode="w")
f.write(str(self.tempo))
f.close()
except:
traceback.print_exc()
print "Não gravou o conteudo em %s" % self.pastaVideo
| gpl-2.0 | 8,139,695,628,807,528,000 | 41.670213 | 414 | 0.551982 | false |
lyijin/working_with_dna_meth | filter_miscalled_Cs.py | 1 | 2696 | #!/usr/bin/env python3
"""
> filter_miscalled_Cs.py <
Script to process the .cov files produced by bismark_methylation_extractor, and
removes methylated positions that potentially arise by chance.
A super-conservative estimate of 1% miscall rate is assumed (might be as low
as 0.1%, evidenced by the CHG and CHH call rate in S. pistillata libraries).
The script ONLY considers positions that have at least one methylated position.
There's no way to "miscall" a C as methylated... if it's not methylated. As
a result, the B-H correction only takes into consideration the universe of
positions with at least one methylated read.
A binomial test is run to check the cumulative P value (P >= k), where k is the
number of methylated sites, and P values are further B-H corrected.
"""
import argparse
import csv
import sys
import re
import scipy.stats
import correct_p_values
import natural_sort
parser = argparse.ArgumentParser(description="""
Script to process the .cov files produced by bismark_methylation_extractor, and
removes methylated positions that potentially arise by chance.""")
parser.add_argument('bismark_cov', metavar="cov_filename",
type=argparse.FileType('r'), nargs='?',
default=sys.stdin, help="Bismark .cov filename.")
parser.add_argument('-v', action='store_true',
help="verbose mode, prints progress to stderr.")
args = parser.parse_args()
# calculate binomial P values first
counter_rows = 0
p_values = {}
with args.bismark_cov as tsv_file:
tsv_reader = csv.reader(tsv_file, delimiter='\t')
for row in tsv_reader:
if not row: continue
if args.v:
counter_rows += 1
if counter_rows % 100000 == 0:
print ('{} rows processed...'.format(counter_rows), file=sys.stderr)
# column 4: sum(methylated reads)
# column 5: sum(non-methylated reads)
meth = int(row[4])
non_meth = int(row[5])
# ignore lines with meth == 0
if meth > 0:
# scipy.stats.binom.sf calculates P(X > n) by default; the "-1"
# is added to achieve P(X >= n).
binom_p = scipy.stats.binom.sf(meth-1, meth + non_meth, 0.01)
p_values[str(row)] = binom_p
# run B-H correction
if args.v:
print ('correcting p values...', file=sys.stderr)
p_values = correct_p_values.correct_p_values(p_values)
# then print valid rows out
if args.v:
print ('printing output and exiting.', file=sys.stderr)
for p in natural_sort.natural_sort(p_values):
# print out rows that have corrected P values < 0.05
if p_values[p] < 0.05:
print ('\t'.join(eval(p))) | gpl-3.0 | 7,099,377,819,242,962,000 | 33.139241 | 84 | 0.660608 | false |
geophysics/mtpy | mtpy/imaging/plotstrike2d.py | 1 | 53768 | # -*- coding: utf-8 -*-
"""
Created on Thu May 30 18:28:24 2013
@author: jpeacock-pr
"""
#==============================================================================
import matplotlib.pyplot as plt
import numpy as np
import os
from matplotlib.ticker import MultipleLocator
import mtpy.imaging.mtplottools as mtpl
import mtpy.analysis.geometry as MTgy
#==============================================================================
class PlotStrike2D(object):
"""
    PlotStrike2D will plot the strike estimated from the phase tensor azimuth
    and the tipper in either a rose diagram or an xy plot.
    The strike angle is taken from the phase tensor azimuth (Caldwell et
    al. [2004]), using only those periods that are estimated to be 2-D,
    i.e. whose phase tensor skew angle is below skew_threshold.
    The data is split into decades where the histogram for each is plotted in
    the form of a rose diagram with a range of 0 to 360 degrees,
    where 0 is North and 90 is East. The mode of the strike angle for each
    period band is annotated on its polar diagram. The top row is the azimuth
    estimated from the phase tensor. If plot_tipper is 'y' then the bottom row
    is the strike determined from the tipper, which is orthogonal to the
    induction arrow direction.
Arguments:
----------
**fn_list** : list of strings
full paths to .edi files to plot
**z_object** : class mtpy.core.z.Z
object of mtpy.core.z. If this is input be sure the
attribute z.freq is filled. *default* is None
**mt_object** : class mtpy.imaging.mtplot.MTplot
object of mtpy.imaging.mtplot.MTplot
*default* is None
        **fig_num** : int
                      figure number to be plotted. *Default* is 1
        **font_size** : float
                      font size for labels of plotting. *Default* is 10
        **fig_dpi** : int
                      dots-per-inch resolution of figure, 300 is needed for
                      publications. *Default* is 300
        **rot_z** : float
                      angle of rotation clockwise positive. *Default* is 0
        **period_tolerance** : float
                      Tolerance level to match periods from different edi files.
                      *Default* is 0.05
**text_dict** : dictionary
*'pad' : float
padding of the angle label at the bottom of each
polar diagram. *Default* is 1.65
*'size' : float
font size
**plot_range** : [ 'data' | (period_min,period_max) ]
period range to estimate the strike angle. Options are:
* *'data'* for estimating the strike for all periods
in the data.
* (pmin,pmax) for period min and period max, input as
(log10(pmin),log10(pmax))
**plot_type** : [ 1 | 2 ]
-*1* to plot individual decades in one plot
-*2* to plot all period ranges into one polar diagram
for each strike angle estimation
**plot_tipper** : [ 'y' | 'n' ]
-*'y'* to plot the tipper strike
-*'n'* to not plot tipper strike
**pt_error_floor** : float
Maximum error in degrees that is allowed to estimate strike.
*Default* is None allowing all estimates to be used.
**fold** : [ True | False ]
*True to plot only from 0 to 180
*False to plot from 0 to 360
:Example: ::
>>> import os
>>> import mtpy.imaging.mtplot as mtplot
>>> edipath = r"/home/EDIFiles"
>>> edilist = [os.path.join(edipath,edi) for edi in os.listdir(edipath)
>>> ... if edi.find('.edi')>0]
>>> #---plot rose plots in decades with tipper and an error floor on pt
>>> strike = mtplot.plot_strike(fn_list=edilist, plot_type=1,\
pt_error_floor=5)
>>> #---plot all decades into one rose plot for each estimation---
>>> strike.plot_type = 2
>>> strike.redraw_plot()
>>> #---save the plot---
>>> strike.save_plot(r"/home/Figures")
'Figure saved to /home/Figures/StrikeAnalysis_.pdf'
Attributes:
-----------
        -axhpt              matplotlib.axes instance for phase tensor strike
        -axhtip             matplotlib.axes instance for tipper strike
        -barpt              matplotlib.axes.bar instance for pt strike
        -bartr              matplotlib.axes.bar instance for tipper strike
-bin_width width of histogram bins in degrees
-fig matplotlib.figure instance of plot
-fig_dpi dots-per-inch resolution of figure
-fig_num number of figure being plotted
-fig_size size of figure in inches
-fold boolean to fold angles to range from [0,180] or
[0,360]
-font_size font size of axes tick labels
-mt_list list of mtplot.MTplot instances containing all
the important information for each station
-period_tolerance tolerance to look for periods being plotted
-plot_range range of periods to plot
-plot_tipper string to tell program to plot induction arrows
-plot_type string to tell program how to plot strike angles
-plot_yn plot strike on instance creation
-pt_error_floor error floor to plot phase tensor strike, anything
above this error will not be plotted
-text_pad padding between text and rose diagram
-text_size font size of text labeling the mode of the histogram
-title_dict title dictionary
Methods:
--------
-plot plots the pseudo section
-redraw_plot on call redraws the plot from scratch
-save_figure saves figure to a file of given format
-update_plot updates the plot while still active
-writeTextFiles writes parameters of the phase tensor and tipper
to text files.
"""
def __init__(self, **kwargs):
fn_list = kwargs.pop('fn_list', None)
z_object_list = kwargs.pop('z_object_list', None)
tipper_object_list = kwargs.pop('tipper_object_list', None)
mt_object_list = kwargs.pop('mt_object_list', None)
#------Set attributes of the class-----------------
#--> get the inputs into a list of mt objects
self.mt_list = mtpl.get_mtlist(fn_list=fn_list,
z_object_list=z_object_list,
tipper_object_list=tipper_object_list,
mt_object_list=mt_object_list)
self._rot_z = kwargs.pop('rot_z', 0)
if type(self._rot_z) is float or type(self._rot_z) is int:
self._rot_z = np.array([self._rot_z]*len(self.mt_list))
#if the rotation angle is an array for rotation of different
#freq than repeat that rotation array to the len(mt_list)
elif type(self._rot_z) is np.ndarray:
if self._rot_z.shape[0] != len(self.mt_list):
self._rot_z = np.repeat(self._rot_z, len(self.mt_list))
else:
pass
#--> set plot properties
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_dpi = kwargs.pop('fig_dpi', 300)
self.fig_size = kwargs.pop('fig_size', [7, 5])
self.plot_num = kwargs.pop('plot_num', 1)
self.plot_type = kwargs.pop('plot_type', 2)
self.plot_title = kwargs.pop('plot_title', None)
self.plot_range = kwargs.pop('plot_range', 'data')
self.plot_tipper = kwargs.pop('plot_tipper', 'n')
self.period_tolerance = kwargs.pop('period_tolerance', .05)
self.pt_error_floor = kwargs.pop('pt_error_floor', None)
self.fold = kwargs.pop('fold', True)
self.bin_width = kwargs.pop('bin_width', 5)
self.skew_threshold = kwargs.pop('skew_threshold', 3)
self.font_size = kwargs.pop('font_size', 7)
text_dict = kwargs.pop('text_dict', {})
try:
self.text_pad = text_dict['pad']
except KeyError:
self.text_pad = 0.6
try:
self.text_size = text_dict['size']
except KeyError:
self.text_size = self.font_size
#make a dictionary for plotting titles
self.title_dict = {}
self.title_dict[-5] = '10$^{-5}$--10$^{-4}$s'
self.title_dict[-4] = '10$^{-4}$--10$^{-3}$s'
self.title_dict[-3] = '10$^{-3}$--10$^{-2}$s'
self.title_dict[-2] = '10$^{-2}$--10$^{-1}$s'
self.title_dict[-1] = '10$^{-1}$--10$^{0}$s'
self.title_dict[0] = '10$^{0}$--10$^{1}$s'
self.title_dict[1] = '10$^{1}$--10$^{2}$s'
self.title_dict[2] = '10$^{2}$--10$^{3}$s'
self.title_dict[3] = '10$^{3}$--10$^{4}$s'
self.title_dict[4] = '10$^{4}$--10$^{5}$s'
self.title_dict[5] = '10$^{5}$--10$^{6}$s'
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn=='y':
self.plot()
#---need to rotate data on setting rotz
def _set_rot_z(self, rot_z):
"""
need to rotate data when setting z
"""
#if rotation angle is an int or float make an array the length of
#mt_list for plotting purposes
if type(rot_z) is float or type(rot_z) is int:
self._rot_z = np.array([rot_z]*len(self.mt_list))
#if the rotation angle is an array for rotation of different
#freq than repeat that rotation array to the len(mt_list)
elif type(rot_z) is np.ndarray:
if rot_z.shape[0]!=len(self.mt_list):
self._rot_z = np.repeat(rot_z, len(self.mt_list))
else:
pass
for ii,mt in enumerate(self.mt_list):
mt.rot_z = self._rot_z[ii]
def _get_rot_z(self):
return self._rot_z
rot_z = property(fget=_get_rot_z, fset=_set_rot_z,
doc="""rotation angle(s)""")
def plot(self):
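        # Outline of what follows: for every station keep only the periods
        # whose phase tensor skew classifies them as 2-D, collect the phase
        # tensor azimuths and (optionally) tipper strike angles, map them onto
        # a common logarithmic period axis, then histogram the angles in
        # bin_width-degree bins, either one rose diagram per period decade
        # (plot_type=1) or a single rose diagram over the whole period range
        # (plot_type=2).  The mode of each distribution is annotated on the
        # plot and the mean, median and mode are printed to the console as
        # degrees east of north.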
plt.rcParams['font.size'] = self.font_size
plt.rcParams['figure.subplot.left'] = .07
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .09
plt.rcParams['figure.subplot.top'] = .90
plt.rcParams['figure.subplot.wspace'] = .2
plt.rcParams['figure.subplot.hspace'] = .4
bw = self.bin_width
histrange = (0, 360)
#set empty lists that will hold dictionaries with keys as the period
ptlist = []
tiprlist = []
#initialize some parameters
nc = len(self.mt_list)
nt = 0
kk = 0
for dd, mt in enumerate(self.mt_list):
#--> set the period
period = mt.period
#get maximum length of periods
if len(period)>nt:
nt = len(period)
#estimate where only the 2D sections are
dim_2d = MTgy.dimensionality(z_object=mt._Z,
beta_threshold=self.skew_threshold)
index_2d = np.where(dim_2d==2)[0]
#------------get strike from phase tensor strike angle---------------
pt = mt.get_PhaseTensor()
az = (90-pt.azimuth[0][index_2d])%360
azerr = pt.azimuth[1][index_2d]
#need to add 90 because pt assumes 0 is north and
#negative because measures clockwise.
#put an error max on the estimation of strike angle
if self.pt_error_floor:
az[np.where(azerr>self.pt_error_floor)] = 0.0
#make a dictionary of strikes with keys as period
mdictpt = dict([(ff,jj) for ff,jj in zip(mt.period[index_2d],az)])
ptlist.append(mdictpt)
#-----------get tipper strike------------------------------------
tip = mt.get_Tipper()
if tip._Tipper.tipper is None:
tip._Tipper.tipper = np.zeros((len(mt.period), 1, 2),
dtype='complex')
tip.compute_components()
#needs to be negative because measures clockwise
tipr = -tip.ang_real[index_2d]
tipr[np.where(tipr == 180.)] = 0.0
tipr[np.where(tipr == -180.)] = 0.0
#make sure the angle is between 0 and 360
tipr = tipr%360
#make a dictionary of strikes with keys as period
tiprdict = dict([(ff,jj) for ff,jj in zip(mt.period[index_2d],tipr)])
tiprlist.append(tiprdict)
#--> get min and max period
maxper = np.max([np.max(mm.keys()) for mm in ptlist])
minper = np.min([np.min(mm.keys()) for mm in ptlist])
#make empty arrays to put data into for easy manipulation
medpt = np.zeros((nt,nc))
medtipr = np.zeros((nt,nc))
#make a list of periods from the longest period list
plist = np.logspace(np.log10(minper),np.log10(maxper),num=nt,base=10)
pdict = dict([(ii,jj) for jj,ii in enumerate(plist)])
self._plist = plist
#put data into arrays
for ii,mm in enumerate(ptlist):
mperiod=mm.keys()
for jj,mp in enumerate(mperiod):
for kk in pdict.keys():
if mp>kk*(1-self.period_tolerance) and \
mp<kk*(1+self.period_tolerance):
ll = pdict[kk]
medpt[ll,ii] = ptlist[ii][mp]
medtipr[ll,ii] = tiprlist[ii][mp]
else:
pass
#make the arrays local variables
self._medpt = medpt
self._medtp = medtipr
#-----Plot Histograms of the strike angles-----------------------------
if self.plot_range == 'data':
brange=np.arange(np.floor(np.log10(minper)),
np.ceil(np.log10(maxper)),1)
else:
brange=np.arange(np.floor(self.plot_range[0]),
np.ceil(self.plot_range[1]),1)
self._brange = brange
#font dictionary
fd={'size':self.font_size,'weight':'normal'}
#------------------plot indivdual decades------------------------------
if self.plot_type == 1:
#plot specs
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .3
self.fig = plt.figure(self.fig_num, dpi=self.fig_dpi)
plt.clf()
nb = len(brange)
for jj, bb in enumerate(brange, 1):
#make subplots for invariants and phase tensor azimuths
if self.plot_tipper == 'n':
self.axhpt = self.fig.add_subplot(1, nb, jj, polar=True)
axlist = [self.axhpt]
if self.plot_tipper == 'y':
self.axhpt = self.fig.add_subplot(2, nb, jj, polar=True)
self.axhtip = self.fig.add_subplot(2, nb, jj+nb,
polar=True)
axlist = [self.axhpt, self.axhtip]
#make a list of indicies for each decades
binlist=[]
for ii,ff in enumerate(plist):
if ff > 10**bb and ff < 10**(bb+1):
binlist.append(ii)
#extract just the subset for each decade
gg = medpt[binlist,:]
if self.plot_tipper == 'y':
tr = medtipr[binlist,:]
#compute the historgram for the tipper strike
trhist = np.histogram(tr[np.nonzero(tr)].flatten(),
bins=360/bw,
range=histrange)
#make a bar graph with each bar being width of bw degrees
bartr = self.axhtip.bar((trhist[1][:-1])*np.pi/180,
trhist[0],
width=bw*np.pi/180)
#set color of the bars according to the number in that bin
#tipper goes from dark blue (low) to light blue (high)
for cc, bar in enumerate(bartr):
try:
fc = float(trhist[0][cc])/trhist[0].max()*.9
except ZeroDivisionError:
fc = 1.0
bar.set_facecolor((0, 1-fc/2, fc))
#estimate the histogram for the decade for invariants and pt
pthist = np.histogram(gg[np.nonzero(gg)].flatten(),
bins=360/bw,
range=histrange)
#plot the histograms
self.barpt = self.axhpt.bar((pthist[1][:-1])*np.pi/180,
pthist[0],
width=bw*np.pi/180)
#set the color of the bars according to the number in that bin
#pt goes from green (low) to orange (high)
for cc,bar in enumerate(self.barpt):
try:
fc = float(pthist[0][cc])/pthist[0].max()*.8
except ZeroDivisionError:
fc = 1.0
bar.set_facecolor((fc,1-fc,0))
#make axis look correct with N to the top at 90.
for aa, axh in enumerate(axlist):
#set multiple locator to be every 15 degrees
axh.xaxis.set_major_locator(MultipleLocator(30*np.pi/180))
#set labels on the correct axis
axh.xaxis.set_ticklabels(['', 'E','','',
'N','','',
'W','','',
'S','',''])
#make a light grid
axh.grid(alpha=.25)
#set pt axes properties
if aa == 0:
#limits go from -180 to 180 as that is how the angle
#is calculated
axh.set_xlim(0,2*np.pi)
#label plot with the mode of the strike angle
ptmode = (90-pthist[1][np.where(
pthist[0] == pthist[0].max())[0][0]])%360
ptmedian = (90-np.median(gg[np.nonzero(gg)]))%360
ptmean = (90-np.mean(gg[np.nonzero(gg)]))%360
axh.text(np.pi, axh.get_ylim()[1]*self.text_pad,
'{0:.1f}$^o$'.format(ptmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.text_size},
bbox={'facecolor':(.9,.9,0),'alpha':.25})
#print out the results for the strike angles
print '-----Period Range {0:.3g} to {1:.3g} (s)-----'.format(10**bb,
10**(bb+1))
print ' *PT Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}'.format(
ptmedian,
ptmode,
ptmean)
if self.plot_tipper!='y':
print '\n'
#--> set title of subplot
axh.set_title(self.title_dict[bb],fontdict=fd,
bbox={'facecolor':'white','alpha':.25})
#--> set the title offset
axh.titleOffsetTrans._t=(0,.1)
#set tipper axes properties
elif aa == 1:
#limits go from -180 to 180
axh.set_xlim(0, 2*np.pi)
#label plot with mode
tpmode = (90-trhist[1][np.where(
trhist[0] == trhist[0].max())[0][0]])%360
tpmedian = (90-np.median(tr[np.nonzero(tr)]))%360
tpmean = (90-np.mean(tr[np.nonzero(tr)]))%360
axh.text(np.pi,axh.get_ylim()[1]*self.text_pad,
'{0:.1f}$^o$'.format(tpmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.text_size},
bbox={'facecolor':(0,.1,.9),'alpha':.25})
#print out statistics for strike angle
print ' *Tipper Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}'.format(
tpmedian,
tpmode,
tpmode)
print '\n'
if nb>5:
axh.set_title(self.title_dict[bb],fontdict=fd,
bbox={'facecolor':'white','alpha':.25})
#set plot labels
if jj == 1:
if aa == 0:
axh.set_ylabel('PT Azimuth',fontdict=fd,
labelpad=self.font_size,
bbox={'facecolor':(.9,.9,0),
'alpha':.25})
elif aa == 1:
axh.set_ylabel('Tipper Strike',fd,
labelpad=self.font_size,
bbox={'facecolor':(0,.1,.9),
'alpha':0.25})
plt.setp(axh.yaxis.get_ticklabels(),visible=False)
            print 'Note: North is assumed to be 0 and the strike angle is measured '+\
                  'clockwise positive.'
plt.show()
#------------------Plot strike angles for all period ranges--------------------
elif self.plot_type == 2:
#plot specs
plt.rcParams['figure.subplot.left']=.07
plt.rcParams['figure.subplot.right']=.98
plt.rcParams['figure.subplot.bottom']=.100
plt.rcParams['figure.subplot.top']=.88
plt.rcParams['figure.subplot.hspace']=.3
plt.rcParams['figure.subplot.wspace']=.2
self.fig = plt.figure(self.fig_num,
self.fig_size,
dpi=self.fig_dpi)
plt.clf()
#make subplots for invariants and phase tensor azimuths
if self.plot_tipper == 'n':
self.axhpt = self.fig.add_subplot(1, 1, 1, polar=True)
axlist = [self.axhpt]
else:
self.axhpt = self.fig.add_subplot(1, 2, 1, polar=True)
self.axhtip = self.fig.add_subplot(1, 2, 2, polar=True)
axlist=[self.axhpt, self.axhtip]
#make a list of indicies for each decades
binlist = [pdict[ff] for ff in plist
if ff > 10**brange.min() and ff < 10**brange.max()]
#extract just the subset for each decade
gg = medpt[binlist, :]
#estimate the histogram for the decade for invariants and pt
pthist = np.histogram(gg[np.nonzero(gg)].flatten(),
bins=360/bw,
range=histrange)
#plot the histograms
self.barpt = self.axhpt.bar((pthist[1][:-1])*np.pi/180,
pthist[0],
width=bw*np.pi/180)
#set color of pt from green (low) to orange (high count)
for cc,bar in enumerate(self.barpt):
fc = float(pthist[0][cc])/pthist[0].max()*.8
bar.set_facecolor((fc,1-fc,0))
#plot tipper if desired
if self.plot_tipper == 'y':
tr = self._medtp[binlist,:]
trhist = np.histogram(tr[np.nonzero(tr)].flatten(),
bins=360/bw,
range=histrange)
self.bartr = self.axhtip.bar((trhist[1][:-1])*np.pi/180,
trhist[0],
width=bw*np.pi/180)
#set tipper color from dark blue (low) to light blue (high)
for cc,bar in enumerate(self.bartr):
fc=float(trhist[0][cc])/trhist[0].max()*.9
bar.set_facecolor((0,1-fc/2,fc))
#make axis look correct with N to the top at 90.
for aa, axh in enumerate(axlist):
#set major ticks to be every 30 degrees
axh.xaxis.set_major_locator(MultipleLocator(2*np.pi/12))
#set a light grid
axh.grid(alpha=0.25)
#set tick labels to be invisible
plt.setp(axh.yaxis.get_ticklabels(),visible=False)
#place the correct label at the cardinal directions
axh.xaxis.set_ticklabels(['', 'E', '', '',
'N', '', '',
'W', '', '',
'S', '', ''])
#set pt axes properties
if aa == 0:
axh.set_ylim(0,pthist[0].max())
ptmode = (90-pthist[1][np.where(
pthist[0] == pthist[0].max())[0][0]])%360
ptmedian = (90-np.median(gg[np.nonzero(gg)]))%360
ptmean = (90-np.mean(gg[np.nonzero(gg)]))%360
axh.text(170*np.pi/180,axh.get_ylim()[1]*.65,
'{0:.1f}$^o$'.format(ptmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.text_size},
bbox={'facecolor':(.9,.9,0),'alpha':0.25})
#print results of strike analysis for pt
print '-----Period Range {0:.3g} to {1:.3g} (s)-----'.format(10**brange[0],
10**brange[-1])
print ' *PT Strike: median={0:.1f} mode={1:.1f} mean={2:.1f}'.format(
ptmedian,
ptmode,
ptmean)
if self.plot_tipper!='y':
print '\n'
axh.set_title('PT Azimuth',fontdict=fd,
bbox={'facecolor':(.9,.9,0),'alpha':0.25})
#set tipper axes properties
                elif aa == 1:
axh.set_ylim(0,trhist[0].max())
tpmode = (90-trhist[1][np.where(
trhist[0] == trhist[0].max())[0][0]])%360
tpmedian = (90-np.median(tr[np.nonzero(tr)]))%360
tpmean = (90-np.mean(tr[np.nonzero(tr)]))%360
axh.text(170*np.pi/180,axh.get_ylim()[1]*.65,
'{0:.1f}$^o$'.format(tpmode),
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.text_size},
bbox={'facecolor':(0,.1,.9),'alpha':0.25})
print ' *Tipper Stike: median={0:.1f} mode={1:.1f} mean={2:.1f}\n'.format(
tpmedian,
tpmode,
tpmean)
axh.set_title('Tipper Strike',fontdict=fd,
bbox={'facecolor':(0,.1,.9),'alpha':0.25})
#move title up a little to make room for labels
axh.titleOffsetTrans._t=(0,.15)
#remind the user what the assumptions of the strike angle are
print 'Note: North is assumed to be 0 and the strike angle is '+\
'measured clockwise positive.'
plt.show()
def save_plot(self, save_fn, file_format='pdf',
orientation='portrait', fig_dpi=None, close_plot='y'):
"""
save_plot will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_ResPhase.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotPhaseTensorMaps(edilist,freqspot=10)
>>> p1.save_plot(r'/home/MT', file_format='jpg')
'Figure saved to /home/MT/PTMaps/PTmap_phimin_10Hz.jpg'
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation)
plt.clf()
plt.close(self.fig)
else:
if not os.path.exists(save_fn):
os.mkdir(save_fn)
save_fn = os.path.join(save_fn,'StrikeAnalysis_'+file_format)
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation)
if close_plot == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
def update_plot(self):
"""
update any parameters that where changed using the built-in draw from
canvas.
Use this if you change an of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> [ax.grid(True, which='major') for ax in [p1.axr,p1.axp]]
>>> p1.update_plot()
"""
self.fig.canvas.draw()
def redraw_plot(self):
"""
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.imaging.mtplottools as mtplot
>>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi')
>>> p1.xy_color = (.5,.5,.9)
>>> p1.xy_marker = '*'
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return "Plots phase tensor maps for one freq"
def writeTextFiles(self, save_path=None):
"""
Saves the strike information as a text file.
"""
#check to see if the strikes have been calculated
try:
self.bin_width
except AttributeError:
self.plot()
#get the path to save the file to
if save_path == None:
try:
svpath = os.path.dirname(self.mt_list[0].fn)
except TypeError:
raise IOError('Need to input save_path, could not find path')
else:
svpath = save_path
#set
if self.fold == True:
histrange=(-180,180)
elif self.fold == False:
histrange=(0,360)
#set the bin width
bw = self.bin_width
slistinv = [['station']]
slistpt = [['station']]
slisttip = [['station']]
#calculate the strikes for the different period bands
for jj,bb in enumerate(self._brange):
tstr = self.title_dict[bb].replace('$','')
tstr = tstr.replace('{','').replace('}','').replace('^','e')
tstr = tstr.replace('s', '(s)')
slistinv[0].append(tstr)
slistpt[0].append(tstr)
slisttip[0].append(tstr)
#calculate the strike for the different period bands per station
for kk, mt in enumerate(self.mt_list, 1):
if jj == 0:
slistinv.append([mt.station])
slistpt.append([mt.station])
slisttip.append([mt.station])
zinv = mt.get_Zinvariants()
pt = mt.get_PhaseTensor()
tp = mt.get_Tipper()
bnlist = []
for nn,per in enumerate(mt.period):
if per>10**bb and per<10**(bb+1):
bnlist.append(nn)
#---> strike from invariants
zs = 90-zinv.strike[bnlist]
#fold so the angle goes from 0 to 180
if self.fold == True:
#for plotting put the NW angles into the SE quadrant
zs[np.where(zs>90)] = zs[np.where(zs>90)]-180
zs[np.where(zs<-90)] = zs[np.where(zs<-90)]+180
#leave as the total unit circle 0 to 360
elif self.fold == False:
pass
zshist = np.histogram(zs[np.nonzero(zs)].flatten(),
bins=360/bw,
range=histrange)
#==============================================================
# For putting the values into a useful text file
# need to subtract 90 from the values to put them into
# coordinates where north is 0 and east is 90, which is
# different from plotting where east in the plotting function
# is equal to 0 and north is 90, measuring counter-clockwise
#==============================================================
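                # e.g. (illustrative) an angle of 30 deg in the plotting frame
                # (east = 0, counter-clockwise) becomes 90 - 30 = 60 deg east
                # of north; any result below 0 deg is wrapped by adding 360.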
#==> compute mean
invmean=90-zs.mean()
if invmean<0: invmean+=360
invmed=90-np.median(zs)
#==> compute median
if invmed<0: invmed+=360
#==> compute mode
invmode=90-zshist[1][np.where(
zshist[0] == zshist[0].max())[0][0]]
if invmode<0: invmode+=360
#==> append to list
slistinv[kk].append((invmean,
invmed,
invmode))
#---> strike from phase tensor
az = pt.azimuth[0][bnlist]
#fold so the angle goes from 0 to 180
if self.fold == True:
az[np.where(az>90)] = az[np.where(az>90)]-180
az[np.where(az<-90)] = az[np.where(az<-90)]+180
#leave as the total unit circle 0 to 360
elif self.fold == False:
az[np.where(az<0)] = az[np.where(az<0)]+360
# == > compute mean
ptmean1 = 90-az.mean()
if ptmean1<0: ptmean1 += 360
# == > compute median
ptmed1 = 90-np.median(az)
if ptmed1<0: ptmed1 += 360
# == > compute mode
azhist = np.histogram(az[np.nonzero(az)].flatten(),
bins=360/bw,
range=histrange)
ptmode1 = 90-azhist[1][np.where(
azhist[0] == azhist[0].max())[0][0]]
if ptmode1<0: ptmode1 += 360
slistpt[kk].append((ptmean1,
ptmed1,
ptmode1))
#---> strike from tipper
#needs to be negative because measures clockwise
if tp._Tipper.tipper is None:
tp._Tipper.tipper = np.zeros((len(mt.period), 1, 2),
dtype='complex')
tp.compute_components()
tipr = -tp.ang_real[bnlist]
#fold so the angle goes from 0 to 180
if self.fold == True:
tipr[np.where(tipr>90)] = tipr[np.where(tipr>90)]-180
tipr[np.where(tipr<-90)] = tipr[np.where(tipr<-90)]+180
#leave as the total unit circle 0 to 360
elif self.fold == False:
tipr[np.where(tipr<0)] = tipr[np.where(tipr<0)]+360
tphist = np.histogram(tipr[np.nonzero(tipr)].flatten(),
bins=360/bw,
range=histrange)
#==> compute mean
tpmean1 = 90-tipr.mean()
if tpmean1<0: tpmean1 += 360
#==> compute median
tpmed1 = 90-np.median(tipr)
if tpmed1<0: tpmed1 += 360
#==> compute mode
tpmode1 = 90-tphist[1][np.where(
tphist[0] == tphist[0].max())[0][0]]
if tpmode1<0: tpmode1 += 360
#--> append statistics to list
slisttip[kk].append((tpmean1,
tpmed1,
tpmode1))
#make a list of indicies for each decades
binlist=[]
for ii,ff in enumerate(self._plist):
if ff>10**bb and ff<10**(bb+1):
binlist.append(ii)
#extract just the subset for each decade
hh = self._medinv[binlist,:]
gg = self._medpt[binlist,:]
tr = self._medtp[binlist,:]
#estimate the histogram for the decade for invariants and pt
invhist = np.histogram(hh[np.nonzero(hh)].flatten(),
bins=360/bw,
range=histrange)
pthist = np.histogram(gg[np.nonzero(gg)].flatten(),
bins=360/bw,
range=histrange)
trhist = np.histogram(tr[np.nonzero(tr)].flatten(),
bins=360/bw,
range=histrange)
#--> include the row for mean, median and mode for each parameter
if jj == 0:
slistinv.append(['mean'])
slistinv.append(['median'])
slistinv.append(['mode'])
slistpt.append(['mean'])
slistpt.append(['median'])
slistpt.append(['mode'])
slisttip.append(['mean'])
slisttip.append(['median'])
slisttip.append(['mode'])
#--> compute mean, median and mode for invariants
# == > mean
imean = 90-np.mean(hh[np.nonzero(hh)])
if imean<0: imean += 360
# == > median
imed = 90-np.median(hh[np.nonzero(hh)])
if imed<0: imed +=360
# == > mode
imode = 90-invhist[1][np.where(
invhist[0] == invhist[0].max())[0][0]]
if imode<0: imode +=360
#--> add them to the list of estimates
slistinv[kk+1].append(imean)
slistinv[kk+2].append(imed)
slistinv[kk+3].append(imode)
#--> compute pt statistics
# == > mean
ptmean = 90-np.mean(gg[np.nonzero(gg)])
            if ptmean<0: ptmean += 360
# == > median
ptmed = 90-np.median(gg[np.nonzero(gg)])
if ptmed<0: ptmed +=360
# == > mode
ptmode = 90-pthist[1][np.where(
pthist[0] == pthist[0].max())[0][0]]
if ptmode<0: ptmode +=360
#--> add the statistics to the parameter list
slistpt[kk+1].append(ptmean)
slistpt[kk+2].append(ptmed)
slistpt[kk+3].append(ptmode)
#--> compute tipper statistics
# == > mean
            tpmean = 90-np.mean(tr[np.nonzero(tr)])
if tpmean<0: tpmean += 360
# == > median
            tpmed = 90-np.median(tr[np.nonzero(tr)])
if tpmed<0: tpmed +=360
# == > mode
tpmode = 90-trhist[1][np.where(
trhist[0] == trhist[0].max())[0][0]]
if tpmode<0: tpmode +=360
#--> add the statistics to parameter list
slisttip[kk+1].append(tpmean)
slisttip[kk+2].append(tpmed)
slisttip[kk+3].append(tpmode)
        invfid = open(os.path.join(svpath,'Strike.invariants'),'w')
        ptfid = open(os.path.join(svpath,'Strike.pt'),'w')
        tpfid = open(os.path.join(svpath,'Strike.tipper'),'w')
#---> write strike from the invariants
# == > mean
invfid.write('-'*20+'MEAN'+'-'*20+'\n')
for ii,l1 in enumerate(slistinv):
for jj,l2 in enumerate(l1):
if ii == 0:
invfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
invfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[0])))
except IndexError:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
invfid.write('\n')
# == > median
invfid.write('-'*20+'MEDIAN'+'-'*20+'\n')
for ii,l1 in enumerate(slistinv):
for jj,l2 in enumerate(l1):
if ii == 0:
invfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
invfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[1])))
except IndexError:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
invfid.write('\n')
# == > mode
invfid.write('-'*20+'MODE'+'-'*20+'\n')
for ii,l1 in enumerate(slistinv):
for jj,l2 in enumerate(l1):
if ii == 0:
invfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
invfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[2])))
except IndexError:
invfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
invfid.write('\n')
invfid.close()
#---> write the phase tensor text files
ptfid.write('-'*20+'MEAN'+'-'*20+'\n')
for ii,l1 in enumerate(slistpt):
for jj,l2 in enumerate(l1):
if ii == 0:
ptfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
ptfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[0])))
except IndexError:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
ptfid.write('\n')
ptfid.write('-'*20+'MEDIAN'+'-'*20+'\n')
for ii,l1 in enumerate(slistpt):
for jj,l2 in enumerate(l1):
if ii == 0:
ptfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
ptfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[1])))
except IndexError:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
ptfid.write('\n')
ptfid.write('-'*20+'MODE'+'-'*20+'\n')
for ii,l1 in enumerate(slistpt):
for jj,l2 in enumerate(l1):
if ii == 0:
ptfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
ptfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[2])))
except IndexError:
ptfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
ptfid.write('\n')
ptfid.close()
#---> write the tipper text files
tpfid.write('-'*20+'MEAN'+'-'*20+'\n')
for ii,l1 in enumerate(slisttip):
for jj,l2 in enumerate(l1):
if ii == 0:
tpfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
tpfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[0])))
except IndexError:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
tpfid.write('\n')
tpfid.write('-'*20+'MEDIAN'+'-'*20+'\n')
for ii,l1 in enumerate(slisttip):
for jj,l2 in enumerate(l1):
if ii == 0:
tpfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
tpfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[1])))
except IndexError:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
tpfid.write('\n')
tpfid.write('-'*20+'MODE'+'-'*20+'\n')
for ii,l1 in enumerate(slisttip):
for jj,l2 in enumerate(l1):
if ii == 0:
tpfid.write('{0:^16}'.format(l2))
else:
if jj == 0:
tpfid.write('{0:>16}'.format(l2+' '*6))
else:
try:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2[2])))
except IndexError:
tpfid.write('{0:^16}'.format(
'{0: .2f}'.format(l2)))
tpfid.write('\n')
tpfid.close()
| gpl-3.0 | 8,961,299,997,132,101,000 | 41.639175 | 108 | 0.408459 | false |
gmauro/presta | presta/datasets.py | 1 | 2265 | """
"""
import glob
import os
class DatasetsManager(object):
def __init__(self, logger, ids):
self.ids = ids
self.logger = logger
self.fastq_collection = dict()
self.fastq_counter = 0
def collect_fastq_from_fs(self, base_path):
results = dict()
count = 0
file_ext = 'fastq.gz'
ds_dir_label = 'datasets'
filesDepth = []
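        # walk up to four directory levels below base_path and keep only
        # regular files that sit under a 'datasets' directory and look like
        # fastq.gz files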
for depth in ['*', '*/*', '*/*/*', '*/*/*/*']:
filesGlob = glob.glob(os.path.join(base_path, depth))
filesDepth.extend(filter(lambda f: os.path.isfile(f) and
ds_dir_label in f.split('/') and
file_ext in f.split('/')[-1],
filesGlob))
for path in filesDepth:
fname = os.path.basename(path)
extended_id = fname.split('_')[0]
_id = '-'.join(extended_id.split('-')[:-1])
if _id in self.ids:
rd_label_index = path.split('/').index(ds_dir_label) - 1
rd_label = path.split('/')[rd_label_index]
fc_label = rd_label.split('_')[-1][1:]
list_item = dict(extended_id=extended_id,
filename=fname,
filepath=path,
file_ext=file_ext,
_id=_id,
lane=fname.split('_')[2] if fname.split('_')[2].startswith('L') else None,
read_label=fname.split('_')[2] if fname.split('_')[2].startswith('R') else fname.split('_')[3],
rd_label=rd_label,
fc_label=fc_label,
)
if _id not in results:
results[_id] = []
if _id not in self.fastq_collection:
self.fastq_collection[_id] = []
results[_id].append(list_item)
self.fastq_collection[_id].append(list_item)
count += 1
self.fastq_counter += 1
return results, count
@staticmethod
def collect_fastq_from_irods(ipath):
pass
| mit | 499,289,946,597,377,500 | 35.532258 | 128 | 0.430022 | false |
jgoclawski/django-facebook-auth | facebook_auth/models.py | 1 | 9449 | import collections
from datetime import timedelta
import json
import logging
try:
from urllib.error import HTTPError
import urllib.parse as urlparse
except ImportError:
import urlparse
from urllib2 import HTTPError
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.db import models
from django.db.models import Q
from django.dispatch import receiver
from django.utils import timezone
from celery import task
from facepy import exceptions
from facebook_auth import forms
from facebook_auth import graph_api
from facebook_auth import utils
logger = logging.getLogger(__name__)
class FacebookError(Exception):
pass
class FacebookUser(auth_models.User):
user_id = models.BigIntegerField(unique=True)
app_friends = models.ManyToManyField('self')
scope = models.CharField(max_length=512, blank=True, default='')
@property
def access_token(self):
try:
return self._get_token_object().token
except UserToken.DoesNotExist:
return None
@property
def access_token_expiration_date(self):
return self._get_token_object().expiration_date
@property
def graph(self):
return graph_api.get_graph(self._get_token_object().token)
def _get_token_object(self):
return UserTokenManager.get_access_token(self.user_id)
@property
def js_session(self):
return json.dumps({
'access_token': self.access_token,
'uid': self.user_id,
})
@property
def friends(self):
response = utils.get_from_graph_api(self.graph, "me/friends")
if 'data' in response:
return response['data']
else:
logger.warning("OpenGraph error: %s" % response)
return []
def update_app_friends(self):
friends = self.friends
friends_ids = [f['id'] for f in friends]
existing_friends = self.app_friends.all().values_list(
'user_id', flat=True)
new_friends = FacebookUser.objects.filter(
user_id__in=friends_ids).exclude(user_id__in=existing_friends)
removed_friends = self.app_friends.exclude(user_id__in=friends_ids)
self.app_friends.add(*new_friends)
self.app_friends.remove(*removed_friends)
class UserToken(models.Model):
provider_user_id = models.CharField(max_length=255)
token = models.TextField(unique=True)
granted_at = models.DateTimeField(auto_now_add=True)
expiration_date = models.DateTimeField(null=True, blank=True, default=None)
deleted = models.BooleanField(default=False)
class Meta:
verbose_name = 'User token'
verbose_name_plural = 'User tokens'
class TokenDebugException(Exception):
pass
class UserTokenManager(object):
@staticmethod
def insert_token(provider_user_id, token, expiration_date=None):
provider_user_id = str(provider_user_id)
defaults = {'provider_user_id': provider_user_id,
'expiration_date': expiration_date}
obj, created = UserToken.objects.get_or_create(
token=token, defaults=defaults)
if not created:
obj.expiration_date = expiration_date
obj.save()
if obj.provider_user_id != provider_user_id:
            extra = {'object_provider_user_id': obj.provider_user_id,
                     'provider_user_id': provider_user_id,
                     'provider_user_id_type': type(provider_user_id)}
logger.warning('Got different provider_user_id for token.',
extra=extra)
@staticmethod
def get_access_token(provider_user_id):
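        # Prefer a token without an explicit expiration date that was granted
        # within the last 30 seconds; otherwise fall back to the token with
        # the latest known expiration date.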
eldest_wildcarded = timezone.now() - timezone.timedelta(seconds=30)
related_tokens = UserToken.objects.filter(
provider_user_id=provider_user_id, deleted=False)
try:
return (related_tokens
.filter(expiration_date__isnull=True,
granted_at__gte=eldest_wildcarded)
.latest('granted_at'))
except UserToken.DoesNotExist:
return (related_tokens
.exclude(expiration_date__isnull=True)
.latest('expiration_date'))
@staticmethod
def invalidate_access_token(token):
UserToken.objects.filter(token=token).update(deleted=True)
class FacebookTokenManager(object):
TokenInfo = collections.namedtuple('TokenInfo',
['user', 'expires', 'token'])
@staticmethod
def insert_token(access_token, user_id, token_expiration_date=None):
token_manager = UserTokenManager()
if getattr(settings, 'REQUEST_LONG_LIVED_ACCESS_TOKEN', False):
insert_extended_token.delay(access_token, user_id)
token_manager.insert_token(user_id, access_token,
token_expiration_date)
def discover_fresh_access_token(self, access_token):
data = self.debug_token(access_token)
self.insert_token(access_token, data.user, data.expires)
@staticmethod
def convert_expiration_seconds_to_date(seconds):
return timezone.now() + timedelta(seconds=seconds)
@staticmethod
def get_long_lived_access_token(access_token):
graph = graph_api.get_graph()
args = {
'client_id': settings.FACEBOOK_APP_ID,
'client_secret': settings.FACEBOOK_APP_SECRET,
'grant_type': 'fb_exchange_token',
'fb_exchange_token': access_token,
}
data = graph.get('/oauth/access_token', **args)
try:
access_token = urlparse.parse_qs(data)['access_token'][-1]
expires_in_seconds = int(urlparse.parse_qs(data)['expires'][-1])
except KeyError:
logger.warning('Invalid Facebook response.')
raise FacebookError
return access_token, expires_in_seconds
def debug_token(self, token):
graph = utils.get_application_graph()
response = graph.get('/debug_token', input_token=token)
parsed_response = forms.parse_facebook_response(response, token)
if parsed_response.is_valid:
data = parsed_response.parsed_data
self._update_scope(data)
return self.get_token_info(data)
else:
raise TokenDebugException('Invalid Facebook response.',
{'errors': parsed_response.errors})
def _update_scope(self, data):
if 'scopes' in data:
(FacebookUser.objects.filter(user_id=data['user_id'])
.update(scope=','.join(data['scopes'])))
def get_token_info(self, response_data):
return self.TokenInfo(token=response_data['token'],
user=str(response_data['user_id']),
expires=response_data['expires_at'])
@task()
def validate_token(access_token):
manager = FacebookTokenManager()
try:
manager.debug_token(access_token)
except TokenDebugException:
logger.info('Invalid access token')
token_manager = UserTokenManager()
token_manager.invalidate_access_token(access_token)
@task()
def insert_extended_token(access_token, user_id):
manager = FacebookTokenManager()
token_manager = UserTokenManager()
try:
access_token, expires_in_seconds = manager.get_long_lived_access_token(
access_token)
except (exceptions.FacebookError, FacebookError, HTTPError):
pass
else:
token_expiration_date = manager.convert_expiration_seconds_to_date(
expires_in_seconds)
token_manager.insert_token(user_id, access_token,
token_expiration_date)
@receiver(models.signals.post_save, sender=UserToken)
def dispatch_engines_run(sender, instance, created, **kwargs):
if created:
debug_all_tokens_for_user.apply_async(args=[instance.provider_user_id],
countdown=45)
@task()
def debug_all_tokens_for_user(user_id):
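    # Re-validate every stored token for this user, drop the ones Facebook
    # reports as invalid, and retry later if a better token appears while
    # this task is running.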
manager = FacebookTokenManager()
token_manager = UserTokenManager()
user_tokens = list(
UserToken.objects
.filter(provider_user_id=user_id, deleted=False)
.values_list('token', flat=True)
)
for token in user_tokens:
try:
data = manager.debug_token(token)
except TokenDebugException:
logger.info('Invalid access token')
token_manager.invalidate_access_token(token)
else:
token_manager.insert_token(user_id, data.token, data.expires)
try:
best_token = token_manager.get_access_token(user_id)
except UserToken.DoesNotExist:
logger.info("Best token was deleted by other process.")
else:
if best_token.token not in user_tokens:
logger.info(
'New best token has arrived.'
'Retrying debug_all_tokens_for_user.'
)
debug_all_tokens_for_user.retry(args=[user_id],
countdown=45)
else:
logger.info('Deleting user tokens except best one.')
tokens_to_delete = sorted(user_tokens)
tokens_to_delete.remove(best_token.token)
for token in tokens_to_delete:
token_manager.invalidate_access_token(token)
| mit | 3,231,946,060,336,171,000 | 33.996296 | 79 | 0.621653 | false |
mattpap/sympy-polys | sympy/utilities/pytest.py | 1 | 3948 | """py.test hacks to support XFAIL/XPASS"""
# XXX this should be integrated into py.test
# XXX but we can't force everyone to install py-lib trunk
import sys
try:
# functools is not available in Python 2.4
import functools
except ImportError:
has_functools = False
else:
has_functools = True
try:
# tested with py-lib 0.9.0
from py.__.test.outcome import Outcome, Passed, Failed, Skipped
from py.__.test.terminal.terminal import TerminalSession
from py.test import skip
USE_PYTEST = True
except ImportError:
USE_PYTEST = False
def raises(ExpectedException, code):
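    # usage: raises(ZeroDivisionError, "1/0"); `code` is a string that is
    # exec'd in the caller's namespace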
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
try:
exec code in frame.f_globals, loc
except ExpectedException:
return
raise AssertionError("DID NOT RAISE")
if not USE_PYTEST:
class XFail(Exception):
pass
class XPass(Exception):
pass
class Skipped(Exception):
pass
def XFAIL(func):
def wrapper():
try:
func()
except Exception:
raise XFail()
raise XPass()
if has_functools:
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def skip(str):
raise Skipped(str)
else:
from time import time as now
__all__ = ['XFAIL']
class XFail(Outcome):
pass
class XPass(Outcome):
pass
TerminalSession.typemap[XFail] = 'f'
TerminalSession.typemap[XPass] = 'X'
TerminalSession.namemap[XFail] = 'XFAIL'
TerminalSession.namemap[XPass] = '*** XPASS ***'
def footer(self, colitems):
super(TerminalSession, self).footer(colitems)
self.endtime = now()
self.out.line()
self.skippedreasons()
self.failures()
self.xpasses()
self.summaryline()
def xpasses(self):
"""report unexpectedly passed tests"""
texts = {}
for colitem, outcome in self.getitemoutcomepairs(XPass):
raisingtb = self.getlastvisible(outcome.excinfo.traceback)
fn = raisingtb.frame.code.path
lineno = raisingtb.lineno
#d = texts.setdefault(outcome.excinfo.exconly(), {})
d = texts.setdefault(outcome.msg, {})
d[(fn,lineno)] = outcome
if texts:
self.out.line()
self.out.sep('_', '*** XPASS ***')
for text, dict in texts.items():
#for (fn, lineno), outcome in dict.items():
# self.out.line('Skipped in %s:%d' %(fn, lineno+1))
#self.out.line("reason: %s" % text)
self.out.line("%s" % text)
self.out.line()
def summaryline(self):
outlist = []
sum = 0
for typ in Passed, XPass, XFail, Failed, Skipped:
l = self.getitemoutcomepairs(typ)
if l:
outlist.append('%d %s' % (len(l), typ.__name__.lower()))
sum += len(l)
elapsed = self.endtime-self.starttime
status = "%s" % ", ".join(outlist)
self.out.sep('=', 'tests finished: %s in %4.2f seconds' %
(status, elapsed))
# SymPy specific
if self.getitemoutcomepairs(Failed):
self.out.line('DO *NOT* COMMIT!')
TerminalSession.footer = footer
TerminalSession.xpasses = xpasses
TerminalSession.summaryline = summaryline
def XFAIL(func):
"""XFAIL decorator"""
def func_wrapper():
try:
func()
except Outcome:
raise # pass-through test outcome
except:
raise XFail('XFAIL: %s' % func.func_name)
else:
raise XPass('XPASS: %s' % func.func_name)
if has_functools:
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
| bsd-3-clause | -4,433,186,619,953,015,000 | 27 | 72 | 0.561297 | false |
imiolek-ireneusz/eduActiv8 | game_boards/game081.py | 1 | 23574 | # -*- coding: utf-8 -*-
import os
import pygame
import random
from math import pi, cos, acos, sin, sqrt
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
import classes.simple_vector as sv
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self, mainloop, 1, 1)
gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 19, 10)
def create_game_objects(self, level=1):
self.vis_buttons = [0, 0, 0, 0, 1, 0, 1, 0, 0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.hand_id = 0
self.hand_coords = [[], []]
self.board.draw_grid = False
if self.mainloop.scheme is not None:
color1 = self.mainloop.scheme.color1 # bright side of short hand
color3 = self.mainloop.scheme.color3 # inner font color
color5 = self.mainloop.scheme.color5 # dark side of short hand
color7 = self.mainloop.scheme.color7 # inner circle filling
color2 = self.mainloop.scheme.color2 # bright side of long hand
color4 = self.mainloop.scheme.color4 # ex.hsv_to_rgb(170,255,255)#outer font color
color6 = self.mainloop.scheme.color6 # dark side of long hand
color8 = self.mainloop.scheme.color8 # outer circle filling
self.h_col = color5
self.m_col = color6
white = self.mainloop.scheme.u_color
gray = (100, 100, 100)
else:
color1 = ex.hsv_to_rgb(225, 70, 230)
color3 = ex.hsv_to_rgb(225, 255, 255)
color5 = ex.hsv_to_rgb(225, 180, 240)
color7 = ex.hsv_to_rgb(225, 10, 255)
color2 = ex.hsv_to_rgb(170, 70, 230)
color4 = ex.hsv_to_rgb(170, 255, 255)
color6 = ex.hsv_to_rgb(170, 180, 240)
color8 = ex.hsv_to_rgb(170, 10, 255)
self.h_col = ex.hsv_to_rgb(225, 190, 220)
self.m_col = ex.hsv_to_rgb(170, 190, 220)
white = (255, 255, 255)
gray = (100, 100, 100)
transp = (0, 0, 0, 0)
self.colors = [color1, color2]
self.colors2 = [color3, color4]
self.colors3 = [color5, color6]
self.colors4 = [color7, color8]
data = [19, 10, True, True, False, False, False, False, False, True, True, 15]
h_pool = range(1, 13)
m_pool = range(0, 60)
# visual display properties
self.show_outer_ring = data[2]
self.show_minutes = data[3]
self.show_24h = data[4]
self.show_only_quarters_h = data[5]
self.show_only_quarters_m = data[6]
self.show_only_fives_m = data[7]
self.show_roman = data[8]
self.show_highlight = data[9]
self.show_hour_offset = data[10]
self.show_catalan = True
tt = [random.choice(h_pool), random.choice(m_pool)]
self.time = tt
self.tm = self.time[:]
self.digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
self.roman = ["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII"]
if self.mainloop.lang.lang == "ca":
self.catalan = ["i5", "-5", "1/4", "i5", "-5", "2/4", "i5", "-5", "3/4", "i5", "-5", "12"]
else:
self.catalan = ["5", "10", "1/4", "20", "25", "1/2", "25", "20", "1/4", "10", "5", "0"]
x_count = self.get_x_count(data[1], even=False)
if x_count > data[0]:
data[0] = x_count
self.font_size = 0
self.data = data
self.layout.update_layout(data[0], data[1])
self.board.level_start(data[0], data[1], self.layout.scale)
self.size = self.board.scale * 10
self.board.add_unit(0, 0, 10, 10, classes.board.Ship, "", white, "", self.font_size)
self.clock_canvas = self.board.ships[-1]
self.board.active_ship = self.clock_canvas.unit_id
ans_offset = 10 + (data[0] - 15) // 2
self.board.add_unit(10, 0, data[0] - 10, 2, classes.board.Label, self.lang.d["Set_clock_instr"], white, "", 2)
self.board.units[-1].font_color = gray
self.board.add_unit(ans_offset + 1, 3, 1, 1, classes.board.ImgCenteredShip, "", transp,
img_src='nav_u_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.h_col)
self.h_plus = self.board.ships[-1]
self.board.add_unit(ans_offset + 3, 3, 1, 1, classes.board.ImgCenteredShip, "", transp,
img_src='nav_u_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.m_col)
self.m_plus = self.board.ships[-1]
self.board.add_unit(ans_offset + 1, 5, 1, 1, classes.board.ImgCenteredShip, "", transp,
img_src='nav_d_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.h_col)
self.h_min = self.board.ships[-1]
self.board.add_unit(ans_offset + 3, 5, 1, 1, classes.board.ImgCenteredShip, "", transp,
img_src='nav_d_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.m_col)
self.m_min = self.board.ships[-1]
lst = [self.h_plus, self.h_min, self.m_plus, self.m_min]
for each in lst:
each.immobilize()
self.board.add_unit(ans_offset+1, 4, 1, 1, classes.board.Label, "%02d" % self.time[0], white, "", 0)
self.ans_h = self.board.units[-1]
self.board.add_unit(ans_offset + 2, 4, 1, 1, classes.board.Label, ":", white, "", 0)
self.board.add_unit(ans_offset + 3, 4, 1, 1, classes.board.Label, "%02d" % self.time[1], white, "", 0)
self.ans_m = self.board.units[-1]
self.ans_h.immobilize()
self.ans_m.immobilize()
self.ans_h.font_color = color3
self.ans_m.font_color = color4
self.center = [self.size // 2, self.size // 2]
self.clock_canvas.font = self.clock_canvas.board.font_sizes[2]
self.clock_canvas.font2 = self.clock_canvas.board.font_sizes[7]
self.clock_canvas.font3 = self.clock_canvas.board.font_sizes[26]
self.clock_canvas.immobilize()
self.board.add_unit(10, 6, data[0] - 10, 2, classes.board.Letter, "", white, "", 2)
self.text_time = self.board.ships[-1]
self.text_time.immobilize()
self.text_time.font_color = gray
self.update_text_time()
self.canvas = pygame.Surface((self.size, self.size - 1))
if self.mainloop.scheme is not None:
self.canvas.fill(self.mainloop.scheme.u_color)
else:
self.canvas.fill((255, 255, 255))
tint_h = self.colors3[0]
tint_m = self.colors3[1]
shrink = 0.72
self.whs = int(self.size * shrink)
self.hand_h = self.scalled_img(
pygame.image.load(os.path.join('res', 'images', "clock_h.png")).convert_alpha(), self.whs, self.whs)
self.hand_h.fill(tint_h, special_flags=pygame.BLEND_ADD)
self.hand_m = self.scalled_img(
pygame.image.load(os.path.join('res', 'images', "clock_m.png")).convert_alpha(), self.whs, self.whs)
self.hand_m.fill(tint_m, special_flags=pygame.BLEND_ADD)
self.pivot = [self.whs // 2, self.whs // 2]
self.hands = [self.hand_h, self.hand_m]
self.hands_vars()
self.draw_hands()
self.clock_canvas.hidden_value = [2, 3]
self.clock_canvas.font_color = color2
self.clock_canvas.painting = self.canvas.copy()
def update_text_time(self):
if self.mainloop.lang.lang == "ca":
self.lang.numbers[0] = "un"
tt = self.time
if self.mainloop.m.game_variant in [0, 2]:
if self.mainloop.m.game_var2 == 0:
self.text_string = self.lang.time2str(tt[0], tt[1])
if self.lang.lang == "ru":
spk_txt = self.lang.time2spk(tt[0], tt[1])
self.text_time.speaker_val = spk_txt
self.text_time.speaker_val_update = False
else:
self.text_string = self.lang.time2str_short(tt[0], tt[1])
if self.lang.lang == "ru":
spk_txt = self.lang.time2spk_short(tt[0], tt[1])
self.text_time.speaker_val = spk_txt
self.text_time.speaker_val_update = False
if self.lang.lang == "he":
spk_txt = self.lang.time2spk(tt[0], tt[1])
self.text_time.speaker_val = spk_txt
self.text_time.speaker_val_update = False
self.text_time.value = self.text_string
self.text_time.update_me = True
self.ans_h.value = "%02d" % self.time[0]
self.ans_m.value = "%02d" % self.time[1]
self.ans_h.update_me = True
self.ans_m.update_me = True
if self.mainloop.lang.lang == "ca":
self.lang.numbers[0] = "u"
def hands_vars(self):
self.angle_step_12 = 2 * pi / 12
self.angle_step_60 = 2 * pi / 60
self.angle_start = -pi / 2
self.r = self.size // 3 + self.size // 10
self.rs = [self.r * 0.6, self.r * 0.85, self.r * 0.6] # rings
self.rs2 = [self.r * 0.85, self.r * 0.6, self.r * 0.34] # numbers
self.rs3 = [self.r * 0.45, self.r * 0.7, self.r * 0.6] # hands
def draw_hands(self):
if self.show_hour_offset:
a1 = self.angle_start + (2 * pi / 12) * self.time[0] + (self.angle_step_12 * (2 * pi / 60) * self.time[
1]) / (2 * pi)
else:
a1 = self.angle_start + (2 * pi / 12) * self.time[0]
a2 = self.angle_start + (2 * pi / 60) * self.time[1]
self.angles = [a1, a2]
rs = self.rs
rs2 = self.rs2
rs3 = self.rs3
time = self.time
if self.show_outer_ring:
pygame.draw.circle(self.canvas, self.colors4[1], self.center, int(rs[1] + 10), 0)
pygame.draw.circle(self.canvas, self.colors2[1], self.center, int(rs[1] + 10), 1)
pygame.draw.circle(self.canvas, self.colors4[0], self.center, int(rs[2] + 10), 0)
pygame.draw.circle(self.canvas, self.colors2[0], self.center, int(rs[2] + 10), 1)
if self.show_outer_ring:
for i in range(60):
val = str(i + 1)
if self.show_only_quarters_m:
if (i + 1) % 15 != 0:
val = ""
elif self.show_only_fives_m:
if (i + 1) % 5 != 0:
val = ""
if i == 59:
val = "0"
a = self.angle_start + self.angle_step_60 * (i + 1)
if self.show_minutes:
font_size = self.clock_canvas.font3.size(val)
if not self.show_highlight or (i + 1 == time[1] or (time[1] == 0 and i == 59)):
text = self.clock_canvas.font3.render("%s" % (val), 1, self.colors2[1])
else:
text = self.clock_canvas.font3.render("%s" % (val), 1, self.colors[1])
x3 = (rs[1] + 15 * self.layout.scale / 34 + font_size[1] // 2) * cos(a) + self.center[0] - \
font_size[0] / 2
y3 = (rs[1] + 15 * self.layout.scale / 34 + font_size[1] // 2) * sin(a) + self.center[1] - \
font_size[1] / 2
self.canvas.blit(text, (x3, y3))
if self.show_only_quarters_m or self.show_only_fives_m:
if (i + 1) % 15 == 0:
marklen = 20 * self.layout.scale / 34
elif (i + 1) % 5 == 0:
marklen = 15 * self.layout.scale / 34
else:
marklen = 10 * self.layout.scale / 34
else:
marklen = 15 * self.layout.scale / 34
else:
if (i + 1) % 15 == 0:
marklen = 20 * self.layout.scale / 34
elif (i + 1) % 5 == 0:
marklen = 15 * self.layout.scale / 34
else:
marklen = 10 * self.layout.scale / 34
if self.show_outer_ring:
x1 = (rs[1] + 10) * cos(a) + self.center[0]
y1 = (rs[1] + 10) * sin(a) + self.center[1]
x2 = (rs[1] + marklen) * cos(a) + self.center[0]
y2 = (rs[1] + marklen) * sin(a) + self.center[1]
pygame.draw.aaline(self.canvas, self.colors2[1], [x1, y1], [x2, y2])
for i in range(12):
val = str(i + 1)
if self.show_only_quarters_h:
if (i + 1) % 3 != 0:
val = ""
a = self.angle_start + self.angle_step_12 * (i + 1)
x1 = (rs[2] - 3) * cos(a) + self.center[0]
y1 = (rs[2] - 3) * sin(a) + self.center[1]
x2 = (rs[2] + 10) * cos(a) + self.center[0]
y2 = (rs[2] + 10) * sin(a) + self.center[1]
pygame.draw.aaline(self.canvas, self.colors2[0], [x1, y1], [x2, y2])
if self.show_roman:
val = self.hour_to_roman(val)
if not self.show_highlight or i + 1 == time[0]:
text = self.clock_canvas.font.render("%s" % (val), 1, self.colors2[0])
else:
text = self.clock_canvas.font.render("%s" % (val), 1, self.colors[0])
font_size = self.clock_canvas.font.size(val)
if self.show_catalan:
val2 = self.catalan[i]
if (i + 1) * 5 == time[1]:
text2 = self.clock_canvas.font.render("%s" % (val2), 1, self.colors2[1])
else:
text2 = self.clock_canvas.font.render("%s" % (val2), 1, self.colors[1])
font_size2 = self.clock_canvas.font.size(val2)
if self.show_roman:
text_angle = -(360 / 12.0) * (i + 1)
text = pygame.transform.rotate(text, text_angle)
rect = text.get_rect()
x3 = (rs2[2] + 20 + font_size[1] // 2) * cos(a) + self.center[0] - rect.width / 2
y3 = (rs2[2] + 20 + font_size[1] // 2) * sin(a) + self.center[1] - rect.height / 2
else:
x3 = int(
(rs2[2] + 10 * self.layout.scale / 34 + font_size[1] // 2) * cos(a) + self.center[0] - font_size[
0] / 2)
y3 = int(
(rs2[2] + 10 * self.layout.scale / 34 + font_size[1] // 2) * sin(a) + self.center[1] - font_size[
1] / 2)
xc = int(
(rs[2] + 15 * self.layout.scale / 34 + font_size[1] // 2) * cos(a) + self.center[0] - font_size2[
0] / 2)
yc = int(
(rs[2] + 15 * self.layout.scale / 34 + font_size[1] // 2) * sin(a) + self.center[1] - font_size2[
1] / 2)
self.canvas.blit(text, (x3, y3))
self.canvas.blit(text2, (xc, yc))
if self.show_24h:
if i + 13 == 24:
val = "0"
v = 0
else:
val = str(i + 13)
v = i + 13
font_size = self.clock_canvas.font2.size(val)
if not self.show_highlight or v == time[0]:
text = self.clock_canvas.font2.render("%s" % (val), 1, self.colors2[0])
else:
text = self.clock_canvas.font2.render("%s" % (val), 1, self.colors[0])
x3 = (rs2[0] + font_size[1] // 2) * cos(a) + self.center[0] - font_size[0] / 2
y3 = (rs2[0] + font_size[1] // 2) * sin(a) + self.center[1] - font_size[1] / 2
self.canvas.blit(text, (x3, y3))
hand_width = [self.r // 14, self.r // 18]
start_offset = [self.size // 18, self.size // 22]
for i in range(2):
# angle for line
angle = self.angles[i] # angle_start + angle_step*i
x0 = self.center[0] - start_offset[i] * cos(angle)
y0 = self.center[1] - start_offset[i] * sin(angle)
# Calculate the x,y for the end point
x1 = rs3[i] * cos(angle) + self.center[0]
y1 = rs3[i] * sin(angle) + self.center[1]
x2 = hand_width[i] * cos(angle - pi / 2) + self.center[0]
y2 = hand_width[i] * sin(angle - pi / 2) + self.center[1]
x3 = hand_width[i] * cos(angle + pi / 2) + self.center[0]
y3 = hand_width[i] * sin(angle + pi / 2) + self.center[1]
points = [[x0, y0], [x2, y2], [x1, y1], [x3, y3]]
self.hand_coords[i] = points
self.clock_canvas.update_me = True
for i in range(0, 2):
angle = 360 - ((self.angles[i] + pi / 2) * 180 / pi)
img = self.rotatePivoted(self.hands[i], angle, self.pivot)
self.canvas.blit(img[0], ((self.size - self.whs) // 2 + img[1][0], (self.size - self.whs) // 2 + img[1][1]))
self.update_text_time()
self.clock_canvas.update_me = True
self.mainloop.redraw_needed[0] = True
def scalled_img(self, image, new_w, new_h):
'scales image depending on pygame version and bit depth using either smoothscale or scale'
if image.get_bitsize() in [32, 24] and pygame.version.vernum >= (1, 8):
img = pygame.transform.smoothscale(image, (new_w, new_h))
else:
img = pygame.transform.scale(image, (new_w, new_h))
return img
def rotatePivoted(self, img, angle, pivot):
image = pygame.transform.rotate(img, angle)
rect = image.get_rect()
rect.center = pivot
return image, rect
def hour_to_roman(self, val):
val = int(val)
return self.roman[val - 1]
def vector_len(self, v):
return sqrt(v[0] ** 2 + v[1] ** 2)
def scalar_product(self, v1, v2):
return sum([v1[i] * v2[i] for i in range(len(v1))])
def angle(self, v1, v2):
return self.scalar_product(v1, v2) / (self.vector_len(v1) * self.vector_len(v2))
def is_contained(self, pos, coords_id=0):
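        # The point is inside the hand's quadrilateral when the angles it
        # subtends at two opposite corners fit within those corner angles
        # (all compared via 1-cos using the helper methods above).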
v0 = sv.Vector2.from_points(self.hand_coords[coords_id][2], self.hand_coords[coords_id][1])
v1 = sv.Vector2.from_points(self.hand_coords[coords_id][0], self.hand_coords[coords_id][1])
v2 = sv.Vector2.from_points(self.hand_coords[coords_id][2], self.hand_coords[coords_id][3])
v3 = sv.Vector2.from_points(self.hand_coords[coords_id][0], self.hand_coords[coords_id][3])
v4 = sv.Vector2.from_points(pos, self.hand_coords[coords_id][1])
v5 = sv.Vector2.from_points(pos, self.hand_coords[coords_id][3])
a1 = 1 - self.angle(v0, v1) # corner 1
a2 = 1 - self.angle(v2, v3) # corner 2
a3 = 1 - self.angle(v0, v4) # point to arm1 of corner1
a4 = 1 - self.angle(v1, v4) # point to arm2 of corner1
a5 = 1 - self.angle(v2, v5) # point to arm1 of corner2
a6 = 1 - self.angle(v3, v5) # point to arm2 of corner2
if (a3 + a4) < a1 and (a5 + a6) < a2:
return True
return False
def current_angle(self, pos, r):
cosa = (pos[0] - self.center[0]) / r
sina = (pos[1] - self.center[1]) / r
if 0 <= cosa <= 1 and -1 <= sina <= 0:
angle = pi / 2 - acos(cosa)
elif 0 <= cosa <= 1 and 0 <= sina <= 1:
angle = acos(cosa) + pi / 2 # ok
elif -1 <= cosa <= 0 and 0 <= sina <= 1:
angle = acos(cosa) + pi / 2 # ok
elif -1 <= cosa <= 0 and -1 <= sina <= 0:
angle = 2 * pi + pi / 2 - acos(cosa)
return angle
def handle(self, event):
gd.BoardGame.handle(self, event)
self.tm = self.time[:]
if event.type == pygame.MOUSEMOTION and self.hand_id > 0:
pos = [event.pos[0] - self.layout.game_left, event.pos[1] - self.layout.top_margin]
r = self.vector_len([pos[0] - self.center[0], pos[1] - self.center[1]])
if r == 0:
r = 0.1
if self.hand_id == 1:
h = (self.current_angle(pos, r)) / self.angle_step_12
if int(h) == 0:
self.tm[0] = 12
else:
self.tm[0] = int(h)
elif self.hand_id == 2:
m = (self.current_angle(pos, r)) / self.angle_step_60
self.tm[1] = int(m)
if 0 <= self.tm[1] < 5 and 55 <= self.time[1] <= 59:
if self.tm[0] == 12:
self.tm[0] = 1
else:
self.tm[0] += 1
elif 0 <= self.time[1] < 5 and 55 <= self.tm[1] <= 59:
if self.tm[0] == 1:
self.tm[0] = 12
else:
self.tm[0] -= 1
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
active = self.board.active_ship
pos = [event.pos[0] - self.layout.game_left, event.pos[1] - self.layout.top_margin]
if active == 0:
r = self.vector_len([pos[0] - self.center[0], pos[1] - self.center[1]])
if r == 0: r = 0.1
self.hand_id = 0
if self.is_contained(pos, coords_id=0):
self.hand_id = 1
elif self.is_contained(pos, coords_id=1):
self.hand_id = 2
elif self.rs[0] * 1.1 > r:
h = (self.current_angle(pos, r)) / self.angle_step_12
if int(h) == 0:
h = 12
self.tm[0] = int(h)
else:
m = (self.current_angle(pos, r)) / self.angle_step_60
self.tm[1] = int(m)
elif active == 1:
self.change_time_btn(1, 0)
elif active == 2:
self.change_time_btn(0, 1)
elif active == 3:
self.change_time_btn(-1, 0)
elif active == 4:
self.change_time_btn(0, -1)
elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
self.hand_id = 0
if self.tm != self.time:
self.time = self.tm[:]
self.draw_hands()
self.clock_canvas.painting = self.canvas.copy()
def change_time_btn(self, h, m):
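        # Step the displayed time by h hours / m minutes, wrapping 12<->1 on
        # the hour hand and 59<->0 on the minute hand (with carry into hours).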
if h != 0:
if h == 1 and self.tm[0] == 12:
self.tm[0] = 1
elif h == -1 and self.tm[0] == 1:
self.tm[0] = 12
elif h > 1 and self.tm[0] > 12 - h:
self.tm[0] = (self.tm[0] + h) - 12
else:
self.tm[0] += h
if m != 0:
if m == 1 and self.tm[1] == 59:
self.tm[1] = 0
self.change_time_btn(1, 0)
elif m == -1 and self.tm[1] == 0:
self.tm[1] = 59
self.change_time_btn(-1, 0)
elif m > 1 and self.tm[1] > 59 - m:
self.change_time_btn(1, 0)
self.tm[1] = (self.tm[1] + m) - 60
else:
self.tm[1] += m
def update(self, game):
game.fill((255, 255, 255))
gd.BoardGame.update(self, game)
def check_result(self):
pass
| gpl-3.0 | -2,524,319,700,950,565,000 | 40.797872 | 120 | 0.488759 | false |
vitaly-krugl/pika | tests/unit/blocking_channel_tests.py | 1 | 4055 | # -*- coding: utf8 -*-
"""
Tests for pika.adapters.blocking_connection.BlockingChannel
"""
from collections import deque
import unittest
import mock
from pika.adapters import blocking_connection
from pika import channel
BLOCKING_CHANNEL = 'pika.adapters.blocking_connection.BlockingChannel'
BLOCKING_CONNECTION = 'pika.adapters.blocking_connection.BlockingConnection'
class ChannelTemplate(channel.Channel):
channel_number = 1
class BlockingChannelTests(unittest.TestCase):
@mock.patch(BLOCKING_CONNECTION)
def _create_connection(self, connection=None):
return connection
def setUp(self):
self.connection = self._create_connection()
channel_impl_mock = mock.Mock(
spec=ChannelTemplate,
is_closing=False,
is_closed=False,
is_open=True)
self.obj = blocking_connection.BlockingChannel(channel_impl_mock,
self.connection)
def tearDown(self):
del self.connection
del self.obj
def test_init_initial_value_confirmation(self):
self.assertFalse(self.obj._delivery_confirmation)
def test_init_initial_value_pending_events(self):
self.assertEqual(self.obj._pending_events, deque())
def test_init_initial_value_buback_return(self):
self.assertIsNone(self.obj._puback_return)
def test_basic_consume_legacy_parameter_queue(self):
# This is for the unlikely scenario where only
# the first parameter is updated
with self.assertRaises(TypeError):
self.obj.basic_consume('queue',
'whoops this should be a callback')
def test_basic_consume_legacy_parameter_callback(self):
with self.assertRaises(TypeError):
self.obj.basic_consume(mock.Mock(), 'queue')
def test_queue_declare_legacy_parameter_callback(self):
with self.assertRaises(TypeError):
self.obj.queue_declare(mock.Mock(), 'queue')
def test_exchange_declare_legacy_parameter_callback(self):
with self.assertRaises(TypeError):
self.obj.exchange_declare(mock.Mock(), 'exchange')
def test_queue_bind_legacy_parameter_callback(self):
with self.assertRaises(TypeError):
self.obj.queue_bind(mock.Mock(),
'queue',
'exchange')
def test_basic_cancel_legacy_parameter(self):
with self.assertRaises(TypeError):
self.obj.basic_cancel(mock.Mock(), 'tag')
def test_basic_get_legacy_parameter(self):
with self.assertRaises(TypeError):
self.obj.basic_get(mock.Mock())
def test_basic_consume(self):
with mock.patch.object(self.obj._impl, '_generate_consumer_tag'):
self.obj._impl._generate_consumer_tag.return_value = 'ctag0'
self.obj._impl.basic_consume.return_value = 'ctag0'
self.obj.basic_consume('queue', mock.Mock())
self.assertEqual(self.obj._consumer_infos['ctag0'].state,
blocking_connection._ConsumerInfo.ACTIVE)
def test_context_manager(self):
with self.obj as chan:
self.assertFalse(chan._impl.close.called)
chan._impl.close.assert_called_once_with(
reply_code=0, reply_text='Normal shutdown')
def test_context_manager_does_not_suppress_exception(self):
class TestException(Exception):
pass
with self.assertRaises(TestException):
with self.obj as chan:
self.assertFalse(chan._impl.close.called)
raise TestException()
chan._impl.close.assert_called_once_with(
reply_code=0, reply_text='Normal shutdown')
def test_context_manager_exit_with_closed_channel(self):
with self.obj as chan:
self.assertFalse(chan._impl.close.called)
chan.close()
chan._impl.close.assert_called_with(
reply_code=0, reply_text='Normal shutdown')
| bsd-3-clause | -2,167,828,103,302,541,300 | 34.26087 | 76 | 0.634772 | false |
Olical/dcpu16py | asm_pyparsing.py | 1 | 16612 | #! /usr/bin/env python
"""
pyparsing based grammar for DCPU-16 0x10c assembler
"""
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
try:
basestring
except NameError:
basestring = str
import logging; log = logging.getLogger("dcpu16_asm")
log.setLevel(logging.DEBUG)
import argparse
import os
import struct
import sys
import pyparsing as P
from collections import defaultdict
# Replace the debug actions so that the results go to the debug log rather
# than stdout, so that the output can be usefully piped.
def _defaultStartDebugAction(instring, loc, expr):
log.debug("Match " + P._ustr(expr) + " at loc " + P._ustr(loc) + "(%d,%d)"
% ( P.lineno(loc,instring), P.col(loc,instring) ))
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
log.debug("Matched " + P._ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
log.debug("Exception raised:" + P._ustr(exc))
P._defaultStartDebugAction = _defaultStartDebugAction
P._defaultSuccessDebugAction = _defaultSuccessDebugAction
P._defaultExceptionDebugAction = _defaultExceptionDebugAction
# Run with "DEBUG=1 python ./asm_pyparsing.py"
DEBUG = "DEBUG" in os.environ
WORD_MAX = 0xFFFF
# otherwise \n is also treated as ignorable whitespace
P.ParserElement.setDefaultWhitespaceChars(" \t")
identifier = P.Word(P.alphas+"_", P.alphanums+"_")
label = P.Combine(P.Literal(":").suppress() + identifier)
comment = P.Literal(";").suppress() + P.restOfLine
register = (P.Or(P.CaselessKeyword(x) for x in "ABCIJXYZO")
| P.oneOf("PC SP", caseless=True))
stack_op = P.oneOf("PEEK POP PUSH", caseless=True)
hex_literal = P.Combine(P.Literal("0x") + P.Word(P.hexnums))
dec_literal = P.Word(P.nums)
numeric_literal = hex_literal | dec_literal
literal = numeric_literal | identifier
opcode = P.oneOf("SET ADD SUB MUL DIV MOD SHL SHR "
"AND BOR XOR IFE IFN IFG IFB JSR", caseless=True)
basic_operand = P.Group(register("register")
| stack_op("stack_op")
| literal("literal"))
indirect_expr = P.Group(literal("literal")
+ P.Literal("+")
+ register("register"))
hex_literal.setParseAction(lambda s, l, t: int(t[0], 16))
dec_literal.setParseAction(lambda s, l, t: int(t[0]))
register.addParseAction(P.upcaseTokens)
stack_op.addParseAction(P.upcaseTokens)
opcode.addParseAction(P.upcaseTokens)
def sandwich(brackets, expr):
l, r = brackets
return P.Literal(l).suppress() + expr + P.Literal(r).suppress()
indirection_content = indirect_expr("expr") | basic_operand("basic")
indirection = P.Group(sandwich("[]", indirection_content) |
sandwich("()", indirection_content))
operand = basic_operand("basic") | indirection("indirect")
def make_words(data):
return [a << 8 | b for a, b in izip_longest(data[::2], data[1::2],
fillvalue=0)]
def wordize_string(s, l, tokens):
bytes = [ord(c) for c in tokens.string]
# TODO(pwaller): possibly add syntax for packing string data?
packed = False
return make_words(bytes) if packed else bytes
quoted_string = P.quotedString("string").addParseAction(P.removeQuotes).addParseAction(wordize_string)
datum = quoted_string | numeric_literal
def parse_data(string, loc, tokens):
result = []
for token in tokens:
values = datum.parseString(token).asList()
        assert all(v <= WORD_MAX for v in values), "Datum exceeds word size"
result.extend(values)
return result
# TODO(pwaller): Support for using macro argument values in data statement
datalist = P.commaSeparatedList.copy().setParseAction(parse_data)
data = P.CaselessKeyword("DAT")("opcode") + P.Group(datalist)("data")
line = P.Forward()
macro_definition_args = P.Group(P.delimitedList(P.Optional(identifier("arg"))))("args")
macro_definition = P.Group(
P.CaselessKeyword("#macro").suppress()
+ identifier("name")
+ sandwich("()", macro_definition_args)
+ sandwich("{}", P.Group(P.OneOrMore(line))("lines"))
)("macro_definition")
macro_argument = operand | datum
macro_call_args = P.Group(P.delimitedList(P.Group(macro_argument)("arg")))("args")
macro_call = P.Group(
identifier("name") + sandwich("()", macro_call_args)
)("macro_call")
instruction = (
opcode("opcode")
+ P.Group(operand)("first")
+ P.Optional(P.Literal(",").suppress() + P.Group(operand)("second"))
)
statement = P.Group(
instruction
| data
| macro_definition
| macro_call
)
line << P.Group(
P.Optional(label("label"))
+ P.Optional(statement("statement"), default=None)
+ P.Optional(comment("comment"))
+ P.lineEnd.suppress()
)("line")
full_grammar = (
P.stringStart
+ P.ZeroOrMore(line)
+ (P.stringEnd | P.Literal("#stop").suppress())
)("program")
if DEBUG:
# Turn setdebug on for all parse elements
for name, var in locals().copy().items():
if isinstance(var, P.ParserElement):
var.setName(name).setDebug()
def debug_line(string, location, tokens):
"""
Show the current line number and content being parsed
"""
lineno = string[:location].count("\n")
remaining = string[location:]
line_end = remaining.index("\n") if "\n" in remaining else None
log.debug("====")
log.debug(" Parse line {0}".format(lineno))
log.debug(" '{0}'".format(remaining[:line_end]))
log.debug("====")
line.setDebugActions(debug_line, None, None)
IDENTIFIERS = {"A": 0x0, "B": 0x1, "C": 0x2, "X": 0x3, "Y": 0x4, "Z": 0x5,
"I": 0x6, "J": 0x7,
"POP": 0x18, "PEEK": 0x19, "PUSH": 0x1A,
"SP": 0x1B, "PC": 0x1C,
"O": 0x1D}
OPCODES = {"SET": 0x1, "ADD": 0x2, "SUB": 0x3, "MUL": 0x4, "DIV": 0x5,
"MOD": 0x6, "SHL": 0x7, "SHR": 0x8, "AND": 0x9, "BOR": 0xA,
"XOR": 0xB, "IFE": 0xC, "IFN": 0xD, "IFG": 0xE, "IFB": 0xF}
def process_operand(o, lvalue=False):
"""
Returns (a, x) where a is a value which identifies the nature of the value
and x is either None or a word to be inserted directly into the output stream
(e.g. a literal value >= 0x20)
"""
# TODO(pwaller): Reject invalid lvalues
def invalid_op(reason):
# TODO(pwaller): Need to indicate origin of error
return RuntimeError("Invalid operand, {0}: {1}"
.format(reason, o.asXML()))
def check_indirect_register(register):
if register not in "ABCXYZIJ":
raise invalid_op("only registers A-J can be used for indirection")
if o.basic:
# Literals, stack ops, registers
b = o.basic
if b.register:
return IDENTIFIERS[b.register], None
elif b.stack_op:
return IDENTIFIERS[b.stack_op], None
elif b.literal is not None:
l = b.literal
if not isinstance(l, basestring) and l < 0x20:
return 0x20 | l, None
if l == "": raise invalid_op("this is a bug")
if isinstance(l, int) and not 0 <= l <= WORD_MAX:
raise invalid_op("literal exceeds word size")
return 0x1F, l
elif o.indirect:
i = o.indirect
if i.basic:
# [register], [literal]
ib = i.basic
if ib.register:
check_indirect_register(ib.register)
return 0x8 + IDENTIFIERS[ib.register], None
elif ib.stack_op:
raise invalid_op("don't use PUSH/POP/PEEK with indirection")
elif ib.literal is not None:
return 0x1E, ib.literal
elif i.expr:
# [register+literal]
ie = i.expr
check_indirect_register(ie.register)
return 0x10 | IDENTIFIERS[ie.register], ie.literal
raise invalid_op("this is a bug")
def codegen(source, input_filename="<unknown>"):
try:
parsed = full_grammar.parseString(source)
except P.ParseException as exc:
log.fatal("Parse error:")
log.fatal(" {0}:{1}:{2} HERE {3}"
.format(input_filename, exc.lineno, exc.col,
exc.markInputline()))
return None
log.debug("=====")
log.debug(" Successful parse, XML syntax interpretation:")
log.debug("=====")
log.debug(parsed.asXML())
labels = {}
macros = {}
program = []
# Number of times a given macro has been called so that we can generate
# unique labels
n_macro_calls = defaultdict(int)
def process_macro_definition(statement):
log.debug("Macro definition: {0}".format(statement.asXML()))
macros[statement.name] = statement
def process_macro_call(offset, statement, context=""):
log.debug("--------------")
log.debug("Macro call: {0}".format(statement.asXML()))
log.debug("--------------")
macroname = statement.name
macro = macros.get(macroname, None)
n_macro_calls[macroname] += 1
context = context + macroname + str(n_macro_calls[macroname])
if not macro:
raise RuntimeError("Call to undefined macro: {0}".format(macroname))
assert len(macro.args) == len(statement.args), (
"Wrong number of arguments to macro call {0!r}".format(macroname))
# TODO(pwaller): Check for collisions between argument name and code
# label
args = {}
log.debug("Populated args:")
for name, arg in zip(macro.args, statement.args):
args[name] = arg
log.debug(" - {0}: {1}".format(name, arg))
lines = []
for l in macro.lines:
new_line = l.copy()
s = l.statement
if s:
new_statement = s.copy()
new_line["statement"] = new_statement
#if l.label: new_line["label"] = context + l.label
# Replace literals whose names are macro arguments
# also, substitute labels with (context, label).
# Resolution of a label happens later by first searching for a label
# called `context + label`, and if it doesn't exist `label` is used.
if s and s.first and s.first.basic and s.first.basic.literal:
if s.first.basic.literal in args:
new_statement["first"] = args[s.first.basic.literal]
elif isinstance(s.first.basic.literal, basestring):
new_basic = s.first.basic.copy()
new_basic["literal"] = context, s.first.basic.literal
new_op = new_statement.first.copy()
new_op["basic"] = new_basic
new_statement["first"] = new_op
if s and s.second and s.second.basic and s.second.basic.literal:
if s.second.basic.literal in args:
new_statement["second"] = args[s.second.basic.literal]
elif isinstance(s.second.basic.literal, basestring):
new_basic = s.second.basic.copy()
new_basic["literal"] = context, s.second.basic.literal
new_op = new_statement.second.copy()
new_op["basic"] = new_basic
new_statement["second"] = new_op
# Replace macro call arguments
if s and s.macro_call:
new_macro_call = s.macro_call.copy()
new_statement["macro_call"] = new_macro_call
new_macro_call_args = s.macro_call.args.copy()
new_statement.macro_call["args"] = new_macro_call_args
for i, arg in enumerate(s.macro_call.args):
if arg.basic.literal not in args:
continue
new_macro_call_args[i] = args[arg.basic.literal]
lines.append(new_line)
log.debug("Populated macro: {0}"
.format("\n".join(l.dump() for l in lines)))
# Do code generation
code = []
for l in lines:
a = generate(offset + len(code), l, context)
log.debug("Codegen for statement: {0}".format(l.asXML()))
log.debug(" Code: {0}".format(a))
code.extend(a)
return code
def generate(offset, line, context=""):
log.debug("Interpreting element {0}: {1}".format(i, line))
if line.label:
label = context + line.label
if label in labels:
# TODO(pwaller): Line indications
msg = "Duplicate label definition! {0}".format(label)
log.fatal(msg)
raise RuntimeError(msg)
labels[label] = offset
s = line.statement
if not s: return []
if s.macro_definition:
process_macro_definition(s.macro_definition)
return []
elif s.macro_call:
return process_macro_call(offset, s.macro_call, context)
log.debug("Generating for {0}".format(s.asXML(formatted=False)))
if s.opcode == "DAT":
return s.data
if s.opcode == "JSR":
o = 0x00
a, x = 0x01, None
b, y = process_operand(s.first)
else:
o = OPCODES[s.opcode]
a, x = process_operand(s.first, lvalue=True)
b, y = process_operand(s.second)
code = []
code.append(((b << 10) + (a << 4) + o))
if x is not None: code.append(x)
if y is not None: code.append(y)
return code
for i, line in enumerate(parsed):
program.extend(generate(len(program), line))
log.debug("Labels: {0}".format(labels))
log.debug("program: {0}".format(program))
# Substitute labels
for i, c in enumerate(program):
if isinstance(c, basestring):
if c not in labels:
raise RuntimeError("Undefined label used: {0}".format(c))
program[i] = labels[c]
elif isinstance(c, tuple):
context, label = c
if context + label in labels:
label = context + label
if label not in labels:
raise RuntimeError("Undefined label used: {0}".format(c))
program[i] = labels[label]
# Turn words into bytes
result = bytes()
for word in program:
result += struct.pack(">H", word)
return result
def main():
parser = argparse.ArgumentParser(
description='A simple pyparsing-based DCPU assembly compiler')
parser.add_argument(
'source', metavar='IN', type=str,
help='file path of the file containing the assembly code')
parser.add_argument(
'destination', metavar='OUT', type=str, nargs='?',
help='file path where to store the binary code')
args = parser.parse_args()
if not log.handlers:
from sys import stderr
handler = logging.StreamHandler(stderr)
log.addHandler(handler)
if not DEBUG: handler.setLevel(logging.INFO)
if args.source == "-":
program = codegen(sys.stdin.read(), "<stdin>")
else:
with open(args.source) as fd:
program = codegen(fd.read(), args.source)
if program is None:
log.fatal("No program produced.")
if not DEBUG:
log.fatal("Run with DEBUG=1 ./asm_pyparsing.py "
"for more information.")
return 1
if not args.destination:
if os.isatty(sys.stdout.fileno()):
log.fatal("stdout is a tty, not writing binary. "
"Specify destination file or pipe output somewhere")
else:
sys.stdout.write(program)
else:
with open(args.destination, "wb") as fd:
fd.write(program)
log.info("Program written to {0} ({1} bytes, hash={2})"
.format(args.destination, len(program),
hex(abs(hash(program)))))
return 0
if __name__ == "__main__":
raise SystemExit(main())
| mit | 7,136,708,367,839,636,000 | 33.972632 | 102 | 0.565736 | false |
SequencingDOTcom/App-Chains-Sequencing.com-Real-Time-API | python/UsageExample.py | 1 | 2357 | # encoding: utf-8
from __future__ import unicode_literals
from AppChains import AppChains
class UsageExample(object):
token = '<your token goes here>'
url = 'api.sequencing.com'
def __init__(self):
self.chains = AppChains(self.token, self.url)
#print(self.get_public_beacon_test())
print(self.get_raw_report_test())
self.get_report_test()
self.get_report_batch_test()
def get_public_beacon_test(self):
beacon_result = self.chains.getPublicBeacon(1, 2, 'A')
return beacon_result
def get_raw_report_test(self):
chains_raw_result = self.chains.getRawReport(
'StartApp', 'Chain12', '227680')
return chains_raw_result
def get_report_test(self):
chains_result = self.chains.getReport(
'StartApp', 'Chain87', '227680')
if chains_result.isSucceeded():
print('Request has succeeded')
else:
print('Request has failed')
for r in chains_result.getResults():
file_type = r.getValue().getType()
v = r.getValue()
if file_type == 'TEXT':
print('-> text result type {} = {}'.format(
r.getName(), v.getData()))
elif file_type == 'FILE':
print(' -> file result type {} = {}'.format(
r.getName(), v.getUrl()
))
v.saveTo('/tmp')
def get_report_batch_test(self):
chains_results = self.chains.getReportBatch(
'StartAppBatch', {'Chain85': '227680', 'Chain88': '227680'})
for chains_result in chains_results:
if chains_results[chains_result].isSucceeded():
print('Request has succeeded')
else:
print('Request has failed')
for r in chains_results[chains_result].getResults():
file_type = r.getValue().getType()
v = r.getValue()
if file_type == 'TEXT':
print('-> text result type {} = {}'.format(
r.getName(), v.getData()))
elif file_type == 'FILE':
print(' -> file result type {} = {}'.format(
r.getName(), v.getUrl()
))
v.saveTo('/tmp')
UsageExample()
| mit | -6,927,457,079,036,015,000 | 34.179104 | 72 | 0.515062 | false |
proycon/foliatools | foliatools/xslt.py | 1 | 4500 | # -*- coding: utf8 -*-
import lxml.etree
import sys
import glob
import getopt
import os.path
import io
def transform(xsltfilename, sourcefilename, targetfilename = None, encoding = 'utf-8', **kwargs):
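    # Apply the stylesheet to the source document; extra keyword arguments are
    # passed to the stylesheet as string parameters. Output goes to
    # targetfilename when given, otherwise to stdout.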
xsldir = os.path.dirname(__file__)
if xsltfilename[0] != '/': xsltfilename = os.path.join(xsldir, xsltfilename)
if not os.path.exists(xsltfilename):
raise Exception("XSL Stylesheet not found: " + xsltfilename)
elif not os.path.exists(sourcefilename):
raise Exception("File not found: " + sourcefilename)
xslt = lxml.etree.parse(xsltfilename)
transformer = lxml.etree.XSLT(xslt)
parsedsource = lxml.etree.parse(sourcefilename)
kwargs = { k: lxml.etree.XSLT.strparam(v) for k,v in kwargs.items() }
transformed = transformer(parsedsource, **kwargs)
if targetfilename:
print("Wrote " + targetfilename,file=sys.stderr)
f = io.open(targetfilename, 'w',encoding='utf-8')
f.write(str(lxml.etree.tostring(transformed, pretty_print=False, encoding=encoding),encoding))
f.close()
else:
print(str(lxml.etree.tostring(transformed, pretty_print=False, encoding=encoding),encoding))
def usage():
print(settings.usage,file=sys.stderr)
print("",file=sys.stderr)
print("Parameters for output:" ,file=sys.stderr)
print(" -e [encoding] Output encoding (default: utf-8)" ,file=sys.stderr)
print("Parameters for processing directories:",file=sys.stderr)
print(" -r Process recursively",file=sys.stderr)
print(" -E [extension] Set extension (default: xml)",file=sys.stderr)
print(" -q Ignore errors",file=sys.stderr)
print(" -s [url] Associate a CSS Stylesheet (URL, may be relative)",file=sys.stderr)
print(" -T Retain tokenisation",file=sys.stderr)
print(" -t [textclass] Text class to output",file=sys.stderr)
class settings:
autooutput = False
extension = 'xml'
recurse = False
ignoreerrors = False
encoding = 'utf-8'
xsltfilename = "undefined.xsl"
outputextension = 'UNDEFINED'
usage = "UNDEFINED"
css = ""
textclass = "current"
def processdir(d):
print("Searching in " + d, file=sys.stderr)
for f in glob.glob(os.path.join(d,'*')):
if f[-len(settings.extension) - 1:] == '.' + settings.extension and f[-len(settings.outputextension) - 1:] != '.' + settings.outputextension:
process(f)
elif settings.recurse and os.path.isdir(f):
processdir(f)
def process(inputfilename):
try:
kwargs = {}
if settings.css:
kwargs['css'] = settings.css
if settings.textclass:
kwargs['textclass'] = settings.textclass
transform(settings.xsltfilename, inputfilename, None, settings.encoding, **kwargs)
except Exception as e:
if settings.ignoreerrors:
print("ERROR: An exception was raised whilst processing " + inputfilename + ":", e, file=sys.stderr)
else:
raise e
def main(xsltfilename, outputextension, usagetext):
try:
        opts, args = getopt.getopt(sys.argv[1:], "o:e:E:hrqs:Tt:", ["help"])
except getopt.GetoptError as err:
print(str(err), file=sys.stderr)
usage()
sys.exit(2)
settings.xsltfilename = xsltfilename
settings.outputextension = outputextension
settings.usage = usagetext
for o, a in opts:
if o == '-h' or o == '--help':
usage()
sys.exit(0)
elif o == '-T':
settings.retaintokenisation = True
elif o == '-e':
settings.encoding = a
elif o == '-E':
settings.extension = a
elif o == '-r':
settings.recurse = True
elif o == '-q':
settings.ignoreerrors = True
elif o == '-s':
settings.css = a
elif o == '-t':
settings.textclass = a
else:
raise Exception("No such option: " + o)
if args:
for x in args:
if os.path.isdir(x):
processdir(x)
elif os.path.isfile(x):
process(x)
else:
print("ERROR: File or directory not found: " + x, file=sys.stderr)
sys.exit(3)
else:
print("ERROR: Nothing to do, specify one or more files or directories",file=sys.stderr)
| gpl-3.0 | -5,817,704,970,516,492,000 | 35 | 149 | 0.587333 | false |
hdknr/paloma | docs/source/conf.py | 1 | 9660 | # -*- coding: utf-8 -*-
#
# paloma documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 4 15:12:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'paloma'
copyright = u'2012, harajuku-tech'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'palomadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'paloma.tex', u'paloma Documentation',
u'harajuku-tech', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'paloma', u'paloma Documentation',
[u'harajuku-tech'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'paloma', u'paloma Documentation',
u'harajuku-tech', 'paloma', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'paloma'
epub_author = u'harajuku-tech'
epub_publisher = u'harajuku-tech'
epub_copyright = u'2012, harajuku-tech'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
#--------
# Django
PRJ_PATH= os.path.dirname( os.path.dirname( os.path.abspath(__file__)) )
print PRJ_PATH
sys.path.insert(0, os.path.join(os.path.dirname(PRJ_PATH ),'example'))
sys.path.insert(0, os.path.join(os.path.dirname(PRJ_PATH ),'example/app'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'app.settings'
# Graphviz to draw ER
extensions.append('sphinx.ext.graphviz')#
| bsd-2-clause | 2,299,315,624,443,555,000 | 31.093023 | 215 | 0.704037 | false |
MDAnalysis/GridDataFormats | gridData/gOpenMol.py | 1 | 10453 | # gridDataFormats --- python modules to read and write gridded data
# Copyright (c) 2009-2014 Oliver Beckstein <[email protected]>
# Released under the GNU Lesser General Public License, version 3 or later.
#
# Part of the documentation and format specification: Copyright CSC, 2005
"""
:mod:`gOpenMol` --- the gOpenMol plt format
===========================================
.. _gOpenMol: http://www.csc.fi/english/pages/g0penMol
The module provides a simple implementation of a reader for gOpenMol_
*plt* files. Plt files are binary files. The :class:`Plt` reader tries
to guess the endianness of the file, but this can fail (with a
:exc:`TypeError`); you are on your own in this case.
Only the reader is implemented. If you want to write gridded data use a format
that is more standard, such as OpenDX (see :mod:`OpenDX`).
Background
----------
gOpenMol http://www.csc.fi/english/pages/g0penMol plt format.
Used to be documented at http://www.csc.fi/gopenmol/developers/plt_format.phtml but currently this is only accessible through the internet archive at
http://web.archive.org/web/20061011125817/http://www.csc.fi/gopenmol/developers/plt_format.phtml
Grid data plt file format
-------------------------
Copyright CSC, 2005. Last modified: September 23, 2003 09:18:50
Plot file (plt) format The plot files are regular 3D grid files for plotting of
molecular orbitals, electron densities or other molecular properties. The plot
files are produced by several programs. It is also possible to format/unformat
plot files using the pltfile program in the utility directory. It is also
possible to produce plot files with external (own) programs. Produce first a
formatted text file and use then the pltfile program to unformat the file for
gOpenMol. The format for the plot files are very simple and a description of
the format can be found elsewhere in this manual. gOpenMol can read binary plot
files from different hardware platforms independent of the system type (little
or big endian machines).
Format of the binary ``*.plt`` file
...................................
The ``*.plt`` file binary and formatted file formats are very simple but please
observe that unformatted files written with a FORTRAN program are not pure
binary files because there are file records between the values while pure
binary files do not have any records between the values. gOpenMol should be
able to figure out if the file is pure binary or FORTRAN unformatted but it is
not very well tested.
Binary ``*.plt`` (grid) file format
...................................
Record number and meaning::
#1: Integer, rank value must always be = 3
#2: Integer, possible values are 1 ... 50. This value is not used but
it can be used to define the type of surface!
Values used (you can use your own value between 1... 50):
1: VSS surface
2: Orbital/density surface
3: Probe surface
200: Gaussian 94/98
201: Jaguar
202: Gamess
203: AutoDock
204: Delphi/Insight
205: Grid
Value 100 is reserved for grid data coming from OpenMol!
#3: Integer, number of points in z direction
#4: Integer, number of points in y direction
#5: Integer, number of points in x direction
#6: Float, zmin value
#7: Float, zmax value
#8: Float, ymin value
#9: Float, ymax value
#10: Float, xmin value
#11: Float, xmax value
#12 ... Float, grid data values running (x is inner loop, then y and last z):
1. Loop in the z direction
2. Loop in the y direction
3. Loop in the x direction
Example::
nx=2 ny=1 nz=3
0,0,0 1,0,0 y=0, z=0
0,0,1 1,0,0 y=0, z=1
0,0,2 1,0,2 y=0, z=2
The formatted (the first few lines) file can look like::
3 2
65 65 65
-3.300000e+001 3.200000e+001 -3.300000e+001 3.200000e+001 -3.300000e+001 3.200000e+001
-1.625609e+001 -1.644741e+001 -1.663923e+001 -1.683115e+001 -1.702274e+001 -1.721340e+001
-1.740280e+001 -1.759018e+001 -1.777478e+001 -1.795639e+001 -1.813387e+001 -1.830635e+001
...
Formatted ``*.plt`` (grid) file format
......................................
Line numbers and variables on the line::
line #1: Integer, Integer. Rank and type of surface (rank is always = 3)
line #2: Integer, Integer, Integer. Zdim, Ydim, Xdim (number of points in the z,y,x directions)
line #3: Float, Float, Float, Float, Float, Float. Zmin, Zmax, Ymin, Ymax, Xmin,Xmax (min and max values)
line #4: ... Float. Grid data values running (x is inner loop, then y and last z) with one or several values per line:
1. Loop in the z direction
2. Loop in the y direction
3. Loop in the x direction
Classes
-------
"""
from __future__ import absolute_import, division, with_statement
import warnings
import struct
import numpy
from six.moves import range
class Record(object):
def __init__(self, key, bintype, values=None):
self.key = key
self.bintype = bintype
self.values = values # dict(value='comment', ...)
def is_legal(self, value):
if self.values is None:
return True
return value in self.values
def is_legal_dict(self, d):
return self.is_legal(d[self.key])
def __repr__(self):
return "Record(%(key)r,%(bintype)r,...)" % vars(self)
class Plt(object):
"""A class to represent a gOpenMol_ plt file.
Only reading is implemented; either supply a filename to the constructor
>>> G = Plt(filename)
or load the file with the read method
>>> G = Plt()
>>> G.read(filename)
    The data is held in :attr:`Plt.array` and all header information is in
    the dict :attr:`Plt.header`.
:attr:`Plt.shape`
D-tuplet describing size in each dimension
:attr:`Plt.origin`
coordinates of the centre of the grid cell with index 0,0,...,0
:attr:`Plt.delta`
DxD array describing the deltas
"""
_header_struct = (Record('rank', 'I', {3:'dimension'}),
Record('surface','I', {1: 'VSS surface',
2: 'Orbital/density surface',
3: 'Probe surface',
42: 'gridcount',
100: 'OpenMol',
200: 'Gaussian 94/98',
201: 'Jaguar',
202: 'Gamess',
203: 'AutoDock',
204: 'Delphi/Insight',
205: 'Grid',
}), # update in init with all user defined values
Record('nz', 'I'),
Record('ny', 'I'),
Record('nx', 'I'),
Record('zmin', 'f'),
Record('zmax', 'f'),
Record('ymin', 'f'),
Record('ymax', 'f'),
Record('xmin', 'f'),
Record('xmax', 'f'))
_data_bintype = 'f' # write(&value,sizeof(float),1L,output);
def __init__(self, filename=None):
self.filename = str(filename)
# fix header_struct because I cannot do {...}.update()
rec_surf = [r for r in self._header_struct if r.key == 'surface'][0]
rec_surf.values.update(dict((k,'user-defined') for k in range(4,51) if k != 42))
# assemble format
self._headerfmt = "".join([r.bintype for r in self._header_struct])
if not filename is None:
self.read(filename)
def read(self, filename):
"""Populate the instance from the plt file *filename*."""
from struct import calcsize, unpack
if not filename is None:
self.filename = str(filename)
with open(self.filename, 'rb') as plt:
h = self.header = self._read_header(plt)
nentries = h['nx'] * h['ny'] * h['nz']
# quick and dirty... slurp it all in one go
datafmt = h['bsaflag']+str(nentries)+self._data_bintype
a = numpy.array(unpack(datafmt, plt.read(calcsize(datafmt))))
self.header['filename'] = self.filename
self.array = a.reshape(h['nz'], h['ny'], h['nx']).transpose() # unpack plt in reverse!!
self.delta = self._delta()
self.origin = numpy.array([h['xmin'], h['ymin'], h['zmin']]) + 0.5*numpy.diagonal(self.delta)
self.rank = h['rank']
@property
def shape(self):
return self.array.shape
@property
def edges(self):
"""Edges of the grid cells, origin at centre of 0,0,..,0 grid cell.
Only works for regular, orthonormal grids.
"""
return [self.delta[d,d] * numpy.arange(self.shape[d]+1) + self.origin[d]\
- 0.5*self.delta[d,d] for d in range(self.rank)]
def _delta(self):
h = self.header
qmin = numpy.array([h['xmin'],h['ymin'],h['zmin']])
qmax = numpy.array([h['xmax'],h['ymax'],h['zmax']])
delta = numpy.abs(qmax - qmin) / self.shape
return numpy.diag(delta)
def _read_header(self, pltfile):
"""Read header bytes, try all possibilities for byte order/size/alignment."""
nheader = struct.calcsize(self._headerfmt)
names = [r.key for r in self._header_struct]
binheader = pltfile.read(nheader)
def decode_header(bsaflag='@'):
h = dict(zip(names, struct.unpack(bsaflag+self._headerfmt, binheader)))
h['bsaflag'] = bsaflag
return h
for flag in '@=<>':
            # try all endianness and alignment options until we find something that looks sensible
header = decode_header(flag)
if header['rank'] == 3:
break # only legal value according to spec
header = None
if header is None:
raise TypeError("Cannot decode header --- corrupted or wrong format?")
for rec in self._header_struct:
if not rec.is_legal_dict(header):
warnings.warn("Key %s: Illegal value %r" % (rec.key, header[rec.key]))
return header
def histogramdd(self):
"""Return array data as (edges,grid), i.e. a numpy nD histogram."""
return (self.array, self.edges)
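# Illustrative usage sketch (not part of the original module; assumes a plt
# file named "orbital.plt" exists on disk):
#
#   from gridData.gOpenMol import Plt
#   g = Plt("orbital.plt")
#   print(g.shape)              # (nx, ny, nz) after the transpose in read()
#   print(g.origin, g.delta)    # centre of grid cell (0, 0, 0) and spacing matrix
#   grid, edges = g.histogramdd()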
| lgpl-3.0 | -8,192,271,792,387,138,000 | 37.858736 | 149 | 0.591313 | false |
googleapis/python-compute | google/cloud/compute_v1/services/interconnect_locations/transports/__init__.py | 1 | 1068 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import InterconnectLocationsTransport
from .rest import InterconnectLocationsRestTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[InterconnectLocationsTransport]]
_transport_registry["rest"] = InterconnectLocationsRestTransport
__all__ = (
"InterconnectLocationsTransport",
"InterconnectLocationsRestTransport",
)
| apache-2.0 | 2,912,776,670,656,290,000 | 32.375 | 74 | 0.766854 | false |
sixohsix/twitter | setup.py | 1 | 2203 | from setuptools import setup, find_packages
with open("README") as f:
long_description = f.read()
version = '1.18.0'
setup(name='twitter',
version=version,
description="An API and command-line toolset for Twitter (twitter.com)",
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Communications :: Chat :: Internet Relay Chat",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
keywords='twitter, IRC, command-line tools, web 2.0',
author='Mike Verdone',
author_email='[email protected]',
url='https://mike.verdone.ca/twitter/',
license='MIT License',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
twitter=twitter.cmdline:main
twitterbot=twitter.ircbot:main
twitter-log=twitter.logger:main
twitter-archiver=twitter.archiver:main
twitter-follow=twitter.follow:main
twitter-stream-example=twitter.stream_example:main
""",
)
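# Usage note (illustrative): after installation the console_scripts entry
# points above become commands on PATH, e.g. "twitter" invokes
# twitter.cmdline:main and "twitter-archiver" invokes twitter.archiver:main.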
| mit | 6,502,890,697,609,322,000 | 40.566038 | 84 | 0.600545 | false |
RPGOne/Skynet | pytorch-master/torch/autograd/function.py | 1 | 9610 | import torch
import torch._C as _C
import torch.utils.hooks as hooks
from collections import OrderedDict
class Function(_C._FunctionBase):
"""Records operation history and defines formulas for differentiating ops.
Every operation performed on :class:`Variable` s creates a new function
object, that performs the computation, and records that it happened.
The history is retained in the form of a DAG of functions, with edges
denoting data dependencies (``input <- output``). Then, when backward is
called, the graph is processed in the topological ordering, by calling
:func:`backward` methods of each :class:`Function` object, and passing
returned gradients on to next :class:`Function` s.
Normally, the only way users interact with functions is by creating
subclasses and defining new operations. This is a recommended way of
extending torch.autograd.
Since Function logic is a hotspot in most scripts, almost all of it
was moved to our C backend, to ensure that the framework overhead is
minimal.
Each function is meant to be used only once (in the forward pass).
Attributes:
saved_tensors: Tuple of Tensors that were saved in the call to
:func:`forward`.
needs_input_grad: Tuple of booleans of length :attr:`num_inputs`,
indicating whether a given input requires gradient. This can be
used to optimize buffers saved for backward, and ignoring gradient
computation in :func:`~Function.backward`.
num_inputs: Number of inputs given to :func:`forward`.
num_outputs: Number of tensors returned by :func:`forward`.
requires_grad: Boolean indicating whether the :func:`backward` will
ever need to be called.
previous_functions: Tuple of (int, Function) pairs of length
:attr:`num_inputs`. Each entry contains a reference to a
:class:`Function` that created corresponding input, and an index
of the previous function output that's been used.
"""
__call__ = _C._FunctionBase._do_forward
def save_for_backward(self, *tensors):
"""Saves given tensors for a future call to :func:`~Function.backward`.
**This should be called at most once, and only from inside the**
:func:`forward` **method.**
Later, saved tensors can be accessed through the :attr:`saved_tensors`
        attribute. Before returning them to the user, a check is made to
ensure they weren't used in any in-place operation that modified
their content.
Arguments can also be ``None``.
"""
self.to_save = tensors
def mark_dirty(self, *args):
"""Marks given tensors as modified in an in-place operation.
**This should be called at most once, only from inside the**
:func:`forward` **method, and all arguments should be inputs.**
Every tensor that's been modified in-place in a call to :func:`forward`
        should be given to this function, to ensure correctness of our checks.
        It doesn't matter whether the function is called before or after
modification.
"""
self.dirty_tensors = args
def mark_shared_storage(self, *pairs):
"""Marks that given pairs of distinct tensors are sharing storage.
**This should be called at most once, only from inside the**
:func:`forward` **method, and all arguments should be pairs of
(input, output).**
If some of the outputs are going to be tensors sharing storage with
some of the inputs, all pairs of (input_arg, output_arg) should be
given to this function, to ensure correctness checking of in-place
modification. The only exception is when an output is exactly the same
        tensor as input (e.g. in-place ops). In such a case it's easy to conclude
that they're sharing data, so we don't require specifying such
dependencies.
This function is not needed in most functions. It's primarily used in
indexing and transpose ops.
"""
self.shared_pairs = pairs
def mark_non_differentiable(self, *args):
"""Marks outputs as non-differentiable.
**This should be called at most once, only from inside the**
:func:`forward` **method, and all arguments should be outputs.**
This will mark outputs as not requiring gradients, increasing the
efficiency of backward computation. You still need to accept a gradient
for each output in :meth:`~Function.backward`, but it's always going to
be ``None``.
This is used e.g. for indices returned from a max :class:`Function`.
"""
self.non_differentiable = args
@staticmethod
def _register_hook(backward_hooks, hook):
if backward_hooks is None:
backward_hooks = OrderedDict()
handle = hooks.RemovableHandle(backward_hooks)
backward_hooks[handle.id] = hook
return backward_hooks, handle
def forward(self, *input):
"""Performs the operation.
        This function is to be overridden by all subclasses.
It can take and return an arbitrary number of tensors.
"""
raise NotImplementedError
def backward(self, *grad_output):
"""Defines a formula for differentiating the operation.
        This function is to be overridden by all subclasses.
All arguments are tensors. It has to accept exactly as many arguments,
as many outputs did :func:`forward` return, and it should return as
many tensors, as there were inputs to :func:`forward`. Each argument
is the gradient w.r.t the given output, and each returned value should
be the gradient w.r.t. the corresponding input.
"""
raise NotImplementedError
class InplaceFunction(Function):
def __init__(self, inplace=False):
super(InplaceFunction, self).__init__()
self.inplace = inplace
def _nested_map(condition, fn):
def _map(obj):
if condition(obj):
return fn(obj)
elif obj is None:
return None
elif isinstance(obj, (list, tuple)):
return type(obj)(_map(x) for x in obj)
else:
raise ValueError("NestedIOFunction doesn't know how to process "
"an input object of type " + torch.typename(obj))
return _map
def _iter_filter(condition):
def _iter(obj):
if condition(obj):
yield obj
elif obj is None:
return
elif isinstance(obj, (list, tuple)):
for o in obj:
for var in _iter(o):
yield var
else:
raise ValueError("NestedIOFunction doesn't know how to process "
"an input object of type " + torch.typename(obj))
return _iter
def _unflatten(input, proto):
# unflatten a list or tuple input into a nested list/tuple structure
# specified by proto
def unflatten_helper(input, proto):
res = []
if not isinstance(proto, (list, tuple)):
return input[0], input[1:]
for e in proto:
res_e, input = unflatten_helper(input, e)
res.append(res_e)
return type(proto)(res), input
return unflatten_helper(input, proto)[0]
_iter_variables = _iter_filter(lambda o: isinstance(o, torch.autograd.Variable))
_iter_tensors = _iter_filter(torch.is_tensor)
_iter_None_tensors = _iter_filter(lambda o: o is None or torch.is_tensor(o))
_map_variable_tensor = _nested_map(lambda o: isinstance(o, torch.autograd.Variable), lambda o: o.data)
class NestedIOFunction(Function):
def _do_forward(self, *input):
self._nested_input = input
flat_input = tuple(_iter_variables(input))
flat_output = super(NestedIOFunction, self)._do_forward(*flat_input)
nested_output = self._nested_output
nested_variables = _unflatten(flat_output, self._nested_output)
return nested_variables
def _do_backward(self, gradients, retain_variables):
self.retain_variables = retain_variables
result = super(NestedIOFunction, self)._do_backward(gradients, retain_variables)
if not retain_variables:
del self._nested_output
del self._to_save_nested
return result
def backward(self, *gradients):
nested_gradients = _unflatten(gradients, self._nested_output)
result = self.backward_extended(*nested_gradients)
return tuple(_iter_None_tensors(result))
__call__ = _do_forward
def forward(self, *args):
nested_tensors = _map_variable_tensor(self._nested_input)
result = self.forward_extended(*nested_tensors)
del self._nested_input
self._nested_output = result
return tuple(_iter_tensors(result))
def save_for_backward(self, *args):
self.to_save = tuple(_iter_tensors(args))
self._to_save_nested = args
@property
def saved_tensors(self):
flat_tensors = super(NestedIOFunction, self).saved_tensors
return _unflatten(flat_tensors, self._to_save_nested)
def mark_dirty(self, *args, **kwargs):
self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))
def mark_non_differentiable(self, *args, **kwargs):
self.non_differentiable = tuple(_iter_tensors((args, kwargs)))
def forward_extended(self, *input):
raise NotImplementedError
def backward_extended(self, *grad_output):
raise NotImplementedError
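# Illustrative sketch (not part of the original module): a minimal user-defined
# Function using the instance-method forward/backward API documented above; the
# constant multiplier is an arbitrary example.
#
#   class MulConstant(Function):
#       def __init__(self, constant):
#           super(MulConstant, self).__init__()
#           self.constant = constant
#
#       def forward(self, tensor):
#           return tensor * self.constant
#
#       def backward(self, grad_output):
#           # d(x * c)/dx = c
#           return grad_output * self.constant
#
#   y = MulConstant(2.5)(some_variable)  # some_variable: an autograd Variable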
| bsd-3-clause | 1,698,297,703,847,405,600 | 37.75 | 102 | 0.650052 | false |
ipazc/mldatahub | mldatahub/tests/odm/test_dataset_odm.py | 1 | 10502 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MLDataHub
# Copyright (C) 2017 Iván de Paz Centeno <[email protected]>.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
__author__ = 'Iván de Paz Centeno'
import unittest
from mldatahub.config.config import global_config
global_config.set_session_uri("mongodb://localhost:27017/unittests")
from mldatahub.odm.dataset_dao import DatasetDAO, DatasetCommentDAO, DatasetElementDAO, DatasetElementCommentDAO
class TestDatasetODM(unittest.TestCase):
def setUp(self):
self.session = global_config.get_session()
DatasetDAO.query.remove()
DatasetCommentDAO.query.remove()
DatasetElementDAO.query.remove()
DatasetElementCommentDAO.query.remove()
def test_create_remove_dataset(self):
"""
Dataset creation and removal works successfully.
:return:
"""
dataset = DatasetDAO("ip/asd", "example1", "desc", "none")
self.assertTrue(dataset.title, "example")
self.assertTrue(dataset.description, "desc")
self.assertTrue(dataset.reference, "none")
self.session.flush()
dataset2 = DatasetDAO.query.get(title="example1")
self.assertEqual(dataset.title, dataset2.title)
self.assertEqual(dataset.description, dataset2.description)
self.assertEqual(dataset.reference, dataset2.reference)
dataset.delete()
self.session.flush()
dataset3 = DatasetDAO.query.get(title='example1')
self.assertIsNone(dataset3)
def test_create_dataset_add_remove_comment(self):
"""
Dataset creation and removal of comments works successfully.
:return:
"""
dataset = DatasetDAO("ip/asd2", "example2", "desc", "none")
c1 = dataset.add_comment("ivan", "1", "11")
c2 = dataset.add_comment("ivan", "1", "21")
c3 = dataset.add_comment("ivan", "1", "11")
self.session.flush()
dataset2 = DatasetDAO.query.get(title="example2")
self.session.refresh(dataset2)
self.assertEqual(len(dataset2.comments), 3)
self.assertEqual(dataset2.comments[0].author_name, "ivan")
self.assertEqual(dataset2.comments[1].author_name, "ivan")
self.assertEqual(dataset2.comments[2].author_name, "ivan")
dataset.comments[0].delete()
self.session.flush()
dataset3 = DatasetDAO.query.get(title="example2")
self.assertEqual(len(dataset3.comments), 2)
comment = DatasetCommentDAO.query.get(author_name="ivan")
self.assertEqual(comment.dataset_id, dataset._id)
dataset.delete()
self.session.flush()
comment = DatasetCommentDAO.query.get(author_name="ivan")
self.assertIsNone(comment)
def test_create_dataset_add_remove_element(self):
"""
Dataset creation and removal of elements works successfully.
:return:
"""
dataset = DatasetDAO("ip/asd3", "example3", "for content", "unknown")
dataset.add_element("ele1", "description of the element.", None, tags=["tag1", "tag2"])
dataset.add_element("ele2", "description of the element.", None, tags=["tag1"])
self.session.flush()
self.assertEqual(len(dataset.elements), 2)
element = DatasetElementDAO.query.get(tags="tag2")
self.assertEqual(element.title, "ele1")
element.delete()
self.session.flush()
element = DatasetElementDAO.query.get(tags="tag2")
self.assertIsNone(element)
element = DatasetElementDAO.query.get(tags="tag1")
self.assertEqual(element.title, "ele2")
dataset.delete()
element = DatasetElementDAO.query.get(tags="tag1")
self.assertIsNone(element)
def test_create_dataset_element_link_datasets_remove(self):
"""
Dataset element clones itself correctly when it is linked to multiple datasets and removed.
:return:
"""
dataset = DatasetDAO("ip/asd3", "example3", "for content", "unknown")
dataset2 = DatasetDAO("ip/asd4", "example4", "for content", "unknown")
element = dataset.add_element("ele1", "description of the element.", None, tags=["tag1", "tag2"])
self.session.flush()
self.assertEqual(len(dataset.elements), 1)
self.assertEqual(len(dataset2.elements), 0)
element.link_dataset(dataset2)
self.session.flush()
self.assertEqual(len(dataset.elements), 1)
self.assertEqual(len(dataset2.elements), 1)
self.assertEqual(dataset2.elements[0], element)
element.delete()
self.session.flush()
self.assertEqual(len(dataset.elements), 0)
self.assertEqual(len(dataset2.elements), 1)
self.assertNotEqual(element, dataset2.elements[0])
self.assertEqual(element.title, dataset2.elements[0].title)
element = dataset2.elements[0]
element.link_dataset(dataset)
self.session.flush()
self.assertEqual(len(dataset.elements), 1)
self.assertEqual(len(dataset2.elements), 1)
element.delete(owner_id=dataset._id)
self.session.flush()
self.assertEqual(len(dataset.elements), 0)
self.assertEqual(len(dataset2.elements), 1)
self.assertEqual(element.title, dataset2.elements[0].title)
def test_dataset_element_clone(self):
"""
Dataset element clones successfully.
:return:
"""
dataset = DatasetDAO("ip/asd3", "example3", "for content", "unknown")
dataset2 = DatasetDAO("ip/asd4", "example4", "for content", "unknown")
element = dataset.add_element("ele1", "description of the element.", None, tags=["tag1", "tag2"])
self.session.flush()
self.assertEqual(len(dataset.elements), 1)
self.assertEqual(len(dataset2.elements), 0)
cloned_element = element.clone(dataset._id)
self.session.flush()
dataset.update()
self.assertEqual(len(dataset.elements), 2)
self.assertNotEqual(element._id, cloned_element._id)
self.assertEqual(element._id, cloned_element._previous_id)
cloned_element2 = element.clone(dataset2._id)
self.session.flush()
dataset2.update()
self.assertEqual(len(dataset2.elements), 1)
self.assertEqual(element._id, cloned_element2._previous_id)
def test_create_dataset_element_add_remove_comment(self):
"""
Dataset creation and removal of comments from elements works successfully.
:return:
"""
dataset = DatasetDAO("ip/asd4", "example4", "desc", "none")
element = dataset.add_element("ele1", "description of the element.", None, tags=["tag1", "tag2"])
element2 = dataset.add_element("ele2", "description of the element2.", None, tags=["tag1", "tag2"])
element.add_comment("ivan", "1", "11")
element.add_comment("ivan", "1", "21")
element.add_comment("ivan", "1", "11")
self.session.flush()
self.assertEqual(len(element.comments), 3)
self.assertEqual(len(element2.comments), 0)
self.assertEqual(element.comments[0].author_name, "ivan")
self.assertEqual(element.comments[1].author_name, "ivan")
self.assertEqual(element.comments[2].author_name, "ivan")
comment = element.comments[0]
comment.delete()
self.session.flush()
self.session.clear()
element = DatasetElementDAO.query.get(title="ele1")
self.session.refresh(element)
self.assertEqual(len(element.comments), 2)
comment = DatasetElementCommentDAO.query.get(author_name="ivan")
self.assertEqual(comment.element_id, element._id)
element.delete()
self.session.flush()
comment = DatasetElementCommentDAO.query.get(author_name="ivan")
self.assertIsNone(comment)
dataset.delete()
def test_url_prefix_duplication_error(self):
"""
        Tests that a duplicated url prefix cannot be created.
:return:
"""
dataset = DatasetDAO("ip/asd5", "example5", "desc", "none")
self.session.flush()
with self.assertRaises(Exception) as ex:
dataset2 = DatasetDAO("ip/asd5", "example5", "desc", "none")
def test_url_prefix_can_be_reutilized_on_delete(self):
"""
Tests that a url prefix can be reutilized.
:return:
"""
dataset = DatasetDAO("ip/asd5", "example6", "desc", "none")
dataset.delete()
dataset2 = DatasetDAO("ip/asd5", "example6", "desc", "none")
self.assertEqual(dataset2.url_prefix, "ip/asd5")
def test_dataset_tags_allow_dicts(self):
"""
Dataset elements tags allow dictionaries as elements.
:return:
"""
dataset = DatasetDAO("ip/asd4", "example4", "desc", "none", tags=["tag1", {"tag2":"tag example"}])
self.session.flush()
        self.assertEqual(dataset.tags[0], "tag1")
        self.assertEqual(dataset.tags[1], {"tag2":"tag example"})
def test_dataset_element_tags_allow_dicts(self):
"""
Dataset elements tags allow dictionaries as elements.
:return:
"""
dataset = DatasetDAO("ip/asd4", "example4", "desc", "none")
element = dataset.add_element("ele1", "description of the element.", None, tags=["tag1", {"tag2":"tag example"}])
self.session.flush()
element = dataset.elements[0]
        self.assertEqual(element.tags[0], "tag1")
        self.assertEqual(element.tags[1], {"tag2":"tag example"})
def tearDown(self):
DatasetDAO.query.remove()
DatasetCommentDAO.query.remove()
DatasetElementDAO.query.remove()
DatasetElementCommentDAO.query.remove()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,576,441,355,125,993,000 | 33.768212 | 121 | 0.638476 | false |
repotvsupertuga/repo | plugin.video.SportsDevil/lib/utils/rowbalance.py | 1 | 1901 | # -*- coding: UTF-8 -*-
servers = {"1" : "213.152.180.151",
"2" : "213.152.180.250",
"3" : "190.211.255.166",
"4" : "190.211.255.168",
"5" : "190.211.255.162",
"6" : "190.211.255.165",
"7" : "190.211.255.164",
"8" : "190.211.255.163",
"9" : "190.211.255.175",
"10" : "190.211.255.174",
"11" : "190.211.255.173",
"12" : "190.211.255.171",
"13" : "190.211.255.170",
"14" : "190.211.255.169",
"101" : "209.95.56.134",
"103" : "209.95.56.149",
"104" : "209.95.56.153",
"105" : "209.95.56.207",
"106" : "209.95.56.160",
"107" : "31.24.226.251",
"108" : "31.24.226.250",
"109" : "31.24.226.249",
"110" : "31.24.226.248",
"111" : "31.24.226.247",
"112" : "107.182.226.117",
"113" : "107.182.226.115",
"114" : "107.182.226.112",
"115" : "107.182.226.111",
"116" : "107.182.226.89",
"117" : "185.80.220.82",
"118" : "185.80.220.81",
"119" : "185.80.220.80",
"120" : "185.80.220.79",
"121" : "185.80.220.48",
"122" : "107.182.226.179",
"123" : "107.182.226.151",
"124" : "107.182.226.150",
"125" : "107.182.226.148",
"126" : "107.182.226.144",
"127" : "185.80.221.57",
"128" : "185.80.221.27",
"129" : "185.80.220.143",
"130" : "185.80.220.142",
"131" : "185.80.221.58",
"132" : "173.192.103.16",
"133" : "173.192.103.8",
"134" : "75.126.73.18",
"135" : "75.126.73.34",
"136" : "75.126.73.61"}
def get(srp):
return servers[srp]
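# e.g. get("1") -> "213.152.180.151"; an unknown key raises KeyError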
| gpl-2.0 | -6,269,091,706,548,958,000 | 34.203704 | 38 | 0.369805 | false |