repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
e-democracy/edem.profile.base | setup.py | 1 | 1428 | # -*- coding=utf-8 -*-
import os
from setuptools import setup, find_packages
from version import get_version
version = get_version()
setup(name='edem.profile.base',
version=version,
description="Customization of the core profile system",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers for values
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Web Environment",
"Framework :: Zope2",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Natural Language :: English",
"Operating System :: POSIX :: Linux"
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='edem profile',
author='Bill Bushey',
author_email='[email protected]',
url='http://www.e-democracy.org/',
license='GPL 3',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['edem', 'edem.profile'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'gs.skin.ogn.edem',
'gs.profile.base',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",)
| gpl-3.0 | -9,073,239,956,995,717,000 | 31.454545 | 83 | 0.611345 | false |
SandStormHoldings/libvirt-fabric-restapi | macgen.py | 1 | 1414 | #!/usr/bin/python
# macgen.py script to generate a MAC address for virtualized guests
#
from __future__ import print_function
from builtins import map
from builtins import range
import random
import sys
import re
import hashlib
from config import HOSTS,HOST_IDX, main_network
#
def randomMAC(host,counters):
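# Despite the name, most of the address is deterministic: only the Xen OUI prefix 00:16:3e
# is kept from the random list; the last three octets are a per-host counter byte plus the
# first two bytes of the host name's MD5 digest.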
if host not in counters: counters[host]=0
respl = re.compile('(..)')
hsh = hashlib.md5(host.encode('utf-8'))
host_suffix = hsh.hexdigest()[0:4]
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
mapped = list(map(lambda x: "%02x" % x, mac))[0:3]
host_suffix_spl = (respl.findall(host_suffix))
rt= ':'.join(mapped+['%02x'%counters[host]]+host_suffix_spl)
counters[host]+=1
return rt
def genmacs(only_host=None):
rt=''
ipcounter=0
dom_counters = {}
for host,ip in list(HOSTS.items()):
if only_host and host!=only_host: continue
rt+='# %s (%s)\n'%(host,ip)
for i in range(10,255):
vhostaddr = '%s.%s.%s'%(main_network, HOST_IDX[host],i)
rt+= "host virt-%s-%s { hardware ethernet %s; fixed-address %s; }\n"%(host,ipcounter,randomMAC(host,dom_counters),vhostaddr)
ipcounter+=1
rt+="\n"
return rt
if 'range' in sys.argv:
print(genmacs())
elif __name__=='__main__':
# randomMAC() needs a host name and a counters dict; take the host from the command line
print(randomMAC(sys.argv[1], {}))
| mit | 5,409,914,169,610,697,000 | 26.72549 | 136 | 0.618105 | false |
csbrandt/JSBSim.js | JSBSim/tests/TestICOverride.py | 1 | 4338 | # TestICOverride.py
#
# A regression test that checks that IC loaded from a file by a script can be
# overridden.
#
# Copyright (c) 2014 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import sys, unittest
import xml.etree.ElementTree as et
from JSBSim_utils import Table, CreateFDM, ExecuteUntil, SandBox
fpstokts = 0.592484
class TestICOverride(unittest.TestCase):
def setUp(self):
self.sandbox = SandBox()
def tearDown(self):
self.sandbox.erase()
def test_IC_override(self):
# Run the script c1724.xml
script_path = self.sandbox.path_to_jsbsim_file('scripts', 'c1724.xml')
fdm = CreateFDM(self.sandbox)
fdm.load_script(script_path)
vt0 = fdm.get_property_value('ic/vt-kts')
fdm.run_ic()
ExecuteUntil(fdm, 1.0)
# Check that the total velocity exported in the output file matches the IC
# defined in the initialization file
ref = Table()
ref.ReadCSV(self.sandbox('JSBout172B.csv'))
for col, title in enumerate(ref._lines[0]):
if title == 'V_{Total} (ft/s)':
self.assertTrue(abs(ref._lines[1][col] - (vt0 / fpstokts)) < 1E-5,
msg="Original script %s\nThe total velocity is %f. The value %f was expected" % (script_path, ref._lines[1][col], vt0 / fpstokts))
break
else:
self.fail("The total velocity is not exported in %s" % (script_path,))
# Now, we will re-run the same test but the IC will be overridden in the scripts
# The initial total velocity is increased by 1 ft/s
vt0 += 1.0
# The script c1724.xml is loaded and the following line is added in it:
# <property value="..."> ic/vt-kts </property>
# The modified script is then saved with the named 'c1724_0.xml'
tree = et.parse(self.sandbox.elude(script_path))
run_tag = tree.getroot().find("./run")
property = et.SubElement(run_tag, 'property')
property.text = 'ic/vt-kts'
property.attrib['value'] = str(vt0)
tree.write(self.sandbox('c1724_0.xml'))
# Re-run the same check than above. This time we are making sure than the total
# initial velocity is increased by 1 ft/s
self.sandbox.delete_csv_files()
# Because JSBSim internals use static pointers, we cannot rely on Python
# garbage collector to decide when the FDM is destroyed otherwise we can
# get dangling pointers.
del fdm
fdm = CreateFDM(self.sandbox)
fdm.load_script('c1724_0.xml')
self.assertTrue(abs(fdm.get_property_value('ic/vt-kts') - vt0) < 1E-5,
msg="Modified script %s\nThe total velocity in the IC (%f) is different from %f" % (self.sandbox('JSBout172B.csv'), fdm.get_property_value('ic/vt-kts'), vt0))
fdm.run_ic()
ExecuteUntil(fdm, 1.0)
mod = Table()
mod.ReadCSV(self.sandbox('JSBout172B.csv'))
for col, title in enumerate(mod._lines[0]):
if title == 'V_{Total} (ft/s)':
self.assertTrue(abs(mod._lines[1][col] - (vt0 / fpstokts)) < 1E-5,
msg="Modified script %s\nThe total velocity is %f. The value %f was expected" % (self.sandbox('JSBout172B.csv'), mod._lines[1][col], vt0 / fpstokts))
break
else:
self.fail("The total velocity is not exported in %s" % (sandbox('JSBout172B.csv'),))
suite = unittest.TestLoader().loadTestsFromTestCase(TestICOverride)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if test_result.failures or test_result.errors:
sys.exit(-1) # 'make test' will report the test failed.
| lgpl-2.1 | -4,971,547,662,809,787,000 | 39.542056 | 182 | 0.638082 | false |
Yordan92/Pac-man-multiplayer | Ghosts.py | 1 | 5718 | from MakeGraph import MakeGraph
from Moving_pacman import PacMan
import pygame
class Ghost(MakeGraph):
index = 0
def __init__(self,class_graph,x,y):
Ghost.index = Ghost.index + 1
self.all_nodes = class_graph.get_nodes()
self.paths_to_all_nodes = class_graph.get_shortest_path()
self.path = []
self.hunting = False
self.name_image_u = "Ghost_red_up"
self.name_image_d = "Ghost_red_down"
self.name_image_l = "Ghost_red_left"
self.name_image_r = "Ghost_red_right"
self.name_image = self.name_image_u
self.cords={'x': x, 'y': y}
# {'x': 92, 'y': 161}
self.index = Ghost.index
def next_hop(self):
if self.path:
return self.path[0]
return []
def find_ghost_cords(self):
ghost_x = int(self.cords['y']/23)
ghost_y = int(self.cords['x']/23)
return (ghost_x,ghost_y)
def get_pictures(self):
if self.index == 0 :
self.name_image_u = "Ghost_red_up"
self.name_image_d = "Ghost_red_down"
self.name_image_l = "Ghost_red_left"
self.name_image_r = "Ghost_red_right"
if self.index == 1:
self.name_image_u = "Ghost_orange_up"
self.name_image_d = "Ghost_orange_down"
self.name_image_l = "Ghost_orange_left"
self.name_image_r = "Ghost_orange_right"
if self.index == 2:
self.name_image_u = "Ghost_pink_up"
self.name_image_d = "Ghost_pink_down"
self.name_image_l = "Ghost_pink_left"
self.name_image_r = "Ghost_pink_right"
if self.index == 3:
self.name_image_u = "Ghost_cyan_up"
self.name_image_d = "Ghost_cyan_down"
self.name_image_l = "Ghost_cyan_left"
self.name_image_r = "Ghost_cyan_right"
def find_closest_nodes(self):
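# Breadth-first search from the ghost's tile that collects every graph node reachable
# without crossing another node (the collected coordinates are returned flipped).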
closest_nodes =[]
ghost_x = int(self.cords['x']/23)
ghost_y = int(self.cords['y']/23)
vertex = (ghost_y,ghost_x)
queue = [vertex]
Visited = [vertex]
# if vertex in all_Nodes:
# all_Nodes.remove(vertex)
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
for v_adj in new_v_adj:
if self.is_p_vertex(v_adj) and v_adj not in Visited:
if v_adj in self.all_nodes:
closest_nodes.append((v_adj[1],v_adj[0]))
else:
queue.append(v_adj)
Visited.append(v_adj)
return closest_nodes
def find_closest_vertex(self):
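# BFS from the ghost's tile to the nearest graph node; returns the list of tiles leading to
# that node (excluding the start), or [] if the ghost is already standing on a node.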
closest_nodes =[]
ghost_x = int(self.cords['x']/23)
ghost_y = int(self.cords['y']/23)
vertex = (ghost_y,ghost_x)
queue = [vertex]
map_to_a_vertex = {}
visited_n = [vertex]
# print (self.all_nodes)
if vertex in self.all_nodes:
return []
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
for v_adj in new_v_adj:
map_to_a_vertex[v_adj] = new_v
if v_adj in self.all_nodes:
full_path = [v_adj]
while map_to_a_vertex[v_adj] != vertex:
v_adj = map_to_a_vertex[v_adj]
full_path.insert(0,v_adj)
return full_path
if MakeGraph.is_p_vertex(self,v_adj) and v_adj not in visited_n:
queue.append(v_adj)
visited_n.append(v_adj)
def ghost_move(self, pacman_vertex, pacman_cords):
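# Route selection: start hunting once the ghost shares pacman's vertex; while hunting, chase
# tile-by-tile via search_eat. Otherwise walk to the nearest graph node first, or, if already
# on a node, follow the precomputed shortest path with each step doubled (half speed).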
my_cords = (int(self.cords['y']/23),int(self.cords['x']/23))
if my_cords == pacman_vertex:
self.hunting = True
if self.hunting == True:
self.path = self.search_eat(pacman_cords)
if not self.path:
if self.hunting == True:
self.hunting = False
if self.find_closest_vertex() != []:
self.path = self.find_closest_vertex()
else:
for i in self.paths_to_all_nodes[my_cords][pacman_vertex]:
self.path.extend(2*[i])
def ghost_make_move(self):
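# Advance one tile along the current path and pick the sprite facing the direction of movement.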
# if not self.path:
# self.ghost_move(screen,pacman_vertex,pacman_cords)
new_step = self.path.pop(0)
old_step = (int(self.cords['y'] / 23), int(self.cords['x'] / 23))
if old_step[0] == new_step[0] and old_step[1]<new_step[1]:
self.name_image = self.name_image_r
if old_step[0] == new_step[0] and old_step[1]>new_step[1]:
self.name_image = self.name_image_l
if old_step[0] < new_step[0] and old_step[1]==new_step[1]:
self.name_image = self.name_image_d
if old_step[0] > new_step[0] and old_step[1]==new_step[1]:
self.name_image = self.name_image_u
self.cords['y'] = new_step[0]*23
self.cords['x'] = new_step[1]*23
def search_eat(self,pacman_cords):
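# BFS from the ghost toward pacman's tile; returns just the next tile to step on as a
# one-element list, or [] when pacman shares the tile or is unreachable.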
closest_nodes =[]
# pacman_x = int(pacman_cords['x']/23)
# pacman_y = int(pacman_cords['y']/23)
ghost_x = int(self.cords['x']/23)
ghost_y = int(self.cords['y']/23)
vertex = (ghost_y,ghost_x)
queue = [vertex]
map_to_a_vertex = {}
visited_n = [vertex]
if vertex == pacman_cords:
return []
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
for v_adj in new_v_adj:
if self.is_p_vertex(v_adj) and v_adj not in visited_n:
queue.append(v_adj)
visited_n.append(v_adj)
map_to_a_vertex[v_adj] = new_v
if v_adj == pacman_cords:
# map_to_a_vertex[v_adj] = new_v
# print(map_to_a_vertex)
# print("abc",v_adj,new_v)
while map_to_a_vertex[v_adj] != vertex:
# print("abc",v_adj)
v_adj = map_to_a_vertex[v_adj]
return [v_adj]
return []
def draw_ghost(self,screen):
ghost = pygame.image.load("Ghosts/Ghost_cyan_down.png")
# print(self.find_closest_vertex())
self.ghost_move((14, 13), (16, 14))
# p = self.path[-1]
# pygame.draw.rect(screen, (124, 124, 0),
# (p[1]* 23, p[0] * 23, 23, 23))
screen.blit(ghost,(self.cords['x'], self.cords['y']))
| gpl-3.0 | 8,085,236,753,210,260,000 | 25.719626 | 68 | 0.59164 | false |
digidotcom/epoxy | setup.py | 1 | 1990 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014 Etherios, Inc. All rights reserved.
# Etherios, Inc. is a Division of Digi International.
from setuptools import setup, find_packages
def get_long_description():
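# Read README.md and, if pandoc plus the pyandoc bindings are available, convert the
# Markdown to reStructuredText for PyPI; otherwise fall back to the raw Markdown.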
long_description = open('README.md').read()
try:
import subprocess
import pandoc
process = subprocess.Popen(
['which pandoc'],
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True)
pandoc_path = process.communicate()[0]
pandoc_path = pandoc_path.strip('\n')
pandoc.core.PANDOC_PATH = pandoc_path
doc = pandoc.Document()
doc.markdown = long_description
long_description = doc.rst
except:
print("Could not find pandoc or convert properly")
print(" make sure you have pandoc (system) and pyandoc (python module) installed")
return long_description
setup(
name='epoxy',
version='1.0',
description='Inversion of Control Framework for Python',
long_description=get_long_description(),
url='https://github.com/etherios/epoxy',
author="Paul Osborne",
author_email="[email protected]",
packages=find_packages(),
install_requires=open('requirements.txt').read().split(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Operating System :: OS Independent",
],
)
| mpl-2.0 | -2,129,511,811,997,380,000 | 32.728814 | 91 | 0.650251 | false |
DBuildService/atomic-reactor | atomic_reactor/api.py | 1 | 5538 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Python API for atomic-reactor. This is the official way of interacting with atomic-reactor.
"""
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.outer import PrivilegedBuildManager, DockerhostBuildManager
from atomic_reactor.plugins.pre_pull_base_image import PullBaseImagePlugin
from atomic_reactor.plugins.post_tag_and_push import TagAndPushPlugin
__all__ = (
'build_image_in_privileged_container',
'build_image_using_hosts_docker',
'build_image_here',
)
def _prepare_build_json(image, source, target_registries, target_registries_insecure,
dont_pull_base_image, **kwargs):
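# Assemble the build JSON consumed by the build managers: each target registry maps to its
# insecure flag for the tag_and_push postbuild plugin, and the pull_base_image prebuild
# plugin is added unless base-image pulling is disabled.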
target_registries = target_registries or []
registries = dict([(registry, {"insecure": target_registries_insecure})
for registry in target_registries])
build_json = {
"image": image,
"source": source,
"postbuild_plugins": [{
"name": TagAndPushPlugin.key,
"args": {
"registries": registries
}
}]
}
if not dont_pull_base_image:
build_json["prebuild_plugins"] = [{
"name": PullBaseImagePlugin.key,
"args": {}
}]
build_json.update(kwargs)
return build_json
def build_image_in_privileged_container(build_image, source, image, target_registries=None,
push_buildroot_to=None,
target_registries_insecure=False,
dont_pull_base_image=False, **kwargs):
"""
build image from provided dockerfile (specified by `source`) in privileged container by
running another docker instance inside the container
:param build_image: str, image where target image should be built
:param source: dict, where/how to get source code to put in image
:param image: str, tag for built image ([registry/]image_name[:tag])
:param target_registries: list of str, list of registries to push image to (might change in
future)
:param push_buildroot_to: str, repository where buildroot should be pushed
:param target_registries_insecure: bool, allow connecting to target registries over plain http
:param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile
:return: BuildResults
"""
build_json = _prepare_build_json(image, source, target_registries, target_registries_insecure,
dont_pull_base_image, **kwargs)
m = PrivilegedBuildManager(build_image, build_json)
build_response = m.build()
if push_buildroot_to:
m.commit_buildroot()
m.push_buildroot(push_buildroot_to)
return build_response
def build_image_using_hosts_docker(build_image, source, image, target_registries=None,
push_buildroot_to=None,
target_registries_insecure=False, dont_pull_base_image=False,
**kwargs):
"""
build image from provided dockerfile (specified by `source`) in privileged container
using docker from host
:param build_image: str, image where target image should be built
:param source: dict, where/how to get source code to put in image
:param image: str, tag for built image ([registry/]image_name[:tag])
:param target_registries: list of str, list of registries to push image to (might change in
future)
:param push_buildroot_to: str, repository where buildroot should be pushed
:param target_registries_insecure: bool, allow connecting to target registries over plain http
:param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile
:return: BuildResults
"""
build_json = _prepare_build_json(image, source, target_registries, target_registries_insecure,
dont_pull_base_image, **kwargs)
m = DockerhostBuildManager(build_image, build_json)
build_response = m.build()
if push_buildroot_to:
m.commit_buildroot()
m.push_buildroot(push_buildroot_to)
return build_response
def build_image_here(source, image, target_registries=None, target_registries_insecure=False,
dont_pull_base_image=False, **kwargs):
"""
build image from provided dockerfile (specified by `source`) in current environment
:param source: dict, where/how to get source code to put in image
:param image: str, tag for built image ([registry/]image_name[:tag])
:param target_registries: list of str, list of registries to push image to (might change in
future)
:param target_registries_insecure: bool, allow connecting to target registries over plain http
:param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile
:return: BuildResults
"""
build_json = _prepare_build_json(image, source, target_registries, target_registries_insecure,
dont_pull_base_image, **kwargs)
m = DockerBuildWorkflow(**build_json)
return m.build_docker_image()
def list_dockerfiles_in_git():
"""
clone provided repo and return all dockerfiles found in the repo
:return:
"""
| bsd-3-clause | 5,659,945,660,937,605,000 | 40.022222 | 98 | 0.650235 | false |
rainest/dance-partner-matching | networkx/generators/tests/test_degree_seq.py | 1 | 7378 | #!/usr/bin/env python
from nose.tools import *
import networkx
from networkx import *
from networkx.generators.degree_seq import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
from networkx.utils import uniform_sequence,powerlaw_sequence
"""Generators - Degree Sequence
----------------------
"""
class TestGeneratorsDegreeSequence():
def test_configuration_model(self):
# empty graph has empty degree sequence
deg_seq=[]
G=configuration_model(deg_seq)
assert_equal(G.degree(), {})
deg_seq=[5,3,3,3,3,2,2,2,1,1,1]
G=configuration_model(deg_seq,seed=12345678)
assert_equal(sorted(G.degree().values(),reverse=True),
[5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
assert_equal(sorted(G.degree(range(len(deg_seq))).values(),
reverse=True),
[5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
# test that fixed seed delivers the same graph
deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
G1=configuration_model(deg_seq,seed=1000)
G2=configuration_model(deg_seq,seed=1000)
assert_true(is_isomorphic(G1,G2))
G1=configuration_model(deg_seq,seed=10)
G2=configuration_model(deg_seq,seed=10)
assert_true(is_isomorphic(G1,G2))
z=[5,3,3,3,3,2,2,2,1,1,1]
assert_true(is_valid_degree_sequence(z))
assert_raises(networkx.exception.NetworkXError,
configuration_model, z, create_using=DiGraph())
G=havel_hakimi_graph(z)
G=configuration_model(z)
z=[1000,3,3,3,3,2,2,2,1,1,1]
assert_false(is_valid_degree_sequence(z))
def test_expected_degree_graph(self):
# empty graph has empty degree sequence
deg_seq=[]
G=expected_degree_graph(deg_seq)
assert_equal(G.degree(), {})
# test that fixed seed delivers the same graph
deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
G1=expected_degree_graph(deg_seq,seed=1000)
G2=expected_degree_graph(deg_seq,seed=1000)
assert_true(is_isomorphic(G1,G2))
G1=expected_degree_graph(deg_seq,seed=10)
G2=expected_degree_graph(deg_seq,seed=10)
assert_true(is_isomorphic(G1,G2))
assert_raises(networkx.exception.NetworkXError,
expected_degree_graph, deg_seq,
create_using=DiGraph())
def test_havel_hakimi_construction(self):
z=[1000,3,3,3,3,2,2,2,1,1,1]
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z)
z=["A",3,3,3,3,2,2,2,1,1,1]
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z)
z=[5,4,3,3,3,2,2,2]
G=havel_hakimi_graph(z)
G=configuration_model(z)
z=[6,5,4,4,2,1,1,1]
assert_false(is_valid_degree_sequence(z))
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z)
z=[10,3,3,3,3,2,2,2,2,2,2]
assert_true(is_valid_degree_sequence(z))
G=havel_hakimi_graph(z)
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z, create_using=DiGraph())
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z, create_using=MultiGraph())
def test_degree_sequence_tree(self):
z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_true(is_valid_degree_sequence(z))
G=degree_sequence_tree(z)
assert_true(len(G.nodes())==len(z))
assert_true(len(G.edges())==sum(z)/2)
assert_raises(networkx.exception.NetworkXError,
degree_sequence_tree, z, create_using=DiGraph())
z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_false(is_valid_degree_sequence(z))
assert_raises(networkx.exception.NetworkXError,
degree_sequence_tree, z)
def test_degree_sequences(self):
seq=create_degree_sequence(10,uniform_sequence)
assert_equal(len(seq), 10)
seq=create_degree_sequence(10,powerlaw_sequence)
assert_equal(len(seq), 10)
def test_double_edge_swap(self):
graph = barabasi_albert_graph(200,1)
degreeStart = sorted(graph.degree().values())
G = connected_double_edge_swap(graph, 40)
assert_true(is_connected(graph))
degseq = sorted(graph.degree().values())
assert_true(degreeStart == degseq)
G = double_edge_swap(graph, 40)
degseq2 = sorted(graph.degree().values())
assert_true(degreeStart == degseq2)
def test_degree_seq_c4(self):
G = networkx.cycle_graph(4)
degree_start = sorted(G.degree().values())
G = double_edge_swap(G,1,100)
degseq = sorted(G.degree().values())
assert_true(degree_start == degseq)
def test_construction_smax_graph0(self):
z=["A",3,3,3,3,2,2,2,1,1,1]
assert_raises(networkx.exception.NetworkXError,
li_smax_graph, z)
def test_construction_smax_graph1(self):
z=[5,4,3,3,3,2,2,2]
G=li_smax_graph(z)
degs = sorted(degree(G).values(),reverse=True)
assert_equal(degs, z)
def test_construction_smax_graph2(self):
z=[6,5,4,4,2,1,1,1]
assert_false(is_valid_degree_sequence(z))
assert_raises(networkx.exception.NetworkXError,
li_smax_graph, z)
def test_construction_smax_graph3(self):
z=[10,3,3,3,3,2,2,2,2,2,2]
assert_true(is_valid_degree_sequence(z))
G=li_smax_graph(z)
degs = sorted(degree(G).values(),reverse=True)
assert_equal(degs, z)
assert_raises(networkx.exception.NetworkXError,
li_smax_graph, z, create_using=DiGraph())
class TestRandomClusteredGraph:
def test_valid(self):
node=[1,1,1,2,1,2,0,0]
tri=[0,0,0,0,0,1,1,1]
joint_degree_sequence=zip(node,tri)
G = networkx.random_clustered_graph(joint_degree_sequence)
assert_equal(G.number_of_nodes(),8)
assert_equal(G.number_of_edges(),7)
def test_valid2(self):
G = networkx.random_clustered_graph(\
[(1,2),(2,1),(1,1),(1,1),(1,1),(2,0)])
assert_equal(G.number_of_nodes(),6)
assert_equal(G.number_of_edges(),10)
def test_invalid1(self):
assert_raises((TypeError,networkx.NetworkXError),
networkx.random_clustered_graph,[[1,1],[2,1],[0,1]])
def test_invalid2(self):
assert_raises((TypeError,networkx.NetworkXError),
networkx.random_clustered_graph,[[1,1],[1,2],[0,1]])
def test_li_smax():
G = networkx.barabasi_albert_graph(25,1) #Any old graph
Gdegseq = list(G.degree().values()) #degree sequence
Gdegseq.sort(reverse=True)
# Tests the 'unconstrained version'
assert_true(not (sum(Gdegseq)%2))
Gmax = networkx.li_smax_graph(Gdegseq)
Gmaxdegseq = list(Gmax.degree().values())
Gmaxdegseq.sort(reverse=True)
assert_equal(G.order(),Gmax.order()) #Sanity Check on the nodes
# make sure both graphs have the same degree sequence
assert_equal(Gdegseq,Gmaxdegseq)
# make sure the smax graph is actually bigger
assert_true(networkx.s_metric(G) <= networkx.s_metric(Gmax))
| bsd-2-clause | -4,364,301,775,330,757,000 | 34.471154 | 78 | 0.594741 | false |
padmec-reservoir/ELLIPTIc | elliptic/Kernel/DSL.py | 1 | 3082 | from abc import ABC, abstractmethod
from contextlib import contextmanager
from types import ModuleType
from typing import List, Iterator
from elliptic.Kernel.Expression import Expression
from .Contract import DSLContract
from .TemplateManager import TemplateManagerBase
from .TreeBuilder import TreeBuild
class DSLMeta(ABC):
"""Class which stores information regarding the DSL compilation.
"""
@abstractmethod
def libs(self) -> List[str]:
"""Returns the list of libraries that should be linked against.
Example:
['MOAB', 'Trilinos']
"""
raise NotImplementedError
@abstractmethod
def include_dirs(self) -> List[str]:
"""Returns the list of include directories that should be used when compiling.
Cypyler adds the numpy includes by default. Any extra include paths should be returned here.
Example:
['/usr/local/include/moab']
"""
raise NotImplementedError
class DSLException(Exception):
pass
class DSLBuildError(DSLException):
"""Exception raised when an error related to a DSL build process happens.
"""
class DSL:
"""Defines the interface for interacting with a DSL.
Parameters:
template_manager: A `TemplateManagerBase` subinstance.
dsl_contract: A DSL Contract.
dsl_meta: A `DSLMeta` instance.
"""
def __init__(self,
template_manager: TemplateManagerBase,
dsl_contract: DSLContract,
dsl_meta: DSLMeta) -> None:
self.template_manager = template_manager
self.dsl_contract = dsl_contract
self.dsl_meta = dsl_meta
self.built = False
self.building = False
self.built_module: ModuleType = None
@contextmanager
def root(self) -> Iterator[DSLContract]:
"""Entry point for building expressions.
Should be used as a context manager, using the `with` statement.
"""
if not self.built and not self.building:
self.building = True
root_ = self.dsl_contract.Base()
yield root_
self._build(root_.expr)
else:
raise DSLBuildError("Can't get root while or after building a DSL tree.")
def get_built_module(self) -> ModuleType:
"""Returns the compiled module that holds the generated code.
"""
if not self.built:
raise DSLBuildError("Can't get the built module before finishing building the DSL tree.")
return self.built_module
def _build(self, root: Expression):
"""Builds a DSL tree, generating the corresponding code, given the DSL tree root.
The DSL tree root should always be a StatementRoot instance.
Parameters:
root: The DSL tree root.
"""
tree_builder = TreeBuild(self.template_manager,
self.dsl_meta.libs(), self.dsl_meta.include_dirs())
self.built_module = tree_builder.build(root)
self.building = False
self.built = True
| mit | 7,916,992,614,376,502,000 | 28.92233 | 101 | 0.634653 | false |
coxmediagroup/django-socialregistration | socialregistration/views.py | 1 | 14068 | import uuid
from django.conf import settings
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.utils.translation import gettext as _
from django.http import HttpResponseRedirect
try:
from django.views.decorators.csrf import csrf_protect
has_csrf = True
except ImportError:
has_csrf = False
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout as auth_logout
from django.contrib.sites.models import Site
from socialregistration.forms import UserForm, ClaimForm, ExistingUser
from socialregistration.utils import (OAuthClient, OAuthTwitter,
OpenID, _https, DiscoveryFailure, GoogleOpenIDSchemas, YahooOpenIDSchemas, MyOpenIDSchemas)
from socialregistration.models import FacebookProfile, TwitterProfile, OpenIDProfile
from openid.extensions import ax, pape, sreg
from urlparse import urljoin
from django.db import connection
from django.core.urlresolvers import reverse as reverseURL
from socialregistration import util
from openid.consumer import consumer
FB_ERROR = _('We couldn\'t validate your Facebook credentials')
GENERATE_USERNAME = bool(getattr(settings, 'SOCIALREGISTRATION_GENERATE_USERNAME', False))
def _get_next(request):
"""
Returns a url to redirect to after the login
"""
if 'next' in request.session:
next = request.session['next']
del request.session['next']
return next
elif 'next' in request.GET:
return request.GET.get('next')
elif 'next' in request.POST:
return request.POST.get('next')
else:
return getattr(settings, 'LOGIN_REDIRECT_URL', '/')
def setup(request, template='socialregistration/setup.html',
form_class=UserForm, extra_context=dict(), claim_form_class=ClaimForm):
"""
Setup view to create a username & set email address after authentication
"""
try:
social_user = request.session['socialregistration_user']
social_profile = request.session['socialregistration_profile']
except KeyError:
return render_to_response(
template, dict(error=True), context_instance=RequestContext(request))
if not GENERATE_USERNAME:
# User can pick own username
if not request.method == "POST":
form = form_class(social_user, social_profile,)
else:
form = form_class(social_user, social_profile, request.POST)
try:
if form.is_valid():
form.save()
user = form.profile.authenticate()
login(request, user)
del request.session['socialregistration_user']
del request.session['socialregistration_profile']
return HttpResponseRedirect(_get_next(request))
except ExistingUser:
# see what the error is. if it's just an existing user, we want to let them claim it.
if 'submitted' in request.POST:
form = claim_form_class(
request.session['socialregistration_user'],
request.session['socialregistration_profile'],
request.POST
)
else:
form = claim_form_class(
request.session['socialregistration_user'],
request.session['socialregistration_profile'],
initial=request.POST
)
if form.is_valid():
form.save()
user = form.profile.authenticate()
login(request, user)
del request.session['socialregistration_user']
del request.session['socialregistration_profile']
return HttpResponseRedirect(_get_next(request))
extra_context['claim_account'] = True
extra_context.update(dict(form=form))
return render_to_response(template, extra_context,
context_instance=RequestContext(request))
else:
# Generate user and profile
social_user.username = str(uuid.uuid4())[:30]
social_user.save()
social_profile.user = social_user
social_profile.save()
# Authenticate and login
user = social_profile.authenticate()
login(request, user)
# Clear & Redirect
del request.session['socialregistration_user']
del request.session['socialregistration_profile']
return HttpResponseRedirect(_get_next(request))
if has_csrf:
setup = csrf_protect(setup)
def facebook_login(request, template='socialregistration/facebook.html',
extra_context=dict(), account_inactive_template='socialregistration/account_inactive.html'):
"""
View to handle the Facebook login
"""
if request.facebook.uid is None:
extra_context.update(dict(error=FB_ERROR))
return HttpResponseRedirect(reverse('login'))
user = authenticate(uid=request.facebook.uid)
if user is None:
request.session['socialregistration_user'] = User()
request.session['socialregistration_profile'] = FacebookProfile(uid=request.facebook.uid)
request.session['next'] = _get_next(request)
return HttpResponseRedirect(reverse('socialregistration_setup'))
if not user.is_active:
return render_to_response(account_inactive_template, extra_context,
context_instance=RequestContext(request))
login(request, user)
return HttpResponseRedirect(_get_next(request))
def facebook_connect(request, template='socialregistration/facebook.html',
extra_context=dict()):
"""
View to handle connecting existing django accounts with facebook
"""
if request.facebook.uid is None or request.user.is_authenticated() is False:
extra_context.update(dict(error=FB_ERROR))
return render_to_response(template, extra_context,
context_instance=RequestContext(request))
try:
profile = FacebookProfile.objects.get(uid=request.facebook.uid)
except FacebookProfile.DoesNotExist:
profile = FacebookProfile.objects.create(user=request.user,
uid=request.facebook.uid)
return HttpResponseRedirect(_get_next(request))
def logout(request, redirect_url=None):
"""
Logs the user out of django. This is only a wrapper around
django.contrib.auth.logout. Logging users out of Facebook for instance
should be done like described in the developer wiki on facebook.
http://wiki.developers.facebook.com/index.php/Connect/Authorization_Websites#Logging_Out_Users
"""
auth_logout(request)
url = redirect_url or getattr(settings, 'LOGOUT_REDIRECT_URL', '/')
return HttpResponseRedirect(url)
def twitter(request, account_inactive_template='socialregistration/account_inactive.html',
extra_context=dict()):
"""
Actually setup/login an account relating to a twitter user after the oauth
process is finished successfully
"""
client = OAuthTwitter(
request, settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET_KEY,
settings.TWITTER_REQUEST_TOKEN_URL,
)
user_info = client.get_user_info()
if request.user.is_authenticated():
# Handling already logged in users connecting their accounts
try:
profile = TwitterProfile.objects.get(twitter_id=user_info['id'])
except TwitterProfile.DoesNotExist: # There can only be one profile!
profile = TwitterProfile.objects.create(user=request.user, twitter_id=user_info['id'])
return HttpResponseRedirect(_get_next(request))
user = authenticate(twitter_id=user_info['id'])
if user is None:
profile = TwitterProfile(twitter_id=user_info['id'])
user = User()
request.session['socialregistration_profile'] = profile
request.session['socialregistration_user'] = user
request.session['next'] = _get_next(request)
return HttpResponseRedirect(reverse('socialregistration_setup'))
if not user.is_active:
return render_to_response(
account_inactive_template,
extra_context,
context_instance=RequestContext(request)
)
login(request, user)
return HttpResponseRedirect(_get_next(request))
def oauth_redirect(request, consumer_key=None, secret_key=None,
request_token_url=None, access_token_url=None, authorization_url=None,
callback_url=None, parameters=None):
"""
View to handle the OAuth based authentication redirect to the service provider
"""
request.session['next'] = _get_next(request)
client = OAuthClient(request, consumer_key, secret_key,
request_token_url, access_token_url, authorization_url, callback_url, parameters)
return client.get_redirect()
def oauth_callback(request, consumer_key=None, secret_key=None,
request_token_url=None, access_token_url=None, authorization_url=None,
callback_url=None, template='socialregistration/oauthcallback.html',
extra_context=dict(), parameters=None):
"""
View to handle final steps of OAuth based authentication where the user
gets redirected back to from the service provider
"""
client = OAuthClient(request, consumer_key, secret_key, request_token_url,
access_token_url, authorization_url, callback_url, parameters)
extra_context.update(dict(oauth_client=client))
if not client.is_valid():
return HttpResponseRedirect(reverse('login'))
# We're redirecting to the setup view for this oauth service
return HttpResponseRedirect(reverse(client.callback_url))
def openid_redirect(request):
"""
Redirect the user to the openid provider
"""
request.session['next'] = _get_next(request)
openid_provider = request.GET.get('openid_provider', '').strip()
request.session['openid_provider'] = openid_provider
client = OpenID(
request,
'http%s://%s%s' % (
_https(),
Site.objects.get_current().domain,
reverse('openid_callback')
),
openid_provider
)
try:
return client.get_redirect()
except DiscoveryFailure:
request.session['openid_error'] = True
return HttpResponseRedirect(settings.LOGIN_URL)
def openid_callback(request, template='socialregistration/openid.html',
extra_context=dict(), account_inactive_template='socialregistration/account_inactive.html'):
"""
Catches the user when he's redirected back from the provider to our site
"""
client = OpenID(
request,
'http%s://%s%s' % (
_https(),
Site.objects.get_current().domain,
reverse('openid_callback')
),
request.session.get('openid_provider')
)
try:
request_args = util.normalDict(request.GET)
if request.method == 'POST':
request_args.update(util.normalDict(request.POST))
if request_args:
client.complete()
c = client.consumer
return_to = util.getViewURL(request, openid_callback)
response = client.result
ax_items = {}
if response.status == consumer.SUCCESS:
provider = request.session.get('openid_provider')
# Set the schema uri depending on who the openid provier is:
# request only name and email by default (same as Google schemas):
schemas = GoogleOpenIDSchemas()
if 'yahoo' in provider:
schemas = YahooOpenIDSchemas()
if 'myopenid' in provider:
schemas = MyOpenIDSchemas()
ax_response = {}
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
# Name and email schemas are always set, but not others so check if they are not empty first:
birth_date = zip = gender = []
if schemas.birth_date_schema:
birth_date = ax_response.get(schemas.birth_date_schema)
if schemas.zip_schema:
zip = ax_response.get(schemas.zip_schema)
if schemas.gender_schema:
gender = ax_response.get(schemas.gender_schema)
ax_items = {
'display_name': ax_response.get(schemas.name_schema),
'email': ax_response.get(schemas.email_schema),
'birth_date': birth_date,
'home_zip': zip,
'gender': gender,
}
request.session['ax_items'] = ax_items
except Exception, e:
pass
if client.is_valid():
identity = client.result.identity_url
if request.user.is_authenticated():
# Handling already logged in users just connecting their accounts
try:
profile = OpenIDProfile.objects.get(identity=identity)
except OpenIDProfile.DoesNotExist: # There can only be one profile with the same identity
profile = OpenIDProfile.objects.create(user=request.user,
identity=identity)
return HttpResponseRedirect(_get_next(request))
user = authenticate(identity=identity)
if user is None:
request.session['socialregistration_user'] = User()
request.session['socialregistration_profile'] = OpenIDProfile(
identity=identity
)
return HttpResponseRedirect(reverse('socialregistration_setup'))
if not user.is_active:
return render_to_response(
account_inactive_template,
extra_context,
context_instance=RequestContext(request)
)
login(request, user)
return HttpResponseRedirect(_get_next(request))
return HttpResponseRedirect(reverse('login'))
| mit | 469,264,267,113,633,500 | 35.73107 | 109 | 0.643091 | false |
emulbreh/shrubbery | shrubbery/db/utils.py | 1 | 11220 | import re
from importlib import import_module

from django.conf import settings
from django.db import models
from shrubbery.utils.text import camel_case
def no_related_name(hidden=False):
no_related_name._next += 1
name = "_no_related_name_%%(class)s_%s" % (no_related_name._next)
if hidden:
name += '+'
return name
no_related_name._next = 0
def _remove_related_accessors(sender, **kwargs):
for attr in dir(sender):
if attr.startswith('_no_related_name_'):
# delete() requires related descriptors - stupid multi-manager code
#delattr(sender, attr)
pass
models.signals.class_prepared.connect(_remove_related_accessors)
class ImplicitQMixin(object):
def as_q(self):
return models.Q()
def add_to_query(self, query, aliases=None):
query.add_q(self.as_q())
def __and__(self, other):
return self.as_q() & other
def __or__(self, other):
return self.as_q() | other
def __invert__(self):
return ~self.as_q()
class CompoundQ(object):
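# Combines Q-like objects (plain Q instances and objects implementing add_to_query) with
# AND/OR, flattening combinations that share the same connector instead of nesting them.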
def __init__(self, objects, conjunction=True):
self.objects = objects
self.conjunction = conjunction
def __invert__(self):
return self.__class__([~obj for obj in self.objects], not self.conjunction)
def combine(self, other, conjunction):
if self.conjunction == conjunction:
# If the connection type is equal, we can avoid further nesting
objects = None
if isinstance(other, models.Q):
# If we already have a builtin Q, we can just add `other` one to it.
for index, obj in enumerate(self.objects):
if isinstance(obj, models.Q):
objects = self.objects[:]
if self.conjunction:
objects[index] &= other
else:
objects[index] |= other
break
elif isinstance(other, CompoundQ) and other.conjunction == conjunction:
# Two CompoundQ objects may be combined in a single new object without nesting
objects = self.objects + other.objects
if not objects:
objects = self.objects + [other]
return self.__class__(objects, conjunction)
return CompoundQ([self, other], conjunction)
def __and__(self, other):
return self.combine(other, True)
def __or__(self, other):
return self.combine(other, False)
def __rand__(self, other):
# Since '&' is supposed to be commutative
return self & other
def __ror__(self, other):
# Since '|' is supposed to be commutative
return self | other
@property
def connector(self):
return self.conjunction and models.sql.where.AND or models.sql.where.OR
def add_to_query(self, query, aliases):
clones = [query.clone().add_q(obj) for obj in self.objects[1:]]
query.add_q(self.objects[0])
for clone in clones:
query.combine(clone, self.connector)
def get_q(obj):
if isinstance(obj, dict):
return models.Q(**obj)
if hasattr(obj, 'add_to_query') or isinstance(obj, models.Q):
return obj
if hasattr(obj, 'as_q'):
return obj.as_q()
raise ValueError()
def get_model(obj, allow_import=False, proxy=True):
if isinstance(obj, models.base.ModelBase):
model = obj
elif isinstance(obj, models.Model):
model = obj.__class__
elif hasattr(obj, 'model'):
return obj.model
elif allow_import and isinstance(obj, str):
module_name, name = obj.rsplit('.')
module = import_module(module_name)
model = getattr(module, name)
else:
raise ValueError
if not proxy:
while model._meta.proxy:
model = model._meta.proxy_for_model
return model
def get_manager(obj):
if isinstance(obj, models.Manager):
return obj
if isinstance(obj, models.base.ModelBase):
return obj._default_manager
raise ValueError
def get_query_set(obj):
""" Returns a QuerySet for a given QuerySet, Manager, Model, or an object with a get_query_set() method. """
if isinstance(obj, models.query.QuerySet):
return obj
if isinstance(obj, models.Manager):
return obj.all()
if isinstance(obj, models.base.ModelBase):
return obj._default_manager.all()
if hasattr(obj, 'get_query_set'):
return obj.get_query_set()
raise ValueError
def fetch(qs, *args):
qs = get_query_set(qs)
for arg in args:
try:
q = get_q(arg)
except ValueError:
if callable(arg):
try:
arg = arg()
except qs.model.DoesNotExist:
continue
if isinstance(arg, Exception):
raise arg
return arg
try:
return qs.get(q)
except qs.model.DoesNotExist:
pass
raise qs.model.DoesNotExist()
def _collect_sub_models(model, abstract, proxy, virtual, direct, sub_models):
for subclass in model.__subclasses__():
if (abstract or not subclass._meta.abstract) and (proxy or not subclass._meta.proxy) and (virtual or not getattr(subclass._meta, 'virtual', False)):
sub_models.add(subclass)
if direct:
continue
_collect_sub_models(subclass, abstract, proxy, virtual, direct, sub_models)
return sub_models
_sub_models_cache = {}
def get_sub_models(model, abstract=False, proxy=False, virtual=False, direct=False):
cache_key = (model, abstract, proxy, direct)
if cache_key not in _sub_models_cache:
_sub_models_cache[cache_key] = _collect_sub_models(model, abstract, proxy, virtual, direct, set())
return _sub_models_cache[cache_key]
# django.db.models.sql.Query utilities
def force_empty(query):
query.add_extra(None, None, ("1=0",), None, None, None)
def remove_join(query, alias, traceless=False):
"""Removes the join from query.join_map, query.alias_map, and query.rev_join_map.
If `traceless=True`, removes it from query.tables and query.alias_refcount as well."""
t_ident = query.rev_join_map[alias]
jm_list = list(query.join_map[t_ident])
jm_list.remove(alias)
query.join_map[t_ident] = tuple(jm_list)
del query.rev_join_map[alias]
del query.alias_map[alias]
if traceless:
query.tables.remove(alias)
del query.alias_refcount[alias]
def forge_join(query, table, alias, lhs, lhs_alias, lhs_col, col, nullable=False, join_type=None):
"""Updates query.join_map, query.alias_map, and query.rev_join_map.
This can be used to replace an existing join or to create a new join."""
if not join_type:
join_type = query.INNER
query.alias_map[alias] = (table, alias, join_type, lhs_alias, lhs_col, col, nullable)
t_ident = (lhs, table, lhs_col, col)
if t_ident in query.join_map:
query.join_map[t_ident] += (alias,)
else:
query.join_map[t_ident] = (alias,)
query.rev_join_map[alias] = t_ident
# Misc
def replace_text(pattern, replacement, model):
if isinstance(pattern, (unicode, str)):
pattern = re.compile(pattern)
fields = []
for field in model._meta.fields:
if isinstance(field, (models.TextField, models.CharField)):
fields.append(field.name)
for obj in get_query_set(model):
for field in fields:
val = getattr(obj, field)
if val and pattern.search(val):
val = pattern.sub(replacement, val)
setattr(obj, field, val)
obj.save()
def unordered_pairs(qs):
objects = list(qs.order_by())
for a in objects:
for b in objects:
if b.pk > a.pk:
yield a, b
def create_intermediate_model(cls, rel_name, attrs, bases=None, meta=None):
if not meta:
meta = {}
if not bases:
bases = (models.Model,)
meta.setdefault('app_label', cls._meta.app_label)
attrs['Meta'] = type('Meta', (object,), meta)
attrs.setdefault('__module__', cls.__module__)
return type("%s%s" % (cls.__name__, camel_case(rel_name, True)), bases, attrs)
def clean_slice(s, count_func, replace_none=False, allow_step=True):
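# Normalize a slice against a collection length; count_func may be a callable so the length
# is only computed when None or negative bounds actually require it.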
start, stop, step = s.start, s.stop, s.step
if callable(count_func):
count = None
else:
count = count_func
if start is None:
if replace_none:
start = 0
elif start < 0:
if count is None:
count = count_func()
start = max(count + start, 0)
if stop is None:
if replace_none:
if count is None:
count = count_func()
stop = count
elif stop < 0:
if count is None:
count = count_func()
stop = max(count + stop, 0)
if step is None and replace_none:
step = 1
if step and allow_step is not True and step not in allow_step:
raise ValueError("unsupported slice.step")
return slice(start, stop, step)
def get_model_ref(model):
if isinstance(model, str):
return model
return "%s.%s" % (model._meta.app_label, model.__name__)
def get_app_path(module):
for app in settings.INSTALLED_APPS:
if app.startswith(module):
if app == module or module[len(app)] == '.':
return app
raise ValueError
def get_app_label(module):
return get_app_path(module).rsplit('.', 1)[-1]
_pending_reference_lookups = {}
def get_by_ref(s, callback, module=None, app_label=None, field=None):
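# Intended to resolve a model (or model field) from a string reference such as
# 'app_label.Model', 'app_label.Model.field' or 'Model@module'; if the model class is not
# prepared yet, the callback is deferred until its class_prepared signal fires.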
if '@' in s:
model, module_name = s.split('@')
if module_name:
app_label = None
module = module_name
if '.' in model:
model, field = model.split('.')
else:
bits = s.rsplit('.', 3)
if len(bits) == 1:
model = bits[0]
elif len(bits) == 2:
app_label, model = bits
else:
app_label, model, field = bits
if module and not app_label:
app_label = get_app_label(module)
if app_label:
from django.db.models.loading import get_model
model = get_model(app_label, model)
else:
from django.db.models.loading import get_models
for m in get_models():
if m.__name__ == model:
model = m
break
if not model:
_pending_reference_lookups.setdefault((app_label, model), []).append((field, callback))
else:
_fire_ref_callback(model, field, callback)
def _fire_ref_callback(model, field, callback):
if field:
callback(model._meta.get_field(field))
else:
callback(model)
def _do_pending_reference_lookups(sender, **kwargs):
for field, callback in _pending_reference_lookups.pop((sender._meta.app_label, sender.__name__), []):
_fire_ref_callback(sender, field, callback)
models.signals.class_prepared.connect(_do_pending_reference_lookups) | mit | 3,695,699,204,477,365,000 | 30.968661 | 156 | 0.582888 | false |
sql-machine-learning/sqlflow | python/runtime/pai/pai_ml/random_forest.py | 1 | 3027 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from runtime import db
def get_train_random_forest_pai_cmd(model_name, data_table, model_attrs,
feature_column_names, label_name):
"""Get a command to submit a KMeans training task to PAI
Args:
model_name: model name on PAI
data_table: input data table name
model_attrs: model attributes for KMeans
feature_column_names: names of feature columns
label_name: name of the label column
Returns:
A string which is a PAI cmd
"""
# default use numTrees = 1
tree_num = model_attrs.get("tree_num", 1)
assert isinstance(tree_num, six.integer_types), \
"tree_num must be an integer"
feature_cols = ",".join(feature_column_names)
return '''pai -name randomforests -DinputTableName="%s" -DmodelName="%s"
-DlabelColName="%s" -DfeatureColNames="%s" -DtreeNum="%d"''' % (
data_table, model_name, label_name, feature_cols, tree_num)
def get_explain_random_forest_pai_cmd(datasource, model_name, data_table,
result_table, label_column):
"""Get a command to submit a PAI RandomForest explain task
Args:
datasource: current datasoruce
model_name: model name on PAI
data_table: input data table name
result_table: name of the result table, PAI will automatically
create this table
label_column: name of the label column
Returns:
A string which is a PAI cmd
"""
# NOTE(typhoonzero): for PAI random forests predicting, we can not load
# the TrainStmt since the model saving is fully done by PAI. We directly
# use the columns in SELECT statement for prediction, error will be
# reported by PAI job if the columns not match.
if not label_column:
return ("must specify WITH label_column when using "
"pai random forest to explain models")
conn = db.connect_with_data_source(datasource)
schema = db.get_table_schema(conn, data_table)
columns = [f[0] for f in schema]
conn.execute("DROP TABLE IF EXISTS %s;" % result_table)
return (
"""pai -name feature_importance -project algo_public """
"""-DmodelName="%s" -DinputTableName="%s" -DoutputTableName="%s" """
"""-DlabelColName="%s" -DfeatureColNames="%s" """
) % (model_name, data_table, result_table, label_column, ",".join(columns))
| apache-2.0 | 8,734,393,181,451,080,000 | 39.905405 | 79 | 0.663693 | false |
pawelmhm/splash | splash/html_element.py | 1 | 12235 | from __future__ import absolute_import
from functools import wraps
from splash.exceptions import DOMError
from splash.jsutils import escape_js
from splash.casperjs_utils import (
VISIBLE_JS_FUNC,
ELEMENT_INFO_JS,
FIELD_VALUE_JS,
FORM_VALUES_JS,
SET_FIELD_VALUE_JS
)
DIMENSIONS_JS_FUNC = """
(function(elem) {
var rect = elem.getClientRects()[0];
return {"x":rect.left, "y": rect.top, "width": rect.width, "height": rect.height}
})(%s)
"""
FETCH_TEXT_JS_FUNC = """
(function(elem) {
return (elem.textContent || elem.innerText || elem.value || '').trim();
})(%s)
"""
FILL_FORM_VALUES_JS = """
function (form, values, setFieldValue) {
Object.keys(values).forEach(function (name) {
var selector = "[name='" + name + "']";
setFieldValue(selector, values[name], form);
});
}
"""
def empty_strings_as_none(meth):
@wraps(meth)
def change_return_value_to_none_for_empty_string(*args, **kwargs):
retval = meth(*args, **kwargs)
return None if retval == '' else retval
return change_return_value_to_none_for_empty_string
def escape_js_args(*args):
return ','.join([
arg.element_js if isinstance(arg, HTMLElement) else escape_js(arg)
for arg in args
])
class HTMLElement(object):
""" Class for manipulating DOM HTML Element """
def __init__(self, tab, storage, event_handlers_storage, events_storage,
node_id):
self.tab = tab
self.storage = storage
self.event_handlers_storage = event_handlers_storage
self.events_storage = events_storage
self.id = node_id
self.element_js = self.get_element_js()
msg = "HTMLElement is created with id=%s in object %s" % (
self.id, self.element_js
)
self.tab.logger.log(msg, min_level=4)
def get_element_js(self):
""" Return JS object to which the element is assigned. """
return 'window["%s"]["%s"]' % (self.storage.name, self.id)
def assert_element_exists(self):
""" Raise exception if the element no longer exists in DOM. """
if not self.exists():
raise DOMError({
'type': DOMError.NOT_IN_DOM_ERROR,
'message': "Element no longer exists in DOM"
})
def assert_node_type(self, node_type):
"""
Raise an exception if the type of the element doesn't match node_type.
"""
actual_type = self.node_property('nodeName').lower()
if actual_type != node_type.lower():
raise DOMError({
'type': DOMError.NOT_COMPATIBLE_NODE_ERROR,
'message': "Node should be {!r}, but got {!r}".format(
node_type, actual_type)
})
def exists(self):
""" Return flag indicating whether element is in DOM """
exists = self.tab.evaljs("document.contains(%s)" % self.element_js)
return bool(exists)
@empty_strings_as_none
def node_property(self, property_name):
""" Return value of the specified property of the element """
return self.tab.evaljs(u"{element}[{property}]".format(
element=self.element_js,
property=escape_js(property_name)
))
@empty_strings_as_none
def set_node_property(self, property_name, property_value):
""" Set value of the specified property of the element """
return self.tab.evaljs(u"{element}[{property}] = {value}".format(
element=self.element_js,
property=escape_js(property_name),
value=escape_js(property_value)
))
def get_node_style(self, property_name):
""" Get value of the style property of the element """
return self.tab.evaljs(u"{element}.style[{property}]".format(
element=self.element_js,
property=escape_js(property_name),
))
def set_node_style(self, property_name, property_value):
""" Set value of the style property of the element """
return self.tab.evaljs(u"{element}.style[{property}] = {value}".format(
element=self.element_js,
property=escape_js(property_name),
value=escape_js(property_value)
))
def node_method(self, method_name):
""" Return function which calls the specified method of the element """
@empty_strings_as_none
def call(*args):
return self.tab.evaljs(u"{element}[{method}]({args})".format(
element=self.element_js,
method=escape_js(method_name),
args=escape_js_args(*args)
))
return call
def mouse_click(self, x=0, y=0, button="left"):
""" Click on the element """
self.assert_element_exists()
dimensions = self._get_dimensions()
self.tab.mouse_click(dimensions["x"] + x, dimensions["y"] + y, button)
def mouse_hover(self, x=0, y=0):
""" Hover over the element """
self.assert_element_exists()
dimensions = self._get_dimensions()
self.tab.mouse_hover(dimensions["x"] + x, dimensions["y"] + y)
def _get_dimensions(self):
return self.tab.evaljs(DIMENSIONS_JS_FUNC % self.element_js)
def styles(self):
""" Return computed styles of the element """
return self.tab.evaljs("getComputedStyle(%s)" % self.element_js)
def bounds(self):
""" Return bounding client rectangle of the element"""
return self.tab.evaljs("%s.getBoundingClientRect()" % self.element_js)
def png(self, width=None, scale_method=None, pad=None):
""" Return screenshot of the element in PNG format.
Optional `pad` can be provided which can be in two formats:
- integer containing amount of pad for all sides
(top, left, bottom, right)
- tuple with `left`, `top`, `right`, `bottom` integer
values for padding
Padding value can be negative which means that the image will be cropped.
"""
if not self.exists() or not self.visible():
return None
region = _bounds_to_region(self.bounds(), pad)
return self.tab.png(width, region=region, scale_method=scale_method)
def jpeg(self, width=None, scale_method=None, quality=None, pad=None):
""" Return screenshot of the element in JPEG format.
Optional `pad` can be provided which can be in two formats:
- integer containing amount of pad for all sides
(top, left, bottom, right)
- tuple with `left`, `top`, `right`, `bottom` integer
values for padding
Padding value can be negative which means that the image will be cropped.
"""
if not self.exists() or not self.visible():
return None
region = _bounds_to_region(self.bounds(), pad)
return self.tab.jpeg(width, region=region, scale_method=scale_method,
quality=quality)
def visible(self):
""" Return flag indicating whether element is visible """
self.assert_element_exists()
return self.tab.evaljs(u"({visible_func})({element})".format(
visible_func=VISIBLE_JS_FUNC,
element=self.element_js
))
def text(self):
""" Return text of the element """
return self.tab.evaljs(FETCH_TEXT_JS_FUNC % self.element_js)
def info(self):
""" Return information about the element """
return self.tab.evaljs(u"({element_info_func})({element}, {visible_func})".format(
element_info_func=ELEMENT_INFO_JS,
element=self.element_js,
visible_func=VISIBLE_JS_FUNC
))
def field_value(self):
""" Return the value of the element if it is a field """
return self.tab.evaljs(u"({field_value_func})({element})".format(
field_value_func=FIELD_VALUE_JS,
element=self.element_js
))
def form_values(self, values='auto'):
""" Return all values of the element if it is a form"""
self.assert_node_type('form')
return self.tab.evaljs(u"({form_values_func})({element}, {values}, {field_value_func})".format(
form_values_func=FORM_VALUES_JS,
field_value_func=FIELD_VALUE_JS,
values=escape_js(values),
element=self.element_js
))
def fill(self, values):
""" Fill the values of the element """
return self.tab.evaljs(u"({fill_form_values_func})({element}, {values}, {set_field_value})".format(
fill_form_values_func=FILL_FORM_VALUES_JS,
element=self.element_js,
values=escape_js(values),
set_field_value=SET_FIELD_VALUE_JS
))
def send_keys(self, text):
""" Send key events to the element separated by whitespaces """
if not self.focused():
self.mouse_click()
self.tab.send_keys(text)
def send_text(self, text):
""" Send text to the element """
if not self.focused():
self.mouse_click()
self.tab.send_text(text)
def focused(self):
""" Return True if the current element is focused """
return self.tab.evaljs(
"{} === document.activeElement".format(self.element_js)
)
def set_event_handler(self, event_name, handler):
""" Set on-event type event listeners to the element """
handler_id = self.event_handlers_storage.add(handler)
func = u"window[{storage_name}][{func_id}]".format(
storage_name=escape_js(self.event_handlers_storage.name),
func_id=escape_js(handler_id),
)
self.tab.evaljs(u"{element}['on' + {event_name}] = {func}".format(
element=self.element_js,
event_name=escape_js(event_name),
func=func
))
return handler_id
def unset_event_handler(self, event_name, handler_id):
""" Remove on-event type event listeners from the element """
self.tab.evaljs(u"{element}['on' + {event_name}] = null".format(
element=self.element_js,
event_name=escape_js(event_name),
))
self.event_handlers_storage.remove(handler_id)
def add_event_handler(self, event_name, handler, options=None):
""" Add event listeners to the element for the specified event """
handler_id = self.event_handlers_storage.add(handler)
func = u"window[{storage_name}][{func_id}]".format(
storage_name=escape_js(self.event_handlers_storage.name),
func_id=escape_js(handler_id),
)
self.tab.evaljs(u"{element}.addEventListener({event_name}, {func}, {options})".format(
element=self.element_js,
event_name=escape_js(event_name),
func=func,
options=escape_js(options)
))
return handler_id
def remove_event_handler(self, event_name, handler_id):
"""
Remove event listeners from the element for the specified event
and handler.
"""
func = u"window[{storage_name}][{func_id}]".format(
storage_name=escape_js(self.event_handlers_storage.name),
func_id=escape_js(handler_id),
)
self.tab.evaljs(u"{element}.removeEventListener({event_name}, {func})".format(
element=self.element_js,
event_name=escape_js(event_name),
func=func
))
self.event_handlers_storage.remove(handler_id)
def submit(self):
""" Submit form element """
self.assert_node_type('form')
self.node_method('submit')()
def _padded(region, pad):
"""
    >>> _padded([1, 1, 4, 4], [0, 1, 2, 3])
(1, 0, 6, 7)
>>> _padded([1, 1, 4, 4], 2)
(-1, -1, 6, 6)
"""
if not pad:
return region
if isinstance(pad, (int, float)):
pad = (pad, pad, pad, pad)
return (
region[0] - pad[0],
region[1] - pad[1],
region[2] + pad[2],
region[3] + pad[3]
)
def _bounds_to_region(bounds, pad):
region = bounds["left"], bounds["top"], bounds["right"], bounds["bottom"]
return _padded(region, pad)
| bsd-3-clause | -242,330,115,481,414,270 | 33.758523 | 107 | 0.586105 | false |
wdecoster/nanoget | nanoget/extraction_functions.py | 1 | 18527 | import logging
from functools import reduce
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import re
from Bio import SeqIO
import concurrent.futures as cfutures
from itertools import repeat
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals", "barcode"]
else:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
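# Illustrative usage sketch (not part of the original module): the summary file
# name below is a hypothetical placeholder; "readtype" and "barcoded" are the
# keyword arguments process_summary() expects.
def _example_process_summary():
    """Hedged sketch: build a per-read DataFrame from a sequencing summary file."""
    df = process_summary("sequencing_summary.txt", readtype="1D", barcoded=False)
    # One row per read with non-zero length; e.g. df["lengths"].median() gives
    # the median read length and df["quals"].mean() the mean read quality.
    return df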
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
    - exist
    - have an index (one is created if missing)
    - be sorted by coordinate
    - have at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
    Extracts read identifiers, lengths and average base qualities.
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, ut.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
if len(chromosomes) > 100 or kwargs["huge"]:
logging.info("Nanoget: lots of contigs (>100) or --huge, not running in separate processes")
datadf = pd.DataFrame(
data=extract_from_bam(bam, None, kwargs["keep_supp"]),
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(bam),
unit,
repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: bam {bam} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
if len(chromosomes) > 100:
unit = [None]
logging.info("Nanoget: lots of contigs (>100), not running in separate processes")
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(cram), unit, repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: cram {cram} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def extract_from_bam(bam, chromosome, keep_supplementary=True):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
samfile = pysam.AlignmentFile(bam, "rb")
if keep_supplementary:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped]
else:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped and not read.is_supplementary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
match = reduce(lambda x, y: x + y[1] if y[0] in (0, 7, 8) else x, read.cigartuples, 0)
ins = reduce(lambda x, y: x + y[1] if y[0] == 1 else x, read.cigartuples, 0)
delt = reduce(lambda x, y: x + y[1] if y[0] == 2 else x, read.cigartuples, 0)
alignment_length = match + ins + delt
try:
return (1 - read.get_tag("NM") / alignment_length) * 100
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) /
alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
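# Worked example (illustrative, not from the original source): for a read with
# CIGAR 90M5I5D the alignment length is 90 (M) + 5 (I) + 5 (D) = 100 columns.
# With an edit-distance tag NM=8, get_pID() returns (1 - 8/100) * 100 = 92.0,
# i.e. 92% identity. Without NM, mismatched/deleted bases are counted from the
# MD tag and inserted bases from the CIGAR, over the same denominator.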
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
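# Illustrative usage sketch (not part of the original module): the file names
# are hypothetical placeholders and must exist on disk for the calls to succeed.
def _example_handle_compressed_input():
    """Hedged sketch: the same call yields a text handle for plain or gzipped input."""
    plain_handle = handle_compressed_input("reads.fastq")
    gzipped_handle = handle_compressed_input("reads.fastq.gz")
    # Either handle can be passed straight to Bio.SeqIO.parse(handle, "fastq").
    return plain_handle, gzipped_handle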
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield ut.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
    -read_length
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
ut.ave_qual(rec.letter_annotations["phred_quality"]),
None)
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 / RFC 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(ut.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
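# Illustrative usage sketch (not part of the original module): readfq() yields
# (name, sequence, quality) tuples, with quality None for fasta records. The
# input file name is a hypothetical placeholder.
def _example_readfq():
    """Hedged sketch for the readfq() generator."""
    with open("reads.fastq") as handle:
        for name, seq, qual in readfq(handle):
            print(name, len(seq), qual is not None)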
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
    Quickly parse a fasta/fastq file - but it makes assumptions about the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
| gpl-3.0 | -1,263,447,835,598,286,300 | 38.169133 | 100 | 0.616344 | false |
richteer/wambda | creature.py | 1 | 2038 |
class Creature():
_maxhealth = 1
_health = 1
_speed = 1
_ch = '='
_zone = None
_gold = 0
_name = "wat"
_initiative = 0
_x = 0
_y = 0
def __init__(self, hp, speed, ch, name):
self._health = hp
self._maxhealth = hp
self._speed = speed
self._ch = ch
self._name = name
def _set_pos(self, y,x):
self._x = x
self._y = y
	def _tick(self):
		p = None  # will hold the Player, if one is present in the zone
		for c in self._zone.creatures:
			if c.__class__.__name__ == "Player":
				p = c
if not p:
return None
dx = self._x - p._x
dy = self._y - p._y
if (dx == 0 and abs(dy) == 1) or (abs(dx) == 1 and dy == 0):
p._takedamage(1, source=self)
elif abs(dx) > abs(dy):
if dx < 0:
self.move_right()
else:
self.move_left()
elif abs(dx) < abs(dy):
if dy < 0:
self.move_down()
else:
self.move_up()
def _move(self, y, x):
self._zone.tiles[self._y][self._x].creature = None
self._x += x
self._y += y
self._zone.tiles[self._y][self._x].creature = self
def _trymove(self, y, x):
return not (self._zone.tiles[y][x].solid or self._zone.tiles[y][x].creature)
def _takedamage(self, amount, source=None):
self._health -= amount
		if self._health <= 0:
self._zone.tiles[self._y][self._x].creature = None
return True
return False
def move_left(self):
if self._trymove(self._y, self._x-self._speed):
self._move(0,-self._speed)
def move_right(self):
if self._trymove(self._y, self._x+self._speed):
self._move(0,self._speed)
def move_up(self):
if self._trymove(self._y-self._speed, self._x):
self._move(-self._speed,0)
def move_down(self):
if self._trymove(self._y+self._speed, self._x):
self._move(self._speed,0)
def pickup(self):
it = self._zone.tiles[self._y][self._x].item
self._zone.tiles[self._y][self._x].item = None
if it:
if hasattr(it, "gold"):
self._gold += it.gold
else:
setattr(self, it.func.__name__, it.func)
else:
pass
# TODO: This
def gencreatures(zone):
c = Creature(2,1,'E',"Emu")
c._initiative = 1
c._set_pos(13,7)
zone.add_creature(c)
| gpl-2.0 | 6,510,516,860,936,087,000 | 18.786408 | 78 | 0.580471 | false |
littlecodersh/itchatmp | itchatmp/controllers/mpapi/mp/users.py | 1 | 8744 | ''' This package is for user managing in wechat mp
1. What can we do?
- set tags and everything around them
        - get all your users (of course not all at once) or get detailed information about one
- set blacklist and everything around it
    2. I also listed the APIs for you:
- TAGS
create_tag
get_tags
update_tag
delete_tag
get_users_of_tag
add_users_into_tag
delete_users_of_tag
get_tags_of_user
- USER INFO
get_users
get_user_info
set_alias
- BLACKLIST
get_blacklist
add_users_into_blacklist
delete_users_of_blacklist
'''
import logging
from ..requests import requests
from itchatmp.utils import retry, encode_send_dict
from itchatmp.config import SERVER_URL
from itchatmp.content import (
IMAGE, VOICE, VIDEO, MUSIC, TEXT, NEWS, CARD)
from itchatmp.returnvalues import ReturnValue
logger = logging.getLogger('itchatmp')
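# Illustrative usage sketch (not part of the original module): a typical tag
# workflow. The tag name, tag id and openids below are hypothetical placeholders.
def _example_tag_workflow():
    ''' create a tag, attach two users to it, then list its members '''
    create_tag('vip-users')
    add_users_into_tag(100, userIdList=['OPENID_1', 'OPENID_2'])
    return get_users_of_tag(100)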
def create_tag(name, id=None, accessToken=None):
''' create_tag
* id is for qy only
'''
data = encode_send_dict({'tag': {'name': name}})
if data is None:
return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tags/create?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'tag' in result:
result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def get_tags(accessToken=None):
r = requests.get('%s/cgi-bin/tags/get?access_token=%s'
% (SERVER_URL, accessToken))
def _wrap_result(result):
result = ReturnValue(result.json())
if 'tags' in result:
result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def update_tag(id, name, accessToken=None):
data = encode_send_dict({'tag': {'name': name, 'id': id}})
if data is None:
return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tags/update?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
def delete_tag(id, accessToken=None):
data = encode_send_dict({'tag': {'id': id}})
if data is None:
return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tags/delete?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
def get_users_of_tag(id, nextOpenId='', accessToken=None):
data = encode_send_dict({'tagid': id, 'next_openid': nextOpenId})
if data is None:
return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/user/tag/get?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'count' in result:
result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def add_users_into_tag(id, userIdList=None, partyList=None, accessToken=None):
if not userIdList:
return ReturnValue({'errcode': 40035, 'errmsg': 'must have one userId'})
data = encode_send_dict({'openid_list': userIdList, 'tagid': id})
if data is None:
return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tags/members/batchtagging?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
def delete_users_of_tag(id, userIdList=None, partyList=None, accessToken=None):
if not userIdList: return ReturnValue({'errcode': 40035, 'errmsg': 'must have one userId'})
data = encode_send_dict({'tagid': id, 'openid_list': userIdList})
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tags/members/batchuntagging?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
def get_tags_of_user(userId, accessToken=None):
data = encode_send_dict({'openid': userId})
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tags/getidlist?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'tagid_list' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def set_alias(userId, alias, accessToken=None):
''' this method is for verified service mp only '''
data = encode_send_dict({'openid': userId, 'remark': alias})
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/user/info/updateremark?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
def get_user_info(userId, accessToken=None):
''' get info of a user or a list of users
* userId can be a list or only one userId
'''
def _batch_get_user_info(userId, accessToken=None):
data = {'user_list': [{'openid': id, 'lang': 'zh-CN'} for id in userId]}
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/user/info/batchget?access_token=%s'
% (SERVER_URL, accessToken), data=data)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'user_info_list' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
        return r  # return the wrapped request object, consistent with _get_user_info
def _get_user_info(userId, accessToken=None):
params = {
'access_token': accessToken,
'openid': userId,
'lang': 'zh_CN', }
r = requests.get('%s/cgi-bin/user/info' % SERVER_URL, params=params)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'openid' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
if isinstance(userId, list):
return _batch_get_user_info(userId, accessToken)
else:
return _get_user_info(userId, accessToken)
def get_users(nextOpenId='', departmentId=None, fetchChild=False, status=4, accessToken=None):
''' get users from nextOpenId
* departmentId, fetchChild, status is for qy api
'''
params = {
'access_token': accessToken,
'next_openid': nextOpenId, }
r = requests.get('%s/cgi-bin/user/get' % SERVER_URL, params=params)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'data' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def get_blacklist(beginOpenId='', accessToken=None):
data = {'begin_openid': beginOpenId}
data = encode_send_dict(data)
r = requests.post('%s/cgi-bin/tags/members/getblacklist?access_token=%s' %
(SERVER_URL, accessToken), data=data)
def _wrap_result(result):
result = ReturnValue(result.json())
if 'data' in result: result['errcode'] = 0
return result
r._wrap_result = _wrap_result
return r
def add_users_into_blacklist(userId, accessToken=None):
''' userId can be a userId or a list of userId '''
if not isinstance(userId, list):
userId = [userId]
data = {'openid_list': userId}
data = encode_send_dict(data)
r = requests.post('%s/cgi-bin/tags/members/batchblacklist?access_token=%s' %
(SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
def delete_users_of_blacklist(userId, accessToken=None):
''' userId can be a userId or a list of userId '''
if not isinstance(userId, list):
userId = [userId]
data = {'openid_list': userId}
data = encode_send_dict(data)
r = requests.post('%s/cgi-bin/tags/members/batchunblacklist?access_token=%s' %
(SERVER_URL, accessToken), data=data)
def _wrap_result(result):
return ReturnValue(result.json())
r._wrap_result = _wrap_result
return r
| mit | 5,263,644,145,367,471,000 | 35.689655 | 95 | 0.612534 | false |
shahabsaf1/Python | plugins/chatter.py | 1 | 1250 | # -*- coding: utf-8 -*-
from utils import *
from cleverbot import Cleverbot
from HTMLParser import HTMLParser
# If someone mentions the bot's username, it replies
commands = [('@' + bot['username'].lower())]
action = 'typing'
hidden = True
def run(msg):
input = msg['text'].replace(bot['first_name'] + ' ', '')
cb = Cleverbot()
unescape = HTMLParser().unescape
try:
message = unescape(cb.ask(input))
except:
message = u'🙃'
send_message(msg['chat']['id'], message, reply_to_message_id = msg['message_id'])
def process(msg):
if ('reply_to_message' in msg and
'text' in msg['reply_to_message'] and
'text' in msg):
if (str(msg['chat']['id']) in groups and
groups[str(msg['chat']['id'])]['special'] != 'log'):
if (msg['reply_to_message']['from']['id'] == bot['id'] and
not msg['text'].startswith(config['command_start'])):
msg['text'] = '@' + bot['username'] + ' ' + msg['text']
if ('text' in msg and
msg['chat']['type'] == 'private' and
not msg['text'].startswith(config['command_start'])):
msg['text'] = '@' + bot['username'] + ' ' + msg['text']
| gpl-2.0 | -4,327,251,469,896,459,000 | 29.974359 | 85 | 0.531676 | false |
gpmidi/zabbixMongoDB | src/mon/zbx/comm/metrics/discovery.py | 1 | 3356 | '''
Created on Jan 12, 2014
@author: Paulson McIntyre (GpMidi) <[email protected]>
'''
# Built-in
import time
import logging
import socket
import collections
import re
try:
# Python 3.x?
import json # @UnresolvedImport
except:
# Python 2.7
import simplejson as json # @UnresolvedImport @Reimport
# Others
# Ours
from mon.zbx.comm.metric import Metric
class DiscoveryMetric(Metric):
""" Helps with low-level item/graph/etc discovery
"""
DEFAULT_KEY = lambda metric: "discovery.default"
DEFAULT_VALUE = lambda metric: json.dumps(dict(data = []))
ENFORCE_METRIC_MACRO = True
ENFORCE_METRIC_MACRO_MATCH = re.compile(r'^\{\#[A-Z0-9_\-]+\}$')
def __init__(self, discovered = None, value = None, **kwargs):
self.discovered = discovered
super(DiscoveryMetric, self).__init__(value = value, **kwargs)
def getDiscovered(self):
if self.discovered is None:
self.discovered = []
assert isinstance(self.discovered, collections.Iterable), "Expected discovered to be itterable. Got %r. " % self.discovered
# TODO: Add handling of discovered not being itterable
if self.ENFORCE_METRIC_MACRO:
for discovered in self.discovered:
assert isinstance(discovered, dict), "Expected discovered type of %r to be a dict" % discovered
for k,v in discovered.items():
assert self.ENFORCE_METRIC_MACRO_MATCH.match(k), "Metric's macro name is invalid. Got %r" % k
assert isinstance(v, str) or isinstance(v, unicode), "Metric value %r isn't a string" % v
return self.discovered
def addMetric(self, **macros):
if self.discovered is None:
self.discovered = []
assert len(macros)>0,"Expected at least one macro to be set. Got %r"%macros
if self.ENFORCE_METRIC_MACRO:
for k,v in macros.items():
assert self.ENFORCE_METRIC_MACRO_MATCH.match(k), "Metric's macro name is invalid. Got %r" % k
assert isinstance(v, str) or isinstance(v, unicode), "Metric value %r isn't a string" % v
self.discovered.append(macros)
def clearMetrics(self):
self.discovered = []
def getValue(self):
asObjs = dict(data = self.getDiscovered())
asStr = json.dumps(asObjs, indent = 0).replace(' ', '\t')\
.replace('{\n', '\t{\n').replace('\n}, \n', '\t}\n\t,\n')\
.replace('"{#', '\t\t"{#').replace('\n}\n', '\t}\n')\
.replace('": "', '":"').replace('\t{\n\"data\": ', '{\n\"data\":')
self.l.log(3, "Created JSON %r from %r", asStr, asObjs)
return asStr
def toZbxJSON(self):
""" Convert to JSON that Zabbix will accept """
        # Zabbix has a very fragile JSON parser, so we cannot use json to dump the whole packet
ret = (
'\t\t{\n'
'\t\t\t"host":%s,\n'
'\t\t\t"key":%s,\n'
'\t\t\t"value":%s,\n'
'\t\t\t"clock":%s}'
) % (
json.dumps(self.getHost()),
json.dumps(self.getKey()),
json.dumps(self.getValue()),
self.getClock(),
)
self.l.log(3, "Serialized %r to %r", self, ret)
return ret
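# Illustrative usage sketch (not part of the original module). It assumes the
# base Metric class can be constructed with default host/key values; the macro
# name and database names below are hypothetical.
def _exampleDiscoveryUsage():
    """ Hedged sketch: build the JSON value for a Zabbix low-level discovery item """
    m = DiscoveryMetric()
    m.addMetric(**{'{#DBNAME}': 'admin'})
    m.addMetric(**{'{#DBNAME}': 'local'})
    # getValue() serializes to the {"data": [...]} structure Zabbix expects.
    return m.getValue()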
| gpl-2.0 | -6,271,459,724,271,895,000 | 35.879121 | 131 | 0.561979 | false |
cpknowles/BRG | black_rapids_forward_model/black_rapids_depth_integrated.py | 1 | 14468 | from pylab import *
from scipy.interpolate import RectBivariateSpline
from scipy import ndimage as nd
import numpy as np
import gdal
def fill(data, invalid=None):
"""
Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell
Input:
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'. True cells set where data
value should be replaced.
If None (default), use: invalid = np.isnan(data)
Output:
Return a filled array.
"""
#import numpy as np
#import scipy.ndimage as nd
if invalid is None: invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid, return_distances=False, return_indices=True)
return data[tuple(ind)]
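# Illustrative example (not part of the original script): fill() replaces each
# invalid cell with the value of its nearest valid neighbour, e.g.
# [1.0, nan, nan, 4.0] -> [1.0, 1.0, 4.0, 4.0]. Kept in an unused helper so it
# does not run during the simulation.
def _example_fill():
    demo = np.array([1.0, np.nan, np.nan, 4.0])
    return fill(demo)  # -> array([1., 1., 4., 4.])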
### DATA ###
data = gdal.Open('input_data_bed_v2/DEM_2010/ifsar_2010.tif')
S_array = data.ReadAsArray()[::-1,:]
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
S_spline = RectBivariateSpline(x,y,S_array.T,kx=1,ky=1,s=0)
data = gdal.Open('input_data_bed_v2/BED_MC/bed.tif')
B_array = data.ReadAsArray()[::-1,:]
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
B_spline = RectBivariateSpline(x,y,B_array.T,kx=1,ky=1,s=0)
data = gdal.Open('input_data_bed_v2/SMB_2010_2013/mb_field_25.tif')
adot_array = data.ReadAsArray()[::-1,:]
adot_array = fill(adot_array,adot_array==adot_array.min())
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
adot_spline = RectBivariateSpline(x,y,adot_array.T,kx=1,ky=1,s=0)
data = gdal.Open('input_data_bed_v2/DH_2010_2013/dhdt_weq_lower.tif')
dhdt_array = data.ReadAsArray()[::-1,:]
dhdt_array[dhdt_array<-1000] = 0
dhdt_array = fill(dhdt_array,dhdt_array==dhdt_array.min())
ncols = data.RasterXSize
nrows = data.RasterYSize
transf = data.GetGeoTransform()
x = arange(transf[0],transf[0]+transf[1]*data.RasterXSize,transf[1])
y = arange(transf[3],transf[3]+transf[5]*data.RasterYSize,transf[5])[::-1]
dhdt_spline = RectBivariateSpline(x,y,dhdt_array.T,kx=1,ky=1,s=0)
from dolfin import *
from ice_model_functions import *
##########################################################
################# SET PETSC OPTIONS ####################
##########################################################
PETScOptions.set("ksp_type","preonly")
PETScOptions.set("pc_type","lu")
PETScOptions.set("pc_factor_mat_solver_package","mumps")
PETScOptions.set("mat_mumps_icntl_14","1000")
PETScOptions.set("ksp_final_residual","0")
##########################################################
################# SET FENICS OPTIONS ###################
##########################################################
parameters['form_compiler']['quadrature_degree'] = 2
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['representation'] = 'quadrature'
#parameters['form_compiler']['precision'] = 30
parameters['allow_extrapolation'] = True
ffc_options = {"optimize": True, \
"eliminate_zeros": True, \
"precompute_basis_const": True, \
"precompute_ip_const": True}
##########################################################
#################### CONSTANTS #########################
##########################################################
# TIME
minute = 60.0
hour = 60*minute
day = 24*hour
year = 365*day
# CONSTANTS
rho = 917.
g = 9.81
# RHEOLOGICAL CONSTANTS
rho_i = 910.
n = 3.0
Bc = 3.61e-13*year
Bw = 1.73e3*year
Qc = 6e4
Qw = 13.9e4
Rc = 8.314
gamma = 8.7e-4
eps_reg = Constant(1e-10)
# THERMAL CONTANTS
k = 2.1*year
Cp = 2009.
kappa = k/(rho_i*Cp)
q_geo = 0.042*year
# ADJOINT REG
theta = Constant(1e-10)
# MASS
thklim = 10.
dt = Constant(0.001)
###################################################
########### GEOMETRY AND INPUT DATA ##############
###################################################
##### BOUNDARY DATA #####
class Beta2(Expression):
def eval(self,values,x):
values[0] = 11000.
class S_exp(Expression):
def eval(self,values,x):
values[0] = S_spline(x[0],x[1])
class B_exp(Expression):
def eval(self,values,x):
values[0] = B_spline(x[0],x[1])
class Adot_exp(Expression):
def eval(self,values,x):
values[0] = 1000.0/910.0*adot_spline(x[0],x[1])/3. # Christian provides these fields as mwe/3a, hence correction.
class Dhdt_exp(Expression):
def eval(self,values,x):
values[0] = 1000.0/910.0*dhdt_spline(x[0],x[1])/3.
mesh = Mesh('outline.xml')
# FUNCTION SPACES
Q = FunctionSpace(mesh,"CG",1) # SCALAR
Q2 = MixedFunctionSpace([Q,]*2)
V = MixedFunctionSpace([Q]*5) # VELOCITY + MASS
beta2 = interpolate(Beta2(),Q)
#### !!!!!! #### Note the distinction between effective and normal mass balance !
adot = interpolate(Adot_exp(),Q) - interpolate(Dhdt_exp(),Q) # Effective mass balance (near steady initially)
#adot = interpolate(Adot_exp(),Q) # True mass balance (way imbalanced)
B = interpolate(B_exp(),Q)
S_obs = interpolate(S_exp(),Q)
S0 = interpolate(S_exp(),Q)
H0 = Function(Q)
H0.vector()[:] = S_obs.vector()[:] - B.vector()[:] # Set initial thickness
# FUNCTIONS
U = Function(V)
Lamda = Function(V)
Phi = TestFunction(V)
dU = TrialFunction(V)
gamma = TestFunction(Q)
ubar,vbar,udef,vdef,H = split(U)
phibar,psibar,phidef,psidef,xsi = split(Lamda)
S = B+H
# METRICS FOR COORDINATE TRANSFORM
def dsdx(s):
return 1./H*(S.dx(0) - s*H.dx(0))
def dsdy(s):
return 1./H*(S.dx(1) - s*H.dx(1))
def dsdz(s):
return -1./H
p = 4
# TEST FUNCTION COEFFICIENTS
coef = [lambda s:1.0, lambda s:1./p*((p+1)*s**p - 1)]
dcoef = [lambda s:0, lambda s:(p+1)*s**(p-1)]
u_ = [ubar,udef]
v_ = [vbar,vdef]
phi_ = [phibar,phidef]
psi_ = [psibar,psidef]
u = VerticalBasis(u_,coef,dcoef)
v = VerticalBasis(v_,coef,dcoef)
phi = VerticalBasis(phi_,coef,dcoef)
psi = VerticalBasis(psi_,coef,dcoef)
# TERMWISE STRESSES AND NONLINEARITIES
def A_v():
return Constant(1e-16)
# 2nd INVARIANT STRAIN RATE
def epsilon_dot(s):
return ((u.dx(s,0) + u.ds(s)*dsdx(s))**2 \
+(v.dx(s,1) + v.ds(s)*dsdy(s))**2 \
+(u.dx(s,0) + u.ds(s)*dsdx(s))*(v.dx(s,1) + v.ds(s)*dsdy(s)) \
+0.25*((u.ds(s)*dsdz(s))**2 + (v.ds(s)*dsdz(s))**2 \
+ ((u.dx(s,1) + u.ds(s)*dsdy(s)) + (v.dx(s,0) + v.ds(s)*dsdx(s)))**2) \
+ eps_reg)
# VISCOSITY
def eta_v(s):
return A_v()**(-1./n)/2.*epsilon_dot(s)**((1.-n)/(2*n))
# MEMBRANE STRESSES
E = Constant(1.0)
def membrane_xx(s):
return (phi.dx(s,0) + phi.ds(s)*dsdx(s))*H*(E*eta_v(s))*(4*(u.dx(s,0) + u.ds(s)*dsdx(s)) + 2*(v.dx(s,1) + v.ds(s)*dsdy(s)))
def membrane_xy(s):
return (phi.dx(s,1) + phi.ds(s)*dsdy(s))*H*(E*eta_v(s))*((u.dx(s,1) + u.ds(s)*dsdy(s)) + (v.dx(s,0) + v.ds(s)*dsdx(s)))
def membrane_yx(s):
return (psi.dx(s,0) + psi.ds(s)*dsdx(s))*H*(E*eta_v(s))*((u.dx(s,1) + u.ds(s)*dsdy(s)) + (v.dx(s,0) + v.ds(s)*dsdx(s)))
def membrane_yy(s):
return (psi.dx(s,1) + psi.ds(s)*dsdy(s))*H*(E*eta_v(s))*(2*(u.dx(s,0) + u.ds(s)*dsdx(s)) + 4*(v.dx(s,1) + v.ds(s)*dsdy(s)))
# SHEAR STRESSES
def shear_xz(s):
return dsdz(s)**2*phi.ds(s)*H*eta_v(s)*u.ds(s)
def shear_yz(s):
return dsdz(s)**2*psi.ds(s)*H*eta_v(s)*v.ds(s)
# DRIVING STRESSES
def tau_dx():
return rho*g*H*S.dx(0)*Lamda[0]
def tau_dy():
return rho*g*H*S.dx(1)*Lamda[1]
def boundary_membrane_xx(s):
return phi(s)*H*(E*eta_v(s))*(4*(u.dx(s,0) + u.ds(s)*dsdx(s)) + 2*(v.dx(s,1) + v.ds(s)*dsdy(s)))
def boundary_membrane_xy(s):
return phi(s)*H*(E*eta_v(s))*((u.dx(s,1) + u.ds(s)*dsdy(s)) + (v.dx(s,0) + v.ds(s)*dsdx(s)))
def boundary_membrane_yx(s):
return psi(s)*H*(E*eta_v(s))*((u.dx(s,1) + u.ds(s)*dsdy(s)) + (v.dx(s,0) + v.ds(s)*dsdx(s)))
def boundary_membrane_yy(s):
return psi(s)*H*(E*eta_v(s))*(2*(u.dx(s,0) + u.ds(s)*dsdx(s)) + 4*(v.dx(s,1) + v.ds(s)*dsdy(s)))
N = FacetNormal(mesh)
# GET QUADRATURE POINTS (THIS SHOULD BE ODD: WILL GENERATE THE GAUSS-LEGENDRE RULE
# POINTS AND WEIGHTS OF O(n), BUT ONLY THE POINTS IN [0,1] ARE KEPT, DUE TO SYMMETRY)
points,weights = half_quad(11)
# INSTANTIATE VERTICAL INTEGRATOR
vi = VerticalIntegrator(points,weights)
# FIRST ORDER EQUATIONS
I_x = - vi.intz(membrane_xx) - vi.intz(membrane_xy) - vi.intz(shear_xz) - phi(1)*beta2*u(1) - tau_dx()
I_y = - vi.intz(membrane_yx) - vi.intz(membrane_yy) - vi.intz(shear_yz) - psi(1)*beta2*v(1) - tau_dy()
I = (I_x + I_y)*dx
### MASS BALANCE ###
# SUPG PARAMETERS
h = CellSize(mesh)
tau = h/(2.0*sqrt(U[0]**2 + U[1]**2 + 25.0))
Hmid = 0.5*H + 0.5*H0
xsihat = tau*(U[0]*xsi.dx(0) + U[1]*xsi.dx(1))
# STABILIZED CONTINUITY EQUATION
I += ((H - H0)/dt*xsi - (xsi.dx(0)*U[0]*Hmid + xsi.dx(1)*U[1]*Hmid) + xsihat*(U[0]*Hmid.dx(0) + U[1]*Hmid.dx(1) + Hmid*(U[0].dx(0) + U[1].dx(1))) - (adot)*(xsi + xsihat))*dx# + xsi*(U[0]*Hmid*N[0] + U[1]*Hmid*N[1])*ds(1)
I_misfit = theta*dot(grad(beta2),grad(beta2))*dx
I += I_misfit
# JACOBIAN FOR COUPLED MASS + MOMENTUM SOLVE
R = derivative(I,Lamda,Phi)
J = derivative(R,U,dU)
# Adjoint forms, if so desired
R_adj = derivative(I,U,Phi)
J_adj = derivative(R_adj,Lamda,dU)
G = derivative(I,beta2,gamma)
#####################################################################
######################### I/O Functions ###########################
#####################################################################
# For moving data between vector functions and scalar functions
assigner_inv = FunctionAssigner([Q,Q,Q,Q,Q],V)
assigner = FunctionAssigner(V,[Q,Q,Q,Q,Q])
assigner_vec = FunctionAssigner(Q2,[Q,Q])
#####################################################################
###################### Variational Solvers ########################
#####################################################################
# Positivity constraints and zero-flux boundary conditions don't play well together, so I enforce the former through a non-slip Dirichlet boundary condition on velocity. This is a little weird in the context of glaciers, but it's the only condition that will uphold mass conservation (there is still a fictitious momentum flux across the boundary, aka a non-real stress, but that's more acceptable to me).
bcs = [DirichletBC(V.sub(i),0,lambda x,on:on) for i in range(4)]
bc_2 = DirichletBC(V.sub(4),thklim,lambda x,o:(o and x[0]>393092) or (o and (x[1]>1.5273e6 and x[0]<372129 and x[0]>368953)))
mass_problem = NonlinearVariationalProblem(R,U,J=J,bcs=bcs+[bc_2],form_compiler_parameters=ffc_options)
mass_solver = NonlinearVariationalSolver(mass_problem)
mass_solver.parameters['nonlinear_solver'] = 'snes'
mass_solver.parameters['snes_solver']['method'] = 'vinewtonrsls'
mass_solver.parameters['snes_solver']['relative_tolerance'] = 1e-6
mass_solver.parameters['snes_solver']['absolute_tolerance'] = 1e-6
mass_solver.parameters['snes_solver']['maximum_iterations'] = 10
mass_solver.parameters['snes_solver']['error_on_nonconvergence'] = False
mass_solver.parameters['snes_solver']['linear_solver'] = 'mumps'
bc_adj_1 = DirichletBC(V,[0.0,0.0,0.0,0.0,0.0],lambda x,on:on)
bc_adj_2 = DirichletBC(V.sub(4),0.0,lambda x,on:on)
adj_problem = NonlinearVariationalProblem(R_adj,Lamda,J=J_adj,bcs=[bc_adj_1,bc_adj_2],form_compiler_parameters=ffc_options)
adj_solver = NonlinearVariationalSolver(adj_problem)
adj_solver.parameters['newton_solver']['relative_tolerance'] = 1e-3
adj_solver.parameters['newton_solver']['absolute_tolerance'] = 1e-3
adj_solver.parameters['newton_solver']['maximum_iterations'] = 3
adj_solver.parameters['newton_solver']['error_on_nonconvergence'] = False
adj_solver.parameters['newton_solver']['linear_solver'] = 'mumps'
#####################################################################
################## INITIAL CONDITIONS AND BOUNDS ##################
#####################################################################
l_thick_bound = project(Constant(thklim),Q)
u_thick_bound = project(Constant(1e4),Q)
l_v_bound = project(-10000.0,Q)
u_v_bound = project(10000.0,Q)
l_bound = Function(V)
u_bound = Function(V)
un = Function(Q)
u2n = Function(Q)
vn = Function(Q)
v2n = Function(Q)
lx = Function(Q)
l2x = Function(Q)
mx = Function(Q)
m2x = Function(Q)
p0 = Function(Q)
assigner.assign(U,[un,vn,u2n,v2n,H0])
assigner.assign(l_bound,[l_v_bound]*4+[l_thick_bound])
assigner.assign(u_bound,[u_v_bound]*4+[u_thick_bound])
results_dir = './results/'
Hfile_ptc = File(results_dir + 'H.pvd')
Ufile_ptc = File(results_dir + 'Us.pvd')
bfile_ptc = File(results_dir + 'beta2.pvd')
opt_dir = './results_opt/'
Ufile_opt = File(opt_dir + 'Us.pvd')
bfile_opt = File(opt_dir + 'beta2.pvd')
Us = project(as_vector([u(0),v(0)]))
assigner_inv.assign([lx,l2x,mx,m2x,p0],Lamda)
# Uncomment if you want to start from the end of the last run
#File(results_dir + 'U.xml') >> U
#H0_temp = project(H)
#H0.vector()[:] = H0_temp.vector()[:]
t = 2016.75
#Start slow for convergence. Due to an oddity in topography, this model will not converge for the first 5 or so iterations as it fills a hole, then will work fine after.
dt_schedule = [0.00001]*5 + [0.01]*10 + [0.1]*5 + [0.5]*100 + [1.0]*100
#Time stepping
solve(R==0, U, bcs=bcs+[bc_2])
assigner_inv.assign([un,vn,u2n,v2n,H0],U)
#H0_temp = project(H)
#H0.vector()[:] = H0_temp.vector()[:]
Us_temp = project(as_vector([u(0),v(0)]))
Us.vector()[:] = Us_temp.vector()[:]
S_temp = project(S)
S0.vector()[:] = S_temp.vector()[:]
Hfile_ptc << (H0,t)
Ufile_ptc << (Us,t)
bfile_ptc << (S0,t)
"""
for dts in dt_schedule:
dt.assign(dts)
t += dts
#mass_solver.solve(l_bound,u_bound)
solve(R==0, U, bcs=bcs+[bc_2])
assigner_inv.assign([un,vn,u2n,v2n,H0],U)
#H0_temp = project(H)
#H0.vector()[:] = H0_temp.vector()[:]
Us_temp = project(as_vector([u(0),v(0)]))
Us.vector()[:] = Us_temp.vector()[:]
S_temp = project(S)
S0.vector()[:] = S_temp.vector()[:]
Hfile_ptc << (H0,t)
Ufile_ptc << (Us,t)
bfile_ptc << (S0,t)
"""
File(results_dir + 'Ustar.xml') << U
| mit | 8,416,404,929,511,161,000 | 29.91453 | 406 | 0.593655 | false |
TradeHero/gitinspector | gitinspector/main.py | 1 | 2678 | #!/usr/bin/python
# coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
import localization
localization.init()
import basedir
import format
import os
import sys
import terminal
import procedure
import subprocess
class Runner:
def __init__(self):
self.repo = "."
self.command_line = "python " + " ".join(sys.argv[:])
self.command_line = self.command_line.replace("main.py", "gitinspector.py")
def output(self):
terminal.skip_escapes(not sys.stdout.isatty())
terminal.set_stdout_encoding()
previous_directory = os.getcwd()
os.chdir(self.repo)
absolute_path = basedir.get_basedir_git()
os.chdir(absolute_path)
procedure.remove_inspection_branches()
procedure.create_branches_for_inspection()
format.output_header()
sorted_branches = procedure.sort_branches_by_last_update()
for (commit, branch_name) in sorted_branches:
if procedure.eligible_for_inspection(commit):
if procedure.switch_to_branch(branch_name):
output = subprocess.Popen(self.command_line, shell=True, bufsize=1, stdout=subprocess.PIPE).stdout
print(output.read())
else:
print("\n\n ==> All eligible branches have been inspected!")
break
procedure.remove_inspection_branches()
os.chdir(previous_directory)
def __check_python_version__():
if sys.version_info < (2, 6):
python_version = str(sys.version_info[0]) + "." + str(sys.version_info[1])
sys.exit(_("gitinspector requires at least Python 2.6 to run (version {0} was found).").format(python_version))
def main():
terminal.check_terminal_encoding()
terminal.set_stdin_encoding()
__run__ = Runner()
__check_python_version__()
__run__.output()
if __name__ == "__main__":
main()
| gpl-3.0 | -6,643,906,900,088,932,000 | 30.127907 | 119 | 0.663429 | false |
nuobit/odoo-addons | connector_sage/models/payroll_sage_labour_agreement/adapter.py | 1 | 1429 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import models, fields
from odoo.addons.component.core import Component
from odoo.addons.queue_job.job import job
class PayrollSageLabourAgreementAdapter(Component):
_name = 'sage.payroll.sage.labour.agreement.adapter'
_inherit = 'sage.adapter'
_apply_on = 'sage.payroll.sage.labour.agreement'
_sql = """select c.CodigoEmpresa, n.CodigoConvenio, n.Convenio, n.FechaRegistroCV, n.FechaFinalNom,
n.FechaRevision, n.CodigoConvenioColectivo, n.CodigoConvenioColectivoAnt,
n.JornadaAnual, n.ConvenioBloqueado
from (select distinct c.CodigoEmpresa, c.CodigoConvenio, c.FechaRegistroCV
from %(schema)s.ConvenioConcepto c
where exists (
select 1
from %(schema)s.Convenio n
where c.CodigoConvenio = n.CodigoConvenio and
c.FechaRegistroCV = n.FechaRegistroCV
)
) c, %(schema)s.Convenio n
where c.CodigoConvenio = n.CodigoConvenio and
c.FechaRegistroCV = n.FechaRegistroCV
"""
_id = ('CodigoEmpresa', 'CodigoConvenio', 'FechaRegistroCV')
| agpl-3.0 | 7,343,226,407,742,488,000 | 43.65625 | 104 | 0.604619 | false |
JackPerdue/easybuild-easyconfigs | setup.py | 1 | 4640 | ##
# Copyright 2012-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
This script can be used to install easybuild-easyconfigs, e.g. using:
easy_install --user .
or
python setup.py install --prefix=$HOME/easybuild
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import shutil
import sys
from distutils import log
# note: release candidates should be versioned as a pre-release, e.g. "1.1rc1"
# 1.1-rc1 would indicate a post-release, i.e., and update of 1.1, so beware!
VERSION = "1.13.0.0"
API_VERSION = VERSION.split('.')[0]
EB_VERSION = '.'.join(VERSION.split('.')[0:2])
suff = ''
rc_regexp = re.compile("^.*(rc[0-9]*)$")
res = rc_regexp.search(str(VERSION))
if res:
suff = res.group(1)
dev_regexp = re.compile("^.*[0-9]dev$")
if dev_regexp.match(VERSION):
suff = 'dev'
API_VERSION += suff
EB_VERSION += suff
# log levels: 0 = WARN (default), 1 = INFO, 2 = DEBUG
log.set_verbosity(1)
# try setuptools, fall back to distutils if needed
try:
from setuptools import setup
log.info("Installing with setuptools.setup...")
install_package = 'setuptools'
except ImportError, err:
log.info("Failed to import setuptools.setup (%s), so falling back to distutils.setup" % err)
from distutils.core import setup
install_package = 'distutils'
# utility function to read README file
def read(fname):
"""Read contents of given file."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# utility function to get list of data files (i.e. easyconfigs)
def get_data_files():
"""
Return list of data files, i.e. easyconfigs, patches, etc.,
and retain directory structure.
"""
data_files = []
for dirname,dirs,files in os.walk(os.path.join('easybuild', 'easyconfigs')):
if files:
data_files.append((dirname, [os.path.join(dirname, f) for f in files]))
return data_files
log.info("Installing version %s (required versions: API >= %s, easyblocks >= %s)" % (VERSION, API_VERSION, EB_VERSION))
setup(
name = "easybuild-easyconfigs",
version = VERSION,
author = "EasyBuild community",
author_email = "[email protected]",
description = """EasyBuild is a software installation framework in Python that allows you to \
install software in a structured and robust way.
This package contains a collection of easyconfigs, i.e. simple text files written in Python syntax \
that specify the build parameters for software packages (version, compiler toolchain, dependency \
versions, etc.)""",
license = "GPLv2",
keywords = "software build building installation installing compilation HPC scientific",
url = "http://hpcugent.github.com/easybuild",
data_files = get_data_files(),
long_description = read("README.rst"),
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.4",
"Topic :: Software Development :: Build Tools",
],
platforms = "Linux",
# install_requires list is not enforced, because of 'old-and-unmanageable' setup?
# do we even want the dependency, since it's artificial?
install_requires = [
"easybuild-framework >= %s" % API_VERSION,
"easybuild-easyblocks >= %s" % EB_VERSION
],
zip_safe = False
)
| gpl-2.0 | -1,068,465,021,707,233,500 | 35.535433 | 119 | 0.669397 | false |
bjornwallner/proq2-server | apps/modeller9v8/modlib/modeller/salign.py | 1 | 7695 | import modeller
class SalignData(object):
"""Data returned from the 'alignment.salign' method"""
def __init__(self, aln_score, qscorepct):
self.aln_score = aln_score
self.qscorepct = qscorepct
def _salign_fw_local_gaps1(aln, feature_weights, ogp, egp, matrix_offset):
"""Local alignment with given parameters"""
return aln.salign(rms_cutoff=3.5, normalize_pp_scores=False,
rr_file='$(LIB)/as1.sim.mat', overhang=0,
gap_penalties_1d=(ogp, egp),
local_alignment=True, matrix_offset=matrix_offset,
matrix_offset_3d=-0.5, gap_penalties_3d=(0, 3),
gap_gap_score=0, gap_residue_score=0,
alignment_type='tree', nsegm=2,
feature_weights=feature_weights,
improve_alignment=True, fit=True, write_fit=False,
output='ALIGNMENT QUALITY')
def _salign_fw_gaps3(aln, feature_weights, ogp3d, egp3d):
"""Global alignment with given parameters"""
ogp = ogp3d
egp = egp3d
def _salign_wrap(aln, **keys):
try:
return aln.salign(auto_overhang=True, overhang_auto_limit=5,
overhang_factor=1, **keys)
except modeller.ModellerError, detail:
print "SALIGN with auto_overhang failed: %s" % str(detail)
print "Retrying without auto_overhang"
return aln.salign(**keys)
return _salign_wrap(aln, rms_cutoff=3.5, normalize_pp_scores=False,
rr_file='$(LIB)/as1.sim.mat', overhang=0,
gap_penalties_1d=(ogp, egp),
local_alignment=False, matrix_offset=-0.2,
gap_penalties_3d=(ogp3d, egp3d), gap_gap_score=0,
gap_residue_score=0, alignment_type='tree',
nsegm=2, feature_weights=feature_weights,
improve_alignment=True, fit=True, write_fit=False,
write_whole_pdb=False, output='ALIGNMENT QUALITY')
def _frange(start, end=None, inc=1.0):
"""A range function that accepts floating point increments"""
if end is None:
end = float(start)
start = 0.0
else:
start = float(start)
count = int((end - start)/inc)
if start + (count*inc) != end:
count += 1
for i in range(count):
yield start + i*inc
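# Example of the helper above (values computed directly from its definition):
#   list(_frange(-150, 1, 50)) == [-150.0, -100.0, -50.0, 0.0]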
class _TemporaryDirectory(object):
"""Create a temporary directory, and delete it when this object
goes out of scope."""
def __init__(self):
import tempfile
import shutil
self.shutil = shutil
self.tmpdir = tempfile.mkdtemp()
def __del__(self):
if hasattr(self, 'shutil'):
self.shutil.rmtree(self.tmpdir)
def get_path(self, path):
"""Return the name of a file in the temporary directory"""
import os
return os.path.join(self.tmpdir, path)
def iterative_structural_align(aln):
"""Given an alignment of structures, iterate over parameter values
to obtain the best structural alignment."""
for seq in aln:
if not hasattr(seq, 'atoms'):
raise modeller.ModellerError("This method only works for an " + \
"alignment of structures.")
tmpdir = _TemporaryDirectory()
fil = tmpdir.get_path("inp.pir")
aln.write(file=fil)
opfile = tmpdir.get_path("salign_local_mid.ali")
opfile2 = tmpdir.get_path("salign_local.ali")
# -- Iterating over values of gap penalties and matrix offset
qmax = 0.0
win_ogp3d = None
fw1=(1., 0., 0., 0., 1., 0.)
fw2=(0., 1., 0., 0., 0., 0.)
fw3=(0., 0., 0., 0., 1., 0.)
# -- Iterating over gap penalties 1D to get initial alignments
print "Iterate over 1D penalties to get initial alignments"
for ogp in _frange(-150, 1, 50):
for egp in _frange(-50, 1, 50):
for mo in _frange(-3.0, -0.05, 0.3):
aln.clear()
aln.append(file=fil)
try:
qwlty1 = _salign_fw_local_gaps1(aln,fw1,ogp,egp,mo)
if qwlty1.qscorepct >= qmax:
qmax = qwlty1.qscorepct
aln.write(file=opfile, alignment_format='PIR')
win_ogp = ogp
win_egp = egp
win_mo = mo
print "Qlty scrs", ogp,"\t",egp,"\t",qwlty1.qscorepct
except modeller.ModellerError, detail:
print "Set of parameters",fw1,ogp,egp,"resulted in the following error\t"+str(detail)
# -- Iterating over gap penalties 3D to get final alignments
print "Iterate over 3D penalties to get final alignments"
qmax3d = 0.
for ogp3d in _frange(0, 3, 1):
for egp3d in range (2, 5, 1):
aln.clear()
aln.append(file=opfile)
try:
qwlty2 = _salign_fw_gaps3(aln,fw2,ogp3d,egp3d)
if qwlty2.qscorepct >= qmax3d:
qmax3d = qwlty2.qscorepct
aln.write(file=opfile2, alignment_format='PIR')
win_ogp3d = ogp3d
win_egp3d = egp3d
print "Qlty scrs", ogp3d,"\t",egp3d,"\t",qwlty2.qscorepct
except modeller.ModellerError,detail:
print "Set of parameters",fw2,ogp3d,egp3d,"resulted in the following error\t"+str(detail)
qmax = max(qmax, qmax3d)
# try alternate initial alignments only if the qmax score is less than 70%
qmax_old = qmax
if qmax_old <= 70:
print "Trying alternate initial alignments"
for ogp in _frange(0.0, 2.2, 0.3):
for egp in _frange(0.1, 2.3, 0.3):
for mo in _frange(-3.0, -0.05, 0.3):
aln.clear()
aln.append(file=fil)
try:
qwlty1 = _salign_fw_local_gaps1(aln,fw3,ogp,egp,mo)
if qwlty1.qscorepct >= qmax:
qmax = qwlty1.qscorepct
aln.write(file=opfile, alignment_format='PIR')
win_ogp = ogp
win_egp = egp
win_mo = mo
print "Qlty scrs", ogp,"\t",egp,"\t",qwlty1.qscorepct
except modeller.ModellerError, detail:
print "Set of parameters",fw3,ogp,egp,"resulted in the following error\t"+str(detail)
# -- Iterating over gap penalties 3D to get final alignments
print "Trying alternate final alignments"
qmax3d = 0.
for ogp3d in _frange(0, 3, 1):
for egp3d in range(2, 5, 1):
aln.clear()
aln.append(file=opfile)
try:
qwlty2 = _salign_fw_gaps3(aln,fw2,ogp3d,egp3d)
if qwlty2.qscorepct >= qmax3d:
qmax3d = qwlty2.qscorepct
aln.write(file=opfile2, alignment_format='PIR')
win_ogp3d = ogp3d
win_egp3d = egp3d
print "Qlty scrs", ogp3d,"\t",egp3d,"\t",qwlty2.qscorepct
except modeller.ModellerError,detail:
print "Set of parameters",fw2,ogp3d,egp3d,"resulted in the following error\t"+str(detail)
qmax = max(qmax, qmax3d)
print "final max quality = ", qmax
if win_ogp3d is None:
raise modeller.ModellerError("Structure alignment failed")
else:
aln.clear()
aln.append(file=opfile2)
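# Hypothetical usage sketch (not part of the original module; assumes a working
# MODELLER environment and structures readable under the placeholder codes
# '1abc' and '2xyz'):
#
#   import modeller
#   env = modeller.environ()
#   aln = modeller.alignment(env)
#   for code in ('1abc', '2xyz'):
#       mdl = modeller.model(env, file=code)
#       aln.append_model(mdl, align_codes=code, atom_files=code)
#   iterative_structural_align(aln)
#   aln.write(file='aligned.ali', alignment_format='PIR')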
| gpl-3.0 | 6,687,180,254,797,475,000 | 41.28022 | 109 | 0.535802 | false |
GoogleCloudPlatform/gsutil | gslib/parallel_tracker_file.py | 1 | 12431 | # -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for composite upload tracker file functionality."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from collections import namedtuple
import errno
import json
import random
import six
import gslib
from gslib.exception import CommandException
from gslib.tracker_file import (WriteJsonDataToTrackerFile,
RaiseUnwritableTrackerFileException)
from gslib.utils.constants import UTF8
ObjectFromTracker = namedtuple('ObjectFromTracker', 'object_name generation')
class _CompositeUploadTrackerEntry(object):
"""Enum class for composite upload tracker file JSON keys."""
COMPONENTS_LIST = 'components'
COMPONENT_NAME = 'component_name'
COMPONENT_GENERATION = 'component_generation'
ENC_SHA256 = 'encryption_key_sha256'
PREFIX = 'prefix'
def ReadParallelUploadTrackerFile(tracker_file_name, logger):
"""Read the tracker file from the last parallel composite upload attempt.
If it exists, the tracker file is of the format described in
WriteParallelUploadTrackerFile or a legacy format. If the file doesn't exist
or is formatted incorrectly, then the upload will start from the beginning.
This function is not thread-safe and must be protected by a lock if
called within Command.Apply.
Args:
    tracker_file_name: The name of the tracker file to read and parse.
logger: logging.Logger for outputting log messages.
Returns:
enc_key_sha256: Encryption key SHA256 used to encrypt the existing
components, or None if an encryption key was not used.
component_prefix: String prefix used in naming the existing components, or
None if no prefix was found.
existing_components: A list of ObjectFromTracker objects representing
the set of files that have already been uploaded.
"""
enc_key_sha256 = None
prefix = None
existing_components = []
tracker_file = None
# If we already have a matching tracker file, get the serialization data
# so that we can resume the upload.
try:
tracker_file = open(tracker_file_name, 'r')
tracker_data = tracker_file.read()
tracker_json = json.loads(tracker_data)
enc_key_sha256 = tracker_json[_CompositeUploadTrackerEntry.ENC_SHA256]
prefix = tracker_json[_CompositeUploadTrackerEntry.PREFIX]
for component in tracker_json[_CompositeUploadTrackerEntry.COMPONENTS_LIST]:
existing_components.append(
ObjectFromTracker(
component[_CompositeUploadTrackerEntry.COMPONENT_NAME],
component[_CompositeUploadTrackerEntry.COMPONENT_GENERATION]))
except IOError as e:
# Ignore non-existent file (happens first time a upload is attempted on an
# object, or when re-starting an upload after a
# ResumableUploadStartOverException), but warn user for other errors.
if e.errno != errno.ENOENT:
logger.warn(
'Couldn\'t read upload tracker file (%s): %s. Restarting '
'parallel composite upload from scratch.', tracker_file_name,
e.strerror)
except (KeyError, ValueError) as e:
# Legacy format did not support user-supplied encryption.
enc_key_sha256 = None
(prefix, existing_components) = _ParseLegacyTrackerData(tracker_data)
finally:
if tracker_file:
tracker_file.close()
return (enc_key_sha256, prefix, existing_components)
def _ParseLegacyTrackerData(tracker_data):
"""Parses a legacy parallel composite upload tracker file.
Args:
tracker_data: Legacy tracker file contents.
Returns:
component_prefix: The prefix used in naming the existing components, or
None if no prefix was found.
existing_components: A list of ObjectFromTracker objects representing
the set of files that have already been uploaded.
"""
# Old tracker files used a non-JSON format.
# The first line represents the prefix, followed by line pairs of object_name
# and generation. Discard the last blank line.
old_tracker_data = tracker_data.split('\n')[:-1]
prefix = None
existing_components = []
if old_tracker_data:
prefix = old_tracker_data[0]
i = 1
while i < len(old_tracker_data) - 1:
(name, generation) = (old_tracker_data[i], old_tracker_data[i + 1])
if not generation:
# Cover the '' case.
generation = None
existing_components.append(ObjectFromTracker(name, generation))
i += 2
return (prefix, existing_components)
def ValidateParallelCompositeTrackerData(tracker_file_name, existing_enc_sha256,
existing_prefix, existing_components,
current_enc_key_sha256, bucket_url,
command_obj, logger, delete_func,
delete_exc_handler):
"""Validates that tracker data matches the current encryption key.
If the data does not match, makes a best-effort attempt to delete existing
temporary component objects encrypted with the old key.
Args:
tracker_file_name: String file name of tracker file.
existing_enc_sha256: Encryption key SHA256 used to encrypt the existing
components, or None if an encryption key was not used.
existing_prefix: String prefix used in naming the existing components, or
None if no prefix was found.
existing_components: A list of ObjectFromTracker objects representing
the set of files that have already been uploaded.
current_enc_key_sha256: Current Encryption key SHA256 that should be used
to encrypt objects.
bucket_url: Bucket URL in which the components exist.
command_obj: Command class for calls to Apply.
logger: logging.Logger for outputting log messages.
delete_func: command.Apply-callable function for deleting objects.
delete_exc_handler: Exception handler for delete_func.
Returns:
prefix: existing_prefix, or None if the encryption key did not match.
existing_components: existing_components, or empty list if the encryption
key did not match.
"""
if six.PY3:
if isinstance(existing_enc_sha256, str):
existing_enc_sha256 = existing_enc_sha256.encode(UTF8)
if isinstance(current_enc_key_sha256, str):
current_enc_key_sha256 = current_enc_key_sha256.encode(UTF8)
if existing_prefix and existing_enc_sha256 != current_enc_key_sha256:
try:
logger.warn(
'Upload tracker file (%s) does not match current encryption '
'key. Deleting old components and restarting upload from '
'scratch with a new tracker file that uses the current '
'encryption key.', tracker_file_name)
components_to_delete = []
for component in existing_components:
url = bucket_url.Clone()
url.object_name = component.object_name
        url.generation = component.generation
        components_to_delete.append(url)
command_obj.Apply(
delete_func,
components_to_delete,
delete_exc_handler,
arg_checker=gslib.command.DummyArgChecker,
parallel_operations_override=command_obj.ParallelOverrideReason.SPEED)
except: # pylint: disable=bare-except
# Regardless of why we can't clean up old components, need to proceed
# with the user's original intent to upload the file, so merely warn.
component_names = [
component.object_name for component in existing_components
]
logger.warn(
'Failed to delete some of the following temporary objects:\n%s\n'
'(Continuing on to re-upload components from scratch.)',
'\n'.join(component_names))
# Encryption keys have changed, so the old components and prefix
# cannot be used.
return (None, [])
return (existing_prefix, existing_components)
def GenerateComponentObjectPrefix(encryption_key_sha256=None):
"""Generates a random prefix for component objects.
Args:
encryption_key_sha256: Encryption key SHA256 that will be used to encrypt
the components. This is hashed into the prefix to avoid collision
during resumption with a different encryption key.
Returns:
String prefix for use in the composite upload.
"""
return str(
(random.randint(1, (10**10) - 1) + hash(encryption_key_sha256)) % 10**10)
def WriteComponentToParallelUploadTrackerFile(tracker_file_name,
tracker_file_lock,
component,
logger,
encryption_key_sha256=None):
"""Rewrites an existing tracker file with info about the uploaded component.
Follows the format described in _CreateParallelUploadTrackerFile.
Args:
tracker_file_name: Tracker file to append to.
tracker_file_lock: Thread and process-safe Lock protecting the tracker file.
component: ObjectFromTracker describing the object that was uploaded.
logger: logging.Logger for outputting log messages.
encryption_key_sha256: Encryption key SHA256 for use in this upload, if any.
"""
with tracker_file_lock:
(existing_enc_key_sha256, prefix,
existing_components) = (ReadParallelUploadTrackerFile(
tracker_file_name, logger))
if existing_enc_key_sha256 != encryption_key_sha256:
raise CommandException(
'gsutil client error: encryption key SHA256 (%s) in tracker file '
'does not match encryption key SHA256 (%s) of component %s' %
(existing_enc_key_sha256, encryption_key_sha256,
component.object_name))
newly_completed_components = [component]
completed_components = existing_components + newly_completed_components
WriteParallelUploadTrackerFile(tracker_file_name,
prefix,
completed_components,
encryption_key_sha256=encryption_key_sha256)
def WriteParallelUploadTrackerFile(tracker_file_name,
prefix,
components,
encryption_key_sha256=None):
"""Writes information about components that were successfully uploaded.
The tracker file is serialized JSON of the form:
{
"encryption_key_sha256": sha256 hash of encryption key (or null),
"prefix": Prefix used for the component objects,
"components": [
{
"component_name": Component object name,
"component_generation": Component object generation (or null),
}, ...
]
}
where N is the number of components that have been successfully uploaded.
This function is not thread-safe and must be protected by a lock if
called within Command.Apply.
Args:
tracker_file_name: The name of the parallel upload tracker file.
prefix: The generated prefix that used for uploading any existing
components.
components: A list of ObjectFromTracker objects that were uploaded.
encryption_key_sha256: Encryption key SHA256 for use in this upload, if any.
"""
if six.PY3:
if isinstance(encryption_key_sha256, bytes):
encryption_key_sha256 = encryption_key_sha256.decode('ascii')
tracker_components = []
for component in components:
tracker_components.append({
_CompositeUploadTrackerEntry.COMPONENT_NAME: component.object_name,
_CompositeUploadTrackerEntry.COMPONENT_GENERATION: component.generation
})
tracker_file_data = {
_CompositeUploadTrackerEntry.COMPONENTS_LIST: tracker_components,
_CompositeUploadTrackerEntry.ENC_SHA256: encryption_key_sha256,
_CompositeUploadTrackerEntry.PREFIX: prefix
}
WriteJsonDataToTrackerFile(tracker_file_name, tracker_file_data)
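# Hypothetical example (names, generations and prefix are made up): after two
# components have been uploaded,
#
#   WriteParallelUploadTrackerFile(
#       'upload.tracker', '8456219031',
#       [ObjectFromTracker('obj_comp_0', 1234),
#        ObjectFromTracker('obj_comp_1', None)])
#
# serializes JSON equivalent to
#   {"components": [{"component_name": "obj_comp_0", "component_generation": 1234},
#                   {"component_name": "obj_comp_1", "component_generation": null}],
#    "encryption_key_sha256": null,
#    "prefix": "8456219031"}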
| apache-2.0 | -5,276,166,415,215,530,000 | 39.891447 | 80 | 0.690773 | false |
LuyaoHuang/depend-test-framework | examples/vm_basic_doc.py | 1 | 1635 | from utils import STEPS, RESULT, SETUP
import copy
# TODO: use a class for this
DEFAULT = {
'memory': 1048576,
'uuid': 'c156ca6f-3c16-435b-980d-9745e1d84ad1',
'name': 'vm1',
'id': 1,
}
def start_guest(params, env):
"""
Start guest
"""
params.doc_logger.info(STEPS + "# virsh start %s" % params.guest_name)
params.doc_logger.info(RESULT + "Domain %s started" % params.guest_name)
# TODO: move this to another place, since the auto part will need this also
info = dict(env.get_data('$guest_name.config').data)
env.set_data('$guest_name.active', info)
def destroy_guest(params, env):
"""
Destory guest
"""
params.doc_logger.info(STEPS + "# virsh destroy %s" % params.guest_name)
params.doc_logger.info(RESULT + "Domain %s destroyed" % params.guest_name)
def define_guest(params, env):
"""
define a new guest
"""
params.doc_logger.info(STEPS + "# virsh define %s" % params.guest_xml)
params.doc_logger.info(RESULT + "Domain %s defined from %s" % (params.guest_name,
params.guest_xml))
info = dict(DEFAULT)
# TODO: support store mutli domain info
if params.guest_name:
info['name'] = params.guest_name
if params.guest_memory:
info['memory'] = params.guest_memory
env.set_data('$guest_name.config', info)
def undefine_guest(params, env):
"""
undefine guest
"""
params.doc_logger.info(STEPS + "# virsh undefine %s" % params.guest_name)
params.doc_logger.info(RESULT + "Domain %s has been undefined" % (params.guest_name))
| mit | 8,700,338,535,210,262,000 | 29.849057 | 89 | 0.615902 | false |
Nextdoor/ndscheduler | ndscheduler/corescheduler/job.py | 1 | 3499 | """Base class for a job."""
import os
import socket
from ndscheduler.corescheduler import utils
class JobBase:
def __init__(self, job_id, execution_id):
self.job_id = job_id
self.execution_id = execution_id
@classmethod
def create_test_instance(cls):
"""Creates an instance of this class for testing."""
return cls(None, None)
@classmethod
def get_scheduled_description(cls):
hostname = socket.gethostname()
pid = os.getpid()
return 'hostname: %s | pid: %s' % (hostname, pid)
@classmethod
def get_scheduled_error_description(cls):
hostname = socket.gethostname()
pid = os.getpid()
return 'hostname: %s | pid: %s' % (hostname, pid)
@classmethod
def get_running_description(cls):
hostname = socket.gethostname()
pid = os.getpid()
return 'hostname: %s | pid: %s' % (hostname, pid)
@classmethod
def get_failed_description(cls):
hostname = socket.gethostname()
pid = os.getpid()
return 'hostname: %s | pid: %s' % (hostname, pid)
@classmethod
def get_succeeded_description(cls, result=None):
hostname = socket.gethostname()
pid = os.getpid()
return 'hostname: %s | pid: %s' % (hostname, pid)
@classmethod
def get_scheduled_error_result(cls):
return utils.get_stacktrace()
@classmethod
def get_failed_result(cls):
return utils.get_stacktrace()
@classmethod
def meta_info(cls):
"""Returns meta info for this job class.
For example:
{
'job_class_string': 'myscheduler.jobs.myjob.MyJob',
'arguments': [
{'type': 'string', 'description': 'name of this channel'},
{'type': 'string', 'description': 'what this channel does'},
{'type': 'int', 'description': 'created year'}
],
'example_arguments': '["music channel", "it's an awesome channel", 1997]',
'notes': 'need to specify environment variable API_KEY first'
}
The arguments property should be consistent with the run() method.
        This info will be used in the web ui for explaining what kind of arguments are needed for a job.
You should override this function if you want to make your scheduler web ui informative :)
:return: meta info for this job class.
:rtype: dict
"""
return {
'job_class_string': '%s.%s' % (cls.__module__, cls.__name__),
'arguments': [],
'example_arguments': '',
'notes': ''
}
@classmethod
def run_job(cls, job_id, execution_id, *args, **kwargs):
"""Wrapper to run this job in a static context.
:param str job_id: Job id.
:param str execution_id: Execution id.
:param args:
:param kwargs:
"""
job = cls(job_id, execution_id)
return job.run(*args, **kwargs)
def run(self, *args, **kwargs):
"""The "main" function for a job.
Any subclass has to implement this function.
        The return value of this function will be stored in the database as a json formatted string
        and will be shown for each execution in the web ui.
:param args:
:param kwargs:
:return: None or json serializable object.
"""
raise NotImplementedError('Please implement this function')
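# Hypothetical example (not part of ndscheduler): a minimal concrete job built on
# JobBase. The class name, argument and return value are illustrative only.
#
# class EchoJob(JobBase):
#
#     @classmethod
#     def meta_info(cls):
#         return {
#             'job_class_string': '%s.%s' % (cls.__module__, cls.__name__),
#             'arguments': [{'type': 'string', 'description': 'message to echo'}],
#             'example_arguments': '["hello"]',
#             'notes': '',
#         }
#
#     def run(self, message, *args, **kwargs):
#         return {'echo': message}
#
# # The scheduler would then invoke it through the classmethod wrapper:
# # EchoJob.run_job('job-id', 'execution-id', 'hello')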
| bsd-2-clause | -5,716,953,715,774,555,000 | 32.32381 | 99 | 0.577022 | false |
bloer/bgexplorer | bgexplorer/modelviewer/modelviewer.py | 1 | 25907 | #python 2/3 compatibility
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from itertools import chain
from flask import (Blueprint, render_template, request, abort, url_for, g,
Response, make_response, current_app)
import threading
import zlib
import json
import numpy as np
from uncertainties import unumpy
from math import ceil, log10
from io import BytesIO
try:
from matplotlib.figure import Figure
except ImportError:
Figure = None
from bson import ObjectId
from .. import utils
from ..dbview import SimsDbView
from . import billofmaterials as bomfuncs
from ..modeldb import InMemoryCacher
from bgmodelbuilder import units
import logging
log = logging.getLogger(__name__)
from time import sleep
def make_etag(model):
""" Generate a string to use as an etag """
try:
return f"{model.id}-{model.editDetails['date']}"
except AttributeError:
# this should be a dictionary...
return f"{model['_id']}-{model['editDetails']['date']}"
class ModelViewer(object):
"""Blueprint for inspecting saved model definitions
Args:
app: The bgexplorer Flask object
modeldb: a ModelDB object. If None, will get from the Flask object
url_prefix (str): Where to mount this blueprint relative to root
"""
defaultversion='HEAD'
joinkey='___'
def __init__(self, app=None, modeldb=None,
cacher=InMemoryCacher(), url_prefix='/explore'):
self.app = app
self._modeldb = modeldb
self.bp = Blueprint('modelviewer', __name__,
static_folder='static',
template_folder='templates',
url_prefix='/<modelname>/<version>')
self.bp.add_app_template_global(lambda : self, 'getmodelviewer')
self.set_url_processing()
self.register_endpoints()
if self.app:
self.init_app(app, url_prefix)
self._threads = {}
self._cacher = cacher
#### User Overrides ####
self.bomcols = bomfuncs.getdefaultcols()
def init_app(self, app, url_prefix=''):
"""Register ourselves with the app"""
app.register_blueprint(self.bp,
url_prefix=url_prefix+self.bp.url_prefix)
app.extensions['ModelViewer'] = self
@property
def modeldb(self):
return self._modeldb or utils.get_modeldb()
@property
def simsdb(self):
return g.simsdbview.simsdb
def set_url_processing(self):
"""process model objects into URL strings, and pre-load models
into the `flask.g` object before passing to endpoint functions
"""
@self.bp.after_request
def addpostheaders(response):
""" Add cache-control headers to all modelviewer responses """
if self.app.config.get('NO_CLIENT_CACHE'):
return
# todo: add Last-Modified
response.headers["Cache-Control"] = "private, max-age=100"
try:
response.headers['ETag'] = make_etag(g.model)
except (AttributeError, KeyError): # model is not loaded in g
pass
return response
@self.bp.url_defaults
def add_model(endpoint, values):
model = values.pop('model', None) or g.get('model', None)
if model:
#model could be object or dict
name = getattr(model,'name', None) or model.get('name',None)
version = getattr(model,'version', None)
if version is None and hasattr(model,'get'):
version = model.get('version',None)
values.setdefault('modelname', name)
permalink = values.pop('permalink',None)
if permalink is not None:
values['version'] = (version if permalink
else self.defaultversion)
else:
values.setdefault('version',
version if not g.get('permalink')
else self.defaultversion)
elif 'modelid' in values:
values['modelname'] = values.pop('modelid')
values['version'] = '_'
values['permalink'] = 1
#transform components, specs into IDs
if 'component' in values:
values['componentid'] = values.pop('component').id
if 'spec' in values:
values['specid'] = values.pop('spec').getrootspec().id
if 'match' in values:
values['matchid'] = values.pop('match').id
@self.bp.url_value_preprocessor
def find_model(endpoint, values):
# URL has different formats that result in different queries
query = None
if 'modelid' in values:
query = values.pop('modelid')
elif 'modelname' in values:
query={'name': values.pop('modelname')}
version = values.pop('version',self.defaultversion)
if version == '_': #special, means name is actually ID
query['_id'] = query.pop('name')
elif version != self.defaultversion:
query['version'] = version
if not query:
abort(400, "Incomplete model specification")
# this function is called before `before_requests`, but we don't
# want to extract the model if the client requested a cached
# view. So we have to do the cache checking here
etagreq = request.headers.get('If-None-Match')
if etagreq and not self.app.config.get('NO_CLIENT_CACHE'):
# construct the etag from the DB entry
projection = {'editDetails.date': True}
modeldict = self.modeldb.get_raw_model(query, projection)
etag = make_etag(modeldict)
if etagreq == etag:
abort(make_response('', '304 Not Modified',{'ETag': etag}))
# if we get here, it's not in client cache
g.model = utils.getmodelordie(query,self.modeldb)
if version == self.defaultversion:
g.permalink = url_for(endpoint, permalink=True,
**values)
g.simsdbview = utils.get_simsdbview(model=g.model)
#construct the cached datatable in the background
if self._cacher:
self.build_datatable(g.model)
def register_endpoints(self):
"""Define the view functions here"""
@self.bp.route('/')
def overview():
history = self.modeldb.get_model_history(g.model.id)
return render_template('overview.html', history=history)
@self.bp.route('/component/')
@self.bp.route('/component/<componentid>') #should be uuid type?
def componentview(componentid=None):
if componentid:
component = utils.getcomponentordie(g.model, componentid)
matches = g.model.getsimdata(component=component)
datasets = sum((m.dataset or [] for m in matches), [])
return render_template("componentview.html",
component=component, datasets=datasets)
else:
return render_template("componentsoverview.html")
@self.bp.route('/emissions/')
def emissionsoverview():
rootspecs = [ s for s in g.model.specs.values() if not s.parent]
return render_template("emissionsoverview.html",
rootspecs=rootspecs)
@self.bp.route('/emission/<specid>')
def emissionview(specid):
spec = utils.getspecordie(g.model, specid)
#find all simulation datasets associated to this spec
matches = []
if spec.getrootspec() == spec:
matches = g.model.getsimdata(rootspec=spec)
else:
matches = g.model.getsimdata(spec=spec)
datasets = sum((m.dataset or [] for m in matches), [])
return render_template('emissionview.html', spec=spec,
matches=matches, datasets=datasets)
@self.bp.route('/simulations/')
def simulationsoverview():
return render_template("simulationsoverview.html")
@self.bp.route('/queries/')
def queriesoverview():
#build a unique list of all queries
queries = {}
for m in g.model.getsimdata():
key = str(m.query)
if key not in queries:
queries[key] = []
queries[key].append(m)
return render_template('queriesoverview.html',queries=queries)
@self.bp.route('/dataset/<dataset>')
def datasetview(dataset):
detail = self.simsdb.getdatasetdetails(dataset)
return render_template("datasetview.html", dataset=dataset,
detail = detail)
@self.bp.route('/simdatamatch/<matchid>')
def simdatamatchview(matchid):
match = utils.getsimdatamatchordie(g.model, matchid)
linkspec = match.spec.getrootspec()
return render_template("simdatamatchview.html", match=match)
@self.bp.route('/billofmaterials')
def billofmaterials():
bomrows = bomfuncs.getbomrows()
return render_template("billofmaterials.html",
bomrows=bomrows,
bomcols=self.bomcols)
@self.bp.route('/datatable')
def datatable():
"""Return groups and values for all simdatamatches"""
return self.get_datatable(g.model)
@self.bp.route('/tables/default')
def tablesdefault():
"""Show some default tables with the calculated rows"""
return render_template("tablesdefault.html")
@self.bp.route('/charts/default')
def chartsdefault():
"""Show some default charts with the calculated rates"""
return render_template("chartsdefault.html")
@self.bp.route('/spectra/default')
def spectradefault():
return render_template("spectradefault.html")
@self.bp.route('/export')
def export():
"""Present the model as a JSON document"""
d = g.model.todict()
#replace ObjectIds with strings
if isinstance(d.get('_id'), ObjectId):
d['_id'] = str(d['_id'])
if isinstance(d.get('derivedFrom'), ObjectId):
d['derivedFrom'] = str(d['derivedFrom'])
return Response(json.dumps(d), mimetype="application/json")
@self.bp.route('/getspectrum')
@self.bp.route('/getspectrum/<specname>')
def getspectrum(specname=None):
# get the generator for the spectrum
if not specname:
valname = request.args.get('val')
if not valname:
abort(404, "Either spectrum name or value name is required")
specname = g.simsdbview.values_spectra.get(valname)
if not specname:
# valname might have a unit suffix applied to it
index = valname.rfind(' [')
valname = valname[:index]
specname = g.simsdbview.values_spectra.get(valname)
if not specname:
abort(404, f"No spectrum associated to value '{valname}'")
speceval = g.simsdbview.spectra.get(specname)
if speceval is None:
abort(404, f"No spectrum generator for '{specname}'")
log.debug(f"Generating spectrum: {specname}")
title = specname
# get the matches
matches = request.args.getlist('m')
try:
matches = [g.model.simdata[m] for m in matches]
except KeyError:
abort(404, "Request for unknown sim data match")
if not matches:
# matches may be filtered by component or spec
component = None
if 'componentid' in request.args:
component = utils.getcomponentordie(g.model,
request.args['componentid'])
title += ", Component = "+component.name
rootspec = None
if 'specid' in request.args:
rootspec = utils.getspecordie(g.model,
request.args['specid'])
title += ", Source = "+rootspec.name
matches = g.model.getsimdata(rootcomponent=component, rootspec=rootspec)
# test for a group filter
groupname = request.args.get('groupname')
groupval = request.args.get('groupval')
if groupname and groupval and groupval != g.simsdbview.groupjoinkey:
try:
groupfunc = g.simsdbview.groups[groupname]
except KeyError:
abort(404, f"No registered grouping function {groupname}")
def _filter_group(match):
mgval = g.simsdbview.evalgroup(match, groupname, False)
return g.simsdbview.is_subgroup(mgval, groupval)
matches = list(filter(_filter_group, matches))
title += ", "+groupname+" = "
title += '/'.join(g.simsdbview.unflatten_gval(groupval, True))
if not matches:
abort(404, "No sim data matching query")
spectrum = self.simsdb.evaluate([speceval], matches)[0]
if not hasattr(spectrum, 'hist') or not hasattr(spectrum, 'bin_edges'):
abort(500, f"Error generating spectrum, got {type(spectrum)}")
unit = g.simsdbview.spectra_units.get(specname, None)
if unit is not None:
try:
spectrum.hist.ito(unit)
except AttributeError: #not a quantity
pass
fmt = request.args.get("format", "png").lower()
response = None
if fmt == 'tsv':
response = Response(self.streamspectrum(spectrum, sep='\t'),
mimetype='text/tab-separated-value')
elif fmt == 'csv':
response = Response(self.streamspectrum(spectrum, sep=','),
mimetype='text/csv')
elif fmt == 'png':
response = self.specimage(spectrum, title=title)
else:
abort(400, f"Unhandled format specifier {fmt}")
return response
def streamspectrum(self, spectrum, sep=',', include_errs=True,
fmt='{:.5g}'):
""" Return a generator response for a spectrum
Args:
spectrum (Histogram): spectrum to stream
sep (str): separator (e.g. csv or tsv)
include_errs (bool): if True, include a column for errors
fmt (str): format specifier
Returns:
generator to construct Response
"""
bins, vals = spectrum.bin_edges, spectrum.hist
vals_has_units = hasattr(vals, 'units')
bins_has_units = hasattr(bins, 'units')
# yield the header
head = ["Bin", "Value"]
if bins_has_units:
head[0] += f' [{bins.units}]'
if vals_has_units:
head[1] += f' [{vals.units}]'
if include_errs:
head.append('Error')
yield sep.join(head)+'\n'
# now remove units and extract errors
if vals_has_units:
vals = vals.m
if bins_has_units:
bins = bins.m
vals, errs = unumpy.nominal_values(vals), unumpy.std_devs(vals)
for abin, aval, anerr in zip(bins, vals, errs):
yield sep.join((str(abin), fmt.format(aval), fmt.format(anerr)))+'\n'
def specimage(self, spectrum, title=None, logx=True, logy=True):
""" Generate a png image of a spectrum
Args:
spectrum (Histogram): spectrum to plot
title (str): title
logx (bool): set x axis to log scale
logy (bool): set y axis to log scale
Returns:
a Response object
"""
if Figure is None:
abort(500, "Matplotlib is not available")
log.debug("Generating spectrum image")
# apparently this aborts sometimes?
try:
x = spectrum.bin_edges.m
except AttributeError:
x = spectrum.bin_edges
fig = Figure()
ax = fig.subplots()
ax.errorbar(x=x[:-1],
y=unumpy.nominal_values(spectrum.hist),
yerr=unumpy.std_devs(spectrum.hist),
drawstyle='steps-post',
elinewidth=0.6,
)
ax.set_title(title)
if logx:
ax.set_xscale('log')
if logy:
ax.set_yscale('log')
if hasattr(spectrum.bin_edges, 'units'):
ax.set_xlabel(f'Bin [{spectrum.bin_edges.units}]')
if hasattr(spectrum.hist, 'units'):
ax.set_ylabel(f"Value [{spectrum.hist.units}]")
"""
#limit to at most N decades...
maxrange = 100000
ymin, ymax = plt.ylim()
ymax = 10**ceil(log10(ymax))
ymin = max(ymin, ymax/maxrange)
plt.ylim(ymin, ymax)
plt.tick_params(which='major',length=6, width=1)
plt.tick_params(which='minor',length=4,width=1)
iplt.gcf().set_size_inches(9,6)
plt.gca().set_position((0.08,0.1,0.7,0.8))
"""
log.debug("Rendering...")
out = BytesIO()
fig.savefig(out, format='png')
log.debug("Done generating image")
size = out.tell()
out.seek(0)
res = Response(out.getvalue(),
content_type='image/png',
headers={'Content-Length': size,
'Content-Disposition': 'inline',
},
)
return res
#need to pass simsdb because it goes out of context
def streamdatatable(self, model, simsdbview=None):
"""Stream exported data table so it doesn't all go into mem at once
"""
log.debug(f"Generating data table for model {model.id}")
#can't evaluate values if we don't have a simsdb
if simsdbview is None:
simsdbview = utils.get_simsdbview(model=model) or SimsDbView()
simsdb = simsdbview.simsdb
valitems = list(simsdbview.values.values())
matches = model.simdata.values()
#send the header
valheads = ['V_'+v+(' [%s]'%simsdbview.values_units[v]
if v in simsdbview.values_units else '')
for v in simsdbview.values]
yield('\t'.join(chain(['ID'],
('G_'+g for g in simsdbview.groups),
valheads))
+'\n')
#loop through matches
for match in matches:
evals = []
if valitems:
evals = simsdb.evaluate(valitems, match)
for index, vlabel in enumerate(simsdbview.values):
# convert to unit if provided
unit = simsdbview.values_units.get(vlabel,None)
if unit:
try:
evals[index] = evals[index].to(unit).m
except AttributeError: #not a Quantity...
pass
except units.errors.DimensionalityError as e:
if evals[index] != 0 :
log.warning(e)
evals[index] = getattr(evals[index], 'm', 0)
# convert to string
evals[index] = "{:.3g}".format(evals[index])
if match.spec.islimit:
evals[index] = '<'+evals[index]
groupvals = (g(match) for g in simsdbview.groups.values())
groupvals = (simsdbview.groupjoinkey.join(g)
if isinstance(g,(list,tuple)) else g
for g in groupvals)
yield('\t'.join(chain([match.id],
(str(g) for g in groupvals),
evals))
+'\n')
#sleep(0.2) # needed to release the GIL
log.debug(f"Finished generating data table for model {model.id}")
@staticmethod
def datatablekey(model):
return "datatable:"+make_etag(model)
def build_datatable(self, model):
"""Generate a gzipped datatable and cache it
Args:
model: a BgModel
Returns:
None if no cacher is defined
0 if the result is already cached
Thread created to generate the cache otherwise
"""
#don't bother to call if we don't have a cache
if not self._cacher:
return None
#TODO: self._threads and self._Cacher should probably be mutexed
#see if there's already a worker
key = self.datatablekey(model)
if key in self._threads:
return self._threads[key]
#see if it's already cached
if self._cacher.test(key):
return 0
#if we get here, we need to generate it
def cachedatatable(dbview):
compressor = zlib.compressobj()
res = b''.join(compressor.compress(s.encode('utf-8'))
for s in self.streamdatatable(model, dbview))
res += compressor.flush()
self._cacher.store(key, res)
self._threads.pop(key) #is this a bad idea???
dbview = utils.get_simsdbview(model=model)
thread = threading.Thread(target=cachedatatable,name=key,
args=(dbview,))
self._threads[key] = thread
thread.start()
return thread
def get_datatable(self, model):
"""Return a Result object with the encoded or streamed datatable"""
key = self.datatablekey(model)
if not self._cacher: # or self.modeldb.is_model_temp(model.id):
#no cache, so stream it directly, don't bother to zip it
#should really be text/csv, but then browsersr won't let you see it
return Response(self.streamdatatable(model), mimetype='text/plain')
if not self._cacher.test(key):
thread = self.build_datatable(model)
if thread:
thread.join() #wait until it's done
res = self._cacher.get(key)
if not res:
abort(500,"Unable to generate datatable")
return Response(res, headers={'Content-Type':'text/plain;charset=utf-8',
'Content-Encoding':'deflate',
})
def get_componentsort(self, component, includeself=True):
"""Return an array component names in assembly order to be passed
to the javascript analyzer for sorting component names
"""
#TODO: cache this
result = [component.name] if includeself else []
for child in component.getcomponents(merge=False):
branches = self.get_componentsort(child)
if includeself:
branches = [self.joinkey.join((component.name, s))
for s in branches]
result.extend(branches)
return result
def get_groupsort(self):
res = dict(**g.simsdbview.groupsort)
#todo: set up provided lists
if 'Component' not in res:
res['Component'] = self.get_componentsort(g.model.assemblyroot, False)
return res
def eval_matches(self, matches, dovals=True, dospectra=False):
""" Evaluate `matches` for all registered values and specs
Args:
matches: list of SimDataMatch objects to evaluate
dovals (bool): include entries from `self.values` ?
dospectra (bool): include entries from `self.spectra` ?
Returns:
values (dict): dictionary mapping of keys in `self.values` and
`self.spectra` to evaluated results. If a key in
`spectra` conflicts with one in values, it will be
renamed to "spectrum_<key>"
"""
# this no longer works, but wasn't used. Keep around for now...
raise NotImplementedError()
vals = dict(**self.values) if dovals else {}
if dospectra:
for key, spectrum in self.spectra.items():
if key in vals:
key = f'spectrum_{key}'
vals[key] = spectrum
result = dict(zip(vals.keys(),
self.simsdb.evaluate(vals.values(), matches)))
if dovals:
for key, unit in self.values_units.items():
try:
result[key].ito(unit)
except AttributeError:
pass
if dospectra:
for key, unit in self.spectra_units.items():
if dovals and key in self.values:
key = f'spectrum_{key}'
try:
result[key].ito(unit)
except AttributeError:
pass
return result
| bsd-2-clause | 1,891,880,198,293,276,000 | 38.979938 | 88 | 0.53557 | false |
michaelchughes/satdetect | satdetect/ioutil/IOUtil.py | 1 | 2151 | ''' IOUtil.py
'''
import os
import numpy as np
import glob
import joblib
import scipy.io
from skimage.data import imread
from distutils.dir_util import mkpath
def imgpath2list(imgpath):
''' Transform provided path (or path pattern) into a list of valid paths
Args
-------
imgpath : list or string
either exact jpeg path ('/path/to/myfile.jpg')
or pattern to be read by glob ('/path/to/manyfiles/*.jpg')
Returns
--------
imgpathList : list of valid paths on this system
'''
## Remove backslashes from imgpath,
## since these are sometimes added by terminal
if type(imgpath) == str:
imgpath = imgpath.replace('\\', '')
if type(imgpath) == list:
imgpathList = imgpath
elif imgpath.count('*') > 0:
imgpathList = glob.glob(imgpath)
else:
imgpathList = [imgpath]
return imgpathList
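# Example (hypothetical paths): a glob pattern is expanded, a single path is wrapped:
#   imgpath2list('/data/scenes/*.jpg')     -> list of every matching file on disk
#   imgpath2list('/data/scenes/img01.jpg') -> ['/data/scenes/img01.jpg']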
def getFilepathParts(path):
''' Transform string defining a filesystem absolute path into component parts
Example
---------
>> getFilepathParts('/data/mhughes/myimage.jpg')
('/data/mhughes/', 'myimage', '.jpg')
'''
pathdirs = path.split(os.path.sep)
pathdir = os.path.sep.join(pathdirs[:-1])
basefields = pathdirs[-1].split('.')
basename = basefields[0]
ext = '.' + basefields[1]
return pathdir, basename, ext
def loadImage(path, basename='', color='rgb'):
''' Load JPEG image from file,
Returns
--------
IM : 2D or 3D array, size H x W x nColors
dtype will be float64, with each pixel in range (0,1)
'''
path = str(path)
if len(basename) > 0:
path = os.path.join(path, basename)
if color == 'gray' or color == 'grey':
IM = imread(path, as_grey=True)
assert IM.ndim == 2
else:
IM = imread(path, as_grey=False)
if not IM.ndim == 3:
raise ValueError('Color image not available.')
if IM.dtype == np.float:
MaxVal = 1.0
elif IM.dtype == np.uint8:
MaxVal = 255
else:
raise ValueError("Unrecognized dtype: %s" % (IM.dtype))
assert IM.min() >= 0.0
assert IM.max() <= MaxVal
IM = np.asarray(IM, dtype=np.float64)
if MaxVal > 1:
IM /= MaxVal
return IM | mit | 501,435,163,611,969,660 | 24.927711 | 79 | 0.623896 | false |
ericholscher/django | django/core/management/__init__.py | 1 | 15955 | import collections
import imp
from importlib import import_module
from optparse import OptionParser, NO_DEFAULT
import os
import sys
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError, handle_default_options
from django.core.management.color import color_style
from django.utils import six
# For backwards compatibility: get_version() used to be in this module.
from django import get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
def find_management_module(app_name):
"""
Determines the path to the management module for the given app_name,
without actually importing the application or the management module.
Raises ImportError if the management module cannot be found for any reason.
"""
parts = app_name.split('.')
parts.append('management')
parts.reverse()
part = parts.pop()
path = None
# When using manage.py, the project module is added to the path,
# loaded, then removed from the path. This means that
# testproject.testapp.models can be loaded in future, even if
# testproject isn't in the path. When looking for the management
# module, we need look for the case where the project name is part
# of the app_name but the project directory itself isn't on the path.
try:
f, path, descr = imp.find_module(part, path)
except ImportError as e:
if os.path.basename(os.getcwd()) != part:
raise e
else:
if f:
f.close()
while parts:
part = parts.pop()
f, path, descr = imp.find_module(part, [path] if path else None)
if f:
f.close()
return path
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
global _commands
if _commands is None:
_commands = dict((name, 'django.core') for name in find_commands(__path__[0]))
# Find the installed apps
from django.conf import settings
try:
apps = settings.INSTALLED_APPS
except ImproperlyConfigured:
# Still useful for commands that do not require functional settings,
# like startproject or help
apps = []
# Find and load the management module for each installed app.
for app_name in apps:
try:
path = find_management_module(app_name)
_commands.update(dict((name, app_name)
for name in find_commands(path)))
except ImportError:
pass # No management module - ignore this app
return _commands
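# Illustrative sketch (available commands depend on INSTALLED_APPS): the mapping from
# get_commands() is what call_command()/fetch_command() combine with
# load_command_class(), e.g.
#   app_name = get_commands()['syncdb']                # -> 'django.core'
#   command = load_command_class(app_name, 'syncdb')   # -> a BaseCommand instance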
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
except KeyError:
raise CommandError("Unknown command: %r" % name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
# Grab out a list of defaults from the options. optparse does this for us
# when the script runs from the command line, but since call_command can
# be called programmatically, we need to simulate the loading and handling
# of defaults (see #10080 for details).
defaults = {}
for opt in klass.option_list:
if opt.default is NO_DEFAULT:
defaults[opt.dest] = None
else:
defaults[opt.dest] = opt.default
defaults.update(options)
return klass.execute(*args, **defaults)
class LaxOptionParser(OptionParser):
"""
An option parser that doesn't raise any errors on unknown options.
This is needed because the --settings and --pythonpath options affect
the commands (and thus the options) that are available to the user.
"""
def error(self, msg):
pass
def print_help(self):
"""Output nothing.
The lax options are included in the normal option parser, so under
normal usage, we don't need to print the lax options.
"""
pass
def print_lax_help(self):
"""Output the basic options available to every command.
This just redirects to the default print_help() behavior.
"""
OptionParser.print_help(self)
def _process_args(self, largs, rargs, values):
"""
Overrides OptionParser._process_args to exclusively handle default
options and ignore args and other options.
This overrides the behavior of the super class, which stop parsing
at the first unrecognized option.
"""
while rargs:
arg = rargs[0]
try:
if arg[0:2] == "--" and len(arg) > 2:
# process a single long option (possibly with value(s))
# the superclass code pops the arg off rargs
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
# the superclass code pops the arg off rargs
self._process_short_opts(rargs, values)
else:
# it's either a non-default option or an arg
# either way, add it to the args list so we can keep
# dealing with options
del rargs[0]
raise Exception
except: # Needed because we might need to catch a SystemExit
largs.append(arg)
class ManagementUtility(object):
"""
Encapsulates the logic of the django-admin.py and manage.py utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
def main_help_text(self, commands_only=False):
"""
Returns the script's main help text, as a string.
"""
if commands_only:
usage = sorted(get_commands().keys())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = collections.defaultdict(lambda: [])
for name, app in six.iteritems(get_commands()):
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict.keys()):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
try:
from django.conf import settings
settings.INSTALLED_APPS
except ImproperlyConfigured as e:
usage.append(style.NOTICE(
"Note that only Django core commands are listed as settings "
"are not properly configured (error: %s)." % e))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin.py" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
(subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about this variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, a equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword-1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
try:
from django.conf import settings
# Get the last part of the dotted path as the app name.
options += [(a.split('.')[-1], 0) for a in settings.INSTALLED_APPS]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
subcommand_cls.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1)
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
version=get_version(),
option_list=BaseCommand.option_list)
self.autocomplete()
try:
options, args = parser.parse_args(self.argv)
handle_default_options(options)
except: # Needed because parser.parse_args can raise SystemExit
pass # Ignore any option errors at this point.
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
if subcommand == 'help':
if len(args) <= 2:
parser.print_lax_help()
sys.stdout.write(self.main_help_text() + '\n')
elif args[2] == '--commands':
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
else:
self.fetch_command(args[2]).print_help(self.prog_name, args[2])
elif subcommand == 'version':
sys.stdout.write(parser.get_version() + '\n')
# Special-cases: We want 'django-admin.py --version' and
# 'django-admin.py --help' to work, for backwards compatibility.
elif self.argv[1:] == ['--version']:
# LaxOptionParser already takes care of printing the version.
pass
elif self.argv[1:] in (['--help'], ['-h']):
parser.print_lax_help()
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
| bsd-3-clause | -5,019,545,367,211,549,000 | 38.105392 | 98 | 0.59812 | false |
Nexedi/neoppod | neo/neoctl/neoctl.py | 1 | 7980 | #
# Copyright (C) 2006-2019 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
from neo.lib import util
from neo.lib.app import BaseApplication, buildOptionParser
from neo.lib.connection import ClientConnection, ConnectionClosed
from neo.lib.protocol import ClusterStates, NodeStates, ErrorCodes, Packets
from .handler import CommandEventHandler
class NotReadyException(Exception):
pass
@buildOptionParser
class NeoCTL(BaseApplication):
connection = None
connected = False
@classmethod
def _buildOptionParser(cls):
# XXX: Use argparse sub-commands.
parser = cls.option_parser
parser.description = "NEO Control node"
parser('a', 'address', default='127.0.0.1:9999',
parse=lambda x: util.parseNodeAddress(x, 9999),
help="address of an admin node")
parser.argument('cmd', nargs=argparse.REMAINDER,
help="command to execute; if not supplied,"
" the list of available commands is displayed")
def __init__(self, address, **kw):
super(NeoCTL, self).__init__(**kw)
self.server = self.nm.createAdmin(address=address)
self.handler = CommandEventHandler(self)
self.response_queue = []
def __getConnection(self):
if not self.connected:
self.connection = ClientConnection(self, self.handler, self.server)
# Never delay reconnection to master. This speeds up unit tests
# and it should not change anything for normal use.
try:
self.connection.setReconnectionNoDelay()
except ConnectionClosed:
self.connection = None
while not self.connected:
if self.connection is None:
raise NotReadyException('not connected')
self.em.poll(1)
return self.connection
def __ask(self, packet):
# TODO: make thread-safe
connection = self.__getConnection()
connection.ask(packet)
response_queue = self.response_queue
assert len(response_queue) == 0
while self.connected:
self.em.poll(1)
if response_queue:
break
else:
raise NotReadyException, 'Connection closed'
response = response_queue.pop()
if response[0] == Packets.Error and \
response[1] == ErrorCodes.NOT_READY:
raise NotReadyException(response[2])
return response
def enableStorageList(self, uuid_list):
"""
Put all given storage nodes in "running" state.
"""
packet = Packets.AddPendingNodes(uuid_list)
response = self.__ask(packet)
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def tweakPartitionTable(self, uuid_list=(), dry_run=False):
response = self.__ask(Packets.TweakPartitionTable(dry_run, uuid_list))
if response[0] != Packets.AnswerTweakPartitionTable:
raise RuntimeError(response)
return response[1:]
def setNumReplicas(self, nr):
response = self.__ask(Packets.SetNumReplicas(nr))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def setClusterState(self, state):
"""
Set cluster state.
"""
packet = Packets.SetClusterState(state)
response = self.__ask(packet)
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def _setNodeState(self, node, state):
"""
Kill node, or remove it permanently
"""
response = self.__ask(Packets.SetNodeState(node, state))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def getClusterState(self):
"""
Get cluster state.
"""
packet = Packets.AskClusterState()
response = self.__ask(packet)
if response[0] != Packets.AnswerClusterState:
raise RuntimeError(response)
return response[1]
def getLastIds(self):
response = self.__ask(Packets.AskLastIDs())
if response[0] != Packets.AnswerLastIDs:
raise RuntimeError(response)
return response[1:]
def getLastTransaction(self):
response = self.__ask(Packets.AskLastTransaction())
if response[0] != Packets.AnswerLastTransaction:
raise RuntimeError(response)
return response[1]
def getRecovery(self):
response = self.__ask(Packets.AskRecovery())
if response[0] != Packets.AnswerRecovery:
raise RuntimeError(response)
return response[1:]
def getNodeList(self, node_type=None):
"""
Get a list of nodes, filtering with given type.
"""
packet = Packets.AskNodeList(node_type)
response = self.__ask(packet)
if response[0] != Packets.AnswerNodeList:
raise RuntimeError(response)
return response[1] # node_list
def getPartitionRowList(self, min_offset=0, max_offset=0, node=None):
"""
Get a list of partition rows, bounded by min & max and involving
given node.
"""
packet = Packets.AskPartitionList(min_offset, max_offset, node)
response = self.__ask(packet)
if response[0] != Packets.AnswerPartitionList:
raise RuntimeError(response)
return response[1:]
def startCluster(self):
"""
Set cluster into "verifying" state.
"""
return self.setClusterState(ClusterStates.VERIFYING)
def killNode(self, node):
return self._setNodeState(node, NodeStates.DOWN)
def dropNode(self, node):
return self._setNodeState(node, NodeStates.UNKNOWN)
def getPrimary(self):
"""
Return the primary master UUID.
"""
packet = Packets.AskPrimary()
response = self.__ask(packet)
if response[0] != Packets.AnswerPrimary:
raise RuntimeError(response)
return response[1]
def repair(self, *args):
response = self.__ask(Packets.Repair(*args))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def truncate(self, tid):
response = self.__ask(Packets.Truncate(tid))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def checkReplicas(self, *args):
response = self.__ask(Packets.CheckReplicas(*args))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def flushLog(self):
conn = self.__getConnection()
conn.send(Packets.FlushLog())
while conn.pending():
self.em.poll(1)
def getMonitorInformation(self):
response = self.__ask(Packets.AskMonitorInformation())
if response[0] != Packets.AnswerMonitorInformation:
raise RuntimeError(response)
return response[1:]
| gpl-2.0 | -5,549,362,815,099,683,000 | 34.625 | 79 | 0.624561 | false |
klahnakoski/MySQL-to-S3 | vendor/mo_times/dates.py | 1 | 13619 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import math
import re
from datetime import datetime, date, timedelta
from decimal import Decimal
from time import time as _time
from mo_dots import Null
from mo_future import unichr, text_type, long
from mo_logs import Except
from mo_logs.strings import deformat
from mo_times.durations import Duration, MILLI_VALUES
from mo_times.vendor.dateutil.parser import parse as parse_date
_utcnow = datetime.utcnow
try:
import pytz
except Exception:
pass
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
class Date(object):
__slots__ = ["unix"]
MIN = None
MAX = None
def __new__(cls, *args, **kwargs):
if not args or (len(args) == 1 and args[0] == None):
return Null
return parse(*args)
def __init__(self, *args):
if self.unix is None:
self.unix = parse(*args).unix
def __nonzero__(self):
return True
def floor(self, duration=None):
if duration is None: # ASSUME DAY
return _unix2Date(math.floor(self.unix / 86400) * 86400)
elif duration.month:
dt = unix2datetime(self.unix)
month = int(math.floor((dt.year*12+dt.month-1) / duration.month) * duration.month)
year = int(math.floor(month/12))
month -= 12*year
return Date(datetime(year, month+1, 1))
elif duration.milli % (7 * 86400000) == 0:
offset = 4*86400
return _unix2Date(math.floor((self.unix + offset) / duration.seconds) * duration.seconds - offset)
else:
return _unix2Date(math.floor(self.unix / duration.seconds) * duration.seconds)
def format(self, format="%Y-%m-%d %H:%M:%S"):
try:
return unix2datetime(self.unix).strftime(format)
except Exception as e:
from mo_logs import Log
Log.error("Can not format {{value}} with {{format}}", value=unix2datetime(self.unix), format=format, cause=e)
@property
def milli(self):
return self.unix*1000
@property
def hour(self):
"""
:return: HOUR (int) IN THE GMT DAY
"""
return int(int(self.unix)/60/60 % 24)
def addDay(self):
return Date(unix2datetime(self.unix) + timedelta(days=1))
def add(self, other):
if other==None:
return Null
elif isinstance(other, (datetime, date)):
return _unix2Date(self.unix - datetime2unix(other))
elif isinstance(other, Date):
return _unix2Date(self.unix - other.unix)
elif isinstance(other, timedelta):
return Date(unix2datetime(self.unix) + other)
elif isinstance(other, Duration):
if other.month:
value = unix2datetime(self.unix)
if (value+timedelta(days=1)).month != value.month:
# LAST DAY OF MONTH
output = add_month(value+timedelta(days=1), other.month) - timedelta(days=1)
return Date(output)
else:
day = value.day
num_days = (add_month(datetime(value.year, value.month, 1), other.month+1) - timedelta(days=1)).day
day = min(day, num_days)
curr = set_day(value, day)
output = add_month(curr, other.month)
return Date(output)
else:
return _unix2Date(self.unix + other.seconds)
else:
from mo_logs import Log
Log.error("can not subtract {{type}} from Date", type=other.__class__.__name__)
@staticmethod
def now():
return _unix2Date(_time())
@staticmethod
def eod():
"""
RETURN END-OF-TODAY (WHICH IS SAME AS BEGINNING OF TOMORROW)
"""
return _unix2Date(Date.today().unix + 86400)
@staticmethod
def today():
return _unix2Date(math.floor(_time() / 86400) * 86400)
@staticmethod
def range(min, max, interval):
v = min
while v < max:
yield v
v = v + interval
def __str__(self):
return str(unix2datetime(self.unix))
def __repr__(self):
return unix2datetime(self.unix).__repr__()
def __sub__(self, other):
if other == None:
return None
if isinstance(other, datetime):
return Duration(self.unix - Date(other).unix)
if isinstance(other, Date):
return Duration(self.unix - other.unix)
return self.add(-other)
def __lt__(self, other):
other = Date(other)
return self.unix < other.unix
def __eq__(self, other):
if other == None:
return Null
try:
return other.unix == self.unix
except Exception:
pass
try:
return Date(other).unix == self.unix
except Exception:
return False
def __le__(self, other):
other = Date(other)
return self.unix <= other.unix
def __gt__(self, other):
other = Date(other)
return self.unix > other.unix
def __ge__(self, other):
other = Date(other)
return self.unix >= other.unix
def __add__(self, other):
return self.add(other)
def __data__(self):
return self.unix
@classmethod
def min(cls, *values):
output = Null
for v in values:
if output == None and v != None:
output = v
elif v < output:
output = v
return output
def parse(*args):
try:
if len(args) == 1:
a0 = args[0]
if isinstance(a0, (datetime, date)):
output = _unix2Date(datetime2unix(a0))
elif isinstance(a0, Date):
output = _unix2Date(a0.unix)
elif isinstance(a0, (int, long, float, Decimal)):
a0 = float(a0)
if a0 > 9999999999: # WAY TOO BIG IF IT WAS A UNIX TIMESTAMP
output = _unix2Date(a0 / 1000)
else:
output = _unix2Date(a0)
elif isinstance(a0, text_type) and len(a0) in [9, 10, 12, 13] and is_integer(a0):
a0 = float(a0)
if a0 > 9999999999: # WAY TOO BIG IF IT WAS A UNIX TIMESTAMP
output = _unix2Date(a0 / 1000)
else:
output = _unix2Date(a0)
elif isinstance(a0, text_type):
output = unicode2Date(a0)
else:
output = _unix2Date(datetime2unix(datetime(*args)))
else:
if isinstance(args[0], text_type):
output = unicode2Date(*args)
else:
output = _unix2Date(datetime2unix(datetime(*args)))
return output
except Exception as e:
from mo_logs import Log
Log.error("Can not convert {{args}} to Date", args=args, cause=e)
def add_month(offset, months):
month = int(offset.month+months-1)
year = offset.year
if not 0 <= month < 12:
r = _mod(month, 12)
year += int((month - r) / 12)
month = r
month += 1
output = datetime(
year=year,
month=month,
day=offset.day,
hour=offset.hour,
minute=offset.minute,
second=offset.second,
microsecond=offset.microsecond
)
return output
def set_day(offset, day):
output = datetime(
year=offset.year,
month=offset.month,
day=day,
hour=offset.hour,
minute=offset.minute,
second=offset.second,
microsecond=offset.microsecond
)
return output
def parse_time_expression(value):
def simple_date(sign, dig, type, floor):
if dig or sign:
from mo_logs import Log
Log.error("can not accept a multiplier on a datetime")
if floor:
return Date(type).floor(Duration(floor))
else:
return Date(type)
terms = re.match(r'(\d*[|\w]+)\s*([+-]\s*\d*[|\w]+)*', value).groups()
sign, dig, type = re.match(r'([+-]?)\s*(\d*)([|\w]+)', terms[0]).groups()
if "|" in type:
type, floor = type.split("|")
else:
floor = None
if type in MILLI_VALUES.keys():
value = Duration(dig+type)
else:
value = simple_date(sign, dig, type, floor)
for term in terms[1:]:
if not term:
continue
sign, dig, type = re.match(r'([+-])\s*(\d*)([|\w]+)', term).groups()
if "|" in type:
type, floor = type.split("|")
else:
floor = None
op = {"+": "__add__", "-": "__sub__"}[sign]
if type in MILLI_VALUES.keys():
if floor:
from mo_logs import Log
Log.error("floor (|) of duration not accepted")
value = value.__getattribute__(op)(Duration(dig+type))
else:
value = value.__getattribute__(op)(simple_date(sign, dig, type, floor))
return value
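# Added clarification: parse_time_expression() handles simple keyword arithmetic
# of the general form  <base>[|<floor_unit>] {+|-} [<n>]<unit> ...  where <base>
# is a keyword such as "now" or "today" and <unit> is one of the duration names
# in MILLI_VALUES.  Assuming the usual unit names, expressions like "today-2day"
# or "now|hour" should therefore resolve to a Date.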
def unicode2Date(value, format=None):
"""
CONVERT UNICODE STRING TO UNIX TIMESTAMP VALUE
"""
## http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
if value == None:
return None
if format != None:
try:
if format.endswith("%S.%f") and "." not in value:
value += ".000"
return _unix2Date(datetime2unix(datetime.strptime(value, format)))
except Exception as e:
from mo_logs import Log
Log.error("Can not format {{value}} with {{format}}", value=value, format=format, cause=e)
value = value.strip()
if value.lower() == "now":
return _unix2Date(datetime2unix(_utcnow()))
elif value.lower() == "today":
return _unix2Date(math.floor(datetime2unix(_utcnow()) / 86400) * 86400)
elif value.lower() in ["eod", "tomorrow"]:
return _unix2Date(math.floor(datetime2unix(_utcnow()) / 86400) * 86400 + 86400)
if any(value.lower().find(n) >= 0 for n in ["now", "today", "eod", "tomorrow"] + list(MILLI_VALUES.keys())):
return parse_time_expression(value)
try: # 2.7 DOES NOT SUPPORT %z
local_value = parse_date(value) #eg 2014-07-16 10:57 +0200
return _unix2Date(datetime2unix((local_value - local_value.utcoffset()).replace(tzinfo=None)))
except Exception as e:
e = Except.wrap(e) # FOR DEBUGGING
pass
formats = [
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f"
]
for f in formats:
try:
return _unix2Date(datetime2unix(datetime.strptime(value, f)))
except Exception:
pass
deformats = [
"%Y-%m",# eg 2014-07-16 10:57 +0200
"%Y%m%d",
"%d%m%Y",
"%d%m%y",
"%d%b%Y",
"%d%b%y",
"%d%B%Y",
"%d%B%y",
"%Y%m%d%H%M%S",
"%Y%m%dT%H%M%S",
"%d%m%Y%H%M%S",
"%d%m%y%H%M%S",
"%d%b%Y%H%M%S",
"%d%b%y%H%M%S",
"%d%B%Y%H%M%S",
"%d%B%y%H%M%S"
]
value = deformat(value)
for f in deformats:
try:
return unicode2Date(value, format=f)
except Exception:
pass
else:
from mo_logs import Log
Log.error("Can not interpret {{value}} as a datetime", value= value)
DATETIME_EPOCH = datetime(1970, 1, 1)
DATE_EPOCH = date(1970, 1, 1)
def datetime2unix(value):
try:
if value == None:
return None
elif isinstance(value, datetime):
diff = value - DATETIME_EPOCH
return diff.total_seconds()
elif isinstance(value, date):
diff = value - DATE_EPOCH
return diff.total_seconds()
else:
from mo_logs import Log
Log.error("Can not convert {{value}} of type {{type}}", value=value, type=value.__class__)
except Exception as e:
from mo_logs import Log
Log.error("Can not convert {{value}}", value=value, cause=e)
def unix2datetime(unix):
return datetime.utcfromtimestamp(unix)
def unix2Date(unix):
if not isinstance(unix, float):
from mo_logs import Log
Log.error("problem")
return _unix2Date(unix)
def _unix2Date(unix):
output = object.__new__(Date)
output.unix = unix
return output
delchars = "".join(c for c in map(unichr, range(256)) if not c.isalnum())
def deformat(value):
"""
REMOVE NON-ALPHANUMERIC CHARACTERS
"""
output = []
for c in value:
if c in delchars:
continue
output.append(c)
return "".join(output)
Date.MIN = Date(datetime(1, 1, 1))
Date.MAX = Date(datetime(2286, 11, 20, 17, 46, 39))
def is_integer(s):
if s is True or s is False:
return False
try:
if float(s) == round(float(s), 0):
return True
return False
except Exception:
return False
def _mod(value, mod=1):
"""
RETURN NON-NEGATIVE MODULO
RETURN None WHEN GIVEN INVALID ARGUMENTS
"""
if value == None:
return None
elif mod <= 0:
return None
elif value < 0:
return (value % mod + mod) % mod
else:
return value % mod
| mpl-2.0 | -7,967,396,073,122,584,000 | 27.080412 | 121 | 0.543432 | false |
coolsvap/clapper | ansible-tests/validations/library/discovery_diff.py | 1 | 1534 | #!/usr/bin/env python
import json
import os
import sys
from ansible.module_utils.basic import *
from subprocess import Popen, PIPE
DOCUMENTATION = '''
---
module: discovery_diff
short_description: Provide difference in hardware configuration
author: "Swapnil Kulkarni, @coolsvap"
'''
def get_node_hardware_data(hw_id, upenv):
'''Read the inspector data about the given node from Swift'''
p = Popen(('swift', 'download', '--output', '-', 'ironic-inspector', hw_id),
env=upenv, stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
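# Added note: the helper above shells out to the `swift` CLI to download the
# introspection blob for a single node and returns the parsed JSON; when the
# download fails (non-zero exit status) it implicitly returns None, so callers
# should tolerate missing values.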
def main():
module = AnsibleModule(
argument_spec={}
)
upenv = os.environ.copy()
with open("files/env_vars.json") as data_file:
env_data = json.load(data_file)
upenv.update(env_data)
p = Popen(('swift', 'list', 'ironic-inspector'), env=upenv, stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
print "Error running `swift list ironic-inspector`"
print p.stderr.read()
sys.exit(1)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
hw_dicts = {}
for hwid in hardware_ids:
hw_dicts[hwid] = get_node_hardware_data(hwid, upenv)
# TODO(coolsvap) find a way to compare the obtained data in meaningful manner
result = {
'changed': True,
'msg': 'Discovery data for %d servers' % len(hw_dicts.keys()),
'results': hw_dicts,
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| apache-2.0 | -5,402,917,734,117,416,000 | 24.566667 | 89 | 0.6206 | false |
simvisage/oricreate | docs/howtos/ex08_rigid_facets/sim013_single_fold_quad_psi_cntl.py | 1 | 4000 | r'''
Fold control using dihedral angle with quadrilateral facets
-----------------------------------------------------------
This example shows the folding process controlled by a dihedral
angle between two facets. In addition to the previous
example, this one introduces quadrilateral facets that
are symmetrically composed of triangles with fixed fold line - i.e.
dihedral angle ''psi'' is equal to zero. The example represents
a standard Miura-Ori vertex with all associated kinematic constraints.
'''
import numpy as np
from oricreate.api import \
SimulationTask, SimulationConfig, \
GuConstantLength, GuDofConstraints, GuPsiConstraints, fix, \
FTV, FTA
def create_cp_factory():
# begin
from oricreate.api import CreasePatternState, CustomCPFactory
x = np.array([[-1, 0, 0],
[0, 0, 0],
[1, 1, 0],
[2, 0, 0],
[1, -1, 0],
[-1, 1, 0],
[-1, -1, 0],
[2, 1, 0],
[2, -1, 0],
], dtype='float_')
L = np.array([[0, 1], [1, 2], [1, 5],
[1, 3], [2, 3],
[1, 4], [3, 4],
#[1, 5],
[6, 1],
[0, 5], [2, 5],
[0, 6], [4, 6],
[3, 7], [2, 7],
[3, 8], [4, 8]
],
dtype='int_')
F = np.array([[5, 1, 2],
[1, 3, 2],
[1, 4, 3],
[1, 4, 6],
[0, 1, 5],
[0, 1, 6],
[3, 2, 7],
[3, 4, 8],
], dtype='int_')
cp = CreasePatternState(X=x,
L=L,
F=F
)
cp_factory = CustomCPFactory(formed_object=cp)
# end
return cp_factory
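# Added note: in the factory above, `x` holds the nine vertex coordinates of a
# Miura-Ori unit, `L` lists the crease/boundary lines as node-index pairs, and
# `F` lists the eight triangles that pair up into the four quadrilateral facets
# described in the module docstring.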
if __name__ == '__main__':
cp_factory_task = create_cp_factory()
cp = cp_factory_task.formed_object
# Link the crease factory with the constraint client
gu_constant_length = GuConstantLength()
psi_max = np.pi * .49
gu_psi_constraints = \
GuPsiConstraints(forming_task=cp_factory_task,
psi_constraints=[([(2, 1.0)], 0.0),
([(7, 1.0)], 0.0),
([(4, 1.0)], 0.0),
([(6, 1.0)], 0.0),
([(3, 1.0)], lambda t: -psi_max * t),
#([(5, 1.0)], lambda t: psi_max * t),
])
dof_constraints = fix([0], [1]) + fix([1], [0, 1, 2]) \
+ fix([2, 4], [2])
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
sim_config = SimulationConfig(goal_function_type='none',
gu={'cl': gu_constant_length,
'u': gu_dof_constraints,
'psi': gu_psi_constraints},
acc=1e-8, MAX_ITER=100)
sim_task = SimulationTask(previous_task=cp_factory_task,
config=sim_config,
n_steps=25)
cp.u[(0, 3), 2] = -0.1
cp.u[(1), 2] = 0.1
sim_task.u_1
cp = sim_task.formed_object
ftv = FTV()
ftv.add(sim_task.sim_history.viz3d_dict['node_numbers'], order=5)
ftv.add(sim_task.sim_history.viz3d)
ftv.add(gu_dof_constraints.viz3d)
fta = FTA(ftv=ftv)
fta.init_view(a=200, e=35, d=5, f=(0, 0, 0), r=0)
fta.add_cam_move(a=200, e=34, n=5, d=5, r=0,
duration=10,
vot_fn=lambda cmt: np.linspace(0, 1, 4),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.plot()
fta.render()
fta.configure_traits()
| gpl-3.0 | 4,225,928,669,675,981,000 | 32.613445 | 79 | 0.4195 | false |
arielalmendral/ert | python/python/ert/enkf/plot_data/ensemble_plot_gen_kw_vector.py | 2 | 1627 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'ensemble_plot_gen_kw_vector.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from ert.enkf import EnkfPrototype
class EnsemblePlotGenKWVector(BaseCClass):
TYPE_NAME = "ensemble_plot_gen_kw_vector"
_size = EnkfPrototype("int enkf_plot_gen_kw_vector_get_size(ensemble_plot_gen_kw_vector)")
_get_value = EnkfPrototype("double enkf_plot_gen_kw_vector_iget(ensemble_plot_gen_kw_vector, int)")
def __init__(self):
raise NotImplementedError("Class can not be instantiated directly!")
def __len__(self):
""" @rtype: int """
return self._size()
def getValue(self, index):
""" @rtype: float """
return self[index]
def __iter__(self):
cur = 0
while cur < len(self):
yield self[cur]
cur += 1
def __getitem__(self, index):
""" @rtype: float """
return self._get_value(index)
def __repr__(self):
return 'EnsemblePlotGenKWVector(size = %d) %s' % (len(self), self._ad_str())
| gpl-3.0 | -4,897,229,285,005,481,000 | 32.204082 | 103 | 0.657652 | false |
OMS-NetZero/FAIR | fair/tools/magicc.py | 1 | 6123 | from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
def _import_emis_file(rcp):
if rcp in ['rcp3pd', 'rcp26']:
from ..RCPs.rcp26 import Emissions as rcp_emis
elif rcp=='rcp45':
from ..RCPs.rcp45 import Emissions as rcp_emis
elif rcp in ['rcp6', 'rcp60']:
from ..RCPs.rcp60 import Emissions as rcp_emis
elif rcp=='rcp85':
from ..RCPs.rcp85 import Emissions as rcp_emis
else:
raise ValueError('rcp must be rcp26, rcp45, rcp60 or rcp85')
return rcp_emis
def scen_open(filename,
include_cfcs='rcp45',
startyear=1765,
harmonise=None):
"""
Opens a MAGICC6 .SCEN file and extracts the data. Interpolates linearly
between non-consecutive years in the SCEN file. Fills in chlorinated gases
from a specified RCP scenario or from custom emissions.
Inputs:
filename: the .SCEN file to open
Keywords:
include_cfcs: string, False, or nt x 16 numpy array
MAGICC files do not come loaded with CFCs (indices 24-39).
They are given in the harmonised files at
http://www.pik-potsdam.de/~mmalte/rcps/.
- Specify 'rcp3pd', 'rcp45', 'rcp6' or 'rcp85' to use these RCPs.
- Use False to ignore and create a 24-species emission file.
- Provide an array to tack your own chlorinated gases onto the SCEN
startyear: First year of output file. If before first year of the SCEN
file, use RCP4.5 to fill
harmonise: None, or year
Linearly interpolate between 2000 in the RCP file and the specified
year. If None, do not harmonise
Returns:
nt x 40 numpy emissions array
nt is defined as <last year of SCEN file> -
<earlier of startyear and first year of SCEN file> + 1
It is assumed that the .SCEN files follow the formatting convention on
the MAGICC wiki at
http://wiki.magicc.org/index.php?title=Creating_MAGICC_Scenario_Files.
"""
with open(filename) as f:
# First line is the number of time steps in the SCEN file
str_nt = f.readline().strip()
nt = int(str_nt)
# Next 6 lines are unused by FaIR.
for i in range(6):
f.readline()
# Eighth line is the column headers. When object orientation is
# implemented this will be important.
headers = f.readline().split()
# Ninth line is the units. Again this will be important in OO-FaIR
units = f.readline().split()
# Now the data!
scen_emissions = np.genfromtxt(filename, skip_header=9, max_rows=nt)
emissions = np.copy(scen_emissions)
scen_years = scen_emissions[:,0]
# Interpolate between non-consecutive years in SCEN file
f = interp1d(scen_years, emissions[:,1:], kind='linear', axis=0,
assume_sorted=True, bounds_error=True)
full_years = np.arange(scen_years[0], scen_years[-1]+1)
emissions_filled = np.hstack((full_years[:,None], f(full_years)))
# Add CFCs if requested
if type(include_cfcs) is np.ndarray:
if include_cfcs.shape != (len(full_years), 16):
raise ValueError("If chlorinated gas emissions are provided by " +
"the user they should be of size nt x 16 where nt is the number "+
"of consecutive years. Size is %s." % str(include_cfcs.shape))
emissions_filled = np.append(emissions_filled, include_cfcs, axis=1)
elif include_cfcs==False:
pass
elif include_cfcs.lower()[:3]=='rcp':
rcp_emis = _import_emis_file(include_cfcs.lower()).emissions
# Need to ensure only years present in the SCEN file are taken
# Cheers: https://stackoverflow.com/questions/3522946/
# using-numpy-arrays-as-lookup-tables
if int(scen_years[0])<1765:
raise ValueError("CFCs can only be infilled from RCPs as far "+
"back as 1765 at present")
rcp_years = np.arange(scen_years[0], scen_years[-1]+1)
mapping = dict(zip(rcp_emis[:,0], range(rcp_emis.shape[0])))
rcp_cfcs = np.array([rcp_emis[mapping[key],24:] for key in rcp_years])
emissions_filled = np.append(emissions_filled, rcp_cfcs, axis=1)
else:
raise ValueError("include_cfcs should be an nt x 16 numpy array, a " +
"string (rcp3pd, rcp45, rcp6 or rcp85) or False.")
# Fill in any pre-SCEN years from RCP4.5. All pathways are identical <2000
if scen_years[0]>startyear:
if scen_years[0]>2000:
raise ValueError("Can only fill in history unambiguously if " +
"first year in SCEN file is 2000 or earlier. You have requested "+
"startyear=%d and the first year in SCEN file is %d"
% (startyear, scen_years[0]))
else:
# tack RCP45 on to beginning
rcp_emis = _import_emis_file('rcp45').emissions
rcp_years = np.arange(startyear, scen_years[0])
mapping = dict(zip(rcp_emis[:,0], range(rcp_emis.shape[0])))
            if include_cfcs is False:  # avoid element-wise comparison when an ndarray was passed
rcp_all = np.array([rcp_emis[mapping[key],:24] for key in rcp_years])
else:
rcp_all = np.array([rcp_emis[mapping[key],:] for key in rcp_years])
emissions_filled = np.insert(emissions_filled, 0, rcp_all, axis=0)
# harmonise?
if harmonise is not None:
harmonise = int(harmonise)
if harmonise < 2000:
raise ValueError("Cannot harmonise before 2000.")
elif harmonise > scen_years[-1]:
raise ValueError("Cannot harmonise after last year of " +
"input dataset")
rcp_emis_2000 = rcp_emis[2000-startyear,:]
for j in range(1,emissions_filled.shape[1]):
f = interp1d((2000,harmonise), (rcp_emis_2000[j],
emissions_filled[harmonise-startyear,j]))
emissions_filled[2000-startyear:harmonise-startyear,j] = f(
np.arange(2000,harmonise))
return emissions_filled
| apache-2.0 | 8,400,502,087,455,092,000 | 41.520833 | 85 | 0.614895 | false |
NCI-GDC/gdcdatamodel | migrations/update_case_cache_append_only.py | 1 | 5901 | #!/usr/bin/env python
from __future__ import print_function
from psqlgraph import Node, Edge
from gdcdatamodel import models as md
CACHE_EDGES = {
Node.get_subclass_named(edge.__src_class__): edge
for edge in Edge.get_subclasses()
if 'RelatesToCase' in edge.__name__
}
LEVEL_1_SQL = """
INSERT INTO {cache_edge_table} (src_id, dst_id, _props, _sysan, acl)
SELECT {cls_table}.node_id, node_case.node_id,
'{{}}'::jsonb, '{{}}'::jsonb, '{{}}'::text[]
FROM {cls_table}
-- Step directly to case
JOIN {cls_to_case_edge_table}
ON {cls_table}.node_id = {cls_to_case_edge_table}.src_id
JOIN node_case
ON node_case.node_id = {cls_to_case_edge_table}.dst_id
-- Append only, e.g. insert only those missing
WHERE NOT EXISTS (
SELECT 1 FROM {cache_edge_table}
WHERE {cls_table}.node_id = {cache_edge_table}.src_id
AND node_case.node_id = {cache_edge_table}.dst_id)
"""
APPEND_CACHE_FROM_PARENT_SQL = """
INSERT INTO {cache_edge_table} (src_id, dst_id, _props, _sysan, acl)
SELECT DISTINCT {cls_table}.node_id, node_case.node_id,
'{{}}'::jsonb, '{{}}'::jsonb, '{{}}'::text[]
FROM {cls_table}
-- Step to parent
JOIN {cls_to_parent_edge_table}
ON {cls_table}.node_id = {cls_to_parent_edge_table}.src_id
JOIN {parent_table}
ON {parent_table}.node_id = {cls_to_parent_edge_table}.dst_id
-- Step to parent's related cases
JOIN {parent_cache_edge_table}
ON {parent_table}.node_id = {parent_cache_edge_table}.src_id
JOIN node_case
ON node_case.node_id = {parent_cache_edge_table}.dst_id
-- Append only, e.g. insert only those missing
WHERE NOT EXISTS (
SELECT 1 FROM {cache_edge_table}
WHERE {cls_table}.node_id = {cache_edge_table}.src_id
AND node_case.node_id = {cache_edge_table}.dst_id)
"""
def max_distances_from_case():
"""Breadth first search for max depth every class is from case"""
distances = {}
to_visit = [(md.Case, -1)]
while to_visit:
cls, level = to_visit.pop(0)
if cls not in distances:
children = (
link['src_type']
for _, link in cls._pg_backrefs.items()
)
to_visit.extend((child, level+1) for child in children)
distances[cls] = max(distances.get(cls, level+1), level)
return distances
def get_levels():
"""Returns a map of levels -> [classes] where a level is the max
distance a class is from a case
"""
distances = max_distances_from_case()
distinct_distances = set(distances.values())
levels = {
level: [
cls for cls, distance in distances.items() if distance == level
] for level in distinct_distances
}
return levels
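# Added sketch of the expected shape (class lists are illustrative):
#
#     get_levels() -> {1: [classes one edge from Case],
#                      2: [classes two edges from Case], ...}
#
# Level-1 classes are seeded from their direct case edges; deeper levels inherit
# their parents' case edges (see append_cache_from_parents below).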
def append_cache_from_parent(graph, child, parent):
"""Creates case cache edges from :param:`parent` that do not already
exist for :param:`child`
Add child cache edges for:
{child -> parent -> case} / {child -> case}
"""
description = child.label + ' -> ' + parent.label + ' -> case'
if parent not in CACHE_EDGES:
print("skipping:", description, ": parent is not cached")
elif child not in CACHE_EDGES:
print("skipping:", description, ": child is not cached")
elif child is parent:
print("skipping:", description, ": cycle")
else:
print(description)
for cls_to_parent_edge in get_edges_between(child, parent):
statement = APPEND_CACHE_FROM_PARENT_SQL.format(
cache_edge_table=CACHE_EDGES[child].__tablename__,
cls_table=child.__tablename__,
cls_to_parent_edge_table=cls_to_parent_edge.__tablename__,
parent_table=parent.__tablename__,
parent_cache_edge_table=CACHE_EDGES[parent].__tablename__,
)
graph.current_session().execute(statement)
def append_cache_from_parents(graph, cls):
"""Creates case cache edges that all parents have that do not already
exist
"""
parents = {
link['dst_type']
for link in cls._pg_links.itervalues()
}
for parent in parents:
append_cache_from_parent(graph, cls, parent)
def get_edges_between(src, dst):
"""Returns all edges from src -> dst (directionality matters)"""
return [
edge
for edge in Edge.get_subclasses()
if edge.__src_class__ == src.__name__
and edge.__dst_class__ == dst.__name__
and edge not in CACHE_EDGES.values()
]
def seed_level_1(graph, cls):
"""Set the case cache for all nodes max 1 step from case"""
for case_edge in get_edges_between(cls, md.Case):
statement = LEVEL_1_SQL.format(
cache_edge_table=CACHE_EDGES[cls].__tablename__,
cls_table=cls.__tablename__,
cls_to_case_edge_table=case_edge.__tablename__,
)
print('Seeding {} through {}'.format(cls.get_label(), case_edge.__name__))
graph.current_session().execute(statement)
def update_case_cache_append_only(graph):
"""Server-side update case cache for all entities
1) Seed direct relationships from level L1 (1 step from case)
2) Visit all nodes in levels stepping out from case and for each
entity in that level L, add the related case edges from all
parents in level L-1 that do not already exist in level L
"""
cls_levels = get_levels()
for cls in Node.get_subclasses():
seed_level_1(graph, cls)
for level in sorted(cls_levels)[2:]:
print("\n\nLevel:", level)
for cls in cls_levels[level]:
append_cache_from_parents(graph, cls)
def main():
print("No main() action defined, please manually call "
"update_case_cache_append_only(graph)")
if __name__ == '__main__':
main()
| apache-2.0 | -4,557,591,127,347,263,500 | 28.068966 | 82 | 0.597356 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/widget/error_wdg.py | 1 | 6906 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = ['Error403Wdg', 'Error404Wdg' ]
from pyasm.web import Widget, DivWdg, HtmlElement, Table, SpanWdg, WebContainer
from .input_wdg import HiddenWdg, TextWdg, PasswordWdg
from .web_wdg import SignOutLinkWdg
from .header_wdg import ProjectSwitchWdg
class ErrorWdg(Widget):
LOGIN_MSG = 'login_message'
def get_display(self):
box = DivWdg(css='login')
box.add_style("margin-top: auto")
box.add_style("margin-bottom: auto")
box.add_style("text-align: center")
script = HtmlElement.script('''function login(e) {
if (!e) var e = window.event;
if (e.keyCode == 13) {
submit_icon_button('Submit');
}}
''')
div = DivWdg()
div.add_style("margin: 0px 0px")
div.add_class("centered")
div.add( HtmlElement.br(3) )
div.add(self.get_error_wdg() )
box.add(div)
widget = Widget()
#widget.add( HtmlElement.br(3) )
table = Table()
table.add_style("width: 100%")
table.add_style("height: 85%")
table.add_row()
td = table.add_cell()
td.add_style("vertical-align: middle")
td.add_style("text-align: center")
td.add_style("background: transparent")
td.add(box)
widget.add(table)
return widget
def get_error_wdg(self):
'''function to override'''
pass
def set_message(self, message):
self.message = message
def set_status(self, status):
self.status = status
class Error404Wdg(ErrorWdg):
    '''Displays the error status and message passed to it (not necessarily 404).'''
def __init__(self):
# just defaults to 404
self.status = 404
self.message = ''
super(Error404Wdg, self).__init__()
def get_error_wdg(self):
kwargs = {
}
from tactic.ui.panel import HashPanelWdg
widget = HashPanelWdg.get_widget_from_hash("/error404", return_none=True, kwargs=kwargs)
if widget:
return widget
div = DivWdg()
error_div = DivWdg()
error_div.add("<hr/>")
error_div.add("Error %s" % self.status)
error_div.add("<hr/>")
div.add(error_div)
error_div.add_style("font-size: 18px")
error_div.add_style("font-weight: bold")
error_div.add_style("padding: 10px")
error_div.add_style("width: auto")
error_div.add_color("background", "background", -3)
error_div.add_color("color", "color")
#error_div.add_border()
error_div.add_style("margin-left: 5px")
error_div.add_style("margin-right: 5px")
error_div.add_style("margin-top: -10px")
div.add("<br/>")
span = DivWdg()
#span.add_color("color", "color")
#span.add_style("color", "#FFF")
if self.status == 404:
span.add(HtmlElement.b("You have tried to access a url that is not recognized."))
else:
span.add(HtmlElement.b(self.message))
span.add(HtmlElement.br(2))
web = WebContainer.get_web()
root = web.get_site_root()
if self.message.startswith('No project ['):
label = 'You may need to correct the default_project setting in the TACTIC config.'
else:
label = "Go to the Main page for a list of valid projects"
span.add(label)
div.add(span)
div.add(HtmlElement.br())
from tactic.ui.widget import ActionButtonWdg
button_div = DivWdg()
button_div.add_style("width: 90px")
button_div.add_style("margin: 0px auto")
div.add(button_div)
button = ActionButtonWdg(title="Go to Main", tip='Click to go to main page')
button_div.add(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
document.location = '/';
'''
} )
button.add_event("onmouseup", "document.location='/'")
return div
class Error403Wdg(ErrorWdg):
    '''Displays the error status and message passed to it (not necessarily 403).'''
def __init__(self):
# just defaults to 404
self.status = 403
self.message = ''
super(Error403Wdg, self).__init__()
def get_error_wdg(self):
div = DivWdg()
error_div = DivWdg()
error_div.add("<hr/>")
error_div.add("Error %s - Permission Denied" % self.status)
error_div.add("<hr/>")
div.add(error_div)
error_div.add_style("font-size: 16px")
error_div.add_style("font-weight: bold")
error_div.add_style("width: 97%")
error_div.add_color("background", "background", -3)
error_div.add_border()
error_div.add_style("margin-left: 5px")
error_div.add_style("margin-top: -10px")
div.add("<br/>")
span = DivWdg()
#span.add_color("color", "color")
#span.add_style("color", "#FFF")
if self.status == 403:
span.add("<b>You have tried to access a url that is not permitted.</b>")
else:
span.add(HtmlElement.b(self.message))
span.add(HtmlElement.br(2))
web = WebContainer.get_web()
root = web.get_site_root()
span.add("Go back to the Main page for a list of valid projects")
div.add(span)
div.add(HtmlElement.br())
table = Table()
div.add(table)
table.add_row()
table.add_style("margin-left: auto")
table.add_style("margin-right: auto")
from tactic.ui.widget import ActionButtonWdg
button = ActionButtonWdg(title="Go to Main", tip='Click to go to main page')
table.add_cell(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
document.location = '/projects';
'''
} )
button.add_style("margin-left: auto")
button.add_style("margin-right: auto")
button = ActionButtonWdg(title="Sign Out", tip='Click to Sign Out')
table.add_cell(button)
button.add_behavior( {
'type': 'click_up',
'login': web.get_user_name(),
'cbjs_action': '''
var server = TacticServerStub.get();
server.execute_cmd("SignOutCmd", {login: bvr.login} );
window.location.href='%s';
''' % root
} )
button.add_style("margin-left: auto")
button.add_style("margin-right: auto")
return div
| epl-1.0 | 5,733,914,920,654,536,000 | 28.262712 | 96 | 0.5585 | false |
novafloss/populous | populous/cli.py | 1 | 2649 | import importlib
import logging
import click
import click_log
import six
from .loader import load_blueprint
from .exceptions import ValidationError, YAMLError, BackendError
logger = logging.getLogger('populous')
click_log.basic_config(logger)
def get_blueprint(files, **kwargs):
try:
return load_blueprint(*files, **kwargs)
except (YAMLError, ValidationError) as e:
raise click.ClickException(six.text_type(e))
except Exception as e:
raise click.ClickException("Unexpected error during the blueprint "
"loading: {}".format(e))
@click.group()
@click.version_option()
@click_log.simple_verbosity_option(logger)
def cli():
pass
@cli.group()
def run():
pass
def _generic_run(modulename, classname, files, **kwargs):
try:
try:
module = importlib.import_module(
'populous.backends.' + modulename,
package='populous.backends'
)
backend_cls = getattr(module, classname)
except (ImportError, AttributeError):
raise click.ClickException("Backend not found.")
backend = backend_cls(**kwargs)
blueprint = get_blueprint(files, backend=backend)
try:
with backend.transaction():
blueprint.generate()
logger.info("Closing DB transaction...")
finally:
backend.close()
logger.info("Have fun!")
except BackendError as e:
raise click.ClickException(six.text_type(e))
@run.command()
@click.option('--host', help="Database host address")
@click.option('--port', type=int, help="Database host port")
@click.option('--db', help="Database name")
@click.option('--user', help="Postgresql user name used to authenticate")
@click.option('--password', help="Postgresql password used to authenticate")
@click.argument('files', nargs=-1, required=True)
def postgres(host, port, db, user, password, files):
return _generic_run('postgres', 'Postgres', files, host=host, port=port,
db=db, user=user, password=password)
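# Added usage sketch -- assuming the console entry point is named `populous`,
# which is not defined in this module:
#
#     populous run postgres --host localhost --db mydb blueprint.yml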
@cli.command()
def generators():
"""
List all the available generators.
"""
from populous import generators
base = generators.Generator
for name in dir(generators):
generator = getattr(generators, name)
if isinstance(generator, type) and issubclass(generator, base):
name = generator.__name__
doc = (generator.__doc__ or '').strip()
if doc:
click.echo("{} - {}".format(name, doc))
else:
click.echo(name)
| mit | -7,277,352,776,441,271,000 | 26.309278 | 76 | 0.619857 | false |
highweb-project/highweb-webcl-html5spec | tools/android/loading/activity_lens.py | 1 | 11113 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Gives a picture of the CPU activity between timestamps.
When executed as a script, takes a loading trace, and prints the activity
breakdown for the request dependencies.
"""
import collections
import logging
import operator
import request_track
class ActivityLens(object):
"""Reconstructs the activity of the main renderer thread between requests."""
_SCRIPT_EVENT_NAMES = ('EvaluateScript', 'FunctionCall')
_PARSING_EVENT_NAMES = ('ParseHTML', 'ParseAuthorStyleSheet')
def __init__(self, trace):
"""Initializes an instance of ActivityLens.
Args:
trace: (LoadingTrace) loading trace.
"""
self._trace = trace
events = trace.tracing_track.GetEvents()
self._renderer_main_pid_tid = self._GetRendererMainThreadId(events)
self._tracing = self._trace.tracing_track.TracingTrackForThread(
self._renderer_main_pid_tid)
@classmethod
def _GetRendererMainThreadId(cls, events):
"""Returns the most active main renderer thread.
Several renderers may be running concurrently, but we assume that only one
    of them is busy during the time covered by the loading trace. It can be
selected by looking at the number of trace events generated.
Args:
events: [tracing.Event] List of trace events.
Returns:
(PID (int), TID (int)) of the busiest renderer main thread.
"""
events_count_per_pid_tid = collections.defaultdict(int)
main_renderer_thread_ids = set()
for event in events:
tracing_event = event.tracing_event
pid = event.tracing_event['pid']
tid = event.tracing_event['tid']
events_count_per_pid_tid[(pid, tid)] += 1
if (tracing_event['cat'] == '__metadata'
and tracing_event['name'] == 'thread_name'
and event.args['name'] == 'CrRendererMain'):
main_renderer_thread_ids.add((pid, tid))
pid_tid_events_counts = sorted(events_count_per_pid_tid.items(),
key=operator.itemgetter(1), reverse=True)
if (len(pid_tid_events_counts) > 1
and pid_tid_events_counts[0][1] < 2 * pid_tid_events_counts[1][1]):
logging.warning(
'Several active renderers (%d and %d with %d and %d events).'
% (pid_tid_events_counts[0][0][0], pid_tid_events_counts[1][0][0],
pid_tid_events_counts[0][1], pid_tid_events_counts[1][1]))
return pid_tid_events_counts[0][0]
def _OverlappingMainRendererThreadEvents(self, start_msec, end_msec):
return self._tracing.OverlappingEvents(start_msec, end_msec)
@classmethod
def _ClampedDuration(cls, event, start_msec, end_msec):
return max(0, (min(end_msec, event.end_msec)
- max(start_msec, event.start_msec)))
@classmethod
def _ThreadBusyness(cls, events, start_msec, end_msec):
"""Amount of time a thread spent executing from the message loop."""
busy_duration = 0
message_loop_events = [
e for e in events
if (e.tracing_event['cat'] == 'toplevel'
and e.tracing_event['name'] == 'MessageLoop::RunTask')]
for event in message_loop_events:
clamped_duration = cls._ClampedDuration(event, start_msec, end_msec)
busy_duration += clamped_duration
interval_msec = end_msec - start_msec
assert busy_duration <= interval_msec
return busy_duration
@classmethod
def _ScriptsExecuting(cls, events, start_msec, end_msec):
"""Returns the time during which scripts executed within an interval.
Args:
events: ([tracing.Event]) list of tracing events.
start_msec: (float) start time in ms, inclusive.
end_msec: (float) end time in ms, inclusive.
Returns:
A dict {URL (str) -> duration_msec (float)}. The dict may have a None key
for scripts that aren't associated with a URL.
"""
script_to_duration = collections.defaultdict(float)
script_events = [e for e in events
if ('devtools.timeline' in e.tracing_event['cat']
and (e.tracing_event['name']
in cls._SCRIPT_EVENT_NAMES))]
for event in script_events:
clamped_duration = cls._ClampedDuration(event, start_msec, end_msec)
script_url = event.args['data'].get('scriptName', None)
script_to_duration[script_url] += clamped_duration
return dict(script_to_duration)
@classmethod
def _FullyIncludedEvents(cls, events, event):
"""Return a list of events wholly included in the |event| span."""
(start, end) = (event.start_msec, event.end_msec)
result = []
for event in events:
if start <= event.start_msec < end and start <= event.end_msec < end:
result.append(event)
return result
@classmethod
def _Parsing(cls, events, start_msec, end_msec):
"""Returns the HTML/CSS parsing time within an interval.
Args:
events: ([tracing.Event]) list of events.
start_msec: (float) start time in ms, inclusive.
end_msec: (float) end time in ms, inclusive.
Returns:
A dict {URL (str) -> duration_msec (float)}. The dict may have a None key
for tasks that aren't associated with a URL.
"""
url_to_duration = collections.defaultdict(float)
parsing_events = [e for e in events
if ('devtools.timeline' in e.tracing_event['cat']
and (e.tracing_event['name']
in cls._PARSING_EVENT_NAMES))]
for event in parsing_events:
# Parsing events can contain nested script execution events, avoid
# double-counting by discounting these.
nested_events = cls._FullyIncludedEvents(events, event)
events_tree = _EventsTree(event, nested_events)
js_events = events_tree.DominatingEventsWithNames(cls._SCRIPT_EVENT_NAMES)
duration_to_subtract = sum(
cls._ClampedDuration(e, start_msec, end_msec) for e in js_events)
tracing_event = event.tracing_event
clamped_duration = cls._ClampedDuration(event, start_msec, end_msec)
if tracing_event['name'] == 'ParseAuthorStyleSheet':
url = tracing_event['args']['data']['styleSheetUrl']
else:
url = tracing_event['args']['beginData']['url']
parsing_duration = clamped_duration - duration_to_subtract
assert parsing_duration >= 0
url_to_duration[url] += parsing_duration
return dict(url_to_duration)
def GenerateEdgeActivity(self, dep):
"""For a dependency between two requests, returns the renderer activity
breakdown.
Args:
dep: (Request, Request, str) As returned from
RequestDependencyLens.GetRequestDependencies().
Returns:
{'edge_cost': (float) ms, 'busy': (float) ms,
'parsing': {'url' -> time_ms}, 'script' -> {'url' -> time_ms}}
"""
(first, second, reason) = dep
(start_msec, end_msec) = request_track.IntervalBetween(
first, second, reason)
assert end_msec - start_msec >= 0.
events = self._OverlappingMainRendererThreadEvents(start_msec, end_msec)
result = {'edge_cost': end_msec - start_msec,
'busy': self._ThreadBusyness(events, start_msec, end_msec),
'parsing': self._Parsing(events, start_msec, end_msec),
'script': self._ScriptsExecuting(events, start_msec, end_msec)}
return result
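  # Added example of the returned breakdown (all values are illustrative only):
  #
  #     {'edge_cost': 12.0, 'busy': 8.5,
  #      'parsing': {'https://example.com/': 3.0},
  #      'script': {'https://example.com/app.js': 4.2}}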
def BreakdownEdgeActivityByInitiator(self, dep):
"""For a dependency between two requests, categorizes the renderer activity.
Args:
dep: (Request, Request, str) As returned from
RequestDependencyLens.GetRequestDependencies().
Returns:
{'script': float, 'parsing': float, 'other_url': float,
'unknown_url': float, 'unrelated_work': float}
where the values are durations in ms:
- idle: The renderer main thread was idle.
- script: The initiating file was executing.
- parsing: The initiating file was being parsed.
- other_url: Other scripts and/or parsing activities.
- unknown_url: Activity which is not associated with a URL.
- unrelated_work: Activity unrelated to scripts or parsing.
"""
activity = self.GenerateEdgeActivity(dep)
breakdown = {'unrelated_work': activity['busy'],
'idle': activity['edge_cost'] - activity['busy'],
'script': 0, 'parsing': 0,
'other_url': 0, 'unknown_url': 0}
for kind in ('script', 'parsing'):
for (script_name, duration_ms) in activity[kind].items():
if not script_name:
breakdown['unknown_url'] += duration_ms
elif script_name == dep[0].url:
breakdown[kind] += duration_ms
else:
breakdown['other_url'] += duration_ms
breakdown['unrelated_work'] -= sum(
breakdown[x] for x in ('script', 'parsing', 'other_url', 'unknown_url'))
return breakdown
def MainRendererThreadBusyness(self, start_msec, end_msec):
"""Returns the amount of time the main renderer thread was busy.
Args:
start_msec: (float) Start of the interval.
end_msec: (float) End of the interval.
"""
events = self._OverlappingMainRendererThreadEvents(start_msec, end_msec)
return self._ThreadBusyness(events, start_msec, end_msec)
class _EventsTree(object):
"""Builds the hierarchy of events from a list of fully nested events."""
def __init__(self, root_event, events):
"""Creates the tree.
Args:
root_event: (Event) Event held by the tree root.
events: ([Event]) List of events that are fully included in |root_event|.
"""
self.event = root_event
self.start_msec = root_event.start_msec
self.end_msec = root_event.end_msec
self.children = []
events.sort(key=operator.attrgetter('start_msec'))
if not events:
return
current_child = (events[0], [])
for event in events[1:]:
if event.end_msec < current_child[0].end_msec:
current_child[1].append(event)
else:
self.children.append(_EventsTree(current_child[0], current_child[1]))
current_child = (event, [])
self.children.append(_EventsTree(current_child[0], current_child[1]))
def DominatingEventsWithNames(self, names):
"""Returns a list of the top-most events in the tree with a matching name.
"""
if self.event.name in names:
return [self.event]
else:
result = []
for child in self.children:
result += child.DominatingEventsWithNames(names)
return result
if __name__ == '__main__':
import sys
import json
import loading_trace
import request_dependencies_lens
filename = sys.argv[1]
json_dict = json.load(open(filename))
loading_trace = loading_trace.LoadingTrace.FromJsonDict(json_dict)
activity_lens = ActivityLens(loading_trace)
dependencies_lens = request_dependencies_lens.RequestDependencyLens(
loading_trace)
deps = dependencies_lens.GetRequestDependencies()
for requests_dep in deps:
print activity_lens.GenerateEdgeActivity(requests_dep)
| bsd-3-clause | 3,075,392,639,984,073,700 | 38.130282 | 80 | 0.651219 | false |
prospero78/pyPC | pak_pc/pak_gui/pak_widgets/mod_frm_cpu.py | 1 | 1734 | # -*- coding: utf8 -*-
'''
This module is dedicated to displaying the state of the central processor (CPU).
Since it is a self-contained widget, it can be plugged in anywhere.
'''
from Tkinter import Frame
from pak_pc.pak_gui.pak_widgets.mod_frm_reg import ClsFrmReg
from pak_pc.pak_gui.pak_widgets.mod_frm_reg_pc import ClsFrmRegPC
from pak_pc.pak_gui.pak_widgets.mod_frm_reg_bp import ClsFrmRegBP
from pak_pc.pak_gui.pak_widgets.mod_frm_cpu_frec import ClsFrmCpuFreq
from pak_pc.pak_gui.pak_widgets.mod_frm_reg_sp import ClsFrmRegSP
class ClsFrmCPU(Frame):
"""
    Class describing the frame used to display information about the
    processor state.
    :param root: reference to the root (parent) class.
"""
def __init__(self, root=None):
Frame.__init__(self, master=root, border=3, relief='sunken')
self.pack(side='top', fill='x')
        # display the software debug (breakpoint) register
self.frm_reg_bp = ClsFrmRegBP(root=self)
        # display the program counter
self.frm_reg_pc = ClsFrmRegPC(root=self)
# self.frm_reg_a.lbl_name['text']='reg_a'
        # display the stack pointer
self.frm_reg_sp = ClsFrmRegSP(root=self)
        # display register A
self.frm_reg_a = ClsFrmReg(root=self)
self.frm_reg_a.lbl_name['text'] = 'reg_a'
        # display the virtual processor clock frequency
self.frm_cpu_freq = ClsFrmCpuFreq(root=self)
| lgpl-3.0 | -1,709,216,792,433,874,700 | 32.214286 | 70 | 0.683154 | false |
alex-petrenko/hierarchical-rl | microtbs_rl/algorithms/a2c/multi_env.py | 1 | 3019 | import threading
import numpy as np
from queue import Queue
from microtbs_rl.utils.common_utils import *
logger = logging.getLogger(os.path.basename(__file__))
class _MultiEnvWorker:
"""
Helper class for the MultiEnv.
Currently implemented with threads, and it's slow because of GIL.
It would be much better to implement this with multiprocessing.
"""
def __init__(self, idx, make_env_func):
self.idx = idx
self.env = make_env_func()
self.env.seed(idx)
self.observation = self.env.reset()
self.action_queue = Queue()
self.result_queue = Queue()
self.thread = threading.Thread(target=self.start)
self.thread.start()
def start(self):
while True:
action = self.action_queue.get()
if action is None: # stop signal
logger.info('Stop worker %d...', self.idx)
break
observation, reward, done, _ = self.env.step(action)
if done:
observation = self.env.reset()
self.result_queue.put((observation, reward, done))
self.action_queue.task_done()
class MultiEnv:
"""Run multiple gym-compatible environments in parallel, keeping more or less the same interface."""
def __init__(self, num_envs, make_env_func):
self.num_envs = num_envs
self.workers = [_MultiEnvWorker(i, make_env_func) for i in range(num_envs)]
self.action_space = self.workers[0].env.action_space
self.observation_space = self.workers[0].env.observation_space
self.curr_episode_reward = [0] * num_envs
        # one list per environment; `[[]] * num_envs` would alias a single shared list
        self.episode_rewards = [[] for _ in range(num_envs)]
def initial_observations(self):
return [worker.observation for worker in self.workers]
def step(self, actions):
"""Obviously, returns vectors of obs, rewards, dones instead of usual single values."""
assert len(actions) == len(self.workers)
for worker, action in zip(self.workers, actions):
worker.action_queue.put(action)
results = []
for worker in self.workers:
worker.action_queue.join()
results.append(worker.result_queue.get())
observations, rewards, dones = zip(*results)
for i in range(self.num_envs):
self.curr_episode_reward[i] += rewards[i]
if dones[i]:
self.episode_rewards[i].append(self.curr_episode_reward[i])
self.curr_episode_reward[i] = 0
return observations, rewards, dones
def close(self):
logger.info('Stopping multi env...')
for worker in self.workers:
worker.action_queue.put(None) # terminate
worker.thread.join()
def calc_avg_rewards(self, n):
avg_reward = 0
for i in range(self.num_envs):
last_episodes_rewards = self.episode_rewards[i][-n:]
avg_reward += np.mean(last_episodes_rewards)
return avg_reward / float(self.num_envs)
| mit | 1,352,694,666,587,328,800 | 30.447917 | 104 | 0.60583 | false |
greenpau/ndmtk | setup.py | 1 | 9670 | #
# ndmtk - Network Discovery and Management Toolkit
# Copyright (C) 2016 Paul Greenberg @greenpau
# See LICENSE.txt for licensing details
#
# File: setup.py
#
from __future__ import print_function;
try:
from setuptools import setup;
except ImportError:
from ez_setup import use_setuptools;
use_setuptools();
from setuptools.command.install import install;
from setuptools.command.sdist import sdist;
from setuptools.command.test import test;
from setuptools.command.develop import develop;
from setuptools import setup;
from codecs import open;
import traceback;
import unittest;
import os;
import sys;
import re;
import stat;
import unittest;
pkg_name = 'ndmtk';
pkg_ver = '0.2.0';
cmdclass = {};
def _load_test_suite():
test_loader = unittest.TestLoader();
test_suite = test_loader.discover(os.path.join(pkg_dir, pkg_name, 'tests'), pattern='test_*.py');
return test_suite;
def remove_ansible_files(ansible_dirs):
for ansible_dir in ansible_dirs:
for suffix in ['.py', '.pyc']:
for plugin_type in ['plugins/action', 'plugins/callback']:
plugin_file = os.path.join(ansible_dir, plugin_type , pkg_name + suffix);
if os.path.isfile(plugin_file) or os.path.islink(plugin_file):
print("[INFO] found '%s'" % plugin_file);
try:
if os.path.islink(plugin_file):
os.unlink(plugin_file);
else:
os.remove(plugin_file);
print("[INFO] removed '%s'" % plugin_file);
except:
exc_type, exc_value, exc_traceback = sys.exc_info();
print("[ERROR] failed to remove %s %s" % (plugin_file, ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))));
return
def pre_build_toolkit():
for ts in _load_test_suite():
tsr=unittest.TextTestRunner();
tr = tsr.run(ts);
if len(tr.failures) > 0:
for tf in tr.failures:
print('[ERROR] ' + str(tf[1]));
return [];
print("[INFO] checking whether 'ansible' python package is installed ...");
ansible_dirs = _find_py_package('ansible');
if len(ansible_dirs) == 0:
print("[ERROR] 'ansible' python package was not found");
return [];
print("[INFO] the path to 'ansible' python package is: " + str(ansible_dirs));
remove_ansible_files(ansible_dirs);
return ansible_dirs;
def _find_utility(name):
x = any(os.access(os.path.join(path, name), os.X_OK) for path in os.environ["PATH"].split(os.pathsep));
return x;
def _find_py_package(name):
pkg_dirs = [];
for path in sys.path:
if not re.search('site-packages$', path):
continue;
if not os.path.exists(path):
continue;
if not os.path.isdir(path):
continue
target = os.path.join(path, name);
if not os.path.exists(target):
continue;
if not os.path.isdir(target):
continue;
if target not in pkg_dirs:
pkg_dirs.append(target);
return pkg_dirs;
def _post_build_toolkit(ansible_dirs, plugin_dir=None):
if plugin_dir is None:
plugin_dirs = _find_py_package(pkg_name);
if len(plugin_dirs) > 0:
print("[INFO] the path to '" + pkg_name + "' python package is: " + str(plugin_dirs));
for d in plugin_dirs:
if re.search('bdist', d) or re.search('build', d):
continue;
plugin_dir = d;
break;
if plugin_dir is None:
print("[ERROR] failed to find '" + pkg_name + "' python package, aborting!");
return;
if re.search('bdist', plugin_dir) or re.search('build', plugin_dir):
return;
if re.search('site-packages.?$', plugin_dir):
plugin_dir += pkg_name;
print("[INFO] the path to '" + pkg_name + "' python package is: " + str(plugin_dir));
'''
Create a symlink, i.e. `ln -s TARGET LINK_NAME`
'''
_egg_files = [];
for ansible_dir in ansible_dirs:
for i in ['action', 'callback']:
symlink_target = os.path.join(plugin_dir, 'plugins/' + i + '/ndmtk.py');
symlink_name = os.path.join(ansible_dir, 'plugins/' + i + '/ndmtk.py');
try:
os.symlink(symlink_target, symlink_name);
os.chmod(symlink_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH);
_egg_files.append(symlink_name);
_egg_files.append(symlink_name + 'c');
print("[INFO] created symlink '" + symlink_name + "' to plugin '" + symlink_target + "'");
except:
exc_type, exc_value, exc_traceback = sys.exc_info();
print('[ERROR] an attempt to create a symlink ' + symlink_name + ' to plugin ' + symlink_target + ' failed, aborting!');
print(traceback.format_exception(exc_type, exc_value, exc_traceback));
return;
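# Added note on the overall flow: `install_` first runs the bundled unit tests
# and locates the installed `ansible` package (pre_build_toolkit), lets
# setuptools copy the package, then symlinks ndmtk's action/callback plugins
# into ansible's plugins/action and plugins/callback directories
# (_post_build_toolkit); `uninstall_` removes both the package and the symlinks.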
class install_(install):
def run(self):
ansible_dirs = pre_build_toolkit();
if len(ansible_dirs) == 0:
return 1;
install.run(self);
if len(ansible_dirs) > 0:
self.execute(_post_build_toolkit, (ansible_dirs, self.install_lib, ), msg="running post_install_scripts");
cmdclass['install'] = install_;
cmdclass['bdist_wheel'] = install_;
class uninstall_(develop):
def run(self):
plugin_dirs = [];
for dp in sys.path:
if not re.search('site-packages$', dp):
continue;
ds = [name for name in os.listdir(dp) if os.path.isdir(os.path.join(dp, name))];
if ds:
for d in ds:
if not re.match(pkg_name, d):
continue;
if os.path.join(dp, d) not in plugin_dirs:
plugin_dirs.append(os.path.join(dp, d));
if plugin_dirs:
for dp in plugin_dirs:
try:
for root, dirs, files in os.walk(dp, topdown=False):
for name in files:
if os.path.islink(os.path.join(root, name)):
os.unlink(os.path.join(root, name));
else:
os.remove(os.path.join(root, name));
for name in dirs:
os.rmdir(os.path.join(root, name));
os.rmdir(dp);
print("[INFO] deleted '" + dp + "'");
except:
print("[INFO] failed to delete '" + dp + "'");
exc_type, exc_value, exc_traceback = sys.exc_info();
print(traceback.format_exception(exc_type, exc_value, exc_traceback));
else:
print("[INFO] no relevant files for the uninstall found, all clean");
ansible_dirs = _find_py_package('ansible');
if len(ansible_dirs) == 0:
print("[ERROR] 'ansible' python package was not found");
return;
remove_ansible_files(ansible_dirs);
return;
cmdclass['uninstall'] = uninstall_;
pkg_dir = os.path.abspath(os.path.dirname(__file__));
pkg_license='OSI Approved :: GNU General Public License v3 or later (GPLv3+)';
pkg_description = 'Network Discovery and Management Toolkit packaged as Ansible Plugin';
pkg_url = 'https://github.com/greenpau/' + pkg_name;
#pkg_download_url = 'http://pypi.python.org/packages/source/' + pkg_name[0] + '/' + pkg_name + '/' + pkg_name + '-' + pkg_ver + '.tar.gz';
pkg_download_url = 'https://github.com/greenpau/ndmtk/archive/master.zip';
pkg_author = 'Paul Greenberg';
pkg_author_email = '[email protected]';
pkg_packages = [pkg_name.lower()];
pkg_requires = ['ansible>=2.0'];
pkg_data=[
'*.yml',
'*.j2',
'tests/*.py',
'plugins/callback/*.py',
'plugins/action/*.py',
'plugins/action/*.j2',
'plugins/action/*.yml',
'plugins/action/files/cli/os/*.yml',
'plugins/action/files/cli/core/*.yml',
'README.rst',
'LICENSE.txt',
];
pkg_platforms='any';
pkg_classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: ' + pkg_license,
'Programming Language :: Python',
'Operating System :: POSIX :: Linux',
'Topic :: Utilities',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
];
pkg_keywords=[
'ansible',
'ansible plugin',
'network',
'ssh',
'telnet',
'console',
'automation',
'network automation',
'network discovery',
];
pkg_test_suite='setup._load_test_suite';
pkg_long_description=pkg_description;
with open(os.path.join(pkg_dir, pkg_name, 'README.rst'), encoding='utf-8') as f:
pkg_long_description = f.read();
setup(
name=pkg_name,
version=pkg_ver,
description=pkg_description,
long_description=pkg_long_description,
url=pkg_url,
download_url=pkg_download_url,
author=pkg_author,
author_email=pkg_author_email,
license=pkg_license,
platforms=pkg_platforms,
classifiers=pkg_classifiers,
packages=pkg_packages,
package_data= {
pkg_name.lower() : pkg_data,
},
keywords=pkg_keywords,
install_requires=pkg_requires,
test_suite=pkg_test_suite,
cmdclass=cmdclass
);
| gpl-3.0 | -8,815,621,890,355,421,000 | 35.217228 | 153 | 0.57001 | false |
amarian12/p2pool-adaptive-nvc | p2pool/networks.py | 1 | 2034 | from p2pool.bitcoin import networks
from p2pool.util import math
# CHAIN_LENGTH = number of shares back client keeps
# REAL_CHAIN_LENGTH = maximum number of shares back client uses to compute payout
# REAL_CHAIN_LENGTH must always be <= CHAIN_LENGTH
# REAL_CHAIN_LENGTH must be changed in sync with all other clients
# changes can be done by changing one, then the other
nets = dict(
novacoin=math.Object(
PARENT=networks.nets['novacoin'],
SHARE_PERIOD=15, # seconds
CHAIN_LENGTH=12*60*60//10, # shares
REAL_CHAIN_LENGTH=12*60*60//10, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=3, # blocks
IDENTIFIER='e037d5b8c6923610'.decode('hex'),
PREFIX='7208c1a53ef659b0'.decode('hex'),
P2P_PORT=39946,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=True,
WORKER_PORT=8891,
BOOTSTRAP_ADDRS='37.57.95.59:8777 81.200.241.54:8777 82.200.205.39:8777 82.234.193.23:8777 85.198.114.251:8777 85.234.62.99:8777 89.239.190.22:8777 89.250.210.94:8777 94.198.0.39:8777 95.84.138.99:8777 109.238.244.73:8777 176.37.148.85:8777 178.19.249.43:8777 178.159.127.151:8777 188.130.184.1:8777 212.98.191.90:8777 212.113.35.38:8777'.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60011,
),
novacoin_testnet=math.Object(
PARENT=networks.nets['novacoin_testnet'],
SHARE_PERIOD=4, # seconds
CHAIN_LENGTH=20*60//3, # shares
REAL_CHAIN_LENGTH=20*60//3, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=3, # blocks
IDENTIFIER='e037d5b8c7923110'.decode('hex'),
PREFIX='7208c1a54ef619b0'.decode('hex'),
P2P_PORT=18777,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=False,
WORKER_PORT=18336,
BOOTSTRAP_ADDRS=''.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60011,
),
)
for net_name, net in nets.iteritems():
net.NAME = net_name
| gpl-3.0 | 8,468,677,192,613,597,000 | 40.510204 | 358 | 0.636676 | false |
bburan/psiexperiment | psi/controller/output.py | 1 | 9465 | import logging
log = logging.getLogger(__name__)
from types import GeneratorType
from functools import partial
import numpy as np
from atom.api import (Unicode, Enum, Event, Typed, Property, Float, Int, Bool,
List)
import enaml
from enaml.application import deferred_call
from enaml.core.api import Declarative, d_
from enaml.workbench.api import Extension
from ..util import coroutine, SignalBuffer
from .queue import AbstractSignalQueue
from psi.core.enaml.api import PSIContribution
import time
class Synchronized(PSIContribution):
outputs = Property()
engines = Property()
def _get_outputs(self):
return self.children
def _get_engines(self):
return set(o.engine for o in self.outputs)
class Output(PSIContribution):
name = d_(Unicode()).tag(metadata=True)
label = d_(Unicode()).tag(metadata=True)
target_name = d_(Unicode())
target = d_(Typed(Declarative).tag(metadata=True), writable=False)
channel = Property()
engine = Property()
# These two are defined as properties because it's theoretically possible
# for the output to transform these (e.g., an output could upsample
# children or "equalize" something before passing it along).
fs = Property().tag(metadata=True)
calibration = Property().tag(metadata=True)
filter_delay = Property().tag(metadata=True)
# TODO: clean this up. it's sort of hackish.
token = d_(Typed(Declarative))
# Can the user configure properties (such as the token) via the GUI?
configurable = d_(Bool(True))
callbacks = List()
def connect(self, cb):
self.callbacks.append(cb)
def notify(self, data):
# Correct for filter delay
d = data.copy()
d['t0'] += self.filter_delay
for cb in self.callbacks:
cb(d)
def _get_engine(self):
if self.channel is None:
return None
else:
return self.channel.engine
def _get_channel(self):
from .channel import Channel
target = self.target
while True:
if target is None:
return None
elif isinstance(target, Channel):
return target
else:
target = target.target
def _get_filter_delay(self):
return self.target.filter_delay
def _get_fs(self):
return self.channel.fs
def _get_calibration(self):
return self.channel.calibration
def is_ready(self):
raise NotImplementedError
class BufferedOutput(Output):
dtype = Unicode('double')
buffer_size = Property()
active = Bool(False)
source = Typed(object)
_buffer = Typed(SignalBuffer)
_offset = Int(0)
def _get_buffer_size(self):
return self.channel.buffer_size
def _default__buffer(self):
return SignalBuffer(self.fs, self.buffer_size, 0, self.dtype)
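    # Descriptive note on the method below (behaviour unchanged): get_samples()
    # serves the window [offset, offset + samples) by reusing whatever overlap
    # already exists in the SignalBuffer, zero-padding any portion that falls
    # before the activation offset, and asking get_next_samples() for the rest,
    # which is also appended to the buffer for later reuse.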
def get_samples(self, offset, samples, out):
lb = offset
ub = offset + samples
buffered_lb = self._buffer.get_samples_lb()
buffered_ub = self._buffer.get_samples_ub()
log.trace('Getting %d samples from %d to %d for %s', samples, lb, ub,
self.name)
log.trace('Buffer has %d to %d for %s', buffered_lb, buffered_ub,
self.name)
if lb > buffered_ub:
# This breaks an implicit software contract.
raise SystemError('Mismatch between offsets')
elif lb == buffered_ub:
log.trace('Generating new data')
pass
elif lb >= buffered_lb and ub <= buffered_ub:
log.trace('Extracting from buffer')
out[:] = self._buffer.get_range_samples(lb, ub)
samples = 0
offset = ub
elif lb >= buffered_lb and ub > buffered_ub:
log.trace('Extracting from buffer and generating new data')
b = self._buffer.get_range_samples(lb)
s = b.shape[-1]
out[:s] = b
samples -= s
offset += s
        # Don't generate new samples if occurring before activation.
if (samples > 0) and (offset < self._offset):
s = min(self._offset-offset, samples)
data = np.zeros(s)
self._buffer.append_data(data)
if (samples == s):
out[-samples:] = data
else:
out[-samples:-samples+s] = data
samples -= s
offset += s
# Generate new samples
if samples > 0:
data = self.get_next_samples(samples)
self._buffer.append_data(data)
out[-samples:] = data
def get_next_samples(self, samples):
raise NotImplementedError
def activate(self, offset):
log.debug('Activating %s at %d', self.name, offset)
self.active = True
self._offset = offset
self._buffer.invalidate_samples(offset)
def deactivate(self, offset):
log.debug('Deactivating %s at %d', self.name, offset)
self.active = False
self.source = None
self._buffer.invalidate_samples(offset)
def is_ready(self):
return self.source is not None
def get_duration(self):
return self.source.get_duration()
class EpochOutput(BufferedOutput):
def get_next_samples(self, samples):
log.trace('Getting %d samples for %s', samples, self.name)
if self.active:
buffered_ub = self._buffer.get_samples_ub()
# Pad with zero
zero_padding = max(self._offset-buffered_ub, 0)
zero_padding = min(zero_padding, samples)
waveform_samples = samples - zero_padding
waveforms = []
if zero_padding:
w = np.zeros(zero_padding, dtype=self.dtype)
waveforms.append(w)
if waveform_samples:
w = self.source.next(waveform_samples)
waveforms.append(w)
if self.source.is_complete():
self.deactivate(self._buffer.get_samples_ub())
waveform = np.concatenate(waveforms, axis=-1)
else:
waveform = np.zeros(samples, dtype=self.dtype)
return waveform
class QueuedEpochOutput(BufferedOutput):
queue = d_(Typed(AbstractSignalQueue))
auto_decrement = d_(Bool(False))
complete_cb = Typed(object)
complete = d_(Event(), writable=False)
def _observe_queue(self, event):
self.source = self.queue
self._update_queue()
def _observe_target(self, event):
self._update_queue()
def _update_queue(self):
if self.queue is not None and self.target is not None:
self.queue.set_fs(self.fs)
self.queue.connect(self.notify)
def get_next_samples(self, samples):
if self.active:
waveform, empty = self.queue.pop_buffer(samples, self.auto_decrement)
if empty and self.complete_cb is not None:
self.complete = True
log.debug('Queue empty. Calling complete callback.')
deferred_call(self.complete_cb)
self.active = False
else:
waveform = np.zeros(samples, dtype=np.double)
return waveform
def add_setting(self, setting, averages=None, iti_duration=None):
with enaml.imports():
from .output_manifest import initialize_factory
# Make a copy to ensure that we don't accidentally modify in-place
context = setting.copy()
if averages is None:
averages = context.pop(f'{self.name}_averages')
if iti_duration is None:
iti_duration = context.pop(f'{self.name}_iti_duration')
# Somewhat surprisingly it appears to be faster to use factories in the
# queue rather than creating the waveforms for ABR tone pips, even for
# very short signal durations.
#context['fs'] = self.fs
#context['calibration'] = self.calibration
# I'm not in love with this since it requires hooking into the
# manifest system.
factory = initialize_factory(self, self.token, context)
duration = factory.get_duration()
self.queue.append(factory, averages, iti_duration, duration, setting)
def activate(self, offset):
log.debug('Activating output at %d', offset)
super().activate(offset)
self.queue.set_t0(offset/self.fs)
def get_duration(self):
# TODO: add a method to get actual duration from queue.
return np.inf
class SelectorQueuedEpochOutput(QueuedEpochOutput):
selector_name = d_(Unicode('default'))
class ContinuousOutput(BufferedOutput):
def get_next_samples(self, samples):
if self.active:
return self.source.next(samples)
else:
return np.zeros(samples, dtype=np.double)
class DigitalOutput(Output):
pass
class Trigger(DigitalOutput):
duration = d_(Float(0.1))
def fire(self):
if self.engine.configured:
self.engine.fire_sw_do(self.channel.name, duration=self.duration)
class Toggle(DigitalOutput):
state = Bool(False)
def _observe_state(self, event):
if self.engine is not None and self.engine.configured:
self.engine.set_sw_do(self.channel.name, event['value'])
def set_high(self):
self.state = True
def set_low(self):
self.state = False
| mit | 1,338,692,515,982,732,500 | 28.670846 | 81 | 0.604015 | false |
elemel/torn | lib/torn/ik.py | 1 | 1441 | from euclid import *
from math import *
__all__ = ['solve']
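# Illustrative usage sketch (coordinates are hypothetical): solve() repositions
# the free vertices of a 2- or 3-vertex chain so the end effector reaches (or
# points toward) the target while preserving the original segment lengths.
#
#   chain = (Vector2(0.0, 0.0), Vector2(1.0, 0.0), Vector2(2.0, 0.0))
#   v1, v2, v3 = solve(chain, Vector2(1.0, 1.0))
#   # abs(v2 - v1) and abs(v3 - v2) are still 1.0 (up to float error)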
def solve(vertices, target):
if len(vertices) == 2:
return solve_one_edge(vertices, target)
elif len(vertices) == 3:
return solve_two_edges(vertices, target)
else:
return vertices
def solve_one_edge(vertices, target):
v1, v2 = vertices
if v1 == target:
return vertices
return v1, v1 + abs(v2 - v1) * (target - v1).normalize()
def solve_two_edges(vertices, target):
v1, v2, v3 = vertices
u = target - v1
d = abs(u)
u1 = v2 - v1
u2 = v3 - v2
d1 = abs(u1)
d2 = abs(u2)
if d == 0:
v3 = v2 - (d2 / d1) * u1
elif d >= d1 + d2:
v2 = v1 + (d1 / d) * u
v3 = v2 + (d2 / d) * u
elif d <= d1 - d2:
v2 = v1 + (d1 / d) * u
v3 = v2 - (d2 / d) * u
elif d <= d2 - d1:
v2 = v1 - (d1 / d) * u
v3 = v2 + (d2 / d) * u
else:
# Closed form solution 2 from "Oh My God, I Inverted Kine!" by
# Jeff Lander.
#
# http://www.darwin3d.com/gamedev/articles/col0998.pdf
a1 = atan2(u.y, u.x)
a2 = acos((u.x ** 2 + u.y ** 2 + d1 ** 2 -
d2 ** 2) / (2 * d1 * d))
# Maintain winding.
if u1.x * u2.y - u2.x * u1.y < 0:
a = a1 + a2
else:
a = a1 - a2
v2 = v1 + d1 * Vector2(cos(a), sin(a))
v3 = target
return v1, v2, v3
| mit | -3,554,647,439,984,454,700 | 24.732143 | 70 | 0.461485 | false |
montylounge/django-sugar | sugar/templatetags/media.py | 1 | 1435 | from django.template import Library, Node
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from django.conf import settings
import os, urlparse
register = Library()
def _absolute_url(url):
if url.startswith('http://') or url.startswith('https://'):
return url
domain = Site.objects.get_current().domain
return 'http://%s%s' % (domain, url)
@register.simple_tag
def media(filename, flags=''):
"""
    Author: http://softwaremaniacs.org/blog/2009/03/22/media-tag/
{% load media %}
<link rel="stylesheet" href="{% media "css/style.css" %}">
{% media "css/style.css" %} <!-- ...style.css?123456789 -->
{% media "css/style.css" "no-timestamp" %} <!-- ...style.css -->
{% media "images/edit.png" "timestamp" %} <!-- ...edit.png?123456789 -->
{% media "images/edit.png" "absolute" %} <!-- http://example.com/media/edit.png -->
"""
flags = set(f.strip() for f in flags.split(','))
url = urlparse.urljoin(settings.MEDIA_URL, filename)
if 'absolute' in flags:
url = _absolute_url(url)
if (filename.endswith('.css') or filename.endswith('.js')) and 'no-timestamp' not in flags or \
'timestamp' in flags:
fullname = os.path.join(settings.MEDIA_ROOT, filename)
if os.path.exists(fullname):
url += '?%d' % os.path.getmtime(fullname)
    return url
| bsd-3-clause | 6,945,768,594,678,225,000 | 37.810811 | 99 | 0.611847 | false |
google/mirandum | alerts/main/migrations/0015_auto_20160606_0610.py | 1 | 4393 | # -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0014_session'),
]
operations = [
migrations.AddField(
model_name='alertconfig',
name='animation_in',
field=models.CharField(default=b'fadeIn', max_length=100, null=True, blank=True, choices=[(b'fadeIn', b'Fade In'), (b'fadeInDown', b'Fade In Down'), (b'fadeInDownBig', b'Fade In Down Big'), (b'fadeInLeft', b'Fade In Left'), (b'fadeInLeftBig', b'Fade In Left Big'), (b'fadeInRight', b'Fade In Right'), (b'fadeInRightBig', b'Fade In'), (b'fadeInUp', b'Fade In Up'), (b'fadeInUpBig', b'Fade In Up Big'), (b'zoomIn', b'Zoom In'), (b'zoomInDown', b'Zoom In Down'), (b'zoomInLeft', b'Zoom In Left'), (b'zoomInRight', b'Zoom In Right'), (b'zoomInUp', b'Zoom In Up'), (b'bounceIn', b'Bounce In'), (b'bounceInDown', b'Bounce In Down'), (b'bounceInLeft', b'Bounce In Left'), (b'bounceInRight', b'Bounce In Right'), (b'bounceInUp', b'Bounce In Up'), (b'slideInUp', b'Slide In Up'), (b'slideInDown', b'Slide In Down'), (b'slideInLeft', b'Slide In Left'), (b'slideInRight', b'Slide In Right')]),
preserve_default=True,
),
migrations.AddField(
model_name='alertconfig',
name='animation_out',
field=models.CharField(default=b'fadeOut', max_length=100, null=True, blank=True, choices=[(b'fadeOut', b'Fade Out'), (b'fadeOutDown', b'Fade Out Down'), (b'fadeOutDownBig', b'Fade Out Down Big'), (b'fadeOutLeft', b'Fade Out Left'), (b'fadeOutLeftBig', b'Fade Out Left Big'), (b'fadeOutRight', b'Fade Out Right'), (b'fadeOutRightBig', b'Fade Out'), (b'fadeOutUp', b'Fade Out Up'), (b'fadeOutUpBig', b'Fade Out Up Big'), (b'zoomOut', b'Zoom Out'), (b'zoomOutDown', b'Zoom Out Down'), (b'zoomOutLeft', b'Zoom Out Left'), (b'zoomOutRight', b'Zoom Out Right'), (b'zoomOutUp', b'Zoom Out Up'), (b'bounceOut', b'Bounce Out'), (b'bounceOutDown', b'Bounce Out Down'), (b'bounceOutLeft', b'Bounce Out Left'), (b'bounceOutRight', b'Bounce Out Right'), (b'bounceOutUp', b'Bounce Out Up'), (b'slideOutUp', b'Slide Out Up'), (b'slideOutDown', b'Slide Out Down'), (b'slideOutLeft', b'Slide Out Left'), (b'slideOutRight', b'Slide Out Right')]),
preserve_default=True,
),
migrations.AddField(
model_name='alertconfig',
name='font_effect',
field=models.CharField(default=b'shadow', max_length=100, null=True, blank=True, choices=[(b'shadow', b'Normal Shadow'), (b'anaglyph', b'Anaglyph'), (b'brick-sign', b'Brick Sign'), (b'canvas-print', b'Canvas Print'), (b'crackle', b'Crackle'), (b'decaying', b'Decaying'), (b'destruction', b'Destruction'), (b'distressed', b'Distressed'), (b'distressed-wood', b'Distressed Wood'), (b'emboss', b'Emboss'), (b'fire', b'Fire'), (b'fire-animation', b'Fire Animation'), (b'fragile', b'Fragile'), (b'grass', b'Grass'), (b'ice', b'Ice'), (b'mitosis', b'Mitosis'), (b'neon', b'Neon'), (b'outline', b'Outline'), (b'putting-green', b'Putting Green'), (b'scuffed-steel', b'Scuffed Steel'), (b'splintered', b'Splintered'), (b'static', b'Static'), (b'stonewash', b'Stonewash'), (b'3d', b'3d'), (b'3d-float', b'3d Float'), (b'vintage', b'Vintage'), (b'wallpaper', b'Wallpaper')]),
preserve_default=True,
),
migrations.AddField(
model_name='alertconfig',
name='layout',
field=models.CharField(blank=True, max_length=100, null=True, help_text=b'Alert layout (only available with v2 AlertBox)', choices=[(b'vertical', b'Image above text'), (b'side', b'Image next to text'), (b'above', b'Text on top of image')]),
preserve_default=True,
),
]
| apache-2.0 | 7,792,831,731,491,586,000 | 85.137255 | 941 | 0.644434 | false |
FordyceLab/AcqPack | acqpack/fractioncollector.py | 1 | 3626 | import numpy as np
import pandas as pd
import utils as ut
class FractionCollector:
"""
A high-level wrapper around an XY stage.
"""
def __init__(self, xy):
self.frames = pd.DataFrame(index=['trans', 'position_table'])
self.add_frame('hardware')
self.XY = xy
def add_frame(self, name, trans=np.eye(3,3), position_table=None):
"""
Adds coordinate frame. Frame requires affine transform to hardware coordinates; position_table optional.
:param name: (str) the name to be given to the frame (e.g. hardware)
:param trans: (np.ndarray <- str) xyw affine transform matrix; if string, tries to load delimited file
:param position_table: (None | pd.DataFrame <- str) position_table; if string, tries to load delimited file
"""
if isinstance(trans, str):
trans = ut.read_delim_pd(trans).select_dtypes(['number']).values
if isinstance(position_table, str):
position_table = ut.read_delim_pd(position_table)
assert(isinstance(trans, np.ndarray)) # trans: numpy array of shape (3,3)
assert(trans.shape==(3,3)) # check size
assert(np.array_equal(np.linalg.norm(trans[:-1,:-1]),
np.linalg.norm(np.eye(2,2)))) # Frob norm rotation invariant (no scaling)
assert(trans[-1,-1] != 0) # cannot be singular matrix
# position_table: DataFrame with x,y OR None
if isinstance(position_table, pd.DataFrame):
assert(set(list('xy')).issubset(position_table.columns)) # contains 'x','y' columns
else:
assert(position_table is None)
self.frames[name] = None
self.frames[name].trans = trans
self.frames[name].position_table = position_table
def where(self, frame=None):
"""
Retrieves current hardware (x,y). If frame is specified, transforms hardware coordinates into
frame's coordinates.
:param frame: (str) name of frame to specify transform (optional)
:return: (tup) current position
"""
where = self.XY.where_xy()
if frame is not None:
where += (1,)
x, y, _ = tuple(np.dot(where, np.linalg.inv(self.frames[frame].trans)))
where = x, y
return where
def home(self):
"""
Homes XY axes.
"""
self.XY.home_xy()
# TODO: if no columns specified, transform provided XY to hardware coordinates.
# TODO: default frame?
def goto(self, frame, lookup_columns, lookup_values):
"""
Finds lookup_values in lookup_columns of frame's position_list; retrieves corresponding X,Y
Transforms X,Y to hardware X,Y by frame's transform.
Moves to hardware X,Y.
:param frame: (str) frame that specifies position_list and transform
:param lookup_columns: (str | list) column(s) to search in position_table
:param lookup_values: (val | list) values(s) to find in lookup_columns
"""
trans, position_table = self.frames[frame]
if lookup_columns=='xy':
lookup_values = tuple(lookup_values) + (1,)
            xh, yh, _ = np.dot(lookup_values, trans)  # trans is 3x3, so drop the homogeneous coordinate
else:
xy = tuple(ut.lookup(position_table, lookup_columns, lookup_values)[['x', 'y']].iloc[0])
xyw = xy + (1,) # concatenate for translation
xh, yh, _ = np.dot(xyw, trans) # get hardware coordinates
self.XY.goto_xy(xh, yh)
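    # Illustrative usage sketch (frame name, columns and values are hypothetical):
    #
    #   fc = FractionCollector(xy_stage)
    #   fc.add_frame('plate', trans=plate_trans, position_table=plate_positions)
    #   fc.goto('plate', 'name', 'A1')        # look up well 'A1', move in hardware coords
    #   fc.goto('plate', 'xy', (12.0, 8.0))   # or pass frame-local x, y directly
    #   x, y = fc.where('plate')              # current position in plate coordinates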
def exit(self):
"""
Send exit command to XY.
"""
self.XY.exit()
| mit | 6,732,813,930,604,466,000 | 37.168421 | 115 | 0.593767 | false |
juju-solutions/charms.reactive | charms/reactive/relations.py | 1 | 33565 | # Copyright 2014-2017 Canonical Limited.
#
# This file is part of charms.reactive
#
# charms.reactive is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charms.reactive is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import importlib
from inspect import isclass
from charmhelpers.core import hookenv
from charmhelpers.core import unitdata
from charmhelpers.cli import cmdline
from charms.reactive.flags import get_flags
from charms.reactive.flags import _get_flag_value
from charms.reactive.flags import set_flag
from charms.reactive.flags import clear_flag
from charms.reactive.flags import StateList
from charms.reactive.bus import _append_path
try:
from importlib.metadata import entry_points
except ImportError:
from pkg_resources import iter_entry_points
def entry_points():
group = 'charms.reactive.relation_factory'
return {group: list(iter_entry_points(group))}
__all__ = [
'endpoint_from_name',
'endpoint_from_flag',
'relation_from_flag', # DEPRECATED
'scopes', # DEPRECATED
'RelationBase', # DEPRECATED
'relation_from_state', # DEPRECATED
]
# arbitrary obj instances to use as defaults instead of None
ALL = object()
TOGGLE = object()
def endpoint_from_name(endpoint_name):
"""The object used for interacting with the named relations, or None.
"""
if endpoint_name is None:
return None
factory = relation_factory(endpoint_name)
if factory:
return factory.from_name(endpoint_name)
def relation_from_name(relation_name):
"""
.. deprecated:: 0.6.0
Alias for :func:`endpoint_from_name`
"""
return endpoint_from_name(relation_name)
def endpoint_from_flag(flag):
"""The object used for interacting with relations tied to a flag, or None.
"""
relation_name = None
value = _get_flag_value(flag)
if isinstance(value, dict) and 'relation' in value:
# old-style RelationBase
relation_name = value['relation']
elif flag.startswith('endpoint.'):
# new-style Endpoint
relation_name = flag.split('.')[1]
elif '.' in flag:
# might be an unprefixed new-style Endpoint
relation_name = flag.split('.')[0]
if relation_name not in hookenv.relation_types():
return None
if relation_name:
factory = relation_factory(relation_name)
if factory:
return factory.from_flag(flag)
return None
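# Illustrative usage sketch (flag and relation names are hypothetical): a
# reactive handler can recover the endpoint object behind the flag that
# triggered it and use that object's interface-specific API.
#
#   @when('endpoint.db.joined')
#   def handle_db():
#       db = endpoint_from_flag('endpoint.db.joined')
#       if db is not None:
#           ...  # call whatever the db interface layer provides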
def relation_from_flag(flag):
"""
.. deprecated:: 0.6.0
Alias for :func:`endpoint_from_flag`
"""
return endpoint_from_flag(flag)
def relation_from_state(state):
"""
.. deprecated:: 0.5.0
Alias for :func:`endpoint_from_flag`
"""
return endpoint_from_flag(state)
class RelationFactory(object):
"""Produce objects for interacting with a relation.
Interfaces choose which RelationFactory is used for their relations
by adding a RelationFactory subclass to
``$CHARM_DIR/hooks/relations/{interface}/{provides,requires,peer}.py``.
This is normally a RelationBase subclass.
"""
_factories = []
@classmethod
def discover(cls):
for ep in entry_points().get('charms.reactive.relation_factory', []):
factory = ep.load()
factory.load()
RelationFactory._factories.append(factory)
@classmethod
def get_factory(cls, relation_name):
for factory in RelationFactory._factories:
if factory.from_name(relation_name):
return factory
return None
@classmethod
def from_name(cls, relation_name):
raise NotImplementedError()
@classmethod
def from_flag(cls, state):
raise NotImplementedError()
def relation_factory(relation_name):
"""Get the RelationFactory for the given relation name.
Looks for a RelationFactory in the first file matching:
``$CHARM_DIR/hooks/relations/{interface}/{provides,requires,peer}.py``
"""
factory = RelationFactory.get_factory(relation_name)
if factory:
return factory
role, interface = hookenv.relation_to_role_and_interface(relation_name)
if not (role and interface):
hookenv.log('Unable to determine role and interface for relation '
'{}'.format(relation_name), hookenv.ERROR)
return None
return _find_relation_factory(_relation_module(role, interface))
def _relation_module(role, interface):
"""
Return module for relation based on its role and interface, or None.
Prefers new location (reactive/relations) over old (hooks/relations).
"""
_append_path(hookenv.charm_dir())
_append_path(os.path.join(hookenv.charm_dir(), 'hooks'))
base_module = 'relations.{}.{}'.format(interface, role)
for module in ('reactive.{}'.format(base_module), base_module):
if module in sys.modules:
break
try:
importlib.import_module(module)
break
except ImportError:
continue
else:
hookenv.log('Unable to find implementation for relation: '
'{} of {}'.format(role, interface), hookenv.ERROR)
return None
return sys.modules[module]
def _find_relation_factory(module):
"""
Attempt to find a RelationFactory subclass in the module.
Note: RelationFactory and RelationBase are ignored so they may
be imported to be used as base classes without fear.
"""
if not module:
return None
# All the RelationFactory subclasses
candidates = [o for o in (getattr(module, attr) for attr in dir(module))
if (o is not RelationFactory and
o is not RelationBase and
isclass(o) and
issubclass(o, RelationFactory))]
# Filter out any factories that are superclasses of another factory
# (none of the other factories subclass it). This usually makes
    # the explicit check for RelationBase and RelationFactory unnecessary.
candidates = [c1 for c1 in candidates
if not any(issubclass(c2, c1) for c2 in candidates
if c1 is not c2)]
if not candidates:
hookenv.log('No RelationFactory found in {}'.format(module.__name__),
hookenv.WARNING)
return None
if len(candidates) > 1:
raise RuntimeError('Too many RelationFactory found in {}'
''.format(module.__name__))
return candidates[0]
class scopes(object):
"""
These are the recommended scope values for relation implementations.
To use, simply set the ``scope`` class variable to one of these::
class MyRelationClient(RelationBase):
scope = scopes.SERVICE
"""
GLOBAL = 'global'
"""
All connected services and units for this relation will share a single
conversation. The same data will be broadcast to every remote unit, and
retrieved data will be aggregated across all remote units and is expected
to either eventually agree or be set by a single leader.
"""
SERVICE = 'service'
"""
Each connected service for this relation will have its own conversation.
The same data will be broadcast to every unit of each service's conversation,
and data from all units of each service will be aggregated and is expected
to either eventually agree or be set by a single leader.
"""
UNIT = 'unit'
"""
Each connected unit for this relation will have its own conversation. This
is the default scope. Each unit's data will be retrieved individually, but
note that due to how Juju works, the same data is still broadcast to all
units of a single service.
"""
class AutoAccessors(type):
"""
Metaclass that converts fields referenced by ``auto_accessors`` into
accessor methods with very basic doc strings.
"""
def __new__(cls, name, parents, dct):
for field in dct.get('auto_accessors', []):
meth_name = field.replace('-', '_')
meth = cls._accessor(field)
meth.__name__ = meth_name
meth.__module__ = dct.get('__module__')
meth.__doc__ = 'Get the %s, if available, or None.' % field
dct[meth_name] = meth
return super(AutoAccessors, cls).__new__(cls, name, parents, dct)
@staticmethod
def _accessor(field):
def __accessor(self):
return self.get_remote(field)
return __accessor
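# Illustrative effect of AutoAccessors (field name is hypothetical): declaring
#
#   class MyClient(RelationBase):
#       auto_accessors = ['private-address']
#
# generates a MyClient.private_address() method equivalent to calling
# self.get_remote('private-address'), returning None when the field is unset.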
class RelationBase(RelationFactory, metaclass=AutoAccessors):
"""
A base class for relation implementations.
"""
_cache = {}
scope = scopes.UNIT
"""
Conversation scope for this relation.
The conversation scope controls how communication with connected units
is aggregated into related :class:`Conversations <Conversation>`, and
can be any of the predefined :class:`scopes`, or any arbitrary string.
Connected units which share the same scope will be considered part of
the same conversation. Data sent to a conversation is sent to all units
that are a part of that conversation, and units that are part of a
conversation are expected to agree on the data that they send, whether
via eventual consistency or by having a single leader set the data.
The default scope is :attr:`scopes.UNIT`.
"""
class states(StateList):
"""
This is the set of :class:`States <charms.reactive.bus.State>` that this
relation could set.
This should be defined by the relation subclass to ensure that
states are consistent and documented, as well as being discoverable
and introspectable by linting and composition tools.
For example::
class MyRelationClient(RelationBase):
scope = scopes.GLOBAL
auto_accessors = ['host', 'port']
class states(StateList):
connected = State('{relation_name}.connected')
available = State('{relation_name}.available')
@hook('{requires:my-interface}-relation-{joined,changed}')
def changed(self):
self.set_state(self.states.connected)
if self.host() and self.port():
self.set_state(self.states.available)
"""
pass
auto_accessors = []
"""
Remote field names to be automatically converted into accessors with
basic documentation.
These accessors will just call :meth:`get_remote` using the
:meth:`default conversation <conversation>`. Note that it is highly
recommended that this be used only with :attr:`scopes.GLOBAL` scope.
"""
@classmethod
def _startup(cls):
# update data to be backwards compatible after fix for issue 28
_migrate_conversations()
if hookenv.hook_name().endswith('-relation-departed'):
def depart_conv():
cls(hookenv.relation_type()).conversation().depart()
hookenv.atexit(depart_conv)
def __init__(self, relation_name, conversations=None):
self._relation_name = relation_name
self._conversations = conversations or [Conversation.join(self.scope)]
@property
def relation_name(self):
"""
Name of the relation this instance is handling.
"""
return self._relation_name
@classmethod
def from_state(cls, state):
"""
.. deprecated:: 0.6.1
use :func:`endpoint_from_flag` instead
"""
return cls.from_flag(state)
@classmethod
def from_flag(cls, flag):
"""
Find relation implementation in the current charm, based on the
name of an active flag.
You should not use this method directly.
Use :func:`endpoint_from_flag` instead.
"""
value = _get_flag_value(flag)
if value is None:
return None
relation_name = value['relation']
conversations = Conversation.load(value['conversations'])
return cls.from_name(relation_name, conversations)
@classmethod
def from_name(cls, relation_name, conversations=None):
"""
Find relation implementation in the current charm, based on the
name of the relation.
:return: A Relation instance, or None
"""
if relation_name is None:
return None
relation_class = cls._cache.get(relation_name)
if relation_class:
return relation_class(relation_name, conversations)
role, interface = hookenv.relation_to_role_and_interface(relation_name)
if role and interface:
relation_class = cls._find_impl(role, interface)
if relation_class:
cls._cache[relation_name] = relation_class
return relation_class(relation_name, conversations)
return None
@classmethod
def _find_impl(cls, role, interface):
"""
Find relation implementation based on its role and interface.
"""
module = _relation_module(role, interface)
if not module:
return None
return cls._find_subclass(module)
@classmethod
def _find_subclass(cls, module):
"""
Attempt to find subclass of :class:`RelationBase` in the given module.
Note: This means strictly subclasses and not :class:`RelationBase` itself.
This is to prevent picking up :class:`RelationBase` being imported to be
used as the base class.
"""
for attr in dir(module):
candidate = getattr(module, attr)
if (isclass(candidate) and issubclass(candidate, cls) and
candidate is not RelationBase):
return candidate
return None
def conversations(self):
"""
Return a list of the conversations that this relation is currently handling.
Note that "currently handling" means for the current state or hook context,
and not all conversations that might be active for this relation for other
states.
"""
return list(self._conversations)
def conversation(self, scope=None):
"""
Get a single conversation, by scope, that this relation is currently handling.
If the scope is not given, the correct scope is inferred by the current
hook execution context. If there is no current hook execution context, it
is assume that there is only a single global conversation scope for this
relation. If this relation's scope is not global and there is no current
hook execution context, then an error is raised.
"""
if scope is None:
if self.scope is scopes.UNIT:
scope = hookenv.remote_unit()
elif self.scope is scopes.SERVICE:
scope = hookenv.remote_service_name()
else:
scope = self.scope
if scope is None:
raise ValueError('Unable to determine default scope: no current hook or global scope')
for conversation in self._conversations:
if conversation.scope == scope:
return conversation
else:
raise ValueError("Conversation with scope '%s' not found" % scope)
def set_state(self, state, scope=None):
"""
Set the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).set_state(state)
See :meth:`conversation` and :meth:`Conversation.set_state`.
"""
self.conversation(scope).set_state(state)
set_flag = set_state
def remove_state(self, state, scope=None):
"""
Remove the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).remove_state(state)
See :meth:`conversation` and :meth:`Conversation.remove_state`.
"""
self.conversation(scope).remove_state(state)
remove_flag = remove_state
def is_state(self, state, scope=None):
"""
Test the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).is_state(state)
See :meth:`conversation` and :meth:`Conversation.is_state`.
"""
return self.conversation(scope).is_state(state)
is_flag_set = is_state
def toggle_state(self, state, active=TOGGLE, scope=None):
"""
Toggle the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).toggle_state(state, active)
See :meth:`conversation` and :meth:`Conversation.toggle_state`.
"""
self.conversation(scope).toggle_state(state, active)
toggle_flag = toggle_state
def set_remote(self, key=None, value=None, data=None, scope=None, **kwdata):
"""
Set data for the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).set_remote(key, value, data, scope, **kwdata)
See :meth:`conversation` and :meth:`Conversation.set_remote`.
"""
self.conversation(scope).set_remote(key, value, data, **kwdata)
def get_remote(self, key, default=None, scope=None):
"""
Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`.
"""
return self.conversation(scope).get_remote(key, default)
def set_local(self, key=None, value=None, data=None, scope=None, **kwdata):
"""
Locally store some data, namespaced by the current or given :class:`Conversation` scope.
In Python, this is equivalent to::
relation.conversation(scope).set_local(data, scope, **kwdata)
See :meth:`conversation` and :meth:`Conversation.set_local`.
"""
self.conversation(scope).set_local(key, value, data, **kwdata)
def get_local(self, key, default=None, scope=None):
"""
Retrieve some data previously set via :meth:`set_local`.
In Python, this is equivalent to::
relation.conversation(scope).get_local(key, default)
See :meth:`conversation` and :meth:`Conversation.get_local`.
"""
return self.conversation(scope).get_local(key, default)
class Conversation(object):
"""
    Conversations are the persistent, evolving, two-way communication between
this service and one or more remote services.
Conversations are not limited to a single Juju hook context. They represent
the entire set of interactions between the end-points from the time the
relation is joined until it is departed.
Conversations evolve over time, moving from one semantic state to the next
as the communication progresses.
Conversations may encompass multiple remote services or units. While a
database client would connect to only a single database, that database will
likely serve several other services. On the other hand, while the database
is only concerned about providing a database to each service as a whole, a
load-balancing proxy must consider each unit of each service individually.
Conversations use the idea of :class:`scope` to determine how units and
services are grouped together.
"""
def __init__(self, namespace, units, scope):
self.namespace = namespace
self.units = set(units)
self.scope = scope
@classmethod
def _key(cls, namespace, scope):
return 'reactive.conversations.%s.%s' % (namespace, scope)
@property
def key(self):
"""
The key under which this conversation will be stored.
"""
return self._key(self.namespace, self.scope)
@property
def relation_name(self):
return self.namespace.split(':')[0]
@property
def relation_ids(self):
"""
The set of IDs of the specific relation instances that this conversation
is communicating with.
"""
if self.scope == scopes.GLOBAL:
# the namespace is the relation name and this conv speaks for all
# connected instances of that relation
return hookenv.relation_ids(self.namespace)
else:
# the namespace is the relation ID
return [self.namespace]
@classmethod
def join(cls, scope):
"""
Get or create a conversation for the given scope and active hook context.
The current remote unit for the active hook context will be added to
the conversation.
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
"""
relation_name = hookenv.relation_type()
relation_id = hookenv.relation_id()
unit = hookenv.remote_unit()
service = hookenv.remote_service_name()
if scope is scopes.UNIT:
scope = unit
namespace = relation_id
elif scope is scopes.SERVICE:
scope = service
namespace = relation_id
else:
namespace = relation_name
key = cls._key(namespace, scope)
data = unitdata.kv().get(key, {'namespace': namespace, 'scope': scope, 'units': []})
conversation = cls.deserialize(data)
conversation.units.add(unit)
unitdata.kv().set(key, cls.serialize(conversation))
return conversation
def depart(self):
"""
Remove the current remote unit, for the active hook context, from
this conversation. This should be called from a `-departed` hook.
"""
unit = hookenv.remote_unit()
self.units.remove(unit)
if self.units:
unitdata.kv().set(self.key, self.serialize(self))
else:
unitdata.kv().unset(self.key)
@classmethod
def deserialize(cls, conversation):
"""
Deserialize a :meth:`serialized <serialize>` conversation.
"""
return cls(**conversation)
@classmethod
def serialize(cls, conversation):
"""
Serialize a conversation instance for storage.
"""
return {
'namespace': conversation.namespace,
'units': sorted(conversation.units),
'scope': conversation.scope,
}
@classmethod
def load(cls, keys):
"""
Load a set of conversations by their keys.
"""
conversations = []
for key in keys:
conversation = unitdata.kv().get(key)
if conversation:
conversations.append(cls.deserialize(conversation))
return conversations
def set_state(self, state):
"""
Activate and put this conversation into the given state.
The relation name will be interpolated in the state name, and it is
recommended that it be included to avoid conflicts with states from
other relations. For example::
conversation.set_state('{relation_name}.state')
        If called from a conversation handling the relation "foo", this will
activate the "foo.state" state, and will add this conversation to
that state.
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
"""
state = state.format(relation_name=self.relation_name)
value = _get_flag_value(state, {
'relation': self.relation_name,
'conversations': [],
})
if self.key not in value['conversations']:
value['conversations'].append(self.key)
set_flag(state, value)
set_flag = set_state
def remove_state(self, state):
"""
Remove this conversation from the given state, and potentially
deactivate the state if no more conversations are in it.
The relation name will be interpolated in the state name, and it is
recommended that it be included to avoid conflicts with states from
other relations. For example::
conversation.remove_state('{relation_name}.state')
        If called from a conversation handling the relation "foo", this will
remove the conversation from the "foo.state" state, and, if no more
conversations are in this the state, will deactivate it.
"""
state = state.format(relation_name=self.relation_name)
value = _get_flag_value(state)
if not value:
return
if self.key in value['conversations']:
value['conversations'].remove(self.key)
if value['conversations']:
set_flag(state, value)
else:
clear_flag(state)
remove_flag = remove_state
def is_state(self, state):
"""
Test if this conversation is in the given state.
"""
state = state.format(relation_name=self.relation_name)
value = _get_flag_value(state)
if not value:
return False
return self.key in value['conversations']
is_flag_set = is_state
def toggle_state(self, state, active=TOGGLE):
"""
Toggle the given state for this conversation.
        The state will be set if ``active`` is ``True``, otherwise the state will be removed.
If ``active`` is not given, it will default to the inverse of the current state
(i.e., ``False`` if the state is currently set, ``True`` if it is not; essentially
toggling the state).
For example::
conv.toggle_state('{relation_name}.foo', value=='foo')
This will set the state if ``value`` is equal to ``foo``.
"""
if active is TOGGLE:
active = not self.is_state(state)
if active:
self.set_state(state)
else:
self.remove_state(state)
toggle_flag = toggle_state
def set_remote(self, key=None, value=None, data=None, **kwdata):
"""
Set data for the remote end(s) of this conversation.
Data can be passed in either as a single dict, or as key-word args.
Note that, in Juju, setting relation data is inherently service scoped.
That is, if the conversation only includes a single unit, the data will
still be set for that unit's entire service.
However, if this conversation's scope encompasses multiple services,
the data will be set for all of those services.
:param str key: The name of a field to set.
:param value: A value to set. This value must be json serializable.
:param dict data: A mapping of keys to values.
:param **kwdata: A mapping of keys to values, as keyword arguments.
"""
if data is None:
data = {}
if key is not None:
data[key] = value
data.update(kwdata)
if not data:
return
for relation_id in self.relation_ids:
hookenv.relation_set(relation_id, data)
def get_remote(self, key, default=None):
"""
Get a value from the remote end(s) of this conversation.
Note that if a conversation's scope encompasses multiple units, then
those units are expected to agree on their data, whether that is through
relying on a single leader to set the data or by all units eventually
converging to identical data. Thus, this method returns the first
value that it finds set by any of its units.
"""
cur_rid = hookenv.relation_id()
departing = hookenv.hook_name().endswith('-relation-departed')
for relation_id in self.relation_ids:
units = hookenv.related_units(relation_id)
if departing and cur_rid == relation_id:
# Work around the fact that Juju 2.0 doesn't include the
# departing unit in relation-list during the -departed hook,
# by adding it back in ourselves.
units.append(hookenv.remote_unit())
for unit in units:
if unit not in self.units:
continue
value = hookenv.relation_get(key, unit, relation_id)
if value:
return value
return default
def set_local(self, key=None, value=None, data=None, **kwdata):
"""
Locally store some data associated with this conversation.
Data can be passed in either as a single dict, or as key-word args.
For example, if you need to store the previous value of a remote field
to determine if it has changed, you can use the following::
prev = conversation.get_local('field')
curr = conversation.get_remote('field')
if prev != curr:
handle_change(prev, curr)
conversation.set_local('field', curr)
Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
:meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
:param str key: The name of a field to set.
:param value: A value to set. This value must be json serializable.
:param dict data: A mapping of keys to values.
:param **kwdata: A mapping of keys to values, as keyword arguments.
"""
if data is None:
data = {}
if key is not None:
data[key] = value
data.update(kwdata)
if not data:
return
unitdata.kv().update(data, prefix='%s.%s.' % (self.key, 'local-data'))
def get_local(self, key, default=None):
"""
Retrieve some data previously set via :meth:`set_local` for this conversation.
"""
key = '%s.%s.%s' % (self.key, 'local-data', key)
return unitdata.kv().get(key, default)
def _migrate_conversations(): # noqa
"""
Due to issue #28 (https://github.com/juju-solutions/charms.reactive/issues/28),
conversations needed to be updated to be namespaced per relation ID for SERVICE
and UNIT scope. To ensure backwards compatibility, this updates all convs in
the old format to the new.
TODO: Remove in 2.0.0
"""
for key, data in unitdata.kv().getrange('reactive.conversations.').items():
if 'local-data' in key:
continue
if 'namespace' in data:
continue
relation_name = data.pop('relation_name')
if data['scope'] == scopes.GLOBAL:
data['namespace'] = relation_name
unitdata.kv().set(key, data)
else:
# split the conv based on the relation ID
new_keys = []
for rel_id in hookenv.relation_ids(relation_name):
new_key = Conversation._key(rel_id, data['scope'])
new_units = set(hookenv.related_units(rel_id)) & set(data['units'])
if new_units:
unitdata.kv().set(new_key, {
'namespace': rel_id,
'scope': data['scope'],
'units': sorted(new_units),
})
new_keys.append(new_key)
unitdata.kv().unset(key)
# update the states pointing to the old conv key to point to the
# (potentially multiple) new key(s)
for flag in get_flags():
value = _get_flag_value(flag)
if not value:
continue
if key not in value['conversations']:
continue
value['conversations'].remove(key)
value['conversations'].extend(new_keys)
set_flag(flag, value)
@cmdline.subcommand()
def relation_call(method, relation_name=None, flag=None, state=None, *args):
"""Invoke a method on the class implementing a relation via the CLI"""
if relation_name:
relation = relation_from_name(relation_name)
if relation is None:
raise ValueError('Relation not found: %s' % relation_name)
elif flag or state:
relation = relation_from_flag(flag or state)
if relation is None:
raise ValueError('Relation not found: %s' % (flag or state))
else:
raise ValueError('Must specify either relation_name or flag')
result = getattr(relation, method)(*args)
if isinstance(relation, RelationBase) and method == 'conversations':
# special case for conversations to make them work from CLI
result = [c.scope for c in result]
return result
hookenv.atstart(RelationBase._startup)
RelationFactory.discover()
| apache-2.0 | -2,251,918,463,404,787,500 | 34.556144 | 98 | 0.623834 | false |
robertnishihara/ray | python/ray/signature.py | 1 | 5787 | import inspect
from inspect import Parameter
import logging
from ray.utils import is_cython
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# This dummy type is also defined in ArgumentsBuilder.java. Please keep it
# synced.
DUMMY_TYPE = b"__RAY_DUMMY__"
def get_signature(func):
"""Get signature parameters.
Support Cython functions by grabbing relevant attributes from the Cython
function and attaching to a no-op function. This is somewhat brittle, since
inspect may change, but given that inspect is written to a PEP, we hope
it is relatively stable. Future versions of Python may allow overloading
the inspect 'isfunction' and 'ismethod' functions / create ABC for Python
functions. Until then, it appears that Cython won't do anything about
    compatibility with the inspect module.
Args:
func: The function whose signature should be checked.
Returns:
A function signature object, which includes the names of the keyword
arguments as well as their default values.
Raises:
TypeError: A type error if the signature is not supported
"""
# The first condition for Cython functions, the latter for Cython instance
# methods
if is_cython(func):
attrs = [
"__code__", "__annotations__", "__defaults__", "__kwdefaults__"
]
if all(hasattr(func, attr) for attr in attrs):
original_func = func
def func():
return
for attr in attrs:
setattr(func, attr, getattr(original_func, attr))
else:
raise TypeError(
f"{func!r} is not a Python function we can process")
return inspect.signature(func)
def extract_signature(func, ignore_first=False):
"""Extract the function signature from the function.
Args:
func: The function whose signature should be extracted.
ignore_first: True if the first argument should be ignored. This should
be used when func is a method of a class.
Returns:
List of Parameter objects representing the function signature.
"""
signature_parameters = list(get_signature(func).parameters.values())
if ignore_first:
if len(signature_parameters) == 0:
raise ValueError("Methods must take a 'self' argument, but the "
f"method '{func.__name__}' does not have one.")
signature_parameters = signature_parameters[1:]
return signature_parameters
def flatten_args(signature_parameters, args, kwargs):
"""Validates the arguments against the signature and flattens them.
The flat list representation is a serializable format for arguments.
Since the flatbuffer representation of function arguments is a list, we
combine both keyword arguments and positional arguments. We represent
this with two entries per argument value - [DUMMY_TYPE, x] for positional
arguments and [KEY, VALUE] for keyword arguments. See the below example.
See `recover_args` for logic restoring the flat list back to args/kwargs.
Args:
signature_parameters (list): The list of Parameter objects
representing the function signature, obtained from
`extract_signature`.
args: The non-keyword arguments passed into the function.
kwargs: The keyword arguments passed into the function.
Returns:
List of args and kwargs. Non-keyword arguments are prefixed
by internal enum DUMMY_TYPE.
Raises:
TypeError: Raised if arguments do not fit in the function signature.
Example:
        >>> flatten_args(extract_signature(f), [1, 2, 3], {"a": 4})
        [DUMMY_TYPE, 1, DUMMY_TYPE, 2, DUMMY_TYPE, 3, "a", 4]
"""
reconstructed_signature = inspect.Signature(
parameters=signature_parameters)
try:
reconstructed_signature.bind(*args, **kwargs)
except TypeError as exc: # capture a friendlier stacktrace
raise TypeError(str(exc)) from None
list_args = []
for arg in args:
list_args += [DUMMY_TYPE, arg]
for keyword, arg in kwargs.items():
list_args += [keyword, arg]
return list_args
def recover_args(flattened_args):
"""Recreates `args` and `kwargs` from the flattened arg list.
Args:
flattened_args: List of args and kwargs. This should be the output of
`flatten_args`.
Returns:
args: The non-keyword arguments passed into the function.
kwargs: The keyword arguments passed into the function.
"""
assert len(flattened_args) % 2 == 0, (
"Flattened arguments need to be even-numbered. See `flatten_args`.")
args = []
kwargs = {}
for name_index in range(0, len(flattened_args), 2):
name, arg = flattened_args[name_index], flattened_args[name_index + 1]
if name == DUMMY_TYPE:
args.append(arg)
else:
kwargs[name] = arg
return args, kwargs
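# Illustrative round trip (the function `f` is hypothetical): positional
# arguments are interleaved with DUMMY_TYPE markers, keyword arguments with
# their names, and recover_args() undoes the flattening.
#
#   def f(a, b, c=3):
#       pass
#
#   flat = flatten_args(extract_signature(f), [1, 2], {"c": 5})
#   # flat == [DUMMY_TYPE, 1, DUMMY_TYPE, 2, "c", 5]
#   args, kwargs = recover_args(flat)
#   # args == [1, 2], kwargs == {"c": 5}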
def _convert_from_parameter_kind(kind):
if kind == Parameter.POSITIONAL_ONLY:
return 0
if kind == Parameter.POSITIONAL_OR_KEYWORD:
return 1
if kind == Parameter.VAR_POSITIONAL:
return 2
if kind == Parameter.KEYWORD_ONLY:
return 3
if kind == Parameter.VAR_KEYWORD:
return 4
def _convert_to_parameter_kind(value):
if value == 0:
return Parameter.POSITIONAL_ONLY
if value == 1:
return Parameter.POSITIONAL_OR_KEYWORD
if value == 2:
return Parameter.VAR_POSITIONAL
if value == 3:
return Parameter.KEYWORD_ONLY
if value == 4:
return Parameter.VAR_KEYWORD
| apache-2.0 | -1,852,312,170,600,221,700 | 32.258621 | 79 | 0.654398 | false |
FabriceSalvaire/Musica | other-examples/audio/generate-wav-tone.py | 1 | 7254 | ####################################################################################################
#
# Musica - A Music Theory Package for Python
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
#! /usr/bin/env python
####################################################################################################
# https://en.wikipedia.org/wiki/WAV
# https://fr.wikipedia.org/wiki/Waveform_Audio_File_Format
####################################################################################################
import argparse
import wave
import numpy as np
####################################################################################################
class ToneGenerator:
##############################################
def __init__(self,
frequency=440, # Hz LA3 A4
amplitude=1.,
):
self._frequency = frequency
self._amplitude = amplitude
##############################################
def generate(self, times):
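        # plain sine tone: pcm(t) = amplitude * sin(2*pi*frequency*t)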
pulsation = 2 * np.pi * self._frequency
pcm = self._amplitude * np.sin(pulsation * times)
return pcm
####################################################################################################
class WavFile:
##############################################
def __init__(self,
sample_rate=44100, # Hz
number_of_bits=16,
number_of_channels=2,
duration=1, # s
rise_time=100, # ms
fall_time=100,
):
self._sample_rate = sample_rate
self._number_of_bits = number_of_bits
self._number_of_channels = number_of_channels
self._duration = duration
self._rise_time = rise_time
self._fall_time = fall_time
self._sampling_period = 1 / self._sample_rate
##############################################
def make_t(self, i_start, i_stop):
# stop = start + size * self._sampling_period
# times = np.arange(start, stop, self._sampling_period)
# return times[:size]
# return np.arange(start, stop, self._sampling_period)
return np.arange(i_start, i_stop) * self._sampling_period
##############################################
def encode_frames(self, pcm, to_stereo=False):
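        # pack the PCM samples as 16-bit little-endian integers ('<i2'); for a
        # stereo stream the mono signal is duplicated and interleaved as L,R,L,R,...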
        if self._number_of_bits == 16:
dtype = '<i2'
else:
raise NotImplementedError
mono_frames = np.array(pcm, dtype=dtype)
if to_stereo:
stereo_frames = np.zeros(2 * mono_frames.size, dtype=dtype)
stereo_frames[0:-1:2] = mono_frames
stereo_frames[1::2] = mono_frames
frames = stereo_frames
else:
frames = mono_frames
return frames.tobytes('C')
##############################################
def make_wave(self, path, tone_generator):
wave_file = wave.open(path, 'wb')
sample_width = self._number_of_bits // 8
number_of_frames = int(self._sample_rate * self._duration)
# (nchannels, sampwidth, framerate, nframes, comptype, compname)
wave_file.setparams((self._number_of_channels,
sample_width,
self._sample_rate,
number_of_frames,
'NONE',
'not compressed'))
# 16-bit = 32767
amplitude_max = 2 ** self._number_of_bits // 2 - 1
chunk_size = 10000
time_window = chunk_size * self._sampling_period
for chunk in range(number_of_frames // 1000 + 2):
i_start = chunk * chunk_size
i_stop = i_start + chunk_size
start = i_start * self._sampling_period
stop = min(i_stop * self._sampling_period, self._duration)
i_stop = int(stop / self._sampling_period)
times = self.make_t(i_start, i_stop)
# print(chunk, i_start, i_stop, stop, times)
pcm = tone_generator.generate(times)
if start < self._rise_time:
pcm *= np.minimum(times / self._rise_time, np.ones(times.size))
if stop > self._duration - self._fall_time:
pcm *= np.minimum(- (times - self._duration) / self._fall_time, np.ones(times.size))
pcm *= amplitude_max
frames = self.encode_frames(pcm, to_stereo=self._number_of_channels == 2)
wave_file.writeframes(frames) # writeframesraw
if stop == self._duration:
break
wave_file.close()
####################################################################################################
if __name__ == "__main__":
# cut at 14.7 kHz ???
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-r', '--rate', help="Sample rate in Hz",
type=int, default=44100)
argument_parser.add_argument('-b', '--bits', help="Number of bits in each sample",
type=int, choices=(16,), default=16)
argument_parser.add_argument('-c', '--channels', help="Number of channels to produce",
type=int, default=2)
argument_parser.add_argument('-t', '--time', help="Duration of the wave in ms.",
type=float, default=1.)
argument_parser.add_argument('-rt', '--rise-time', help="Rise time in ms.",
type=float, default=100.)
    argument_parser.add_argument('-ft', '--fall-time', help="Fall time in ms.",
type=float, default=100.)
argument_parser.add_argument('-a', '--amplitude', help="Amplitude of the wave on a scale of 0.0-1.0.",
type=float, default=1,)
argument_parser.add_argument('-f', '--frequency', help="Frequency of the wave in Hz",
type=float, default=440.0) # LA3 A4
argument_parser.add_argument('filename', help="The file to generate.")
args = argument_parser.parse_args()
wav_file = WavFile(
sample_rate=args.rate,
number_of_bits=args.bits,
number_of_channels=args.channels,
duration=args.time / 1000,
rise_time=args.rise_time / 1000,
fall_time=args.fall_time / 1000,
)
tone_generator = ToneGenerator(
frequency=args.frequency,
amplitude=args.amplitude,
)
wav_file.make_wave(args.filename, tone_generator)
| gpl-3.0 | 8,334,232,289,626,942,000 | 34.73399 | 106 | 0.489109 | false |
hendrikwout/pynacolada | old/pynacolada/pynacolada.py | 1 | 59125 | from operator import itemgetter,mul
import numpy as np
from netcdf import netcdf_file
import sys
from Scientific.IO import NetCDF
from ncdfextract import ncgettypecode
from ncdfproc import nccopydimension,nccopyattrvar
class SomeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def ncvartypeoffset(ncfile,var):
""" purpose: get binary data type and offset of a variable in netcdf file
    unfortunately, getting these properties is not explicitly implemented in scipy, but most of this code is stolen from scipy: /usr/lib/python2.7/dist-packages/scipy/io/netcdf.py
    ncfile is a scipy.io.netcdf.netcdf_file
    var is the variable we want to calculate the data type and offset of
"""
oripos=ncfile.fp.tell()
ncfile.fp.seek(0)
magic = ncfile.fp.read(3)
ncfile.__dict__['version_byte'] = np.fromstring(ncfile.fp.read(1), '>b')[0]
# Read file headers and set data.
ncfile._read_numrecs()
ncfile._read_dim_array()
ncfile._read_gatt_array()
header = ncfile.fp.read(4)
count = ncfile._unpack_int()
vars = []
for ic in range(count):
vars.append(list(ncfile._read_var()))
ivar = np.where(np.array(vars) == var)[0][0]
ncfile.fp.seek(oripos)
return vars[ivar][6] , vars[ivar][7]
def rwicecube(filestream,shp,refiter,dimiter,dimpos,refnoiter,dimnoiter,icecube,vtype,vsize,voffset,rwchsize,mode):
"""
read or write data icecube from binary data and put it in an array
filestream: binary file reference
shp: shape of the filestream
refiter: reference to dimensions over which no slice is performed
    dimpos: current index position of the non-sliced dimensions
"""
# e.g. shp = (200,100,50,50,20)
# refiter = (1,3,4)
# dimpos = (5,10,9)
# extend so that structured arrays are read at once
lennoiter = long(1)
for irefnoiter,erefnoiter in enumerate(refnoiter):
lennoiter = lennoiter*len(dimnoiter[irefnoiter])
fpos = 0
# e.g. fpos = (9)+ 20*(10) + 50*50*20*(5)
for idimpos,edimpos in enumerate(dimpos):
curadd = np.mod(dimiter[idimpos][np.mod(edimpos,len(dimiter[idimpos]))],shp[refiter[idimpos]])
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
# exclude trivial special case of only 1 iteration step
# --> in that case fpos is just zero.
if refiter > [-1]:
if ((refiter[idimpos] + 1) < len(shp)):
for i in range(refiter[idimpos] + 1,len(shp)) :
curadd = curadd * shp[i]
fpos = fpos + curadd
# Initialize (for reading) or prepare (for writing) icecube array
if mode == 'read':
icecube = np.zeros((lennoiter,),dtype=vtype)*np.nan
elif mode == 'write':
icecube = np.reshape(icecube,(lennoiter,))
dimnoiterpos = [0]*len(dimnoiter)
j = 0
while j < lennoiter:
fposicecube = fpos
for idimpos,edimpos in enumerate(dimnoiterpos):
curadd = np.mod(dimnoiter[idimpos][np.mod(edimpos,len(dimnoiter[idimpos]))],shp[refnoiter[idimpos]])
# e.g. fposicecube = (1)*52
# e.g. fposicecube = (9)+ 20*(10) + 50*50*20*(5)
if ((refnoiter[idimpos] + 1) < len(shp)):
for i in range(refnoiter[idimpos] + 1,len(shp)) :
curadd = curadd * shp[i]
fposicecube = fposicecube + curadd
filestream.seek(voffset+vsize*fposicecube)
if mode == 'read':
temp = np.fromfile(filestream,dtype='>'+vtype[1],count=rwchsize)
# temp.byteswap(True)
icecube[j:(j+rwchsize)] = temp
elif mode == 'write':
temp = np.array(icecube[j:(j+rwchsize)],dtype='>'+vtype[1])
filestream.write(temp)
# go to next data strip
if dimnoiterpos != []:
# rwchsize: allow reading of chunks for the inner dimensions
dimnoiterpos[-1] = dimnoiterpos[-1] + rwchsize
for idimidx,edimidx in reversed(list(enumerate(dimnoiterpos))):
if idimidx > 0:
while dimnoiterpos[idimidx] >= len(dimnoiter[idimidx]):
dimnoiterpos[idimidx-1] = dimnoiterpos[idimidx-1] + 1
dimnoiterpos[idimidx] -= len(dimnoiter[idimidx])
j = j+rwchsize
icecube.shape = [len(e) for e in dimnoiter]
if mode == 'read':
return icecube
def writeicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,data,vtype,vsize,voffset,rwchsize):
"""
write an icecube and perform an in-memory Post Swap of dimensions before (very fast)
hereby, we acquire the order of the icecube dimensions
"""
refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,np.transpose(data,trns),vtype,vsize,voffset,rwchsize,'write')
def readicecubeps(fstream,shp,refiter,dimiter,dimiterpos,refnoiter,dimnoiter,vtype,vsize,voffset,rwchsize):
"""
read an icecube by sorting the indices (highest at the back).
perform an in-memory Post Swap of dimensions (very fast) to compensate for the sorting.
    we allow reading in chunks according to the inner dimensions. These chunks will mostly be available because we impose a maximum icecube size
"""
refnoitersort,trns,dimnoitersort = zip(*sorted(zip(refnoiter,range(len(refnoiter)),dimnoiter),key=itemgetter(0,1)))
# print 'shp',shp,refnoitersort,[len(e) for e in dimnoitersort],refiter
if str(type(fstream))[7:11] == 'list':
# this statement assumes that the first dimension of the datastream represents the 'listing' of the files
if refnoitersort[0] == 0:
# print([len(fstream)]+list([len(e) for e in dimnoitersort[1:]]))
icecube = np.zeros([len(fstream)]+list([len(e) for e in dimnoitersort[1:]]))
# icecube = np.zeros([len(fstream)]+list(dimnoitersort[1:]))
refiterred = list(refiter); refiterred = [e -1 for e in refiterred]
refnoitersortred = list(refnoitersort[1:]); refnoitersortred = [e - 1 for e in refnoitersortred]
dimnoitersortred = list(dimnoitersort[1:])
shpred = list(shp[1:])
# print 'shpred',shpred,refnoitersortred,[len(e) for e in dimnoitersortred],refiterred
for ifn,efn in enumerate(fstream):
tfile = open(fstream[ifn],'r')
icecube[ifn] =rwicecube(tfile,shpred,refiterred,dimiter,dimiterpos,refnoitersortred,dimnoitersortred,None,vtype,vsize,voffset,rwchsize,'read')
tfile.close()
elif 0 in refiter:
irefiter = refiter.index(0)
ifn = dimiterpos[irefiter]
tfile = open(fstream[ifn],'r')
refiterred = list(refiter); refiterred.pop(irefiter); refiterred = [ e-1 for e in refiterred]
dimiterred = list(dimiter); dimiterred.pop(irefiter)
dimiterposred = list(dimiterpos);dimiterposred.pop(irefiter)
shpred = list(shp[1:])
refnoitersortred = list(refnoiter); refnoitersortred = [e-1 for e in refnoitersortred]
icecube =rwicecube(tfile,shpred,refiterred,dimiterred,dimiterposred,refnoitersortred,dimnoitersort,None,vtype,vsize,voffset,rwchsize,'read')
tfile.close()
else:
icecube =rwicecube(fstream,shp,refiter,dimiter,dimiterpos,refnoitersort,dimnoitersort,None,vtype,vsize,voffset,rwchsize,'read')
# build the 'inverse permutation' operator for tranposition before writeout
inv = range(len(trns))
for itrns, etrns in enumerate(trns):
inv[etrns] = itrns
return np.transpose(icecube,inv)
# we could try to create a variable with a pre-specified dimension!
def pcd(func,dnamsel,datin,datout,appenddim = False, maxmembytes = 10000000, forcearray=False):
""" purpose (see also README.md): process binary NetCDF data streams
func: the function/analysis to be applied on the data input streams
specified below. The arguments of func correspond to the respective
data input streams. The output needs to be a numpy.array(), or a list
of numpy.array() of which each item corresponds to the data output
streams specified below.
dnamsel: the dimensions on which the function needs to be applied. The
function will be repeated along the other dimensions
datin: list of data input variables/streams e.g.
[{'file':ncfile,'varname':'T',...},{'file':ncfile2,'varname':'P',...}]
possible dictionary keywords hereby:
- 'file': <pointer to Scientific.IO.NetCDF.NetCDFFile('filename','r')>, or a list of filenames that represent NetCDF files all with exactly the same (NetCDF) data structure
- 'varname': NetCDF variable
- 'daliases' (optional): aliases for dimensions to 'align' dimensions
for different data input variables
    - 'predim' (optional): when 'file' is a filename list, 'predim' will
be the name of the dimension that represents the
filelist. When not specified, the outer
dimension of the netcdf files will be expanded
only if it appears to have a length of 1. In all
other cases, the name predim<0,1/2/3>... will be
taken as extra dimensions.
- 'dsel': select a subset of the data by specifying dimension indices,
for example {'level' : range(0,5,1), 'lat' : range(50,60,1),
'lon' : range(70,80,1)} will select a subspace of the first 5
             levels, and an inner horizontal domain.
datout: list of data output data variables/streams, in a similar fashion
as datin
appenddim: append dnamsel with the inner dimensions of the data input
        streams when possible. This will generally lead to increased
        performance. This option needs to be supported by the func.
maxmembytes: the maximum amount of bytes that is allowed for the buffers to be read/written from/to the input/output data streams.
"""
# track the dimensions to be selected when multiples are given
dnamseldef = []
for ednamsel in dnamsel:
if ((type(ednamsel).__name__ == 'tuple') | (type(ednamsel).__name__ == 'list')):
dnamseldef.append(ednamsel[0])
for idatin,edatin in reversed(list(enumerate(datin))):
ncfn = None
if type(datin[idatin]['file']).__name__ == 'list':
ncfn = datin[idatin]['file'][0]
# we assume a netcdf file
elif type(datin[idatin]['file']).__name__ == 'NetCDFFile':
# obtain file name from open netcdf!! very nasty!!!
ncfn = str(datin[idatin]['file'])[19:(str(datin[idatin]['file']).index("'",19))]
# we assume a file name
elif type(datin[idatin]['file']).__name__ == 'str':
ncfn = datin[idatin]['file']
nctemp = netcdf_file(ncfn,'r')
for edim in nctemp.variables[datin[idatin]['varname']].dimensions:
if edim in ednamsel:
dnamseldef[-1] = str(edim)
nctemp.close()
else:
dnamseldef.append(ednamsel)
# obtain definitions of the variable stream input
vsdin = [] # input variable stream definitions
for idatin,edatin in enumerate(datin):
# read in scipy.netcdf mode to obtain varariable offsets
vsdin.append(dict())
if type(datin[idatin]['file']).__name__ == 'list':
ncfn = datin[idatin]['file'][0]
nctemp = netcdf_file(ncfn,'r')
vsdin[idatin]['dnams'] = []
for idim in range(len(nctemp.variables[datin[idatin]['varname']].dimensions)):
edim = nctemp.variables[datin[idatin]['varname']].dimensions[idim]
if 'daliases' in datin[idatin]:
if edim in datin[idatin]['daliases']:
edim = datin[idatin]['daliases'][edim]
vsdin[idatin]['dnams'].append(str(edim))
#print '1dnams', idatin,vsdin[idatin]['dnams']
# this statement could be a problem when defining daliases for the file-list dimension?!!!
predim = None
if 'predim' in datin[idatin]:
predim = datin[idatin]['predim']
if ((max(nctemp.variables[datin[idatin]['varname']].shape[0],1) == 1) & ((predim == None) | (nctemp.variables[datin[idatin]['varname']].dimensions[0] == predim))):
# we expand the first dimension (which only has length 1 per file)
vsdin[idatin]['dims'] = [len(datin[idatin]['file'])]+list(nctemp.variables[datin[idatin]['varname']].shape[1:])
else:
                # we add a new dimension # warning! uncaught problems can occur when predim0 already exists
if predim == None:
predim = 'predim0'
idimextra = 0
while predim in vsdin[idatin]['dnams']:
predim = 'predim'+str(idimextra)
idimextra = idimextra + 1
vsdin[idatin]['dnams'].insert(0,predim)
vsdin[idatin]['dims'] = [len(datin[idatin]['file'])]+list(nctemp.variables[datin[idatin]['varname']].shape[:])
#print '2dnams', idatin,vsdin[idatin]['dnams']
else:
# we assume a netcdf file
if type(datin[idatin]['file']).__name__ == 'NetCDFFile':
# obtain file name from open netcdf!! very nasty!!!
ncfn = str(datin[idatin]['file'])[19:(str(datin[idatin]['file']).index("'",19))]
# we assume a file name
elif type(datin[idatin]['file']).__name__ == 'str':
ncfn = datin[idatin]['file']
else:
raise SomeError("Input file "+ str(datin[idatin]) + " ("+str(idatin)+") could not be recognized.")
nctemp = netcdf_file(ncfn,'r')
vsdin[idatin]['dnams'] = []
for idim in range(len(nctemp.variables[datin[idatin]['varname']].dimensions)):
edim = nctemp.variables[datin[idatin]['varname']].dimensions[idim]
if 'daliases' in datin[idatin]:
if edim in datin[idatin]['daliases']:
edim = datin[idatin]['daliases'][edim]
vsdin[idatin]['dnams'].append(str(edim))
vsdin[idatin]['dims'] = list(nctemp.variables[datin[idatin]['varname']].shape)
vsdin[idatin]['itemsize'] = nctemp.variables[datin[idatin]['varname']].itemsize()
vsdin[idatin]['dtype'] = nctemp.variables[datin[idatin]['varname']]._dtype
vsdin[idatin]['voffset'] = nctemp.variables[datin[idatin]['varname']]._voffset
# dselmarker
vsdin[idatin]['dsel'] = [range(e) for e in vsdin[idatin]['dims']]
if 'dsel' in datin[idatin]:
# cropped dimensions
for edcrop in datin[idatin]['dsel']:
if edcrop in vsdin[idatin]['dnams']:
vsdin[idatin]['dsel'][vsdin[idatin]['dnams'].index(edcrop)] = list(datin[idatin]['dsel'][edcrop])
else:
print("Warning, dimension '"+ str(edcrop) + "' not in netcdf variable '"+ncfn+ "("+datin[idatin]['varname']+")'.")
nctemp.close()
# obtain definitions of the variable stream output
vsdout = [] # output variable stream definitions
for idatout,edatout in enumerate(datout):
vsdout.append(dict())
if edatout['varname'] in edatout['file'].variables:
vsdout[idatout]['dnams'] = []
for idim,edim in enumerate(datout[idatout]['file'].variables[datout[idatout]['varname']].dimensions):
vsdout[idatout]['dnams'].append(str(edim))
# we assume a netcdf file
if type(datout[idatout]['file']).__name__ == 'NetCDFFile':
# obtain file name from open netcdf!! very nasty!!!
ncfn = str(datout[idatout]['file'])[19:(str(datout[idatout]['file']).index("'",19))]
# we assume a file name
elif type(datout[idatout]['file']).__name__ == 'str':
ncfn = datout[idatout]['file']
else:
raise SomeError("Input file "+ str(datout[idatout]) + " ("+str(idatout)+") could not be recognized.")
datout[idatout]['file'].sync()
nctemp = netcdf_file(ncfn,'r')
vsdout[idatout]['dims'] = [max(e,1) for e in nctemp.variables[datout[idatout]['varname']].shape]
# dselmarker
vsdout[idatout]['dsel'] = [range(e) for e in vsdout[idatout]['dims']]
vsdout[idatout]['itemsize'] = nctemp.variables[datout[idatout]['varname']].itemsize()
vsdout[idatout]['dtype']= nctemp.variables[datout[idatout]['varname']]._dtype
vsdout[idatout]['voffset'] = nctemp.variables[datout[idatout]['varname']]._voffset
nctemp.close()
#datout[idatout]['file'] = NetCDF.NetCDFFile(ncfn,'a')
else:
# the variable doesn't exists (we will create it afterwards)
vsdout[idatout]['dnams'] = None
vsdout[idatout]['dims'] = None
# dselmarker
vsdout[idatout]['dsel'] = None
vsdout[idatout]['itemsize'] = None
vsdout[idatout]['dtype'] = None
# collecting the involved dimensions (will be considered as the standard output dimensions)
dnamsstd = [] # standard output dimensions: list of all output dimensions: this is collected from the input dimensions, the output dimensions and the selected/processed dimensions
dimsstd = [] # maximum length of an output dimension
idimsstd = 0
for ivsdin,evsdin in enumerate(vsdin):
dnaminlast = None
idx = 0
for idnam,ednam in reversed(list(enumerate(evsdin['dnams']))):
if ednam not in dnamsstd:
# In dnamsstd, ednam should be just after the dimensions preceding ednams in dnams
# # actually, we also want that, in dnamsstd, ednam should be just before the dimensions succeeding ednams in dnams. Sometimes, this is not possible at the same time. But it will be the case if that is possible when applying one of the criteria
idx = 0
# implement this also below, I mean that
for idnam2,ednam2 in enumerate(dnamsstd):
if ednam2 in evsdin['dnams'][0:(idnam+1)]:
idx = max(idx ,dnamsstd.index(ednam2) + 1)
dnamsstd.insert(idx ,ednam)
if ednam not in dnamseldef:
# dselmarker
if vsdin[ivsdin]['dsel'][idnam] != False: # type(edim).__name__ == 'list':
dimsstd.insert(idx ,int(len(vsdin[ivsdin]['dsel'][idnam])))
else:
dimsstd.insert(idx ,int(vsdin[ivsdin]['dims'][idnam]))
else:
# In this case, wait for assigning the output dimensions. This actually depends on the specified function
dimsstd.insert(idx ,None)
else:
if ((len(vsdin[ivsdin]['dsel'][idnam]) != 1) & (dimsstd[dnamsstd.index(ednam)] != 1) & \
# we allow non-equal dimension lengths, as long as the dimension is covered/captured by the function
# maybe still allow non-equal dimension length not covered by the function????
(dimsstd[dnamsstd.index(ednam)] != None) & \
(len(vsdin[ivsdin]['dsel'][idnam]) != dimsstd[dnamsstd.index(ednam)])):
print(vsdin[ivsdin]['dims'][idnam])
print(vsdin[ivsdin]['dims'][idnam])
raise SomeError("The corresponding output dnamensions (index: "+str(dnamsstd.index(ednam))+") of the input variable "+str(ivsdin)+ " "+ str(idnam)+ " "+" have a different length and not equal to 1.")
else:
# None means it's considered by the function
if (dimsstd[dnamsstd.index(ednam)] != None):
# dselmarker
if vsdin[ivsdin]['dsel'][idnam] != False:
dimsstd[dnamsstd.index(ednam)] = max(dimsstd[dnamsstd.index(ednam)],len(vsdin[ivsdin]['dsel'][idnam]))
else:
dimsstd[dnamsstd.index(ednam)] = max(dimsstd[dnamsstd.index(ednam)],vsdin[ivsdin]['dims'][idnam])
# add the missing dimensions selected for the function.
idnam = 0
    # TODO: the position of this dimension needs to be in accordance with the
    # dimension order suggested by the input (output?) netcdf files!
for idnamseldef,ednamseldef in enumerate(dnamseldef):
if ednamseldef not in dnamsstd:
dnamsstd.insert(idnam,ednamseldef)
dimsstd.insert(idnam,None) # to be defined from the function
idnam = idnam +1
else:
idnam = dnamsstd.index(ednam)+1
#print ("dimsstd", dimsstd)
# dimsstd: list the specific output dimensions
# if function dimension: data output dimension should be the same as the function output dimension, but this should be checked afterwards.
# if not function dimension:
# # look what's the output dimension like. If the dimension is not in the output variable, we add a dummy 1-dimension
# we need to create/list adimsstd also before!! And then append them with the missing dimensions, as dummy 1-dimensions. If that is not sufficient, we will just get an error message.
# get references to the standard output dimensions on which the function is applied
refdfuncstd = []
for idnamseldef,ednamseldef in enumerate(dnamseldef):
refdfuncstd.append(dnamsstd.index(ednamseldef))
# all standard output dimensions (cfr. dimsstd, dnamsstd) are now collected...
# add the standard output dimensions that are missing in each seperate input variable as a dummy 1-dimension
for ivsdin,evsdin in enumerate(vsdin):
idnam = 0
        # the dimension of the list should always be the first dimension! This is assumed in rwicecube (see statement 'if refnoitersort[0] == 0:')
if type(datin[idatin]['file']).__name__ == 'list':
idnam = 1
for idnamsstd,ednamsstd in enumerate(dnamsstd):
if ednamsstd not in vsdin[ivsdin]['dnams']:
vsdin[ivsdin]['dnams'].insert(idnam,ednamsstd)
vsdin[ivsdin]['dims'].insert(idnam,1)
vsdin[ivsdin]['dsel'].insert(idnam,False)
# dselmarker
idnam = idnam + 1
else:
idnam = vsdin[ivsdin]['dnams'].index(ednamsstd) + 1
# do the same for the data output variables
# # vsdin[ivsdin]['refdstd']: references of data stream dimensions (vsdin[..]['dnams'] to the standard dimensions (dnamsstd)
for ivsdin,evsdin in enumerate(vsdin):
vsdin[ivsdin]['refdstd']= list([])
for idim,edim in enumerate(vsdin[ivsdin]['dnams']):
vsdin[ivsdin]['refdstd'].append(dnamsstd.index(edim))
for ivsdout,evsdout in enumerate(vsdout):
if vsdout[ivsdout]['dnams'] == None:
vsdout[ivsdout]['dnams'] = dnamsstd
# adimfuncin: the input dimensions of the function based on the refdfuncstd
# adimfuncin: the dimensions of the function input
# adimfuncin = np.zeros((len(vsdin),len(refdfuncstd)),dtype='int32') - 1
for ivsdout in range(len(vsdout)):
if vsdout[ivsdout]['dnams'] == None:
vsdout[ivsdout]['dnams'] == dnamsstd
# vsdout[..]['refdstd']: references of data stream dimensions (vsdout[..]['dnams'] to the standard dimensions (dnamsstd)
for ivsdout,evsdout in enumerate(vsdout):
vsdout[ivsdout]['refdstd'] = list([])
for idim,edim in enumerate(vsdout[ivsdout]['dnams']):
vsdout[ivsdout]['refdstd'].append(dnamsstd.index(edim))
# arefdfuncout: references of the function dimensions to the data output stream dimensions
arefdfuncout = []
for ivsdout,evsdout in enumerate(vsdout):
arefdfuncout.append([])
for idnamseldef,ednamseldef in enumerate(dnamseldef):
arefdfuncout[ivsdout].append(vsdout[ivsdout]['dnams'].index(ednamseldef))
# is arefdfuncout[ivsdout][irefdfuncout] == vsdout[ivsdout]['refdstd'].index(erefdfuncstd) ???
# arefdfuncin: references of the function dimensions to the data input stream dimensions
arefdfuncin = []
for ivsdin,evsdin in enumerate(vsdin):
arefdfuncin.append([])
for idnamseldef,ednamseldef in enumerate(dnamseldef):
arefdfuncin[ivsdin].append(vsdin[ivsdin]['dnams'].index(ednamseldef))
adimfuncin = []
alendfuncin = []
for ivsdin,evsdin in enumerate(vsdin):
alendfuncin.append(1)
adimfuncin.append([])
for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
# dselmarker
# edim = evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdfuncstd)]
if evsdin['dsel'][vsdin[ivsdin]['refdstd'].index(erefdfuncstd)] != False:
adimfuncin[ivsdin].append(list(evsdin['dsel'][vsdin[ivsdin]['refdstd'].index(erefdfuncstd)]))
else:
adimfuncin[ivsdin].append(range(evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdfuncstd)]))
alendfuncin[ivsdin] = alendfuncin[ivsdin]*len(adimfuncin[ivsdin][irefdfuncstd])
# 'probe' function output dimensions
dummydat = []
shapein = []
for ivsdin,evsdin in enumerate(vsdin):
shapein.append([len(e) for e in adimfuncin[ivsdin]])
# if shapein[ivsdin] == tuple():
# if forcenum and (not forcearray):
# # to do: make it
# if appenddim == True:
# dummydat.append(0.)
# else:
# raise SomeError('we can not force to be numeric if the option appenddim is switched on')
# else:
# np.zeros(shapein[ivsdin])
# we are in the special case of an extra dimension to be added to the input
if (list() in shapein) and (forcearray):
for ivsdin,evsdin in enumerate(vsdin):
dummydat.append(np.zeros([1]+shapein[ivsdin]))
else:
for ivsdin,evsdin in enumerate(vsdin):
dummydat.append(np.zeros(shapein[ivsdin]))
ddout = func(*dummydat)
if (type(ddout).__name__ == 'tuple'):
ddout = list(ddout)
if (type(ddout).__name__ != 'list'):
ddout = list([ddout])
# obtain output data type. If not specified, we obtain it from the function output.
    # meanwhile, check whether the number of input dimensions is the same as the number of output dimensions.
if len(ddout) != len(vsdout):
        raise SomeError('the amount of output variables from '+ str(func) + ' ('+str(len(ddout))+') is not the same as specified ('+str(len(vsdout))+')')
for iddout in range(len(ddout)):
if type(ddout[iddout] ) != np.ndarray:
ddout[iddout] = np.array(ddout[iddout])
# we are in the special case of an extra dimension to be added to the input -> so we remove it again
if (list() in shapein) and (forcearray):
if len(ddout[iddout].shape) > 1:
ddout[iddout].shape = ddout[iddout].shape[1:]
else:
ddout[iddout].shape = []
if (len(np.array(ddout[iddout]).shape) != len(adimfuncin[ivsdin])):
raise SomeError('The amount of input ('+str(len(adimfuncin[ivsdin]))+') and output dimensions ('+str(len(ddout[iddout].shape))+') of function is not the same')
if vsdout[iddout]['dims'] == None:
vsdout[iddout]['dims'] = list(dimsstd)
# overwrite dimensions with the function output dimensions
for irefdfuncout,erefdfuncout in enumerate(arefdfuncout[iddout]):
vsdout[iddout]['dims'][erefdfuncout] = ddout[iddout].shape[irefdfuncout]
vsdout[iddout]['dsel'] = [range(e) for e in vsdout[iddout]['dims']] # Added for consistency with input dimensions. As output dimension, it is not really used.
if vsdout[iddout]['dtype'] == None:
# output netcdf variable does not exist... creating
            # why does this need to be little endian????
vsdout[iddout]['dtype'] = '>'+ncgettypecode(ddout[iddout].dtype)
# try to copy dimension from data input
for idnams,ednams in enumerate(vsdout[iddout]['dnams']):
if ednams not in datout[iddout]['file'].dimensions:
dimensionfound = False
idatin = 0
while ((not dimensionfound) & (idatin < (len(datin) ))):
templopen = False
# try to copy the dimension from the input data
if type(datin[idatin]['file']).__name__ == 'NetCDFFile':
nctemplate = datin[idatin]['file']
elif type(datin[idatin]['file']).__name__ == 'str':
nctemplate = NetCDF.NetCDFFile(datin[idatin]['file'],'r')
templopen = True
elif type(datin[idatin]['file']).__name__ == 'list':
nctemplate = NetCDF.NetCDFFile(datin[idatin]['file'][0],'r')
templopen = True
else:
raise SomeError("Input file "+ str(datin[idatin]) + " ("+str(idatin)+") could not be recognized.")
if ednams in nctemplate.dimensions:
# dselmarker
if vsdin[idatin]['dsel'][idnams] != False: # type(edim).__name__ == 'list':
if (vsdout[iddout]['dims'][idnams] == len(vsdin[idatin]['dsel'][idnams])):
datout[iddout]['file'].createDimension(ednams,vsdout[iddout]['dims'][idnams])
if ednams in nctemplate.variables:
datout[iddout]['file'].createVariable(ednams,nctemplate.variables[ednams].typecode(),(ednams,))
datout[iddout]['file'].variables[ednams][:] = np.array(nctemplate.variables[ednams][:])[vsdin[idatin]['dsel'][idnams]]
nccopyattrvar(nctemplate,datout[iddout]['file'],varin=ednams,)
dimensionfound = True
else:
if (vsdout[iddout]['dims'][idnams] == nctemplate.dimensions[ednams]):
nccopydimension(nctemplate,datout[iddout]['file'], ednams)
dimensionfound = True
if templopen:
nctemplate.close()
idatin = idatin + 1
if dimensionfound == False:
datout[iddout]['file'].createDimension(ednams,vsdout[iddout]['dims'][idnams])
# if a template file needed to be opened, close it again
# check here whether the output dimensions are ok , otherwise make alternative dimensions
dnams = []
for idim,edim in enumerate(vsdout[iddout]['dims']):
if (vsdout[iddout]['dims'][idim] == datout[iddout]['file'].dimensions[vsdout[iddout]['dnams'][idim]]):
dnams.append(vsdout[iddout]['dnams'][idim])
else:
# if (vsdout[iddout]['dims'][idim] == 1):
# # if dimension is one, just remove it
# vsdout[iddout]['dims'].pop(idim)
# else:
dnams.append(vsdout[iddout]['dnams'][idim])
# else, make a new alternative dimension with a similar name
dnamidx = -1
while (dnams[idim] in datout[iddout]['file'].dimensions):
dnamidx = dnamidx + 1
dnams[idim] = str(vsdout[iddout]['dnams'][idim])+'_'+str(dnamidx)
datout[iddout]['file'].createDimension(dnams[idim],vsdout[iddout]['dims'][idim])
vsdout[iddout]['dnams'] = dnams
datout[iddout]['file'].createVariable(datout[iddout]['varname'],vsdout[iddout]['dtype'][1],tuple(vsdout[iddout]['dnams']))
        # we should check this earlier, before the dimensions are created
if (vsdout[iddout]['dims'] != list(datout[iddout]['file'].variables[datout[iddout]['varname']].shape)):
raise SomeError("dimensions of output file ( "+str(vsdout[iddout]['dims'])+"; "+ str(vsdout[iddout]['dnams'])+") do not correspond with intended output dimension "+str(datout[iddout]['file'].variables[datout[iddout]['varname']].shape)+"; "+str(datout[iddout]['file'].variables[datout[iddout]['varname']].dimensions))
for idatin,edatin in enumerate(datin):
if type(datin[idatin]['file']).__name__ == 'NetCDFFile':
# obtain file pointer!! very nasty!!
ncfn = str(datin[idatin]['file'])[19:(str(datin[idatin]['file']).index("'",19))]
vsdin[idatin]['fp'] = open(ncfn,'r')
elif type(datin[idatin]['file']).__name__ == 'str':
ncfn = datin[idatin]['file']
vsdin[idatin]['fp'] = open(ncfn,'r')
elif type(datin[idatin]['file']).__name__ == 'list':
        # !!!!! if a series/list of file names is given, the file pointers will be opened at read time
ncfn = datin[idatin]['file']
vsdin[idatin]['fp'] = datin[idatin]['file']
else:
raise SomeError("Input file "+ str(datin[idatin]) + " ("+str(idatin)+") could not be recognized.")
for idatout,edatout in enumerate(datout):
# obtain file pointer!! very nasty!!
datout[idatout]['file'].flush()
ncfn = str(datout[idatout]['file'])[19:(str(datout[idatout]['file']).index("'",19))]
vsdout[idatout]['fp'] = open(ncfn,'r+')
# in order to discover variable offsets
nctemp = netcdf_file(ncfn,'r')
vsdout[idatout]['itemsize'] = nctemp.variables[datout[idatout]['varname']].itemsize()
vsdout[idatout]['voffset'] = nctemp.variables[datout[idatout]['varname']]._voffset
nctemp.close()
# # bugfix 2013-11-26: this paragraph is replaced by the one below (analogous to the 'construction' of adimfuncin
# # next: check whether the output variable dimensions (if already present) are not too large, otherwise raise error. + Construct final output dimension specs
# adimfuncout = [[None]*len(refdfuncstd)]*(len(vsdout))
# alendfuncout = []
# for ivsdout,evsdout in enumerate(vsdout):
# alendfuncout.append(1)
# for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
# # dselmarker
# # edim = evsdout['dims'][vsdout[ivsdout]['refdstd'].outdex(erefdfuncstd)]
# if evsdout['dsel'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)] != False:
# adimfuncout[ivsdout][irefdfuncstd] = evsdout['dsel'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)]
# else:
# adimfuncout[ivsdout][irefdfuncstd] = range(evsdout['dims'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)])
# alendfuncout[ivsdout] = alendfuncout[ivsdout]*len(adimfuncout[ivsdout][irefdfuncstd])
adimfuncout = []
alendfuncout = []
for ivsdout,evsdout in enumerate(vsdout):
alendfuncout.append(1)
adimfuncout.append([])
for irefdfuncstd,erefdfuncstd in enumerate(refdfuncstd):
# dselmarker
# edim = evsdout['dims'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)]
if evsdout['dsel'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)] != False:
adimfuncout[ivsdout].append(list(evsdout['dsel'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)]))
else:
adimfuncout[ivsdout].append(range(evsdout['dims'][vsdout[ivsdout]['refdstd'].index(erefdfuncstd)]))
alendfuncout[ivsdout] = alendfuncout[ivsdout]*len(adimfuncout[ivsdout][irefdfuncstd])
# arefsin: references of the standard dimensions to the data stream dimensions
arefsin = []
for ivsdin,evsdin in enumerate(vsdin):
arefsin.append([None]*len(vsdin[ivsdin]['refdstd']))
# loop over the data stream dimensions
for irefdstd,erefdstd in enumerate(vsdin[ivsdin]['refdstd']):
arefsin[ivsdin][erefdstd] = irefdstd
# arefsout: references of the standard dimensions to the data stream dimensions
arefsout = []
for ivsdout,evsdout in enumerate(vsdout):
arefsout.append([None]*len(vsdout[ivsdout]['refdstd']))
# loop over the data stream dimensions
for irefdstd,erefdstd in enumerate(vsdout[ivsdout]['refdstd']):
arefsout[ivsdout][erefdstd] = irefdstd
#print ("dnamsstd", dnamsstd)
if appenddim == True:
membytes = 0
# dsellen = len(dnamseldef)
# a temporary copy of alenfunc*
alendfuncin_tmp = list(alendfuncin)
alendfuncout_tmp = list(alendfuncout)
        # we will try to read the data in even larger icecubes to reduce disk access!
idnam = len(dnamsstd) - 1
cont = True
maxarefdfunc = len(refdfuncstd)
while ((idnam >= maxarefdfunc) & (membytes <= maxmembytes) & cont):
# while loop quite extensive but does what is should-> should be reduced and simplified
cont = False # only continue to the next loop if idnam+1 (in previous loop) was (inserted) in refdfuncstd
ednam = dnamsstd[idnam]
if idnam not in refdfuncstd:
for ivsdin,evsdin in enumerate(vsdin):
# dselmarker
if vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]] != False:
alendfuncin_tmp[ivsdin] = alendfuncin_tmp[ivsdin] *len(vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]])
else:
alendfuncin_tmp[ivsdin] = alendfuncin_tmp[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
for ivsdout,evsdout in enumerate(vsdout):
alendfuncout_tmp[ivsdout] = alendfuncout_tmp[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
# recalculate the amount of bytes
tmpmembytes = 0
for ivsdin,evsdin in enumerate(vsdin):
tmpmembytes = tmpmembytes + alendfuncin_tmp[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
tmpmembytes = tmpmembytes + alendfuncout_tmp[ivsdout] * vsdout[ivsdout]['itemsize']
# if used memory still below threshold, we add it to the current dimension to the icecubes
if tmpmembytes <= maxmembytes:
refdfuncstd.insert(maxarefdfunc,idnam)
for ivsdin,evsdin in enumerate(vsdin):
# arefdfuncin: references of the function dimensions to the data input stream dimensions
arefdfuncin[ivsdin].insert(maxarefdfunc, arefsin[ivsdin][idnam])
if vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]] != False:
adimfuncin[ivsdin].insert(maxarefdfunc,vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]])
alendfuncin[ivsdin] = alendfuncin[ivsdin] *len(vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]])
else:
adimfuncin[ivsdin].insert(maxarefdfunc,range(vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]))
alendfuncin[ivsdin] = alendfuncin[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
for ivsdout,evsdout in enumerate(vsdout):
arefdfuncout[ivsdout].insert(maxarefdfunc, arefsout[ivsdout][idnam])
adimfuncout[ivsdout].insert(maxarefdfunc,range(vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]))
alendfuncout[ivsdout] = alendfuncout[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
# recalculate the amount of bytes
membytes = 0
for ivsdin,evsdin in enumerate(vsdin):
membytes = membytes + alendfuncin[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
membytes = membytes + alendfuncout[ivsdout] * vsdout[ivsdout]['itemsize']
cont = True
# if used memory still below threshold, we add it to the current dimension to the icecubes
else:
cont = True
idnam = idnam - 1
refdnoiterstd = list(refdfuncstd)
alendnoiterin = list(alendfuncin)
adimnoiterin = []
arefdnoiterin = []
for ivsdin,evsdin in enumerate(vsdin):
adimnoiterin.append(list(adimfuncin[ivsdin]))
arefdnoiterin.append(list(arefdfuncin[ivsdin]))
alendnoiterout = list(alendfuncout)
adimnoiterout = []
arefdnoiterout = []
for ivsdout,evsdout in enumerate(vsdout):
adimnoiterout.append(list(adimfuncout[ivsdout]))
arefdnoiterout.append(list(arefdfuncout[ivsdout]))
# membytes: minimum total memory that will be used. We will the increase usage when possible/allowed.
membytes = 0
for ivsdin,evsdin in enumerate(vsdin):
membytes = membytes + alendfuncin[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
membytes = membytes + alendfuncout[ivsdout] * vsdout[ivsdout]['itemsize']
if membytes > maxmembytes:
print ('Warning, used memory ('+str(membytes)+') exceeds maximum memory ('+str(maxmembytes)+').')
else:
# a temporary copy of alennoiter*
alendnoiterin_tmp = list(alendnoiterin)
alendnoiterout_tmp = list(alendnoiterout)
        # we will try to read the data in even larger icecubes to reduce disk access!
idnam = len(dnamsstd) - 1
cont = True
while ((idnam >= 0) & (membytes <= maxmembytes) & cont):
            # while loop quite extensive but does what it should -> should be reduced and simplified
cont = False # only continue to the next loop if idnam+1 (in previous loop) was (inserted) in refdnoiterstd
if idnam not in refdnoiterstd:
for ivsdin,evsdin in enumerate(vsdin):
# dselmarker
if vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]] != False:
alendnoiterin_tmp[ivsdin] = alendnoiterin_tmp[ivsdin] *len(vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]])
else:
alendnoiterin_tmp[ivsdin] = alendnoiterin_tmp[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
for ivsdout,evsdout in enumerate(vsdout):
alendnoiterout_tmp[ivsdout] = alendnoiterout_tmp[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
# recalculate the amount of bytes
tmpmembytes = 0
for ivsdin,evsdin in enumerate(vsdin):
tmpmembytes = tmpmembytes + alendnoiterin_tmp[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
tmpmembytes = tmpmembytes + alendnoiterout_tmp[ivsdout] * vsdout[ivsdout]['itemsize']
# if used memory still below threshold, we add it to the current dimension to the icecubes
if tmpmembytes <= maxmembytes:
refdnoiterstd.insert(0,idnam)
for ivsdin,evsdin in enumerate(vsdin):
arefdnoiterin[ivsdin].insert(0, arefsin[ivsdin][idnam])
if vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]] != False:
adimnoiterin[ivsdin].insert(0,vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]])
alendnoiterin[ivsdin] = alendnoiterin[ivsdin] *len(vsdin[ivsdin]['dsel'][arefsin[ivsdin][idnam]])
else:
adimnoiterin[ivsdin].insert(0,range(vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]))
alendnoiterin[ivsdin] = alendnoiterin[ivsdin] *vsdin[ivsdin]['dims'][arefsin[ivsdin][idnam]]
for ivsdout,evsdout in enumerate(vsdout):
arefdnoiterout[ivsdout].insert(0, arefsout[ivsdout][idnam])
adimnoiterout[ivsdout].insert(0,range(vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]))
alendnoiterout[ivsdout] = alendnoiterout[ivsdout] *vsdout[ivsdout]['dims'][arefsout[ivsdout][idnam]]
#dnamseldefnoiter.insert(0,dnamsstd[idnam])
# recalculate the amount of bytes
membytes = 0
for ivsdin,evsdin in enumerate(vsdin):
membytes = membytes + alendnoiterin[ivsdin] * vsdin[ivsdin]['itemsize']
for ivsdout,evsdout in enumerate(vsdout):
membytes = membytes + alendnoiterout[ivsdout] * vsdout[ivsdout]['itemsize']
cont = True
# if used memory still below threshold, we add it to the current dimension to the icecubes
else:
cont = True
idnam = idnam - 1
# adimnoiterin[ivsdin,irefdnoiterstd] = evsdin['dims'][vsdin[ivsdin]['refdstd'].index(erefdnoiterstd)]
# arefdfuncin: references of the function dimensions to the data input stream dimensions
# arefdnoiterin: references of the icecube dimensions to the data input stream dimensions
# # vsdin[ivsdin]['refdstd']: references of data stream dimensions (vsdin[..]['dnams'] to the standard dimensions (dnamsstd)
# dnamseldefnoiter: references
# print ('dims in:',vsdin[ivsdin]['dims'])
# print ('dnams in:',vsdin[ivsdin]['dnams'])
# print ('dims out:',vsdout[ivsdin]['dims'])
# print ('dnams out:',vsdout[ivsdin]['dnams'])
# guess from residual dimensions that are not in refnoiterin
refditerstd = []
dimiterstd = []
for idim,edim in enumerate(dimsstd):
if idim not in refdnoiterstd:
refditerstd.append(idim)
dimiterstd.append(edim)
if refditerstd == []:
refditerstd = [-1]
dimiterstd = [1]
# guess from residual dimensions that are not in refnoiterin
arefditerin = []
adimiterin = []
for ivsdin,evsdin in enumerate(vsdin):
arefditerin.append([])
adimiterin.append([])
for idim,edim in enumerate(vsdin[ivsdin]['dims']):
if idim not in arefdnoiterin[ivsdin]:
arefditerin[ivsdin].append(idim)
if vsdin[ivsdin]['dsel'][idim] != False:
adimiterin[ivsdin].append(vsdin[ivsdin]['dsel'][idim])
else:
adimiterin[ivsdin].append(range(edim))
if arefditerin[ivsdin] == []:
arefditerin[ivsdin] = [-1]
adimiterin[ivsdin] = [range(1)]
# guess from residual dimensions that are not in refnoiterin
arefditerout = []
adimiterout = []
for ivsdout,evsdout in enumerate(vsdout):
arefditerout.append([])
adimiterout.append([])
for idim,edim in enumerate(vsdout[ivsdout]['dims']):
if idim not in arefdnoiterout[ivsdout]:
arefditerout[ivsdout].append(idim)
if vsdout[ivsdout]['dsel'][idim] != False:
adimiterout[ivsdout].append(vsdout[ivsdout]['dsel'][idim])
else:
adimiterout[ivsdout].append(range(edim))
if arefditerout[ivsdout] == []:
arefditerout[ivsdout] = [-1]
adimiterout[ivsdout] = [range(1)]
dimitermax = []
for iref,eref in enumerate(refditerstd):
dimitermax.append(1)
for ivsdin,evsdin in enumerate(vsdin):
dimitermax[iref] = max(dimitermax[iref],len(adimiterin[ivsdin][iref]))
for ivsdout,evsdout in enumerate(vsdout):
dimitermax[iref] = max(dimitermax[iref],len(adimiterout[ivsdout][iref]))
rwchunksizein = [1]*len(vsdin)
for ivsdin,evsdin in enumerate(vsdin):
idim = len(vsdin[ivsdin]['dims'])-1
while ((idim in arefdnoiterin[ivsdin]) & (idim >= 0) & (vsdin[ivsdin]['dsel'][idim] == range(vsdin[ivsdin]['dims'][idim])) & ((type(datin[ivsdin]['file']).__name__ != 'list') | (idim != 0))):
            # The inner dimensions only have to be referenced here, not necessarily in the correct order; we know that they will be read in the correct order in the end
rwchunksizein[ivsdin] = rwchunksizein[ivsdin]*vsdin[ivsdin]['dims'][idim]
idim = idim - 1
#print ("rwchunksizein", rwchunksizein)
rwchunksizeout = [1]*len(vsdout)
for ivsdout,evsdout in enumerate(vsdout):
idim = len(vsdout[ivsdout]['dims']) -1
while ((idim in arefdnoiterout[ivsdout]) & (idim >= 0)):
            # The inner dimensions only have to be referenced here, not necessarily in the correct order; we know that they will be written in the correct order in the end
rwchunksizeout[ivsdout] = rwchunksizeout[ivsdout]*vsdout[ivsdout]['dims'][idim]
idim = idim - 1
#print ("rwchunksizein",rwchunksizeout)
adimnoapplyout = []
for ivsdout,evsdout in enumerate(vsdout):
adimnoapplyout.append([])
for irefdnoiterout in range(len(arefdnoiterout[ivsdout])-len(arefdfuncout[ivsdout])):
adimnoapplyout[ivsdout].append(adimnoiterout[ivsdout][irefdnoiterout])
if adimnoapplyout[ivsdout] == []:
adimnoapplyout[ivsdout] = [range(1)]
adimnoapplyin = []
for ivsdin,evsdin in enumerate(vsdin):
adimnoapplyin.append([])
for irefdnoiterin in range(len(arefdnoiterin[ivsdin])-len(arefdfuncin[ivsdin])):
adimnoapplyin[ivsdin].append(adimnoiterin[ivsdin][irefdnoiterin])
if adimnoapplyin[ivsdin] == []:
adimnoapplyin[ivsdin] = [range(1)]
dimnoapplymax = []
for iref in range(len(arefdnoiterout[ivsdout])-len(arefdfuncout[ivsdout])):
dimnoapplymax.append(1)
for ivsdin,evsdin in enumerate(vsdin):
dimnoapplymax[iref] = max(dimnoapplymax[iref],len(adimnoapplyin[ivsdin][iref]))
for ivsdout,evsdout in enumerate(vsdout):
dimnoapplymax[iref] = max(dimnoapplymax[iref],len(adimnoapplyout[ivsdout][iref]))
if dimnoapplymax == []:
dimnoapplymax = [1]
lennoapplymax = reduce(mul,dimnoapplymax)
lenitermax = reduce(mul,dimitermax)
dimiterpos = [0]*len(dimitermax)
sys.stdout.write(str(0)+'/'+str(lenitermax))
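    # outer loop: iterate over every position of the dimensions that are not kept
    # in memory; for each position an 'icecube' is read from the input streams,
    # processed hunk by hunk with func, and written to the output streams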
for j in range(lenitermax):
# reading icecube, rearranged in the order of dimensions specified by arefnoiterin
dataicecubein = []
for ivsdin,evsdin in enumerate(vsdin):
# dataicecubein.append(np.zeros((elendnoiterin,),dtype=vsdin[ilendnoiterin]['dtype']))
# if ((refiter == 0) & (type(vsdin[ivsdin]['fp']) == 'list'):
# dataicecubein.append(np.zeros(
dataicecubein.append(np.array(readicecubeps(\
vsdin[ivsdin]['fp'],\
vsdin[ivsdin]['dims'],\
arefditerin[ivsdin],\
adimiterin[ivsdin],\
dimiterpos,\
arefdnoiterin[ivsdin],\
adimnoiterin[ivsdin],\
vsdin[ivsdin]['dtype'],\
vsdin[ivsdin]['itemsize'],\
vsdin[ivsdin]['voffset'],\
rwchunksizein[ivsdin],\
), dtype=vsdin[ivsdin]['dtype']).ravel())
dataicecubeout = []
for ilendnoiterout,elendnoiterout in enumerate(alendnoiterout):
dataicecubeout.append(np.zeros((elendnoiterout,),dtype=vsdout[ilendnoiterout]['dtype'][1]))
dimnoapplypos = [0]*len(dimnoapplymax)
k = 0
sys.stdout.write(' '+'('+str(0)+'/'+str(lennoapplymax)+')')
for k in range(lennoapplymax):
# actually, this is just the end of the file output already written
ahunkin = []
shapein = []
for ivsdin, evsdin in enumerate(vsdin):
pos = 0
# e.g. pos = (9)+ 20*(10) + 50*50*20*(5)
for idimpos,edimpos in enumerate(dimnoapplypos):
curadd = np.mod(edimpos,len(adimnoapplyin[ivsdin][idimpos]))
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
if ((idimpos + 1) < len(arefdnoiterin[ivsdin])):
for i in range(idimpos + 1,len(arefdnoiterin[ivsdin])) :
                            # here, we assume that the dimensions of the chunk are already in the order considered by adimnoiter(out) etc. (cfr. the preceding transposition in readicecubeps)
curadd = curadd * len(adimnoiterin[ivsdin][i])
# curaddout = curaddout * dimnoiteroutref[i]
pos = pos + curadd
ahunkin.append(dataicecubein[ivsdin][pos:(pos+alendfuncin[ivsdin])])
shapein.append([len(e) for e in adimfuncin[ivsdin]])
#ahunkin[ivsdin].shape = [len(e) for e in adimfuncin[ivsdin]]
# we are in the special case of an extra dimension to be prepended to the input
if (list() in shapein) and (forcearray):
for ivsdin,evsdin in enumerate(vsdin):
ahunkin[ivsdin].shape = [1]+shapein[ivsdin]
else:
for ivsdin,evsdin in enumerate(vsdin):
ahunkin[ivsdin].shape = shapein[ivsdin]
# apply the function
ahunkout = func(*ahunkin)
if (type(ahunkout).__name__ == 'tuple'):
ahunkout = list(ahunkout)
if (type(ahunkout).__name__ != 'list'):
ahunkout = list([ahunkout])
for ihunkout in range(len(ahunkout)):
ahunkout[ihunkout] = np.array(ahunkout[ihunkout])
# e.g. posout = (9)+ 20*(10) + 50*50*20*(5)
# we are in the special case of an extra dimension to be prepended to the input -> so we remove this dimension again
if (list() in shapein) and (forcearray):
                    if len(ahunkout[ihunkout].shape) > 1:
                        ahunkout[ihunkout].shape = ahunkout[ihunkout].shape[1:]
                    else:
                        ahunkout[ihunkout].shape = []
posout = 0
for idimpos,edimpos in enumerate(dimnoapplypos):
curadd = np.mod(edimpos,len(adimnoapplyout[ihunkout][idimpos]))
#e.g. if edimpos == (5): curadd = 50*50*20*(5)
if ((idimpos + 1) < len(arefdnoiterout[ihunkout])):
for i in range(idimpos + 1,len(arefdnoiterout[ihunkout])) :
# here, we assume that the idims are in the intended order (cfr. subsequent transposition in writeicecubeps)
curadd = curadd * len(adimnoiterout[ihunkout][i])
# curaddout = curaddout * dimnoiteroutref[i]
posout = posout + curadd
dataicecubeout[ihunkout][posout:(posout+alendfuncout[ihunkout])] = np.array(ahunkout[ihunkout].ravel(),dtype=vsdout[ihunkout]['dtype'][1])
# go to next data slice
dimnoapplypos[-1] = dimnoapplypos[-1] + 1
for idimidx,edimidx in reversed(list(enumerate(dimnoapplypos))):
# # alternative (makes 'dimiter' redundant)
# if dimiterpos[idimidx] == shp[refiter[idimidx]]:
if idimidx > 0:
if dimnoapplypos[idimidx] == dimnoapplymax[idimidx]:
dimnoapplypos[idimidx-1] = dimnoapplypos[idimidx-1] + 1
dimnoapplypos[idimidx] = 0
sys.stdout.write ('\b'*(len('('+str(k)+'/'+str(lennoapplymax)+')')))
sys.stdout.write ('('+str(k+1)+'/'+str(lennoapplymax)+')')
# if lennoapplymax == 1:
# sys.stdout.write ('\b'*(len('('+str(k)+'/'+str(lennoapplymax)+')')))
# sys.stdout.write ('('+str(k+1)+'/'+str(lennoapplymax)+')')
for idimsout in range(len(dataicecubeout)):
dataicecubeout[idimsout].shape = [len(e) for e in adimnoiterout[idimsout]]
for ivsdout in range(len(vsdout)):
writeicecubeps(\
vsdout[ivsdout]['fp'],
vsdout[ivsdout]['dims'],\
arefditerout[ivsdout],\
adimiterout[ivsdout],\
dimiterpos,\
arefdnoiterout[ivsdout],\
adimnoiterout[ivsdout],\
dataicecubeout[ivsdout],\
vsdout[ivsdout]['dtype'],\
vsdout[ivsdout]['itemsize'],\
vsdout[ivsdout]['voffset'],\
rwchunksizeout[ivsdout])
# go to next data slice
dimiterpos[-1] = dimiterpos[-1] + 1
for idimidx,edimidx in reversed(list(enumerate(dimiterpos))):
# # alternative (makes 'dimiter' redundant)
# if dimiterpos[idimidx] == shp[refiter[idimidx]]:
if dimiterpos[idimidx] == dimitermax[idimidx]:
if idimidx > 0:
dimiterpos[idimidx-1] = dimiterpos[idimidx-1] + 1
dimiterpos[idimidx] = 0
sys.stdout.write ('\b \b'*(len('('+str(k+1)+'/'+str(lennoapplymax)+')')))
sys.stdout.write ('\b \b'*4)
sys.stdout.write ('\b \b'*len(str(j)+'/'+str(lenitermax)))
sys.stdout.write (str(j+1)+'/'+str(lenitermax))
for ivsdin,evsdin in enumerate(vsdin):
if type(vsdin[ivsdin]['fp']).__name__ == 'file':
vsdin[ivsdin]['fp'].close()
for ivsdout,evsdout in enumerate(vsdout):
vsdout[ivsdout]['fp'].close()
print(' ')
| gpl-3.0 | -6,246,336,281,871,507,000 | 51.979391 | 332 | 0.582512 | false |
gaufung/LMDI | Figures/app.py | 1 | 5703 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties, findfont
from matplotlib.legend_handler import HandlerPatch
import matplotlib.patches as mpatches
import matplotlib as mpl
from matplotlib import cm
import xlrd
from config import SHEET_INDEX, ROW_START, ROW_END, COLUMN_START, COLUMN_END,FILE_NAME
from model import Unit
plt.rcParams["font.family"] = "Calibri"
fp = FontProperties(family='Calibri')
font = findfont(fp)
#color handlermap
#cmap = plt.cm.jet
# extract all colors from the .jet map
# cmaplist = [cmap(i) for i in range(cmap.N)]
# # force the first color entry to be grey
# cmaplist[0] = (.5,.5,.5,1.0)
# # create the new map
# cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
# bounds = np.linspace(0,20,21)
# norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
class MplColorHelper:
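    # maps scalar values in [start_val, stop_val] onto RGBA colours of the given
    # matplotlib colormap; used below to colour the stacked bars by their values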
def __init__(self, cmap_name, start_val, stop_val):
self.cmap_name = cmap_name
self.cmap = plt.get_cmap(cmap_name)
self.norm = mpl.colors.Normalize(vmin=start_val, vmax=stop_val)
self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)
def get_rgb(self, val):
return self.scalarMap.to_rgba(val)
col = MplColorHelper('rainbow',0,10)
class HandlerSquare(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = xdescent + 0.5 * (width - height), ydescent
p = mpatches.Rectangle(xy=center, width=height,
height=height, angle=0.0)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
def read_data():
workbook = xlrd.open_workbook(FILE_NAME)
sheet = workbook.sheets()[0]
result = []
yct = []
for row_idx in range(ROW_START,ROW_END+1):
row = sheet.row_values(row_idx)[COLUMN_START:COLUMN_END+1]
result.append(Unit(row[0], float(row[1]), float(row[2]), col.get_rgb(float(row[1]))))
yct.append(float(row[1]))
for item in result:
if '%.2f' % item.yct == '7.29':
item._color=col.get_rgb(8.0)
if '%.2f' % item.yct == '6.02':
item._color=col.get_rgb(7.5)
if '%.2f' % item.yct == '5.55':
item._color=col.get_rgb(7)
if '%.2f' % item.yct == '5.17':
item._color=col.get_rgb(6.5)
if '%.2f' % item.yct == '4.59':
item._color=col.get_rgb(6)
if '%.2f' % item.yct == '4.47':
item._color=col.get_rgb(5.5)
if '%.2f' % item.yct == '4.36':
item._color=col.get_rgb(5)
if '%.2f' % item.yct == '4.42':
item._color=col.get_rgb(4.5)
# yct = sorted(yct)
# dic={}
# for idx,value in enumerate(yct):
# dic[value]=idx
# for item in result:
# item._color=col.get_rgb(dic[item.yct])
return result
def _print(data):
for item in data:
print(item.name,item.yct,item.pei, item.color)
def _bottom(data, current_idx, is_yct=True):
pass
def stack_values(data1, data2):
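    # cumulative bottoms for the stacked bars: result1[i] is the sum of the first
    # i 'yct' values and result2[i] the sum of the first i 'pei' values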
result1 = [0.0]
result2 = [0.0]
for item1,item2 in zip(data1, data2):
result1.append(result1[-1]+item1.yct)
result2.append(result2[-1]+item2.pei)
return result1, result2
def draw():
row_data = read_data()
data_by_yct = sorted(row_data, key=lambda unit: unit.yct, reverse=True)
data_by_pei = sorted(row_data, key=lambda unit: unit.pei, reverse=True)
stack1, stack2 = stack_values(data_by_yct, data_by_pei)
index = 0.5
bw = 0.5
plt.figure(figsize=(7,9))
plt.axis([0,3,-0.1,100])
baritems= []
labelitems= []
for idx,item in enumerate(data_by_yct):
baritems.append(plt.bar(index, np.array([item.yct]), bw, color=item.color, edgecolor='None',
label=item.name, bottom=stack1[idx]))
labelitems.append(item.name)
for idx,item in enumerate(data_by_pei):
plt.bar(index + 1, np.array([item.pei]), bw, color=item.color, edgecolor='None',
bottom=stack2[idx])
handlermap = {item[0]:HandlerSquare() for item in baritems}
plt.legend([item[0] for item in baritems],labelitems,handler_map=handlermap,loc='right', ncol=1,fontsize=12, frameon=False)
#add text
for idx in range(0,30):
if data_by_yct[idx].yct >= 3.33:
plt.text(0.5, stack1[idx]+0.5*data_by_yct[idx].yct, '%.2f' % data_by_yct[idx].yct,ha='center',va='center')
for idx in range(0,30):
if data_by_pei[idx].pei >= 3.33:
plt.text(1.5, stack2[idx]+0.5*data_by_pei[idx].pei, '%.2f' % data_by_pei[idx].pei,ha='center',va='center')
#decorate
x_ticks = [0.5, 1.5]
x_label = [r"$D_{YCT}$", r"$D_{PEI}$"]
y_ticks = np.arange(0,101,10)
y_labels = np.array([str(item) for item in y_ticks])
for y_value in y_ticks[1:]:
plt.plot([0,0.25],[y_value,y_value],'k-',linewidth=0.5)
plt.plot([0.75,1.25],[y_value,y_value],'k-',linewidth=0.5)
plt.plot([1.75,2],[y_value,y_value],'k-',linewidth=0.5)
plt.plot([0,2],[0,0],'k-',linewidth=1.0)
plt.plot([2,2],[0,100],'k-',linewidth=0.5)
plt.ylabel('Percentage (%)',fontproperties=fp, fontsize=15)
plt.xticks(x_ticks, x_label, fontsize=12,fontproperties=fp)
plt.yticks(y_ticks, y_labels,fontproperties=fp)
gca = plt.gca()
gca.xaxis.set_ticks_position('bottom')
gca.yaxis.set_ticks_position('left')
gca.yaxis.set_ticks_position('left')
gca.spines['right'].set_color('none')
gca.spines['top'].set_color('none')
gca.spines['bottom'].set_color('none')
#plt.show()
plt.savefig('yct_and_pei_dpi_800.jpg',dpi=800)
if __name__ == '__main__':
draw() | mit | -4,575,353,626,627,105,300 | 36.774834 | 127 | 0.607049 | false |
xs2maverick/adhocracy3.mercator | src/adhocracy_frontend/adhocracy_frontend/tests/acceptance/shared.py | 1 | 5414 | """Shared acceptance test functions."""
from random import choice
from time import sleep
import requests
import json
from splinter.driver.webdriver import WebDriverElement
from adhocracy_core.testing import god_login
from adhocracy_core.testing import god_password
from adhocracy_core.testing import participant_password
from adhocracy_core.testing import participant_login
from selenium.common.exceptions import NoSuchElementException
root_uri = 'http://localhost:9080'
verbose = False
ALPHABET = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
WHITESPACE = ' '
def get_random_string(n=10, whitespace=False) -> str:
"""Return autogenerated string."""
alphabet = ALPHABET + WHITESPACE if whitespace else ALPHABET
return ''.join(choice(alphabet) for i in range(n))
def wait(condition, step=0.1, max_steps=10) -> bool:
"""Wait for a condition to become true."""
for i in range(max_steps - 1):
try:
result = condition()
if hasattr(result, 'visible'):
if result.visible:
return True
else:
sleep(step)
else:
if result:
return True
else:
sleep(step)
except ValueError:
pass
except NoSuchElementException:
pass
return condition()
def login(browser, name_or_email, password,
expect_success=True,
visit_root=True):
"""Login user with name and password."""
if is_logged_in(browser):
return
login_url = browser.app_url + 'login'
browser.visit(login_url)
fill_input(browser, '.login [name="nameOrEmail"]', name_or_email)
fill_input(browser, '.login [name="password"]', password)
click_button(browser, '.login [type="submit"]')
if expect_success and not browser.wait_for_condition(is_logged_in, 20):
raise Exception('login failed.')
if visit_root:
browser.visit(browser.root_url)
def login_god(browser, **kwargs):
"""Login god user."""
login(browser, god_login, god_password, **kwargs)
def login_participant(browser, **kwargs):
"""Login participant user."""
login(browser, participant_login, participant_password, **kwargs)
def logout(browser):
"""Logout user."""
if is_logged_in(browser):
click_button(browser, '.user-indicator-logout')
browser.wait_for_condition(is_logged_out, 30)
def is_logged_in(browser):
"""Check if user is logged in."""
return browser.is_element_present_by_css('.user-indicator-logout')
def is_logged_out(browser):
"""Check if user is logged out."""
return browser.is_element_not_present_by_css(
'.user-indicator-logout')
def fill_input(browser, css_selector, value):
"""Find `css_selector` and fill value."""
element = browser.find_by_css(css_selector).first
element.fill(value)
def click_button(browser, css_selector):
"""Find `css_selector` and click."""
element = browser.find_by_css(css_selector).first
element.click()
def title_is_in_listing(listing, title: str) -> bool:
"""Check that a listing element with text == `title` exists."""
for element in listing.find_by_css('.listing-element'):
wait(lambda: element.text, max_steps=5)
if element.text == title:
return True
def get_listing_create_form(listing) -> WebDriverElement:
"""Open and return the create form of a listing."""
return listing.find_by_css('.listing-create-form').first
def get_column_listing(browser, column_name: str) -> WebDriverElement:
"""Return the listing in the content column ."""
column = browser.find_by_css('.moving-column-' + column_name)
listing = column.first.find_by_css('.listing')
return listing
def get_list_element(listing, text, descendant=None, max_steps=20):
"""Return list element with text == `text`."""
for element in listing.find_by_css('.listing-element'):
wait(lambda: element.text, max_steps=max_steps)
if descendant is None:
element_text = element.text
else:
element_text = element.find_by_css(descendant).first.text
if element_text == text:
return element
def api_login(name_or_email: str, password: str) -> dict:
"""Login user and return user token and path."""
uri = root_uri + '/login_username'
headers = {
'Content-Type': 'application/json;charset=UTF-8',
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip,deflate',
'Connection': 'keep-alive',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64)',
'Content-Length': '36'
}
body = json.dumps({
'name': name_or_email,
'password': password
})
response = requests.post(uri, headers=headers, data=body)
if verbose:
print('\n')
print(uri)
print(headers)
print(body)
print(response)
print(response.text)
assert response.status_code == 200
data = response.json()
assert data['status'] == 'success'
return {'user_token': data['user_token'],
'user_path': data['user_path']}
def api_login_god() -> dict:
"""Login in as god and return user token and path."""
return api_login(god_login, god_password)
| agpl-3.0 | -2,467,685,987,288,279,600 | 29.937143 | 75 | 0.634281 | false |
matrix-org/synapse | tests/storage/test_state.py | 1 | 17131 | # Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.storage.state import StateFilter
from synapse.types import RoomID, UserID
from tests.unittest import HomeserverTestCase
logger = logging.getLogger(__name__)
class StateStoreTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.state_datastore = self.storage.state.stores.state
self.event_builder_factory = hs.get_event_builder_factory()
self.event_creation_handler = hs.get_event_creation_handler()
self.u_alice = UserID.from_string("@alice:test")
self.u_bob = UserID.from_string("@bob:test")
self.room = RoomID.from_string("!abc123:test")
self.get_success(
self.store.store_room(
self.room.to_string(),
room_creator_user_id="@creator:text",
is_public=True,
room_version=RoomVersions.V1,
)
)
def inject_state_event(self, room, sender, typ, state_key, content):
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": typ,
"sender": sender.to_string(),
"state_key": state_key,
"room_id": room.to_string(),
"content": content,
},
)
event, context = self.get_success(
self.event_creation_handler.create_new_client_event(builder)
)
self.get_success(self.storage.persistence.persist_event(event, context))
return event
def assertStateMapEqual(self, s1, s2):
for t in s1:
# just compare event IDs for simplicity
self.assertEqual(s1[t].event_id, s2[t].event_id)
self.assertEqual(len(s1), len(s2))
def test_get_state_groups_ids(self):
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
e2 = self.inject_state_event(
self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
)
state_group_map = self.get_success(
self.storage.state.get_state_groups_ids(self.room, [e2.event_id])
)
self.assertEqual(len(state_group_map), 1)
state_map = list(state_group_map.values())[0]
self.assertDictEqual(
state_map,
{(EventTypes.Create, ""): e1.event_id, (EventTypes.Name, ""): e2.event_id},
)
def test_get_state_groups(self):
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
e2 = self.inject_state_event(
self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
)
state_group_map = self.get_success(
self.storage.state.get_state_groups(self.room, [e2.event_id])
)
self.assertEqual(len(state_group_map), 1)
state_list = list(state_group_map.values())[0]
self.assertEqual({ev.event_id for ev in state_list}, {e1.event_id, e2.event_id})
def test_get_state_for_event(self):
# this defaults to a linear DAG as each new injection defaults to whatever
# forward extremities are currently in the DB for this room.
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
e2 = self.inject_state_event(
self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
)
e3 = self.inject_state_event(
self.room,
self.u_alice,
EventTypes.Member,
self.u_alice.to_string(),
{"membership": Membership.JOIN},
)
e4 = self.inject_state_event(
self.room,
self.u_bob,
EventTypes.Member,
self.u_bob.to_string(),
{"membership": Membership.JOIN},
)
e5 = self.inject_state_event(
self.room,
self.u_bob,
EventTypes.Member,
self.u_bob.to_string(),
{"membership": Membership.LEAVE},
)
# check we get the full state as of the final event
state = self.get_success(self.storage.state.get_state_for_event(e5.event_id))
self.assertIsNotNone(e4)
self.assertStateMapEqual(
{
(e1.type, e1.state_key): e1,
(e2.type, e2.state_key): e2,
(e3.type, e3.state_key): e3,
# e4 is overwritten by e5
(e5.type, e5.state_key): e5,
},
state,
)
# check we can filter to the m.room.name event (with a '' state key)
state = self.get_success(
self.storage.state.get_state_for_event(
e5.event_id, StateFilter.from_types([(EventTypes.Name, "")])
)
)
self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
# check we can filter to the m.room.name event (with a wildcard None state key)
state = self.get_success(
self.storage.state.get_state_for_event(
e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
)
)
self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
# check we can grab the m.room.member events (with a wildcard None state key)
state = self.get_success(
self.storage.state.get_state_for_event(
e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
)
)
self.assertStateMapEqual(
{(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state
)
# check we can grab a specific room member without filtering out the
# other event types
state = self.get_success(
self.storage.state.get_state_for_event(
e5.event_id,
state_filter=StateFilter(
types={EventTypes.Member: {self.u_alice.to_string()}},
include_others=True,
),
)
)
self.assertStateMapEqual(
{
(e1.type, e1.state_key): e1,
(e2.type, e2.state_key): e2,
(e3.type, e3.state_key): e3,
},
state,
)
# check that we can grab everything except members
state = self.get_success(
self.storage.state.get_state_for_event(
e5.event_id,
state_filter=StateFilter(
types={EventTypes.Member: set()}, include_others=True
),
)
)
self.assertStateMapEqual(
{(e1.type, e1.state_key): e1, (e2.type, e2.state_key): e2}, state
)
#######################################################
# _get_state_for_group_using_cache tests against a full cache
#######################################################
room_id = self.room.to_string()
group_ids = self.get_success(
self.storage.state.get_state_groups_ids(room_id, [e5.event_id])
)
group = list(group_ids.keys())[0]
# test _get_state_for_group_using_cache correctly filters out members
# with types=[]
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: set()}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual(
{
(e1.type, e1.state_key): e1.event_id,
(e2.type, e2.state_key): e2.event_id,
},
state_dict,
)
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: set()}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({}, state_dict)
# test _get_state_for_group_using_cache correctly filters in members
# with wildcard types
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: None}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual(
{
(e1.type, e1.state_key): e1.event_id,
(e2.type, e2.state_key): e2.event_id,
},
state_dict,
)
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: None}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual(
{
(e3.type, e3.state_key): e3.event_id,
# e4 is overwritten by e5
(e5.type, e5.state_key): e5.event_id,
},
state_dict,
)
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual(
{
(e1.type, e1.state_key): e1.event_id,
(e2.type, e2.state_key): e2.event_id,
},
state_dict,
)
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=False
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
#######################################################
# deliberately remove e2 (room name) from the _state_group_cache
cache_entry = self.state_datastore._state_group_cache.get(group)
state_dict_ids = cache_entry.value
self.assertEqual(cache_entry.full, True)
self.assertEqual(cache_entry.known_absent, set())
self.assertDictEqual(
state_dict_ids,
{
(e1.type, e1.state_key): e1.event_id,
(e2.type, e2.state_key): e2.event_id,
},
)
state_dict_ids.pop((e2.type, e2.state_key))
self.state_datastore._state_group_cache.invalidate(group)
self.state_datastore._state_group_cache.update(
sequence=self.state_datastore._state_group_cache.sequence,
key=group,
value=state_dict_ids,
# list fetched keys so it knows it's partial
fetched_keys=((e1.type, e1.state_key),),
)
cache_entry = self.state_datastore._state_group_cache.get(group)
state_dict_ids = cache_entry.value
self.assertEqual(cache_entry.full, False)
self.assertEqual(cache_entry.known_absent, {(e1.type, e1.state_key)})
self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id})
############################################
# test that things work with a partial cache
# test _get_state_for_group_using_cache correctly filters out members
# with types=[]
room_id = self.room.to_string()
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: set()}, include_others=True
),
)
self.assertEqual(is_all, False)
self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
room_id = self.room.to_string()
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: set()}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({}, state_dict)
# test _get_state_for_group_using_cache correctly filters in members
# wildcard types
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: None}, include_others=True
),
)
self.assertEqual(is_all, False)
self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: None}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual(
{
(e3.type, e3.state_key): e3.event_id,
(e5.type, e5.state_key): e5.event_id,
},
state_dict,
)
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=True
),
)
self.assertEqual(is_all, False)
self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=True
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=False
),
)
self.assertEqual(is_all, False)
self.assertDictEqual({}, state_dict)
(state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
types={EventTypes.Member: {e5.state_key}}, include_others=False
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
| apache-2.0 | -4,163,653,240,456,441,000 | 35.217759 | 88 | 0.556885 | false |
kenjyoung/MinAtar | examples/human_play.py | 1 | 3764 | ################################################################################################################
# Authors: #
# Kenny Young ([email protected]) #
# Tian Tian ([email protected]) #
# #
# python3 human_test.py -g <game> #
################################################################################################################
import argparse
import tkinter as Tk
from minatar import Environment, GUI
################################################################################################################
# Script that allows a human to play any of the MinAtar games. Use arrow keys to move and space to fire.
# Pressing q will exit the game, r will restart.
#
################################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument("--game", "-g", type=str)
args = parser.parse_args()
# Setup game environment and GUI
env = Environment(args.game)
gui = GUI(env.game_name(), env.n_channels)
# Thread safe variables for use with GUI
action = Tk.IntVar()
action.set(0)
action_taken = Tk.BooleanVar()
action_taken.set(False)
action_released = Tk.BooleanVar()
action_released.set(False)
G = Tk.DoubleVar()
G.set(0.0)
is_terminate = Tk.BooleanVar()
is_terminate.set(False)
# Map input keys to agent actions
key_action_map = {' ':5, 'left':1, 'up':2, 'right':3, 'down':4}
# Key press handler for human player
def on_key_event(event):
if event.key == 'q': # quit the game
gui.quit()
elif (event.key == 'r'): # reset the game environment
env.reset()
elif(event.key in key_action_map):
key_action = key_action_map[event.key]
action.set(key_action)
# When new action is selected it has not yet been taken or released
action_released.set(False)
action_taken.set(False)
# Key release handler for human player
def on_release_event(event):
if(event.key in key_action_map):
key_action = key_action_map[event.key]
a = action.get()
if(a==key_action):
# If released action has already been taken set action to no-op immediately
if(action_taken.get()):
action.set(0)
# Otherwise note that it has been released so that we can set back to no-op when it is taken
else:
action_released.set(True)
################################################################################################################
# play
#
# Allows user to the play the game and displays state and score via gui.
#
################################################################################################################
def play():
if is_terminate.get() == True:
gui.quit()
# Get players actions selection, if that action has been released, set action back to no-op for next frame
a = action.get()
action_taken.set(True)
if(action_released.get()):
action.set(0)
r, t = env.act(a)
is_terminate.set(t)
G.set(G.get()+r)
gui.display_state(env.state())
gui.set_message("Score: " + str(G.get()))
gui.update(50, play)
# Hook up the key handler and initiate game play
gui.overwrite_key_handle(on_key_event, on_release_event)
gui.update(0, play)
gui.run()
print("Final Score: "+str(G.get()))
| gpl-3.0 | -8,559,620,511,119,420,000 | 39.042553 | 112 | 0.459086 | false |
thortex/rpi3-webiopi | webiopi_0.7.1/python/webiopi/devices/memory/__init__.py | 1 | 12397 | # Copyright 2014 Andreas Riegg - t-h-i-n-x.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Changelog
#
# 1.0 2014-06-26 Initial release.
# 1.1 2014-08-28 Added bit access.
# 1.2 2014-08-31 Added NON-REST multiple read/write byte methods
# to speed up direct Python access.
# 1.3 2014-11-13 Changed parameter order for writeMemoryBytes and
# optimized it.
# 1.4 2014-12-08 Simplified multiple read/write byte methods and
# made start/stop bounds checking more strict.
# Added REST mapping for multiple bytes reading.
# Made addressing scheme uniform for all slot types.
#
# Usage remarks
#
# - The smallest possible memory unit is 1 byte (8 bits)
# - Addressed slots can be
# - bits ( 1 bit)
# - bytes ( 8 bits)
# - words (16 bits)
# - longs (32 bits)
# - All memory address slots are mapped strictly sequential in ascending
# order like channel numbers starting at 0 with MSB first for non
# single-bit slots. This results in the following address slot mapping:
# |<- bit 0 bit 31 ->|
# 01010101010101010101010101010101
# |byte 0| byte 1| byte 2| byte 3|
# | -- word 0 -- | -- word 1 -- |
# | ---------- long 0 ---------- |
# - Where applicable, start and stop have the same meaning as range and
# list slices in Python. Start is included, stop is excluded.
#
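# Worked example of the mapping above (these values follow directly from the
# default bit/word/long implementations further below):
# - bit address 13  -> divmod(13, 8) = (1, 5): byte 1, bit position 7-5 = 2
#   counted from the LSB, i.e. value = (byte1 >> 2) & 1
# - word address 1  -> bytes 2..3 (byte 2 is the MSB)
# - long address 0  -> bytes 0..3 (byte 0 is the MSB)
#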
from webiopi.decorators.rest import request, response
from webiopi.utils.types import toint, M_JSON
class Memory():
def __init__(self, byteCount):
self._byteCount = byteCount
#---------- Abstraction framework contracts ----------
def __family__(self):
return "Memory"
#---------- Memory abstraction REST implementation ----------
@request("GET", "memory/bit/*")
@response(contentType=M_JSON)
def memoryBitWildcard(self):
values = {}
for i in range(self.byteCount()):
valbyte = self.readMemoryByte(i)
for j in range(8):
position = 7 - j
values[i*8 + j] = "%d" % ((valbyte & (1 << position)) >> position)
return values
# {
# "0": "0",
# "1": "0",
# "2": "0",
# "3": "1"
# "4": "0"
# "5": "0"
# "6": "1"
# "7": "0"
# ...
# }
@request("GET", "memory/byte/*")
@response(contentType=M_JSON)
def memoryByteWildcard(self):
values = {}
byteValues = self.readMemoryBytes()
for i in range(len(byteValues)):
values[i] = "0x%02X" % byteValues[i]
return values
# {
# "0": "0x12",
# "1": "0x34",
# "2": "0xDE",
# "3": "0xFF"
# }
@request("GET", "memory/bytes/%(bounds)s")
@response(contentType=M_JSON)
def memoryBytes(self, bounds):
(start, stop) = bounds.split(",")
start = toint(start)
stop = toint(stop)
values = {}
byteValues = self.readMemoryBytes(start, stop)
for i in range(start, stop):
values[i] = "0x%02X" % byteValues[i - start]
return values
# {
# "1": "0x34",
# "2": "0xDE",
# }
@request("GET", "memory/word/*")
@response(contentType=M_JSON)
def memoryWordWildcard(self):
values = {}
for i in range(self.wordCount()):
values[i] = "0x%04X" % self.readMemoryWord(i)
return values
# {
# "0": "0x1234",
# "1": "0xDEFF"
# }
@request("GET", "memory/long/*")
@response(contentType=M_JSON)
def memoryLongWildcard(self):
values = {}
for i in range(self.longCount()):
values[i] = "0x%08X" % self.readMemoryLong(i)
return values
# {
# "0": "0x1234DEFF"
# }
@request("GET", "memory/bit/count")
@response("%d")
def bitCount(self):
return self._byteCount * 8
@request("GET", "memory/byte/count")
@response("%d")
def byteCount(self):
return self._byteCount
@request("GET", "memory/word/count")
@response("%d")
def wordCount(self):
return self._byteCount >> 1
@request("GET", "memory/long/count")
@response("%d")
def longCount(self):
return self._byteCount >> 2
@request("GET", "memory/bit/%(address)s")
@response("%d")
def readMemoryBit(self, address):
address = toint(address)
self.checkBitAddress(address)
return self.__readMemoryBit__(address)
@request("POST", "memory/bit/%(address)s/%(value)s")
@response("%d")
def writeMemoryBit(self, address, value):
address = toint(address)
self.checkBitAddress(address)
value = toint(value)
self.checkBitValue(value)
self.__writeMemoryBit__(address, value)
return self.readMemoryBit(address)
@request("GET", "memory/byte/%(address)s")
@response("0x%02X")
def readMemoryByte(self, address):
address = toint(address)
self.checkByteAddress(address)
return self.__readMemoryByte__(address)
@request("POST", "memory/byte/%(address)s/%(value)s")
@response("0x%02X")
def writeMemoryByte(self, address, value):
address = toint(address)
self.checkByteAddress(address)
value = toint(value)
self.checkByteValue(value)
self.__writeMemoryByte__(address, value)
return self.readMemoryByte(address)
@request("GET", "memory/word/%(address)s")
@response("0x%04X")
def readMemoryWord(self, address):
address = toint(address)
self.checkWordAddress(address)
return self.__readMemoryWord__(address)
@request("POST", "memory/word/%(address)s/%(value)s")
@response("0x%04X")
def writeMemoryWord(self, address, value):
address = toint(address)
self.checkWordAddress(address)
value = toint(value)
self.checkWordValue(value)
self.__writeMemoryWord__(address, value)
return self.readMemoryWord(address)
@request("GET", "memory/long/%(address)s")
@response("0x%08X")
def readMemoryLong(self, address):
address = toint(address)
self.checkLongAddress(address)
return self.__readMemoryLong__(address)
@request("POST", "memory/long/%(address)s/%(value)s")
@response("0x%08X")
def writeMemoryLong(self, address, value):
address = toint(address)
self.checkLongAddress(address)
value = toint(value)
self.checkLongValue(value)
self.__writeMemoryLong__(address, value)
return self.readMemoryLong(address)
#---------- Memory abstraction NON-REST implementation ----------
def readMemoryBytes(self, start=0, stop=None):
maxCount = self.byteCount()
if stop is None:
stop = maxCount
self.checkByteAddress(start)
self.checkStopByteAddress(stop)
byteValues = []
if start > stop:
raise ValueError("Stop address must be >= start address")
for i in range(start, stop):
byteValues.append(self.readMemoryByte(i))
return byteValues
def writeMemoryBytes(self, start=0, byteValues=[]):
self.checkByteAddress(start)
stop = start + len(byteValues)
self.checkStopByteAddress(stop)
i = 0
for byte in byteValues: # do nothing if list is empty
position = i + start
self.writeMemoryByte(position, byte)
i += 1
#---------- Memory abstraction contracts ----------
def __readMemoryByte__(self, address):
raise NotImplementedError
def __writeMemoryByte__(self, address, value):
raise NotImplementedError
#---------- Memory abstraction contracts with default implementations ---------
def __readMemoryBit__(self, address):
byteAddress, rawPosition = divmod(address, 8)
bitPosition = 7 - rawPosition
return (self.__readMemoryByte__(byteAddress) & (1 << bitPosition)) >> bitPosition
def __writeMemoryBit__(self, address, value):
byteAddress, rawPosition = divmod(address, 8)
bitPosition = 7 - rawPosition
changeMask = 1 << bitPosition
byteValue = self.__readMemoryByte__(byteAddress)
if value:
byteValue |= changeMask
else:
byteValue &= ~changeMask
self.__writeMemoryByte__(byteAddress, byteValue)
def __readMemoryWord__(self, address):
byte0 = self.__readMemoryByte__(address * 2)
byte1 = self.__readMemoryByte__((address * 2) + 1)
return (byte0 << 8) + byte1
def __writeMemoryWord__(self, address, value):
byte0 = (value >> 8) & 0xFF
byte1 = value & 0xFF
self.__writeMemoryByte__(address * 2, byte0)
self.__writeMemoryByte__((address * 2) + 1, byte1)
def __readMemoryLong__(self, address):
byte0 = self.__readMemoryByte__(address * 4)
byte1 = self.__readMemoryByte__((address * 4) + 1)
byte2 = self.__readMemoryByte__((address * 4) + 2)
byte3 = self.__readMemoryByte__((address * 4) + 3)
return (byte0 << 24) + (byte1 << 16) + (byte2 << 8) + byte3
def __writeMemoryLong__(self, address, value):
byte0 = (value >> 24) & 0xFF
byte1 = (value >> 16) & 0xFF
byte2 = (value >> 8) & 0xFF
byte3 = value & 0xFF
self.__writeMemoryByte__(address * 4, byte0)
self.__writeMemoryByte__((address * 4) + 1, byte1)
self.__writeMemoryByte__((address * 4) + 2, byte2)
self.__writeMemoryByte__((address * 4) + 3, byte3)
#---------- Value checks ----------
def checkBitAddress(self, address):
if not 0 <= address < self.byteCount() * 8:
raise ValueError("Bit address [%d] out of range [%d..%d]" % (address, 0, (self.byteCount() * 8) - 1))
def checkBitValue(self, value):
if not value in range(2):
raise ValueError("Bit value [%d] out of range [0..1]" % value)
def checkByteAddress(self, address):
if not 0 <= address < self.byteCount():
raise ValueError("Byte address [%d] out of range [%d..%d]" % (address, 0, self.byteCount() - 1))
def checkStopByteAddress(self, address):
if not 0 <= address <= self.byteCount():
raise ValueError("Stop byte address [%d] out of range [%d..%d]" % (address, 0, self.byteCount()))
def checkByteValue(self, value):
if not value in range(0x00,0xFF + 1):
raise ValueError("Byte value [0x%02X] out of range [0x%02X..0x%02X]" % (value, 0x00,0xFF))
def checkWordAddress(self, address):
if not 0 <= address < self.wordCount():
raise ValueError("Word address [%d] out of range [%d..%d]" % (address, 0, (self.wordCount() - 1)))
def checkWordValue(self, value):
if not value in range(0x00,0xFFFF + 1):
raise ValueError("Word value [0x%04X] out of range [0x%04X..0x%04X]" % (value, 0x00,0xFFFF))
def checkLongAddress(self, address):
if not 0 <= address < self.longCount():
raise ValueError("Long address [%d] out of range [%d..%d]" % (address, 0, (self.longCount() - 1)))
def checkLongValue(self, value):
if not value in range(0x00,0xFFFFFFFF + 1):
raise ValueError("Long value [0x%08X] out of range [0x%08X..0x%08X]" % (value, 0x00,0xFFFFFFFF))
#---------- Driver lookup ----------
DRIVERS = {}
DRIVERS["filememory"] = ["PICKLEFILE"]
DRIVERS["at24"] = ["EE24BASIC", "EE24X32", "EE24X64", "EE24X128", "EE24X256", "EE24X512", "EE24X1024_2"]
| apache-2.0 | -2,507,982,289,497,271,000 | 33.628492 | 113 | 0.571267 | false |
mattjj/pylds | pylds/distributions.py | 1 | 8777 | import autograd.numpy as np
from autograd import value_and_grad
from autograd.scipy.special import gammaln
from scipy.optimize import minimize
from pybasicbayes.distributions import Regression
from pybasicbayes.util.text import progprint_xrange
class PoissonRegression(Regression):
"""
Poisson regression with Gaussian distributed inputs and exp link:
y ~ Poisson(exp(Ax))
where x ~ N(mu, sigma)
Currently, we only support maximum likelihood estimation of the
parameters A given the distribution over inputs, x, and
the observed outputs, y.
We compute the expected log likelihood in closed form (since
we can do this with the exp link function), and we use Autograd
to compute its gradients.
"""
def __init__(self, D_out, D_in, A=None, verbose=False):
self._D_out, self._D_in = D_out, D_in
self.verbose = verbose
if A is not None:
assert A.shape == (D_out, D_in)
self.A = A.copy()
else:
self.A = 0.01 * np.random.randn(D_out, D_in)
self.sigma = None
@property
def D_in(self):
return self._D_in
@property
def D_out(self):
return self._D_out
def log_likelihood(self,xy):
assert isinstance(xy, tuple)
x, y = xy
loglmbda = x.dot(self.A.T)
lmbda = np.exp(loglmbda)
return -gammaln(y+1) - lmbda + y * loglmbda
def expected_log_likelihood(self, mus, sigmas, y):
"""
Compute the expected log likelihood for a mean and
covariance of x and an observed value of y.
"""
# Flatten the covariance
T = mus.shape[0]
D = self.D_in
sigs_vec = sigmas.reshape((T, D ** 2))
# Compute the log likelihood of each column
ll = np.zeros((T, self.D_out))
for n in range(self.D_out):
an = self.A[n]
E_loglmbda = np.dot(mus, an)
ll[:,n] += y[:,n] * E_loglmbda
# Vectorized log likelihood calculation
aa_vec = np.outer(an, an).reshape((D ** 2,))
            ll[:,n] -= np.exp(E_loglmbda + 0.5 * np.dot(sigs_vec, aa_vec))
return ll
def predict(self, x):
return np.exp(x.dot(self.A.T))
def rvs(self,x=None,size=1,return_xy=True):
x = np.random.normal(size=(size, self.D_in)) if x is None else x
y = np.random.poisson(self.predict(x))
return np.hstack((x, y)) if return_xy else y
def max_likelihood(self, data, weights=None,stats=None):
"""
Maximize the likelihood for a given value of x
:param data:
:param weights:
:param stats:
:return:
"""
raise NotImplementedError
def max_expected_likelihood(self, stats, verbose=False):
# These aren't really "sufficient" statistics, since we
# need the mean and covariance for each time bin.
EyxuT = np.sum([s[0] for s in stats], axis=0)
mus = np.vstack([s[1] for s in stats])
sigmas = np.vstack([s[2] for s in stats])
inputs = np.vstack([s[3] for s in stats])
        masks = np.vstack([s[4] for s in stats])
T = mus.shape[0]
D_latent = mus.shape[1]
sigmas_vec = sigmas.reshape((T, D_latent**2))
# Optimize each row of A independently
ns = progprint_xrange(self.D_out) if verbose else range(self.D_out)
for n in ns:
# Flatten the covariance to enable vectorized calculations
def ll_vec(an):
ll = 0
ll += np.dot(an, EyxuT[n])
# Vectorized log likelihood calculation
loglmbda = np.dot(mus, an)
aa_vec = np.outer(an[:D_latent], an[:D_latent]).reshape((D_latent ** 2,))
trms = np.exp(loglmbda + 0.5 * np.dot(sigmas_vec, aa_vec))
ll -= np.sum(trms[masks[:, n]])
if not np.isfinite(ll):
return -np.inf
return ll / T
obj = lambda x: -ll_vec(x)
itr = [0]
def cbk(x):
itr[0] += 1
print("M_step iteration ", itr[0])
res = minimize(value_and_grad(obj), self.A[n],
jac=True,
callback=cbk if verbose else None)
assert res.success
self.A[n] = res.x
class BernoulliRegression(Regression):
"""
Bernoulli regression with Gaussian distributed inputs and logistic link:
y ~ Bernoulli(logistic(Ax))
where x ~ N(mu, sigma)
Currently, we only support maximum likelihood estimation of the
parameter A given the distribution over inputs, x, and
the observed outputs, y.
We approximate the expected log likelihood with Monte Carlo.
"""
def __init__(self, D_out, D_in, A=None, verbose=False):
self._D_out, self._D_in = D_out, D_in
self.verbose = verbose
if A is not None:
assert A.shape == (D_out, D_in)
self.A = A.copy()
else:
self.A = 0.01 * np.random.randn(D_out, D_in)
self.sigma = None
@property
def D_in(self):
return self._D_in
@property
def D_out(self):
return self._D_out
def log_likelihood(self,xy):
assert isinstance(xy, tuple)
x, y = xy
psi = x.dot(self.A.T)
# First term is linear
ll = y * psi
# Compute second term with log-sum-exp trick (see above)
logm = np.maximum(0, psi)
ll -= np.sum(logm)
ll -= np.sum(np.log(np.exp(-logm) + np.exp(psi - logm)))
return ll
def predict(self, x):
return 1 / (1 + np.exp(-x.dot(self.A.T)))
def rvs(self, x=None, size=1, return_xy=True):
x = np.random.normal(size=(size, self.D_in)) if x is None else x
y = np.random.rand(x.shape[0], self.D_out) < self.predict(x)
return np.hstack((x, y)) if return_xy else y
def max_likelihood(self, data, weights=None, stats=None):
"""
Maximize the likelihood for given data
:param data:
:param weights:
:param stats:
:return:
"""
if isinstance(data, list):
x = np.vstack([d[0] for d in data])
y = np.vstack([d[1] for d in data])
elif isinstance(data, tuple):
            assert len(data) == 2
            x, y = data
elif isinstance(data, np.ndarray):
x, y = data[:,:self.D_in], data[:, self.D_in:]
else:
raise Exception("Invalid data type")
from sklearn.linear_model import LogisticRegression
for n in progprint_xrange(self.D_out):
lr = LogisticRegression(fit_intercept=False)
lr.fit(x, y[:,n])
self.A[n] = lr.coef_
def max_expected_likelihood(self, stats, verbose=False, n_smpls=1):
# These aren't really "sufficient" statistics, since we
# need the mean and covariance for each time bin.
EyxuT = np.sum([s[0] for s in stats], axis=0)
mus = np.vstack([s[1] for s in stats])
sigmas = np.vstack([s[2] for s in stats])
inputs = np.vstack([s[3] for s in stats])
T = mus.shape[0]
D_latent = mus.shape[1]
# Draw Monte Carlo samples of x
sigmas_chol = np.linalg.cholesky(sigmas)
x_smpls = mus[:, :, None] + np.matmul(sigmas_chol, np.random.randn(T, D_latent, n_smpls))
# Optimize each row of A independently
ns = progprint_xrange(self.D_out) if verbose else range(self.D_out)
for n in ns:
def ll_vec(an):
ll = 0
# todo include mask
# First term is linear in psi
ll += np.dot(an, EyxuT[n])
# Second term depends only on x and cannot be computed in closed form
# Instead, Monte Carlo sample x
psi_smpls = np.einsum('tdm, d -> tm', x_smpls, an[:D_latent])
psi_smpls = psi_smpls + np.dot(inputs, an[D_latent:])[:, None]
logm = np.maximum(0, psi_smpls)
trm2_smpls = logm + np.log(np.exp(-logm) + np.exp(psi_smpls - logm))
ll -= np.sum(trm2_smpls) / n_smpls
if not np.isfinite(ll):
return -np.inf
return ll / T
obj = lambda x: -ll_vec(x)
itr = [0]
def cbk(x):
itr[0] += 1
print("M_step iteration ", itr[0])
res = minimize(value_and_grad(obj), self.A[n],
jac=True,
# callback=cbk if verbose else None)
callback=None)
assert res.success
self.A[n] = res.x
| mit | 6,226,444,625,505,084,000 | 30.124113 | 97 | 0.53982 | false |
botswana-harvard/edc-rule-groups | edc_rule_groups/rule_group.py | 1 | 4885 | import inspect
import copy
from .exceptions import RuleGroupError
from .rule import Rule
class BaseMeta:
"""Base class for RuleGroup "Meta" class."""
app_label = None
source_model = None
source_fk = None
rules = None
def __init__(self, group_name, **meta_attrs):
for k, v in meta_attrs.items():
setattr(self, k, v)
self.group_name = group_name
def __repr__(self):
return '<Options for {}>'.format(self.group_name)
class RuleGroupMeta(type):
"""Rule group metaclass."""
def __new__(cls, name, bases, attrs):
"""Add the Meta attributes to each rule."""
try:
abstract = attrs.get('Meta', False).abstract
except AttributeError:
abstract = False
parents = [b for b in bases if isinstance(b, RuleGroupMeta)]
if not parents or abstract:
# If this isn't a subclass of BaseRuleGroup, don't do anything special.
return super(RuleGroupMeta, cls).__new__(cls, name, bases, attrs)
for parent in parents:
try:
if parent.Meta.abstract:
for rule in [member for member in inspect.getmembers(parent) if isinstance(member[1], Rule)]:
parent_rule = copy.deepcopy(rule)
attrs.update({parent_rule[0]: parent_rule[1]})
except AttributeError:
pass
# get the meta class delared on the RuleGroup
meta = attrs.pop('Meta', None)
try:
            # treat a source_model without an app_label prefix as (app_label, model)
            if [meta.source_model] == meta.source_model.split('.'):
source_model = meta.app_label, meta.source_model
else:
source_model = meta.source_model.split('.')
except AttributeError as e:
if '\'tuple\' object has no attribute \'split\'' not in str(e):
meta.source_model = None
source_model = None
else:
source_model = meta.source_model
try:
if not meta.source_fk:
meta.source_fk = None
except AttributeError:
meta.source_fk = None
# update attrs in each rule from values in Meta
rules = []
for rule_name, rule in attrs.items():
if not rule_name.startswith('_'):
if isinstance(rule, Rule):
rule.name = rule_name
rule.group = name
rule.app_label = meta.app_label
if meta:
rule.app_label = meta.app_label
target_models = []
try:
for target_model in rule.target_models:
if len(target_model.split('.')) != 2:
target_model = '{}.{}'.format(meta.app_label, target_model)
target_models.append(target_model)
rule.target_models = target_models
except AttributeError as e:
if 'target_models' not in str(e):
raise AttributeError(e)
if len(rule.target_model.split('.')) != 2:
rule.target_model = '{}.{}'.format(meta.app_label, rule.target_model)
rule.target_models = [rule.target_model]
rule.source_model = source_model
rules.append(rule)
# add a django like _meta to Rulegroup as an instance of BaseMeta
meta_attrs = {k: getattr(meta, k) for k in meta.__dict__ if not k.startswith('_')}
meta_attrs.update({'rules': tuple(rules)})
attrs.update({'_meta': BaseMeta(name, **meta_attrs)})
attrs.update({'name': '{}.{}'.format(meta.app_label, name.lower())})
return super(RuleGroupMeta, cls).__new__(cls, name, bases, attrs)
class RuleGroup(object, metaclass=RuleGroupMeta):
"""A class used to decalre and contain rules."""
@classmethod
def run_for_source_model(cls, obj, source_model):
for rule in cls._meta.rules:
if rule.source_model == source_model:
try:
rule.run(obj)
except AttributeError as e:
raise RuleGroupError(
'An exception was raised for rule {} with object \'{}\'. Got {}'.format(
rule, obj._meta.label_lower, str(e)))
@classmethod
def run_all(cls, obj):
for rule in cls._meta.rules:
try:
rule.run(obj)
except AttributeError as e:
raise RuleGroupError(
'An exception was raised for rule {} with object \'{}\'. Got {}'.format(
rule, obj._meta.label_lower, str(e)))
| gpl-2.0 | 1,186,585,431,572,537,900 | 38.715447 | 113 | 0.510952 | false |
oolorg/ool-l1patch-dev | patch_ofc.py | 1 | 8096 | import logging
import json
from webob import Response
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
# from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
import patch_ofc_flowbuilder
import patch_ofc_error
'''
"L1patch" OpenFlow controller based on "OFPatchPanel".
See also "OFPatchPanel" application.
nmasao/OFPatchPanel-SDNHackathon2014 - GitHub
https://github.com/nmasao/OFPatchPanel-SDNHackathon2014
'''
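# Example interaction with the REST API defined by PatchController below
# (illustrative only; apart from "dpid", the exact keys of the flow body depend
# on what patch_ofc_flowbuilder.FlowRuleBuilder expects):
#
#   $ ryu-manager patch_ofc.py
#   $ curl -X PUT -d '{"dpid": 1, ...}' http://127.0.0.1:8080/patch/flow
#   $ curl -X GET http://127.0.0.1:8080/patch/flow
#   $ curl -X DELETE -d '{"dpid": 1, ...}' http://127.0.0.1:8080/patch/flow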
patch_instance_name = 'patch_app'
LOG = logging.getLogger('ryu.app.patch.patch_rest')
class PatchPanel(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION,
ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'wsgi': WSGIApplication,
'dpset': dpset.DPSet}
def __init__(self, *args, **kwargs):
super(PatchPanel, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
wsgi.register(PatchController, {patch_instance_name: self})
self.patch_flows = [] # list of dict(flow)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
message = 'connected datapath: dpid=%d' % datapath.id
LOG.info(message)
deny_any_flow = {
'match': {
# match any
},
'actions': [
# empty : action DROP
],
'priority': 0 # lowest priority
}
if not self._mod_patch_flow_entry(
datapath, deny_any_flow, datapath.ofproto.OFPFC_ADD):
msg = "DPID:%s, Cannot set default deny flow rule." % datapath.id
raise patch_ofc_error.PatchOfcError(msg)
def add_patch_flow(self, req_flow):
self._mod_patch_flow(req_flow, 'put')
def delete_patch_flow(self, req_flow):
self._mod_patch_flow(req_flow, 'delete')
def _mod_patch_flow(self, req_flow, command):
# check command
if command not in ['delete', 'put']:
LOG.error("Unknown command: %s" % command)
return Response(status=501)
# Check before send flow-mod
dpid = req_flow.get('dpid')
dp = self.dpset.get(dpid)
if dp is None:
LOG.error("Cannot find datapath-id:%s" % dpid)
return Response(status=400)
# TODO: resource overwrap-check for exclusive mode wire
# for flow in self.patch_flows:
# if dpid == flow['dpid'] and inport == flow['inport']:
# LOG.info('Requested inport is already used (dpid:%s, inport:%d)', dpid, inport)
# return Response(status=400)
try:
flow_rules = patch_ofc_flowbuilder.FlowRuleBuilder(dp, req_flow).build_flow()
for flow_rule in flow_rules:
print "--------------------------"
print "%s, dpid:%d (ofp_ver:%d)" % (
command.upper(), dpid, dp.ofproto.OFP_VERSION
)
print json.dumps(req_flow)
print json.dumps(flow_rule)
self._mod_patch_flow_entry(
dp, flow_rule, self._get_datapath_command(dp, command)
)
self._post_mod_patch_flow(req_flow, command)
print "--------------------------"
cors_headers = {'Access-Control-Allow-Origin': '*'}
# Notice: Any request will accepted (status=200)
# if the request can send flow-mod to OFS
# (When the request does not have invalid dpid, invalid ofp-version.)
# Does not matter whether the request is match/correct.
return Response(status=200, headers=cors_headers)
except (patch_ofc_error.PatchOfcRestError,
patch_ofc_error.PatchOfcError) as err:
LOG.error(err.message)
return Response(status=501)
@staticmethod
def _get_datapath_command(dp, command):
if command == 'delete':
return dp.ofproto.OFPFC_DELETE
elif command == 'put':
return dp.ofproto.OFPFC_ADD
else:
msg = "Unknown command: %s" % command
raise patch_ofc_error.PatchOfcError(msg)
def _post_mod_patch_flow(self, req_flow, command):
if command == 'delete':
self._delete_from_patch_flows(req_flow)
elif command == 'put':
self.patch_flows.append(req_flow)
else:
msg = "Unknown command: %s" % command
raise patch_ofc_error.PatchOfcError(msg)
def _delete_from_patch_flows(self, req_flow):
# check each flows
req_flow_str = json.dumps(req_flow)
found_flow = None
for flow in self.patch_flows:
# TODO: now, use simplified/strict compare...
# difficult to compare recursively complex dict/list data.
# To compare it more simply, stringify these data...
# (json.dumps default: dictionary sorted.
flow_str = json.dumps(flow)
if req_flow_str == flow_str:
found_flow = flow
break
if found_flow:
self.patch_flows.remove(found_flow)
def _mod_patch_flow_entry(self, dp, flow_rule, command):
if dp.ofproto.OFP_VERSION in self.OFP_VERSIONS:
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
ofctl_v1_0.mod_flow_entry(dp, flow_rule, command)
elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
ofctl_v1_2.mod_flow_entry(dp, flow_rule, command)
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
ofctl_v1_3.mod_flow_entry(dp, flow_rule, command)
return True
else:
msg = "Unsupported OFP version: %s" % dp.ofproto.OFP_VERSION
raise patch_ofc_error.PatchOfcError(msg)
def get_patch_flows(self):
body = json.dumps(self.patch_flows)
return Response(content_type='application/json',
body=body, status=200)
class PatchController(ControllerBase):
def __init__(self, req, link, data, **config):
super(PatchController, self).__init__(req, link, data, **config)
self.patch_app = data[patch_instance_name]
@route('patch', '/patch/flow', methods=['PUT'])
def add_patch_flow(self, req, **kwargs):
LOG.debug("start add_patch_flow")
patch = self.patch_app
try:
flow = eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
result = patch.add_patch_flow(flow)
return result
@route('patch', '/patch/flow', methods=['DELETE'])
def delete_patch_flow(self, req, **kwargs):
patch = self.patch_app
try:
flow = eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
result = patch.delete_patch_flow(flow)
return result
@route('patch', '/patch/flow', methods=['GET'])
def get_patch_flows(self, req, **kwargs):
patch = self.patch_app
result = patch.get_patch_flows()
return result
@route('patch', '/patch/flow', methods=['OPTIONS'])
def opts_patch_flows(self, req, **kwargs):
cors_headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'PUT, GET, DELETE, OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type, Origin'
}
return Response(status=200, headers=cors_headers)
| apache-2.0 | 517,307,418,234,747,600 | 36.655814 | 97 | 0.587204 | false |
RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/cmovie.py | 1 | 3714 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,base64,json,urlparse,urllib
from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import directstream
from resources.lib.modules import dom_parser2
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['cutemovie.net']
self.base_link = 'http://www1.cutemovie.net/'
self.movies_search_path = ('search-movies/%s.html')
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title).replace('-','+')
url = urlparse.urljoin(self.base_link, (self.movies_search_path % clean_title))
r = client.request(url)
r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
r = [(i[0], i[1]) for i in r if i[1] == year]
if r[0]:
url = r[0][0]
return url
else: return
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = client.request(url)
r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
r = [(i[0].attrs['href'], re.search('/(\w+).html', i[0].attrs['href'])) for i in r if i]
r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
for i in r:
try:
host = i[1]
if str(host) in str(hostDict):
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': i[0].replace('\/','/'),
'direct': False,
'debridonly': False
})
except: pass
return sources
except Exception:
return
def resolve(self, url):
try:
r = client.request(url)
url = re.findall('document.write.+?"([^"]*)', r)[0]
url = base64.b64decode(url)
url = re.findall('src="([^"]*)', url)[0]
return url
except Exception:
return
| gpl-2.0 | 706,845,749,741,321,600 | 39.369565 | 100 | 0.462305 | false |
jemofthewest/mykoans | python3/koans/about_tuples.py | 1 | 2314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
msg = ex.args[0]
# Note, assertRegexpMatches() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertRegexpMatches(msg, "'tuple' object")
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
with self.assertRaises(AttributeError): count_of_three.append("boom")
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1, 2, 5, "boom"), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(int, (1).__class__)
self.assertEqual(tuple, (1,).__class__)
self.assertEqual(("Hello comma!",), ("Hello comma!", ))
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S','u','r','p','r','i','s','e','!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual(tuple() , ())
self.assertEqual(() , tuple()) #Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append( ("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W')) )
self.assertEqual('Cthulu', locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
| mit | -1,628,391,265,102,596,600 | 33.029412 | 83 | 0.571305 | false |
joshwatson/binaryninja-api | python/settings.py | 1 | 18886 | # Copyright (c) 2015-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
# Binary Ninja components
from binaryninja import _binaryninjacore as core
# 2-3 compatibility
from binaryninja import range
from binaryninja import pyNativeStr
from binaryninja.enums import SettingsScope
class Settings(object):
"""
:class:`Settings` provides a way to define and access settings in a hierarchical fashion. The value of a setting can \
be defined for each hierarchical level, where each level overrides the preceding level. The backing-store for setting \
values at each level is also configurable. This allows for ephemeral or platform-independent persistent settings storage \
for components within Binary Ninja or consumers of the Binary Ninja API.
Each :class:`Settings` instance has an ``instance_id`` which identifies a schema. The schema defines the settings contents \
and the way in which settings are retrieved and manipulated. A new :class:`Settings` instance defaults to using a value of *'default'* \
for the ``instance_id``. The *'default'* settings schema defines all of the settings available for the active Binary Ninja components \
which include at a minimum, the settings defined by the Binary Ninja core. The *'default'* schema may additionally define settings \
for the UI and/or installed plugins. Extending existing schemas, or defining new ones is accomplished by calling :func:`register_group` \
and :func:`register_setting` methods, or by deserializing an existing schema with :func:`deserialize_schema`.
.. note:: All settings in the *'default'* settings schema are rendered with UI elements in the Settings View of Binary Ninja UI.
Allowing setting overrides is an important feature and Binary Ninja accomplishes this by allowing one to override a setting at various \
levels. The levels and their associated storage are shown in the following table. Default setting values are optional, and if specified, \
saved in the schema itself.
================= ========================== ============== ==============================================
Setting Level Settings Scope Preference Storage
================= ========================== ============== ==============================================
Default SettingsDefaultScope Lowest Settings Schema
User SettingsUserScope - <User Directory>/settings.json
Project SettingsProjectScope - <Project Directory>/.binaryninja/settings.json
Resource SettingsResourceScope Highest Raw BinaryView (Storage in BNDB)
================= ========================== ============== ==============================================
Settings are identified by a key, which is a string in the form of **'<group>.<name>'** or **'<group>.<subGroup>.<name>'**. Groups provide \
a simple way to categorize settings. Sub-groups are optional and multiple sub-groups are allowed. When defining a settings group, the \
:func:`register_group` method allows for specifying a UI friendly title for use in the Binary Ninja UI. Defining a new setting requires a \
unique setting key and a JSON string of property, value pairs. The following table describes the available properties and values.
================== ====================================== ================= ======== =======================================================================
Property JSON Data Type Prerequisite Optional {Allowed Values} and Notes
================== ====================================== ================= ======== =======================================================================
"title" string None No Concise Setting Title
"type" string None No {"array", "boolean", "number", "string"}
"elementType" string "type" is "array" No {"string"}
"enum" array : {string} "type" is "array" Yes Enumeration definitions
"enumDescriptions" array : {string} "type" is "array" Yes Enumeration descriptions that match "enum" array
"minValue" number "type" is "number" Yes Specify 0 to infer unsigned (default is signed)
"maxValue" number "type" is "number" Yes Values less than or equal to INT_MAX infer a spinbox.
"precision" number "type" is "number" Yes Specify precision for a QDoubleSpinBox
"default" {array, boolean, number, string, null} None Yes Specify optimal default value
"aliases" array : {string} None Yes Array of deprecated setting key(s)
"description" string None No Detailed setting description
"ignore" array : {string} None Yes {"SettingsUserScope", "SettingsProjectScope", "SettingsResourceScope"}
"readOnly" boolean None Yes Only enforced by UI elements
"optional" boolean None Yes Indicates setting can be null
.. note:: In order to facilitate deterministic analysis results, settings from the *'default'* schema that impact analysis are serialized \
from Default, User, and Project scope into Resource scope during initial BinaryView analysis. This allows an analysis database to be opened \
at a later time with the same settings, regardless if Default, User, or Project settings have been modified.
.. note:: Settings that do not impact analysis (e.g. many UI settings) should use the *"ignore"* property to exclude \
*"SettingsProjectScope"* and *"SettingsResourceScope"* from the applicable scopes for the setting.
Example analysis plugin setting:
>>> my_settings = Settings()
>>> title = "My Pre-Analysis Plugin"
>>> description = "Enable extra analysis before core analysis."
>>> properties = f'{{"title" : "{title}", "description" : "{description}", "type" : "boolean", "default" : false}}'
>>> my_settings.register_group("myPlugin", "My Plugin")
True
>>> my_settings.register_setting("myPlugin.enablePreAnalysis", properties)
True
>>> my_bv = open_view("/bin/ls", options={'myPlugin.enablePreAnalysis' : True})
>>> Settings().get_bool("myPlugin.enablePreAnalysis")
False
>>> Settings().get_bool("myPlugin.enablePreAnalysis", my_bv)
True
Example UI plugin setting:
>>> my_settings = Settings()
>>> title = "My UI Plugin"
>>> description = "Enable My UI Plugin table display."
>>> properties = f'{{"title" : "{title}", "description" : "{description}", "type" : "boolean", "default" : true, "ignore" : ["SettingsProjectScope", "SettingsResourceScope"]}}'
>>> my_settings.register_group("myPlugin", "My Plugin")
True
>>> my_settings.register_setting("myPlugin.enableTableView", properties)
True
>>> my_bv = open_view("/bin/ls", options={'myPlugin.enablePreAnalysis' : True})
>>> Settings().get_bool("myPlugin.enableTableView")
True
"""
handle = core.BNCreateSettings("default")
def __init__(self, instance_id = "default", handle = None):
if handle is None:
if instance_id is None or instance_id == "":
instance_id = "default"
self._instance_id = instance_id
if instance_id == "default":
self.handle = Settings.handle
else:
self.handle = core.BNCreateSettings(instance_id)
else:
			self._instance_id = core.BNGetUniqueIdentifierString()
self.handle = handle
def __del__(self):
if self.handle is not Settings.handle and self.handle is not None:
core.BNFreeSettings(self.handle)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash((self.instance_id, ctypes.addressof(self.handle.contents)))
@property
def instance_id(self):
"""Returns the ``instance_id`` for this :class:`Settings` repository (read-only)"""
return self._instance_id
def set_resource_id(self, resource_id = None):
"""
``set_resource_id`` Sets the resource identifier for this class:`Settings` instance. When accessing setting values at the \
``SettingsResourceScope`` level, the resource identifier is passed along through the backing store interface.
.. note:: Currently the only available backing store for ``SettingsResourceScope`` is a :class:`BinaryView` object. In the context \
of a :class:`BinaryView` the resource identifier is the :class:`BinaryViewType` name. All settings for this type of backing store \
are saved in the *'Raw'* :class:`BinaryViewType`. This enables the configuration of setting values such that they are available \
during :class:`BinaryView` creation and initialization.
:param str resource_id: a unique identifier
:rtype: None
"""
if resource_id is None:
resource_id = ""
core.BNSettingsSetResourceId(self.handle, resource_id)
def register_group(self, group, title):
"""
``register_group`` registers a group in the schema for this :class:`Settings` instance
:param str group: a unique identifier
:param str title: a user friendly name appropriate for UI presentation
:return: True on success, False on failure.
:rtype: bool
:Example:
>>> Settings().register_group("solver", "Solver")
True
>>>
"""
return core.BNSettingsRegisterGroup(self.handle, group, title)
def register_setting(self, key, properties):
"""
``register_setting`` registers a new setting with this :class:`Settings` instance
:param str key: a unique setting identifier in the form **'<group>.<name>'**
:param str properties: a JSON string describes the setting schema
:return: True on success, False on failure.
:rtype: bool
:Example:
>>> Settings().register_group("solver", "Solver")
True
>>> Settings().register_setting("solver.basicBlockSlicing", '{"description" : "Enable the basic block slicing in the solver.", "title" : "Basic Block Slicing", "default" : true, "type" : "boolean"}')
True
"""
return core.BNSettingsRegisterSetting(self.handle, key, properties)
def contains(self, key):
"""
``contains`` determine if a setting identifier exists in the active settings schema
:param str key: the setting identifier
:return: True if the identifier exists in this active settings schema, False otherwise
:rtype: bool
"""
return core.BNSettingsContains(self.handle, key)
def is_empty(self):
"""
``is_empty`` determine if the active settings schema is empty
:return: True if the active settings schema is empty, False otherwise
:rtype: bool
"""
return core.BNSettingsIsEmpty(self.handle)
def keys(self):
"""
``keys`` retrieve the list of setting identifiers in the active settings schema
:return: list of setting identifiers
:rtype: list(str)
"""
length = ctypes.c_ulonglong()
result = core.BNSettingsKeysList(self.handle, ctypes.byref(length))
out_list = []
for i in range(length.value):
out_list.append(pyNativeStr(result[i]))
core.BNFreeStringList(result, length)
return out_list
def query_property_string_list(self, key, property_name):
length = ctypes.c_ulonglong()
result = core.BNSettingsQueryPropertyStringList(self.handle, key, property_name, ctypes.byref(length))
out_list = []
for i in range(length.value):
out_list.append(pyNativeStr(result[i]))
core.BNFreeStringList(result, length)
return out_list
def update_property(self, key, setting_property):
return core.BNSettingsUpdateProperty(self.handle, key, setting_property)
def deserialize_schema(self, schema, scope = SettingsScope.SettingsAutoScope, merge = True):
return core.BNSettingsDeserializeSchema(self.handle, schema, scope, merge)
def serialize_schema(self):
return core.BNSettingsSerializeSchema(self.handle)
def deserialize_settings(self, contents, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNDeserializeSettings(self.handle, contents, view, scope)
def serialize_settings(self, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSerializeSettings(self.handle, view, scope)
def reset(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSettingsReset(self.handle, key, view, scope)
def reset_all(self, view = None, scope = SettingsScope.SettingsAutoScope, schema_only = True):
if view is not None:
view = view.handle
return core.BNSettingsResetAll(self.handle, view, scope, schema_only)
def get_bool(self, key, view = None):
if view is not None:
view = view.handle
return core.BNSettingsGetBool(self.handle, key, view, None)
def get_double(self, key, view = None):
if view is not None:
view = view.handle
return core.BNSettingsGetDouble(self.handle, key, view, None)
def get_integer(self, key, view = None):
if view is not None:
view = view.handle
return core.BNSettingsGetUInt64(self.handle, key, view, None)
def get_string(self, key, view = None):
if view is not None:
view = view.handle
return core.BNSettingsGetString(self.handle, key, view, None)
def get_string_list(self, key, view = None):
if view is not None:
view = view.handle
length = ctypes.c_ulonglong()
result = core.BNSettingsGetStringList(self.handle, key, view, None, ctypes.byref(length))
out_list = []
for i in range(length.value):
out_list.append(pyNativeStr(result[i]))
core.BNFreeStringList(result, length)
return out_list
def get_json(self, key, view = None):
if view is not None:
view = view.handle
return core.BNSettingsGetJson(self.handle, key, view, None)
def get_bool_with_scope(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
c_scope = core.SettingsScopeEnum(scope)
result = core.BNSettingsGetBool(self.handle, key, view, ctypes.byref(c_scope))
return (result, SettingsScope(c_scope.value))
def get_double_with_scope(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
c_scope = core.SettingsScopeEnum(scope)
result = core.BNSettingsGetDouble(self.handle, key, view, ctypes.byref(c_scope))
return (result, SettingsScope(c_scope.value))
def get_integer_with_scope(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
c_scope = core.SettingsScopeEnum(scope)
result = core.BNSettingsGetUInt64(self.handle, key, view, ctypes.byref(c_scope))
return (result, SettingsScope(c_scope.value))
def get_string_with_scope(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
c_scope = core.SettingsScopeEnum(scope)
result = core.BNSettingsGetString(self.handle, key, view, ctypes.byref(c_scope))
return (result, SettingsScope(c_scope.value))
def get_string_list_with_scope(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
c_scope = core.SettingsScopeEnum(scope)
length = ctypes.c_ulonglong()
result = core.BNSettingsGetStringList(self.handle, key, view, ctypes.byref(c_scope), ctypes.byref(length))
out_list = []
for i in range(length.value):
out_list.append(pyNativeStr(result[i]))
core.BNFreeStringList(result, length)
return (out_list, SettingsScope(c_scope.value))
def get_json_with_scope(self, key, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
c_scope = core.SettingsScopeEnum(scope)
result = core.BNSettingsGetJson(self.handle, key, view, ctypes.byref(c_scope))
return (result, SettingsScope(c_scope.value))
def set_bool(self, key, value, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSettingsSetBool(self.handle, view, scope, key, value)
def set_double(self, key, value, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSettingsSetDouble(self.handle, view, scope, key, value)
def set_integer(self, key, value, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSettingsSetUInt64(self.handle, view, scope, key, value)
def set_string(self, key, value, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSettingsSetString(self.handle, view, scope, key, value)
def set_string_list(self, key, value, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
length = ctypes.c_ulonglong()
length.value = len(value)
string_list = (ctypes.c_char_p * len(value))()
for i in range(len(value)):
string_list[i] = value[i].encode('charmap')
return core.BNSettingsSetStringList(self.handle, view, scope, key, string_list, length)
def set_json(self, key, value, view = None, scope = SettingsScope.SettingsAutoScope):
if view is not None:
view = view.handle
return core.BNSettingsSetJson(self.handle, view, scope, key, value)
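# Editorial usage sketch (not part of the upstream module): how the scoped
# accessors defined above are typically combined. The key reuses the
# "myPlugin.enablePreAnalysis" example from the class docstring; return
# values and the reported scope depend on the active schema and on where
# the value actually gets stored.
#
#   s = Settings()
#   s.set_bool("myPlugin.enablePreAnalysis", True,
#              scope=SettingsScope.SettingsUserScope)
#   value, scope = s.get_bool_with_scope("myPlugin.enablePreAnalysis")
#   # value is the effective setting; scope reports the level it came from,
#   # e.g. SettingsScope.SettingsUserScope after the write above.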
| mit | 2,208,039,915,937,876,000 | 46.571788 | 202 | 0.67378 | false |
pywren/pywren | fabfile.py | 1 | 11297 | #
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fabric.api import local, env, run, put, cd, task, sudo, get, settings, warn_only, lcd
from fabric.contrib import project
import boto3
import cloudpickle
import json
import base64
from six.moves import cPickle as pickle
from pywren.wrenconfig import *
import pywren
import time
"""
conda notes
be sure to call conda clean --all before compressing
"""
env.roledefs['m'] = ['jonas@c65']
AWS_INSTANCE_NAME = "test_instance"
@task
def create_zip():
with lcd("pywren"):
local("zip ../deploy.zip *.py")
@task
def get_condaruntime():
pass
@task
def put_condaruntime():
local("scp -r c65:/data/jonas/chicken/condaruntime.tar.gz .")
#local("tar czvf condaruntime.tar.gz condaruntime")
local("aws s3 cp condaruntime.tar.gz s3://ericmjonas-public/condaruntime.tar.gz")
@task
def create_function():
lambclient = boto3.client('lambda', region_name=AWS_REGION)
lambclient.create_function(FunctionName = FUNCTION_NAME,
Handler = HANDLER_NAME,
Runtime = "python2.7",
MemorySize = MEMORY,
Timeout = TIMEOUT,
Role = ROLE,
                               Code = {'ZipFile' : open(PACKAGE_FILE, 'rb').read()})
@task
def update_function():
lambclient = boto3.client('lambda', region_name=AWS_REGION)
response = lambclient.update_function_code(FunctionName=FUNCTION_NAME,
                                               ZipFile=open(PACKAGE_FILE, 'rb').read())
@task
def deploy():
local('git ls-tree --full-tree --name-only -r HEAD > .git-files-list')
project.rsync_project("/data/jonas/pywren/", local_dir="./",
exclude=['*.npy', "*.ipynb", 'data', "*.mp4",
"*.pdf", "*.png"],
extra_opts='--files-from=.git-files-list')
# copy the notebooks from remote to local
project.rsync_project("/data/jonas/pywren/", local_dir="./",
extra_opts="--include '*.ipynb' --include '*.pdf' --include '*.png' --include='*/' --exclude='*' ",
upload=False)
QUEUE_NAME = 'pywren-queue'
#MESSAGE_GROUP_ID = 'hello.world'
@task
def create_queue():
sqs = boto3.resource('sqs', region_name=AWS_REGION)
queue = sqs.create_queue(QueueName=QUEUE_NAME,
Attributes={'VisibilityTimeout' : "20"})
@task
def put_message():  # was: put_message(MessageBody="hello world")
# Get the service resource
sqs = boto3.resource('sqs', region_name=AWS_REGION)
# Get the queue
queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
MessageBody = "{}".format(time.time())
response = queue.send_message(MessageBody=MessageBody)
@task
def get_message(delete=False):
# Get the service resource
sqs = boto3.resource('sqs', region_name=AWS_REGION)
# Get the queue
queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
response = queue.receive_messages()
    if len(response) > 0:
print(response[0].body)
if delete:
response[0].delete()
@task
def sqs_worker(number=1):
from multiprocessing.pool import ThreadPool, Pool
number = int(number)
sqs = boto3.resource('sqs', region_name=AWS_REGION)
# Get the queue
queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
LOG_FILE = "sqs.log"
def process_message(m):
fid = open(LOG_FILE, 'a')
fid.write("sent {} received {}\n".format(m.body, time.time()))
m.delete()
fid.close()
pool = ThreadPool(10)
while(True):
print("reading queue" )
response = queue.receive_messages(WaitTimeSeconds=10)
if len(response) > 0:
print("Dispatching")
#pool.apply_async(
process_message(response[0])
else:
print("no message, sleeping")
time.sleep(1)
@task
def sqs_purge_queue():
sqs = boto3.resource('sqs', region_name=AWS_REGION)
# Get the queue
queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
queue.purge()
INSTANCE_PROFILE_NAME = "pywren_standalone"
@task
def create_instance_profile():
iam = boto3.resource('iam')
#iam.create_instance_profile(InstanceProfileName=INSTANCE_PROFILE_NAME)
instance_profile = iam.InstanceProfile(INSTANCE_PROFILE_NAME)
#instance_profile.add_role(RoleName='pywren_exec_role_refactor8')
print(instance_profile.name)
@task
def launch_instance():
tgt_ami = 'ami-b04e92d0'
AWS_REGION = 'us-west-2'
my_aws_key = 'ec2-us-west-2'
INSTANCE_TYPE = 'm3.xlarge'
instance_name = AWS_INSTANCE_NAME
ec2 = boto3.resource('ec2', region_name=AWS_REGION)
BlockDeviceMappings=[
{
'DeviceName': '/dev/xvda',
'Ebs': {
'VolumeSize': 100,
'DeleteOnTermination': True,
'VolumeType': 'standard',
'SnapshotId' : 'snap-c87f35ec'
},
},
]
user_data = """
#cloud-config
repo_update: true
repo_upgrade: all
packages:
- tmux
- emacs
- gcc
- g++
- git
- htop
runcmd:
- [ sh, -c, 'echo "hello world" > /tmp/hello.txt' ]
- pip install supervisor
- [ sudo, -Hu, ec2-user, sh, -c, "wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O /tmp/miniconda.sh"]
- [ sudo, -Hu, ec2-user, sh, -c, "chmod +x /tmp/miniconda.sh"]
- [ sudo, -Hu, ec2-user, sh, -c, "/tmp/miniconda.sh -b -p /home/ec2-user/anaconda"]
- [ sudo, -Hu, ec2-user, sh, -c, "/home/ec2-user/anaconda/bin/conda install -q -y numpy boto3"]
- [ sudo, -Hu, ec2-user, sh, -c, "git clone -b standalone-worker https://github.com/ericmjonas/pywren.git /home/ec2-user/pywren"]
- [ sudo, -Hu, ec2-user, sh, -c, "/home/ec2-user/anaconda/bin/pip install -e /home/ec2-user/pywren"]
"""
# - [
# - /home/ec2-user/anaconda/bin/conda install -q -y numpy boto3
# - git clone -b standalone-worker "https://github.com/ericmjonas/pywren.git" /home/ec2-user/pywren
# - /home/ec2-user/anaconda/bin/pip install -e
# - [ ./Miniconda2-latest-Linux-x86_64.sh -b -p /home/ec2-user/anaconda]
# - [ /home/ec2-user/anaconda/bin/conda install numpy boto3]
iam = boto3.resource('iam')
instance_profile = iam.InstanceProfile(INSTANCE_PROFILE_NAME)
instance_profile_dict = {
'Name' : instance_profile.name}
instances = ec2.create_instances(ImageId=tgt_ami, MinCount=1, MaxCount=1,
KeyName=my_aws_key,
InstanceType=INSTANCE_TYPE,
BlockDeviceMappings = BlockDeviceMappings,
InstanceInitiatedShutdownBehavior='terminate',
EbsOptimized=True,
IamInstanceProfile = instance_profile_dict,
UserData=user_data)
for inst in instances:
inst.wait_until_running()
inst.reload()
inst.create_tags(
Resources=[
inst.instance_id
],
Tags=[
{
'Key': 'Name',
'Value': instance_name
},
]
)
print(inst.public_dns_name)
def tags_to_dict(d):
return {a['Key'] : a['Value'] for a in d}
@task
def terminate_instance():
instance_name = "test_instance"
ec2 = boto3.resource('ec2', region_name=AWS_REGION)
insts = []
for i in ec2.instances.all():
if i.state['Name'] == 'running':
d = tags_to_dict(i.tags)
if d['Name'] == instance_name:
i.terminate()
insts.append(i)
@task
def delete_log_groups(prefix):
config = pywren.wrenconfig.default()
logclient = boto3.client('logs', region_name=config['account']['aws_region'])
lg = logclient.describe_log_groups(logGroupNamePrefix=prefix)
for l in lg['logGroups']:
logGroupName = l['logGroupName']
print('deleting', logGroupName)
logclient.delete_log_group(logGroupName = logGroupName)
@task
def cleanup_travis_leftovers():
"""
When travis builds fail they can leave behind IAM resources
"""
iam = boto3.resource('iam')
client = boto3.client('iam')
for p in iam.instance_profiles.all():
if 'pywren_travis_' in p.name:
print(p.name)
for r in p.roles:
p.remove_role(RoleName=r.name)
p.delete()
removed_role_count = 0
for r in iam.roles.all():
if 'pywren_travis_test_' in r.name:
for p in r.policies.all():
r.detach_policy(p)
p.delete()
r.delete()
removed_role_count += 1
print("removed {} roles".format(removed_role_count))
@task
def cleanup_leftover_buckets():
"""
Delete all buckets with pywren-travis in the name
"""
config = pywren.wrenconfig.default()
s3 = boto3.resource('s3')
client = boto3.client('s3')
for bucket in s3.buckets.all():
if 'pywren-travis-' in bucket.name:
while True:
response = client.list_objects_v2(Bucket=bucket.name,
MaxKeys=1000)
if response['KeyCount'] > 0:
keys = [c['Key'] for c in response['Contents']]
objects = [{'Key' : k} for k in keys]
print("deleting", len(keys), "keys")
client.delete_objects(Bucket=bucket.name,
Delete={'Objects' : objects})
else:
break
#for obj in bucket.objects.all():
print("deleting", bucket.name)
bucket.delete()
| apache-2.0 | -8,836,333,643,246,532,000 | 29.045213 | 136 | 0.565548 | false |
googleapis/googleapis-gen | google/cloud/dialogflow/cx/v3/dialogflow-cx-v3-py/google/cloud/dialogflowcx_v3/types/version.py | 1 | 8929 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflowcx_v3.types import flow
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3',
manifest={
'CreateVersionOperationMetadata',
'Version',
'ListVersionsRequest',
'ListVersionsResponse',
'GetVersionRequest',
'CreateVersionRequest',
'UpdateVersionRequest',
'DeleteVersionRequest',
'LoadVersionRequest',
},
)
class CreateVersionOperationMetadata(proto.Message):
r"""Metadata associated with the long running operation for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3.Versions.CreateVersion].
Attributes:
version (str):
Name of the created version. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
version = proto.Field(
proto.STRING,
number=1,
)
class Version(proto.Message):
r"""Represents a version of a flow.
Attributes:
name (str):
Format: projects/<Project
ID>/locations/<Location ID>/agents/<Agent
ID>/flows/<Flow ID>/versions/<Version ID>.
Version ID is a self-increasing number generated
by Dialogflow upon version creation.
display_name (str):
Required. The human-readable name of the
version. Limit of 64 characters.
description (str):
The description of the version. The maximum
length is 500 characters. If exceeded, the
request is rejected.
nlu_settings (google.cloud.dialogflowcx_v3.types.NluSettings):
Output only. The NLU settings of the flow at
version creation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Create time of the version.
state (google.cloud.dialogflowcx_v3.types.Version.State):
Output only. The state of this version. This
field is read-only and cannot be set by create
and update methods.
"""
class State(proto.Enum):
r"""The state of the version."""
STATE_UNSPECIFIED = 0
RUNNING = 1
SUCCEEDED = 2
FAILED = 3
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
nlu_settings = proto.Field(
proto.MESSAGE,
number=4,
message=flow.NluSettings,
)
create_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
state = proto.Field(
proto.ENUM,
number=6,
enum=State,
)
class ListVersionsRequest(proto.Message):
r"""The request message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3.Versions.ListVersions].
Attributes:
parent (str):
Required. The [Flow][google.cloud.dialogflow.cx.v3.Flow] to
list all versions for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListVersionsResponse(proto.Message):
r"""The response message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3.Versions.ListVersions].
Attributes:
versions (Sequence[google.cloud.dialogflowcx_v3.types.Version]):
A list of versions. There will be a maximum number of items
returned based on the page_size field in the request. The
list may in some cases be empty or contain fewer entries
than page_size even if this isn't the last page.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
versions = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Version',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetVersionRequest(proto.Message):
r"""The request message for
[Versions.GetVersion][google.cloud.dialogflow.cx.v3.Versions.GetVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3.Version]. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateVersionRequest(proto.Message):
r"""The request message for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3.Versions.CreateVersion].
Attributes:
parent (str):
Required. The [Flow][google.cloud.dialogflow.cx.v3.Flow] to
create an [Version][google.cloud.dialogflow.cx.v3.Version]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
version (google.cloud.dialogflowcx_v3.types.Version):
Required. The version to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
version = proto.Field(
proto.MESSAGE,
number=2,
message='Version',
)
class UpdateVersionRequest(proto.Message):
r"""The request message for
[Versions.UpdateVersion][google.cloud.dialogflow.cx.v3.Versions.UpdateVersion].
Attributes:
version (google.cloud.dialogflowcx_v3.types.Version):
Required. The version to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields get updated.
Currently only ``description`` and ``display_name`` can be
updated.
"""
version = proto.Field(
proto.MESSAGE,
number=1,
message='Version',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteVersionRequest(proto.Message):
r"""The request message for
[Versions.DeleteVersion][google.cloud.dialogflow.cx.v3.Versions.DeleteVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3.Version] to delete.
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class LoadVersionRequest(proto.Message):
r"""The request message for
[Versions.LoadVersion][google.cloud.dialogflow.cx.v3.Versions.LoadVersion].
Attributes:
name (str):
Required. The
[Version][google.cloud.dialogflow.cx.v3.Version] to be
loaded to draft flow. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
allow_override_agent_resources (bool):
This field is used to prevent accidental overwrite of other
agent resources, which can potentially impact other flow's
behavior. If ``allow_override_agent_resources`` is false,
conflicted agent-level resources will not be overridden
(i.e. intents, entities, webhooks).
"""
name = proto.Field(
proto.STRING,
number=1,
)
allow_override_agent_resources = proto.Field(
proto.BOOL,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,017,226,616,603,671,700 | 29.578767 | 118 | 0.62045 | false |
cans/tappy-pkg | tap/tracker.py | 1 | 2144 | # Copyright (c) 2015, Matt Layman
from __future__ import print_function
from collections import namedtuple
import os
TAPLine = namedtuple('TAPLine', ['status', 'description', 'directive'])
class Tracker(object):
def __init__(self, outdir=None):
self._test_cases = {}
self.outdir = outdir
if outdir and not os.path.exists(outdir):
os.makedirs(outdir)
def _track(self, class_name):
"""Keep track of which test cases have executed."""
if self._test_cases.get(class_name) is None:
self._test_cases[class_name] = []
def add_ok(self, class_name, description, directive=''):
self._track(class_name)
self._test_cases[class_name].append(
TAPLine('ok', description, directive))
def add_not_ok(self, class_name, description, directive=''):
self._track(class_name)
self._test_cases[class_name].append(
TAPLine('not ok', description, directive))
def add_skip(self, class_name, description, reason):
directive = '# SKIP {0}'.format(reason)
self.add_ok(class_name, description, directive)
def generate_tap_reports(self):
for test_case, tap_lines in self._test_cases.items():
self.generate_tap_report(test_case, tap_lines)
def generate_tap_report(self, test_case, tap_lines):
with open(self._get_tap_file_path(test_case), 'w') as f:
print('# TAP results for {0}'.format(test_case), file=f)
for line_count, tap_line in enumerate(tap_lines, start=1):
result = ' '.join([
tap_line.status,
str(line_count),
'-',
tap_line.description,
tap_line.directive,
])
print(result, file=f)
print('1..{0}'.format(len(tap_lines)), file=f)
def _get_tap_file_path(self, test_case):
"""Get the TAP output file path for the test case."""
tap_file = test_case + '.tap'
if self.outdir:
return os.path.join(self.outdir, tap_file)
return tap_file
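# Editorial usage sketch (not part of the original module); the suite name,
# descriptions, and output directory below are made up for illustration.
#
#   tracker = Tracker(outdir="tap-output")
#   tracker.add_ok("MySuite", "addition works")
#   tracker.add_not_ok("MySuite", "division by zero handled")
#   tracker.add_skip("MySuite", "network case", "no network in CI")
#   tracker.generate_tap_reports()   # writes tap-output/MySuite.tap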
| bsd-2-clause | 9,054,455,438,370,134,000 | 33.580645 | 71 | 0.573694 | false |
quantopian/qdb | qdb/comm.py | 1 | 34207 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from abc import ABCMeta, abstractmethod
import atexit
from bdb import Breakpoint
import errno
from functools import partial
import json
import os
from pprint import pprint
import signal
import socket
from struct import pack, unpack
from textwrap import dedent
from logbook import Logger
from qdb.compat import (
Connection,
PY3,
gevent,
input,
items,
print_,
range,
with_metaclass,
)
from qdb.errors import (
QdbAuthenticationError,
QdbBreakpointReadError,
QdbCommunicationError,
QdbFailedToConnect,
QdbReceivedInvalidData,
QdbUnreachableBreakpoint,
)
from qdb.utils import Timeout, tco
log = Logger('Qdb')
def fmt_msg(event, payload=None, serial=None):
"""
Packs a message to be sent to the server.
Serial is a function to call on the frame to serialize it, e.g:
json.dumps.
"""
frame = {
'e': event,
'p': payload,
}
return serial(frame) if serial else frame
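# For illustration (editorial note, not in the original source): without a
# serializer the frame dict itself is returned; with serial=json.dumps it is
# flattened to a JSON string. Key order in the JSON form may vary by Python
# version.
#
#   fmt_msg('step')                        -> {'e': 'step', 'p': None}
#   fmt_msg('eval', '2 + 2', json.dumps)   -> '{"e": "eval", "p": "2 + 2"}'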
def fmt_err_msg(error_type, data, serial=None):
"""
Constructs an error message.
"""
return fmt_msg(
'error', {
'type': error_type,
'data': data,
},
serial=serial,
)
def fmt_breakpoint(breakpoint):
"""
formats breakpoint payload.
"""
return {
'file': breakpoint.file,
'line': breakpoint.line,
'temp': breakpoint.temporary,
'cond': breakpoint.cond,
'func': breakpoint.funcname,
}
class CommandManager(with_metaclass(ABCMeta, object)):
"""
An abstract base class for the command managers that control the tracer.
"""
def _fmt_stackframe(self, tracer, stackframe, line):
"""
Formats stackframe payload data.
"""
filename = stackframe.f_code.co_filename
func = stackframe.f_code.co_name
code = tracer.get_line(filename, line)
return {
'file': tracer.canonic(filename),
'line': line,
'func': func,
'code': code,
}
def send_disabled(self):
"""
Sends a message to the server to say that the tracer is done.
"""
try:
self.send_event('disabled')
except socket.error:
# We may safely ignore errors that occur here because we are
# already disabled.
pass
def send_breakpoints(self):
"""
Sends the breakpoint list event.
"""
self.send_event(
'breakpoints',
[fmt_breakpoint(breakpoint) for breakpoint in Breakpoint.bpbynumber
if breakpoint]
)
def send_watchlist(self, tracer):
"""
Sends the watchlist event.
"""
self.send_event(
'watchlist',
[{'expr': k, 'exc': exc, 'value': val}
for k, (exc, val) in items(tracer.watchlist)],
)
def send_print(self, input_, exc, output):
"""
Sends the print event with the given input and output.
"""
self.send(fmt_msg(
'print', {
'input': input_,
'exc': exc,
'output': output
},
serial=json.dumps)
)
def send_stack(self, tracer):
"""
Sends the stack event.
This filters out frames based on the rules defined in the tracer's
skip_fn. The index reported will account for any skipped frames, such
that querying the stack at the index provided will return the current
frame.
"""
stack = []
index = tracer.curindex
skip_fn = tracer.skip_fn
for n, (frame, line) in enumerate(tracer.stack):
if skip_fn(frame.f_code.co_filename):
if n < tracer.curindex:
index -= 1 # Drop the index to account for a skip
continue # Don't add frames we need to skip.
stack.append(self._fmt_stackframe(tracer, frame, line))
self.send_event(
'stack', {
'index': index,
'stack': stack,
}
)
def send_error(self, error_type, error_data):
"""
Sends a formatted error message.
"""
self.send(fmt_err_msg(error_type, error_data, serial=json.dumps))
def send_event(self, event, payload=None):
"""
Sends a formatted event.
"""
self.send(fmt_msg(event, payload, serial=json.dumps))
@tco
def next_command(self, tracer, msg=None):
"""
Processes the next command from the user.
If msg is given, it is sent with self.send(msg) before processing the
next command.
"""
if msg:
self.send(msg)
return self.user_next_command(tracer)
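    # Editorial note (not in the original source): ``next_command`` is wrapped
    # with the ``tco`` decorator imported from qdb.utils, so subclasses do not
    # call it recursively; they schedule the next iteration with
    #
    #     return self.next_command.tailcall(tracer)        # or (tracer, msg)
    #
    # which is how the command_* handlers below keep the command loop going
    # without growing the Python call stack.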
@abstractmethod
def send(self, msg):
"""
Sends a raw (already jsond) message.
"""
raise NotImplementedError
@abstractmethod
def user_next_command(self, tracer):
"""
Processes the next command.
This method must be overridden to dictate how the commands are
processed.
"""
raise NotImplementedError
@abstractmethod
def start(self, tracer, auth_msg=''):
"""
Start acquiring new commands.
"""
raise NotImplementedError
def stop(self):
"""
Stop acquiring new commands.
"""
self.send_disabled()
self.user_stop()
@abstractmethod
def user_stop(self):
"""
        Use this to release any resources needed to generate the commands.
"""
raise NotImplementedError
class NopCommandManager(CommandManager):
"""
Nop command manager that never alters the state of the debugger.
This is useful if you want to manage the debugger in an alternate way.
"""
def user_next_command(self, tracer):
pass
def send(self, msg):
pass
def start(self, tracer, msg):
pass
def user_stop(self):
pass
class RemoteCommandManager(CommandManager):
"""
Manager that processes commands from the server.
This is the default Qdb command manager.
"""
def __init__(self):
super(RemoteCommandManager, self).__init__()
if gevent is not None:
import gipc # Only use gipc if we are running in gevent.
self._pipe = gipc.pipe
self._start_process = gipc.start_process
else:
import multiprocessing
def _pipe(*args, **kwargs):
a, b = multiprocessing.Pipe(*args, **kwargs)
return Connection(a), Connection(b)
self._pipe = _pipe
def _start_process(*args, **kwargs):
proc = multiprocessing.Process(*args, **kwargs)
proc.start()
return proc
self._start_process = _start_process
self.pipe = None
self.socket = None
self.reader = None
def _socket_connect(self, tracer):
"""
Connects to the socket or raise a QdbFailedToConnect error.
"""
log.info('Connecting to (%s, %d)' % tracer.address)
for n in range(tracer.retry_attempts):
# Try to connect to the server.
try:
self.socket = socket.create_connection(tracer.address)
# If we made it here, we connected and no longer need to retry.
break
except socket.error:
log.warn(
'Client %s failed to connect to (%s, %d) on attempt %d...'
% (tracer.uuid, tracer.address[0],
tracer.address[1], n + 1)
)
if self.socket is None:
log.warn(
                'Failed to connect to (%s, %d), no longer retrying.'
% tracer.address
)
raise QdbFailedToConnect(
tracer.address,
tracer.retry_attempts
)
log.info('Client %s connected to (%s, %d)'
% (tracer.uuid, tracer.address[0],
tracer.address[1]))
def start(self, tracer, auth_msg=''):
"""
Begins processing commands from the server.
"""
self.pipe, child_end = self._pipe()
self._socket_connect(tracer)
self.reader = self._start_process(
target=ServerReader,
args=(child_end, os.getpid(),
self.socket.fileno(),
tracer.pause_signal),
)
with Timeout(5, QdbFailedToConnect(tracer.address,
tracer.retry_attempts)):
# Receive a message to know that the reader is ready to begin.
while True:
try:
self.pipe.get()
break
except IOError as e:
# EAGAIN says to try the syscall again.
if e.errno != errno.EAGAIN:
raise
self.send(
fmt_msg(
'start', {
'uuid': tracer.uuid,
'auth': auth_msg,
'local': (0, 0),
},
serial=json.dumps,
)
)
signal.signal(
tracer.pause_signal, partial(self._pause_handler, tracer)
)
atexit.register(self.stop)
def user_stop(self):
"""
Stops the command manager, freeing its resources.
"""
if self.reader and self.reader.is_alive():
self.reader.terminate()
self.socket.close()
def fmt_breakpoint_dict(self, tracer, breakpoint):
"""
Makes our protocol for breakpoints match the Bdb protocol.
"""
if 'file' not in breakpoint and tracer.default_file:
breakpoint['file'] = tracer.default_file
if 'file' in breakpoint and 'line' in breakpoint:
# Do some formatting here to make the params cleaner.
breakpoint['filename'] = breakpoint.pop('file')
breakpoint['lineno'] = breakpoint.pop('line')
breakpoint['temporary'] = breakpoint.pop('temp', None)
breakpoint['funcname'] = breakpoint.pop('func', None)
breakpoint.setdefault('cond', None)
return breakpoint
raise QdbBreakpointReadError(breakpoint)
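    # Example of the translation above (editorial comment, not part of the
    # original source); the file name is made up:
    #
    #   {'file': 'script.py', 'line': 10, 'temp': True}
    #     -> {'filename': 'script.py', 'lineno': 10, 'temporary': True,
    #         'funcname': None, 'cond': None}
    #
    # i.e. the keyword arguments that Bdb.set_break expects.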
def send(self, msg):
"""
Sends a message to the server.
"""
self.socket.sendall(pack('>i', len(msg)))
self.socket.sendall(msg.encode('utf-8'))
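    # Wire format note (editorial, not part of the original source): each
    # message is a 4-byte big-endian length prefix followed by the UTF-8
    # encoded JSON text. Sending the 17-character string '{"e": "disabled"}'
    # therefore writes b'\x00\x00\x00\x11' first and then the 17 payload
    # bytes; get_events_from_socket below reads the same framing back with
    # unpack('>i', ...).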
def payload_check(self, payload, command):
"""
Asserts that payload is not None, sending an error message if it is.
returns False if payload is None, otherwise returns True.
"""
if payload is None:
self.send_error('payload', '%s: expected payload' % command)
return False
return True
def _pause_handler(self, tracer, signum, stackframe):
"""
Manager for the pause command.
"""
if signum == tracer.pause_signal:
tracer.set_step()
def get_events(self):
"""
Infinitely yield events from the Reader.
"""
while self.reader.is_alive():
try:
event = self.pipe.get()
except IOError as i:
if i.errno == errno.EAGAIN:
continue
raise
yield event
def get_commands(self, tracer):
"""
Yields the commands out of the events.
"""
for event in self.get_events():
if event['e'] == 'error':
self.handle_error(event.get('p'))
else:
command = getattr(self, 'command_' + event['e'], None)
if not command:
self.send_error('event', 'Command %s does not exist'
% event['e'])
else:
yield lambda: command(tracer, event.get('p'))
def handle_error(self, payload):
if payload['type'] == 'auth':
raise QdbAuthenticationError(payload['data'])
else:
raise QdbCommunicationError(payload)
def user_next_command(self, tracer, msg=None):
"""
Processes the next message from the reader.
"""
try:
return next(self.get_commands(tracer))()
except StopIteration:
raise QdbCommunicationError('No more commands from server')
def command_step(self, tracer, payload):
tracer.set_step()
def command_return(self, tracer, payload):
tracer.set_return(tracer.curframe)
def command_next(self, tracer, payload):
tracer.set_next(tracer.curframe)
def command_until(self, tracer, payload):
tracer.set_until(tracer.curframe)
def command_continue(self, tracer, payload):
tracer.set_continue()
def command_pprint(self, tracer, payload):
"""
Evaluates the expression with the pretty printer.
"""
return self.command_eval(tracer, payload, pprint=True)
def command_eval(self, tracer, payload, pprint=False):
"""
        Evaluates an expression in tracer.curframe, reevaluates the
watchlist, and defers to user control.
"""
if not self.payload_check(payload, 'eval'):
return self.next_command.tailcall(tracer)
tracer.eval_(payload, pprint)
self.send_watchlist(tracer)
return self.next_command.tailcall(tracer)
def command_set_watch(self, tracer, payload):
"""
Extends the watchlist and defers to user control.
"""
if not self.payload_check(payload, 'set_watch'):
return self.next_command.tailcall(tracer)
tracer.extend_watchlist(*payload)
self.send_watchlist(tracer)
return self.next_command.tailcall(tracer)
def command_clear_watch(self, tracer, payload):
"""
Clears expressions from the watchlist and defers to user control.
"""
if not self.payload_check(payload, 'clear_watch'):
return self.next_command.tailcall(tracer)
for w in payload:
# Default to None so that clearing values that have not been set
# acts as a nop instead of an error.
tracer.watchlist.pop(w, None)
self.send_watchlist(tracer)
return self.next_command.tailcall(tracer)
def command_set_break(self, tracer, payload):
"""
Sets a breakpoint and defers to user control.
"""
if not self.payload_check(payload, 'set_break'):
return self.next_command.tailcall(tracer)
try:
breakpoint = self.fmt_breakpoint_dict(tracer, payload)
except QdbBreakpointReadError as b:
err_msg = fmt_err_msg('set_break', str(b), serial=json.dumps)
return self.next_command.tailcall(tracer, err_msg)
err_msg = None
try:
tracer.set_break(**breakpoint)
except QdbUnreachableBreakpoint as u:
err_msg = fmt_err_msg(
'set_breakpoint',
str(u),
serial=json.dumps
)
return self.next_command.tailcall(tracer, err_msg)
def command_clear_break(self, tracer, payload):
"""
Clears a breakpoint and defers to user control.
"""
if not self.payload_check(payload, 'clear_break'):
return self.next_command.tailcall(tracer)
try:
breakpoint = self.fmt_breakpoint_dict(tracer, payload)
except QdbBreakpointReadError as b:
err_msg = fmt_err_msg('clear_break', str(b), serial=json.dumps)
return self.next_command.tailcall(tracer, err_msg)
tracer.clear_break(**breakpoint)
return self.next_command.tailcall(tracer)
def command_list(self, tracer, payload):
"""
List the contents of a file and defer to user control.
"""
if not self.payload_check(payload, 'list'):
return self.next_command.tailcall(tracer)
filename = payload.get('file') or tracer.default_file
try:
if tracer.skip_fn(filename):
raise KeyError # Handled the same, avoids duplication.
if not (payload.get('start') or payload.get('end')):
msg = fmt_msg(
'list',
tracer.get_file(payload['file']),
serial=json.dumps
)
else:
# Send back the slice of the file that they requested.
msg = fmt_msg(
'list',
'\n'.join(
tracer.get_file_lines(tracer.canonic(filename))[
int(payload.get('start')):int(payload.get('end'))
]
),
serial=json.dumps
)
except KeyError: # The file failed to be cached.
msg = fmt_err_msg(
'list',
'File %s does not exist' % payload['file'],
serial=json.dumps
)
except TypeError:
# This occurs when we fail to convert the 'start' or 'stop' fields
# to integers.
msg = fmt_err_msg(
'list',
'List slice arguments must be convertable to type int',
serial=json.dumps
)
        return self.next_command.tailcall(tracer, msg)
def command_up(self, tracer, payload):
"""
Step up the stack and defer to user control.
This will 'ignore' frames that we should skip, potentially going up
more than one stackframe.
"""
try:
tracer.stack_shift_direction(+1)
except IndexError:
self.send_error('up', 'Oldest frame')
self.send_watchlist(tracer)
self.send_stack(tracer)
return self.next_command.tailcall(tracer)
def command_down(self, tracer, payload):
"""
Step down the stack and defer to user control
This will 'ignore' frames that we should skip, potentially going down
more than one stackframe.
"""
try:
tracer.stack_shift_direction(-1)
except IndexError:
self.send_error('down', 'Newest frame')
self.send_watchlist(tracer)
self.send_stack(tracer)
return self.next_command.tailcall(tracer)
def command_locals(self, tracer, payload):
"""
Sends back the current frame locals and defer to user control.
"""
self.send_event('locals', tracer.curframe_locals)
return self.next_command.tailcall(tracer)
def command_start(self, tracer, payload):
"""
Sends back initial information and defers to user control.
"""
self.send_breakpoints()
self.send_watchlist(tracer)
self.send_stack(tracer)
return self.next_command.tailcall(tracer)
def command_disable(self, tracer, payload):
"""
Disables the tracer.
"""
if not self.payload_check(payload, 'disable'):
return self.next_command.tailcall(tracer)
if payload not in ['soft', 'hard']:
err_msg = fmt_err_msg(
'disable',
"payload must be either 'soft' or 'hard'",
serial=json.dumps
)
            return self.next_command.tailcall(tracer, err_msg)
tracer.disable(payload)
def get_events_from_socket(sck):
"""
Yields valid events from the server socket.
"""
while True:
try:
sck.setblocking(True)
resp = bytearray(4)
if sck.recv_into(resp, 4) != 4:
raise QdbReceivedInvalidData(resp)
rlen = unpack('>i', resp)[0]
resp = bytearray(rlen)
sck.settimeout(1)
if sck.recv_into(resp, rlen) != rlen:
raise QdbReceivedInvalidData(resp)
if PY3:
resp = resp.decode('utf-8')
else:
resp = bytes(resp)
cmd = json.loads(resp)
if cmd['e'] == 'disabled':
# We are done tracing.
return
except KeyError:
log.warn('Client sent invalid cmd.')
yield fmt_err_msg('event', "No 'e' field sent")
return
except Exception as e:
# We can no longer talk to the server.
log.warn('Exception raised reading from socket')
yield fmt_err_msg('socket', str(e))
return
else:
# Yields only valid commands.
yield cmd
class ServerReader(object):
"""
Object that reads from the server asynchronously from the process
being debugged.
"""
def __init__(self, debugger_pipe, session_pid, server_comm_fd,
pause_signal):
self.pause_signal = pause_signal or signal.SIGUSR2
self.debugger_pipe = debugger_pipe
self.server_comm = socket.fromfd(server_comm_fd, 0, 0)
self.session_pid = session_pid
self.socket_error = None
self.process_messages()
def command_pause(self):
"""
Manages the pause command by raising a user defined signal in the
session process which will be caught by the command manager.
"""
os.kill(self.session_pid, self.pause_signal)
def process_messages(self):
"""
Infinitely reads events off the server. If it is a pause, then it
pauses the process; otherwise, it passes the message along.
"""
# Send a message to alert the tracer that we are ready to begin reading
# messages.
self.debugger_pipe.put(fmt_msg('reader_started'))
try:
for event in get_events_from_socket(self.server_comm):
if event['e'] == 'pause':
self.command_pause()
else:
self.debugger_pipe.put(event)
# If we get here, we had a socket error that dropped us
# out of get_events(), signal this to the process.
self.debugger_pipe.put(fmt_msg('disable', 'soft'))
finally:
log.info('ServerReader terminating')
class ServerLocalCommandManager(RemoteCommandManager):
"""
Use this command manager if you know for certain that the tracer will be
running on the same machine as the server. This circumvents the need for
spinning up the Reader process and lets the server take over some of that
responsibility. While using a normal RemoteCommandManager will work, this
incurs less overhead.
"""
def start(self, tracer, auth_msg=''):
"""
Begins processing commands from the server.
"""
self._socket_connect(tracer)
self.send(
fmt_msg(
'start', {
'uuid': tracer.uuid,
'auth': auth_msg,
'local': (os.getpid(), tracer.pause_signal),
},
serial=json.dumps,
)
)
def user_stop(self):
self.socket.close()
def get_events(self):
return get_events_from_socket(self.socket)
class TerminalCommandManager(CommandManager):
def __init__(self):
super(TerminalCommandManager, self).__init__()
self._sticky = True
self._redraw = True
# Side effectful imports ;_;
import rlcompleter # NOQA
import readline
self.readline = readline
readline.parse_and_bind("tab: complete")
def pprint(self, msg):
pprint(msg)
def send(self, event):
event = json.loads(event)
evfn = getattr(self, 'event_' + event['e'], None)
if not evfn:
self.unknown_event(event['e'])
else:
evfn(event.get('p'))
def writeln(self, msg=''):
print_(msg)
def writeerr(self, msg=''):
self.writeln('*** error: ' + msg)
def missing_argument(self, cmd):
self.writeerr('{cmd}: missing argument(s)'.format(cmd=cmd))
def unknown_event(self, e):
self.writeerr('{0}: unknown event type'.format(e))
def event_print(self, payload):
out = payload['output']
if out:
self.writeln(
'%s%s' % ('*** error: ' if payload['exc'] else '', out),
)
def event_stack(self, payload):
frame = payload['stack'][payload['index']] # Current frame
self.writeln('> {file}:{line}'.format(**frame))
if not self._sticky:
self.writeln('--> ' + frame['code'])
def event_watchlist(self, payload):
self.writeln('watchlist: [')
for watched in payload:
self.writeln(
' > %s%s: %s'
% ('*** error: ' if watched['exc'] else '',
watched['expr'], watched['value'])
)
self.writeln(']')
def event_exception(self, payload):
self.writeln('--* %s:%s' % (payload['type'], payload['value']))
def event_breakpoints(self, payload):
self.writeln('breakpoints: [')
for breakpoint in payload:
self.writeln(
' > {file} {line} {temp} {cond} {func}'.format(**breakpoint),
)
self.writeln(']')
def event_error(self, payload):
self.writeerr('{0}: {1}'.format(payload['type'], payload['data']))
def event_return(self, payload):
self.writeln('---> returning with %s' % payload)
def event_disabled(self, payload):
self.writeln()
def start(self, tracer, auth_msg=''):
pass
def user_stop(self):
pass
def prompt_user(self):
inp = input('(qdb) ').split(None, 1)
if not inp:
rl = self.readline
            history = rl.get_history_item(rl.get_current_history_length())
            inp = history.split(None, 1) if history else []
return inp
def user_next_command(self, tracer):
if self._sticky and self._redraw:
self.do_list(None, tracer, recurse=False)
self._redraw = False
try:
while True:
try:
inp = self.prompt_user()
while not inp:
inp = self.prompt_user()
break
except KeyboardInterrupt:
self.writeln()
except EOFError:
inp = ('quit',)
cmd = inp[0]
if cmd.endswith('?') and hasattr(self, 'do_' + cmd[:-1]):
self.writeln(dedent(getattr(self, 'do_' + cmd[:-1]).__doc__))
return self.user_next_command(tracer)
command = getattr(self, 'do_' + cmd, None)
if command is None:
return self.do_print(' '.join(inp), tracer)
else:
try:
arg = inp[1]
except IndexError:
arg = None
return command(arg, tracer)
def do_print(self, arg, tracer):
"""
p(rint)
Print the following expression
"""
tracer.eval_(arg)
return self.next_command.tailcall(tracer)
do_p = do_print
def do_step(self, arg, tracer):
"""
s(tep)
Execute the next line, function call, or return.
"""
self._redraw = True
tracer.set_step()
do_s = do_step
def do_return(self, arg, tracer):
"""
r(eturn)
Execute until the return event for the current stackframe.
"""
self._redraw = True
tracer.set_return(tracer.curframe)
do_r = do_return
def do_next(self, arg, tracer):
"""
n(ext)
Execute up to the next line in the current frame.
"""
self._redraw = True
tracer.set_next(tracer.curframe)
do_n = do_next
def do_until(self, arg, tracer):
"""
unt(il)
Execute until the line greater than the current is hit or until
you return from the current frame.
"""
self._redraw = True
tracer.set_until(tracer.curframe)
do_unt = do_until
def do_continue(self, arg, tracer):
"""
c(ontinue)
Continue execution until the next breakpoint is hit. If there are
no more breakpoints, stop tracing.
"""
self._redraw = True
tracer.set_continue()
do_c = do_continue
def do_watch(self, arg, tracer):
"""
w(atch) EXPR
Adds an expression to the watchlist.
"""
if not arg:
return self.missing_argument('w(atch)')
tracer.extend_watchlist((arg,))
return self.next_command.tailcall(tracer)
do_w = do_watch
def do_unwatch(self, arg, tracer):
"""
unw(atch) EXPR
Removes an expression from the watchlist if it is already being
watched, otherwise does nothing.
"""
if not arg:
return self.missing_argument('unw(atch)')
tracer.watchlist.pop(arg, None)
return self.next_command.tailcall(tracer)
do_unw = do_unwatch
def do_break(self, arg, tracer, temp=False):
"""
b(reak) BREAK-DICT
Adds a breakpoint with the form:
{'file': str, 'line': int, 'temp': bool, 'cond': str, 'func': str}
"""
if not arg:
self.missing_argument('b(reak)')
return
break_arg = self.parse_break_arg(arg, temp)
if break_arg:
tracer.set_break(**break_arg)
return self.next_command.tailcall(tracer)
do_b = do_break
def do_clear(self, arg, tracer):
"""
cl(ear) BREAK-DICT
Clears a breakpoint with the form:
{'file': str, 'line': int, 'temp': bool, 'cond': str, 'func': str}
Only 'file' and 'line' are needed.
"""
if not arg:
self.missing_argument('cl(ear)')
return
break_arg = self.parse_break_arg(arg)
if break_arg:
tracer.clear_break(**break_arg)
return self.next_command.tailcall(tracer)
do_cl = do_clear
def do_tbreak(self, arg, tracer):
"""
tbreak BREAK-DICT
Same as break, but with 'temp' defaulted to True.
"""
return self.do_break(arg, tracer, temp=True)
def do_list(self, arg, tracer, recurse=True):
"""
l(ist) FILE [START, [END]]
Shows the content of a file where START is the first line to show
and END is the last. This acts like a Python slice.
"""
start = end = None
try:
start, end = map(int, arg.split() if arg else ())
except (TypeError, ValueError):
pass
curline = tracer.curframe.f_lineno
if start is None and end is None and arg != ':':
start = curline - 5
if start < 0:
start = 0
end = curline + 5
def prepend(ix_l):
return (
'%s ' % ('-->' if ix_l[0] == curline else ' ')
) + ix_l[1]
self.writeln(
'\n'.join(
map(
prepend,
enumerate(
tracer.get_file_lines(
tracer.curframe.f_code.co_filename,
)[start:end],
1 if start is None else start + 1,
)
)
),
)
if recurse:
return self.next_command.tailcall(tracer)
do_l = do_list
def do_up(self, arg, tracer):
"""
u(p)
Steps up a stackframe if possible.
"""
try:
tracer.stack_shift_direction(+1)
except IndexError:
self.writeerr('up: top of stack')
else:
self.do_list(None, tracer, recurse=False)
return self.next_command.tailcall(tracer)
do_u = do_up
def do_down(self, arg, tracer):
"""
d(own)
Steps down a stackframe if possible.
"""
try:
tracer.stack_shift_direction(-1)
except IndexError:
self.writeerr('down: bottom of stack')
else:
self.do_list(None, tracer, recurse=False)
return self.next_command.tailcall(tracer)
do_d = do_down
def do_locals(self, arg, tracer):
"""
locals
Report back the current stackframe's locals.
"""
self.writeln('locals: [')
for p in items(tracer.curframe_locals):
self.writeln(' %s=%s' % p)
self.writeln(']')
return self.next_command.tailcall(tracer)
def do_quit(self, arg, tracer):
"""
q(uit) [MODE]
        Stops the debugging session with the given mode, defaulting to
        'hard'.
"""
if not arg or arg in ('soft', 'hard'):
tracer.disable(arg or 'hard')
else:
self.writeerr("disable: argument must be 'soft' or 'hard'")
def do_sticky(self, arg, tracer):
"""
sticky
Toggle sticky mode; printing the current context after every step.
"""
self._sticky = not self._sticky
if self._sticky:
return self.do_list(None, tracer)
return self.next_command.tailcall(tracer)
| apache-2.0 | -142,132,856,372,043,680 | 29.541964 | 79 | 0.540679 | false |
FrodeSolheim/fs-uae-launcher | fsgamesys/input/dinputkeycodes.py | 1 | 6071 | """
from dinput.h
"""
dinput_key_codes = {
"BACKSPACE": 0x0E,
"TAB": 0x0F,
"CLEAR": 0, # FIXME
"RETURN": 0x1C,
"PAUSE": 0, # FIXME
"ESCAPE": 0x01,
"SPACE": 0x39,
"EXCLAIM": 0, # FIXME
"QUOTEDBL": 0, # FIXME
"HASH": 0, # FIXME
"DOLLAR": 0, # FIXME
"AMPERSAND": 0, # FIXME
"QUOTE": 0x28,
"LEFTPAREN": 0, # FIXME
"RIGHTPAREN": 0, # FIXME
"ASTERISK": 0, # FIXME
"PLUS": 0, # FIXME
"COMMA": 0x33,
"MINUS": 0x0C,
"PERIOD": 0x34,
"SLASH": 0x35,
"0": 0x0B,
"1": 0x02,
"2": 0x03,
"3": 0x04,
"4": 0x05,
"5": 0x06,
"6": 0x07,
"7": 0x08,
"8": 0x09,
"9": 0x0A,
"COLON": 0x92,
"SEMICOLON": 0x27,
"LESS": 0, # FIXME
"EQUALS": 0x0D,
"GREATER": 0, # FIXME
"QUESTION": 0, # FIXME
"AT": 0x91,
"LEFTBRACKET": 0x1A,
"BACKSLASH": 0x2B,
"RIGHTBRACKET": 0x1B,
"CARET": 0, # FIXME
"UNDERSCORE": 0x93,
"BACKQUOTE": 0, # FIXME
"A": 0x1E,
"B": 0x30,
"C": 0x2E,
"D": 0x20,
"E": 0x12,
"F": 0x21,
"G": 0x22,
"H": 0x23,
"I": 0x17,
"J": 0x24,
"K": 0x25,
"L": 0x26,
"M": 0x32,
"N": 0x31,
"O": 0x18,
"P": 0x19,
"Q": 0x10,
"R": 0x13,
"S": 0x1F,
"T": 0x14,
"U": 0x16,
"V": 0x2F,
"W": 0x11,
"X": 0x2D,
"Y": 0x15,
"Z": 0x2C,
"DELETE": 0xD3,
"KP0": 0x52,
"KP1": 0x4F,
"KP2": 0x50,
"KP3": 0x51,
"KP4": 0x4B,
"KP5": 0x4C,
"KP6": 0x4D,
"KP7": 0x47,
"KP8": 0x48,
"KP9": 0x49,
"KP_PERIOD": 0x53,
"KP_DIVIDE": 0xB5,
"KP_MULTIPLY": 0x37,
"KP_MINUS": 0x4A,
"KP_PLUS": 0x4E,
"KP_ENTER": 0x9C,
"KP_EQUALS": 0x8D,
"UP": 0xC8,
"DOWN": 0xD0,
"RIGHT": 0xCD,
"LEFT": 0xCB,
"INSERT": 0xD2,
"HOME": 0xC7,
"END": 0xCF,
"PAGEUP": 0xC9,
"PAGEDOWN": 0xD1,
"F1": 0x3B,
"F2": 0x3C,
"F3": 0x3D,
"F4": 0x3E,
"F5": 0x3F,
"F6": 0x40,
"F7": 0x41,
"F8": 0x42,
"F9": 0x43,
"F10": 0x44,
"F11": 0x57,
"F12": 0x58,
"F13": 0x64,
"F14": 0x65,
"F15": 0x66,
"NUMLOCK": 0x45,
"CAPSLOCK": 0x3A,
"SCROLLOCK": 0x46,
"RSHIFT": 0x36,
"LSHIFT": 0x2A,
"RCTRL": 0x9D,
"LCTRL": 0x1D,
"RALT": 0xB8,
"LALT": 0x38,
"RMETA": 0, # FIXME
"LMETA": 0, # FIXME
"LSUPER": 0xDB,
"RSUPER": 0xDC,
"MODE": 0, # FIXME
"COMPOSE": 0, # FIXME
"HELP": 0, # FIXME
"PRINT": 0, # FIXME
"SYSREQ": 0xB7,
"BREAK": 0, # FIXME
"MENU": 0, # FIXME
"POWER": 0, # FIXME
"EURO": 0, # FIXME
"UNDO": 0, # FIXME
}
sdlk_to_dik = {
"ESCAPE": "ESCAPE",
"1": "1",
"2": "2",
"3": "3",
"4": "4",
"5": "5",
"6": "6",
"7": "7",
"8": "8",
"9": "9",
"0": "0",
"MINUS": "MINUS",
"EQUALS": "EQUALS",
"BACKSPACE": "BACK",
"TAB": "TAB",
"Q": "Q",
"W": "W",
"E": "E",
"R": "R",
"T": "T",
"Y": "Y",
"U": "U",
"I": "I",
"O": "O",
"P": "P",
"LEFTBRACKET": "LBRACKET",
"RIGHTBRACKET": "RBRACKET",
"RETURN": "RETURN",
"LCTRL": "LCONTROL",
"A": "A",
"S": "S",
"D": "D",
"F": "F",
"G": "G",
"H": "H",
"J": "J",
"K": "K",
"L": "L",
"SEMICOLON": "SEMICOLON",
"QUOTE": "APOSTROPHE",
"BACKQUOTE": "GRAVE",
"LSHIFT": "LSHIFT",
"BACKSLASH": "BACKSLASH",
"Z": "Z",
"X": "X",
"C": "C",
"V": "V",
"B": "B",
"N": "N",
"M": "M",
"COMMA": "COMMA",
"PERIOD": "PERIOD",
"SLASH": "SLASH",
"RSHIFT": "RSHIFT",
"KP_MULTIPLY": "MULTIPLY",
"LALT": "LMENU",
"SPACE": "SPACE",
"CAPSLOCK": "CAPITAL",
"F1": "F1",
"F2": "F2",
"F3": "F3",
"F4": "F4",
"F5": "F5",
"F6": "F6",
"F7": "F7",
"F8": "F8",
"F9": "F9",
"F10": "F10",
"NUMLOCK": "NUMLOCK",
"SCROLLOCK": "SCROLL",
"KP7": "NUMPAD7",
"KP8": "NUMPAD8",
"KP9": "NUMPAD9",
"KP_MINUS": "SUBTRACT",
"KP4": "NUMPAD4",
"KP5": "NUMPAD5",
"KP6": "NUMPAD6",
"KP_PLUS": "ADD",
"KP1": "NUMPAD1",
"KP2": "NUMPAD2",
"KP3": "NUMPAD3",
"KP0": "NUMPAD0",
"KP_PERIOD": "DECIMAL",
"F11": "F11",
"F12": "F12",
"F13": "F13",
"F14": "F14",
"F15": "F15",
# "": "KANA",
# "": "CONVERT",
# "": "NOCONVERT",
# "": "YEN",
# "": "NUMPADEQUALS",
# "": "CIRCUMFLEX",
"AT": "AT",
"COLON": "COLON",
# "": "UNDERLINE",
# "": "KANJI",
# "": "STOP",
# "": "AX",
# "": "UNLABELED",
"KP_ENTER": "NUMPADENTER",
"RCTRL": "RCONTROL",
# "": "NUMPADCOMMA",
"KP_DIVIDE": "DIVIDE",
"SYSREQ": "SYSRQ",
"RALT": "RMENU",
"HOME": "HOME",
"UP": "UP",
"PAGEUP": "PRIOR",
"LEFT": "LEFT",
"RIGHT": "RIGHT",
"END": "END",
"DOWN": "DOWN",
"PAGEDOWN": "NEXT",
"INSERT": "INSERT",
"DELETE": "DELETE",
"LSUPER": "LWIN",
"RSUPER": "RWIN",
# "": "APPS",
}
# "BACKSPACE": "BACKSPACE DIK_BACK /* backspace */
# "KP_MULTIPLY": "NUMPADSTAR DIK_MULTIPLY /* * on numeric keypad */
# "": "LALT DIK_LMENU /* left Alt */
# "": "CAPSLOCK DIK_CAPITAL /* CapsLock */
# "": "NUMPADMINUS DIK_SUBTRACT /* - on numeric keypad */
# "": "NUMPADPLUS DIK_ADD /* + on numeric keypad */
# "": "NUMPADPERIOD DIK_DECIMAL /* . on numeric keypad */
# "": "NUMPADSLASH DIK_DIVIDE /* / on numeric keypad */
# "": "RALT DIK_RMENU /* right Alt */
# "": "UPARROW DIK_UP /* UpArrow on arrow keypad */
# "": "PGUP DIK_PRIOR /* PgUp on arrow keypad */
# "": "LEFTARROW DIK_LEFT /* LeftArrow on arrow keypad */
# "": "RIGHTARROW DIK_RIGHT /* RightArrow on arrow keypad */
# "": "DOWNARROW DIK_DOWN /* DownArrow on arrow keypad */
# "": "PGDN DIK_NEXT /* PgDn on arrow keypad */
| gpl-2.0 | -8,757,528,464,550,546,000 | 20.682143 | 79 | 0.420853 | false |
open-synergy/opnsynid-hr | hr_expense_header_account/tests/test_onchange.py | 1 | 2544 | # -*- coding: utf-8 -*-
# Copyright 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .base import BaseCase
class HrExpensePayableAccount(BaseCase):
# Check value account_id
# Condition :
# journal_id == True
# default_credit_account == True
# home_address == True
# property_account_payable == True
def test_onchange_journal_id_1(self):
with self.env.do_in_onchange():
new = self.obj_hr_expense.new()
new.journal_id = self.journal_1.id
new.employee_id = self.employee_1.id
new.onchange_account_id()
self.assertEqual(
self.journal_1.default_credit_account_id.id,
new.account_id.id)
# Check value account_id
# Condition :
# journal_id == True
# default_credit_account == False
# home_address == True
# property_account_payable == True
def test_onchange_journal_id_2(self):
with self.env.do_in_onchange():
new = self.obj_hr_expense.new()
new.journal_id = self.journal_2.id
new.employee_id = self.employee_1.id
property_account_payable =\
new._get_partner_account()
new.onchange_account_id()
self.assertEqual(
property_account_payable,
new.account_id.id)
# Check value account_id
# Condition :
# journal_id == True
# default_credit_account == False
# home_address == False
# property_account_payable == False
def test_onchange_journal_id_3(self):
with self.env.do_in_onchange():
new = self.obj_hr_expense.new()
new.journal_id = self.journal_2.id
new.employee_id = self.employee_2.id
new.onchange_account_id()
self.assertEqual(
False,
new.account_id.id)
# Check value account_id
# Condition :
# journal_id == False
# default_credit_account == False
# home_address == True
# property_account_payable == True
def test_onchange_journal_id_4(self):
with self.env.do_in_onchange():
new = self.obj_hr_expense.new()
new.journal_id = False
new.employee_id = self.employee_1.id
property_account_payable =\
new._get_partner_account()
new.onchange_account_id()
self.assertEqual(
property_account_payable,
new.account_id.id)
| agpl-3.0 | -4,510,974,846,039,466,000 | 27.266667 | 63 | 0.56761 | false |
ForAP/Advanc3d-Pr0graming | A5-Tree/A5-Updated/toolbox.py | 1 | 2769 | # File name: toolbox.py
import kivy
kivy.require('1.7.0')
import math
from kivy.uix.togglebutton import ToggleButton
from kivy.graphics import Line
from turingwidgets import StateRep, DraggableWidget, Transition1, Transition2
from kivy.utils import get_color_from_hex
from kivy.graphics import Color, Ellipse, Line, Bezier
class ToolState(ToggleButton):
def on_touch_down(self, touch):
ds = self.parent.drawing_space
if self.state == 'down' and ds.collide_point(touch.x, touch.y):
(x,y) = ds.to_widget(touch.x, touch.y)
print (x,y)
self.draw(ds, x, y)
return True
return super(ToolState, self).on_touch_down(touch)
def draw(self, ds, x, y):
sm = StateRep(width=48, height=48)
sm.center = (x,y)
sm.opacity = 1
print type(ds.children)
print "ds children is " + str(ds.children)
print "ds children's length is " + str(len(ds.children))
go = self.parent.general_options
go.add_state()
ds.add_widget(sm)
ds.children[0].set_state(go.nameCounter - 1)
#if len(ds.children) > 2:
# tool = ToolTransition()
# tool.draw(ds,ds.children[-1],ds.children[-2])
class ToolTransition(ToggleButton):
#still to test and fix...
def draw_transition(self, current_state, transInfo, transitionCounter):
ds = self.parent.drawing_space
go = self.parent.general_options
print ds.children
stateOneID = int(current_state) + transitionCounter + 1
print stateOneID
stateOne = ds.children[-(stateOneID)]
# search for whether transition has already been created
if (current_state, transInfo[2]) in go.transitions:
key = go.transitions[(current_state, transInfo[2])]
# searches through the drawingspace for the transition and updates the label
for child in ds.children:
if isinstance(child, Transition1) or isinstance(child, Transition2):
if child.key == key:
child.update_label(transInfo)
# find what kind of transition is to be drawn
if current_state == transInfo[2]:
t = Transition1(stateOne,transInfo,go.keyNum)
ds.add_widget(t)
go.transitions.update({(current_state, transInfo[2]):go.keyNum})
go.keyNum += 1
go.transitionCounter += 1
else:
stateTwo = ds.children[-(int(transInfo[2])+transitionCounter+1)]
t = Transition2(stateOne,stateTwo,transInfo,go.keyNum)
ds.add_widget(t)
go.transitions.update({(current_state, transInfo[2]):go.keyNum})
go.keyNum += 1
go.transitionCounter += 1
| gpl-2.0 | 2,529,451,731,753,912,300 | 39.130435 | 84 | 0.615746 | false |
leebecker/jembatan | jembatan/analyzers/spacy.py | 1 | 10874 | import re
import itertools
import functools
from enum import auto, Flag
from jembatan.core.spandex import (Span, Spandex)
from jembatan.core.af import process_default_view, AnalysisFunction
from jembatan.typesys.chunking import NounChunk, Entity
from jembatan.typesys.segmentation import (Document, Sentence, Token)
from jembatan.typesys.syntax import (DependencyEdge, DependencyNode, DependencyParse)
class AnnotationLayers(Flag):
"""Enumerated type useful for turning on/off behavior in Spacy Analyzers
"""
DOCUMENT = auto()
SENTENCE = auto()
TOKEN = auto()
DEPPARSE = auto()
ENTITY = auto()
NOUN_CHUNK = auto()
@classmethod
def NONE(cls):
return functools.reduce(lambda x, y: x | y, [f for f in cls])
@classmethod
def ALL(cls):
return functools.reduce(lambda x, y: x | y, [f for f in cls])
@classmethod
def contains(flagset, flag):
return bool(flagset & flag)
class SpacyToJson(object):
def __init__(self):
pass
def serialize_token(self, t): #, token_lookup, sent_idx):
res = {}
text = {
"content": t.text,
"beginOffset": t.idx
}
partOfSpeech = {
"tag": t.pos_, # This switch is to make naming consistent with Google Natural Language API
"pos": t.tag_ # This is not a field in Google's API, but the original Treebank
}
depEdge = {
"headTokenIndex": t.head.i,
"label": t.dep_
}
return {
"text": text,
"partOfSpeech": partOfSpeech,
"lemma": t.lemma_,
"dependencyEdge": depEdge
}
def serialize_sentence(self, s):
return {
"text": {
"content": s.text,
"beginOffset": s.start_char
},
"sentiment": {}
}
def serialize_entity(self, e):
return {
"name": e.text,
"type": e.label_,
"metadata": { },
"salience": -1,
"mentions": [ {"content": e.text, "beginOffset": e.start_char, "type": "PROPER"}]
}
def to_json(self, spacydoc):
sentences = [self.serialize_sentence(s) for s in spacydoc.sents]
tokens = [self.serialize_token(t) for t in spacydoc]
entities = [self.serialize_entity(e) for e in spacydoc.ents]
return {
"sentences": sentences,
"tokens": tokens,
"entities": entities,
"documentSentiment": {},
"language": "unk"
}
class SpacyToSpandexUtils:
@staticmethod
def convert_sentence(spacysent, window_span=None):
begin = spacysent.start_char
end = begin + len(spacysent.text)
if window_span:
sent_span = Span(begin=window_span.begin + begin, end=window_span.begin + end)
else:
sent_span = Span(begin=begin, end=end)
sent = Sentence()
sent.span = sent_span
sent.source = spacysent
return sent
@staticmethod
def convert_token(spacytok, window_span=None):
span = Span(spacytok.idx, spacytok.idx + len(spacytok))
if window_span:
span = Span(window_span.begin + span.begin, window_span.begin + span.end)
tok = Token(lemma=spacytok.lemma_, pos=spacytok.tag_, tag=spacytok.pos_)
tok.span = span
tok.source = spacytok
return tok
@staticmethod
def convert_entity(entity, window_span=None):
if window_span:
entity_span = Span(window_span.begin + entity.start_char,
window_span.begin + entity.end_char)
else:
entity_span = Span(entity.start_char,
entity.end_char)
entity = Entity(name=None, salience=None, label=entity.label_)
entity.span = entity_span
entity.source = entity
return entity
@staticmethod
def convert_noun_chunk(noun_chunk, window_span=None):
if window_span:
noun_chunk_span = Span(window_span.begin + noun_chunk.start_char,
window_span.begin + noun_chunk.end_char)
else:
noun_chunk_span = Span(noun_chunk.start_char, noun_chunk.end_char)
noun_chunk = NounChunk(label=noun_chunk.label_)
noun_chunk.span = noun_chunk_span
return noun_chunk
@staticmethod
def spacy_to_spandex(spacy_doc, spndx=None, annotation_layers=AnnotationLayers.ALL(), window_span=None):
if not spndx:
spndx = Spandex(spacy_doc.text_with_ws)
if annotation_layers & AnnotationLayers.DOCUMENT:
if window_span:
doc = Document(begin=window_span.begin, end=window_span.end)
else:
doc_span = Span(0, len(spndx.content_string))
doc = Document(begin=doc_span.begin, end=doc_span.end)
spndx.add_annotations(doc)
if annotation_layers & AnnotationLayers.SENTENCE:
spndx.add_annotations(
*[SpacyToSpandexUtils.convert_sentence(s, window_span) for s in spacy_doc.sents])
# Extract tokens and dependency parse
spacy_toks = [t for t in spacy_doc]
if annotation_layers & AnnotationLayers.TOKEN:
all_toks = [SpacyToSpandexUtils.convert_token(t, window_span) for t in spacy_toks]
word_toks = [(tok, spacy_tok) for (tok, spacy_tok) in zip(all_toks, spacy_toks) if not spacy_tok.is_space]
toks = [tok for (tok, spacy_tok) in word_toks]
spndx.add_annotations(*toks)
if annotation_layers & AnnotationLayers.DEPPARSE:
# Pull out dependency graphs
span_to_nodes = {tok.span: DependencyNode(begin=tok.begin, end=tok.end) for tok in toks}
depedges = []
depnodes = []
depnode_spans = set()
for (tok, spacy_tok) in word_toks:
headtok = all_toks[spacy_tok.head.i]
head_node = span_to_nodes[headtok.span]
child_span = tok.span
child_node = span_to_nodes[child_span]
# get span for full dependency
depspan = Span(begin=min(tok.begin, headtok.begin),
end=max(tok.end, headtok.end))
# Build edges
depedge = DependencyEdge(label=spacy_tok.dep_, head=head_node, child=child_node)
depedge.span = depspan
child_node.head_edge = depedge
head_node.child_edges.append(depedge)
if headtok.span not in depnode_spans:
depnodes.append(head_node)
depnode_spans.add(head_node.span)
if child_span not in depnode_spans:
depnodes.append(child_node)
depnode_spans.add(child_span)
depedges.append(depedge)
# push dependency graph onto spandex
spndx.add_annotations(*depedges)
spndx.add_annotations(*depnodes)
dep_parses = []
for sent in spndx.select(Sentence):
dep_parse = DependencyParse(begin=sent.begin, end=sent.end)
dep_nodes = [n for n in spndx.select_covered(DependencyNode, dep_parse)]
for dep_node in dep_nodes:
if not dep_parse.root and dep_node.is_root:
# found the root
dep_parse.root = dep_node
dep_parses.append(dep_parse)
spndx.add_annotations(*dep_parses)
if annotation_layers & AnnotationLayers.ENTITY:
spndx.add_annotations(*[SpacyToSpandexUtils.convert_entity(e, window_span) for e in spacy_doc.ents])
if annotation_layers & AnnotationLayers.NOUN_CHUNK:
spndx.add_annotations(
*[SpacyToSpandexUtils.convert_noun_chunk(n, window_span) for n in spacy_doc.noun_chunks])
class SpacyAnalyzer(AnalysisFunction):
"""
Instances of this class accept a spandex operator at run Spacy on the spandex text
Spacy analyses are then converted into a common typesystem
"""
def __init__(self, spacy_pipeline=None, window_type=None):
"""
@param spacy_pipeline: a spacy model pipeline function which accepts text
and returns a spacy document. Default value of None will trigger
creation and initialization of the Spacy English model.
Example:
# initialize pipeline
spacy_analyzer = SpacyAnalyzer(en_nlp)
# only populate Document, Sentence and Token layers in Spandex
layers = AnnotationLayers.DOCUMENT | AnnotationLayers.SENTENCE \
| AnnotationLayers.TOKEN
spacy_analyzer(spndx, annotation_layers=layers)
"""
if spacy_pipeline:
self.spacy_pipeline = spacy_pipeline
else:
# no pipeline is specified so go ahead and initialize one
import spacy
self.spacy_pipeline = spacy.load("en_core_web_sm")
self.window_type = window_type
@process_default_view
def process(self, spndx: Spandex, **kwargs):
"""
Args:
**kwargs: Keyword Arguments
Keyword Args:
annotation_layers (:obj:`AnnotationLayer`): Bitwise mask of AnnotationLayers
indicating which layers to populate in Spandex. Default value is
AnnotationLayers.ALL()
window_type (str or type): Class Type of object to run processing
over. A common use case would be to run on boundaries already
defined prior to processing. For example processing a document
by subsection boundaries Default of None means to process the
full contents of the Spandex.
"""
# FIXME, better in init or as kwargs?
# window_type = kwargs.get('window_type', None)
annotation_layers = kwargs.get('annotation_layers', AnnotationLayers.ALL())
if not self.window_type:
# process full document
spacy_doc = self.spacy_pipeline(spndx.content_string)
SpacyToSpandexUtils.spacy_to_spandex(spacy_doc, spndx, annotation_layers)
else:
# process over windows
for window in spndx.select(self.window_type):
window_text = spndx.spanned_text(window)
spacy_doc = self.spacy_pipeline(window_text)
SpacyToSpandexUtils.spacy_to_spandex(spacy_doc, spndx, annotation_layers, window)
| apache-2.0 | 1,064,162,214,636,632,700 | 35.489933 | 118 | 0.573754 | false |
choderalab/MSMs | jchodera/src-10471/pyemma/cluster.py | 1 | 2588 | #!/usr/bin/env python
import pyemma
import numpy as np
import mdtraj
import os
# Source directory
source_directory = '/cbio/jclab/projects/fah/fah-data/munged/no-solvent/10471'
source_directory = '/cbio/jclab/projects/fah/fah-data/munged/no-solvent/10490'
################################################################################
# Load reference topology
################################################################################
print ('loading reference topology...')
reference_pdb_filename = 'reference.pdb'
reference_trajectory = os.path.join(source_directory, 'run0-clone0.h5')
traj = mdtraj.load(reference_trajectory)
traj[0].save_pdb(reference_pdb_filename)
################################################################################
# Initialize featurizer
################################################################################
print('Initializing featurizer...')
import pyemma.coordinates
featurizer = pyemma.coordinates.featurizer(reference_pdb_filename)
featurizer.add_all()
################################################################################
# Define coordinates source
################################################################################
import pyemma.coordinates
from glob import glob
trajectory_filenames = glob(os.path.join(source_directory, 'run0-clone94.h5'))
coordinates_source = pyemma.coordinates.source(trajectory_filenames, features=featurizer)
print("There are %d frames total in %d trajectories." % (coordinates_source.n_frames_total(), coordinates_source.number_of_trajectories()))
################################################################################
# Cluster
################################################################################
print('Clustering...')
generator_ratio = 100
nframes = coordinates_source.n_frames_total()
nstates = int(nframes / generator_ratio)
stride = 4
metric = 'minRMSD'
clustering = pyemma.coordinates.cluster_uniform_time(data=coordinates_source, k=nstates, stride=stride, metric=metric)
dtrajs = clustering.dtrajs
# Save discrete trajectories.
dtrajs_dir = 'dtrajs'
clustering.save_dtrajs(output_dir=dtrajs_dir, output_format='npy', extension='.npy')
################################################################################
# Make timescale plots
################################################################################
from pyemma import msm
from pyemma import plots
lags = [1,2,5,10]
its = msm.its(dtrajs, lags=lags, errors='bayes')
plots.plot_implied_timescales(its)
import matplotlib.pyplot as plt
plt.savefig('plot.pdf')
| gpl-2.0 | -3,964,985,091,263,813,600 | 34.452055 | 139 | 0.532844 | false |
nonsk131/USRP2016 | generate_tests0000-0999.py | 1 | 3341 | from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.utils import addmags
import numpy as np
import pandas as pd
file = open('/tigress/np5/true_params.txt','a')
def get_index(n):
if n < 10:
return '000' + str(n)
elif n < 100:
return '00' + str(n)
elif n < 1000:
return '0' + str(n)
else:
return str(n)
for n in range(0,1000,1):
index = get_index(n)
file.write('test: ' + index + '\n')
dar = Dartmouth_Isochrone()
array = np.random.rand(2) + 0.5
if array[0] > array[1]:
M1 = array[0]
M2 = array[1]
else:
M1 = array[1]
M2 = array[0]
age1 = np.log10(1e8)
age2 = np.log10(5e8)
feh1 = 0.0
array = 1400*np.random.rand(2) + 100
if array[0] > array[1]:
distance1 = array[0]
distance2 = array[1]
else:
distance1 = array[1]
distance2 = array[0]
AV1 = 0.0
feh2 = 0.2
AV2 = 0.1
params = (M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2)
params = str(params)
file.write('(M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2) = ' + params + '\n')
file.write('\n')
#Simulate true magnitudes
unresolved_bands = ['J','H','K']
resolved_bands = ['i','K']
args1 = (age1, feh1, distance1, AV1)
args2 = (age2, feh2, distance2, AV2)
unresolved = {b:addmags(dar.mag[b](M1, *args1), dar.mag[b](M2, *args2)) for b in unresolved_bands}
resolved_1 = {b:dar.mag[b](M1, *args1) for b in resolved_bands}
resolved_2 = {b:dar.mag[b](M2, *args2) for b in resolved_bands}
#print dar.mag['K'](M2, *args2)
#print unresolved, resolved_1, resolved_2
instruments = ['twomass','RAO']
bands = {'twomass':['J','H','K'],
'RAO':['i','K']}
mag_unc = {'twomass': 0.02, 'RAO':0.1}
resolution = {'twomass':4.0, 'RAO':0.1}
relative = {'twomass':False, 'RAO':True}
separation = 0.5
PA = 100.
columns = ['name', 'band', 'resolution', 'relative', 'separation', 'pa', 'mag', 'e_mag']
df = pd.DataFrame(columns=columns)
i=0
for inst in ['twomass']: #Unresolved observations
for b in bands[inst]:
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = 0.
row['pa'] = 0.
row['mag'] = unresolved[b]
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
for inst in ['RAO']: #Resolved observations
for b in bands[inst]:
mags = [resolved_1[b], resolved_2[b]]
pas = [0, PA]
seps = [0., separation]
for mag,sep,pa in zip(mags,seps,pas):
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = sep
row['pa'] = pa
row['mag'] = mag
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
#print df
df.to_csv(path_or_buf='/tigress/np5/df_binary_test{}.csv'.format(index))
file.close()
| mit | -4,797,106,223,220,081,000 | 28.566372 | 102 | 0.516911 | false |
potato16/pythonl | dirtylook/widgetsII.py | 1 | 2420 | import sys
from PyQt5.QtWidgets import (QWidget, QHBoxLayout,
QLabel, QApplication, QLineEdit,
QFrame, QSplitter, QStyleFactory,
QComboBox)
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
topleft = QFrame(self)
topleft.setFrameShape(QFrame.StyledPanel)
topright = QFrame(self)
topright.setFrameShape(QFrame.StyledPanel)
bottom = QFrame(self)
bottom.setFrameShape(QFrame.StyledPanel)
splitter1 = QSplitter(Qt.Horizontal)
splitter1.addWidget(topleft)
splitter1.addWidget(topright)
splitter2 = QSplitter(Qt.Vertical)
splitter2.addWidget(splitter1)
splitter2.addWidget(bottom)
self.lbl1 = QLabel(self)
qle = QLineEdit(self)
qle.move(60, 100)
self.lbl1.move(60,40)
qle.textChanged[str].connect(self.onChanged)
hbox = QHBoxLayout(self)
hbox.addWidget(splitter2)
#combobox
self.lblc = QLabel('Ubuntu', self)
combo = QComboBox(self)
combo.addItem("Ubuntu")
combo.addItem("Mandriva")
combo.addItem("Fedora")
combo.addItem("Arch")
combo.addItem("Gentoo")
combo.move (50, 50)
self.lblc.move(50,150)
combo.activated[str].connect(self.onActivated)
tmphbox = QHBoxLayout()
tmphbox.addWidget(combo)
tmphbox.addWidget(self.lblc)
topleft.setLayout(tmphbox)
#hbox.addWidget(self.lbl1)
#hbox.addWidget(qle)
pixmap = QPixmap('img1.jpg')
lbl = QLabel(self)
lbl.setPixmap(pixmap)
#hbox.addWidget(lbl)
self.setLayout(hbox)
# self.move(300, 200)
tmphbox = QHBoxLayout()
tmphbox.addWidget(self.lbl1)
tmphbox.addWidget(qle)
topright.setLayout(tmphbox)
tmphbox = QHBoxLayout()
tmphbox.addWidget(lbl)
bottom.setLayout(tmphbox)
self.setWindowTitle('Red Rock')
self.show()
def onActivated(self, text):
self.lblc.setText(text)
self.lblc.adjustSize()
def onChanged(self, text):
self.lbl1.setText(text)
self.lbl1.adjustSize()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| apache-2.0 | 1,898,606,519,239,225,300 | 30.842105 | 54 | 0.606198 | false |
piface/pifacecad | setup.py | 2 | 1293 | import sys
from distutils.core import setup
#PY3 = sys.version_info.major >= 3
PY3 = sys.version_info[0] >= 3
VERSION_FILE = "pifacecad/version.py"
def get_version():
if PY3:
version_vars = {}
with open(VERSION_FILE) as f:
code = compile(f.read(), VERSION_FILE, 'exec')
exec(code, None, version_vars)
return version_vars['__version__']
else:
execfile(VERSION_FILE)
return __version__
setup(
name='pifacecad',
version=get_version(),
description='The PiFace Control And Display module.',
author='Thomas Preston',
author_email='[email protected]',
license='GPLv3+',
url='http://piface.github.io/pifacecad/',
packages=['pifacecad', 'pifacecad.tools'],
long_description=open('README.md').read() + open('CHANGELOG').read(),
classifiers=[
"License :: OSI Approved :: GNU Affero General Public License v3 or "
"later (AGPLv3+)",
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='piface cad control display raspberrypi openlx',
requires=['pifacecommon', 'lirc'],
)
| gpl-3.0 | 8,687,815,180,526,768,000 | 29.785714 | 77 | 0.621036 | false |
TheTazza/StRADRL | settings/options3.py | 1 | 4094 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_options(option_type):
"""
option_type: string
'training' or 'diplay' or 'visualize'
"""
# name
tf.app.flags.DEFINE_string("training_name","tc_v1","name of next training in log")
# Common
tf.app.flags.DEFINE_string("env_type", "gym", "environment type (lab or gym or maze)")
tf.app.flags.DEFINE_string("env_name", "CartPole-v1", "environment name (for lab)")
tf.app.flags.DEFINE_integer("env_max_steps", 400000, "max number of steps in environment")
tf.app.flags.DEFINE_boolean("use_base", False, "whether to use base A3C for aux network")
tf.app.flags.DEFINE_boolean("use_pixel_change", False, "whether to use pixel change")
tf.app.flags.DEFINE_boolean("use_value_replay", False, "whether to use value function replay")
tf.app.flags.DEFINE_boolean("use_reward_prediction", False, "whether to use reward prediction")
tf.app.flags.DEFINE_boolean("use_temporal_coherence", True, "whether to use temporal coherence")
tf.app.flags.DEFINE_boolean("use_proportionality", False, "whether to use proportionality")
tf.app.flags.DEFINE_boolean("use_causality", False, "whether to use causality")
tf.app.flags.DEFINE_boolean("use_repeatability", False, "whether to use repeatability")
tf.app.flags.DEFINE_string("checkpoint_dir", "/tmp/StRADRL/checkpoints", "checkpoint directory")
# For training
if option_type == 'training':
tf.app.flags.DEFINE_string("temp_dir", "/tmp/StRADRL/tensorboard/", "base directory for tensorboard")
tf.app.flags.DEFINE_string("log_dir", "/tmp/StRADRL/log/", "base directory for logs")
tf.app.flags.DEFINE_integer("max_time_step", 10**6, "max time steps")
tf.app.flags.DEFINE_integer("save_interval_step", 10**4, "saving interval steps")
tf.app.flags.DEFINE_boolean("grad_norm_clip", 40.0, "gradient norm clipping")
#base
tf.app.flags.DEFINE_float("initial_learning_rate", 1e-3, "learning rate")
tf.app.flags.DEFINE_float("gamma", 0.99, "discount factor for rewards")
tf.app.flags.DEFINE_float("entropy_beta", 0.01, "entropy regurarlization constant")
tf.app.flags.DEFINE_float("value_lambda", 0.5, "value ratio for base loss")
tf.app.flags.DEFINE_float("base_lambda", 0.97, "generalized adv. est. lamba for short-long sight")
# auxiliary
tf.app.flags.DEFINE_integer("parallel_size", 1, "parallel thread size")
tf.app.flags.DEFINE_float("aux_initial_learning_rate", 1e-3, "learning rate")
tf.app.flags.DEFINE_float("aux_lambda", 0.0, "generalized adv. est. lamba for short-long sight (aux)")
tf.app.flags.DEFINE_float("gamma_pc", 0.9, "discount factor for pixel control")
tf.app.flags.DEFINE_float("pixel_change_lambda", 0.0001, "pixel change lambda") # 0.05, 0.01 ~ 0.1 for lab, 0.0001 ~ 0.01 for gym
tf.app.flags.DEFINE_float("temporal_coherence_lambda", 1., "temporal coherence lambda")
tf.app.flags.DEFINE_float("proportionality_lambda", 100., "proportionality lambda")
tf.app.flags.DEFINE_float("causality_lambda", 1., "causality lambda")
tf.app.flags.DEFINE_float("repeatability_lambda", 100., "repeatability lambda")
tf.app.flags.DEFINE_integer("experience_history_size", 100000, "experience replay buffer size")
# queuer
tf.app.flags.DEFINE_integer("local_t_max", 20, "repeat step size")
tf.app.flags.DEFINE_integer("queue_length", 5, "max number of batches (of length local_t_max) in queue")
tf.app.flags.DEFINE_integer("env_runner_sync", 1, "number of env episodes before sync to global")
tf.app.flags.DEFINE_float("action_freq", 0, "number of actions per second in env")
# For display
if option_type == 'display':
tf.app.flags.DEFINE_string("frame_save_dir", "/tmp/StRADRL_frames", "frame save directory")
tf.app.flags.DEFINE_boolean("recording", False, "whether to record movie")
tf.app.flags.DEFINE_boolean("frame_saving", False, "whether to save frames")
return tf.app.flags.FLAGS
| mit | -3,369,474,994,041,291,300 | 53.586667 | 133 | 0.703713 | false |
kedz/sumpy | duc_testbed.py | 1 | 2216 | import argparse
import pandas as pd
import os
import sumpy
import sumpy.eval
def load_docsets(duc_dir):
docset_paths = [os.path.join(duc_dir, fname)
for fname in os.listdir(duc_dir)]
docset_paths = [path for path in docset_paths if os.path.isdir(path)]
docsets = {}
for docset_path in docset_paths:
docset_id, docs, models = load_docset(docset_path)
docsets[docset_id] = {u"docs": docs, u"models": models}
return docsets
def load_docset(docset_path):
docset_id = os.path.split(docset_path)[1]
docs_path = os.path.join(docset_path, u"docs")
docs = sumpy.io.load_duc_docset(docs_path)
models = []
for fname in os.listdir(docset_path):
if docset_id in fname:
model_paths = [os.path.join(docset_path, fname, length)
for length in [u"200", u"400"]]
model_sums = sumpy.io.load_duc_abstractive_summaries(model_paths)
models.extend(model_sums)
return docset_id, docs, models
def generate_summaries(systems, docsets):
rouge = sumpy.eval.ROUGE(max_ngrams=2, limit=100, limit_type=u"word")
results = []
for docset_id in docsets.keys():
#print docset_id
docs = docsets[docset_id][u"docs"]
models = docsets[docset_id][u"models"]
sys_sums = [(system_name, unicode(sum_func(docs)))
for system_name, sum_func in systems]
df = rouge.evaluate(sys_sums, models)
results.append(df)
return pd.concat(results).groupby(level=0).mean()
def main(duc_dir):
print u"Loading DUC document sets from:", duc_dir
docsets = load_docsets(duc_dir)
lede = lambda x: sumpy.lede(x)
centroid = lambda x: sumpy.centroid(x)
lexrank = lambda x: sumpy.lexrank(x)
systems = [(u"lede", lede), (u"centroid", centroid),
(u"lexrank", lexrank)]
print generate_summaries(systems, docsets)
if __name__ == u"__main__":
parser = argparse.ArgumentParser()
parser.add_argument(u"-d", u"--duc-dir", required=True, type=unicode,
help=u"path to DUC document set directory")
args = parser.parse_args()
duc_dir = args.duc_dir
main(duc_dir)
| apache-2.0 | 1,770,225,091,152,493,600 | 34.174603 | 77 | 0.618682 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/it/freedownload.py | 1 | 3281 | import requests
import re
import xbmc,xbmcaddon,time
from ..scraper import Scraper
from ..common import clean_title,clean_search,random_agent,send_log,error_log
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
class freedownload(Scraper):
domains = ['http://freemoviedownloads6.com']
name = "FreeDownload"
sources = []
def __init__(self):
self.base_link = 'http://freemoviedownloads6.com'
self.goog = 'https://www.google.co.uk'
self.sources = []
if dev_log=='true':
self.start_time = time.time()
def scrape_movie(self, title, year, imdb, debrid=False):
try:
scrape = clean_search(title.lower()).replace(' ','+')
start_url = '%s/search?q=freemoviedownloads6.com+%s+%s' %(self.goog,scrape,year)
#print 'START> '+start_url
headers = {'User-Agent':random_agent()}
html = requests.get(start_url,headers=headers,timeout=3).content
results = re.compile('href="(.+?)"',re.DOTALL).findall(html)
for url in results:
if self.base_link in url:
if scrape.replace('+','-') in url:
if 'webcache' in url:
continue
self.get_source(url,title,year)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
def get_source(self,url,title,year):
try:
#print 'cfwds %s %s %s' %(url,title,year)
confirm_name = clean_title(title.lower()) + year
headers={'User-Agent':random_agent()}
OPEN = requests.get(url,headers=headers,timeout=5).content
getTit = re.compile('<title>(.+?)</title>',re.DOTALL).findall(OPEN)[0]
getTit = getTit.split('Free')[0]
if clean_title(getTit.lower()) == confirm_name:
#print 'This Movie + '+getTit
OPEN = OPEN.split("type='video/mp4'")[1]
Regex = re.compile('href="(.+?)"',re.DOTALL).findall(OPEN)
count = 0
for link in Regex:
if '1080' in link:
res = '1080p'
elif '720' in link:
res = '720p'
elif '480' in link:
res = '480p'
else:
res = 'SD'
if '.mkv' in link:
count +=1
self.sources.append({'source': 'DirectLink', 'quality': res, 'scraper': self.name, 'url': link,'direct': True})
if '.mp4' in link:
count +=1
self.sources.append({'source': 'DirectLink', 'quality': res, 'scraper': self.name, 'url': link,'direct': True})
if dev_log=='true':
end_time = time.time() - self.start_time
send_log(self.name,end_time,count)
except:
pass
| gpl-2.0 | 967,297,113,718,419,100 | 37.6 | 135 | 0.477294 | false |
DarjaGFX/GroupWeb | web/settings.py | 1 | 3218 | """
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6tua3m@^yp6@ld%q*exja6p$3mlr@th-r)4y8s8#cf$xk7ivqs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'web.middleware.alaki'
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
# LANGUAGE_CODE = 'fa-ir'
# import locale
# locale.setlocale(locale.LC_ALL, "fa_IR")
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/blog/static/'
| gpl-3.0 | -8,696,760,926,007,692,000 | 24.744 | 91 | 0.684587 | false |
aksareen/balrog | auslib/test/admin/views/test_releases.py | 1 | 71942 | import mock
import simplejson as json
from sqlalchemy import select
from auslib.blobs.base import createBlob
from auslib.global_state import dbo
from auslib.test.admin.views.base import ViewTest
class TestReleasesAPI_JSON(ViewTest):
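    """Tests of the admin /releases endpoints: release blob CRUD, per-locale
    builds, aliases/copies, permissions, and data_version merge handling."""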
def testGetRelease(self):
ret = self._get("/releases/b")
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), json.loads("""
{
"name": "b",
"hashFunction": "sha512",
"schema_version": 1
}
"""))
def testGetRelease404(self):
ret = self._get("/releases/g")
self.assertStatusCode(ret, 404)
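    # POSTing new top-level fields to an existing release should merge them
    # into the stored blob rather than replacing it.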
def testReleasePostUpdateExisting(self):
data = json.dumps(dict(detailsUrl='blah', fakePartials=True, schema_version=1))
ret = self._post('/releases/d', data=dict(data=data, product='d', data_version=1))
self.assertStatusCode(ret, 200)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'd').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "d",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"d": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}
"""))
def testReleasePostUpdateExistingWithoutPermission(self):
data = json.dumps(dict(detailsUrl='blah', fakePartials=True, schema_version=1))
ret = self._post('/releases/d', data=dict(data=data, product='d', data_version=1), username="hannah")
self.assertStatusCode(ret, 403)
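    # Submitting a PUT based on an outdated data_version should be three-way
    # merged against the current blob when the changes don't conflict.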
def testReleasePutUpdateMergeableOutdatedData(self):
ancestor_blob = """
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}"""
blob1 = """
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
},
"dd2": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}"""
blob2 = """
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
},
"dd1": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}"""
result_blob = createBlob("""
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd2": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
},
"dd": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
},
"dd1": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}""")
# Testing Put request to add new release
ret = self._put('/releases/dd', data=dict(blob=ancestor_blob, name='dd',
product='dd', data_version=1))
self.assertStatusCode(ret, 201)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'dd').execute().fetchone()[0]
self.assertEqual(ret, createBlob(ancestor_blob))
# Updating same release
ret = self._put('/releases/dd', data=dict(blob=blob1, name='dd',
product='dd', data_version=1))
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
# Updating release with outdated data, testing if merged correctly
ret = self._put('/releases/dd', data=dict(blob=blob2, name='dd',
product='dd', data_version=1))
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), dict(new_data_version=3))
ret = select([dbo.releases.data]).where(dbo.releases.name == 'dd').execute().fetchone()[0]
self.assertEqual(ret, result_blob)
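    # When the outdated blob's changes clash with what has already been
    # applied, the merge must be rejected with a 400.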
def testReleasePutUpdateConflictingOutdatedData(self):
ancestor_blob = """
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}"""
blob1 = """
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd1": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}"""
blob2 = """
{
"name": "dd",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"dd": {
"complete": {
"filesize": 12345,
"from": "*",
"hashValue": "abc"
}
},
"dd1": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc1"
}
}
}
}
}
}"""
# Testing Put request to add new release
ret = self._put('/releases/dd', data=dict(blob=ancestor_blob, name='dd', product='dd', data_version=1))
self.assertStatusCode(ret, 201)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'dd').execute().fetchone()[0]
self.assertEqual(ret, createBlob(ancestor_blob))
# Updating same release
ret = self._put('/releases/dd', data=dict(blob=blob1, name='dd',
product='dd', data_version=1))
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
# Updating same release with conflicting data
ret = self._put('/releases/dd', data=dict(blob=blob2, name='dd',
product='dd', data_version=1))
self.assertStatusCode(ret, 400)
def testReleasePostUpdateOutdatedDataNotBlob(self):
blob = """
{
"name": "ee",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"ee": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}"""
ret = self._post('/releases/ee', data=dict(data=blob, hashFunction="sha512", name='ee', product='ee', data_version=1))
self.assertStatusCode(ret, 201)
        self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
        # Updating same release
ret = self._post('/releases/ee', data=dict(data=blob,
hashFunction="sha512",
name='ee', product='ee', data_version=2))
self.assertStatusCode(ret, 200)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=3)), "Data: %s" % ret.data)
# Outdated Data Error on same release
ret = self._post('/releases/ee', data=dict(hashFunction="sha512",
read_only=True,
name='ee', product='ee', data_version=1))
self.assertStatusCode(ret, 400)
def testReleasePostMismatchedName(self):
data = json.dumps(dict(name="eee", schema_version=1))
ret = self._post('/releases/d', data=dict(data=data, product='d', data_version=1))
self.assertStatusCode(ret, 400)
def testReleasePostUpdateChangeHashFunction(self):
data = json.dumps(dict(detailsUrl='blah', hashFunction="sha1024", schema_version=1))
ret = self._post('/releases/d', data=dict(data=data, product='d', data_version=1))
self.assertStatusCode(ret, 400)
def testReleasePostUpdateChangeProduct(self):
data = json.dumps(dict(detailsUrl="abc", schema_version=1))
ret = self._post("/releases/c", data=dict(data=data, product="h", data_version=1))
self.assertStatusCode(ret, 400)
def testReleasePostInvalidBlob(self):
data = json.dumps(dict(uehont="uhetn", schema_version=1))
ret = self._post("/releases/c", data=dict(data=data, product="c", data_version=1))
self.assertStatusCode(ret, 400)
self.assertIn("Additional properties are not allowed", ret.data)
def testReleasePostWithSignoffRequired(self):
data = json.dumps(dict(bouncerProducts=dict(partial='foo'), name='a', hashFunction="sha512"))
ret = self._post("/releases/a", data=dict(data=data, product="a", data_version=1, schema_version=1))
self.assertStatusCode(ret, 400)
self.assertIn("This change requires signoff", ret.data)
def testReleasePostCreatesNewReleasev1(self):
data = json.dumps(dict(bouncerProducts=dict(partial='foo'), name='e', hashFunction="sha512"))
ret = self._post('/releases/e', data=dict(data=data, product='e', schema_version=1))
self.assertStatusCode(ret, 201)
ret = dbo.releases.t.select().where(dbo.releases.name == 'e').execute().fetchone()
self.assertEqual(ret['product'], 'e')
self.assertEqual(ret['name'], 'e')
self.assertEqual(ret['data'], createBlob("""
{
"name": "e",
"hashFunction": "sha512",
"schema_version": 1,
"bouncerProducts": {
"partial": "foo"
}
}
"""))
def testReleasePostCreatesNewReleaseNopermission(self):
data = json.dumps(dict(bouncerProducts=dict(partial='foo'), name='e', hashFunction="sha512"))
ret = self._post('/releases/e', data=dict(data=data, product='e', schema_version=1), username="kate")
self.assertStatusCode(ret, 403)
def testReleasePostCreatesNewReleasev2(self):
data = json.dumps(dict(bouncerProducts=dict(complete='foo'), name='e', hashFunction="sha512"))
ret = self._post('/releases/e', data=dict(data=data, product='e', schema_version=2))
self.assertStatusCode(ret, 201)
ret = dbo.releases.t.select().where(dbo.releases.name == 'e').execute().fetchone()
self.assertEqual(ret['product'], 'e')
self.assertEqual(ret['name'], 'e')
self.assertEqual(ret['data'], createBlob("""
{
"name": "e",
"hashFunction": "sha512",
"schema_version": 2,
"bouncerProducts": {
"complete": "foo"
}
}
"""))
def testReleasePostInvalidKey(self):
data = json.dumps(dict(foo=1))
ret = self._post('/releases/ab', data=dict(data=data))
self.assertStatusCode(ret, 400)
def testReleasePostRejectedURL(self):
data = json.dumps(dict(platforms=dict(p=dict(locales=dict(f=dict(complete=dict(fileUrl='http://evil.com')))))))
ret = self._post('/releases/d', data=dict(data=data, product='d', data_version=1))
self.assertStatusCode(ret, 400)
def testDeleteRelease(self):
ret = self._delete("/releases/d", qs=dict(data_version=1))
self.assertStatusCode(ret, 200)
ret = dbo.releases.t.count().where(dbo.releases.name == 'd').execute().first()[0]
self.assertEqual(ret, 0)
def testDeleteReleaseOutdatedData(self):
# Release's data version is outdated
ret = self._get("/releases/d")
self.assertStatusCode(ret, 200)
ret = self._delete("/releases/d", qs=dict(data_version=7))
self.assertStatusCode(ret, 400)
def testDeleteNonExistentRelease(self):
ret = self._delete("/releases/ueo", qs=dict(data_version=1))
self.assertStatusCode(ret, 404)
def testDeleteWithoutPermission(self):
ret = self._delete("/releases/d", username="bob", qs=dict(data_version=1))
self.assertStatusCode(ret, 403)
def testDeleteWithoutPermissionForAction(self):
ret = self._delete("/releases/d", username="bob", qs=dict(data_version=1))
self.assertStatusCode(ret, 403)
def testDeleteWithProductAdminPermission(self):
ret = self._delete("/releases/d", username="bill", qs=dict(data_version=1))
self.assertStatusCode(ret, 200)
def testDeleteWithoutProductAdminPermission(self):
ret = self._delete("/releases/d", username="billy", qs=dict(data_version=1))
self.assertStatusCode(ret, 403)
def testDeleteReadOnlyRelease(self):
dbo.releases.t.update(values=dict(read_only=True, data_version=2)).where(dbo.releases.name == "d").execute()
ret = self._delete("/releases/d", username="bill", qs=dict(data_version=2))
self.assertStatusCode(ret, 403)
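    # A release that a rule still maps to must not be deletable.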
def testDeleteWithRules(self):
ret = self._delete("/releases/a", qs=dict(data_version=1))
self.assertStatusCode(ret, 400)
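    # The per-locale builds endpoint adds a single locale under a platform and
    # returns the new data_version.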
def testLocalePut(self):
data = json.dumps({
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc",
}
})
ret = self._put('/releases/ab/builds/p/l', data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'ab').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "ab",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"l": {
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}
"""))
def testLocalePutSpecificPermission(self):
data = json.dumps({
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc",
}
})
ret = self._put('/releases/ab/builds/p/l', username="ashanti", data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'ab').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "ab",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"l": {
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}
"""))
def testLocalePutWithBadHashFunction(self):
data = json.dumps(dict(complete=dict(filesize='435')))
ret = self._put('/releases/ab/builds/p/l', data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 400)
def testLocalePutWithoutPermission(self):
data = '{"complete": {"filesize": 435, "from": "*", "hashValue": "abc"}}'
ret = self._put('/releases/ab/builds/p/l', username='liu', data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 403)
def testLocalePutWithoutPermissionForProduct(self):
data = '{"complete": {"filesize": 435, "from": "*", "hashValue": "abc"}}'
ret = self._put('/releases/ab/builds/p/l', username='bob', data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 403)
def testLocalePutForNewRelease(self):
data = json.dumps({
"complete": {
"filesize": 678,
"from": "*",
"hashValue": "abc",
}
})
# setting schema_version in the incoming blob is a hack for testing
# SingleLocaleView._put() doesn't give us access to the form
ret = self._put('/releases/e/builds/p/a', data=dict(data=data, product='e', hashFunction="sha512", schema_version=1))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'e').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "e",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"a": {
"complete": {
"filesize": 678,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}
"""))
def testLocalePutAppend(self):
data = json.dumps({
"partial": {
"filesize": 234,
"from": "c",
"hashValue": "abc",
"fileUrl": "http://good.com/blah",
}
})
ret = self._put('/releases/d/builds/p/g', data=dict(data=data, product='d', data_version=1, schema_version=1))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'd').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "d",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"d": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
},
"g": {
"partial": {
"filesize": 234,
"from": "c",
"hashValue": "abc",
"fileUrl": "http://good.com/blah"
}
}
}
}
}
}
"""))
def testLocalePutForNewReleaseWithAlias(self):
data = json.dumps({
"complete": {
"filesize": 678,
"from": "*",
"hashValue": "abc",
}
})
# setting schema_version in the incoming blob is a hack for testing
# SingleLocaleView._put() doesn't give us access to the form
ret = self._put('/releases/e/builds/p/a', data=dict(data=data, product='e', alias='["p2"]', schema_version=1, hashFunction="sha512"))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'e').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "e",
"hashFunction": "sha512",
"schema_version": 1,
"platforms": {
"p": {
"locales": {
"a": {
"complete": {
"filesize": 678,
"from": "*",
"hashValue": "abc"
}
}
}
},
"p2": {
"alias": "p"
}
}
}
"""))
def testLocalePutAppendWithAlias(self):
data = json.dumps({
"partial": {
"filesize": 123,
"from": "c",
"hashValue": "abc",
"fileUrl": "http://good.com/blah",
}
})
ret = self._put('/releases/d/builds/q/g', data=dict(data=data, product='d', data_version=1, alias='["q2"]', schema_version=1))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'd').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "d",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"d": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
},
"q": {
"locales": {
"g": {
"partial": {
"filesize": 123,
"from": "c",
"hashValue": "abc",
"fileUrl": "http://good.com/blah"
}
}
}
},
"q2": {
"alias": "q"
}
}
}
"""))
def testLocalePutWithCopy(self):
data = json.dumps({
"partial": {
"filesize": 123,
"from": "b",
"hashValue": "abc",
}
})
data = dict(data=data, product='a', copyTo=json.dumps(['b']), data_version=1, schema_version=1)
ret = self._put('/releases/ab/builds/p/l', data=data)
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), "Data: %s" % ret.data)
ret = select([dbo.releases.data]).where(dbo.releases.name == 'ab').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "ab",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"l": {
"partial": {
"filesize": 123,
"from": "b",
"hashValue": "abc"
}
}
}
}
}
}
"""))
ret = select([dbo.releases.data]).where(dbo.releases.name == 'b').execute().fetchone()[0]
self.assertEqual(ret, createBlob("""
{
"name": "b",
"schema_version": 1,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"l": {
"partial": {
"filesize": 123,
"from": "b",
"hashValue": "abc"
}
}
}
}
}
}
"""))
def testLocalePutBadJSON(self):
ret = self._put('/releases/ab/builds/p/l', data=dict(data='a', product='a'))
self.assertStatusCode(ret, 400)
def testLocaleRejectedURL(self):
data = json.dumps(dict(complete=dict(fileUrl='http://evil.com')))
ret = self._put('/releases/ab/builds/p/l', data=dict(data=data, product='a', data_version=1))
self.assertStatusCode(ret, 400)
def testLocaleGet(self):
ret = self._get('/releases/d/builds/p/d')
self.assertStatusCode(ret, 200)
got = json.loads(ret.data)
expected = {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc",
}
}
self.assertEquals(got, expected)
self.assertEqual(ret.headers['X-Data-Version'], '1')
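    # A request made without the usual test auth (raw client.put) gets a 401.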
def testLocalePutNotAllowed(self):
data = json.dumps({
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc",
}
})
inp_data = dict(csrf_token="lorem", data=data, product='d', data_version=1, schema_version=1)
ret = self.client.put('/releases/d/builds/p/d', data=json.dumps(inp_data), content_type="application/json")
self.assertStatusCode(ret, 401)
def testLocalePutReadOnlyRelease(self):
dbo.releases.t.update(values=dict(read_only=True, data_version=2)).where(dbo.releases.name == "ab").execute()
data = json.dumps({
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc",
}
})
ret = self._put('/releases/ab/builds/p/l', data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 403)
def testLocalePutWithProductAdmin(self):
data = json.dumps({
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc",
}
})
ret = self._put('/releases/ab/builds/p/l', username='billy',
data=dict(data=data, product='a', data_version=1, schema_version=1))
self.assertStatusCode(ret, 201)
def testLocalePutWithoutProductAdmin(self):
data = json.dumps({
"complete": {
"filesize": 435,
"from": "*",
"hashValue": "abc",
}
})
ret = self._put('/releases/d/builds/p/d', username='billy',
data=dict(data=data, product='d', data_version=1, schema_version=1))
self.assertStatusCode(ret, 403)
def testLocalePutCantChangeProduct(self):
data = json.dumps(dict(complete=dict(filesize=435)))
ret = self._put('/releases/ab/builds/p/l', data=dict(data=data, product='b', schema_version=1))
self.assertStatusCode(ret, 400)
def testLocaleGet404(self):
ret = self._get("/releases/c/builds/h/u")
self.assertStatusCode(ret, 404)
# FIXME: We shouldn't rely on 500 to validate behaviour. This test should fake a 400 instead.
# Currently, causing a 400 in this will NOT make version revert to the original value. Need to
# fix the bug at the same time as changing the test.
# def testLocaleRevertsPartialUpdate(self):
# data = json.dumps(dict(complete=dict(filesize=1)))
# with mock.patch('auslib.global_state.dbo.releases.addLocaleToRelease') as r:
# r.side_effect = Exception("Fail")
# ret = self._put('/releases/a/builds/p/l', data=dict(data=data, product='a', version='c', data_version=1, schema_version=1))
# self.assertStatusCode(ret, 500)
# ret = dbo.releases.t.select().where(dbo.releases.name == 'a').execute().fetchone()
# self.assertEqual(ret['product'], 'a')
# self.assertEqual(ret['version'], 'a')
# self.assertEqual(json.loads(ret['data']), dict(name='a', hashFunction="sha512", schema_version=1))
def testNewReleasePut(self):
ret = self._put('/releases/new_release', data=dict(name='new_release', product='Firefox',
blob="""
{
"name": "new_release",
"hashFunction": "sha512",
"schema_version": 1,
"platforms": {
"p": {
"locales": {
"l": {
}
}
}
}
}
"""))
self.assertEquals(ret.status_code, 201, "Status Code: %d, Data: %s" % (ret.status_code, ret.data))
r = dbo.releases.t.select().where(dbo.releases.name == 'new_release').execute().fetchall()
self.assertEquals(len(r), 1)
self.assertEquals(r[0]['name'], 'new_release')
self.assertEquals(r[0]['product'], 'Firefox')
self.assertEquals(r[0]['data'], createBlob("""
{
"name": "new_release",
"hashFunction": "sha512",
"schema_version": 1,
"platforms": {
"p": {
"locales": {
"l": {
}
}
}
}
}
"""))
def testNewReleasePutBadInput(self):
ret = self._put("/releases/ueohueo", data=dict(name="ueohueo", product="aa", blob="""
{
"name": "ueohueo",
"schema_version": 3,
"hashFunction": "sha512",
"borken": "yes"
}
"""))
self.assertStatusCode(ret, 400)
def testNewReleasePutMismatchedName(self):
ret = self._put("/releases/aaaa", data=dict(name="ueohueo", product="aa", blob="""
{
"name": "bbbb",
"schema_version": 3
}
"""))
self.assertStatusCode(ret, 400)
def testPutExistingRelease(self):
ret = self._put("/releases/d", data=dict(name="d", product="Firefox", data_version=1, blob="""
{
"name": "d",
"schema_version": 3,
"hashFunction": "sha512",
"actions": "doit"
}
"""))
self.assertEquals(ret.status_code, 200, "Status Code: %d, Data: %s" % (ret.status_code, ret.data))
r = dbo.releases.t.select().where(dbo.releases.name == 'd').execute().fetchall()
self.assertEquals(len(r), 1)
self.assertEquals(r[0]['name'], 'd')
self.assertEquals(r[0]['product'], 'Firefox')
self.assertEquals(r[0]['data'], createBlob("""
{
"name": "d",
"schema_version": 3,
"hashFunction": "sha512",
"actions": "doit"
}
"""))
def testGMPReleasePut(self):
ret = self._put('/releases/gmprel', data=dict(name='gmprel', product='a',
blob="""
{
"name": "gmprel",
"schema_version": 1000,
"hashFunction": "sha512",
"vendors": {
"foo": {
"version": "1",
"platforms": {
"a": {
"filesize": 2,
"hashValue": "acbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbda\
cbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbd",
"fileUrl": "http://good.com/4"
},
"a2": {
"alias": "a"
}
}
}
}
}
"""))
self.assertEquals(ret.status_code, 201, "Status Code: %d, Data: %s" % (ret.status_code, ret.data))
r = dbo.releases.t.select().where(dbo.releases.name == 'gmprel').execute().fetchall()
self.assertEquals(len(r), 1)
self.assertEquals(r[0]['name'], 'gmprel')
self.assertEquals(r[0]['product'], 'a')
self.assertEquals(r[0]['data'], createBlob("""
{
"name": "gmprel",
"schema_version": 1000,
"hashFunction": "sha512",
"vendors": {
"foo": {
"version": "1",
"platforms": {
"a": {
"filesize": 2,
"hashValue": "acbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbda\
cbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbdacbd",
"fileUrl": "http://good.com/4"
},
"a2": {
"alias": "a"
}
}
}
}
}
"""))
def testGetReleases(self):
ret = self._get("/releases")
self.assertStatusCode(ret, 200)
data = json.loads(ret.data)
self.assertEquals(len(data["releases"]), 5)
def testGetReleasesNamesOnly(self):
ret = self._get("/releases", qs=dict(names_only=1))
self.assertStatusCode(ret, 200)
self.assertEquals(json.loads(ret.data), json.loads("""
{
"names": [
"a", "ab", "b", "c", "d"
]
}
"""))
def testGetReleasesNamePrefix(self):
ret = self._get("/releases", qs=dict(name_prefix='a'))
self.assertStatusCode(ret, 200)
ret_data = json.loads(ret.data)
with self.assertRaises(KeyError):
ret_data['data']
self.assertEquals(ret_data, json.loads("""
{
"releases": [
{"data_version": 1, "name": "a", "product": "a", "read_only": false, "rule_ids": [3, 4, 6, 7]},
{"data_version": 1, "name": "ab", "product": "a", "read_only": false, "rule_ids": []}
]
}
"""))
def testGetReleasesNamePrefixNamesOnly(self):
ret = self._get("/releases", qs=dict(name_prefix='a',
names_only='1'))
self.assertStatusCode(ret, 200)
self.assertEquals(json.loads(ret.data), json.loads("""
{
"names": ["a", "ab"]
}
"""))
def testReleasesPost(self):
data = json.dumps(dict(bouncerProducts=dict(partial='foo'), name='e', schema_version=1, hashFunction="sha512"))
ret = self._post('/releases', data=dict(blob=data, name="e", product='e'))
self.assertStatusCode(ret, 201)
ret = dbo.releases.t.select().where(dbo.releases.name == 'e').execute().fetchone()
self.assertEqual(ret['product'], 'e')
self.assertEqual(ret['name'], 'e')
self.assertEqual(ret['data'], createBlob("""
{
"name": "e",
"hashFunction": "sha512",
"schema_version": 1,
"bouncerProducts": {
"partial": "foo"
}
}
"""))
class TestReleasesScheduledChanges(ViewTest):
maxDiff = 10000
def setUp(self):
super(TestReleasesScheduledChanges, self).setUp()
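        # Fixtures: sc_id 1 inserts a new release "m", sc_id 2 updates "a" (signed
        # off by bill), sc_id 3 is an already-completed update of "b", and sc_id 4
        # deletes "ab" (signed off by bill and ben).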
dbo.releases.scheduled_changes.t.insert().execute(
sc_id=1, scheduled_by="bill", change_type="insert", data_version=1, base_name="m", base_product="m",
base_data=createBlob(dict(name="m", hashFunction="sha512", schema_version=1))
)
dbo.releases.scheduled_changes.history.t.insert().execute(change_id=1, changed_by="bill", timestamp=50, sc_id=1)
dbo.releases.scheduled_changes.history.t.insert().execute(
change_id=2, changed_by="bill", timestamp=51, sc_id=1, scheduled_by="bill", change_type="insert", data_version=1, base_name="m",
base_product="m", base_data=createBlob(dict(name="m", hashFunction="sha512", schema_version=1))
)
dbo.releases.scheduled_changes.conditions.t.insert().execute(sc_id=1, when=4000000000, data_version=1)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(change_id=1, changed_by="bill", timestamp=50, sc_id=1)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(
change_id=2, changed_by="bill", timestamp=51, sc_id=1, when=4000000000, data_version=1
)
dbo.releases.scheduled_changes.t.insert().execute(
sc_id=2, scheduled_by="bill", change_type="update", data_version=1, base_name="a", base_product="a",
base_data=createBlob(dict(name="a", hashFunction="sha512", schema_version=1, extv="2.0")), base_data_version=1
)
dbo.releases.scheduled_changes.history.t.insert().execute(change_id=3, changed_by="bill", timestamp=70, sc_id=2)
dbo.releases.scheduled_changes.history.t.insert().execute(
change_id=4, changed_by="bill", timestamp=71, sc_id=2, scheduled_by="bill", change_type="update", data_version=1, base_name="a",
base_product="a", base_data=createBlob(dict(name="a", hashFunction="sha512", schema_version=1, extv="2.0")), base_data_version=1
)
dbo.releases.scheduled_changes.signoffs.t.insert().execute(sc_id=2, username="bill", role="releng")
dbo.releases.scheduled_changes.signoffs.history.t.insert().execute(change_id=1, changed_by="bill", timestamp=100, sc_id=2, username="bill")
dbo.releases.scheduled_changes.signoffs.history.t.insert().execute(change_id=2, changed_by="bill", timestamp=101, sc_id=2,
username="bill", role="releng")
dbo.releases.scheduled_changes.conditions.t.insert().execute(sc_id=2, when=6000000000, data_version=1)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(change_id=3, changed_by="bill", timestamp=70, sc_id=2)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(
change_id=4, changed_by="bill", timestamp=71, sc_id=2, when=6000000000, data_version=1
)
dbo.releases.scheduled_changes.t.insert().execute(
sc_id=3, complete=True, scheduled_by="bill", change_type="update", data_version=2, base_name="b", base_product="b",
base_data=createBlob(dict(name="b", hashFunction="sha512", schema_version=1)), base_data_version=1
)
dbo.releases.scheduled_changes.history.t.insert().execute(change_id=5, changed_by="bill", timestamp=6, sc_id=3)
dbo.releases.scheduled_changes.history.t.insert().execute(
change_id=6, changed_by="bill", timestamp=7, sc_id=3, complete=False, scheduled_by="bill", change_type="update", data_version=1, base_name="b",
base_product="b", base_data=createBlob(dict(name="b", hashFunction="sha512", schema_version=1)), base_data_version=1
)
dbo.releases.scheduled_changes.history.t.insert().execute(
change_id=7, changed_by="bill", timestamp=25, sc_id=3, complete=True, change_type="update", scheduled_by="bill", data_version=2, base_name="b",
base_product="b", base_data=createBlob(dict(name="b", hashFunction="sha512", schema_version=1)), base_data_version=1
)
dbo.releases.scheduled_changes.conditions.t.insert().execute(sc_id=3, when=10000000, data_version=2)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(change_id=5, changed_by="bill", timestamp=6, sc_id=3)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(
change_id=6, changed_by="bill", timestamp=7, sc_id=3, when=10000000, data_version=1
)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(
change_id=7, changed_by="bill", timestamp=25, sc_id=3, when=10000000, data_version=2
)
dbo.releases.scheduled_changes.t.insert().execute(
sc_id=4, complete=False, scheduled_by="bill", change_type="delete", data_version=1, base_name="ab", base_data_version=1,
)
dbo.releases.scheduled_changes.history.t.insert().execute(change_id=8, changed_by="bill", timestamp=25, sc_id=4)
dbo.releases.scheduled_changes.history.t.insert().execute(
change_id=9, changed_by="bill", timestamp=26, sc_id=4, complete=False, scheduled_by="bill", change_type="delete", data_version=1,
base_name="ab", base_data_version=1
)
dbo.releases.scheduled_changes.conditions.t.insert().execute(sc_id=4, when=230000000, data_version=1)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(change_id=8, changed_by="bill", timestamp=25, sc_id=4)
dbo.releases.scheduled_changes.conditions.history.t.insert().execute(
change_id=9, changed_by="bill", timestamp=26, sc_id=4, when=230000000, data_version=1
)
dbo.releases.scheduled_changes.signoffs.t.insert().execute(sc_id=4, username="bill", role="releng")
dbo.releases.scheduled_changes.signoffs.t.insert().execute(sc_id=4, username="ben", role="releng")
def testGetScheduledChanges(self):
ret = self._get("/scheduled_changes/releases")
expected = {
"count": 3,
"scheduled_changes": [
{
"sc_id": 1, "when": 4000000000, "scheduled_by": "bill", "change_type": "insert", "complete": False, "sc_data_version": 1,
"name": "m", "product": "m", "data": {"name": "m", "hashFunction": "sha512", "schema_version": 1}, "read_only": False,
"data_version": None, "signoffs": {}, "required_signoffs": {},
},
{
"sc_id": 2, "when": 6000000000, "scheduled_by": "bill", "change_type": "update", "complete": False, "sc_data_version": 1,
"name": "a", "product": "a", "data": {"name": "a", "hashFunction": "sha512", "schema_version": 1, "extv": "2.0"},
"read_only": False, "data_version": 1, "signoffs": {"bill": "releng"}, "required_signoffs": {"releng": 1},
},
{
"sc_id": 4, "when": 230000000, "scheduled_by": "bill", "change_type": "delete", "complete": False, "sc_data_version": 1,
"name": "ab", "product": None, "data": None, "read_only": False, "data_version": 1, "signoffs": {"ben": "releng", "bill": "releng"},
"required_signoffs": {},
},
]
}
self.assertEquals(json.loads(ret.data), expected)
def testGetScheduledChangesWithCompleted(self):
ret = self._get("/scheduled_changes/releases", qs={"all": 1})
expected = {
"count": 4,
"scheduled_changes": [
{
"sc_id": 1, "when": 4000000000, "scheduled_by": "bill", "change_type": "insert", "complete": False, "sc_data_version": 1,
"name": "m", "product": "m", "data": {"name": "m", "hashFunction": "sha512", "schema_version": 1}, "read_only": False,
"data_version": None, "signoffs": {}, "required_signoffs": {},
},
{
"sc_id": 2, "when": 6000000000, "scheduled_by": "bill", "change_type": "update", "complete": False, "sc_data_version": 1,
"name": "a", "product": "a", "data": {"name": "a", "hashFunction": "sha512", "schema_version": 1, "extv": "2.0"},
"read_only": False, "data_version": 1, "signoffs": {"bill": "releng"}, "required_signoffs": {"releng": 1},
},
{
"sc_id": 3, "when": 10000000, "scheduled_by": "bill", "change_type": "update", "complete": True, "sc_data_version": 2,
"name": "b", "product": "b", "data": {"name": "b", "hashFunction": "sha512", "schema_version": 1}, "read_only": False,
"data_version": 1, "signoffs": {}, "required_signoffs": {},
},
{
"sc_id": 4, "when": 230000000, "scheduled_by": "bill", "change_type": "delete", "complete": False, "sc_data_version": 1,
"name": "ab", "product": None, "data": None, "read_only": False, "data_version": 1, "signoffs": {"ben": "releng", "bill": "releng"},
"required_signoffs": {},
},
]
}
self.assertEquals(json.loads(ret.data), expected)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testAddScheduledChangeExistingRelease(self):
data = {
"when": 2300000000, "name": "d", "data": '{"name": "d", "hashFunction": "sha256", "schema_version": 1}',
"product": "d", "data_version": 1, "change_type": "update"
}
ret = self._post("/scheduled_changes/releases", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"sc_id": 5})
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 5).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 5, "scheduled_by": "bill", "change_type": "update", "complete": False, "data_version": 1, "base_product": "d", "base_read_only": False,
"base_name": "d", "base_data": {"name": "d", "hashFunction": "sha256", "schema_version": 1}, "base_data_version": 1
}
self.assertEquals(db_data, expected)
cond = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 5).execute().fetchall()
self.assertEquals(len(cond), 1)
cond_expected = {"sc_id": 5, "data_version": 1, "when": 2300000000}
self.assertEquals(dict(cond[0]), cond_expected)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testAddScheduledChangeDeleteRelease(self):
data = {
"when": 4200000000, "name": "d", "data_version": 1, "change_type": "delete",
}
ret = self._post("/scheduled_changes/releases", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"sc_id": 5})
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 5).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 5, "scheduled_by": "bill", "change_type": "delete", "complete": False, "data_version": 1, "base_product": None, "base_read_only": False,
"base_name": "d", "base_data": None, "base_data_version": 1,
}
self.assertEquals(db_data, expected)
cond = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 5).execute().fetchall()
self.assertEquals(len(cond), 1)
cond_expected = {"sc_id": 5, "data_version": 1, "when": 4200000000}
self.assertEquals(dict(cond[0]), cond_expected)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testAddScheduledChangeNewRelease(self):
data = {
"when": 5200000000, "name": "q", "data": '{"name": "q", "hashFunction": "sha512", "schema_version": 1}',
"product": "q", "change_type": "insert",
}
ret = self._post("/scheduled_changes/releases", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"sc_id": 5})
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 5).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 5, "scheduled_by": "bill", "change_type": "insert", "complete": False, "data_version": 1, "base_product": "q", "base_read_only": False,
"base_name": "q", "base_data": {"name": "q", "hashFunction": "sha512", "schema_version": 1}, "base_data_version": None,
}
self.assertEquals(db_data, expected)
cond = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 5).execute().fetchall()
self.assertEquals(len(cond), 1)
cond_expected = {"sc_id": 5, "data_version": 1, "when": 5200000000}
self.assertEquals(dict(cond[0]), cond_expected)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateScheduledChangeExistingRelease(self):
data = {
"data": '{"name": "a", "hashFunction": "sha512", "extv": "3.0", "schema_version": 1}', "name": "a",
"data_version": 1, "sc_data_version": 1, "when": 78900000000, "change_type": "update",
}
ret = self._post("/scheduled_changes/releases/2", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"new_data_version": 2})
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 2).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 2, "complete": False, "change_type": "update", "data_version": 2, "scheduled_by": "bill", "base_name": "a", "base_product": "a",
"base_read_only": False, "base_data": {"name": "a", "hashFunction": "sha512", "extv": "3.0", "schema_version": 1},
"base_data_version": 1,
}
self.assertEquals(db_data, expected)
cond = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 2).execute().fetchall()
self.assertEquals(len(cond), 1)
cond_expected = {"sc_id": 2, "data_version": 2, "when": 78900000000}
self.assertEquals(dict(cond[0]), cond_expected)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateScheduledChangeExistingReleaseResetSignOffs(self):
data = {
"name": "ab", "data_version": 1, "sc_data_version": 1, "when": 88900000000, "change_type": "delete"
}
rows = dbo.releases.scheduled_changes.signoffs.t.select().\
where(dbo.releases.scheduled_changes.signoffs.sc_id == 4).execute().fetchall()
self.assertEquals(len(rows), 2)
ret = self._post("/scheduled_changes/releases/4", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"new_data_version": 2})
r = dbo.releases.scheduled_changes.t.select().where(
dbo.releases.scheduled_changes.sc_id == 4).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 4, "complete": False, "change_type": "delete", "data_version": 2, "scheduled_by": "bill",
"base_name": "ab", "base_product": None,
"base_read_only": False,
"base_data": None,
"base_data_version": 1,
}
self.assertEquals(db_data, expected)
rows = dbo.releases.scheduled_changes.signoffs.t.select(). \
where(dbo.releases.scheduled_changes.signoffs.sc_id == 4).execute().fetchall()
self.assertEquals(len(rows), 0)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateScheduledChangeExistingDeleteRelease(self):
data = {
"name": "c",
"data_version": 1, "sc_data_version": 1, "when": 78900000000, "change_type": "delete"
}
ret = self._post("/scheduled_changes/releases/4", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateCompletedScheduledChangeDeleteRelease(self):
data = {
"name": "c",
"data_version": 1, "sc_data_version": 1, "when": 78900000000, "change_type": "delete"
}
ret = self._post("/scheduled_changes/releases/3", data=data)
self.assertEquals(ret.status_code, 400, ret.data)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateCompletedScheduledChangeUpdatingTheRelease(self):
data = {
"data": '{"name": "c", "hashFunction": "sha512", "extv": "3.0", "schema_version": 1}', "name": "c",
"data_version": 1, "sc_data_version": 1, "when": 78900000000, "change_type": "update",
}
ret = self._post("/scheduled_changes/releases/3", data=data)
self.assertEquals(ret.status_code, 400, ret.data)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateScheduledChangeNewRelease(self):
data = {
"data": '{"name": "m", "hashFunction": "sha512", "appv": "4.0", "schema_version": 1}', "name": "m", "product": "m",
"sc_data_version": 1, "change_type": "insert",
}
ret = self._post("/scheduled_changes/releases/1", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"new_data_version": 2})
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 1).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 1, "complete": False, "change_type": "insert", "data_version": 2, "scheduled_by": "bill", "base_name": "m", "base_product": "m",
"base_read_only": False, "base_data": {"name": "m", "hashFunction": "sha512", "appv": "4.0", "schema_version": 1},
"base_data_version": None,
}
self.assertEquals(db_data, expected)
cond = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 1).execute().fetchall()
self.assertEquals(len(cond), 1)
cond_expected = {"sc_id": 1, "data_version": 2, "when": 4000000000}
self.assertEquals(dict(cond[0]), cond_expected)
@mock.patch("time.time", mock.MagicMock(return_value=300))
def testUpdateScheduledChangeNewReleaseChangeName(self):
data = {
"data": '{"name": "mm", "hashFunction": "sha512", "appv": "4.0", "schema_version": 1}', "name": "mm", "product": "mm",
"sc_data_version": 1, "change_type": "insert",
}
ret = self._post("/scheduled_changes/releases/1", data=data)
self.assertEquals(ret.status_code, 200, ret.data)
self.assertEquals(json.loads(ret.data), {"new_data_version": 2})
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 1).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 1, "complete": False, "change_type": "insert", "data_version": 2, "scheduled_by": "bill", "base_name": "mm", "base_product": "mm",
"base_read_only": False, "base_data": {"name": "mm", "hashFunction": "sha512", "appv": "4.0", "schema_version": 1},
"base_data_version": None,
}
self.assertEquals(db_data, expected)
cond = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 1).execute().fetchall()
self.assertEquals(len(cond), 1)
cond_expected = {"sc_id": 1, "data_version": 2, "when": 4000000000}
self.assertEquals(dict(cond[0]), cond_expected)
def testDeleteScheduledChange(self):
ret = self._delete("/scheduled_changes/releases/2", qs={"data_version": 1})
self.assertEquals(ret.status_code, 200, ret.data)
got = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 2).execute().fetchall()
self.assertEquals(got, [])
cond_got = dbo.releases.scheduled_changes.conditions.t.select().where(dbo.releases.scheduled_changes.conditions.sc_id == 2).execute().fetchall()
self.assertEquals(cond_got, [])
def testEnactScheduledChangeExistingRelease(self):
ret = self._post("/scheduled_changes/releases/2/enact")
self.assertEquals(ret.status_code, 200, ret.data)
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 2).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 2, "complete": True, "data_version": 2, "scheduled_by": "bill", "change_type": "update", "base_name": "a", "base_product": "a",
"base_read_only": False, "base_data": {"name": "a", "hashFunction": "sha512", "schema_version": 1, "extv": "2.0"},
"base_data_version": 1,
}
self.assertEquals(db_data, expected)
base_row = dict(dbo.releases.t.select().where(dbo.releases.name == "a").execute().fetchall()[0])
base_expected = {
"name": "a", "product": "a", "read_only": False,
"data": {"name": "a", "hashFunction": "sha512", "schema_version": 1, "extv": "2.0"}, "data_version": 2,
}
self.assertEquals(base_row, base_expected)
def testEnactScheduledChangeNewRelease(self):
ret = self._post("/scheduled_changes/releases/1/enact")
self.assertEquals(ret.status_code, 200, ret.data)
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 1).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 1, "complete": True, "data_version": 2, "scheduled_by": "bill", "change_type": "insert", "base_name": "m", "base_product": "m",
"base_read_only": False, "base_data": {"name": "m", "hashFunction": "sha512", "schema_version": 1},
"base_data_version": None,
}
self.assertEquals(db_data, expected)
base_row = dict(dbo.releases.t.select().where(dbo.releases.name == "m").execute().fetchall()[0])
base_expected = {
"name": "m", "product": "m", "read_only": False,
"data": {"name": "m", "hashFunction": "sha512", "schema_version": 1}, "data_version": 1,
}
self.assertEquals(base_row, base_expected)
def testEnactScheduledChangeDeleteRelease(self):
ret = self._post("/scheduled_changes/releases/4/enact")
self.assertEquals(ret.status_code, 200, ret.data)
r = dbo.releases.scheduled_changes.t.select().where(dbo.releases.scheduled_changes.sc_id == 4).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
expected = {
"sc_id": 4, "complete": True, "data_version": 2, "scheduled_by": "bill", "change_type": "delete", "base_name": "ab", "base_product": None,
"base_read_only": False, "base_data": None, "base_data_version": 1,
}
self.assertEquals(db_data, expected)
base_row = dbo.releases.t.select().where(dbo.releases.name == "ab").execute().fetchall()
self.assertEquals(len(base_row), 0)
def testGetScheduledChangeHistoryRevisions(self):
ret = self._get("/scheduled_changes/releases/3/revisions")
self.assertEquals(ret.status_code, 200, ret.data)
ret = json.loads(ret.data)
expected = {
"count": 2,
"revisions": [
{
"change_id": 7, "changed_by": "bill", "timestamp": 25, "sc_id": 3, "scheduled_by": "bill", "change_type": "update", "data_version": 1,
"name": "b", "product": "b", "data": {"name": "b", "hashFunction": "sha512", "schema_version": 1}, "read_only": False,
"complete": True, "when": 10000000, "sc_data_version": 2,
},
{
"change_id": 6, "changed_by": "bill", "timestamp": 7, "sc_id": 3, "scheduled_by": "bill", "change_type": "update", "data_version": 1,
"name": "b", "product": "b", "data": {"name": "b", "hashFunction": "sha512", "schema_version": 1}, "read_only": False,
"complete": False, "when": 10000000, "sc_data_version": 1,
},
],
}
self.assertEquals(ret, expected)
@mock.patch("time.time", mock.MagicMock(return_value=100))
def testSignoffWithPermission(self):
ret = self._post("/scheduled_changes/releases/1/signoffs", data=dict(role="qa"), username="bill")
self.assertEquals(ret.status_code, 200, ret.data)
r = dbo.releases.scheduled_changes.signoffs.t.select().where(dbo.releases.scheduled_changes.signoffs.sc_id == 1).execute().fetchall()
self.assertEquals(len(r), 1)
db_data = dict(r[0])
self.assertEquals(db_data, {"sc_id": 1, "username": "bill", "role": "qa"})
r = dbo.releases.scheduled_changes.signoffs.history.t.select().where(dbo.releases.scheduled_changes.signoffs.history.sc_id == 1).execute().fetchall()
self.assertEquals(len(r), 2)
self.assertEquals(dict(r[0]), {"change_id": 3, "changed_by": "bill", "timestamp": 99999, "sc_id": 1, "username": "bill", "role": None})
self.assertEquals(dict(r[1]), {"change_id": 4, "changed_by": "bill", "timestamp": 100000, "sc_id": 1, "username": "bill", "role": "qa"})
def testSignoffWithoutPermission(self):
ret = self._post("/scheduled_changes/releases/1/signoffs", data=dict(role="relman"), username="bill")
self.assertEquals(ret.status_code, 403, ret.data)
def testSignoffWithoutRole(self):
ret = self._post("/scheduled_changes/releases/1/signoffs", data=dict(lorem="random"), username="bill")
self.assertEquals(ret.status_code, 400, ret.data)
def testRevokeSignoff(self):
ret = self._delete("/scheduled_changes/releases/2/signoffs", username="bill")
self.assertEquals(ret.status_code, 200, ret.data)
r = dbo.releases.scheduled_changes.signoffs.t.select().where(dbo.releases.scheduled_changes.signoffs.sc_id == 1).execute().fetchall()
self.assertEquals(len(r), 0)
class TestReleaseHistoryView(ViewTest):
def testGetRevisions(self):
# Make some changes to a release
data = json.dumps(dict(detailsUrl='blah', fakePartials=True, schema_version=1))
ret = self._post(
'/releases/d',
data=dict(
data=data,
product='d',
data_version=1,
)
)
self.assertStatusCode(ret, 200)
ret = self._post(
'/releases/d',
data=dict(
data=data,
product='d',
data_version=2,
)
)
self.assertStatusCode(ret, 200)
url = '/releases/d/revisions'
ret = self._get(url)
self.assertEquals(ret.status_code, 200, msg=ret.data)
data = json.loads(ret.data)
self.assertEquals(data["count"], 2)
self.assertEquals(len(data["revisions"]), 2)
with self.assertRaises(KeyError):
data['data']
def testPostRevisionRollback(self):
# Make some changes to a release
data = json.dumps(dict(detailsUrl='beep', fakePartials=True, schema_version=1))
ret = self._post(
'/releases/d',
data=dict(
data=data,
product='d',
data_version=1,
)
)
self.assertStatusCode(ret, 200)
data = json.dumps(dict(detailsUrl='boop', fakePartials=False, schema_version=1))
ret = self._post(
'/releases/d',
data=dict(
data=data,
product='d',
data_version=2,
)
)
self.assertStatusCode(ret, 200)
table = dbo.releases
row, = table.select(where=[table.name == 'd'])
self.assertEqual(row['data_version'], 3)
data = row['data']
self.assertEqual(data['fakePartials'], False)
self.assertEqual(data['detailsUrl'], 'boop')
query = table.history.t.count()
count, = query.execute().first()
self.assertEqual(count, 2)
row, = table.history.select(
where=[(table.history.product == 'd') & (table.history.data_version == 2)],
limit=1
)
change_id = row['change_id']
assert row['name'] == 'd' # one of the fixtures
url = '/releases/d/revisions'
ret = self._post(url, {'change_id': change_id})
self.assertEquals(ret.status_code, 200, ret.data)
query = table.history.t.count()
count, = query.execute().first()
self.assertEqual(count, 3)
row, = table.select(where=[table.name == 'd'])
self.assertEqual(row['data_version'], 4)
data = row['data']
self.assertEqual(data['fakePartials'], True)
self.assertEqual(data['detailsUrl'], 'beep')
def testPostRevisionRollbackBadRequests(self):
data = json.dumps(dict(detailsUrl='beep', fakePartials=True, schema_version=1))
ret = self._post(
'/releases/d',
data=dict(
data=data,
product='d',
data_version=1,
)
)
self.assertStatusCode(ret, 200)
# when posting you need both the release name and the change_id
ret = self._post('/releases/CRAZYNAME/revisions', data={'change_id': 1})
self.assertEquals(ret.status_code, 404, ret.data)
url = '/releases/d/revisions'
ret = self._post(url, {'change_id': 999})
self.assertEquals(ret.status_code, 400)
ret = self._post(url)
self.assertEquals(ret.status_code, 400)
class TestSingleColumn_JSON(ViewTest):
def testGetReleasesSingleColumn(self):
expected_product = ["a", "c", "b", "d"]
expected = dict(count=4, product=expected_product)
ret = self._get("/releases/columns/product")
ret_data = json.loads(ret.data)
self.assertEquals(ret_data['count'], expected['count'])
        self.assertEquals(sorted(ret_data['product']), sorted(expected['product']))
def testGetReleaseColumn404(self):
ret = self.client.get("/releases/columns/blah")
self.assertEquals(ret.status_code, 404)
class TestReadOnlyView(ViewTest):
def testReadOnlyGet(self):
ret = self._get('/releases/b/read_only')
is_read_only = dbo.releases.t.select(dbo.releases.name == 'b').execute().first()['read_only']
self.assertEqual(json.loads(ret.data)['read_only'], is_read_only)
def testReadOnlySetTrueAdmin(self):
data = dict(name='b', read_only=True, product='b', data_version=1)
ret = self._put('/releases/b/read_only', username='bill', data=data)
self.assertStatusCode(ret, 201)
read_only = dbo.releases.isReadOnly(name='b')
self.assertEqual(read_only, True)
def testReadOnlySetTrueNonAdmin(self):
data = dict(name='b', read_only=True, product='b', data_version=1)
ret = self._put('/releases/b/read_only', username='bob', data=data)
self.assertStatusCode(ret, 201)
read_only = dbo.releases.isReadOnly(name='b')
self.assertEqual(read_only, True)
def testReadOnlySetFalseAdmin(self):
dbo.releases.t.update(values=dict(read_only=True, data_version=2)).where(dbo.releases.name == "a").execute()
data = dict(name='b', read_only='', product='b', data_version=2)
ret = self._put('/releases/b/read_only', username='bill', data=data)
self.assertStatusCode(ret, 201)
read_only = dbo.releases.isReadOnly(name='b')
self.assertEqual(read_only, False)
def testReadOnlyUnsetWithoutPermissionForProduct(self):
dbo.releases.t.update(values=dict(read_only=True, data_version=2)).where(dbo.releases.name == "a").execute()
data = dict(name='b', read_only='', product='b', data_version=2)
ret = self._put('/releases/b/read_only', username='me', data=data)
self.assertStatusCode(ret, 403)
def testReadOnlyAdminSetAndUnsetFlag(self):
# Setting flag
data = dict(name='b', read_only=True, product='b', data_version=1)
ret = self._put('/releases/b/read_only', username='bill', data=data)
self.assertStatusCode(ret, 201)
# Resetting flag
data = dict(name='b', read_only='', product='b', data_version=2)
ret = self._put('/releases/b/read_only', username='bill', data=data)
self.assertStatusCode(ret, 201)
# Verify reset
read_only = dbo.releases.isReadOnly(name='b')
self.assertEqual(read_only, False)
def testReadOnlyNonAdminCanSetFlagButNotUnset(self):
# Setting read only flag
data_set = dict(name='b', read_only=True, product='b', data_version=1)
ret = self._put('/releases/b/read_only', username='bob', data=data_set)
self.assertStatusCode(ret, 201)
# Verifying if flag was set to true
read_only = dbo.releases.isReadOnly(name='b')
self.assertEqual(read_only, True)
# Resetting flag, which should fail with 403
data_unset = dict(name='b', read_only='', product='b', data_version=2)
ret = self._put('/releases/b/read_only', username='bob', data=data_unset)
self.assertStatusCode(ret, 403)
class TestRuleIdsReturned(ViewTest):
def testPresentRuleIdField(self):
releases = self._get("/releases")
releases_data = json.loads(releases.data)
self.assertTrue('rule_ids' in releases_data['releases'][0])
def testMappingIncluded(self):
rel_name = 'ab'
rule_id = 8
releases = self._get("/releases")
releases_data = json.loads(releases.data)
not_mapped_rel = next(rel for rel in releases_data['releases'] if rel['name'] == rel_name)
self.assertEqual(len(not_mapped_rel['rule_ids']), 0)
self.assertFalse(rule_id in not_mapped_rel['rule_ids'])
dbo.rules.t.insert().execute(id=rule_id, priority=100, version='3.5', buildTarget='d',
backgroundRate=100, mapping=rel_name, update_type='minor', data_version=1)
releases = self._get("/releases")
releases_data = json.loads(releases.data)
mapped_rel = next(rel for rel in releases_data['releases'] if rel['name'] == rel_name)
self.assertEqual(len(mapped_rel['rule_ids']), 1)
self.assertTrue(rule_id in mapped_rel['rule_ids'])
| mpl-2.0 | -5,560,149,751,817,180,000 | 41.393636 | 157 | 0.531345 | false |
ForToffee/UnicornHAT | wordclock.py | 1 | 4259 | #!/usr/bin/env python
# wordclock.py by Carl Monk (@ForToffee)
# https://github.com/ForToffee/UnicornHAT
# no unicorns were harmed in the making of this code
import unicornhat as UH
import time
#global variables
hourPattern = []
minPattern = []
#pre-defined patterns - groups of x,y co-ords - 0,0 is bottom right with GPIO at bottom
fivePattern = [[7,6],[6,6],[4,6],[2,6]]
tenPattern = [[1,7],[1,6],[0,6]]
fiftPattern = [[7,6],[6,6],[5,6],[3,6],[2,6],[1,6],[0,6]]
twenPattern = [[5,7],[4,7],[3,7],[2,7],[1,7],[0,7]]
halfPattern = [[7,7],[6,7],[7,5],[6,5]]
pastPattern = [[4,5],[3,5],[2,5],[1,5]]
toPattern = [[1,5],[0,5]]
#function to light the pixels we need to display the time
#pixels is a list of pixels
def showTime(pixels):
UH.clear()
for coords in pixels:
UH.set_pixel(coords[0],coords[1],255,0,255) #magenta
UH.show() #once pixels set, call .show() to enable them
#function to light the '*' character to show seconds and minutes elapsing
def showTick(m):
colour = []
minPart = m % 5
	# % is modulo, which gives us the remainder of m divided by 5
	# this gives a value 0 - 4, i.e. how far into the current five-minute block we are
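	# e.g. m = 17 -> 17 % 5 == 2, so the '*' is lit blue for that minute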
if m == -1: # for setting the '*' off or black
colour = [0,0,0]
elif minPart == 0: #:m0 or :m5
colour = [255,0,0] #red
elif minPart == 1 : #:m1 or :m6
colour = [0,255,0] #green
elif minPart == 2 : #:m2 or :m7
colour = [0,0,255] #blue
elif minPart == 3 : #:m3 or :m8
colour = [255,255,0] #yellow
elif minPart == 4 : #:m4 or :m9
colour = [0,255,255] #cyan
UH.set_pixel(5,5,colour[0],colour[1],colour[2]) #5,5 is the position of '*'
UH.show()
#takes the current hour and provides the required pattern of letters
def getHourPattern(h,m):
global hourPattern
hourPattern = []
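	# each list below holds the [x, y] LEDs that spell the hour word on the 8x8 grid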
#convert 24hr into 12hr
if h >= 12:
h -= 12
	#if minutes are 35 or more then display will be 'to' the next hour
if m >= 35:
h = h + 1
#special case for 11:35 - 12:00. Hour is 0 to 11 so need to reset to 0
if h == 12:
h = 0
if h == 0: #aka Twelve
hourPattern = [[7,2],[6,2],[5,2],[4,2],[2,2],[1,2]]
elif h == 1:
hourPattern = [[7,3],[6,3],[5,3]]
elif h == 2:
hourPattern = [[7,2],[6,2],[6,1]]
elif h == 3:
hourPattern = [[4,3],[3,3],[2,3],[1,3],[0,3]]
elif h == 4:
hourPattern = [[7,1],[6,1],[5,1],[4,1]]
elif h == 5:
hourPattern = [[3,1],[2,1],[1,1],[0,1]]
elif h == 6:
hourPattern = [[7,0],[6,0],[5,0]]
elif h == 7:
hourPattern = [[4,0],[3,0],[2,0],[1,0],[0,0]]
elif h == 8:
hourPattern = [[4,4],[3,4],[2,4],[1,4],[0,4]]
elif h == 9:
hourPattern = [[7,4],[6,4],[5,4],[4,4]]
elif h == 10:
hourPattern = [[0,4],[0,3],[0,2]]
elif h == 11:
hourPattern = [[5,2],[4,2],[3,2],[2,2],[1,2],[0,2]]
#takes the current minute and provides the required pattern of letters
def getMinutePattern(m):
global minPattern
minPattern = []
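	# each five-minute band maps to the same word on both sides of the half hour,
	# e.g. :05 is "five past" and :55 is "five to" the next hour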
if 10 > m >= 5 or m >= 55:
minPattern = fivePattern
elif 15 > m >= 10 or 55 > m >= 50:
minPattern = tenPattern
elif 20 > m >= 15 or 50 > m >= 45:
minPattern = fiftPattern
elif 25 > m >= 20 or 45 > m >= 40:
minPattern = twenPattern
elif 30 > m >= 25 or 40 > m >= 35:
minPattern = twenPattern + fivePattern
elif 35 > m >= 30:
minPattern = halfPattern
#if time between 5 and 34 we need to show 'past' the hour
if 35 > m >= 5:
minPattern = minPattern + pastPattern
elif m >= 35: #otherwise 'to' the hour
minPattern = minPattern + toPattern
#cycle through a full 12hrs minute by minute
def fullTest():
for n in range(12*60):
		getHourPattern(n // 60, n % 60)
getMinutePattern(n % 60)
showTime(minPattern + hourPattern)
showTick(n)
time.sleep(.25)
#cycle through hours, then minutes
def quickTest():
for n in range(12):
getHourPattern(n, 0)
showTime(hourPattern)
time.sleep(.5)
for n in range(60):
getMinutePattern(n)
showTime(minPattern )
showTick(n)
time.sleep(.25)
#main function
quickTest()
#while True:
# fullTest()
while True:
#get time parts
h = time.localtime().tm_hour
m = time.localtime().tm_min
s = time.localtime().tm_sec
#get patterns
getHourPattern(h, m)
getMinutePattern(m)
#show patterns
showTime(minPattern + hourPattern)
#flash '*' to show time passing, lit every 2 seconds
if s % 2:
showTick(m)
else:
showTick(-1)
time.sleep(1)
| mit | 1,032,687,860,614,343,800 | 23.477011 | 87 | 0.61329 | false |
erikiado/jp2_online | familias/migrations/0004_alumno_tutor.py | 1 | 1205 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-23 05:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('familias', '0003_merge_20170223_0525'),
]
operations = [
migrations.CreateModel(
name='Alumno',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activo', models.BooleanField(default=True)),
('integrante', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='familias.Integrante')),
],
),
migrations.CreateModel(
name='Tutor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('relacion', models.TextField(choices=[('madre', 'Madre'), ('padre', 'Padre'), ('tutor', 'Tutor')])),
('integrante', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='familias.Integrante')),
],
),
]
| mit | -2,830,436,533,046,184,000 | 36.65625 | 124 | 0.587552 | false |
spnow/grr | gui/plugins/cron_view_test.py | 1 | 16455 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
"""Test the cron_view interface."""
import time
from grr.gui import runtests_test
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.aff4_objects import cronjobs
class TestCronView(test_lib.GRRSeleniumTest):
"""Test the Cron view GUI."""
def AddJobStatus(self, job, status):
with self.ACLChecksDisabled():
      with aff4.FACTORY.OpenWithLock(job, token=self.token) as cron_job:
        cron_job.Set(cron_job.Schema.LAST_RUN_TIME(
            rdfvalue.RDFDatetime().Now()))
        cron_job.Set(cron_job.Schema.LAST_RUN_STATUS(status=status))
def setUp(self):
super(TestCronView, self).setUp()
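    # Register the system cron jobs and run them once so the view has data.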
with self.ACLChecksDisabled():
cronjobs.ScheduleSystemCronFlows(token=self.token)
cronjobs.CRON_MANAGER.RunOnce(token=self.token)
def testCronView(self):
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
# Table should contain Last Run
self.WaitUntil(self.IsTextPresent, "Last Run")
# Table should contain system cron jobs
self.WaitUntil(self.IsTextPresent, "GRRVersionBreakDown")
self.WaitUntil(self.IsTextPresent, "LastAccessStats")
self.WaitUntil(self.IsTextPresent, "OSBreakDown")
# Select a Cron.
self.Click("css=td:contains('OSBreakDown')")
# Check that there's one flow in the list.
self.WaitUntil(self.IsElementPresent,
"css=#main_bottomPane td:contains('OSBreakDown')")
def testMessageIsShownWhenNoCronJobSelected(self):
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
self.WaitUntil(self.IsTextPresent,
"Please select a cron job to see the details.")
def testShowsCronJobDetailsOnClick(self):
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Tabs should appear in the bottom pane
self.WaitUntil(self.IsElementPresent, "css=#main_bottomPane #Details")
self.WaitUntil(self.IsElementPresent, "css=#main_bottomPane #Flows")
self.WaitUntil(self.IsTextPresent, "CURRENT_FLOW_URN")
self.WaitUntil(self.IsTextPresent, "CRON_ARGS")
# Click on "Flows" tab
self.Click("css=#main_bottomPane #Flows")
# Click on the first flow and wait for flow details panel to appear.
self.Click("css=#main_bottomPane td:contains('OSBreakDown')")
self.WaitUntil(self.IsTextPresent, "FLOW_STATE")
self.WaitUntil(self.IsTextPresent, "next_states")
self.WaitUntil(self.IsTextPresent, "outstanding_requests")
# Close the panel.
self.Click("css=#main_bottomPane .panel button.close")
self.WaitUntilNot(self.IsTextPresent, "FLOW_STATE")
self.WaitUntilNot(self.IsTextPresent, "next_states")
self.WaitUntilNot(self.IsTextPresent, "outstanding_requests")
def testToolbarStateForDisabledCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.DisableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
self.assertTrue(self.IsElementPresent(
"css=button[name=EnableCronJob]:not([disabled])"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DisableCronJob][disabled]"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DeleteCronJob]:not([disabled])"))
def testToolbarStateForEnabledCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.EnableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
self.assertTrue(self.IsElementPresent(
"css=button[name=EnableCronJob][disabled]"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DisableCronJob]:not([disabled])"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DeleteCronJob]:not([disabled])"))
def testEnableCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.DisableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=EnableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to ENABLE this cron job?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
# This should be rejected now and a form request is made.
self.WaitUntil(self.IsTextPresent, "Create a new approval")
self.Click("css=#acl_dialog button[name=Close]")
# Wait for dialog to disappear.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
with self.ACLChecksDisabled():
self.GrantCronJobApproval(rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=EnableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to ENABLE this cron job?")
# Click on "Proceed" and wait for success label to appear.
# Also check that "Proceed" button gets disabled.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Cron job was ENABLEd successfully!")
self.assertTrue(self.IsElementPresent("css=button[name=Proceed][disabled]"))
# Click on "Cancel" and check that dialog disappears.
self.Click("css=button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
# View should be refreshed automatically.
self.WaitUntil(self.IsTextPresent, "OSBreakDown")
self.WaitUntil(self.IsElementPresent,
"css=tr:contains('OSBreakDown') *[state=enabled]")
def testDisableCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.EnableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=DisableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DISABLE this cron job?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Create a new approval")
self.Click("css=#acl_dialog button[name=Close]")
# Wait for dialog to disappear.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
with self.ACLChecksDisabled():
self.GrantCronJobApproval(rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
# Click on Disable button and check that dialog appears.
self.Click("css=button[name=DisableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DISABLE this cron job?")
# Click on "Proceed" and wait for success label to appear.
# Also check that "Proceed" button gets disabled.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Cron job was DISABLEd successfully!")
self.assertTrue(self.IsElementPresent("css=button[name=Proceed][disabled]"))
# Click on "Cancel" and check that dialog disappears.
self.Click("css=button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
# View should be refreshed automatically.
self.WaitUntil(self.IsTextPresent, "OSBreakDown")
self.WaitUntil(self.IsElementPresent,
"css=tr:contains('OSBreakDown') *[state=disabled]")
def testDeleteCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.EnableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=DeleteCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DELETE this cron job?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Create a new approval")
self.Click("css=#acl_dialog button[name=Close]")
# Wait for dialog to disappear.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
with self.ACLChecksDisabled():
self.GrantCronJobApproval(rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
# Click on Disable button and check that dialog appears.
self.Click("css=button[name=DeleteCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DELETE this cron job?")
# Click on "Proceed" and wait for success label to appear.
# Also check that "Proceed" button gets disabled.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Cron job was DELETEd successfully!")
self.assertTrue(self.IsElementPresent("css=button[name=Proceed][disabled]"))
# Click on "Cancel" and check that dialog disappears.
self.Click("css=button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
# View should be refreshed automatically.
self.WaitUntil(self.IsElementPresent,
"css=#main_topPane td:contains('GRRVersionBreakDown')")
self.WaitUntilNot(self.IsElementPresent,
"css=#main_topPane td:contains('OSBreakDown')")
def testHuntSchedulingWorksCorrectly(self):
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=button[name=ScheduleHuntCronJob]")
self.WaitUntil(self.IsTextPresent, "What to run?")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > ins.jstree-icon")
self.Click("css=#_Filesystem > ins.jstree-icon")
# Click on Fetch Files item in Filesystem flows list
self.WaitUntil(self.IsElementPresent, "link=Fetch Files")
self.Click("link=Fetch Files")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=.Wizard input[id=args-paths-0]")
# Change "path", "pathtype", "depth" and "ignore_errors" values
self.Type("css=.Wizard input[id=args-paths-0]", "/tmp")
self.Select("css=.Wizard select[id=args-pathtype]", "TSK")
self.Type("css=.Wizard input[id=args-max_size]", "42")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
# Configure the hunt to use a collection and also send an email on results.
self.Click("css=.Wizard button:contains('Add Output Plugin')")
self.Select("css=.Wizard select[id=output_1-option]",
"Send an email for each result.")
self.Type("css=.Wizard input[id=output_1-email]",
"test@%s" % config_lib.CONFIG["Logging.domain"])
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Where to run?")
# Create 3 foreman rules
self.WaitUntil(
self.IsElementPresent,
"css=.Wizard select[id=rule_1-option]")
self.Select("css=.Wizard select[id=rule_1-option]",
"Regular Expressions")
self.Select("css=.Wizard select[id=rule_1-attribute_name]",
"System")
self.Type("css=.Wizard input[id=rule_1-attribute_regex]",
"Linux")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
self.Click("css=.Wizard button:contains('Add Rule')")
self.Select("css=.Wizard select[id=rule_2-option]",
"Integer Rule")
self.Select("css=.Wizard select[id=rule_2-attribute_name]",
"Clock")
self.Select("css=.Wizard select[id=rule_2-operator]",
"GREATER_THAN")
self.Type("css=.Wizard input[id=rule_2-value]",
"1336650631137737")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
self.Click("css=.Wizard button:contains('Add Rule')")
self.Select("css=.Wizard select[id=rule_3-option]",
"OSX")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "When to run?")
# Select daily periodicity
self.Type("css=.Wizard input[id=cron-periodicity]", "1d")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Review")
# Check that the arguments summary is present.
self.assertTrue(self.IsTextPresent("Paths"))
self.assertTrue(self.IsTextPresent("/tmp"))
self.assertTrue(self.IsTextPresent("Max size"))
self.assertTrue(self.IsTextPresent("42"))
# Check that output plugins are shown.
self.assertTrue(self.IsTextPresent("EmailPlugin"))
self.assertTrue(self.IsTextPresent("test@%s" %
config_lib.CONFIG["Logging.domain"]))
# Check that rules summary is present.
self.assertTrue(self.IsTextPresent("Regex rules"))
# Check that periodicity information is present in the review.
self.assertTrue(self.IsTextPresent("Hunt Periodicity"))
self.assertTrue(self.IsTextPresent("Hunt will run 1d."))
# Click on "Schedule" button
self.Click("css=.Wizard button.Next")
# Anyone can schedule a hunt but we need an approval to actually start it.
self.WaitUntil(self.IsTextPresent,
"Hunt was successfully scheduled")
# Close the window and check that cron job object was created.
self.Click("css=button.Finish")
# Select newly created cron job.
self.Click("css=td:contains('cron/CreateGenericHuntFlow_')")
# Check that correct details are displayed in cron job details tab.
self.WaitUntil(self.IsTextPresent, "CreateGenericHuntFlow")
self.WaitUntil(self.IsTextPresent, "Flow args")
self.assertTrue(self.IsTextPresent("Paths"))
self.assertTrue(self.IsTextPresent("/tmp"))
self.assertTrue(self.IsTextPresent("Max size"))
self.assertTrue(self.IsTextPresent("42"))
def testStuckCronJobIsHighlighted(self):
# Make sure a lot of time has passed since the last
# execution
with test_lib.Stubber(time, "time", lambda: 0):
self.AddJobStatus("aff4:/cron/OSBreakDown",
rdfvalue.CronJobRunStatus.Status.OK)
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
# OSBreakDown's row should have a 'warn' class
self.WaitUntil(self.IsElementPresent,
"css=tr.warning td:contains('OSBreakDown')")
# Check that only OSBreakDown is highlighted
self.WaitUntilNot(self.IsElementPresent,
"css=tr.warning td:contains('GRRVersionBreakDown')")
def testFailingCronJobIsHighlighted(self):
for _ in range(4):
self.AddJobStatus("aff4:/cron/OSBreakDown",
rdfvalue.CronJobRunStatus.Status.ERROR)
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
# OSBreakDown's row should have an 'error' class
self.WaitUntil(self.IsElementPresent,
"css=tr.error td:contains('OSBreakDown')")
# Check that only OSBreakDown is highlighted
self.WaitUntilNot(self.IsElementPresent,
"css=tr.error td:contains('GRRVersionBreakDown')")
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | -7,085,412,603,742,628,000 | 36.397727 | 80 | 0.682528 | false |
ojosedo/programacion_morada | Pygame/juego/personaje.py | 1 | 11020 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import the function library called 'pygame'
import pygame
import pytmx
import copy
# Definimos algunos colores
NEGRO = [ 0, 0, 0]
BLANCO = [ 255, 255, 255]
VERDE = [ 0, 255, 0]
ROJO = [ 255, 0, 0]
# Constantes en mayúsculas
# Inicializa el motor de juegos
pygame.init()
ancho = 40
alto = 22
ancho_pixeles = 32 * ancho
alto_pixeles = 32 * alto
dimensiones = [ancho_pixeles, alto_pixeles]
# Abrir la pantalla (otra opción es open_window)
pantalla = pygame.display.set_mode(dimensiones)
pygame.display.set_caption("Mi primer juego")
# ESTE ES EL BUCLE PRINCIPAL DEL PROGRAMA
pygame.mouse.set_visible(False)
#Itera hasta que el usuario pincha sobre el botón de cierre.
hecho = False
informacion = False
# Se usa para gestionar cuan rápido se actualiza la pantalla
reloj = pygame.time.Clock()
class SpriteSheet(object):
""" Clase usada para sacar imágenes de la hoja de sprites."""
# inicializamos la hoja de sprites
sprite_sheet = None
def __init__(self, file_name):
""" Constructor. Le entra el nombre de archivo de la hoja
de sprites. """
# Carga la hoja de datos completa.
self.sprite_sheet = pygame.image.load(file_name).convert()
def get_image(self, fila, columna, ancho, alto):
""" retorna una única imagen
se le pasa la posición (x, y)
y el ancho y alto del sprite. """
# Crea una imagen en blanco
imagen = pygame.Surface([ancho, alto]).convert()
# Copia una parte de la hoja de sprites a la imagen
imagen.blit(self.sprite_sheet, (0, 0), (fila * ancho, columna * alto, ancho, alto))
# Asume que el negro es el color transparente
imagen.set_colorkey(NEGRO)
# Retorna la imagen
return imagen
hoja_completa = SpriteSheet("./imagenes/completa32.png")
class mapa(object):
"""en esta clase se definira el mapa"""
xmin = 0
xmax = 32*ancho
ymin = 0
ymax = 32*alto
#capas = 15
#personaje = 9
#datos = pytmx.load_pygame("./jardin.tmx")
def pintar_mapa(self,pantalla):
for capa in range(self.capas):
if capa == self.personaje:
for personaje in personajes:
personaje.pintese(pantalla)
else:
for x in range(ancho):
for y in range(alto):
imagen = self.datos.get_tile_image(x, y, capa)
if imagen==None:
continue
pantalla.blit(imagen, [x*32 , y*32])
def __init__(self, ruta, n_capas, c_personaje):
self.datos = pytmx.load_pygame(ruta)
self.capas = n_capas
self.personaje = c_personaje
def cambiar_mapa(self, ruta, n_capas, c_personaje):
self.datos = pytmx.load_pygame(ruta)
self.capas = n_capas
self.personaje = c_personaje
print("cambie el mapa, ", self)
class personaje(object):
"""este es tu personaje
le puedes dar atributos"""
direccion=""
x = ancho*16
y = alto*16
velocidad_x = 0
velocidad_y = 0
adelante = hoja_completa.get_image(3, 8, 32, 32)
derecha = hoja_completa.get_image(3,10,32,32)
izquierda = hoja_completa.get_image(3,9,32,32)
atras = hoja_completa.get_image(3,11,32,32)
imagen_actual = adelante
def cambiar_direccion(self,nueva_direccion):
if nueva_direccion == "derecha":
self.imagen_actual = self.derecha
self.direccion = "derecha"
if nueva_direccion == "izquierda":
self.imagen_actual = self.izquierda
self.direccion = "izquierda"
if nueva_direccion == "adelante":
self.imagen_actual = self.adelante
self.direccion = "adelante"
if nueva_direccion == "atras":
self.imagen_actual = self.atras
self.direccion = "atras"
def cambiar_velocidad(self, direccion, velocidad):
self.cambiar_direccion(direccion)
self.pare_seguro()
if direccion == "derecha":
self.velocidad_x += velocidad
if direccion == "izquierda":
self.velocidad_x -= velocidad
if direccion == "adelante":
self.velocidad_y += velocidad
if direccion == "atras":
self.velocidad_y -= velocidad
def pare(self, direccion):
if direccion == self.direccion:
self.velocidad_x = 0
self.velocidad_y = 0
def pare_seguro(self):
self.velocidad_x = 0
self.velocidad_y = 0
def muevase(self, mapa):
if (self.x + self.velocidad_x >= mapa.xmin
and self.x + self.velocidad_x + 16 <= mapa.xmax
and self.y + self.velocidad_y >= mapa.ymin
and self.y + self.velocidad_y + 16 <= mapa.ymax): #dentro del mapa
imagen = mapa.datos.get_tile_image((self.x+self.velocidad_x)//32, (self.y+self.velocidad_y)//32, 0)
if imagen==None:
self.x += self.velocidad_x
self.y += self.velocidad_y
def pintese(self, ventana):
ventana.blit(self.imagen_actual, [(self.x//32)*32, (self.y//32)*32]) #solo se pinta centrado en los cuadros, múltiplos de 32
def cambiar_apariencia(self, disfraz):
if disfraz == "niño":
self.adelante = hoja_completa.get_image(0, 8, 32, 32)
self.derecha = hoja_completa.get_image(0,10,32,32)
self.izquierda = hoja_completa.get_image(0,9,32,32)
self.atras = hoja_completa.get_image(0,11,32,32)
elif disfraz == "esqueleto":
self.adelante = hoja_completa.get_image(6, 8, 32, 32)
self.derecha = hoja_completa.get_image(6,10,32,32)
self.izquierda = hoja_completa.get_image(6,9,32,32)
self.atras = hoja_completa.get_image(6,11,32,32)
elif disfraz == "baba":
self.adelante = hoja_completa.get_image(0, 12, 32, 32)
self.derecha = hoja_completa.get_image(0,14,32,32)
self.izquierda = hoja_completa.get_image(0,13,32,32)
self.atras = hoja_completa.get_image(0,15,32,32)
elif disfraz == "murcielago":
self.adelante = hoja_completa.get_image(3, 12, 32, 32)
self.derecha = hoja_completa.get_image(3,14,32,32)
self.izquierda = hoja_completa.get_image(3,13,32,32)
self.atras = hoja_completa.get_image(3,15,32,32)
elif disfraz == "fantasma":
self.adelante = hoja_completa.get_image(6, 12, 32, 32)
self.derecha = hoja_completa.get_image(6,14,32,32)
self.izquierda = hoja_completa.get_image(6,13,32,32)
self.atras = hoja_completa.get_image(6,15,32,32)
elif disfraz == "araña":
self.adelante = hoja_completa.get_image(12, 12, 32, 32)
self.derecha = hoja_completa.get_image(12,14,32,32)
self.izquierda = hoja_completa.get_image(12,13,32,32)
self.atras = hoja_completa.get_image(12,15,32,32)
else:
self.adelante = hoja_completa.get_image(3, 8, 32, 32)
self.derecha = hoja_completa.get_image(3,10,32,32)
self.izquierda = hoja_completa.get_image(3,9,32,32)
self.atras = hoja_completa.get_image(3,11,32,32)
violeta = personaje()
monstruo1 = personaje()
monstruo1.cambiar_apariencia("esqueleto")
monstruo1.cambiar_velocidad("adelante", 5)
monstruo2 = personaje()
monstruo2.cambiar_apariencia("baba")
monstruo2.cambiar_velocidad("derecha", 5)
monstruo3 = personaje()
monstruo3.cambiar_apariencia("murcielago")
monstruo3.cambiar_velocidad("izquierda", 5)
monstruo4 = personaje()
monstruo4.cambiar_apariencia("fantasma")
monstruo4.cambiar_velocidad("atras", 5)
personajes = [violeta, monstruo1, monstruo2, monstruo3, monstruo4]
mapa_actual = mapa("./jardin.tmx", 15, 9)
jardin = mapa("./jardin.tmx", 15, 9)
laberinto = mapa("./laberinto.tmx", 13, 8)
def pintar_texto():
# Seleccionamos la fuente, tamaño, negrita, acostada
fuente = pygame.font.SysFont('Calibri', 25, True, False)
# Rendirazar, mi texto, suavizado, color
texto = fuente.render(str(violeta.x) + " " + str(violeta.y), True, NEGRO)
# Poner en pantalla el texto
pantalla.blit(texto, [0,0])
# -------- Bucle Principal del Programa -----------
while not hecho:
# TODOS LOS EVENTOS DE PROCESAMIENTO DEBERÍAN IR DEBAJO DE ESTE COMENTARIO
for evento in pygame.event.get(): # El usuario hizo algo
if evento.type == pygame.QUIT: # Si el usuario pincha sobre cerrar
hecho = True # Marca que indica que hemos acabado y sale de este bucle
if evento.type == pygame.MOUSEBUTTONDOWN:
pass
if evento.type==pygame.KEYDOWN:
if evento.key==pygame.K_UP:
violeta.cambiar_velocidad("atras", 5)
if evento.key==pygame.K_DOWN:
violeta.cambiar_velocidad("adelante", 5)
if evento.key==pygame.K_RIGHT:
violeta.cambiar_velocidad("derecha", 5)
if evento.key==pygame.K_LEFT:
violeta.cambiar_velocidad("izquierda", 5)
if evento.key==pygame.K_F3:
informacion = not informacion
mapa_actual = laberinto
if evento.key==pygame.K_F2:
mapa_actual = jardin
if evento.type==pygame.KEYUP:
if evento.key==pygame.K_UP:
violeta.pare("atras")
if evento.key==pygame.K_DOWN:
violeta.pare("adelante")
if evento.key==pygame.K_RIGHT:
violeta.pare("derecha")
if evento.key==pygame.K_LEFT:
violeta.pare("izquierda")
# TODOS LOS EVENTOS DE PROCESAMIENTO DEBERÍAN IR ENCIMA DE ESTE COMENTARIO
# TODA LA LÓGICA DEL JUEGO DEBERÍA IR DEBAJO DE ESTE COMENTARIO
for personaje in personajes:
personaje.muevase(mapa_actual)
# TODA LA LÓGICA DEL JUEGO DEBERÍA IR ENCIMA DE ESTE COMENTARIO
# TODO EL CÓDIGO DE DIBUJO DEBERÍA IR DEBAJO DE ESTE COMENTARIO
# Primero limpiamos pantalla. No dibujes por encima de esta linea
# o todo lo que escribas sera borrado por este comando.
pantalla.fill(BLANCO)
#todo(pasto,0,0,16,20)
#todo(pasto,0.5,0.5,15,19)
# pantalla.blit(pasto, [x, y])
mapa_actual.pintar_mapa(pantalla)
# DIBUJEMOS ALGUNAS FIGURAS
# DIBUJEMOS ALGUN TEXTO
if informacion:
pintar_texto()
# Avanza y actualiza la pantalla con lo que hemos dibujado.
pygame.display.flip()
# TODO EL CÓDIGO DE DIBUJO DEBERÍA IR ENCIMA DE ESTE COMENTARIO
# Limita a 20 fotogramas por segundo (frames per second)
reloj.tick(30)
# Cierra la ventana.
# Si olvidas poner esta linea el programa se 'colgara'.
pygame.quit()
| gpl-3.0 | 4,637,477,412,209,843,000 | 34.025478 | 132 | 0.602019 | false |
paul-jean/ud858 | Lesson_2/000_Hello_Endpoints/helloworld_api.py | 1 | 1742 | """Hello World API implemented using Google Cloud Endpoints.
Contains declarations of endpoint, endpoint methods,
as well as the ProtoRPC message class and container required
for endpoint method definition.
"""
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
# If the request contains path or querystring arguments,
# you cannot use a simple Message class.
# Instead, you must use a ResourceContainer class
REQUEST_CONTAINER = endpoints.ResourceContainer(
message_types.VoidMessage,
name=messages.StringField(1)
)
REQUEST_GREETING_CONTAINER = endpoints.ResourceContainer(
period=messages.StringField(1),
name=messages.StringField(2)
)
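# Illustrative request, assuming the default Cloud Endpoints URL layout
# (path/query arguments reach the method through the ResourceContainer fields):
#   GET /_ah/api/helloworldendpoints/v1/greetByPeriod?period=morning&name=Ada
#   -> {"greeting": "Good morning, Ada!"}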
package = 'Hello'
class Hello(messages.Message):
"""String that stores a message."""
greeting = messages.StringField(1)
@endpoints.api(name='helloworldendpoints', version='v1')
class HelloWorldApi(remote.Service):
"""Helloworld API v1."""
@endpoints.method(message_types.VoidMessage, Hello,
path = "sayHello", http_method='GET', name = "sayHello")
def say_hello(self, request):
return Hello(greeting="Hello World")
@endpoints.method(REQUEST_CONTAINER, Hello,
path = "sayHelloByName", http_method='GET', name = "sayHelloByName")
def say_hello_by_name(self, request):
greet = "Hello {}".format(request.name)
return Hello(greeting=greet)
@endpoints.method(REQUEST_GREETING_CONTAINER, Hello,
path = "greetByPeriod", http_method='GET', name = "greetByPeriod")
def greet_by_period(self, request):
greet = "Good {}, {}!".format(request.period, request.name)
return Hello(greeting=greet)
APPLICATION = endpoints.api_server([HelloWorldApi])
| gpl-3.0 | -2,053,519,090,300,488,000 | 30.672727 | 74 | 0.729047 | false |
blurstudio/cross3d | cross3d/abstract/abstractapplication.py | 1 | 8664 | """
The AbstractApplication class will define all operations for application
interaction. It is a singleton class, so calling cross3d.Application() will
always return the same instance of Application. One of its main functions
is connecting application callbacks to cross3d.Dispatch. The
AbstractApplication is a QObject instance and any changes to the scene
data can be controlled by connecting to the signals defined here. When
subclassing the AbstractScene, methods tagged as @abstractmethod will be
required to be overwritten. Methods tagged with [virtual] are flagged
such that additional operations could be required based on the needs of
the method. All @abstractmethod methods MUST be implemented in a subclass.
"""
import re
import cross3d
from PyQt4.QtCore import QObject
from cross3d import abstractmethod
from contextlib import contextmanager
dispatch = None
class AbstractApplication(QObject):
"""
The Application class will define all operations for application
interaction. It is a singleton class, so calling cross3d.Application() will
always return the same instance of Application. One of its main functions
is connecting application callbacks to cross3d.Dispatch. The
Application is a QObject instance and any changes to the scene
data can be controlled by connecting to the signals defined here. When
subclassing the Scene, methods tagged as @abstractmethod will be
required to be overwritten. Methods tagged with [virtual] are flagged
such that additional operations could be required based on the needs of
the method. All @abstractmethod methods MUST be implemented in a subclass.
"""
_instance = None
_blockRefresh = False
def __init__(self):
QObject.__init__(self)
self._objectToBeDeleted = None
def connect(self):
"""
Responsible for setting up the application to connect to signals
using :meth:`cross3d.Dispatch.connectCallback`. Connect is
called when the first :class:`cross3d.Dispatch` signal is
connected.
:return: connection success
:rtype: bool
"""
# create a signal linking between 2 signals
import cross3d
global dispatch
dispatch = cross3d.dispatch
dispatch.linkSignals('sceneNewRequested', 'scenePreInvalidated')
dispatch.linkSignals('sceneOpenRequested', 'scenePreInvalidated')
dispatch.linkSignals('sceneMergeRequested', 'scenePreInvalidated')
# dispatch.linkSignals('sceneReferenceRequested', 'scenePreInvalidated')
dispatch.linkSignals('scenePreReset', 'scenePreInvalidated')
dispatch.linkSignals('sceneImportRequested', 'scenePreInvalidated')
dispatch.linkSignals('sceneNewFinished', 'sceneInvalidated')
dispatch.linkSignals('sceneOpenFinished', 'sceneInvalidated')
dispatch.linkSignals('sceneMergeFinished', 'sceneInvalidated')
# dispatch.linkSignals('sceneReferenceFinished', 'sceneInvalidated')
dispatch.linkSignals('sceneReset', 'sceneInvalidated')
dispatch.linkSignals('sceneImportFinished', 'sceneInvalidated')
dispatch.linkSignals('objectCreated', 'newObject')
dispatch.linkSignals('objectCloned', 'newObject')
dispatch.linkSignals('objectAdded', 'newObject')
return True
@abstractmethod
def imageSequenceRegex(self):
return re.compile('')
def conformObjectName(self, name):
return re.sub('[^%s]' % self.allowedCharacters(), '_', name)
@abstractmethod
def allowedCharacters(self):
return '.'
def clipboardCopyText(self, text):
""" Set the provided text to the system clipboard so it can be pasted
This function is used because QApplication.clipboard sometimes deadlocks in some
applications like XSI.
Args:
text (str): Set the text in the paste buffer to this text.
"""
from PyQt4.QtGui import QApplication
QApplication.clipboard().setText(text)
@abstractmethod
def connectCallback(self, signal):
"""
Connects a single callback. This allows cross3d to only have to
respond to callbacks that tools actually need, instead of all
callbacks. Called the first time a signal is connected to
this callback.
"""
return
def disconnect(self):
"""
Responsible for disabling all changes made in
:meth:`cross3d.Application.connect`. Disconnect is called when
the last :class:`cross3d.Dispatch` signal is disconnected.
"""
return
@abstractmethod
def disconnectCallback(self, signal):
"""
Disconnect a single callback when it is no longer used. Called
when the last signal for this callback is disconnected.
"""
return
def preDeleteObject(self, *args):
"""
Pre-process the object that is going to be deleted.
"""
return
def postDeleteObject(self, *args):
"""
Emits the signal that a object has been deleted. This method is
used for applications like max that generate a pre and post
delete signal.
"""
dispatch.objectPostDelete.emit()
@abstractmethod
def year(self):
"""
Returns the version year of the software.
:return: version number
:rtyp: int
"""
return 0
@abstractmethod
def version(self, major=True):
"""
Returns the version of the software.
:return: version number
:rtyp: various
"""
return 0 if major else '0.0.0'
@abstractmethod
def autokey(self):
return False
@abstractmethod
def exportAlembic(self, filename, **kwargs):
return False
@abstractmethod
def importAlembic(self, filename, **kwargs):
return False
@abstractmethod
def installDir(self):
""" Returns the path to the current application's install directory.
:return: path string
:rtyp: str
"""
import sys
import os
return os.path.dirname(sys.executable)
@abstractmethod
def nameSpaceSeparator(self):
return ''
@abstractmethod
def name(self):
"""
Returns the unique name of the software.
"""
return ''
@abstractmethod
def refresh(self):
return False
@abstractmethod
def id(self):
"""
Returns a unique version/bits string information that will
represent the exact version of the software being run.
:rtype: str
"""
return ''
@abstractmethod
def animationClipExtension(self):
return ''
@abstractmethod
def sceneFileExtension(self):
return ''
@abstractmethod
def modelFileExtension(self):
return ''
@abstractmethod
def nameAndVersion(self):
"""
Returns the unique name and version format needed for Assburner.
:rtype: str
"""
return ''
@abstractmethod
def log(self, message):
print message
def isSilent(self):
"""
Returns whether the application is currently running in silent mode.
"""
return False
def undoContext(self, name):
"""
Returns a context guard for the undo stack. Everything that takes
place within this context guard will appear as a single undo
operation in the stack.
"""
return cross3d.UndoContext(name)
def openUndo(self, name):
"""
Opens a new undo context. It is important that the user takes care
to call closeUndo() to close the context, even if an error or
exception occurs; otherwise, the undo stack will remain open and
unusable.
"""
cross3d.UndoContext.openUndo(name)
def closeUndo(self):
"""
Close the undo context. This call should always follw a call to
openUndo().
"""
cross3d.UndoContext.closeUndo()
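    # Typical pairing (sketch): prefer the undoContext() context manager where
    # possible, otherwise guard closeUndo() with try/finally:
    #
    #     with application.undoContext('My operation'):
    #         ...  # scene edits collapse into one undo entry
    #
    #     application.openUndo('My operation')
    #     try:
    #         ...  # scene edits
    #     finally:
    #         application.closeUndo()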
@contextmanager
def blockRefreshContext(self, blockRefresh=True):
orig_blockRefresh = self.blockRefresh()
self.setBlockRefresh(blockRefresh)
yield
self.setBlockRefresh(orig_blockRefresh)
def blockRefresh(self):
"""
If returns true, the refresh method will not refresh.
"""
return self._blockRefresh
def setBlockRefresh(self, blockRefresh):
"""
If set to true, the refresh method will not refresh.
"""
if self._blockRefresh != blockRefresh:
self._blockRefresh = blockRefresh
cross3d.Scene().setUpdatesEnabled(not blockRefresh)
return True
return False
def shouldBlockSignal(self, signal, default):
""" Allows the Application to conditionally block a signal.
Normally you should pass cross3d.dispatch.signalsBlocked() to default.
In general if default is True this method should just return True. This will
prevent unexpected signal emits when a script called
cross3d.dispatch.blockSignals(True) to block all signals.
Args:
signal (str): The name of the signal to check if it should be blocked.
default (bool): Returned if signal doesn't match any requirements.
Returns:
bool: If the signal should be blocked.
"""
return default
# register the symbol
cross3d.registerSymbol('Application', AbstractApplication, ifNotFound=True)
# Creating a single instance of Application for all code to use.
cross3d.registerSymbol('application', AbstractApplication(), ifNotFound=True)
| mit | 2,690,035,886,214,266,000 | 26.075 | 82 | 0.742267 | false |
mezuro/kalibro_client_py | tests/processor/test_hotspot_metric_result.py | 1 | 1774 | from unittest import TestCase
from nose.tools import assert_equal, assert_true, raises
from mock import patch
from tests.factories import HotspotMetricResultFactory
from tests.helpers import not_raises
class TestHotspotMetricResult(TestCase):
def setUp(self):
self.subject = HotspotMetricResultFactory.build()
def test_properties_getters(self):
assert_true(hasattr(self.subject, 'line_number'))
assert_true(hasattr(self.subject, 'message'))
@not_raises((AttributeError, ValueError))
def test_properties_setters(self):
self.subject.line_number = 1
self.subject.message = "test message"
@raises(ValueError)
def test_properties_setters_with_invalid_parameters(self):
self.subject.line_number = "string"
def test_asdict(self):
dict_ = self.subject._asdict()
assert_equal(None, dict_["value"])
assert_equal(self.subject.metric_configuration_id, dict_["metric_configuration_id"])
assert_equal(self.subject.line_number, dict_["line_number"])
assert_equal(self.subject.message, dict_["message"])
def test_related_results(self):
related_results = [HotspotMetricResultFactory.build(id=id_) for id_ in range(3)]
related_results_hash = {'hotspot_metric_results': [related_result._asdict()
for related_result in related_results]}
with patch.object(self.subject, 'request', return_value=related_results_hash) as request_mock:
assert_equal(self.subject.related_results(), related_results)
request_mock.assert_called_once_with(action=':id/related_results', params={'id': self.subject.id},
method='get')
| lgpl-3.0 | -1,097,465,209,249,219,600 | 40.255814 | 110 | 0.657835 | false |
EconForge/dolo | dolo/misc/caching.py | 1 | 4370 | import functools
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **kargs):
targs = (e for e in args)
hh = tuple(hashable(e) for e in targs)
h2 = hashable(kargs)
h = hash((hh, h2))
try:
return self.cache[h]
except KeyError:
value = self.func(*args, **kargs)
self.cache[h] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args, **kargs)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
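# Minimal usage sketch for @memoized (the decorated function is illustrative):
#
#     @memoized
#     def solve(model, order=2):
#         ...
#
# Repeated calls with equal arguments return the cached result; unhashable
# arguments (lists, dicts, numpy arrays) are converted by hashable() below.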
class cachedondisk(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
# create caching direcory if it is not there already
import os
if not os.path.isdir(".cache"):
os.mkdir(".cache")
self.func = func
self.fname = func.__name__
def __call__(self, *args, **kargs):
import pickle
hh = tuple(hashable(e) for e in args)
h2 = hashable(kargs)
h = hash((hh, h2))
try:
with open(".cache/{0}.{1}.pickle".format(self.fname, h), "rb") as f:
value = pickle.load(f)
return value
except IOError:
value = self.func(*args, **kargs)
if value is not None: # should there be other kinds of error values
# write file with h
with open(".cache/{0}.{1}.pickle".format(self.fname, h), "wb") as f:
pickle.dump(value, f)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args, **kargs)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
def clear_cache(function_name=None):
import os
try:
if function_name:
os.system("rm -rf .cache/{}.*.pickle".format(function_name))
else:
os.system("rm -rf .cache/*.pickle")
except:
pass
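# Usage sketch for @cachedondisk (function name is illustrative): results are
# pickled under .cache/<function>.<hash>.pickle and reused across runs.
#
#     @cachedondisk
#     def simulate(params):
#         ...
#
#     clear_cache('simulate')   # drop cached results for one function
#     clear_cache()             # drop everything under .cache/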
import os
class DiskDictionary:
def __init__(self, directory=".cache", funname="fun"):
self.funname = funname
if not os.path.isdir(directory):
os.mkdir(directory)
self.directory = directory
def get_filename(self, key):
import pickle
hh = tuple(hashable(k) for k in key)
h = hash(hh)
filename = "{0}/{1}.{2}.pickle".format(self.directory, self.funname, h)
return filename
def __setitem__(self, key, value):
import pickle
filename = self.get_filename(key)
try:
            with open(filename, "wb") as f:
pickle.dump(value, f)
except TypeError as e:
raise e
def get(self, item):
import pickle
filename = self.get_filename(item)
try:
            with open(filename, "rb") as f:
value = pickle.load(f)
return value
except:
return None
import collections.abc
def hashable(obj):
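    # Recursively converts unhashable containers (numpy arrays, dicts, lists)
    # into hashable equivalents (tuples / frozensets) so they can be used as
    # cache keys by memoized / cachedondisk above.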
if hasattr(obj, "flatten"): # for numpy arrays
return tuple(obj.flatten().tolist())
if isinstance(obj, collections.abc.Hashable):
return obj
if isinstance(obj, collections.abc.Mapping):
items = [(k, hashable(v)) for (k, v) in obj.items()]
return frozenset(items)
if isinstance(obj, collections.abc.Iterable):
return tuple([hashable(item) for item in obj])
return TypeError(type(obj))
| bsd-2-clause | 8,237,001,637,224,577,000 | 27.376623 | 84 | 0.556979 | false |
daniellawrence/pyspeccheck | speccheck/file.py | 1 | 2560 | #!/usr/bin/env python
from .util import Spec
class File(Spec):
STATES = [
"directory", "file",
"owned_by",
"smaller_than", "larger_than"
]
def __init__(self, path):
self.path = path
self.state = {}
self.get_state()
self.WIN = "File %s is %%s" % self.path
def get_state(self):
import os
import stat
import pwd
import grp
s = os.stat(self.path)
self.state = {
'st_mode': s.st_mode,
'st_ino': s.st_ino,
'st_dev': s.st_dev,
'st_nlink': s.st_nlink,
'st_uid': s.st_uid,
'st_gid': s.st_gid,
'st_size': s.st_size,
'user': pwd.getpwuid(s.st_uid)[0],
'group': grp.getgrgid(s.st_gid)[0],
'directory': stat.S_ISDIR(s.st_mode),
'file': stat.S_ISREG(s.st_mode),
'full_mode': oct(stat.S_IMODE(s.st_mode)),
'mode': oct(stat.S_IMODE(s.st_mode))[1:],
}
def sb_smaller_than(self, size):
size_map = {
'k': 1024,
'm': 1024*1024,
'g': 1024*1024*1024
}
real_size = self.state['st_size']
size_units = size[-1].lower()
size_count = int(size[0:-1])
expected_size = size_count * size_map[size_units]
if expected_size > real_size:
return True, "File %s is smaller than %s" % (self.path, size)
else:
return False, "File %s is larger than %s" % (self.path, size)
def sb_larger_than(self, size):
x, msg = self.sb_smaller_than(size)
return x is False, msg
def sb_directory(self, *args):
if self._make_sure(self.state['directory']):
return True
return "%s is not a directory" % (self.path)
def sb_file(self, *args):
if self._make_sure(self.state['file']):
return True
return "%s is not a file" % (self.path)
def sb_owned_by(self, desired_owner):
user = None
group = None
problems = []
if ':' in desired_owner:
(user, group) = desired_owner.split(':')
else:
user = desired_owner
if user and self.state['user'] != user:
problems.append("owned %s not %s" % (self.state['user'], user))
if group and self.state['group'] != group:
problems.append("group %s not %s" % (self.state['group'], group))
if problems:
return ' and '.join(problems)
return True
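# Minimal usage sketch (the path is illustrative); each sb_* check returns
# True / (True, message) on success or a description of what failed:
#
#     f = File('/var/log')
#     f.sb_directory()              # True when /var/log is a directory
#     f.sb_owned_by('root:root')    # True, or an "owned ... not root" message
#     f.sb_smaller_than('10m')      # (bool, message) size comparison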
| mit | -4,272,780,294,364,306,000 | 27.764045 | 77 | 0.494141 | false |
ahmadcahyana/xyberville | xyberville/apps/sales/migrations/0001_initial.py | 1 | 2125 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('quantity', models.PositiveSmallIntegerField()),
('price', models.FloatField(blank=True, null=True)),
('tax_amount', models.FloatField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Sale',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('number', models.CharField(max_length=8, db_index=True)),
('status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Paid'), (3, 'Completed'), (4, 'Canceled'), (5, 'Refunded')])),
('notes', models.TextField(blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False)),
('price', models.FloatField(blank=True, null=True)),
('discount', models.FloatField(blank=True, null=True)),
('amount_to_collect', models.FloatField(blank=True, null=True)),
('paid', models.DateTimeField(blank=True, null=True)),
('completed', models.DateTimeField(blank=True, null=True)),
('canceled', models.DateTimeField(blank=True, null=True)),
('cancellation_note', models.TextField(blank=True)),
('refunded', models.DateTimeField(blank=True, null=True)),
('refund_notes', models.TextField(blank=True)),
('rating', models.PositiveSmallIntegerField(blank=True, null=True, choices=[(1, '1 - Bad'), (2, '2'), (3, '3 - Average'), (4, '4'), (5, '5 - Best')])),
],
),
]
| mit | -8,822,335,561,830,740,000 | 47.295455 | 167 | 0.568941 | false |
sserrot/champion_relationships | venv/Lib/site-packages/zmq/tests/test_pubsub.py | 1 | 1089 | # Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from random import Random
import time
from unittest import TestCase
import zmq
from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
class TestPubSub(BaseZMQTestCase):
pass
# We are disabling this test while an issue is being resolved.
def test_basic(self):
s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
s2.setsockopt(zmq.SUBSCRIBE, b'')
time.sleep(0.1)
msg1 = b'message'
s1.send(msg1)
msg2 = s2.recv() # This is blocking!
self.assertEqual(msg1, msg2)
def test_topic(self):
s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
s2.setsockopt(zmq.SUBSCRIBE, b'x')
time.sleep(0.1)
msg1 = b'message'
s1.send(msg1)
self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
msg1 = b'xmessage'
s1.send(msg1)
msg2 = s2.recv()
self.assertEqual(msg1, msg2)
if have_gevent:
class TestPubSubGreen(GreenTest, TestPubSub):
pass
| mit | 4,570,999,008,400,975,400 | 24.928571 | 66 | 0.640037 | false |
jslade/python-monque | examples/speed_test.py | 1 | 1859 | #!/usr/bin/env python
"""
speed_test -- test how fast tasks can be submitted and processed.
The client/submitter side will run forever, submitting dummy tasks (that don't do anything)
in a loop. Periodically (every few seconds), a status line is printed showing how many
tasks are in the queue, how many running, etc.
Start the submitter side as:
python speed_tests.py
Interrupt with Ctrl-C to end the test.
One or more workers should also be started to consume/execute the tasks:
python -m monque.worker --include speed_test --queue speed_test
Test pausing / resuming the queue while the workers are running:
python -m monque.worker --control [pause|resume] --queue speed_test
"""
from monque import Monque
from monque.task import Task
class NopTask(Task):
queue = 'speed_test'
def run(self):
pass
if __name__ == '__main__':
q = Monque()
nop = NopTask()
import time
started = time.time()
def stats():
return { 'posted': q.count_posted(),
'pending': q.count_pending(queue='speed_test'),
'completed': q.count_completed(queue='speed_test'),
'failed': q.count_failed(queue='speed_test') }
prev = stats()
while True:
nop.post()
now = time.time()
elapsed = now - started
if elapsed >= 5:
curr = stats()
print "Posted: %d(%.1f/s), Pending: %d(%.1f/s), Completed: %d(%.1f/s), Failed: %d(%.1f/s)" % \
(curr['posted'], (curr['posted']-prev['posted'])/elapsed,
curr['pending'], (curr['pending']-prev['pending'])/elapsed,
curr['completed'], (curr['completed']-prev['completed'])/elapsed,
curr['failed'], (curr['failed']-prev['failed'])/elapsed)
prev = curr
started = now
| mit | 8,231,868,192,520,842,000 | 27.6 | 106 | 0.592792 | false |
olkku/tf-info | apps/voteresults/views.py | 1 | 1866 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.utils import timezone
from django.conf import settings
from django.views.decorators.cache import cache_page
import urllib2
"""
Takes in results from a vote in the following format:
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body>
0:1<br>1:0<br>2:0<br>3:0<br>4:0<br>5:0<br>9:0<br></body>
</html>
Parses and adds colors
"""
class Result:
def __init__(self, voteid, count):
self.id = int (voteid)
self.count = int(count)
self.color = "#0F0F0F"
self.size = 0
self.pos = 0
def add_votes(self, add_count):
self.count += add_count
def set_color(self, color):
self.color = color
@cache_page(60 * 10)
def index(request):
try:
response = urllib2.urlopen(settings.VOTERESULTS_URL)
data = response.read()
except:
return HttpResponse("Unable to access stats.", status=500)
result = data.split("<body>")[1].split("</body>")[0].split("<br>")
votes = []
for x in result:
x = x.replace("\n","")
if len(x) == 0:
continue
xId = x.split(":")[0]
xCount = x.split(":")[1]
if xId == "1" or xId == "2":
votes[0].add_votes(int(xCount))
else:
votes.append(Result(xId, xCount))
for x in votes:
if x.id == 0:
x.set_color("#FF9900")
elif x.id == 3:
x.id = 1
x.set_color("#6699FF")
elif x.id == 4:
x.id = 2
x.set_color("#66FF33")
elif x.id == 5:
x.id = 4
x.set_color("#835C3B")
elif x.id == 9:
x.id = 3
x.set_color("#FF1919")
x.pos = x.id *22
if int(x.count) > 20:
x.size = 95
else:
x.size = int((float(x.count) / 20) * 95)
width = votes[-1].pos + 22 + 10
return render_to_response('voteresults/index.html', {"votes": votes, "width": width}, context_instance=RequestContext(request))
| bsd-3-clause | 1,497,367,023,401,473,300 | 19.282609 | 128 | 0.633976 | false |
srinivasanmit/all-in-all | DSAlgo/LinkedLists/linked_list.py | 1 | 4932 |
class Node() :
def __init__(self, data):
self.data = data
self.next = None
class LinkedList() :
def __init__(self):
self.head = None
self.tail = None
self.ll_length = 0
def appendNode(self, data):
new_node = Node(data)
self.ll_length += 1
if self.head == None :
self.head = new_node
self.tail = new_node
else :
self.tail.next = new_node
self.tail = new_node
def insertNode(self, data, position) :
new_node = Node(data)
curr_node = self.head
i = 1
self.ll_length += 1
if position > self.ll_length :
print "Position can't be greater than the length of LL"
elif position != 1 :
while i < position -1 :
curr_node = curr_node.next
i += 1
new_node.next = curr_node.next
curr_node.next = new_node
else :
new_node.next = curr_node
self.head = new_node
    def insert_into_sorted(self, data) :
        new_node = Node(data)
        self.ll_length += 1
        print "inserting ", data
        # New smallest element (or empty list): the new node becomes the head
        if self.head is None or data < self.head.data :
            new_node.next = self.head
            self.head = new_node
            if new_node.next is None :
                self.tail = new_node
            return
        # Walk forward until the next node holds a value >= data
        curr_node = self.head
        while curr_node.next != None and curr_node.next.data < data :
            curr_node = curr_node.next
        new_node.next = curr_node.next
        curr_node.next = new_node
        # Keep the tail pointer current when appending at the end
        if new_node.next is None :
            self.tail = new_node
def deleteNode(self, data):
curr_node = self.head
prev_node = self.head
if self.head.data == data :
self.head = self.head.next
self.ll_length -= 1
return
while curr_node.next != None :
if curr_node.data == data :
prev_node.next = curr_node.next
del(curr_node)
self.ll_length -= 1
break
prev_node = curr_node
curr_node = curr_node.next
else :
print "Could not find item to delete in the linked list"
def reverseList(self):
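        # Classic iterative reversal: walk the list once, re-pointing each
        # node's .next at the previous node, while also building a separate
        # reversed copy (revList) used for the palindrome check in main.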
tmp_curr_node = curr_node = self.head
prev_node = None
revList = LinkedList()
while curr_node :
tmp_current_node = curr_node.next
curr_node.next = prev_node
prev_node = curr_node
revList.appendNode(curr_node.data)
curr_node = tmp_current_node
self.head = prev_node
return revList
def printNodes(self) :
curr_node = self.head
while curr_node != None :
print curr_node.data
curr_node = curr_node.next
def getLength(self, head_pointer) :
count = 0
while head_pointer :
count += 1
head_pointer = head_pointer.next
return count
def checkPalindromeLists(self, revobj ) :
curr_node = self.head
rev_curr_node = revobj.head
        if self.getLength(self.head) != self.getLength(revobj.head) :
            print "Linked lists are not palindromes"
            return False
while curr_node :
if curr_node.data != rev_curr_node.data :
return False
curr_node = curr_node.next
rev_curr_node = rev_curr_node.next
else :
return True
if __name__ == "__main__":
ll_obj = LinkedList()
print "Appending 1 to the linked list"
ll_obj.appendNode(1)
ll_obj.printNodes()
print "Appending 3 to the linked list"
ll_obj.appendNode(3)
ll_obj.printNodes()
print "Appending 4 to the linked list"
ll_obj.appendNode(4)
ll_obj.printNodes()
print "Appending 5 to the linked list"
ll_obj.appendNode(5)
ll_obj.printNodes()
print "Inserting 2 at position 1 to the linked list"
ll_obj.insertNode(2,1)
ll_obj.printNodes()
    print "Deleting node with data 2 in the linked list"
ll_obj.deleteNode(2)
ll_obj.printNodes()
print "Trying to delete an item not in the list - 20"
ll_obj.deleteNode(20)
ll_obj.printNodes()
rev_obj = ll_obj.reverseList()
print "Printing linked list after reversing"
ll_obj.printNodes()
print "Printing original linked list"
ll_obj.insert_into_sorted(2)
ll_obj.insert_into_sorted(0)
ll_obj.insert_into_sorted(21)
    ll_obj.printNodes()
rev_obj.printNodes()
if ll_obj.checkPalindromeLists(rev_obj) :
print "Linked lists are palindrome"
else :
print "Linked lists are not palindrome"
| gpl-3.0 | 134,500,026,237,291,900 | 30.018868 | 71 | 0.538727 | false |
bhecquet/seleniumRobot-server | snapshotServer/views/TestResultView.py | 1 | 3829 | '''
Created on 4 sept. 2017
@author: worm
'''
from django.views.generic.list import ListView
from snapshotServer.models import TestCaseInSession, StepResult, Snapshot
import json
from snapshotServer.views.LoginRequiredMixinConditional import LoginRequiredMixinConditional
class TestResultView(LoginRequiredMixinConditional, ListView):
"""
View displaying a single test result
"""
template_name = "snapshotServer/testResult.html"
@classmethod
def buildLogStringFromJson(cls, jsonString):
"""
{"name":"Login","type":"step","actions":[
{"messageType":"INFO","name":"everything OK","type":"message"},
{"name":"action2","failed":false,"type":"action"},
{"name":"subStep","type":"step","actions":[
{"messageType":"WARNING","name":"everything in subStep almost OK","type":"message"},
{"name":"action1","failed":false,"type":"action"}
]}
]}
to
<ul>
<li>everything OK</li>
<li>action2</li>
<ul>
<li>everything in subStep almost OK</li>
<li>action1</li>
</ul>
</ul>
"""
logStr = ""
def parseStep(step, firstCall=False):
nonlocal logStr
# do not reprint the main step name as test report will print it
if not firstCall:
logStr += "<li>%s</li>\n" % step['name']
logStr += "<ul>\n"
for action in step['actions']:
if action['type'] == 'message':
logStr += "<div class='message-%s'>%s: %s</div>\n" % (action['messageType'].lower(), action['messageType'], action['name'])
elif action['type'] == 'action':
if action['failed']:
logStr += "<li class='action-failed'>%s</li>\n" % (action['name'],)
else:
logStr += "<li class='action-success'>%s</li>\n" % (action['name'],)
elif action['type'] == 'step':
parseStep(action)
logStr += "</ul>\n"
logsDict = json.loads(jsonString)
parseStep(logsDict, True)
return logStr
def get_queryset(self):
try:
test_case_in_session = self.kwargs['testCaseInSessionId']
test_steps = TestCaseInSession.objects.get(id=test_case_in_session).testSteps.all()
stepSnapshots = {}
for step_result in StepResult.objects.filter(testCase=test_case_in_session, step__in=test_steps).order_by('id'):
# build logs from json string
if step_result.stacktrace:
logs = self.buildLogStringFromJson(step_result.stacktrace)
else:
logs = None
step_result.formattedLogs = logs
try:
stepSnapshots[step_result] = Snapshot.objects.get(stepResult = step_result)
except:
stepSnapshots[step_result] = None
return stepSnapshots
except:
return []
def get_context_data(self, **kwargs):
context = super(TestResultView, self).get_context_data(**kwargs)
context['currentTest'] = TestCaseInSession.objects.get(pk=self.kwargs['testCaseInSessionId'])
if context['currentTest'].stacktrace:
context['stacktrace'] = context['currentTest'].stacktrace.split('\n')
else:
context['stacktrace'] = ['no logs available']
return context | apache-2.0 | 8,277,047,193,499,717,000 | 34.794393 | 143 | 0.511883 | false |
h2oai/sparkling-water | py/tests/integration/external_only/test_stopping.py | 1 | 1727 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from tests.integration.external_only.external_backend_test_utils import *
from tests.integration.integ_test_utils import *
def testStoppingWithoutExplicitStop(integ_spark_conf):
return_code = launch(integ_spark_conf, "examples/tests/H2OContextWithoutExplicitStop.py")
time.sleep(10)
assert "Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):0" in listYarnApps()
assert return_code == 0, "Process ended in a wrong way. It ended with return code " + str(return_code)
def testStoppingWithExplicitStop(integ_spark_conf):
return_code = launch(integ_spark_conf, "examples/tests/H2OContextWithExplicitStop.py")
time.sleep(10)
assert "Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):0" in listYarnApps()
assert return_code == 0, "Process ended in a wrong way. It ended with return code " + str(return_code)
| apache-2.0 | 2,905,388,376,934,338,600 | 48.342857 | 128 | 0.762594 | false |
MattBlack85/dunning-cruncher | utils/func_utils.py | 1 | 4192 | #-*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from django.template.context import Context
from django.template.loader import get_template
from xhtml2pdf import pisa # TODO: Change this when the lib changes.
import StringIO
import os
#===============================================================================
# HELPERS
#===============================================================================
class UnsupportedMediaPathException(Exception):
pass
def fetch_resources(uri, rel):
"""
Callback to allow xhtml2pdf/reportlab to retrieve Images,Stylesheets, etc.
`uri` is the href attribute from the html link element.
`rel` gives a relative path, but it's not used here.
"""
if uri.startswith(settings.MEDIA_URL):
path = os.path.join(settings.MEDIA_ROOT,
uri.replace(settings.MEDIA_URL, ""))
elif uri.startswith(settings.STATIC_URL):
path = os.path.join(settings.STATIC_ROOT,
uri.replace(settings.STATIC_URL, ""))
if not os.path.exists(path):
for d in settings.STATICFILES_DIRS:
path = os.path.join(d, uri.replace(settings.STATIC_URL, ""))
if os.path.exists(path):
break
else:
raise UnsupportedMediaPathException(
'media urls must start with %s or %s' % (
settings.MEDIA_ROOT, settings.STATIC_ROOT))
return path
def generate_pdf_template_object(template_object, file_object, context):
"""
Inner function to pass template objects directly instead of passing a filename
"""
html = template_object.render(Context(context))
    pisa.CreatePDF(html.encode('utf-8'), file_object, link_callback=fetch_resources, encoding='UTF-8')
return file_object
#===============================================================================
# Main
#===============================================================================
def generate_pdf(template_name, file_object=None, context=None): # pragma: no cover
"""
Uses the xhtml2pdf library to render a PDF to the passed file_object, from the
given template name.
This returns the passed-in file object, filled with the actual PDF data.
In case the passed in file object is none, it will return a StringIO instance.
"""
if not file_object:
file_object = StringIO.StringIO()
if not context:
context = {}
tmpl = get_template(template_name)
generate_pdf_template_object(tmpl, file_object, context)
file_object.close()
return file_object
def render_to_pdf_response2(template_name, context=None, pdfname=None):
    if not pdfname:
        pdfname = '%s.pdf' % os.path.splitext(os.path.basename(template_name))[0]
    file_object = open(os.path.dirname(os.path.dirname(__file__))+'/upload/'+pdfname, 'wb')
#file_object['Content-Disposition'] = 'attachment; filename=%s' % pdfname
return generate_pdf(template_name, file_object, context)
def render_to_pdf_response(template_name, context=None, pdfname=None):
file_object = HttpResponse(content_type='application/pdf')
if not pdfname:
pdfname = '%s.pdf' % os.path.splitext(os.path.basename(template_name))[0]
file_object['Content-Disposition'] = 'attachment; filename=%s' % pdfname
return generate_pdf(template_name, file_object, context)
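# Usage sketch inside a Django view (template/context names are illustrative):
#
#     def invoice_pdf(request):
#         context = {'invoice': get_invoice(request)}
#         return render_to_pdf_response('invoices/invoice.html', context,
#                                       pdfname='invoice.pdf')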
class rem_to_do():
MCODE = {'DE': ['DE2', 'DE3', 'DE4', 'DEP2', 'DEP3', 'DEP4'],
'PL': ['PLW'],
'AT': ['AT'],
'NL': ['NL2', 'NL3', 'NL4'],
'IT': ['ITFP3L', 'ITW3L'],
'FR': ['FRPD', 'FRWD', 'FRD'],
'BE': ['BEF', 'BEW'],
'PT': ['PTD'],
'DK': ['DK2', 'DK3'],
'SE': ['SE2', 'SE3'],
'FI': ['FI2', 'FI3'],
'NO': ['NO2', 'NO3'],
'CH': ['CH1T', 'CH2', 'CH3', 'CH4', 'CHW1T', 'CHW2', 'CHW3', 'CHW4', 'CHC1T', 'CHC2', 'CHC3', 'CHC4']
}
MAILSUBJECT = {'EN': "reply to reminder",
'DE': "Antwort auf Mahnung",
'PL': "odpowiedz na wezwanie do zaplaty",
}
| bsd-3-clause | -8,430,157,454,704,671,000 | 38.54717 | 114 | 0.558922 | false |
dknez/libmesh | doc/statistics/libmesh_pagehits.py | 1 | 7992 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# Hits/month, pages, and gigabytes served.
# To get the Google analytics data:
# .) go to analytics.google.com
# .) click on libmesh
# .) click View Report
# .) Adjust date range to previous month
# .) Record the number of "Pageviews" in the "Hits" column below
# Hits, pages, GB served
data = [
# 'Jan 2003', 616, 616, 0
# 'Feb 2003', 2078, 2078, 0,
# 'Mar 2003', 3157, 3157, 0,
# 'Apr 2003', 7800, 7800, 0,
# 'May 2003', 4627, 4627, 0,
# 'Jun 2003', 6156, 6156, 0,
# 'Jul 2003', 6389, 6389, 0,
# 'Aug 2003', 10136, 10136, 0,
# 'Sep 2003', 8871, 8871, 0,
# 'Oct 2003', 9703, 9703, 0,
# 'Nov 2003', 9802, 9802, 0,
# 'Dec 2003', 9123, 9123, 0,
# 'Jan 2004', 13599, 13599, 0,
# 'Feb 2004', 11018, 11018, 0,
# 'Mar 2004', 11713, 11713, 0,
# 'Apr 2004', 14995, 14995, 0,
# 'May 2004', 11285, 11285, 0,
# 'Jun 2004', 12974, 12974, 0,
# 'Jul 2004', 12939, 12939, 0,
# 'Aug 2004', 9708, 9708, 0,
# 'Sep 2004', 7994, 7994, 0,
# 'Oct 2004', 6920, 6920, 0,
# 'Nov 2004', 10261, 10261, 0,
# 'Dec 2004', 7483, 7483, 0,
# 'Jan 2005', 3184, 3184, 0,
# 'Feb 2005', 37733, 14077, .4373,
# 'Mar 2005', 43927, 16408, .5637,
# 'Apr 2005', 29792, 8518, .2890,
# 'May 2005', 51288, 17629, .5689,
# 'Jun 2005', 40617, 16599, .5379,
# 'Jul 2005', 29944, 10006, .3363,
# 'Aug 2005', 39592, 14556, .4577,
# 'Sep 2005', 57638, 14666, .4881,
# 'Oct 2005', 48336, 17976, .5749,
# 'Nov 2005', 49563, 15308, .5810,
# 'Dec 2005', 90863, 40736, .9415,
# 'Jan 2006', 46723, 13487, .5662,
# 'Feb 2006', 62285, 26567, .8229,
# 'Mar 2006', 47446, 14711, .6534,
# 'Apr 2006', 90314, 29635, .9762,
# 'May 2006', 68209, 20998, .7949,
# 'Jun 2006', 50495, 17128, .6881,
# 'Jul 2006', 42387, 10958, .6016,
# 'Aug 2006', 55658, 11793, .6174,
# 'Sep 2006', 54919, 20591, .9056,
# 'Oct 2006', 52916, 17944, .9015,
# 'Nov 2006', 55382, 19833, .9439,
# 'Dec 2006', 54265, 22688, .9162,
# 'Jan 2007', 53813, 19881, 1.0 ,
# 'Feb 2007', 52434, 17920, .9472,
# 'Mar 2007', 61530, 21172, 1.2,
# 'Apr 2007', 125578, 77539, 1.3,
# 'May 2007', 182764, 129596, 1.6,
# 'Jun 2007', 115730, 38571, 1.7,
# 'Jul 2007', 121054, 42757, 1.8,
# 'Aug 2007', 81192, 28187, 1.3,
# 'Sep 2007', 143553, 39734, 2.3,
# 'Oct 2007', 110449, 42111, 2.4,
# 'Nov 2007', 128307, 57851, 2.3,
# 'Dec 2007', 80584, 42631, 2.0,
# 'Jan 2008', 69623, 34155, 2.0,
# 'Feb 2008', 144881, 111751, 2.5,
# 'Mar 2008', 69801, 29211, 1.9,
# 'Apr 2008', 74023, 31149, 2.0,
# 'May 2008', 63123, 23277, 1.8,
# 'Jun 2008', 66055, 25418, 2.1,
# 'Jul 2008', 60046, 22082, 2.0,
# 'Aug 2008', 60206, 24543, 2.0,
# 'Sep 2008', 53057, 18635, 1.6,
# 'Oct 2008', 64828, 27042, 2.1,
# 'Nov 2008', 72406, 29767, 2.3,
# 'Dec 2008', 76248, 31690, 2.3,
# 'Jan 2009', 73002, 29744, 2.0,
# 'Feb 2009', 70801, 29156, 2.1,
# 'Mar 2009', 78200, 31139, 2.1,
# 'Apr 2009', 70888, 26182, 1.7,
# 'May 2009', 67263, 26210, 1.8,
# 'Jun 2009', 73146, 31328, 2.6,
# 'Jul 2009', 77828, 33711, 2.4,
# 'Aug 2009', 64378, 28542, 1.9,
# 'Sep 2009', 76167, 33484, 2.2,
# 'Oct 2009', 95727, 41062, 2.8,
# 'Nov 2009', 88042, 38869, 2.5,
# 'Dec 2009', 76148, 37609, 2.3,
# 'Jan 2010', 268856, 45983, 3.2,
# 'Feb 2010', 208210, 42680, 3.0,
# 'Mar 2010', 116263, 42660, 2.6,
# 'Apr 2010', 102493, 32942, 2.4,
# 'May 2010', 117023, 37107, 2.5,
# 'Jun 2010', 128589, 38019, 2.5,
# 'Jul 2010', 87183, 34026, 2.2,
# 'Aug 2010', 99161, 33199, 2.5,
# 'Sep 2010', 81657, 32305, 2.5,
# 'Oct 2010', 98236, 42091, 3.4,
# 'Nov 2010', 115603, 48695, 3.4,
# 'Dec 2010', 105030, 45570, 3.4,
# 'Jan 2011', 133476, 43549, 3.1,
# 'Feb 2011', 34483, 15002, 1.1,
# 'Mar 2011', 0, 0, 0.0,
# 'Apr 2011', 0, 0, 0.0,
# 'May 2011', 0, 0, 0.0,
# 'Jun 2011', 0, 0, 0.0,
# 'Jul 2011', 0, 0, 0.0,
'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data
'Sep 2011', 10305, 0, 0.0,
'Oct 2011', 14081, 0, 0.0,
'Nov 2011', 13397, 0, 0.0,
'Dec 2011', 13729, 0, 0.0,
'Jan 2012', 11050, 0, 0.0,
'Feb 2012', 12779, 0, 0.0,
'Mar 2012', 12970, 0, 0.0,
'Apr 2012', 13051, 0, 0.0,
'May 2012', 11857, 0, 0.0,
'Jun 2012', 12584, 0, 0.0,
'Jul 2012', 12995, 0, 0.0,
'Aug 2012', 13204, 0, 0.0,
'Sep 2012', 13170, 0, 0.0,
'Oct 2012', 13335, 0, 0.0,
'Nov 2012', 11337, 0, 0.0,
'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012
'Jan 2013', 13029, 0, 0.0,
'Feb 2013', 10420, 0, 0.0,
'Mar 2013', 13400, 0, 0.0,
'Apr 2013', 14416, 0, 0.0,
'May 2013', 13875, 0, 0.0,
'Jun 2013', 13747, 0, 0.0,
'Jul 2013', 14019, 0, 0.0,
'Aug 2013', 10828, 0, 0.0,
'Sep 2013', 9969, 0, 0.0,
'Oct 2013', 13083, 0, 0.0,
'Nov 2013', 12938, 0, 0.0,
'Dec 2013', 9079, 0, 0.0,
'Jan 2014', 9736, 0, 0.0,
'Feb 2014', 11824, 0, 0.0,
'Mar 2014', 10861, 0, 0.0,
'Apr 2014', 12711, 0, 0.0,
'May 2014', 11177, 0, 0.0,
'Jun 2014', 10738, 0, 0.0,
'Jul 2014', 10349, 0, 0.0,
'Aug 2014', 8877, 0, 0.0,
'Sep 2014', 9226, 0, 0.0,
'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014
'Nov 2014', 9243, 0, 0.0,
'Dec 2014', 10714, 0, 0.0,
'Jan 2015', 11508, 0, 0.0,
'Feb 2015', 11278, 0, 0.0,
'Mar 2015', 13305, 0, 0.0,
'Apr 2015', 12347, 0, 0.0,
'May 2015', 11368, 0, 0.0,
'Jun 2015', 11203, 0, 0.0,
'Jul 2015', 10419, 0, 0.0,
'Aug 2015', 11282, 0, 0.0,
'Sep 2015', 13535, 0, 0.0,
'Oct 2015', 12912, 0, 0.0,
'Nov 2015', 13894, 0, 0.0,
'Dec 2015', 11694, 0, 0.0,
]
# Extract number of hits/month
n_hits_month = data[1::4]
# Divide by 1000 for plotting...
n_hits_month = np.divide(n_hits_month, 1000.)
# Extract list of date strings
date_strings = data[0::4]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Make the bar chart. We have one number/month, there are about 30
# days in each month, this defines the bar width...
ax.bar(date_nums, n_hits_month, width=30, color='b')
# Create title
fig.suptitle('LibMesh Page Hits/Month (in Thousands)')
# Set up x-tick locations -- August of each year
ticks_names = ['Aug 2011', 'Aug 2012', 'Aug 2013', 'Aug 2014']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime(x, '%b %Y')))
# Set tick labels and positions
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)
# Set x limits for the plot
plt.xlim(date_nums[0], date_nums[-1]+30);
# Make x-axis ticks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Save as PDF
plt.savefig('libmesh_pagehits.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 | -8,665,432,598,488,634,000 | 34.052632 | 131 | 0.519144 | false |
aisk/leancloud-python-sdk | tests/test_file.py | 1 | 4681 | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import six
import requests
from nose.tools import with_setup # type: ignore
from nose.tools import assert_raises # type: ignore
from nose.tools import raises # type: ignore
import leancloud
from leancloud import File
from leancloud import ACL
__author__ = 'asaka'
def setup_func():
leancloud.init(
os.environ['APP_ID'],
master_key=os.environ['MASTER_KEY']
)
def test_basic(): # type: () -> None
def fn(s):
f = File('Blah', s, mime_type='text/plain')
assert f.name == 'Blah'
assert f._metadata['size'] == 14
assert f.size == 14
b = b'blah blah blah'
fn(io.BytesIO(b))
fn(memoryview(b))
if six.PY2:
import StringIO
import cStringIO
fn(StringIO.StringIO(b))
fn(cStringIO.StringIO(b))
fn(buffer(b))
def test_create_with_url(): # type: () -> None
f = File.create_with_url('xxx', u'http://i1.wp.com/leancloud.cn/images/static/default-avatar.png', meta_data={})
assert f.url == 'http://i1.wp.com/leancloud.cn/images/static/default-avatar.png'
def test_create_without_data(): # type: () -> None
f = File.create_without_data('a123')
assert f.id == 'a123'
def test_acl(): # type: () -> None
acl_ = ACL()
f = File('Blah', io.BytesIO(b'xxx'))
assert_raises(TypeError, f.set_acl, 'a')
f.set_acl(acl_)
assert f.get_acl() == acl_
@with_setup(setup_func)
def test_save(): # type: () -> None
user = leancloud.User()
user.login('user1_name', 'password')
f = File('Blah.txt', open('tests/sample_text.txt', 'rb'))
f.save()
assert f.owner_id == user.id
assert f.id
assert f.name == 'Blah.txt'
assert f.mime_type == 'text/plain'
assert not f.url.endswith('.')
@with_setup(setup_func)
def test_query(): # type: () -> None
files = leancloud.Query('File').find()
for f in files:
assert isinstance(f, File)
assert f.url
assert f.name
assert f.metadata
assert isinstance(leancloud.File.query.first(), File)
@with_setup(setup_func)
def test_save_external(): # type: () -> None
f = File.create_with_url('lenna.jpg', 'http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
f.save()
assert f.id
@raises(ValueError)
def test_thumbnail_url_erorr(): # type: () -> None
f = File.create_with_url('xx', '')
f.get_thumbnail_url(100, 100)
@with_setup(setup_func)
@raises(ValueError)
def test_thumbnail_size_erorr(): # type: () -> None
r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
b = io.BytesIO(r.content)
f = File('Lenna2.jpg', b)
f.save()
assert f.id
f.get_thumbnail_url(-1, -1)
f.get_thumbnail_url(1, 1, quality=110)
@with_setup(setup_func)
def test_thumbnail(): # type: () -> None
r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
b = io.BytesIO(r.content)
f = File('Lenna2.jpg', b)
f.save()
assert f.id
url = f.get_thumbnail_url(100, 100)
assert url.endswith('?imageView/2/w/100/h/100/q/100/format/png')
@with_setup(setup_func)
def test_destroy(): # type: () -> None
r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
b = io.BytesIO(r.content)
f = File('Lenna2.jpg', b)
f.save()
assert f.id
f.destroy()
@with_setup(setup_func)
def test_file_callback(): # type: () -> None
d = {}
def noop(token, *args, **kwargs):
d['token'] = token
f = File('xxx', io.BytesIO(b'xxx'))
f._save_to_s3 = noop
f._save_to_qiniu = noop
f._save_to_qcloud = noop
f.save()
f._save_callback(d['token'], False)
# time.sleep(3)
# File should be deleted by API server
# assert_raises(leancloud.LeanCloudError, File.query().get, f.id)
@with_setup(setup_func)
def test_fetch(): # type: () -> None
r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
b = io.BytesIO(r.content)
f = File('Lenna2.jpg', b)
f.metadata['foo'] = 'bar'
f.save()
fetched = File.create_without_data(f.id)
fetched.fetch()
assert fetched.id == f.id
assert fetched.metadata == f.metadata
assert fetched.name == f.name
assert fetched.url == f.url
assert fetched.size == f.size
assert fetched.url == f.url
f.destroy()
def test_checksum(): # type: () -> None
f = File('Blah', open('tests/sample_text.txt', 'rb'))
assert f._metadata['_checksum'] == 'd0588d95e45eed70745ffabdf0b18acd'
| lgpl-3.0 | 791,541,468,818,069,400 | 25.005556 | 116 | 0.614826 | false |
cloudera/Impala | tests/query_test/test_insert_permutation.py | 1 | 2412 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Targeted Impala insert tests
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3
from tests.common.test_dimensions import (
create_exec_option_dimension,
create_uncompressed_text_dimension)
class TestInsertQueriesWithPermutation(ImpalaTestSuite):
"""
Tests for the column permutation feature of INSERT statements
"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertQueriesWithPermutation, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value. This is needed should we decide
# to run the insert tests in parallel (otherwise there will be two tests inserting
# into the same table at the same time for the same file format).
# TODO: When we do decide to run these tests in parallel we could create unique temp
# tables for each test case to resolve the concurrency problems.
# TODO: do we need to run with multiple file formats? This seems to be really
# targeting FE behavior.
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0]))
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@SkipIfS3.eventually_consistent
def test_insert_permutation(self, vector):
map(self.cleanup_db, ["insert_permutation_test"])
self.run_test_case('QueryTest/insert_permutation', vector)
def teardown_method(self, method):
map(self.cleanup_db, ["insert_permutation_test"])
| apache-2.0 | -1,662,382,883,920,067,300 | 42.854545 | 88 | 0.75539 | false |
Urinx/Project_Euler_Answers | 050.py | 1 | 1316 | #!/usr/bin/env python
#coding:utf-8
"""
Consecutive prime sum
The prime 41, can be written as the sum of six consecutive primes:
41 = 2 + 3 + 5 + 7 + 11 + 13
This is the longest sum of consecutive primes that adds to a prime below one-hundred.
The longest sum of consecutive primes below one-thousand that adds to a prime, contains 21 terms, and is equal to 953.
Which prime, below one-million, can be written as the sum of the most consecutive primes?
"""
import math
def gen_primes():
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
primes=[]
for p in gen_primes():
if p>10000: break
primes.append(p)
def is_prime(num):
for p in primes:
if p>math.sqrt(num): break
if num%p==0: return False
return True
def answer():
m=[0,0]
for i in xrange(0,29):
for j in xrange(len(primes)-680,i,-1):
s=sum(primes[i:j])
if s<1000000 and is_prime(s) and j-i>m[1]:
m=[s,j-i]
break
print m,len(primes)
import time
tStart=time.time()
answer()
print 'run time=',time.time()-tStart
# [997651, 543]
# run time= 0.111933946609 | gpl-2.0 | -2,962,537,939,399,148,000 | 22.517857 | 118 | 0.572188 | false |