repo_name stringlengths 5 to 92 | path stringlengths 4 to 232 | copies stringclasses 19 values | size stringlengths 4 to 7 | content stringlengths 721 to 1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51 to 99.9 | line_max int64 15 to 997 | alpha_frac float64 0.25 to 0.97 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
alphacsc/alphacsc | alphacsc/other/sdtw/barycenter.py | 1 | 1860 | # Author: Mathieu Blondel
# License: Simplified BSD
import numpy as np
from scipy.optimize import minimize
from sdtw import SoftDTW
from sdtw.distance import SquaredEuclidean
def sdtw_barycenter(X, barycenter_init, gamma=1.0, weights=None,
                    method="L-BFGS-B", tol=1e-3, max_iter=50):
    """
    Compute barycenter (time series averaging) under the soft-DTW geometry.

    Parameters
    ----------
    X: list
        List of time series, numpy arrays of shape [len(X[i]), d].
    barycenter_init: array, shape = [length, d]
        Initialization.
    gamma: float
        Regularization parameter.
        Lower is less smoothed (closer to true DTW).
    weights: None or array
        Weights of each X[i]. Must have the same length as X.
    method: string
        Optimization method, passed to `scipy.optimize.minimize`.
        Default: L-BFGS-B.
    tol: float
        Tolerance of the method used.
    max_iter: int
        Maximum number of iterations.
    """
    if weights is None:
        weights = np.ones(len(X))
    weights = np.array(weights)

    def _func(Z):
        # Compute objective value and grad at Z.
        Z = Z.reshape(*barycenter_init.shape)
        G = np.zeros_like(Z)
        obj = 0
        for i in range(len(X)):
            D = SquaredEuclidean(Z, X[i])
            sdtw = SoftDTW(D, gamma=gamma)
            value = sdtw.compute()
            E = sdtw.grad()
            G_tmp = D.jacobian_product(E)
            G += weights[i] * G_tmp
            obj += weights[i] * value
        return obj, G.ravel()

    # The function works with vectors so we need to vectorize barycenter_init.
    res = minimize(_func, barycenter_init.ravel(), method=method, jac=True,
                   tol=tol, options=dict(maxiter=max_iter, disp=False))
    return res.x.reshape(*barycenter_init.shape)
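
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the `sdtw` package is installed; the toy data
# below is made up purely for illustration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # Three univariate time series of different lengths.
    X = [rng.randn(n, 1).cumsum(axis=0) for n in (40, 45, 50)]
    # A common choice is to initialize the barycenter with one of the series.
    bary = sdtw_barycenter(X, X[0].copy(), gamma=1.0, max_iter=20)
    print(bary.shape)  # same shape as the initialization: (40, 1)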
| bsd-3-clause | -8,478,393,243,963,510,000 | 25.197183 | 78 | 0.599462 | false |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/reportbug/checkversions.py | 1 | 6898 | #
# checkversions.py - Find if the installed version of a package is the latest
#
# Written by Chris Lawrence <[email protected]>
# (C) 2002-08 Chris Lawrence
# Copyright (C) 2008-2014 Sandro Tosi <[email protected]>
#
# This program is freely distributable per the following license:
#
## Permission to use, copy, modify, and distribute this software and its
## documentation for any purpose and without fee is hereby granted,
## provided that the above copyright notice appears in all copies and that
## both that copyright notice and this permission notice appear in
## supporting documentation.
##
## I DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL I
## BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
## DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
## WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
## SOFTWARE.
import sys
import os
import re
import urllib2
import sgmllib
import gc
import time
import utils
from urlutils import open_url
from reportbug.exceptions import (
NoNetwork,
)
# needed to parse new.822
from debian.deb822 import Deb822
from debian import debian_support
RMADISON_URL = 'http://qa.debian.org/madison.php?package=%s&text=on'
INCOMING_URL = 'http://incoming.debian.org/'
NEWQUEUE_URL = 'http://ftp-master.debian.org/new.822'
# The format is an unordered list
class BaseParser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.savedata = None
# --- Formatter interface, taking care of 'savedata' mode;
# shouldn't need to be overridden
def handle_data(self, data):
if self.savedata is not None:
self.savedata = self.savedata + data
# --- Hooks to save data; shouldn't need to be overridden
def save_bgn(self):
self.savedata = ''
def save_end(self, mode=0):
data = self.savedata
self.savedata = None
if not mode and data is not None: data = ' '.join(data.split())
return data
class IncomingParser(sgmllib.SGMLParser):
def __init__(self, package, arch='i386'):
sgmllib.SGMLParser.__init__(self)
self.found = []
self.savedata = None
arch = r'(?:all|'+re.escape(arch)+')'
self.package = re.compile(re.escape(package)+r'_([^_]+)_'+arch+'.deb')
def start_a(self, attrs):
for attrib, value in attrs:
if attrib.lower() != 'href':
continue
mob = self.package.match(value)
if mob:
self.found.append(mob.group(1))
def compare_versions(current, upstream):
"""Return 1 if upstream is newer than current, -1 if current is
newer than upstream, and 0 if the same."""
if not current or not upstream: return 0
return debian_support.version_compare(upstream, current)
def later_version(a, b):
if compare_versions(a, b) > 0:
return b
return a
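# Illustrative note (not part of the original module): per the docstring of
# compare_versions() above, compare_versions('1.0-1', '1.2-1') is positive
# because the upstream candidate '1.2-1' is newer than the installed '1.0-1',
# so later_version('1.0-1', '1.2-1') returns '1.2-1'.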
def get_versions_available(package, timeout, dists=None, http_proxy=None, arch='i386'):
if not dists:
dists = ('oldstable', 'stable', 'testing', 'unstable', 'experimental')
arch = utils.get_arch()
url = RMADISON_URL % package
url += '&s=' + ','.join(dists)
# select only those lines that refers to source pkg
# or to binary packages available on the current arch
url += '&a=source,all,' + arch
try:
page = open_url(url)
except NoNetwork:
return {}
except urllib2.HTTPError, x:
print >> sys.stderr, "Warning:", x
return {}
if not page:
return {}
# read the content of the page, remove spaces, empty lines
content = page.read().replace(' ', '').strip()
page.close()
versions = {}
for line in content.split('\n'):
l = line.split('|')
# skip lines not having the right number of fields
if len(l) != 4:
continue
# map suites name (returned by madison) to dist name
dist = utils.SUITES2DISTS.get(l[2], l[2])
versions[dist] = l[1]
return versions
def get_newqueue_available(package, timeout, dists=None, http_proxy=None, arch='i386'):
if dists is None:
dists = ('unstable (new queue)', )
try:
page = open_url(NEWQUEUE_URL, http_proxy, timeout)
except NoNetwork:
return {}
except urllib2.HTTPError, x:
print >> sys.stderr, "Warning:", x
return {}
if not page:
return {}
versions = {}
# iter over the entries, one paragraph at a time
for para in Deb822.iter_paragraphs(page):
if para['Source'] == package:
k = para['Distribution'] + ' (' + para['Queue'] + ')'
# in case of multiple versions, choose the bigger
versions[k] = max(para['Version'].split())
return versions
def get_incoming_version(package, timeout, http_proxy=None, arch='i386'):
try:
page = open_url(INCOMING_URL, http_proxy, timeout)
except NoNetwork:
return None
except urllib2.HTTPError, x:
print >> sys.stderr, "Warning:", x
return None
if not page:
return None
parser = IncomingParser(package, arch)
for line in page:
parser.feed(line)
parser.close()
try:
page.fp._sock.recv = None
except:
pass
page.close()
if parser.found:
found = parser.found
del parser
return reduce(later_version, found, '0')
del page
del parser
return None
def check_available(package, version, timeout, dists=None,
check_incoming=True, check_newqueue=True,
http_proxy=None, arch='i386'):
avail = {}
if check_incoming:
iv = get_incoming_version(package, timeout, http_proxy, arch)
if iv:
avail['incoming'] = iv
stuff = get_versions_available(package, timeout, dists, http_proxy, arch)
avail.update(stuff)
if check_newqueue:
srcpackage = utils.get_source_name(package)
if srcpackage is None:
srcpackage = package
stuff = get_newqueue_available(srcpackage, timeout, dists, http_proxy, arch)
avail.update(stuff)
#print gc.garbage, stuff
new = {}
newer = 0
for dist in avail:
if dist == 'incoming':
if ':' in version:
ver = version.split(':', 1)[1]
else:
ver = version
comparison = compare_versions(ver, avail[dist])
else:
comparison = compare_versions(version, avail[dist])
if comparison > 0:
new[dist] = avail[dist]
elif comparison < 0:
newer += 1
too_new = (newer and newer == len(avail))
return new, too_new
| gpl-3.0 | -5,959,477,691,698,844,000 | 29.794643 | 87 | 0.620325 | false |
borjam/exabgp | src/exabgp/bgp/message/update/attribute/origin.py | 2 | 1719 | # encoding: utf-8
"""
origin.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.bgp.message.update.attribute.attribute import Attribute
# =================================================================== Origin (1)
@Attribute.register()
class Origin(Attribute):
    ID = Attribute.CODE.ORIGIN
    FLAG = Attribute.Flag.TRANSITIVE
    CACHING = True

    IGP = 0x00
    EGP = 0x01
    INCOMPLETE = 0x02

    def __init__(self, origin, packed=None):
        self.origin = origin
        self._packed = self._attribute(packed if packed else bytes([origin]))

    def __eq__(self, other):
        return self.ID == other.ID and self.FLAG == other.FLAG and self.origin == other.origin

    def __ne__(self, other):
        return not self.__eq__(other)

    def pack(self, negotiated=None):
        return self._packed

    def __len__(self):
        return len(self._packed)

    def __repr__(self):
        if self.origin == 0x00:
            return 'igp'
        if self.origin == 0x01:
            return 'egp'
        if self.origin == 0x02:
            return 'incomplete'
        return 'invalid'

    @classmethod
    def unpack(cls, data, direction, negotiated):
        return cls(data[0], data)

    @classmethod
    def setCache(cls):
        # there can only be three, build them now
        IGP = Origin(Origin.IGP)
        EGP = Origin(Origin.EGP)
        INC = Origin(Origin.INCOMPLETE)

        cls.cache[Attribute.CODE.ORIGIN][IGP.pack()] = IGP
        cls.cache[Attribute.CODE.ORIGIN][EGP.pack()] = EGP
        cls.cache[Attribute.CODE.ORIGIN][INC.pack()] = INC


Origin.setCache()
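
# Illustrative usage (not part of the original module): a quick check of the
# three cached instances built by setCache() above; the cache itself is
# consulted by the attribute-caching machinery in the Attribute base class
# (not shown here).
#
#   assert repr(Origin(Origin.IGP)) == 'igp'
#   assert repr(Origin(Origin.EGP)) == 'egp'
#   assert repr(Origin(Origin.INCOMPLETE)) == 'incomplete'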
| bsd-3-clause | 5,328,723,355,995,210,000 | 24.656716 | 94 | 0.588133 | false |
acbilson/forbidden-island | tests/playerservicetests.py | 1 | 1508 | import unittest
import sys
sys.path.append('../src')
from island import *
from service_island import *
from service_player import *
from islandbus import *
from constants import *
from message import *
from cards import *
import sys
from iofactory import *
from playerfactory import *
from tiles import *
class TestPlayerService(unittest.TestCase):

    ps = None

    def setUp(self):
        bus = IslandBus()
        playerFactory = PlayerFactory()
        tiles = Tiles()
        self.ps = PlayerService(bus, playerFactory, tiles)

    def test_ctor(self):
        bus = IslandBus()
        playerFactory = PlayerFactory()
        tiles = Tiles()
        ps = PlayerService(bus, playerFactory, tiles)

    def test_on_message_received_receivesCreateMessage_CreatesTwoPlayers(self):
        """ When a request for two players to be created is received, should add them to the player list """
        msg = PlayerMessage(Request(PlayerOptions.Create, [Constant.PlayerType["Diver"], Constant.PlayerType["Messenger"]]))
        self.ps.on_message_received(msg)
        self.assertEqual(2, len(self.ps.players))

    def test_on_message_received_receivesCreateMessage_updatesTiles(self):
        """ When a request for a player to be created is received, should add the player to the tiles """
        msg = PlayerMessage(Request(PlayerOptions.Create, [Constant.PlayerType["Diver"]]))
        self.ps.on_message_received(msg)
        diverTile = self.ps.tiles.get_tile(Constant.TileNames["IronGate"])
        self.assertEqual(Constant.PlayerType["Diver"], diverTile.player.value)
| gpl-3.0 | 7,529,739,578,610,399,000 | 28.568627 | 120 | 0.730106 | false |
slremy/cloudroboticsreferences | processbibtex.py | 1 | 2266 | from sys import exc_info, argv
import pandas as pd
from pybtex.database.input import bibtex as bibtex_in
import textwrap
bibparser = bibtex_in.Parser()
d=[]
for bibfile in argv[1:]:
#bib=bibparser.parse_file(bibfile)
with open(bibfile) as f:
raw = f.read()
f.close()
bib=bibparser.parse_string(raw)#raw.encode(encoding='UTF-8',errors='strict')
for bib_id in bib.entries:
b = bib.entries[bib_id].fields
#if hasattr(b,"booktitle"): print b["booktitle"], bib_id
if b["abstract"] == "" or 'keywords' not in b.keys(): continue
#print b
d.append([" <br> ".join(textwrap.wrap(b["title"],60)),
b["abstract"],
b["year"],
#b["booktitle"] if "booktitle" in b else b["journal"],
b["keywords"],
bib.entries[bib_id].type])
mydata = pd.DataFrame(d, columns = ["title", "abstract", "year", "keywords","document identifier"])
from plotfcns import *
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA,TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.preprocessing import Normalizer, normalize
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans, MiniBatchKMeans
#vectorizer = CountVectorizer(min_df=1)
# or
vectorizer = TfidfVectorizer(max_df=0.5, max_features=100000,
min_df=2, stop_words='english',
use_idf=True)
X = vectorizer.fit_transform(mydata['abstract'])
pca=PCA(n_components=3, random_state=1);
X = pca.fit_transform(X.toarray())
'''
svd = TruncatedSVD(3)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
'''
#for i in unique(mydata['year']): print(i, mydata[mydata['year']==i].shape[0])
badtitle = ["In the News", "Program guide", "Table of", "Title page", "Front co", "Copyright not", "Content list", "Proceedings", "Contents","Cover art"]
plot_n_save(mydata[~mydata['title'].str.contains('|'.join(badtitle),case=False)], X, mydata['document identifier'], "type")
#plot2d_n_save(mydata[~mydata['title'].str.contains('|'.join(badtitle),case=False)], X, mydata['document identifier'], "type")
| apache-2.0 | 2,383,310,921,888,308,000 | 36.766667 | 153 | 0.652692 | false |
pebble/spacel-provision | src/spacel/provision/changesets.py | 1 | 3695 | import logging
logger = logging.getLogger('spacel')
# These could always use tuning:
COSTS = {
'AWS::AutoScaling::AutoScalingGroup': {
'Add': 2,
'Modify': 300,
'Remove': 2
},
'AWS::AutoScaling::LaunchConfiguration': {
'Add': 2,
'Modify': 2,
'Remove': 2
},
'AWS::CloudWatch::Alarm': {
'Add': 15,
'Modify': 15,
'Remove': 15
},
'AWS::DynamoDB::Table': {
'Add': 30,
'Modify': 5,
'Remove': 30
},
'AWS::EC2::EIP': {
'Add': 15,
'Modify': 5,
'Remove': 15
},
'AWS::EC2::NatGateway': {
'Add': 60,
'Remove': 60
},
'AWS::EC2::Route': {
'Add': 5,
'Modify': 5,
'Remove': 5
},
'AWS::EC2::RouteTable': {
'Add': 5,
'Modify': 5,
'Remove': 5
},
'AWS::EC2::SecurityGroup': {
'Add': 140,
'Modify': 2,
'Remove': 5
},
'AWS::EC2::SecurityGroupIngress': {
'Add': 10,
'Modify': 10,
'Remove': 5
},
'AWS::EC2::SpotFleet': {
'Add': 30,
'Modify': 300,
'Remove': 30
},
'AWS::EC2::SubnetRouteTableAssociation': {
'Add': 5,
'Remove': 5
},
'AWS::EC2::Subnet': {
'Add': 5,
'Modify': 5,
'Remove': 5
},
'AWS::ElasticLoadBalancing::LoadBalancer': {
'Add': 15,
'Modify': 5,
'Remove': 5
},
'AWS::ElastiCache::ReplicationGroup': {
'Add': 120,
'Modify': 30,
'Remove': 60
},
'AWS::IAM::InstanceProfile': {
'Add': 120,
'Modify': 60,
'Remove': 120
},
'AWS::IAM::Policy': {
'Add': 120,
'Modify': 60,
'Remove': 120
},
'AWS::IAM::Role': {
'Add': 75,
'Modify': 60,
'Remove': 75
},
'AWS::RDS::DBInstance': {
'Add': 300,
'Modify': 300,
'Remove': 120
},
'AWS::Route53::RecordSetGroup': {
'Add': 5,
'Modify': 5,
'Remove': 5
},
'AWS::SNS::Topic': {
'Add': 15,
'Modify': 15,
'Remove': 15
}
}
class ChangeSetEstimator(object):
"""
Estimate how long it will take to execute a CF change set.
"""
def estimate(self, changes):
# Aggregate changes in a single log message:
changes_debug = 'Changes to be performed:\n'
seconds = 0
for change in changes:
resource_change = change.get('ResourceChange')
if resource_change:
physical = resource_change.get('PhysicalResourceId')
if physical:
physical = ' (%s)' % physical
else:
physical = ''
resource_action = resource_change['Action']
resource_type = resource_change['ResourceType']
# Debug message:
changes_debug += '%6s %25s - %s%s\n' % (
resource_action,
resource_type,
resource_change['LogicalResourceId'],
physical)
seconds += self._estimate(resource_action, resource_type)
changes_debug += 'This should take %s seconds...' % seconds
logger.info(changes_debug)
return seconds
@staticmethod
def _estimate(resource_action, resource_type):
basic_cost = COSTS.get(resource_type, {}).get(resource_action)
if basic_cost:
return basic_cost
logger.warning('No basic cost for %s to %s.', resource_action,
resource_type)
return 0
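
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of calling estimate() with a single change entry in the
# shape CloudFormation's describe_change_set returns; the resource is made up.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    changes = [{
        'ResourceChange': {
            'Action': 'Add',
            'ResourceType': 'AWS::EC2::SecurityGroup',
            'LogicalResourceId': 'AppSecurityGroup',
        },
    }]
    # Prints 140, per the 'Add' cost for security groups in COSTS above.
    print(ChangeSetEstimator().estimate(changes))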
| mit | 6,241,463,731,857,132,000 | 22.83871 | 73 | 0.458728 | false |
SeVenOPS/CFM-Project | ClientSide/#OldReactor.py | 1 | 1621 | import SocketServer
import subprocess
import sys
from threading import Thread
import time
import sqlite3
dbcon = sqlite3.connect("./Compile/dbfile.sqlite", check_same_thread = False)
dbcon.isolation_level = None
dbcur = dbcon.cursor()
dbcon.row_factory = sqlite3.Row
dbcon.text_factory = str
HOST = '176.53.113.223'
PORT = 7777
class TCPConnectionHandler(SocketServer.BaseRequestHandler):
def handle(self):
a=""
dbcur.execute("SELECT COUNT(*) from users WHERE first > 10")
tpl=dbcur.fetchone()[0]
a+="$7$"+str(tpl)
data = self.request.recv(1024)
sayfa,sayfada=data.split("|")
limit=str((int(sayfa) - 1) * int(sayfada))
dbcur.execute('SELECT name, cheese, first, saves, rounds,(first+cheese+saves)/3 as rank FROM users WHERE first > 10 ORDER BY rank DESC LIMIT ?, ?', [limit,sayfada])
rrf = dbcur.fetchall()
for b in rrf:
i=0
a+="$7$"
for c in b:
if i==0:a+=str(c)
elif i==5:pass
else:a+="|"+str(c)
i+=1
self.request.send(a)
self.request.close()
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass):
SocketServer.TCPServer.__init__(\
self,\
server_address,\
RequestHandlerClass)
if __name__ == "__main__":
server = Server((HOST, PORT), TCPConnectionHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
sys.exit(0)
| apache-2.0 | -6,648,381,212,443,176,000 | 28.472727 | 172 | 0.607033 | false |
mogoweb/chromium-crosswalk | content/test/gpu/gpu_tests/webgl_robustness.py | 1 | 2375 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page_set
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustness(test.Test):
enabled = False
test = WebglConformanceValidator
def CreatePageSet(self, options):
page_set_dict = {
'description': 'Test cases for WebGL robustness',
'user_agent_type': 'desktop',
'serving_dirs': [''],
'pages': [
{
'url': 'file:///extra/lots-of-polys-example.html',
'script_to_evaluate_on_commit': robustness_harness_script,
'navigate_steps': [
{ 'action': 'navigate' },
{ 'action': 'wait', 'javascript': 'webglTestHarness._finished' }
]
}
]
}
return page_set.PageSet.FromDict(page_set_dict, conformance_path)
| bsd-3-clause | 925,873,588,832,584,000 | 34.447761 | 79 | 0.701053 | false |
wkentaro/chainer | tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_2d.py | 1 | 5832 | import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
_inject_backend_tests = backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
@_inject_backend_tests
@testing.parameterize(*testing.product({
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': [None, 'C'],
}))
class TestMaxPooling2D(testing.FunctionTestCase):
def setUp(self):
if self.cover_all:
self.output_shape = (2, 3, 3, 2)
else:
self.output_shape = (2, 3, 2, 2)
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def generate_inputs(self):
x = numpy.arange(2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
numpy.random.shuffle(x)
x = 2 * x / x.size - 1
return x,
def forward_expected(self, inputs):
x, = inputs
expect = numpy.empty(self.output_shape, dtype=self.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
if self.cover_all:
expect[i, c] = numpy.array([
[xx[0:2, 0:2].max(), xx[0:2, 1:3].max()],
[xx[1:4, 0:2].max(), xx[1:4, 1:3].max()],
[xx[3:4, 0:2].max(), xx[3:4, 1:3].max()]])
else:
expect[i, c] = numpy.array([
[xx[0:2, 0:2].max(), xx[0:2, 1:3].max()],
[xx[1:4, 0:2].max(), xx[1:4, 1:3].max()]])
return expect,
def forward(self, inputs, device):
x, = inputs
y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
cover_all=self.cover_all)
return y,
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPooling2DForwardCpuWide(unittest.TestCase):
# see #120
def test_forward_cpu_wide(self):
x_data = numpy.random.rand(2, 3, 15, 15).astype(self.dtype)
x = chainer.Variable(x_data)
functions.max_pooling_2d(x, 6, stride=6, pad=0)
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPooling2DCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.arange(
2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
self.gy = cuda.cupy.random.uniform(-1, 1,
(2, 3, 2, 2)).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_2d(
x, 3, stride=2, pad=1, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.pooling_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto')
y = self.forward()
# should be consistent to forward regardless of use_cudnn config
y.grad = self.gy
with testing.patch('cupy.cudnn.pooling_backward') as func:
y.backward()
self.assertEqual(func.called, expect)
class TestMaxPooling2DIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(
2 * 3 * 4 * 4, dtype=numpy.float32).reshape(2, 3, 4, 4)
numpy.random.shuffle(self.x)
def _check(self, x):
out, indices = functions.max_pooling_2d(
x, 2, cover_all=False, return_indices=True)
assert isinstance(out, chainer.Variable)
assert isinstance(out.array, type(x))
assert isinstance(indices, type(x))
assert indices.shape == out.array.shape
# Calculate expected indices.
expect = numpy.zeros(indices.shape, dtype=indices.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
expect[i, c] = numpy.array([
[xx[0:2, 0:2].ravel().argmax(),
xx[0:2, 2:4].ravel().argmax()],
[xx[2:4, 0:2].ravel().argmax(),
xx[2:4, 2:4].ravel().argmax()],
])
if out.xp is cuda.cupy:
expect = cuda.to_gpu(expect)
assert (expect == indices).all()
def test_cpu(self):
self._check(self.x)
@attr.gpu
@attr.cudnn
def test_gpu(self):
x = cuda.to_gpu(self.x)
with chainer.using_config('use_cudnn', 'never'):
self._check(x)
with chainer.using_config('use_cudnn', 'always'):
self._check(x)
testing.run_module(__name__, __file__)
| mit | -6,062,248,388,377,533,000 | 31.581006 | 77 | 0.534294 | false |
viswimmer1/PythonGenerator | data/python_files/28566063/ws.py | 1 | 1270 | import cookielib, socket, urllib, urllib2, urllib, sys
from urllib import urlretrieve
from shutil import copyfile
from .views.SubscriptsViews import getIntereses
from twisted.internet import reactor
from twisted.python import log
import json, os
import pickle
import threading
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
wss = set()
temas = {}
idSs = {}
refreshws = True
def reload(a = ''):
refreshws = True
def notify(titulo, idTema):
refreshws = True
for wxs in wss:
if refreshws:
temas[wxs] = [int(m['idTema']) for m in getIntereses(idSs[wxs])]
if idTema in temas[wxs]:
wxs.sendMessage(str(titulo))
refreshws = False
class PushServerProtocol(WebSocketServerProtocol):
def onOpen(self):
wss.add(self)
idSs[self] = 1
refreshws = True
def onMessage(self, msg, binary):
idSs[self] = int(msg)
refreshws = True
def onClose(self, wasClean, code, reason):
wss.discard(self)
idSs[self] = 1
refreshws = True
class PushServer ( threading.Thread ):
def run ( self ):
log.startLogging(sys.stdout)
factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
factory.protocol = PushServerProtocol
listenWS(factory)
reactor.run()
| gpl-2.0 | -2,534,047,227,258,700,300 | 20.913793 | 73 | 0.712598 | false |
arvidfm/masters-thesis | src/features.py | 1 | 11702 | # Copyright (C) 2016 Arvid Fahlström Myrman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import functools
import click
import librosa.feature
import librosa.filters
import librosa.util
import numpy as np
import scipy.linalg
import scipy.signal
import dataset
_shared_arguments = {
'inset': click.Option(('-i', '--inset',), required=True),
'outset': click.Option(('-o', '--outset',), required=True),
'inplace': click.Option(('--inplace',), show_default=True, is_flag=True),
'destructive': click.Option(('--destructive',), show_default=True, is_flag=True),
'chunk_size': click.Option(('--chunk-size',), default=1000000, show_default=True),
'hdf5file': click.Argument(('hdf5file',), type=dataset.HDF5TYPE),
}
def extractor_command(dtype=None):
def decorator(comm):
comm.params.append(_shared_arguments['inset'])
comm.params.append(_shared_arguments['outset'])
comm.params.append(_shared_arguments['hdf5file'])
callback = comm.callback
@functools.wraps(callback)
def wrapper(hdf5file, inset, outset, **kwargs):
try:
inset = hdf5file[inset]
except KeyError:
raise click.BadOptionUsage("Dataset '{}' does not exist.".format(inset))
if dtype is None:
ddtype = inset.data.dtype
else:
ddtype = np.dtype(dtype)
extractor, dims = callback(inset=inset, input_dims=inset.dims, **kwargs)
outset = hdf5file.create_dataset(outset, dims, ddtype, overwrite=True)
transform_dataset(extractor, inset, outset,
callback=lambda sec, level: level == 1 and print(sec.name))
comm.callback = wrapper
return comm
return decorator
def inherit_flags(command, exclude=None):
default_excludes = {param.human_readable_name
for param in _shared_arguments.values()}
exclude = (default_excludes if exclude is None
else {*exclude, *default_excludes})
def decorator(f):
f.params.extend(param for param in command.params
if param.human_readable_name not in exclude)
return f
return decorator
def feature_extractor(parent=None):
def decorator(f):
f._parent = parent
@functools.wraps(f)
def wrapper(input_dims=(), **kwargs):
if f._parent is not None:
parent, dims = f._parent(**kwargs)
extractor, dims = f(dims=dims, input_dims=input_dims, **kwargs)
else:
parent = None
extractor, dims = f(dims=input_dims, **kwargs)
def iterator():
data = yield
while True:
if parent is not None:
data = parent.send(data)
if data is None:
data = yield None
else:
data = yield extractor(data)
it = iterator()
next(it)
return it, dims
return wrapper
return decorator
def iterate_data(section, callback, chunk_size=1000):
data = section.data
for i in range(0, data.shape[0], chunk_size):
callback(data[i:i+chunk_size])
def transform_dataset(extractor, inset, outset, callback=None):
def _build_sections(insec, outsec, level):
for sec in insec:
if callback is not None:
callback(sec, level)
sectiondata = sec.sectiondata
data = extractor.send(sectiondata[:] if sectiondata is not None else None)
metadata = sec.metadata
newsec = outsec.create_section(name=sec.name, data=data, metadata=metadata)
_build_sections(sec, newsec, level + 1)
outset.metadata = inset.metadata
_build_sections(inset, outset, 1)
def ms_to_samples(sample_rate, duration):
assert isinstance(sample_rate, int)
return int(sample_rate // 1000 * duration)
@feature_extractor()
def frame(*, dims, sample_rate=None, window_length=25, window_shift=10,
feature_axis=-1, new_axis=False, collapse_axes=False, **kwargs):
if sample_rate is not None:
window_length = ms_to_samples(sample_rate, window_length)
window_shift = ms_to_samples(sample_rate, window_shift)
if len(dims) == 0:
new_axis = True
new_shape = np.array([window_length, *dims])
# compensate for new axis if positive;
# get actual axis index if negative so that axis + 1 isn't 0
feature_axis = (feature_axis + 1 if feature_axis >= 0
else len(new_shape) + feature_axis)
# get axis indices
axes = np.arange(len(new_shape))
# move new axis to just before the feature axis
transpose = np.hstack((np.roll(axes[:feature_axis], -1), axes[feature_axis:]))
# update shape tuple
new_shape = tuple(new_shape[transpose])
# merge new axis and feature axis through multiplication,
# but return an empty tuple if 1D (will be merged with the first axis)
prod = lambda a: (a[0]*a[1],) if len(a) == 2 else ()
if collapse_axes:
new_shape = (*new_shape[:feature_axis-1],
*prod(new_shape[feature_axis-1:feature_axis+1]),
*new_shape[feature_axis+1:])
def extractor(data):
if data.shape[0] < window_length:
return None
try:
indices = librosa.util.frame(
np.arange(data.shape[0]), window_length, window_shift).T
except librosa.ParameterError:
return None
data = data[indices]
# compensate for unspecified first axis
fa = feature_axis + 1
# add first axis to transpose array
transp = np.hstack((0, transpose + 1))
data = data.transpose(transp)
if collapse_axes:
data = data.reshape((-1, *new_shape))
return data
return extractor, new_shape
@feature_extractor(parent=frame)
def spectrum(*, dims, fft_bins=512, window_function='hamming', **kwargs):
if window_function == 'hamming':
window = scipy.signal.hamming(dims[0], sym=False)
elif window_function == 'hann':
window = scipy.signal.hann(dims[0], sym=False)
else:
window = None
def extractor(data):
return abs(np.fft.fft(data, fft_bins))**2
return extractor, (fft_bins,)
@feature_extractor(parent=spectrum)
def fbank(*, dims, sample_rate, filters=40, low_frequency=0.0,
high_frequency=None, **kwargs):
filterbank = librosa.filters.mel(sample_rate, dims[0], filters,
low_frequency, high_frequency).T
def extractor(data):
return np.log(data[:,:dims[0] // 2 + 1] @ filterbank)
return extractor, (filters,)
@feature_extractor(parent=fbank)
def mfcc(*, mfccs=13, first_order=False, second_order=False, **kwargs):
def deltas(x):
x = np.pad(x, ((2, 2), (0, 0)), 'edge')
return ((x[2:] - x[:-2])[1:-1] + 2*(x[4:] - x[:-4])) / 10
def extractor(data):
coeffs = [librosa.feature.mfcc(n_mfcc=mfccs, S=data.T).T]
if first_order or second_order:
d = deltas(coeffs[0])
if first_order:
coeffs.append(d)
if second_order:
coeffs.append(deltas(d))
return np.hstack(coeffs)
return extractor, (mfccs * (1 + first_order + second_order),)
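# Illustrative usage (not part of the original module): each decorated
# extractor returns a (generator, dims) pair, and raw samples are pushed
# through the whole frame -> spectrum -> fbank -> mfcc chain with .send().
# The sample rate and signal below are made up.
#
#   extractor, dims = mfcc(sample_rate=16000, mfccs=13)   # dims == (13,)
#   samples = np.random.randn(16000)                      # 1 s of fake audio
#   feats = extractor.send(samples)                       # (n_frames, 13)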
@feature_extractor()
def normalize(*, dims, mean, std, **kwargs):
def extractor(data):
return (data - mean) / std
return extractor, dims
def calculate_mean_std(inset):
running_mean = 0
running_squares = 0
n = 0
def collect_statistics(data):
nonlocal running_mean, running_squares, n
m = n + data.shape[0]
running_mean = (n/m) * running_mean + (data / m).sum(axis=0)
running_squares = (n/m) * running_squares + (data**2 / m).sum(axis=0)
n = m
iterate_data(inset, collect_statistics)
std = np.sqrt(running_squares - running_mean**2)
print("Mean: {}; Std: {}".format(running_mean, std))
return running_mean, std
def log():
pass
def randomize():
pass
def lpc(samples, n):
alphas = []
for sample in np.atleast_2d(samples):
corr = np.correlate(sample, sample, mode='full')[len(sample) - 1:]
alpha = scipy.linalg.solve_toeplitz(corr[:n], corr[1:n+1])
alphas.append(alpha)
return np.array(alphas)
def envelope(samples, fs, coeffs=18, resolution=512,
max_freq=5000, min_freq=100):
alpha = lpc(samples, coeffs)
steps = np.linspace(min_freq, max_freq, resolution)
exponents = np.outer(1j * 2 * np.pi * steps / fs,
-np.arange(1, coeffs + 1))
spec = 1 / (1 - (alpha * np.exp(exponents)).sum(axis=1))
power = abs(spec) ** 2
return power, steps
def formants(samples, fs, num_formants=3, return_spec=False, **kwargs):
power, steps = envelope(samples, fs, **kwargs)
# find values larger that both neighbours
local_maxima = (power[:-1] > power[1:])[1:] & (power[1:] > power[:-1])[:-1]
indices, = np.where(local_maxima)
formants = steps[indices + 1][:num_formants]
if return_spec:
return power, formants
else:
return formants
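# Illustrative usage (not part of the original module), assuming `samples` is
# a voiced frame sampled at `fs` Hz:
#
#   power, freqs = envelope(samples, fs)   # LPC spectral envelope
#   f1_f2_f3 = formants(samples, fs)       # frequencies of the first 3 peaks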
@click.group()
def main():
pass
@extractor_command()
@click.option('--window-shift', default=10, show_default=True)
@click.option('--window-length', default=25, show_default=True)
@click.option('--sample-rate', type=int)
@click.option('--feature-axis', default=-1, show_default=True)
@click.option('--collapse-axes', is_flag=True)
@main.command('frame')
def frame_comm(**kwargs):
return frame(**kwargs)
@extractor_command(dtype='f4')
@click.option('--window-function', default='hamming', show_default=True,
type=click.Choice(['none', 'hamming', 'hann']))
@click.option('--fft-bins', default=512, show_default=True)
@inherit_flags(frame_comm, exclude={'feature_axis', 'collapse_axes'})
@main.command('spectrum')
def spectrum_comm(**kwargs):
return spectrum(**kwargs)
@extractor_command(dtype='f4')
@click.option('--filters', default=40, show_default=True)
@click.option('--low-frequency', default=0.0, show_default=True)
@click.option('--high-frequency', default=None, type=float)
@inherit_flags(spectrum_comm, exclude={'sample_rate'})
@click.option('--sample-rate', type=int, required=True)
@main.command('fbank')
def fbank_comm(**kwargs):
return fbank(**kwargs)
@extractor_command(dtype='f4')
@click.option('--second-order', is_flag=True, show_default=True)
@click.option('--first-order', is_flag=True, show_default=True)
@click.option('--mfccs', default=13, show_default=True)
@inherit_flags(fbank_comm)
@main.command('mfcc')
def mfcc_comm(**kwargs):
return mfcc(**kwargs)
@extractor_command(dtype='f4')
@main.command('normalize')
def normalize_comm(inset, **kwargs):
mean, std = calculate_mean_std(inset)
return normalize(mean=mean, std=std, **kwargs)
if __name__ == '__main__':
main()
| gpl-2.0 | 7,318,891,887,492,125,000 | 33.014535 | 89 | 0.617041 | false |
vootelerotov/CTT-reliability-estimates | tests/lambda_2_test.py | 1 | 2349 | '''
Created on Aug 5, 2013
@author: Voss
'''
import sys
sys.path.append('../python2')
import unittest
import logging
import lambda_2
import numpy as np
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.basicConfig(filename="./test_log.log",
level=logging.DEBUG,
format="%(asctime)s -%(levelname) -8s %(message)s")
cls.log = logging.getLogger()
def testLambda2Sijtsma1(self):
"""
Based on first covariance matrix of Sijtsma,2009: 117
Data to be compared with taken from Revelle,Zinbarg, 2008 (S-2a)
"""
self.log.debug("------------------------ l_2 test number 1 based on Sijtsma ---------------------------")
cov_matrix = np.array([[0.25,0.2,0,0,0,0],[0.2,0.25,0,0,0,0],[0,0,0.25,0.2,0,0],[0,0,0.2,0.25,0,0],[0,0,0,0,0.25,0.2],[0,0,0,0,0.2,0.25]])
self.assertAlmostEqual(lambda_2.calculate_lambda_2(cov_matrix), 0.643, 3, "Not equal")
def testLambda2Sijtsma2(self):
"""
Based on second covariance matrix of Sijtsma,2009: 117
Data to be compared with taken from Revelle,Zinbarg, 2008 (S-2b)
"""
self.log.debug("-------------------------- l_2 test number 2 based on Sijtsma ---------------------------")
cov_matrix = np.array([[0.25,0.1,0.1,0,0,0],[0.1,0.25,0.1,0,0,0],[0.1,0.1,0.25,0,0,0],[0,0,0,0.25,0.1,0.1],[0,0,0,0.1,0.25,0.1],[0,0,0,0.1,0.1,0.25]])
self.assertAlmostEqual(lambda_2.calculate_lambda_2(cov_matrix), 0.585, 3, "Not equal")
def testLambda2Sijtsma3(self):
"""
Based on third covariance matrix of Sijtsma,2009: 117
Data to be compared with taken from Revelle,Zinbarg, 2008 (S-2c)
"""
self.log.debug("---------------------- l_2 test number 3 based on Sijtsma ----------------------------")
cov_matrix = np.array([[0.25,0.04,0.04,0.04,0.04,0.04],[0.04,0.25,0.04,0.04,0.04,0.04],[0.04,0.04,0.25,0.04,0.04,0.04],[0.04,0.04,0.04,0.25,0.04,0.04],[0.04,0.04,0.04,0.04,0.25,0.04],[0.04,0.04,0.04,0.04,0.04,0.25]])
self.assertAlmostEqual(lambda_2.calculate_lambda_2(cov_matrix), 0.533, 3, "Not equal")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | mit | 7,359,208,791,154,483,000 | 44.098039 | 224 | 0.539378 | false |
vkscool/nupic | nupic/swarming/HypersearchV2.py | 1 | 169328 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import logging
import json
import hashlib
import itertools
import StringIO
import shutil
import tempfile
import copy
import pprint
from operator import itemgetter
from nupic.data import dictutils
from nupic.frameworks.opf import opfhelpers
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.support import clippedObj
from nupic.support.serializationutils import sortedJSONDumpS
from nupic.support.configuration import Configuration
from nupic.swarming.hypersearch.errorcodes import ErrorCodes
from nupic.database.ClientJobsDAO import (
ClientJobsDAO, InvalidConnectionException)
from nupic.swarming.utils import (runModelGivenBaseAndParams,
runDummyModel)
from nupic.swarming.permutationhelpers import *
from nupic.swarming.exp_generator.ExpGenerator import expGenerator
def _flattenKeys(keys):
return '|'.join(keys)
class SwarmTerminator(object):
"""Class that records the performane of swarms in a sprint and makes
decisions about which swarms should stop running. This is a usful optimization
that identifies field combinations that no longer need to be run.
"""
MATURITY_WINDOW = None
MAX_GENERATIONS = None
_DEFAULT_MILESTONES = [1.0 / (x + 1) for x in xrange(12)]
def __init__(self, milestones=None, logLevel=None):
# Set class constants.
self.MATURITY_WINDOW = int(Configuration.get(
"nupic.hypersearch.swarmMaturityWindow"))
self.MAX_GENERATIONS = int(Configuration.get(
"nupic.hypersearch.swarmMaxGenerations"))
if self.MAX_GENERATIONS < 0:
self.MAX_GENERATIONS = None
# Set up instsance variables.
self._isTerminationEnabled = bool(int(Configuration.get(
'nupic.hypersearch.enableSwarmTermination')))
self.swarmBests = dict()
self.swarmScores = dict()
self.terminatedSwarms = set([])
self._logger = logging.getLogger(".".join(
['com.numenta', self.__class__.__module__, self.__class__.__name__]))
if milestones is not None:
self.milestones = milestones
else:
self.milestones = copy.deepcopy(self._DEFAULT_MILESTONES)
def recordDataPoint(self, swarmId, generation, errScore):
"""Record the best score for a swarm's generation index (x)
Returns list of swarmIds to terminate.
"""
terminatedSwarms = []
# Append score to existing swarm.
if swarmId in self.swarmScores:
entry = self.swarmScores[swarmId]
assert(len(entry) == generation)
entry.append(errScore)
entry = self.swarmBests[swarmId]
entry.append(min(errScore, entry[-1]))
assert(len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))
else:
# Create list of scores for a new swarm
assert (generation == 0)
self.swarmScores[swarmId] = [errScore]
self.swarmBests[swarmId] = [errScore]
# If the current swarm hasn't completed at least MATURITY_WINDOW generations,
# it should not be a candidate for maturation or termination. This prevents
# the initial allocation of particles in PSO from killing off a field
# combination too early.
if generation + 1 < self.MATURITY_WINDOW:
return terminatedSwarms
# If the swarm has completed more than MAX_GENERATIONS, it should be marked
# as mature, regardless of how its value is changing.
if self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:
self._logger.info(
'Swarm %s has matured (more than %d generations). Stopping' %
(swarmId, self.MAX_GENERATIONS))
terminatedSwarms.append(swarmId)
if self._isTerminationEnabled:
terminatedSwarms.extend(self._getTerminatedSwarms(generation))
# Return which swarms to kill when we've reached maturity
# If there is no change in the swarm's best for some time,
# Mark it dead
cumulativeBestScores = self.swarmBests[swarmId]
if cumulativeBestScores[-1] == cumulativeBestScores[-self.MATURITY_WINDOW]:
self._logger.info('Swarm %s has matured (no change in %d generations).'
'Stopping...'% (swarmId, self.MATURITY_WINDOW))
terminatedSwarms.append(swarmId)
self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
return terminatedSwarms
def numDataPoints(self, swarmId):
if swarmId in self.swarmScores:
return len(self.swarmScores[swarmId])
else:
return 0
def _getTerminatedSwarms(self, generation):
terminatedSwarms = []
generationScores = dict()
for swarm, scores in self.swarmScores.iteritems():
if len(scores) > generation and swarm not in self.terminatedSwarms:
generationScores[swarm] = scores[generation]
if len(generationScores) == 0:
return
bestScore = min(generationScores.values())
tolerance = self.milestones[generation]
for swarm, score in generationScores.iteritems():
if score > (1 + tolerance) * bestScore:
self._logger.info('Swarm %s is doing poorly at generation %d.\n'
'Current Score:%s \n'
'Best Score:%s \n'
'Tolerance:%s. Stopping...',
swarm, generation, score, bestScore, tolerance)
terminatedSwarms.append(swarm)
return terminatedSwarms
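# Illustrative usage (not part of the original class): the terminator is fed
# one best-of-generation error score at a time; the swarm id and scores below
# are made up.
#
#   terminator = SwarmTerminator()
#   for gen, err in enumerate([0.50, 0.48, 0.48, 0.48, 0.48, 0.48]):
#     dead = terminator.recordDataPoint('field1.field2', gen, err)
#   # Once a swarm's best score has not improved over MATURITY_WINDOW
#   # generations, its id is returned in `dead` and recorded in
#   # terminator.terminatedSwarms.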
class ResultsDB(object):
"""This class holds all the information we have accumulated on completed
models, which particles were used, etc.
When we get updated results sent to us (via recordModelProgress), we
record it here for access later by various functions in this module.
"""
def __init__(self, hsObj):
""" Instantiate our results database
Parameters:
--------------------------------------------------------------------
hsObj: Reference to the HypersearchV2 instance
"""
self._hsObj = hsObj
# This list holds all the results we have so far on every model. In
# addition, we maintain multiple other data structures which provide
# faster access into portions of this list
self._allResults = []
# Models that completed with errors and all completed.
# These are used to determine when we should abort because of too many
# errors
self._errModels = set()
self._numErrModels = 0
self._completedModels = set()
self._numCompletedModels = 0
# Map of the model ID to index of result in _allResults
self._modelIDToIdx = dict()
# The global best result on the optimize metric so far, and the model ID
self._bestResult = numpy.inf
self._bestModelID = None
# This is a dict of dicts. The top level dict has the swarmId as the key.
# Each entry is a dict of genIdx: (modelId, errScore) entries.
self._swarmBestOverall = dict()
# For each swarm, we keep track of how many particles we have per generation
# The key is the swarmId, the value is a list of the number of particles
# at each generation
self._swarmNumParticlesPerGeneration = dict()
# The following variables are used to support the
# getMaturedSwarmGenerations() call.
#
# The _modifiedSwarmGens set contains the set of (swarmId, genIdx) tuples
# that have had results reported to them since the last time
# getMaturedSwarmGenerations() was called.
#
# The maturedSwarmGens contains (swarmId,genIdx) tuples, one for each
# swarm generation index which we have already detected has matured. This
# insures that if by chance we get a rogue report from a model in a swarm
# generation index which we have already assumed was matured that we won't
# report on it again.
self._modifiedSwarmGens = set()
self._maturedSwarmGens = set()
# For each particle, we keep track of its best score (across all
# generations) and the position it was at when it got that score. The keys
# in this dict are the particleId, the values are (bestResult, position),
# where position is a dict with varName:position items in it.
self._particleBest = dict()
# For each particle, we keep track of its latest generation index.
self._particleLatestGenIdx = dict()
# For each swarm, we keep track of which models are in it. The key
# is the swarmId, the value is a list of indexes into self._allResults.
self._swarmIdToIndexes = dict()
# ParamsHash to index mapping
self._paramsHashToIndexes = dict()
def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
""" Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
"""
# The modelParamsHash must always be provided - it can change after a
# model is inserted into the models table if it got detected as an
# orphan
assert (modelParamsHash is not None)
# We consider a model metricResult as "final" if it has completed or
# matured. By default, assume anything that has completed has matured
if completed:
matured = True
# Get the canonicalized optimize metric results. For this metric, lower
# is always better
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
# Canonicalize the error score so that lower is better
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
# If this model completed with an unacceptable completion reason, set the
# errScore to infinite and essentially make this model invisible to
# further queries
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
# Update our set of erred models and completed models. These are used
# to determine if we should abort the search because of too many errors
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
# Are we creating a new entry?
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
# Update the list of particles in each swarm
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
# Update number of particles at each generation in this swarm
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
# Replacing an existing one
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
# If the paramsHash changed, note that. This can happen for orphaned
# models
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
# Get the model params, swarmId, and genIdx
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
# If this particle just became hidden, remove it from our swarm counts
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
# Update the entry for the latest info
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
# Update the particle best errScore
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
# Update the particle latest generation index
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
# Update the swarm best score
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
# Update the self._modifiedSwarmGens flags to support the
# getMaturedSwarmGenerations() call.
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore
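# Illustrative usage (not part of the original class): a sketch of a typical
# call from a worker when a model finishes; the ids and values are made up.
#
#   errScore = resultsDB.update(
#       modelID=123, modelParams=params, modelParamsHash='deadbeef',
#       metricResult=7.3, completed=True,
#       completionReason=ClientJobsDAO.CMPL_REASON_EOF,
#       matured=True, numRecords=1000)
#   # With _maximize False, the returned (canonicalized) errScore is 7.3.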
def getNumErrModels(self):
"""Return number of models that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # of models
"""
return self._numErrModels
def getErrModelIds(self):
"""Return list of models IDs that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # if models
"""
return list(self._errModels)
def getNumCompletedModels(self):
"""Return total number of models that completed.
Parameters:
---------------------------------------------------------------------
retval: # of models that completed
"""
return self._numCompletedModels
def getModelIDFromParamsHash(self, paramsHash):
""" Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found
"""
entryIdx = self. _paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None
def numModels(self, swarmId=None, includeHidden=False):
"""Return the total # of models we have in our database (if swarmId is
None) or in a specific swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders
in this swarm. For example '__address_encoder.__gym_encoder'
includeHidden: If False, this will only return the number of models
that are not hidden (i.e. orphaned, etc.)
retval: numModels
"""
# Count all models
if includeHidden:
if swarmId is None:
return len(self._allResults)
else:
return len(self._swarmIdToIndexes.get(swarmId, []))
# Only count non-hidden models
else:
if swarmId is None:
entries = self._allResults
else:
entries = [self._allResults[entryIdx]
for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]
return len([entry for entry in entries if not entry['hidden']])
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
"""Return the model ID of the model with the best result so far and
its score on the optimize metric. If swarm is None, then it returns
the global best, otherwise it returns the best for the given swarm
for all generations up to and including genIdx.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: consider the best in all generations up to and including this
generation if not None.
retval: (modelID, result)
"""
if swarmId is None:
return (self._bestModelID, self._bestResult)
else:
if swarmId not in self._swarmBestOverall:
return (None, numpy.inf)
# Get the best score, considering the appropriate generations
genScores = self._swarmBestOverall[swarmId]
bestModelId = None
bestScore = numpy.inf
for (i, (modelId, errScore)) in enumerate(genScores):
if genIdx is not None and i > genIdx:
break
if errScore < bestScore:
bestScore = errScore
bestModelId = modelId
return (bestModelId, bestScore)
def getParticleInfo(self, modelId):
"""Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)
"""
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured'])
def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,
matured=None, lastDescendent=False):
"""Return a list of particleStates for all particles we know about in
the given swarm, their model Ids, and metric results.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
completed: If not None, only return particles of the given state (either
completed if 'completed' is True, or running if 'completed'
is False)
matured: If not None, only return particles of the given state (either
matured if 'matured' is True, or not matured if 'matured'
is False). Note that any model which has completed is also
considered matured.
lastDescendent: If True, only return particles that are the last descendent,
that is, the highest generation index for a given particle Id
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
# The indexes of all the models in this swarm. This list excludes hidden
# (orphaned) models.
if swarmId is not None:
entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
else:
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
entry = self._allResults[idx]
# If this entry is hidden (i.e. it was an orphaned model), it should
# not be in this list
if swarmId is not None:
assert (not entry['hidden'])
# Get info on this model
modelParams = entry['modelParams']
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
if completed is not None and (completed != isCompleted):
continue
if matured is not None and (matured != isMatured):
continue
if lastDescendent \
and (self._particleLatestGenIdx[particleId] != particleGenIdx):
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
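# Illustrative consumption of the five parallel lists returned above
# (variable names are hypothetical):
#
#   (states, ids, errs, completed, matured) = resultsDB.getParticleInfos(
#       swarmId='__gym_encoder', genIdx=0, matured=True)
#   for state, modelId, err in zip(states, ids, errs):
#     print "particle %s -> model %s, err %s" % (state['id'], modelId, err)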
def getOrphanParticleInfos(self, swarmId, genIdx):
"""Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
# Get info on this model
entry = self._allResults[idx]
if not entry['hidden']:
continue
modelParams = entry['modelParams']
if modelParams['particleState']['swarmId'] != swarmId:
continue
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
def getMaturedSwarmGenerations(self):
"""Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
"""
# Return results go in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result
def firstNonFullGeneration(self, swarmId, minNumParticles):
""" Return the generation index of the first generation in the given
swarm that does not have minNumParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minimum number of particles required for a full
generation.
retval: generation index, or None if no particles at all.
"""
if not swarmId in self._swarmNumParticlesPerGeneration:
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where(numPsPerGen < minNumParticles)[0]
if len(firstNonFull) == 0:
return len(numPsPerGen)
else:
return firstNonFull[0]
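# Worked example of the logic above (counts are hypothetical): if a swarm
# has recorded [5, 5, 3] particles for generations 0, 1, 2 and
# minNumParticles is 5, numpy.where finds index 2, so generation 2 is the
# first non-full one. With counts [5, 5, 5] nothing matches and we return
# len(numPsPerGen) == 3, i.e. the next, not-yet-created generation.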
def highestGeneration(self, swarmId):
""" Return the generation index of the highest generation in the given
swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
retval: generation index
"""
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
return len(numPsPerGen)-1
def getParticleBest(self, particleId):
""" Return the best score and position for a given particle. The position
is given as a dict, with varName:varPosition items in it.
Parameters:
---------------------------------------------------------------------
particleId: which particle
retval: (bestResult, bestPosition)
"""
return self._particleBest.get(particleId, (None, None))
def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
""" Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
['a', 'b', 'c']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is a tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
{'a': ('a', [0.1, 0.2, 0.3]), 'b': ('b', [0.5, 0.1, 0.6]), 'c': ('c', [])}
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: list of the errors obtained from each choice.
"""
results = dict()
# Get all the completed particles in this swarm
(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
genIdx=None, matured=True)
for particleState, resultErr in itertools.izip(allParticles, resultErrs):
# Consider this generation?
if maxGenIdx is not None:
if particleState['genIdx'] > maxGenIdx:
continue
# Ignore unless this model completed successfully
if resultErr == numpy.inf:
continue
position = Particle.getPositionFromState(particleState)
varPosition = position[varName]
varPositionStr = str(varPosition)
if varPositionStr in results:
results[varPositionStr][1].append(resultErr)
else:
results[varPositionStr] = (varPosition, [resultErr])
return results
class Particle(object):
"""Construct a particle. Each particle evaluates one or more models
serially. Each model represents a position that the particle is evaluated
at.
Each position is a set of values chosen for each of the permutation variables.
The particle's best position is the value of the permutation variables when it
did best on the optimization metric.
Some permutation variables are treated like traditional particle swarm
variables - that is they have a position and velocity. Others are simply
choice variables, for example a list of strings. We follow a different
methodology for choosing each permutation variable value depending on its
type.
A particle belongs to 1 and only 1 swarm. A swarm is a collection of particles
that all share the same global best position. A swarm is identified by its
specific combination of fields. If we are evaluating multiple different field
combinations, then there will be multiple swarms. A Hypersearch Worker (HSW)
will only instantiate and run one particle at a time. When done running a
particle, another worker can pick it up, pick a new position for it, and run
it based on the particle state information which is stored in each model table
entry.
Each particle has a generationIdx. It starts out at generation #0. Every time
a model evaluation completes and the particle is moved to a different position
(to evaluate a different model), the generation index is incremented.
Every particle that is created has a unique particleId. The particleId
is a string formed as '<workerConnectionId>.<particleIdx>', where particleIdx
starts at 0 for each worker and increments by 1 every time a new particle
is created by that worker.
"""
_nextParticleID = 0
def __init__(self, hsObj, resultsDB, flattenedPermuteVars,
swarmId=None, newFarFrom=None, evolveFromState=None,
newFromClone=None, newParticleId=False):
""" Create a particle.
There are 3 fundamentally different methods of instantiating a particle:
1.) You can instantiate a new one from scratch, at generation index #0. This
particle gets a new particleId.
required: swarmId
optional: newFarFrom
must be None: evolveFromState, newFromClone
2.) You can instantiate one from saved state, in which case its generation
index is incremented (from the value stored in the saved state) and
its particleId remains the same.
required: evolveFromState
optional:
must be None: flattenedPermuteVars, swarmId, newFromClone
3.) You can clone another particle, creating a new particle at the same
generationIdx but a different particleId. This new particle will end
up at exactly the same position as the one it was cloned from. If
you want to move it to the next position, or just jiggle it a bit, call
newPosition() or agitate() after instantiation.
required: newFromClone
optional:
must be None: flattenedPermuteVars, swarmId, evolveFromState
Parameters:
--------------------------------------------------------------------
hsObj: The HypersearchV2 instance
resultsDB: the ResultsDB instance that holds all the model results
flattenedPermuteVars: dict() containing the (key, PermuteVariable) pairs
of the flattened permutation variables as read from the permutations
file.
swarmId: String that represents the encoder names of the encoders that are
to be included in this particle's model. Of the form
'encoder1.encoder2'.
Required for creation method #1.
newFarFrom: If not None, this is a list of other particleState dicts in the
swarm that we want to be as far away from as possible. Optional
argument for creation method #1.
evolveFromState: If not None, evolve an existing particle. This is a
dict containing the particle's state. Preserve the particleId, but
increment the generation index. Required for creation method #2.
newFromClone: If not None, clone this other particle's position and generation
index, with small random perturbations. This is a dict containing the
particle's state. Required for creation method #3.
newParticleId: Only applicable when newFromClone is provided. Give the clone
a new particle ID.
"""
# Save constructor arguments
self._hsObj = hsObj
self.logger = hsObj.logger
self._resultsDB = resultsDB
# Seed the random number generator used for all the variables in this
# particle. We will seed it differently based on the construction method,
# below.
self._rng = random.Random()
self._rng.seed(42)
# Setup our variable set by taking what's in flattenedPermuteVars and
# stripping out vars that belong to encoders we are not using.
def _setupVars(flattenedPermuteVars):
allowedEncoderNames = self.swarmId.split('.')
self.permuteVars = copy.deepcopy(flattenedPermuteVars)
# Remove fields we don't want.
varNames = self.permuteVars.keys()
for varName in varNames:
# Remove encoders we're not using
if ':' in varName: # if an encoder
if varName.split(':')[0] not in allowedEncoderNames:
self.permuteVars.pop(varName)
continue
# All PermuteChoice variables need to know all prior results obtained
# with each choice.
if isinstance(self.permuteVars[varName], PermuteChoices):
if self._hsObj._speculativeParticles:
maxGenIdx = None
else:
maxGenIdx = self.genIdx-1
resultsPerChoice = self._resultsDB.getResultsPerChoice(
swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)
self.permuteVars[varName].setResultsPerChoice(
resultsPerChoice.values())
# Method #1
# Create from scratch, optionally pushing away from others that already
# exist.
if swarmId is not None:
assert (evolveFromState is None)
assert (newFromClone is None)
# Save construction param
self.swarmId = swarmId
# Assign a new unique ID to this particle
self.particleId = "%s.%s" % (str(self._hsObj._workerID),
str(Particle._nextParticleID))
Particle._nextParticleID += 1
# Init the generation index
self.genIdx = 0
# Setup the variables to initial locations.
_setupVars(flattenedPermuteVars)
# Push away from other particles?
if newFarFrom is not None:
for varName in self.permuteVars.iterkeys():
otherPositions = []
for particleState in newFarFrom:
otherPositions.append(particleState['varStates'][varName]['position'])
self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)
# Give this particle a unique seed.
self._rng.seed(str(otherPositions))
# Method #2
# Instantiate from saved state, preserving particleId but incrementing
# generation index.
elif evolveFromState is not None:
assert (swarmId is None)
assert (newFarFrom is None)
assert (newFromClone is None)
# Setup other variables from saved state
self.particleId = evolveFromState['id']
self.genIdx = evolveFromState['genIdx'] + 1
self.swarmId = evolveFromState['swarmId']
# Setup the variables to initial locations.
_setupVars(flattenedPermuteVars)
# Override the position and velocity of each variable from
# saved state
self.initStateFrom(self.particleId, evolveFromState, newBest=True)
# Move it to the next position. We need the swarm best for this.
self.newPosition()
# Method #3
# Clone another particle, producing a new particle at the same genIdx with
# the same particleID. This is used to re-run an orphaned model.
elif newFromClone is not None:
assert (swarmId is None)
assert (newFarFrom is None)
assert (evolveFromState is None)
# Setup other variables from clone particle
self.particleId = newFromClone['id']
if newParticleId:
self.particleId = "%s.%s" % (str(self._hsObj._workerID),
str(Particle._nextParticleID))
Particle._nextParticleID += 1
self.genIdx = newFromClone['genIdx']
self.swarmId = newFromClone['swarmId']
# Setup the variables to initial locations.
_setupVars(flattenedPermuteVars)
# Override the position and velocity of each variable from
# the clone
self.initStateFrom(self.particleId, newFromClone, newBest=False)
else:
assert False, "invalid creation parameters"
# Log it
self.logger.debug("Created particle: %s" % (str(self)))
def __repr__(self):
return "Particle(swarmId=%s) [particleId=%s, genIdx=%d, " \
"permuteVars=\n%s]" % (self.swarmId, self.particleId,
self.genIdx, pprint.pformat(self.permuteVars, indent=4))
def getState(self):
"""Get the particle state as a dict. This is enough information to
instantiate this particle on another worker."""
varStates = dict()
for varName, var in self.permuteVars.iteritems():
varStates[varName] = var.getState()
return dict(id = self.particleId,
genIdx = self.genIdx,
swarmId = self.swarmId,
varStates = varStates)
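# Illustrative shape of the dict returned by getState() (all values are
# hypothetical):
#
#   {'id':        '1234.0',
#    'genIdx':    2,
#    'swarmId':   '__address_encoder.__gym_encoder',
#    'varStates': {<varName>: <PermuteVariable state>, ...}}
#
# Each varStates entry is whatever that variable's getState() returns,
# typically including position, velocity, bestPosition and bestResult.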
def initStateFrom(self, particleId, particleState, newBest):
"""Init all of our variable positions, velocities, and optionally the best
result and best position from the given particle.
If newBest is True, we get the best result and position for this new
generation from the resultsDB. This is used when evolving a particle
because the bestResult and position stored in the saved state were the best
AT THE TIME THAT PARTICLE STARTED TO RUN and do not include results obtained
after that particle completed.
"""
# Get the update best position and result?
if newBest:
(bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
else:
bestResult = bestPosition = None
# Replace with the position and velocity of each variable from
# saved state
varStates = particleState['varStates']
for varName in varStates.keys():
varState = copy.deepcopy(varStates[varName])
if newBest:
varState['bestResult'] = bestResult
if bestPosition is not None:
varState['bestPosition'] = bestPosition[varName]
self.permuteVars[varName].setState(varState)
def copyEncoderStatesFrom(self, particleState):
"""Copy all encoder variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
"""
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if ':' in varName: # if an encoder
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set its initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng)
def copyVarStatesFrom(self, particleState, varNames):
"""Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy
"""
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if varName in varNames:
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set its initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng)
def getPosition(self):
"""Return the position of this particle. This returns a dict() of key
value pairs where each key is the name of the flattened permutation
variable and the value is its chosen value.
Parameters:
--------------------------------------------------------------
retval: dict() of flattened permutation choices
"""
result = dict()
for (varName, value) in self.permuteVars.iteritems():
result[varName] = value.getPosition()
return result
@staticmethod
def getPositionFromState(pState):
"""Return the position of a particle given its state dict.
Parameters:
--------------------------------------------------------------
retval: dict() of particle position, keys are the variable names,
values are their positions
"""
result = dict()
for (varName, value) in pState['varStates'].iteritems():
result[varName] = value['position']
return result
def agitate(self):
"""Agitate this particle so that it is likely to go to a new position.
Every time agitate is called, the particle is jiggled an even greater
amount.
Parameters:
--------------------------------------------------------------
retval: None
"""
for (varName, var) in self.permuteVars.iteritems():
var.agitate()
self.newPosition()
def newPosition(self, whichVars=None):
# TODO: incorporate data from choice variables....
# TODO: make sure we're calling this when appropriate.
"""Choose a new position based on results obtained so far from all other
particles.
Parameters:
--------------------------------------------------------------
whichVars: If not None, only move these variables
retval: new position
"""
# Get the global best position for this swarm generation
globalBestPosition = None
# If speculative particles are enabled, use the global best considering
# even particles in the current generation. This gives better results
# but does not provide repeatable results because it depends on
# worker timing
if self._hsObj._speculativeParticles:
genIdx = self.genIdx
else:
genIdx = self.genIdx - 1
if genIdx >= 0:
(bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId, genIdx)
if bestModelId is not None:
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(bestModelId)
globalBestPosition = Particle.getPositionFromState(particleState)
# Update each variable
for (varName, var) in self.permuteVars.iteritems():
if whichVars is not None and varName not in whichVars:
continue
if globalBestPosition is None:
var.newPosition(None, self._rng)
else:
var.newPosition(globalBestPosition[varName], self._rng)
# get the new position
position = self.getPosition()
# Log the new position
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "New particle position: \n%s" % (pprint.pformat(position,
indent=4))
print >> msg, "Particle variables:"
for (varName, var) in self.permuteVars.iteritems():
print >> msg, " %s: %s" % (varName, str(var))
self.logger.debug(msg.getvalue())
msg.close()
return position
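# For reference only: a classic particle-swarm position update looks like the
# sketch below. The real per-variable logic is delegated by newPosition()
# above to the PermuteVariable subclasses and may differ in detail; choice
# variables are instead re-sampled from the per-choice error statistics
# (see ResultsDB.getResultsPerChoice).
#
#   velocity = inertia * velocity \
#              + cogRate * rng.random() * (bestLocalPosition - position) \
#              + socRate * rng.random() * (globalBestPosition - position)
#   position = position + velocity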
class HsState(object):
"""This class encapsulates the Hypersearch state which we share with all
other workers. This state gets serialized into a JSON dict and written to
the engWorkerState field of the job record.
Whenever a worker changes this state, it does an atomic setFieldIfEqual to
ensure it has the latest state as updated by any other worker as a base.
Here is an example snapshot of this state information:
swarms = {'a': {'status': 'completed', # 'active','completing','completed',
# or 'killed'
'bestModelId': <modelID>, # Only set for 'completed' swarms
'bestErrScore': <errScore>, # Only set for 'completed' swarms
'sprintIdx': 0,
},
'a.b': {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 1,
}
}
sprints = [{'status': 'completed', # 'active','completing','completed'
'bestModelId': <modelID>, # Only set for 'completed' sprints
'bestErrScore': <errScore>, # Only set for 'completed' sprints
},
{'status': 'completing',
'bestModelId': <None>,
'bestErrScore': <None>
},
{'status': 'active',
'bestModelId': None,
'bestErrScore': None
}
]
"""
def __init__(self, hsObj):
""" Create our state object.
Parameters:
---------------------------------------------------------------------
hsObj: Reference to the HypersearchV2 instance; the ClientJobsDAO
instance, logger, and jobID are accessed through it.
"""
# Save constructor parameters
self._hsObj = hsObj
# Convenient access to the logger
self.logger = self._hsObj.logger
# This contains our current state, and local working changes
self._state = None
# This contains the state we last read from the database
self._priorStateJSON = None
# Set when we make a change to our state locally
self._dirty = False
# Read in the initial state
self.readStateFromDB()
def isDirty(self):
"""Return true if our local copy of the state has changed since the
last time we read from the DB.
"""
return self._dirty
def isSearchOver(self):
"""Return true if the search should be considered over."""
return self._state['searchOver']
def readStateFromDB(self):
"""Set our state to that obtained from the engWorkerState field of the
job record.
Parameters:
---------------------------------------------------------------------
retval: nothing; self._state is updated in place from the job record
"""
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
# Init if no prior state yet
if self._priorStateJSON is None:
swarms = dict()
# Fast Swarm, first and only sprint has one swarm for each field
# in fixedFields
if self._hsObj._fixedFields is not None:
print self._hsObj._fixedFields
encoderSet = []
for field in self._hsObj._fixedFields:
if field =='_classifierInput':
continue
encoderName = self.getEncoderKeyFromName(field)
assert encoderName in self._hsObj._encoderNames, "The field '%s' " \
" specified in the fixedFields list is not present in this " \
" model." % (field)
encoderSet.append(encoderName)
encoderSet.sort()
swarms['.'.join(encoderSet)] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Temporal prediction search, first sprint has N swarms of 1 field each,
# the predicted field may or may not be that one field.
elif self._hsObj._searchType == HsSearchType.temporal:
for encoderName in self._hsObj._encoderNames:
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Classification prediction search, first sprint has N swarms of 1 field
# each where this field can NOT be the predicted field.
elif self._hsObj._searchType == HsSearchType.classification:
for encoderName in self._hsObj._encoderNames:
if encoderName == self._hsObj._predictedFieldEncoder:
continue
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Legacy temporal. This is either a model that uses reconstruction or
# an older multi-step model that doesn't have a separate
# 'classifierOnly' encoder for the predicted field. Here, the predicted
# field must ALWAYS be present and the first sprint tries the predicted
# field only
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
swarms[self._hsObj._predictedFieldEncoder] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
else:
raise RuntimeError("Unsupported search type: %s" % \
(self._hsObj._searchType))
# Initialize the state.
self._state = dict(
# The last time the state was updated by a worker.
lastUpdateTime = time.time(),
# Set from within setSwarmState() if we detect that the sprint we just
# completed did worse than a prior sprint. This stores the index of
# the last good sprint.
lastGoodSprint = None,
# Set from within setSwarmState() if lastGoodSprint is True and all
# sprints have completed.
searchOver = False,
# This is a summary of the active swarms - this information can also
# be obtained from the swarms entry that follows, but is summarized here
# for easier reference when viewing the state as presented by
# log messages and prints of the hsState data structure (by
# permutations_runner).
activeSwarms = swarms.keys(),
# All the swarms that have been created so far.
swarms = swarms,
# All the sprints that have completed or are in progress.
sprints = [{'status': 'active',
'bestModelId': None,
'bestErrScore': None}],
# The list of encoders we have "blacklisted" because they
# performed so poorly.
blackListedEncoders = [],
)
# This will do nothing if the value of engWorkerState is not still None.
self._hsObj._cjDAO.jobSetFieldIfEqual(
self._hsObj._jobID, 'engWorkerState', json.dumps(self._state), None)
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(
self._hsObj._jobID, ['engWorkerState'])[0]
assert (self._priorStateJSON is not None)
# Read state from the database
self._state = json.loads(self._priorStateJSON)
self._dirty = False
def writeStateToDB(self):
"""Update the state in the job record with our local changes (if any).
If we don't have the latest state in our priorStateJSON, then re-load
in the latest state and return False. If we were successful writing out
our changes, return True
Parameters:
---------------------------------------------------------------------
retval: True if we were successful writing out our changes
False if our priorState is not the latest that was in the DB.
In this case, we will re-load our state from the DB
"""
# If no changes, do nothing
if not self._dirty:
return True
# Set the update time
self._state['lastUpdateTime'] = time.time()
newStateJSON = json.dumps(self._state)
success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,
'engWorkerState', str(newStateJSON), str(self._priorStateJSON))
if success:
self.logger.debug("Success changing hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = newStateJSON
# If no success, read in the current state from the DB
else:
self.logger.debug("Failed to change hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
self._state = json.loads(self._priorStateJSON)
self.logger.info("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._state, indent=4)))
return success
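# Typical read-modify-write cycle implied by the two methods above
# (illustrative sketch; 'hsState' is a hypothetical variable name):
#
#   while True:
#     ...mutate the state via the setter methods below...
#     if hsState.writeStateToDB():
#       break          # our compare-and-set against engWorkerState won
#     # otherwise writeStateToDB() already re-read the latest state;
#     # loop around and re-apply our changes on top of it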
def getEncoderNameFromKey(self, key):
""" Given an encoder dictionary key, get the encoder name.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return key.split('|')[-1]
def getEncoderKeyFromName(self, name):
""" Given an encoder name, get the key.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return 'modelParams|sensorParams|encoders|%s' % (name)
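# Example of the key <-> name mapping handled by the two helpers above:
#   getEncoderKeyFromName('home_winloss')
#     -> 'modelParams|sensorParams|encoders|home_winloss'
#   getEncoderNameFromKey('modelParams|sensorParams|encoders|home_winloss')
#     -> 'home_winloss'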
def getFieldContributions(self):
"""Return the field contributions statistics.
Parameters:
---------------------------------------------------------------------
retval: Tuple of two dictionaries, (pctFieldContributionsDict,
absFieldContributionsDict). In both, the keys are field names and the
values are how much each field contributed to the best score, expressed
as a percent improvement and as an absolute error difference relative
to the base score, respectively.
"""
#in the fast swarm, there is only 1 sprint and field contributions are
#not defined
if self._hsObj._fixedFields is not None:
return dict(), dict()
# Get the predicted field encoder name
predictedEncoderName = self._hsObj._predictedFieldEncoder
# -----------------------------------------------------------------------
# Collect all the single field scores
fieldScores = []
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 1:
continue
field = self.getEncoderNameFromKey(encodersUsed[0])
bestScore = info['bestErrScore']
# If the bestScore is None, this swarm hasn't completed yet (this could
# happen if we're exiting because of maxModels), so look up the best
# score so far
if bestScore is None:
(_modelId, bestScore) = \
self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
fieldScores.append((bestScore, field))
# -----------------------------------------------------------------------
# If we only have 1 field that was tried in the first sprint, then use that
# as the base and get the contributions from the fields in the next sprint.
if self._hsObj._searchType == HsSearchType.legacyTemporal:
assert(len(fieldScores)==1)
(baseErrScore, baseField) = fieldScores[0]
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 2:
continue
fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
fields.remove(baseField)
fieldScores.append((info['bestErrScore'], fields[0]))
# The first sprint tried a bunch of fields, pick the worst performing one
# (within the top self._hsObj._maxBranching ones) as the base
else:
fieldScores.sort(reverse=True)
# If maxBranching was specified, pick the worst performing field within
# the top maxBranching+1 fields as our base, which will give that field
# a contribution of 0.
if self._hsObj._maxBranching > 0 \
and len(fieldScores) > self._hsObj._maxBranching:
baseErrScore = fieldScores[-self._hsObj._maxBranching-1][0]
else:
baseErrScore = fieldScores[0][0]
# -----------------------------------------------------------------------
# Prepare and return the fieldContributions dict
pctFieldContributionsDict = dict()
absFieldContributionsDict = dict()
# If we have no base score, can't compute field contributions. This can
# happen when we exit early due to maxModels or being cancelled
if baseErrScore is not None:
# If the base error score is 0, we can't compute a percent difference
# off of it, so move it to a very small float
if abs(baseErrScore) < 0.00001:
baseErrScore = 0.00001
for (errScore, field) in fieldScores:
if errScore is not None:
pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
else:
pctBetter = 0.0
errScore = baseErrScore # for absFieldContribution
pctFieldContributionsDict[field] = pctBetter
absFieldContributionsDict[field] = baseErrScore - errScore
self.logger.debug("FieldContributions: %s" % (pctFieldContributionsDict))
return pctFieldContributionsDict, absFieldContributionsDict
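# Worked example of the contribution arithmetic above (numbers are
# hypothetical): if the base error score is 0.50 and the single-field swarm
# for 'consumption' scored 0.40, then
#   pctBetter = (0.50 - 0.40) * 100.0 / 0.50 = 20.0   (percent contribution)
#   absBetter =  0.50 - 0.40                 =  0.10  (absolute contribution)
# A field whose swarm scored worse than the base gets a negative percent
# contribution and may later be dropped by the minFieldContribution filter.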
def getAllSwarms(self, sprintIdx):
"""Return the list of all swarms in the given sprint.
Parameters:
---------------------------------------------------------------------
retval: list of all swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx:
swarmIds.append(swarmId)
return swarmIds
def getActiveSwarms(self, sprintIdx=None):
"""Return the list of active swarms in the given sprint. These are swarms
which still need new particles created in them.
Parameters:
---------------------------------------------------------------------
sprintIdx: which sprint to query. If None, get active swarms from all
sprints
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if sprintIdx is not None and info['sprintIdx'] != sprintIdx:
continue
if info['status'] == 'active':
swarmIds.append(swarmId)
return swarmIds
def getNonKilledSwarms(self, sprintIdx):
"""Return the list of swarms in the given sprint that were not killed.
This is called when we are trying to figure out which encoders to carry
forward to the next sprint. We don't want to carry forward encoder
combinations which were obviously bad (in killed swarms).
Parameters:
---------------------------------------------------------------------
retval: list of non-killed swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx and info['status'] != 'killed':
swarmIds.append(swarmId)
return swarmIds
def getCompletedSwarms(self):
"""Return the list of all completed swarms.
Parameters:
---------------------------------------------------------------------
retval: list of completed swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completed':
swarmIds.append(swarmId)
return swarmIds
def getCompletingSwarms(self):
"""Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of completing swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completing':
swarmIds.append(swarmId)
return swarmIds
def bestModelInCompletedSwarm(self, swarmId):
"""Return the best model ID and it's errScore from the given swarm.
If the swarm has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
swarmInfo = self._state['swarms'][swarmId]
return (swarmInfo['bestModelId'],
swarmInfo['bestErrScore'])
def bestModelInCompletedSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint.
If the sprint has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
sprintInfo = self._state['sprints'][sprintIdx]
return (sprintInfo['bestModelId'],
sprintInfo['bestErrScore'])
def bestModelInSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint,
which may still be in progress. This returns the best score from all models
in the sprint which have matured so far.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
# Get all the swarms in this sprint
swarms = self.getAllSwarms(sprintIdx)
# Get the best model and score from each swarm
bestModelId = None
bestErrScore = numpy.inf
for swarmId in swarms:
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
if errScore < bestErrScore:
bestModelId = modelId
bestErrScore = errScore
return (bestModelId, bestErrScore)
def setSwarmState(self, swarmId, newStatus):
"""Change the given swarm's state to 'newState'. If 'newState' is
'completed', then bestModelId and bestErrScore must be provided.
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id
newStatus: new status, either 'active', 'completing', 'completed', or
'killed'
"""
assert (newStatus in ['active', 'completing', 'completed', 'killed'])
# Set the swarm status
swarmInfo = self._state['swarms'][swarmId]
if swarmInfo['status'] == newStatus:
return
# If some other worker noticed it as completed, setting it to completing
# is obviously old information....
if swarmInfo['status'] == 'completed' and newStatus == 'completing':
return
self._dirty = True
swarmInfo['status'] = newStatus
if newStatus == 'completed':
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
swarmInfo['bestModelId'] = modelId
swarmInfo['bestErrScore'] = errScore
# If no longer active, remove it from the activeSwarms entry
if newStatus != 'active' and swarmId in self._state['activeSwarms']:
self._state['activeSwarms'].remove(swarmId)
# If new status is 'killed', kill off any running particles in that swarm
if newStatus=='killed':
self._hsObj.killSwarmParticles(swarmId)
# In case speculative particles are enabled, make sure we generate a new
# swarm at this time if all of the swarms in the current sprint have
# completed. This will ensure that we don't mark the sprint as completed
# before we've created all the possible swarms.
sprintIdx = swarmInfo['sprintIdx']
self.isSprintActive(sprintIdx)
# Update the sprint status. Check all the swarms that belong to this sprint.
# If they are all completed, the sprint is completed.
sprintInfo = self._state['sprints'][sprintIdx]
statusCounts = dict(active=0, completing=0, completed=0, killed=0)
bestModelIds = []
bestErrScores = []
for info in self._state['swarms'].itervalues():
if info['sprintIdx'] != sprintIdx:
continue
statusCounts[info['status']] += 1
if info['status'] == 'completed':
bestModelIds.append(info['bestModelId'])
bestErrScores.append(info['bestErrScore'])
if statusCounts['active'] > 0:
sprintStatus = 'active'
elif statusCounts['completing'] > 0:
sprintStatus = 'completing'
else:
sprintStatus = 'completed'
sprintInfo['status'] = sprintStatus
# If the sprint is complete, get the best model from all of its swarms and
# store that as the sprint best
if sprintStatus == 'completed':
if len(bestErrScores) > 0:
whichIdx = numpy.array(bestErrScores).argmin()
sprintInfo['bestModelId'] = bestModelIds[whichIdx]
sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
else:
# This sprint was empty, most likely because all particles were
# killed. Give it a huge error score
sprintInfo['bestModelId'] = 0
sprintInfo['bestErrScore'] = numpy.inf
# See if our best err score got NO BETTER as compared to a previous
# sprint. If so, stop exploring subsequent sprints (lastGoodSprint
# is no longer None).
bestPrior = numpy.inf
for idx in range(sprintIdx):
if self._state['sprints'][idx]['status'] == 'completed':
(_, errScore) = self.bestModelInCompletedSprint(idx)
if errScore is None:
errScore = numpy.inf
else:
errScore = numpy.inf
if errScore < bestPrior:
bestPrior = errScore
if sprintInfo['bestErrScore'] >= bestPrior:
self._state['lastGoodSprint'] = sprintIdx-1
# If ALL sprints up to the last good one are done, the search is now over
if self._state['lastGoodSprint'] is not None \
and not self.anyGoodSprintsActive():
self._state['searchOver'] = True
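# Worked example of the stopping rule above (scores are hypothetical): if
# completed sprints 0, 1, 2 have best error scores 0.50, 0.40 and 0.45, then
# when sprint 2 completes its best (0.45) is no better than the best prior
# (0.40), so lastGoodSprint is set to 1 and no sprints beyond index 1 will
# be explored further.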
def anyGoodSprintsActive(self):
"""Return True if there are any more good sprints still being explored.
A 'good' sprint is one that is earlier than where we detected an increase
in error from sprint to subsequent sprint.
"""
if self._state['lastGoodSprint'] is not None:
goodSprints = self._state['sprints'][0:self._state['lastGoodSprint']+1]
else:
goodSprints = self._state['sprints']
for sprint in goodSprints:
if sprint['status'] == 'active':
anyActiveSprints = True
break
else:
anyActiveSprints = False
return anyActiveSprints
def isSprintCompleted(self, sprintIdx):
"""Return True if the given sprint has completed."""
numExistingSprints = len(self._state['sprints'])
if sprintIdx >= numExistingSprints:
return False
return (self._state['sprints'][sprintIdx]['status'] == 'completed')
def killUselessSwarms(self):
"""See if we can kill off some speculative swarms. If an earlier sprint
has finally completed, we can now tell which fields should *really* be present
in the sprints we've already started due to speculation, and kill off the
swarms that should not have been included.
"""
# Get number of existing sprints
numExistingSprints = len(self._state['sprints'])
# Should we bother killing useless swarms?
if self._hsObj._searchType == HsSearchType.legacyTemporal:
if numExistingSprints <= 2:
return
else:
if numExistingSprints <= 1:
return
# Form completedSwarms as a list of tuples, each tuple contains:
# (swarmName, swarmState, swarmBestErrScore)
# ex. completedSwarms:
# [('a', {...}, 1.4),
# ('b', {...}, 2.0),
# ('c', {...}, 3.0)]
completedSwarms = self.getCompletedSwarms()
completedSwarms = [(swarm, self._state["swarms"][swarm],
self._state["swarms"][swarm]["bestErrScore"]) \
for swarm in completedSwarms]
# Form the completedMatrix. Each row corresponds to a sprint. Each row
# contains the list of swarm tuples that belong to that sprint, sorted
# by best score. Each swarm tuple contains (swarmName, swarmState,
# swarmBestErrScore).
# ex. completedMatrix:
# [(('a', {...}, 1.4), ('b', {...}, 2.0), ('c', {...}, 3.0)),
# (('a.b', {...}, 3.0), ('b.c', {...}, 4.0))]
completedMatrix = [[] for i in range(numExistingSprints)]
for swarm in completedSwarms:
completedMatrix[swarm[1]["sprintIdx"]].append(swarm)
for sprint in completedMatrix:
sprint.sort(key=itemgetter(2))
# Form activeSwarms as a list of tuples, each tuple contains:
# (swarmName, swarmState, swarmBestErrScore)
# Include all activeSwarms and completingSwarms
# ex. activeSwarms:
# [('d', {...}, 1.4),
# ('e', {...}, 2.0),
# ('f', {...}, 3.0)]
activeSwarms = self.getActiveSwarms()
# Append the completing swarms
activeSwarms.extend(self.getCompletingSwarms())
activeSwarms = [(swarm, self._state["swarms"][swarm],
self._state["swarms"][swarm]["bestErrScore"]) \
for swarm in activeSwarms]
# Form the activeMatrix. Each row corresponds to a sprint. Each row
# contains the list of swarm tuples that belong to that sprint, sorted
# by best score. Each swarm tuple contains (swarmName, swarmState,
# swarmBestErrScore)
# ex. activeMatrix:
# [(('d', {...}, 1.4), ('e', {...}, 2.0), ('f', {...}, 3.0)),
# (('d.e', {...}, 3.0), ('e.f', {...}, 4.0))]
activeMatrix = [[] for i in range(numExistingSprints)]
for swarm in activeSwarms:
activeMatrix[swarm[1]["sprintIdx"]].append(swarm)
for sprint in activeMatrix:
sprint.sort(key=itemgetter(2))
# Figure out which active swarms to kill
toKill = []
for i in range(1, numExistingSprints):
for swarm in activeMatrix[i]:
curSwarmEncoders = swarm[0].split(".")
# If previous sprint is complete, get the best swarm and kill all active
# sprints that are not supersets
if(len(activeMatrix[i-1])==0):
# If we are trying all possible 3 field combinations, don't kill any
# off in sprint 2
if i==2 and (self._hsObj._tryAll3FieldCombinations or \
self._hsObj._tryAll3FieldCombinationsWTimestamps):
pass
else:
bestInPrevious = completedMatrix[i-1][0]
bestEncoders = bestInPrevious[0].split('.')
for encoder in bestEncoders:
if not encoder in curSwarmEncoders:
toKill.append(swarm)
# If there are more than two completed encoder sets that are worse than at
# least one active swarm in the previous sprint, remove any combinations
# containing any pair of them, since they cannot contain the best encoder.
#elif(len(completedMatrix[i-1])>1):
# for completedSwarm in completedMatrix[i-1]:
# activeMatrix[i-1][0][2]<completed
# Mark the bad swarms as killed
if len(toKill) > 0:
print "ParseMe: Killing encoders:" + str(toKill)
for swarm in toKill:
self.setSwarmState(swarm[0], "killed")
return
def isSprintActive(self, sprintIdx):
"""If the given sprint exists and is active, return active=True.
If the sprint does not exist yet, this call will create it (and return
active=True). If it already exists, but is completing or complete, return
active=False.
If sprintIdx is past the end of the possible sprints, return
active=False, noMoreSprints=True
IMPORTANT: When speculative particles are enabled, this call has some
special processing to handle speculative sprints:
* When creating a new speculative sprint (creating sprint N before
sprint N-1 has completed), it initially puts only ONE swarm into
the sprint.
* Every time it is asked if sprint N is active, it also checks to see if
it is time to add another swarm to the sprint, and adds a new swarm if
appropriate before returning active=True
* We decide it is time to add a new swarm to a speculative sprint when ALL
of the currently active swarms in the sprint have all the workers they
need (number of running (not mature) particles is _minParticlesPerSwarm).
This means that we have capacity to run additional particles in a new
swarm.
It is expected that the sprints will be checked IN ORDER from 0 on up. (It
is an error not to.) The caller should always try to allocate from the first
active sprint it finds. If it can't, then it can call this again to
find/create the next active sprint.
Parameters:
---------------------------------------------------------------------
retval: (active, noMoreSprints)
active: True if the given sprint is active
noMoreSprints: True if there are no more sprints possible
"""
while True:
numExistingSprints = len(self._state['sprints'])
# If this sprint already exists, see if it is active
if sprintIdx <= numExistingSprints-1:
# With speculation off, it's simple, just return whether or not the
# asked for sprint has active status
if not self._hsObj._speculativeParticles:
active = (self._state['sprints'][sprintIdx]['status'] == 'active')
return (active, False)
# With speculation on, if the sprint is still marked active, we also
# need to see if it's time to add a new swarm to it.
else:
active = (self._state['sprints'][sprintIdx]['status'] == 'active')
if not active:
return (active, False)
# See if all of the existing swarms are at capacity (have all the
# workers they need):
activeSwarmIds = self.getActiveSwarms(sprintIdx)
swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,
matured=False)[0] for swarmId in activeSwarmIds]
notFullSwarms = [len(swarm) for swarm in swarmSizes \
if len(swarm) < self._hsObj._minParticlesPerSwarm]
# If some swarms have room return that the swarm is active.
if len(notFullSwarms) > 0:
return (True, False)
# If the existing swarms are at capacity, we will fall through to the
# logic below which tries to add a new swarm to the sprint.
# Stop creating new sprints?
if self._state['lastGoodSprint'] is not None:
return (False, True)
# if fixedFields is set, we are running a fast swarm and only run sprint0
if self._hsObj._fixedFields is not None:
return (False, True)
# ----------------------------------------------------------------------
# Get the best model (if there is one) from the prior sprint. That gives
# us the base encoder set for the next sprint. For sprint 0 we must not index
# sprintIdx-1, since -1 would wrap around to the last sprint.
if sprintIdx > 0 \
and self._state['sprints'][sprintIdx-1]['status'] == 'completed':
(bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-1)
(particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(
bestModelId)
bestSwarmId = particleState['swarmId']
baseEncoderSets = [bestSwarmId.split('.')]
# If there is no best model yet, then use all encoder sets from the prior
# sprint that were not killed
else:
bestSwarmId = None
particleState = None
# Build up more combinations, using ALL of the non-killed sets from the
# prior sprint.
baseEncoderSets = []
for swarmId in self.getNonKilledSwarms(sprintIdx-1):
baseEncoderSets.append(swarmId.split('.'))
# ----------------------------------------------------------------------
# Which encoders should we add to the current base set?
encoderAddSet = []
# If we have constraints on how many fields we carry forward into
# subsequent sprints (either nupic.hypersearch.max.field.branching or
# nupic.hypersearch.min.field.contribution was set), then be more
# picky about which fields we add in.
limitFields = False
if self._hsObj._maxBranching > 0 \
or self._hsObj._minFieldContribution >= 0:
if self._hsObj._searchType == HsSearchType.temporal or \
self._hsObj._searchType == HsSearchType.classification:
if sprintIdx >= 1:
limitFields = True
baseSprintIdx = 0
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
if sprintIdx >= 2:
limitFields = True
baseSprintIdx = 1
else:
raise RuntimeError("Unimplemented search type %s" % \
(self._hsObj._searchType))
# Only add top _maxBranching encoders to the swarms?
if limitFields:
# Get field contributions to filter added fields
pctFieldContributions, absFieldContributions = \
self.getFieldContributions()
toRemove = []
self.logger.debug("FieldContributions min: %s" % \
(self._hsObj._minFieldContribution))
for fieldname in pctFieldContributions:
if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
self.logger.debug("FieldContributions removing: %s" % (fieldname))
toRemove.append(self.getEncoderKeyFromName(fieldname))
else:
self.logger.debug("FieldContributions keeping: %s" % (fieldname))
# Grab the top maxBranching base sprint swarms.
swarms = self._state["swarms"]
sprintSwarms = [(swarm, swarms[swarm]["bestErrScore"]) \
for swarm in swarms if swarms[swarm]["sprintIdx"] == baseSprintIdx]
sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
if self._hsObj._maxBranching > 0:
sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]
# Create encoder set to generate further swarms.
for swarm in sprintSwarms:
swarmEncoders = swarm[0].split(".")
for encoder in swarmEncoders:
if not encoder in encoderAddSet:
encoderAddSet.append(encoder)
encoderAddSet = [encoder for encoder in encoderAddSet \
if not str(encoder) in toRemove]
# If no limit on the branching or min contribution, simply use all of the
# encoders.
else:
encoderAddSet = self._hsObj._encoderNames
# -----------------------------------------------------------------------
# Build up the new encoder combinations for the next sprint.
newSwarmIds = set()
# See if the caller wants to try more extensive field combinations with
# 3 fields.
if (self._hsObj._searchType == HsSearchType.temporal \
or self._hsObj._searchType == HsSearchType.legacyTemporal) \
and sprintIdx == 2 \
and (self._hsObj._tryAll3FieldCombinations or \
self._hsObj._tryAll3FieldCombinationsWTimestamps):
if self._hsObj._tryAll3FieldCombinations:
newEncoders = set(self._hsObj._encoderNames)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder)
else:
# Just make sure the timestamp encoders are part of the mix
newEncoders = set(encoderAddSet)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder)
for encoder in self._hsObj._encoderNames:
if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') \
or encoder.endswith('_dayOfWeek'):
newEncoders.add(encoder)
allCombos = list(itertools.combinations(newEncoders, 2))
for combo in allCombos:
newSet = list(combo)
newSet.append(self._hsObj._predictedFieldEncoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If this is a speculative sprint, only add the first encoder; otherwise
# add all of them.
if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
break
# Else, we only build up by adding 1 new encoder to the best combination(s)
# we've seen from the prior sprint
else:
for baseEncoderSet in baseEncoderSets:
for encoder in encoderAddSet:
if encoder not in self._state['blackListedEncoders'] \
and encoder not in baseEncoderSet:
newSet = list(baseEncoderSet)
newSet.append(encoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If this is a speculative sprint, only add the first encoder; otherwise
# add all of them.
if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
break
# ----------------------------------------------------------------------
# Sort the new swarm Ids
newSwarmIds = sorted(newSwarmIds)
# If no more swarms can be found for this sprint...
if len(newSwarmIds) == 0:
# if sprint is not an empty sprint return that it is active but do not
# add anything to it.
if len(self.getAllSwarms(sprintIdx)) > 0:
return (True, False)
# If this is an empty sprint and we couldn't find any new swarms to
# add (only bad fields are remaining), the search is over
else:
return (False, True)
# Add this sprint and the swarms that are in it to our state
self._dirty = True
# Add in the new sprint if necessary
if len(self._state["sprints"]) == sprintIdx:
self._state['sprints'].append({'status': 'active',
'bestModelId': None,
'bestErrScore': None})
# Add in the new swarm(s) to the sprint
for swarmId in newSwarmIds:
self._state['swarms'][swarmId] = {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': sprintIdx}
# Update the list of active swarms
self._state['activeSwarms'] = self.getActiveSwarms()
# Try to set new state
success = self.writeStateToDB()
# Return result if successful
if success:
return (True, False)
# No success, loop back with the updated state and try again
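# Illustrative example of how swarm ids grow from sprint to sprint in the
# logic above (encoder key names are hypothetical). If the best swarm of the
# prior sprint was '__consumption_encoder.__gym_encoder' and the add-set
# contains '__timestamp_timeOfDay', the candidate swarm for the next sprint
# is built as:
#
#   newSet = ['__consumption_encoder', '__gym_encoder', '__timestamp_timeOfDay']
#   newSet.sort()
#   newSwarmId = '.'.join(newSet)
#   # -> '__consumption_encoder.__gym_encoder.__timestamp_timeOfDay'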
class HsSearchType(object):
"""This class enumerates the types of search we can perform."""
temporal = 'temporal'
legacyTemporal = 'legacyTemporal'
classification = 'classification'
class HypersearchV2(object):
"""The v2 Hypersearch implementation. This is one example of a Hypersearch
implementation that can be used by the HypersearchWorker. Other implementations
just have to implement the following methods:
createModels()
recordModelProgress()
getPermutationVariables()
getComplexVariableLabelLookupDict()
This implementation uses a hybrid of Particle Swarm Optimization (PSO) and
the old "ronamatic" logic from Hypersearch V1. Variables which are lists of
choices (i.e. string values, integer values that represent different
categories) are searched using the ronamatic logic whereas floats and
integers that represent a range of values are searched using PSO.
For prediction experiments, this implementation starts out evaluating only
single encoder models that encode the predicted field. This is the first
"sprint". Once it finds the optimum set of variables for that, it starts to
build up by adding in combinations of 2 fields (the second "sprint"), where
one of them is the predicted field. Once the top 2-field combination(s) are
discovered, it starts to build up on those by adding in a 3rd field, etc.
Each new set of field combinations is called a sprint.
For classification experiments, this implementation starts out evaluating two
encoder models, where one of the encoders is the classified field. This is the
first "sprint". Once it finds the optimum set of variables for that, it starts
to build up by evaluating combinations of 3 fields (the second "sprint"), where
two of them are the best 2 fields found in the first sprint (one of those of
course being the classified field). Once the top 3-field combination(s) are
discovered, it starts to build up on those by adding in a 4th field, etc.
In classification models, the classified field, although it has an encoder, is
not sent "into" the network. Rather, the encoded value just goes directly to
the classifier as the classifier input.
At any one time, there are 1 or more swarms being evaluated at the same time -
each swarm representing a certain field combination within the sprint. We try
to load balance the swarms and have the same number of models evaluated for
each swarm at any one time. Each swarm contains N particles, and we also try
to keep N >= some minimum number. Each position of a particle corresponds to a
model.
When a worker is ready to evaluate a new model, it first picks the swarm with
the least number of models so far (least number of evaluated particle
positions). If that swarm does not have the min number of particles in it yet,
or does not yet have a particle created by this worker, the worker will create
a new particle, else it will choose another particle from that swarm that it
had created in the past which has the least number of evaluated positions so
far.
"""
def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,
logLevel=None):
"""Instantiate the HyperseachV2 instance.
Parameters:
----------------------------------------------------------------------
searchParams: a dict of the job's search parameters. The format is:
persistentJobGUID: REQUIRED.
Persistent, globally-unique identifier for this job
for use in constructing persistent model checkpoint
keys. MUST be compatible with S3 key-naming rules, but
MUST NOT contain forward slashes. This GUID is
expected to retain its global uniqueness across
clusters and cluster software updates (unlike the
record IDs in the Engine's jobs table, which recycle
upon table schema change and software update). In the
future, this may also be instrumental for checkpoint
garbage collection.
permutationsPyFilename:
OPTIONAL - path to permutations.py file
permutationsPyContents:
OPTIONAL - JSON encoded string with
contents of permutations.py file
descriptionPyContents:
OPTIONAL - JSON encoded string with
contents of base description.py file
description: OPTIONAL - JSON description of the search
createCheckpoints: OPTIONAL - Whether to create checkpoints
useTerminators OPTIONAL - True or False (default config.xml). When set
to False, the model and swarm terminators
are disabled
maxModels: OPTIONAL - max # of models to generate
NOTE: This is a deprecated location for this
setting. Now, it should be specified through
the maxModels variable within the permutations
file, or maxModels in the JSON description
dummyModel: OPTIONAL - Either (True/False) or a dict of parameters
for a dummy model. If this key is absent,
a real model is trained.
See utils.py/OPFDummyModel runner for the
schema of the dummy parameters
speculativeParticles OPTIONAL - True or False (default obtained from
nupic.hypersearch.speculative.particles.default
configuration property). See note below.
NOTE: The caller must provide just ONE of the following to describe the
hypersearch:
1.) permutationsPyFilename
OR 2.) permutationsPyContents & descriptionPyContents
OR 3.) description
The schema for the description element can be found at:
"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json"
NOTE about speculativeParticles: If true (not 0), hypersearch workers will
go ahead and create and run particles in subsequent sprints and
generations before the current generation or sprint has been completed. If
false, a worker will wait in a sleep loop until the current generation or
sprint has finished before choosing the next particle position or going
into the next sprint. When true, the best model can be found faster, but
results are less repeatable due to the randomness of when each worker
completes each particle. This property can be overridden via the
speculativeParticles element of the Hypersearch job params.
workerID: our unique Hypersearch worker ID
cjDAO: ClientJobsDB Data Access Object
jobID: job ID for this hypersearch job
logLevel: override logging level to this value, if not None
"""
# Instantiate our logger
self.logger = logging.getLogger(".".join( ['com.numenta',
self.__class__.__module__, self.__class__.__name__]))
# Override log level?
if logLevel is not None:
self.logger.setLevel(logLevel)
# This is how to check the logging level
#if self.logger.getEffectiveLevel() <= logging.DEBUG:
# print "at debug level"
# Init random seed
random.seed(42)
# Save the search info
self._searchParams = searchParams
self._workerID = workerID
self._cjDAO = cjDAO
self._jobID = jobID
# Log search params
self.logger.info("searchParams: \n%s" % (pprint.pformat(
clippedObj(searchParams))))
self._createCheckpoints = self._searchParams.get('createCheckpoints',
False)
self._maxModels = self._searchParams.get('maxModels', None)
if self._maxModels == -1:
self._maxModels = None
self._predictionCacheMaxRecords = self._searchParams.get('predictionCacheMaxRecords', None)
# Speculative particles?
self._speculativeParticles = self._searchParams.get('speculativeParticles',
bool(int(Configuration.get(
'nupic.hypersearch.speculative.particles.default'))))
self._speculativeWaitSecondsMax = float(Configuration.get(
'nupic.hypersearch.speculative.particles.sleepSecondsMax'))
# Maximum Field Branching
self._maxBranching = int(Configuration.get(
'nupic.hypersearch.max.field.branching'))
# Minimum Field Contribution
self._minFieldContribution = float(Configuration.get(
'nupic.hypersearch.min.field.contribution'))
# This gets set if we detect that the job got cancelled
self._jobCancelled = False
# Use terminators (typically set by permutations_runner.py)
if 'useTerminators' in self._searchParams:
useTerminators = self._searchParams['useTerminators']
useTerminators = str(int(useTerminators))
Configuration.set('nupic.hypersearch.enableModelTermination', useTerminators)
Configuration.set('nupic.hypersearch.enableModelMaturity', useTerminators)
Configuration.set('nupic.hypersearch.enableSwarmTermination', useTerminators)
# Special test mode?
if 'NTA_TEST_exitAfterNModels' in os.environ:
self._maxModels = int(os.environ['NTA_TEST_exitAfterNModels'])
self._dummyModel = self._searchParams.get('dummyModel', None)
# Holder for temporary directory, if any, that needs to be cleaned up
# in our close() method.
self._tempDir = None
try:
# Get the permutations info. This can be either:
# 1.) JSON encoded search description (this will be used to generate a
# permutations.py and description.py files using ExpGenerator)
# 2.) path to a pre-generated permutations.py file. The description.py is
# assumed to be in the same directory
# 3.) contents of the permutations.py and description.py files.
if 'description' in self._searchParams:
if ('permutationsPyFilename' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
# Calculate training period for anomaly models
searchParamObj = self._searchParams
anomalyParams = searchParamObj['description'].get('anomalyParams',
dict())
# This is used in case searchParamObj['description']['anomalyParams']
# is set to None.
if anomalyParams is None:
anomalyParams = dict()
if (('autoDetectWaitRecords' not in anomalyParams) or
(anomalyParams['autoDetectWaitRecords'] is None)):
streamDef = self._getStreamDef(searchParamObj['description'])
from nupic.data.stream_reader import StreamReader
try:
streamReader = StreamReader(streamDef, isBlocking=False,
maxTimeout=0, eofOnTimeout=True)
anomalyParams['autoDetectWaitRecords'] = \
streamReader.getDataRowCount()
except Exception:
anomalyParams['autoDetectWaitRecords'] = None
self._searchParams['description']['anomalyParams'] = anomalyParams
# Call the experiment generator to generate the permutations and base
# description file.
outDir = self._tempDir = tempfile.mkdtemp()
expGenerator([
'--description=%s' % (
json.dumps(self._searchParams['description'])),
'--version=v2',
'--outDir=%s' % (outDir)])
# Get the name of the permutations script.
permutationsScript = os.path.join(outDir, 'permutations.py')
elif 'permutationsPyFilename' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or "
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
permutationsScript = self._searchParams['permutationsPyFilename']
elif 'permutationsPyContents' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyFilename' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
assert ('descriptionPyContents' in self._searchParams)
# Generate the permutations.py and description.py files
outDir = self._tempDir = tempfile.mkdtemp()
permutationsScript = os.path.join(outDir, 'permutations.py')
fd = open(permutationsScript, 'w')
fd.write(self._searchParams['permutationsPyContents'])
fd.close()
fd = open(os.path.join(outDir, 'description.py'), 'w')
fd.write(self._searchParams['descriptionPyContents'])
fd.close()
else:
raise RuntimeError ("Either 'description' or 'permutationsScript' must be"
"specified")
# Get the base path of the experiment and read in the base description
self._basePath = os.path.dirname(permutationsScript)
self._baseDescription = open(os.path.join(self._basePath,
'description.py')).read()
self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()
# Read the model config to figure out the inference type
modelDescription, _ = opfhelpers.loadExperiment(self._basePath)
# Read info from permutations file. This sets up the following member
# variables:
# _predictedField
# _permutations
# _flattenedPermutations
# _encoderNames
# _reportKeys
# _filterFunc
# _optimizeKey
# _maximize
# _dummyModelParamsFunc
self._readPermutationsFile(permutationsScript, modelDescription)
# Fill in and save the base description and permutations file contents
# if they haven't already been filled in by another worker
if self._cjDAO is not None:
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genBaseDescription',
curValue=None,
newValue = self._baseDescription)
if updated:
permContents = open(permutationsScript).read()
self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genPermutations',
curValue=None,
newValue = permContents)
# if user provided an artificialMetric, force use of the dummy model
if self._dummyModelParamsFunc is not None:
if self._dummyModel is None:
self._dummyModel = dict()
# If at DEBUG log level, print out permutations info to the log
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "Permutations file specifications: "
info = dict()
for key in ['_predictedField', '_permutations',
'_flattenedPermutations', '_encoderNames',
'_reportKeys', '_optimizeKey', '_maximize']:
info[key] = getattr(self, key)
print >> msg, pprint.pformat(info)
self.logger.debug(msg.getvalue())
msg.close()
# Instantiate our database to hold the results we received so far
self._resultsDB = ResultsDB(self)
# Instantiate the Swarm Terminator
self._swarmTerminator = SwarmTerminator()
# Initial hypersearch state
self._hsState = None
# The Max # of attempts we will make to create a unique model before
# giving up.
self._maxUniqueModelAttempts = int(Configuration.get(
'nupic.hypersearch.maxUniqueModelAttempts'))
# The max amount of time allowed before a model is considered orphaned.
self._modelOrphanIntervalSecs = float(Configuration.get(
'nupic.hypersearch.modelOrphanIntervalSecs'))
# The max percent of models that can complete with errors
self._maxPctErrModels = float(Configuration.get(
'nupic.hypersearch.maxPctErrModels'))
except:
# Clean up our temporary directory, if any
if self._tempDir is not None:
shutil.rmtree(self._tempDir)
self._tempDir = None
raise
return
def _getStreamDef(self, modelDescription):
"""
Generate a stream definition, with aggregation settings, based on the given
model description.
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef
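# Illustrative sketch (comments only) of the aggregation info this method
# attaches to the stream definition; the field names and period values are
# hypothetical:
#
#   streamDef['aggregation'] == {
#     'years': 0, 'months': 0, 'weeks': 0, 'days': 0,
#     'hours': 1, 'minutes': 0, 'seconds': 0,
#     'milliseconds': 0, 'microseconds': 0,
#     'fields': [('consumption', 'mean'), ('gym', 'first')],
#   }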
def __del__(self):
"""Destructor; NOTE: this is not guaranteed to be called (bugs like
circular references could prevent it from being called).
"""
self.close()
return
def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return
def _readPermutationsFile(self, filename, modelDescription):
"""
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate CLA model results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
"""
# Open and execute the permutations file
vars = {}
execfile(filename, globals(), vars)
# Read in misc info.
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None # default
self._predictedFieldEncoder = None # default
self._fixedFields = None # default
# The fastSwarm variable, if present, contains the params from a best
# model from a previous swarm. If present, use info from that to seed
# a fast swarm
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if self._fastSwarmModelParams is not None:
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if encoders[fieldName] is not None:
self._fixedFields.append(fieldName)
if 'fixedFields' in vars:
self._fixedFields = vars['fixedFields']
# Get min number of particles per swarm from either permutations file or
# config.
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if self._minParticlesPerSwarm is None:
self._minParticlesPerSwarm = Configuration.get(
'nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
# Enable logic to kill off speculative swarms when an earlier sprint
# has found that it contains poorly performing field combination?
self._killUselessSwarms = vars.get('killUselessSwarms', True)
# The caller can request that the predicted field ALWAYS be included ("yes")
# or optionally include ("auto"). The setting of "no" is N/A and ignored
# because in that case the encoder for the predicted field will not even
# be present in the permutations file.
# When set to "yes", this will force the first sprint to try the predicted
# field only (the legacy mode of swarming).
# When set to "auto", the first sprint tries all possible fields (one at a
# time) in the first sprint.
self._inputPredictedField = vars.get("inputPredictedField", "yes")
# Try all possible 3-field combinations? Normally, we start with the best
# 2-field combination as a base. When this flag is set though, we try
# all possible 3-field combinations which takes longer but can find a
# better model.
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
# Always include timestamp fields in the 3-field swarms?
# This is a less compute intensive version of tryAll3FieldCombinations.
# Instead of trying ALL possible 3 field combinations, it just ensures
# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
# out when generating the 3-field swarms.
self._tryAll3FieldCombinationsWTimestamps = vars.get(
'tryAll3FieldCombinationsWTimestamps', False)
# Allow the permutations file to override minFieldContribution. This would
# be set to a negative number for large swarms so that you don't disqualify
# a field in an early sprint just because it did poorly there. Sometimes,
# a field that did poorly in an early sprint could help accuracy when
# added in a later sprint
minFieldContribution = vars.get('minFieldContribution', None)
if minFieldContribution is not None:
self._minFieldContribution = minFieldContribution
# Allow the permutations file to override maxBranching.
maxBranching = vars.get('maxFieldBranching', None)
if maxBranching is not None:
self._maxBranching = maxBranching
# Read in the optimization info.
if 'maximize' in vars:
self._optimizeKey = vars['maximize']
self._maximize = True
elif 'minimize' in vars:
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize"
" or minimize metric.")
# The permutations file is the new location for maxModels. The old location,
# in the jobParams is deprecated.
maxModels = vars.get('maxModels')
if maxModels is not None:
if self._maxModels is None:
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job'
' params AND in the permutations file.')
# Figure out if what kind of search this is:
#
# If it's a temporal prediction search:
# the first sprint has 1 swarm, with just the predicted field
# elif it's a spatial prediction search:
# the first sprint has N swarms, each with predicted field + one
# other field.
# elif it's a classification search:
# the first sprint has N swarms, each with 1 field
inferenceType = modelDescription['modelParams']['inferenceType']
if not InferenceType.validate(inferenceType):
raise ValueError("Invalid inference type %s" %inferenceType)
if inferenceType in [InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep]:
# If it does not have a separate encoder for the predicted field that
# goes to the classifier, it is a legacy multi-step network
classifierOnlyEncoder = None
for encoder in modelDescription["modelParams"]["sensorParams"]\
["encoders"].values():
if encoder.get("classifierOnly", False) \
and encoder["fieldname"] == vars.get('predictedField', None):
classifierOnlyEncoder = encoder
break
if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
# If we don't have a separate encoder for the classifier (legacy
# MultiStep) or the caller explicitly wants to include the predicted
# field, then use the legacy temporal search methodology.
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif inferenceType in [InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly]:
self._searchType = HsSearchType.legacyTemporal
elif inferenceType in (InferenceType.TemporalClassification,
InferenceType.NontemporalClassification):
self._searchType = HsSearchType.classification
else:
raise RuntimeError("Unsupported inference type: %s" % inferenceType)
# Get the predicted field. Note that even classification experiments
# have a "predicted" field - which is the field that contains the
# classification value.
self._predictedField = vars.get('predictedField', None)
if self._predictedField is None:
raise RuntimeError("Permutations file '%s' does not have the required"
" 'predictedField' variable" % filename)
# Read in and validate the permutations dict
if 'permutations' not in vars:
raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
if not isinstance(vars['permutations'], dict):
raise RuntimeError("Permutations file '%s' defines a permutations variable "
"but it is not a dict")
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if ':' in keys[-1]:
raise RuntimeError("The permutation variable '%s' contains a ':' "
"character, which is not allowed.")
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
# If this is the encoder for the predicted field, save its name.
if value.fieldName == self._predictedField:
self._predictedFieldEncoder = flatKey
# Store the flattened representations of the variables within the
# encoder.
for encKey, encValue in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
else:
if isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
dictutils.rApply(self._permutations, _flattenPermutations)
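# Illustrative sketch (comments only) of the flattening performed above,
# assuming _flattenKeys() joins nested dictionary keys with '|' (the names
# below are hypothetical):
#
#   permutations['modelParams']['tpParams']['minThreshold']
#       -> flat key 'modelParams|tpParams|minThreshold'
#   a PermuteEncoder at 'modelParams|sensorParams|encoders|consumption' with
#   a permuted constructor argument 'n'
#       -> flat key 'modelParams|sensorParams|encoders|consumption:n'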
def getExpectedNumModels(self):
"""Computes the number of models that are expected to complete as part of
this instance's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of permutations_runner.py for use in progress
reporting.
Parameters:
---------------------------------------------------------
retval: The total number of expected models, if known; -1 if unknown
"""
return -1
def getModelNames(self):
"""Generates a list of model names that are expected to complete as part of
this instance's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of permutations_runner.py.
Parameters:
---------------------------------------------------------
retval: List of model names for this HypersearchV2 instance, or
None if not applicable
"""
return None
def getPermutationVariables(self):
"""Returns a dictionary of permutation variables.
Parameters:
---------------------------------------------------------
retval: A dictionary of permutation variables; keys are
flat permutation variable names and each value is
a sub-class of PermuteVariable.
"""
return self._flattenedPermutations
def getComplexVariableLabelLookupDict(self):
"""Generates a lookup dictionary of permutation variables whose values
are too complex for labels, so that artificial labels have to be generated
for them.
Parameters:
---------------------------------------------------------
retval: A look-up dictionary of permutation
variables whose values are too complex for labels, so
artificial labels were generated instead (e.g., "Choice0",
"Choice1", etc.); the key is the name of the complex variable
and the value is:
dict(labels=<list_of_labels>, values=<list_of_values>).
"""
raise NotImplementedError
def getOptimizationMetricInfo(self):
"""Retrives the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
"""
return (self._optimizeKey, self._maximize)
def _checkForOrphanedModels (self):
"""If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval:
"""
self.logger.debug("Checking for orphaned models older than %s" % \
(self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
self._modelOrphanIntervalSecs)
if orphanedModelId is None:
return
self.logger.info("Removing orphaned model: %d" % (orphanedModelId))
# Change the model hash and params hash as stored in the models table so
# that we can insert a new model with the same paramsHash
for attempt in range(100):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
attempt)).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
except:
success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
# Mark this model as complete, with reason "orphaned"
self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
completionMsg="Orphaned")
# Update our results DB immediately, rather than wait for the worker
to inform us. This ensures that the getParticleInfos() calls we make
# below don't include this particle. Setting the metricResult to None
# sets it to worst case
self._resultsDB.update(modelID=orphanedModelId,
modelParams=None,
modelParamsHash=paramsHash,
metricResult=None,
completed = True,
completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
matured = True,
numRecords = 0)
def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
"""
Periodically, check to see if we should remove a certain field combination
from evaluation (because it is doing so poorly) or move on to the next
sprint (add in more fields).
This method is called from _getCandidateParticleAndSwarm(), which is called
right before we try and create a new model to run.
Parameters:
-----------------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by removing this swarm. This is used in situations
where we can't find any new unique models to create in
this swarm. In these situations, we update the hypersearch
state regardless of the timestamp of the last time another
worker updated it.
"""
if self._hsState is None:
self._hsState = HsState(self)
# Read in current state from the DB
self._hsState.readStateFromDB()
# This will hold the list of completed swarms that we find
completedSwarms = set()
# Mark the exhausted swarm as completing/completed, if any
if exhaustedSwarmId is not None:
self.logger.info("Removing swarm %s from the active set "
"because we can't find any new unique particle "
"positions" % (exhaustedSwarmId))
# Is it completing or completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=exhaustedSwarmId, matured=False)
if len(particles) > 0:
exhaustedSwarmStatus = 'completing'
else:
exhaustedSwarmStatus = 'completed'
# Kill all swarms that don't need to be explored based on the most recent
# information.
if self._killUselessSwarms:
self._hsState.killUselessSwarms()
# For all swarms that were in the 'completing' state, see if they have
# completed yet.
#
# Note that we are not quite sure why this doesn't automatically get handled
# when we receive notification that a model finally completed in a swarm.
# But, we ARE running into a situation, when speculativeParticles is off,
# where we have one or more swarms in the 'completing' state even though all
# models have since finished. This logic will serve as a failsafe against
# this situation.
completingSwarms = self._hsState.getCompletingSwarms()
for swarmId in completingSwarms:
# Is it completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, matured=False)
if len(particles) == 0:
completedSwarms.add(swarmId)
# Are there any swarms we can remove (because they have matured)?
completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
priorCompletedSwarms = self._hsState.getCompletedSwarms()
for (swarmId, genIdx, errScore) in completedSwarmGens:
# Don't need to report it if the swarm already completed
if swarmId in priorCompletedSwarms:
continue
completedList = self._swarmTerminator.recordDataPoint(
swarmId=swarmId, generation=genIdx, errScore=errScore)
# Update status message
statusMsg = "Completed generation #%d of swarm '%s' with a best" \
" errScore of %g" % (genIdx, swarmId, errScore)
if len(completedList) > 0:
statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
self.logger.info(statusMsg)
self._cjDAO.jobSetFields (jobID=self._jobID,
fields=dict(engStatus=statusMsg),
useConnectionID=False,
ignoreUnchanged=True)
# Special test mode to check which swarms have terminated
if 'NTA_TEST_recordSwarmTerminations' in os.environ:
while True:
resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if resultsStr is None:
results = {}
else:
results = json.loads(resultsStr)
if not 'terminatedSwarms' in results:
results['terminatedSwarms'] = {}
for swarm in completedList:
if swarm not in results['terminatedSwarms']:
results['terminatedSwarms'][swarm] = (genIdx,
self._swarmTerminator.swarmScores[swarm])
newResultsStr = json.dumps(results)
if newResultsStr == resultsStr:
break
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='results',
curValue=resultsStr,
newValue = json.dumps(results))
if updated:
break
if len(completedList) > 0:
for name in completedList:
self.logger.info("Swarm matured: %s. Score at generation %d: "
"%s" % (name, genIdx, errScore))
completedSwarms = completedSwarms.union(completedList)
if len(completedSwarms)==0 and (exhaustedSwarmId is None):
return
# We need to mark one or more swarms as completed, keep trying until
# successful, or until some other worker does it for us.
while True:
if exhaustedSwarmId is not None:
self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)
# Mark the completed swarms as completed
for swarmId in completedSwarms:
self._hsState.setSwarmState(swarmId, 'completed')
# If nothing changed, we're done
if not self._hsState.isDirty():
return
# Update the shared Hypersearch state now
# This will do nothing and return False if some other worker beat us to it
success = self._hsState.writeStateToDB()
if success:
# Go through and cancel all models that are still running, except for
# the best model. Once the best model changes, the one that used to be
# best (and has matured) will notice that and stop itself at that point.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
bestModelId = jobResults.get('bestModel', None)
else:
bestModelId = None
for swarmId in list(completedSwarms):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, completed=False)
if bestModelId in modelIds:
modelIds.remove(bestModelId)
if len(modelIds) == 0:
continue
self.logger.info("Killing the following models in swarm '%s' because"
"the swarm is being terminated: %s" % (swarmId,
str(modelIds)))
for modelId in modelIds:
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged = True)
return
# We were not able to change the state because some other worker beat us
# to it.
# Get the new state, and try again to apply our changes.
self._hsState.readStateFromDB()
self.logger.debug("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._hsState._state, indent=4)))
def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
"""Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least# of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min# of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn't find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluated in
it, mark it as 'completing', else 'completed'. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time trying to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
"""
# Cancel search?
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
# Did a worker cancel the job because of an error?
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason', 'workerCompletionMsg'])
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self.logger.info("Exiting due to job being cancelled")
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg="Job was cancelled"),
useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error("Exiting because some worker set the "
"workerCompletionReason to %s. WorkerCompletionMsg: %s" %
(workerCmpReason, workerCmpMsg))
return (True, None, None)
# Perform periodic updates on the Hypersearch state.
if self._hsState is not None:
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
# Update the HypersearchState, checking for matured swarms, and marking
# the passed in swarm as exhausted, if any
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
# The above call may have modified self._hsState['activeSwarmIds']
# Log the current set of active swarms
activeSwarms = self._hsState.getActiveSwarms()
if activeSwarms != priorActiveSwarms:
self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
priorActiveSwarms))
self.logger.debug("Active swarms: %s" % (activeSwarms))
# If too many model errors were detected, exit
totalCmpModels = self._resultsDB.getNumCompletedModels()
if totalCmpModels > 5:
numErrs = self._resultsDB.getNumErrModels()
if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
# Get one of the errors
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = "%s: Exiting due to receiving too many models failing" \
" from exceptions (%d out of %d). \nModel Exception: %s" % \
(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
modelErrMsg)
self.logger.error(cmpMsg)
# Cancel the entire job now, if it has not already been cancelled
workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self._cjDAO.jobSetFields(
self._jobID,
fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = cmpMsg),
useConnectionID=False,
ignoreUnchanged=True)
return (True, None, None)
# If HsState thinks the search is over, exit. It is seeing if the results
# on the sprint we just completed are worse than a prior sprint.
if self._hsState.isSearchOver():
cmpMsg = "Exiting because results did not improve in most recently" \
" completed sprint."
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
# Search successive active sprints, until we can find a candidate particle
# to work with
sprintIdx = -1
while True:
# Is this sprint active?
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
# If no more sprints to explore:
if eos:
# If any prior ones are still being explored, finish up exploring them
if self._hsState.anyGoodSprintsActive():
self.logger.info("No more sprints to explore, waiting for prior"
" sprints to complete")
return (False, None, None)
# Else, we're done
else:
cmpMsg = "Exiting because we've evaluated all possible field " \
"combinations"
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if not active:
if not self._speculativeParticles:
if not self._hsState.isSprintCompleted(sprintIdx):
self.logger.info("Waiting for all particles in sprint %d to complete"
"before evolving any more particles" % (sprintIdx))
return (False, None, None)
continue
# ====================================================================
# Look for swarms that have particle "holes" in their generations. That is,
# an earlier generation with less than minParticlesPerSwarm. This can
# happen if a model that was started earlier got orphaned. If we detect
# this, start a new particle in that generation.
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
swarmId=swarmId,
minNumParticles=self._minParticlesPerSwarm)
if firstNonFullGenIdx is None:
continue
if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
self.logger.info("Cloning an earlier model in generation %d of swarm "
"%s (sprintIdx=%s) to replace an orphaned model" % (
firstNonFullGenIdx, swarmId, sprintIdx))
# Clone a random orphaned particle from the incomplete generation
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if len(allModelIds) > 0:
# We have seen instances where we get stuck in a loop incessantly
# trying to clone earlier models (NUP-1511). My best guess is that
# we've already successfully cloned each of the orphaned models at
# least once, but still need at least one more. If we don't create
# a new particleID, we will never be able to instantiate another
# model (since particleID hash is a unique key in the models table).
# So, on 1/8/2013 this logic was changed to create a new particleID
# whenever we clone an orphan.
newParticleId = True
self.logger.info("Cloning an orphaned model")
# If there is no orphan, clone one of the other particles. We can
# have no orphan if this was a speculative generation that only
# continued particles completed in the prior generation.
else:
newParticleId = True
self.logger.info("No orphans found, so cloning a non-orphan")
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getParticleInfos(swarmId=swarmId,
genIdx=firstNonFullGenIdx)
# Clone that model
modelId = random.choice(allModelIds)
self.logger.info("Cloning model %r" % (modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj = self,
resultsDB = self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
newFromClone=particleState,
newParticleId=newParticleId)
return (False, particle, swarmId)
# ====================================================================
# Sort the swarms in priority order, trying the ones with the least
# number of models first
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
# -------------------------------------------------------------------
# 1.) The particle will be created from new (at generation #0) if there
# are not already self._minParticlesPerSwarm particles in the swarm.
(allParticles, allModelIds, errScores, completed, matured) = (
self._resultsDB.getParticleInfos(swarmId))
if len(allParticles) < self._minParticlesPerSwarm:
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
swarmId=swarmId,
newFarFrom=allParticles)
# Jam in the best encoder state found from the first sprint
bestPriorModel = None
if sprintIdx >= 1:
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if bestPriorModel is not None:
self.logger.info("Best model and errScore from previous sprint(%d):"
" %s, %g" % (0, str(bestPriorModel), errScore))
(baseState, modelId, errScore, completed, matured) \
= self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
# Copy the best inference type from the earlier sprint
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
# It's best to jiggle the best settings from the prior sprint, so
# compute a new position starting from that previous best
# Only jiggle the vars we copied from the prior model
whichVars = []
for varName in baseState['varStates']:
if ':' in varName:
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug("Particle after incorporating encoder vars from best "
"model in previous sprint: \n%s" % (str(particle)))
return (False, particle, swarmId)
# -------------------------------------------------------------------
# 2.) Look for a completed particle to evolve
# Note that we use lastDescendent. We only want to evolve particles that
# are at their most recent generation index.
(readyParticles, readyModelIds, readyErrScores, _, _) = (
self._resultsDB.getParticleInfos(swarmId, genIdx=None,
matured=True, lastDescendent=True))
# If we have at least 1 ready particle to evolve...
if len(readyParticles) > 0:
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
# Now, genIdx has the generation of the particle we want to run,
# Get a particle from that generation and evolve it.
useParticle = None
for particle in readyParticles:
if particle['genIdx'] == genIdx:
useParticle = particle
break
# If speculativeParticles is off, we don't want to evolve a particle
# into the next generation until all particles in the current
# generation have completed.
if not self._speculativeParticles:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId, genIdx=genIdx, matured=False)
if len(particles) > 0:
continue
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
evolveFromState=useParticle)
return (False, particle, swarmId)
# END: for (swarmSize, swarmId) in swarmSizeAndIdList:
# No success in this swarm, onto next swarm
# ====================================================================
# We couldn't find a particle in this sprint ready to evolve. If
# speculative particles is OFF, we have to wait for one or more other
# workers to finish up their particles before we can do anything.
if not self._speculativeParticles:
self.logger.info("Waiting for one or more of the %s swarms "
"to complete a generation before evolving any more particles" \
% (str(swarmIds)))
return (False, None, None)
# END: while True:
# No success in this sprint, into next sprint
def _okToExit(self):
"""Test if it's OK to exit this worker. This is only called when we run
out of prospective new models to evaluate. This method sees if all models
have matured yet. If not, it will sleep for a bit and return False. This
will indicate to the hypersearch worker that we should keep running, and
check again later. This gives this worker a chance to pick up and adopt any
model which may become orphaned by another worker before it matures.
If all models have matured, this method will send a STOP message to all
matured, running models (presumably, there will be just one - the model
which thinks it's the best) before returning True.
"""
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"
# Any immature models still running?
if not self._jobCancelled:
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
if len(modelIds) > 0:
self.logger.info("Ready to end hyperseach, but not all models have " \
"matured yet. Sleeping a bit to wait for all models " \
"to mature.")
# Sleep for a bit, no need to check for orphaned models very often
time.sleep(5.0 * random.random())
return False
# All particles have matured, send a STOP signal to any that are still
# running.
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
for modelId in modelIds:
self.logger.info("Stopping model %d because the search has ended" \
% (modelId))
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
ignoreUnchanged = True)
# Update the HsState to get the accurate field contributions.
self._hsStatePeriodicUpdate()
pctFieldContributions, absFieldContributions = \
self._hsState.getFieldContributions()
# Update the results field with the new field contributions.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
else:
jobResults = {}
# Update the fieldContributions field.
if pctFieldContributions != jobResults.get('fieldContributions', None):
jobResults['fieldContributions'] = pctFieldContributions
jobResults['absoluteFieldContributions'] = absFieldContributions
isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
self.logger.info('Successfully updated the field contributions:%s',
pctFieldContributions)
else:
self.logger.info('Failed updating the field contributions, ' \
'another hypersearch worker must have updated it')
return True
def killSwarmParticles(self, swarmID):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmID, completed=False)
for modelId in modelIds:
self.logger.info("Killing the following models in swarm '%s' because"
"the swarm is being terminated: %s" % (swarmID,
str(modelIds)))
self._cjDAO.modelSetFields(
modelId, dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged=True)
def createModels(self, numModels=1):
"""Create one or more new models for evaluation. These should NOT be models
that we already know are in progress (i.e. those that have been sent to us
via recordModelProgress). We return a list of models to the caller
(HypersearchWorker) and if one can be successfully inserted into
the models table (i.e. it is not a duplicate) then HypersearchWorker will
turn around and call our runModel() method, passing in this model. If it
is a duplicate, HypersearchWorker will call this method again. A model
is a duplicate if either the modelParamsHash or particleHash is
identical to another entry in the model table.
The numModels is provided by HypersearchWorker as a suggestion as to how
many models to generate. This particular implementation only ever returns 1
model.
Before choosing some new models, we first do a sweep for any models that
may have been abandoned by failed workers. If/when we detect an abandoned
model, we mark it as complete and orphaned and hide it from any subsequent
queries to our ResultsDB. This effectively considers it as if it never
existed. We also change the paramsHash and particleHash in the model record
of the models table so that we can create another model with the same
params and particle status and run it (which we then do immediately).
The modelParamsHash returned for each model should be a hash (max allowed
size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
its params and the optional particleHash should be a hash of the particleId
and generation index. Every model that gets placed into the models database,
either by this worker or another worker, will have these hashes computed for
it. The recordModelProgress gets called for every model in the database and
the hash is used to tell which, if any, are the same as the ones this worker
generated.
NOTE: We check first ourselves for possible duplicates using the paramsHash
before we return a model. If HypersearchWorker failed to insert it (because
some other worker beat us to it), it will turn around and call our
recordModelProgress with that other model so that we now know about it. It
will then call createModels() again.
This methods returns an exit boolean and the model to evaluate. If there is
no model to evaluate, we may return False for exit because we want to stay
alive for a while, waiting for all other models to finish. This gives us
a chance to detect and pick up any possibly orphaned model by another
worker.
Parameters:
----------------------------------------------------------------------
numModels: number of models to generate
retval: (exit, models)
exit: true if this worker should exit.
models: list of tuples, one for each model. Each tuple contains:
(modelParams, modelParamsHash, particleHash)
modelParams is a dictionary containing the following elements:
structuredParams: dictionary containing all variables for
this model, with encoders represented as a dict within
this dict (or None if they are not included).
particleState: dictionary containing the state of this
particle. This includes the position and velocity of
each of its variables, the particleId, and the particle
generation index. It contains the following keys:
id: The particle Id of the particle we are using to
generate/track this model. This is a string of the
form <hypersearchWorkerId>.<particleIdx>
genIdx: the particle's generation index. This starts at 0
and increments every time we move the particle to a
new position.
swarmId: The swarmId, which is a string of the form
<encoder>.<encoder>... that describes this swarm
varStates: dict of the variable states. The key is the
variable name, the value is a dict of the variable's
position, velocity, bestPosition, bestResult, etc.
"""
# Check for and mark orphaned models
self._checkForOrphanedModels()
modelResults = []
for _ in xrange(numModels):
candidateParticle = None
# If we've reached the max # of models to evaluate, we're done.
if (self._maxModels is not None and
(self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
self._maxModels):
return (self._okToExit(), [])
# If we don't already have a particle to work on, get a candidate swarm and
# particle to work with. If None is returned for the particle it means
# either that the search is over (if exitNow is also True) or that we need
# to wait for other workers to finish up their models before we can pick
# another particle to run (if exitNow is False).
if candidateParticle is None:
(exitNow, candidateParticle, candidateSwarm) = (
self._getCandidateParticleAndSwarm())
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
useEncoders = candidateSwarm.split('.')
numAttempts = 0
# Loop until we can create a unique model that we haven't seen yet.
while True:
# If this is the Nth attempt with the same candidate, agitate it a bit
# to find a new unique position for it.
if numAttempts >= 1:
self.logger.debug("Agitating particle to get unique position after %d "
"failed attempts in a row" % (numAttempts))
candidateParticle.agitate()
# Create the hierarchical params expected by the base description. Note
# that this is where we incorporate encoders that have no permuted
# values in them.
position = candidateParticle.getPosition()
structuredParams = dict()
def _buildStructuredParams(value, keys):
flatKey = _flattenKeys(keys)
# If it's an encoder, either put in None if it's not used, or replace
# all permuted constructor params with the actual position.
if flatKey in self._encoderNames:
if flatKey in useEncoders:
# Form encoder dict, substituting in chosen permutation values.
return value.getDict(flatKey, position)
# Encoder not used.
else:
return None
# Regular top-level variable.
elif flatKey in position:
return position[flatKey]
# Fixed override of a parameter in the base description.
else:
return value
structuredParams = dictutils.rCopy(self._permutations,
_buildStructuredParams,
discardNoneKeys=False)
# Create the modelParams.
modelParams = dict(
structuredParams=structuredParams,
particleState = candidateParticle.getState()
)
# And the hashes.
m = hashlib.md5()
m.update(sortedJSONDumpS(structuredParams))
m.update(self._baseDescriptionHash)
paramsHash = m.digest()
particleInst = "%s.%s" % (modelParams['particleState']['id'],
modelParams['particleState']['genIdx'])
particleHash = hashlib.md5(particleInst).digest()
# Increase attempt counter
numAttempts += 1
# If this is a new one, and passes the filter test, exit with it.
# TODO: There is currently a problem with this filter's implementation as
# it relates to self._maxUniqueModelAttempts. When there is a filter in
# effect, we should try a lot more times before we decide we have
# exhausted the parameter space for this swarm. The question is, how many
# more times?
if self._filterFunc and not self._filterFunc(structuredParams):
valid = False
else:
valid = True
if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
break
# If we've exceeded the max allowed number of attempts, mark this swarm
# as completing or completed, so we don't try and allocate any more new
# particles to it, and pick another.
if numAttempts >= self._maxUniqueModelAttempts:
(exitNow, candidateParticle, candidateSwarm) \
= self._getCandidateParticleAndSwarm(
exhaustedSwarmId=candidateSwarm)
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
numAttempts = 0
useEncoders = candidateSwarm.split('.')
# Log message
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
% (pprint.pformat(modelParams, indent=4)))
modelResults.append((modelParams, paramsHash, particleHash))
return (False, modelResults)
def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
completed, completionReason, matured, numRecords):
"""Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
    The Hypersearch object should save these results into its own working
memory into some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
----------------------------------------------------------------------
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
                     dict containing metricName:result pairs.
                     May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it's currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model.
"""
if results is None:
metricResult = None
else:
metricResult = results[1].values()[0]
# Update our database.
errScore = self._resultsDB.update(modelID=modelID,
modelParams=modelParams,modelParamsHash=modelParamsHash,
metricResult=metricResult, completed=completed,
completionReason=completionReason, matured=matured,
numRecords=numRecords)
# Log message.
self.logger.debug('Received progress on model %d: completed: %s, '
'cmpReason: %s, numRecords: %d, errScore: %s' ,
modelID, completed, completionReason, numRecords, errScore)
# Log best so far.
(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
self.logger.debug('Best err score seen so far: %s on model %s' % \
(bestResult, bestModelID))
def runModel(self, modelID, jobID, modelParams, modelParamsHash,
jobsDAO, modelCheckpointGUID):
"""Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their name
structure as:
                           <encoderName>.<encoderVarName>
modelParamsHash: hash of modelParamValues
    jobsDAO:             jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key
"""
# We're going to make an assumption that if we're not using streams, that
# we also don't need checkpoints saved. For now, this assumption is OK
# (if there are no streams, we're typically running on a single machine
# and just save models to files) but we may want to break this out as
# a separate controllable parameter in the future
if not self._createCheckpoints:
modelCheckpointGUID = None
# Register this model in our database
self._resultsDB.update(modelID=modelID,
modelParams=modelParams,
modelParamsHash=modelParamsHash,
metricResult = None,
completed = False,
completionReason = None,
matured = False,
numRecords = 0)
# Get the structured params, which we pass to the base description
structuredParams = modelParams['structuredParams']
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
(pprint.pformat(modelParams, indent=4), modelID))
# Record time.clock() so that we can report on cpu time
cpuTimeStart = time.clock()
# Run the experiment. This will report the results back to the models
# database for us as well.
logLevel = self.logger.getEffectiveLevel()
try:
if self._dummyModel is None or self._dummyModel is False:
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(
modelID=modelID,
jobID=jobID,
baseDescription=self._baseDescription,
params=structuredParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if self._dummyModelParamsFunc is not None:
permInfo = dict(structuredParams)
permInfo ['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(
modelID=modelID,
jobID=jobID,
params=dummyParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
# Write out the completion reason and message
jobsDAO.modelSetCompleted(modelID,
completionReason = cmpReason,
completionMsg = cmpMsg,
cpuTime = time.clock() - cpuTimeStart)
except InvalidConnectionException, e:
self.logger.warn("%s", e)
| gpl-3.0 | -6,840,123,944,068,782,000 | 41.110918 | 95 | 0.620417 | false |
ccpgames/pypicloud-tools | pypicloud_tools/__init__.py | 1 | 9315 | """Constants and helpers common to multiple operations."""
from __future__ import print_function
import os
import sys
import boto
import argparse
import datetime
import operator
import pkg_resources
from collections import namedtuple
from pip.utils import SUPPORTED_EXTENSIONS
from boto.exception import NoAuthHandlerFound
try:
from configparser import RawConfigParser # python 3+
except ImportError: # pragma: no cover
from ConfigParser import RawConfigParser
# standardized config objects
PyPIConfig = namedtuple("PyPIConfig", ("server", "user", "password"))
Settings = namedtuple("Settings", ("s3", "pypi", "items", "parsed"))
S3Config = namedtuple(
"S3Config",
("bucket", "access", "secret", "acl", "region"),
)
# Supported Extensions for uploading, downloading, listing, rehosting..
SUPPORTED_EXTENSIONS = tuple(list(SUPPORTED_EXTENSIONS) +
[".egg", ".exe", ".msi"])
# used to perform version comparisons
OPERATORS = {
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
"<=": operator.le,
">": operator.gt,
"<": operator.lt,
}
# used as a callback to show some progress to stdout
print_dot = lambda x, y: print(".", end="")
# command line usage
USAGE = """
{called_as} [options] <FILE> [FILE] ...
Reads {pypirc} (override with --config) for the section and extra keys:
[pypicloud]
repository:http://your.pypicloud.server/pypi
username:admin
password:hunter7
bucket:your_bucket
access:some_key
secret:other_key
region:aws_region
acl:optional_acl
Note:
To talk directly to S3, you need the `bucket`, `region` and/or `access`
and `secret` values filled in. The ACL defined here is your default, you
    can override per file via the --acl flag, which takes precedence.
AWS Access_Key and Secret_Key can also optionally be read from your
credentials file at ~/.aws/credentials.
""".format(
called_as=os.path.basename(sys.argv[0]),
pypirc=os.path.join(os.path.expanduser("~"), ".pypirc")
)
def get_bucket_conn(s3_config):
"""Uses a S3Config and boto to return a bucket connection object."""
no_auth_error = ("Could not authenticate with S3. Check your "
"~/.aws/credentials or pass --access and --secret flags.")
if s3_config.region is None:
func = boto.connect_s3
args = (s3_config.access, s3_config.secret)
else:
func = boto.s3.connect_to_region
args = (s3_config.region,)
try:
s3_conn = func(*args)
except NoAuthHandlerFound:
raise SystemExit(no_auth_error)
return s3_conn.get_bucket(s3_config.bucket)
def settings_from_config(options):
"""Try to read config file and parse settings.
Args:
options: parsed NameSpace, with `config` and maybe `acl` values
Returns:
tuple of S3Config and PyPIConfig objects, or Nones when missing values
"""
parser = RawConfigParser()
if isinstance(options.config, list):
config_file = options.config[0]
else:
config_file = options.config
try:
parser.read(config_file)
except Exception as error:
print(error, file=sys.stderr)
key = "pypicloud" # config section key
if key not in parser.sections():
return None, None
s3_conf = None
pypi_conf = None
pypi_required = ("repository", "username", "password")
if parser.has_option(key, "bucket"):
acl = access = secret = region = None
if getattr(options, "acl", None):
acl = options.acl[0]
elif parser.has_option(key, "acl"):
acl = parser.get(key, "acl")
if parser.has_option(key, "region"):
region = parser.get(key, "region")
if parser.has_option(key, "secret"):
secret = parser.get(key, "secret")
if parser.has_option(key, "access"):
access = parser.get(key, "access")
s3_conf = S3Config(
parser.get(key, "bucket"),
access,
secret,
acl,
region,
)
if all([parser.has_option(key, opt) for opt in pypi_required]):
pypi_conf = PyPIConfig(
parser.get(key, "repository"),
parser.get(key, "username"),
parser.get(key, "password"),
)
return s3_conf, pypi_conf
def parse_args(upload=False, download=False, listing=False, rehost=False):
"""Builds an argparse ArgumentParser.
Returns:
tuple of parse settings and argument parser object
"""
if upload:
verb = "upload"
direction = "to"
s3_flags = ("bucket", "access", "secret", "acl", "region")
remainders = ("files", " to PyPICloud's S3 bucket directly")
elif rehost:
verb = "rehost"
direction = "to"
s3_flags = ("bucket", "access", "secret", "acl", "region")
remainders = ("packages", ", use ==N.N.N for a specific version")
else:
verb = "download" if download else "list"
direction = "from"
s3_flags = ("bucket", "access", "secret", "region")
remainders = ("packages", ", use ==N.N.N for a specific version")
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description="{} package(s) {} S3, bypassing PyPICloud".format(
verb.title(),
direction,
),
usage=USAGE,
)
for flag in s3_flags:
parser.add_argument(
"--{}".format(flag),
metavar=flag.upper(),
help="Specify the S3 {} key value for this {}".format(flag, verb),
nargs=1,
type=str,
default=False,
)
for flag in ("server", "user", "password"):
parser.add_argument(
"--{}".format(flag),
metavar=flag.upper(),
help="Specify the PyPICloud {} for this {}".format(flag, verb),
nargs=1,
type=str,
default=False,
)
parser.add_argument(
"--config",
metavar="FILE",
nargs=1,
type=str,
default=os.path.join(os.path.expanduser("~"), ".pypirc"),
help="Specify a config file (default: %(default)s)",
)
if rehost:
parser.add_argument(
"--deps", "--with-deps",
action="store_true",
help="Rehost the package(s) dependencies as well",
)
parser.add_argument(
"-v", "--version",
action="version",
version=(
"pypicloud-tools %(prog)s v{}\n"
"Copyright (c) {} CCP hf.\n"
"Released for use under the MIT license."
).format(
pkg_resources.get_distribution("pypicloud-tools").version,
datetime.datetime.now().year,
),
)
parser.add_argument(
dest=remainders[0],
metavar=remainders[0].upper(),
help="{}(s) to {}{}".format(
remainders[0].title(),
verb,
remainders[1],
),
nargs=argparse.REMAINDER,
)
return parser.parse_args(sys.argv[1:]), parser
def get_settings(upload=False, download=False, listing=False, rehost=False):
"""Gathers both settings for S3 and PyPICloud.
Args:
upload: boolean of if this is an upload
download: boolean of if this is a download
listing: boolean of if this is a listing
Returns:
a Settings object with `s3` and `pypi` attributes
"""
if len([key for key in (upload, download, listing, rehost) if key]) != 1:
raise RuntimeError("Expecting a single boolean argument to be True!")
args, parser = parse_args(upload, download, listing, rehost)
if hasattr(args, "files"):
remainders = args.files
else:
remainders = args.packages
# ignore --long-opts which might be used per-module inline from sys.argv
remainders = [rem for rem in remainders if not rem.startswith("--")]
if not remainders and not listing:
raise SystemExit(parser.print_help())
if args.bucket:
acl = access = secret = region = None
if hasattr(args, "region") and args.region:
region = args.region[0]
if hasattr(args, "access") and args.access:
access = args.access[0]
if hasattr(args, "secret") and args.secret:
secret = args.secret[0]
if hasattr(args, "acl") and args.acl:
acl = args.acl[0]
s3_config = S3Config(args.bucket[0], access, secret, acl, region)
else:
s3_config = None
if args.server and args.user and args.password:
pypi_config = PyPIConfig(
args.server[0],
args.user[0],
args.password[0],
)
else:
pypi_config = None
configfile_s3, configfile_pypi = settings_from_config(args)
if configfile_s3 and s3_config is None:
s3_config = configfile_s3
if configfile_pypi and pypi_config is None:
pypi_config = configfile_pypi
if s3_config is None:
print("ERROR: Could not determine S3 settings.", file=sys.stderr)
raise SystemExit(parser.print_help())
return Settings(s3_config, pypi_config, remainders, args)
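# Illustrative sketch (not part of the original module): how the console
# scripts are expected to combine the helpers above.  Assumes a populated
# ~/.pypirc with the [pypicloud] section shown in USAGE.
def _example_listing():
    settings = get_settings(listing=True)        # CLI flags + config file
    bucket = get_bucket_conn(settings.s3)        # boto bucket connection
    return [key.name for key in bucket.list()]   # names of hosted files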
| mit | -1,410,663,693,755,735,800 | 27.928571 | 79 | 0.593666 | false |
walterjabo/RPI_LCD_Transmission_Monitor | torrents.py | 1 | 1106 | #!/usr/bin/python
from Adafruit_CharLCD import Adafruit_CharLCD
from subprocess import *
from time import sleep, strftime
from datetime import datetime
import transmissionrpc
lcd = Adafruit_CharLCD()
cmd = "ip addr show wlan0 | grep inet | awk '{print $2}' | cut -d/ -f1"
lcd.begin(16, 2)
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
tc = transmissionrpc.Client(address="localhost", port=9091, user='transmission', password='transmission')
torrent_name = tc.get_torrents()[0].name;
torrent_index = 0
str_torrent_index = str(torrent_index + 1)
texto_corredizo = torrent_name
texto_largo = texto_corredizo
i = 0
while 1:
lcd.setCursor(0,1)
percent = tc.get_torrents()[0].percentDone * 100;
lcd.message(str(percent)+ ' %');
sleep(1)
k = 0
while (k < 4):
if len(texto_largo) < 16:
texto_largo = texto_largo + '...' + texto_corredizo
message_texto = (str_torrent_index + ':' + texto_largo)[:16]
lcd.setCursor(0,0)
lcd.message(message_texto)
sleep(0.25)
texto_largo = texto_largo[1:]
i = i + 1
k = k + 1
| gpl-2.0 | 8,548,548,404,915,456,000 | 21.12 | 105 | 0.673599 | false |
fvcproductions/dotfiles | bin/alfred/Alfred.alfredpreferences/workflows/user.workflow.DEDF5652-6FEF-4776-80D8-ACEDF577D06A/release.py | 1 | 2355 | import sys
import urllib2
import json
import os
GITHUB_USER = 'jeeftor'
GITHUB_REPO = 'EmojiTaco'
''' Requires you have a github access token as specified below in your home director '''
# Read github access token outof ~/.github_access_token
from os.path import expanduser
home = expanduser("~")
token_file = home + "/.github_access_token"
GITHUB_ACCESS_TOKEN = open(token_file, "r").read()
def pp_json(json_thing, sort=True, indents=4):
if type(json_thing) is str:
print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_thing, sort_keys=sort, indent=indents))
return None
print (sys.argv)
version = sys.argv[1]
file_to_upload = sys.argv[2]
github_token = str(GITHUB_ACCESS_TOKEN).rstrip()
#curl -i -H 'Authorization: token 5b8e3a4d92993282d2a8f20b5fe4910edc9f82dd' https://api.github.com/user/repos
request_headers = {
"Content-Type": "application/json",
"Authorization": "token %s" % github_token
}
print (request_headers)
# Release INFO
payload = {
"tag_name": "v{}".format(version),
"target_commitish": "master",
"name": "Release {}".format(version),
"body": "Auto Generated Release notes by the `release.py` script",
"draft": True,
"prerelease": False
}
# Make a new release
data = json.dumps(payload)
clen = len(data)
request_headers['Content-Length'] = clen
url = "https://api.github.com/repos/{}/{}/releases".format(GITHUB_USER, GITHUB_REPO)
#url = 'https://api.github.com/repos/jeeftor/EmojiTaco/releases'
print (url)
req = urllib2.Request(url, data, headers=request_headers)
f = urllib2.urlopen(req)
response = f.read()
f.close()
pp_json(response)
json = json.loads(response)
# Parse out the upload URL
url = json['upload_url'].split('{')[0]
# Do more parsing
upload_path = "build/" + file_to_upload
upload_data_len = length = os.path.getsize(upload_path)
upload_data = open(upload_path, "rb")
url = url + "?name={}".format(file_to_upload)
# Upload the new workflow file
request = urllib2.Request(url, data=upload_data, headers=request_headers)
request.add_header('Cache-Control', 'no-cache')
request.add_header('Content-Length', '%d' % upload_data_len)
res = urllib2.urlopen(request).read().strip()
# Launch web browser to the Draf release
from subprocess import call
call(["open", json['html_url']])
exit()
| mit | 1,539,996,446,428,196,600 | 23.789474 | 109 | 0.698938 | false |
antonyrp/teach-acsl | contest4/acsl_numble.py | 1 | 4242 | ## Antony Philip - 04/02/2016 - Developed for ACSL teaching
## ACSL 2013-2014 - ACSL NUMBLE
"""
This is an interesting problem which involves backtracking/recursion and greedy approach
First I sort the given n numbers so that we can find the maximum possible sum of given
length r which is a multiple of 5. Then recursion does the magic.
Imagine we have a game board of n x r: we try to place digits on this board
so that they add up to the target sum, if possible. Results are stored in a
set so that duplicates, which may occur when the same digit appears more than
once, are removed.
"""
class Numble :
"""
Simple class to do numble work!
"""
def __init__(self, digits, target_len, multiple=5):
"""
Constructor taking the list of digits to use - duplicates allowed
        target length of the digit selection, and the value the sum must be a multiple of (5 by default)
"""
self._sortedDigits = digits
self._sortedDigits.sort(reverse=True) ## Sort with maximum digit first we want to maximize the sum
self._numDigits = len(self._sortedDigits)
self._targetLength = target_len
self._multiple = multiple
def NumbleWords(self):
"""
        We need to try all possible multiples of 5, starting from the maximum possible sum
"""
        s = sum(self._sortedDigits[:self._targetLength]) ## Maximum possible sum using the target_len largest digits
s = int(s/self._multiple) * self._multiple ## Find the maximum sum which is also a multiple of 5
self._results = set() ## Set where we store the results
while (s > 0 and len(self._results)==0) :
self.Numble(0, 0, 0, s, []) ## Call numble to find the possible digits which can sum to s
s -= self._multiple ## Try the next sum
return "None" if len(self._results)==0 else ', '.join(map(lambda n: str(n), self._results))
def Numble(self, i, level, current_sum, target_sum, candidates) :
"""
Do the recursion/backtracking to find numble word digits which sum to given target_sum
Partial candidates are stored in candidates and Partial sum is stored in current_sum
level indicates in the board analogy which column we are in -
We need to be at the last column (targetLength) in order to get sum with targetLength
i is the index into sorted digits array which is used for the current column/level
"""
if level >= self._targetLength : ## We are done
return
for k in range(i, self._numDigits): ## Try all digits for this column/level starting from i'th digit
x = self._sortedDigits[k]
if level == self._targetLength - 1 :
if current_sum+x == target_sum : ## We found a candidate!
candidates.append(x)
n = ''.join(map(lambda x : str(x), candidates))
self._results.add(n)
candidates.pop()
elif current_sum+x < target_sum : ## No more try required all the other digits will sum less than target
break;
elif current_sum+x < target_sum : ## We haven't explored all levels and we still can add digits
candidates.append(x)
self.Numble(k+1, level+1, current_sum+x, target_sum, candidates) ## Try for the next level
candidates.pop()
"""
Main program starts here
Read in 5 test cases and find numble words (max sum multiple of 5)
"""
results = []
for i in range(5) :
userinput = raw_input()
data = userinput.split(', ')
digits_for_use = map(lambda x : int(x), list(data[0])) ## Convert the digit strings to numbers
target_length = int(data[1])
numble = Numble(digits_for_use, target_length) ## Find the numble words
results.append(numble.NumbleWords()) ## Add the results for this testcase
## Write out all the results
for r in results :
print r
"""
Results of execution input/output
>>>
9678415, 7
9678415, 6
9678415, 5
9678415, 4
2678515, 3
9876541
987641
98765
9876
875
>>>
"""
| mit | 806,462,668,560,513,000 | 42.731959 | 120 | 0.619283 | false |
flavour/ifrc_qa | modules/tests/staff/add_staff_participants.py | 1 | 2561 | """ Sahana Eden Module Automated Tests - HRM007 Add Staff Participants
@copyright: 2011-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
from tests import *
class AddStaffParticipants(SeleniumUnitTest):
def test_hrm007_add_staff_participants(self):
"""
@case: HRM007
            @description: Add a premade staff participant
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
browser = self.browser
config = self.config
self.login(account="admin", nexturl="hrm/training_event")
self.dt_filter("Ainaro Branch Office")
self.dt_action()
url = browser.current_url
url_parts = url.split("/")
try:
org_id = int(url_parts[-2])
except:
org_id = int(url_parts[-1])
browser.get("%s/hrm/training_event/%s/participant" % (config.url, org_id))
# Check if add button is present on the page. Click it if found.
add_btn = self.browser.find_elements_by_id("show-add-btn")
if len(add_btn) > 0:
add_btn[0].click()
self.create("hrm_training",
[
( "person_id",
"Goku Gohan")
]
)
| mit | -2,509,087,433,587,518,500 | 39.650794 | 110 | 0.654822 | false |
MichaelCoughlinAN/Odds-N-Ends | Python/Python Modules/lxml-4.2.0/src/lxml/html/tests/test_html5parser.py | 2 | 15503 | import os
import imp
try:
from StringIO import StringIO
except ImportError: # python 3
from io import StringIO
import sys
import tempfile
import unittest
try:
from unittest import skipUnless
except ImportError:
# sys.version < (2, 7)
def skipUnless(condition, reason):
return lambda f: condition and f or None
if sys.version_info < (2,6):
class NamedTemporaryFile(object):
def __init__(self, delete=True, **kwargs):
self._tmpfile = tempfile.NamedTemporaryFile(**kwargs)
def close(self):
self._tmpfile.flush()
def __getattr__(self, name):
return getattr(self._tmpfile, name)
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
from lxml.builder import ElementMaker
from lxml.etree import Element, ElementTree, ParserError
from lxml.html import html_parser, XHTML_NAMESPACE
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from urllib import pathname2url
except ImportError:
from urllib.request import pathname2url
def path2url(path):
return urlparse.urljoin(
'file:', pathname2url(path))
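# Illustrative example (not part of the original tests): on a POSIX system,
#   path2url('/tmp/doc.html')  ->  'file:///tmp/doc.html'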
try:
import html5lib
except ImportError:
html5lib = None
class BogusModules(object):
# See PEP 302 for details on how this works
def __init__(self, mocks):
self.mocks = mocks
def find_module(self, fullname, path=None):
if fullname in self.mocks:
return self
return None
def load_module(self, fullname):
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__, mod.__loader__, mod.__path__ = "<dummy>", self, []
mod.__dict__.update(self.mocks[fullname])
return mod
# Fake just enough of html5lib so that html5parser.py is importable
# without errors.
sys.meta_path.append(BogusModules({
'html5lib': {
# A do-nothing HTMLParser class
'HTMLParser': type('HTMLParser', (object,), {
'__init__': lambda self, **kw: None,
}),
},
'html5lib.treebuilders': {
},
'html5lib.treebuilders.etree_lxml': {
'TreeBuilder': 'dummy treebuilder',
},
}))
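# Illustrative sketch (not part of the original test module): the hook above
# lets ``import html5lib`` succeed even when the real package is absent, so
# that lxml.html.html5parser stays importable for these tests.  This helper
# is never called by the tests; it only demonstrates the effect.
def _demo_bogus_import():
    import html5lib as html5lib_or_stub   # real package if installed, stub otherwise
    return html5lib_or_stub.HTMLParser(strict=True)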
class Test_HTMLParser(unittest.TestCase):
def make_one(self, **kwargs):
from lxml.html.html5parser import HTMLParser
return HTMLParser(**kwargs)
@skipUnless(html5lib, 'html5lib is not installed')
def test_integration(self):
parser = self.make_one(strict=True)
tree = parser.parse(XHTML_TEST_DOCUMENT)
root = tree.getroot()
self.assertEqual(root.tag, xhtml_tag('html'))
class Test_XHTMLParser(unittest.TestCase):
def make_one(self, **kwargs):
from lxml.html.html5parser import XHTMLParser
return XHTMLParser(**kwargs)
@skipUnless(hasattr(html5lib, 'XHTMLParser'),
'xhtml5lib does not have XHTMLParser')
def test_integration(self):
        # XXX: This test is untested. (html5lib no longer has an XHTMLParser)
parser = self.make_one(strict=True)
tree = parser.parse(XHTML_TEST_DOCUMENT)
root = tree.getroot()
self.assertEqual(root.tag, xhtml_tag('html'))
class Test_document_fromstring(unittest.TestCase):
def call_it(self, *args, **kwargs):
from lxml.html.html5parser import document_fromstring
return document_fromstring(*args, **kwargs)
def test_basic(self):
parser = DummyParser(doc=DummyElementTree(root='dummy root'))
elem = self.call_it(b'dummy input', parser=parser)
self.assertEqual(elem, 'dummy root')
self.assertEqual(parser.parse_args, (b'dummy input',))
self.assertEqual(parser.parse_kwargs, {'useChardet': True})
def test_guess_charset_not_used_for_unicode(self):
parser = DummyParser()
elem = self.call_it(b''.decode('ascii'), parser=parser)
self.assertEqual(parser.parse_kwargs, {})
def test_guess_charset_arg_gets_passed_to_parser(self):
parser = DummyParser()
elem = self.call_it(b'', guess_charset='gc_arg', parser=parser)
self.assertEqual(parser.parse_kwargs, {'useChardet': 'gc_arg'})
def test_raises_type_error_on_nonstring_input(self):
not_a_string = None
self.assertRaises(TypeError, self.call_it, not_a_string)
@skipUnless(html5lib, 'html5lib is not installed')
def test_integration(self):
elem = self.call_it(XHTML_TEST_DOCUMENT)
self.assertEqual(elem.tag, xhtml_tag('html'))
class Test_fragments_fromstring(unittest.TestCase):
def call_it(self, *args, **kwargs):
from lxml.html.html5parser import fragments_fromstring
return fragments_fromstring(*args, **kwargs)
def test_basic(self):
parser = DummyParser(fragments='fragments')
fragments = self.call_it(b'dummy input', parser=parser)
self.assertEqual(fragments, 'fragments')
self.assertEqual(parser.parseFragment_kwargs, {'useChardet': False})
def test_guess_charset_arg_gets_passed_to_parser(self):
parser = DummyParser()
elem = self.call_it(b'', guess_charset='gc_arg', parser=parser)
self.assertEqual(parser.parseFragment_kwargs, {'useChardet': 'gc_arg'})
def test_guess_charset_not_used_for_unicode(self):
parser = DummyParser()
elem = self.call_it(b''.decode('ascii'), parser=parser)
self.assertEqual(parser.parseFragment_kwargs, {})
def test_raises_type_error_on_nonstring_input(self):
not_a_string = None
self.assertRaises(TypeError, self.call_it, not_a_string)
def test_no_leading_text_strips_empty_leading_text(self):
parser = DummyParser(fragments=['', 'tail'])
fragments = self.call_it('', parser=parser, no_leading_text=True)
self.assertEqual(fragments, ['tail'])
def test_no_leading_text_raises_error_if_leading_text(self):
parser = DummyParser(fragments=['leading text', 'tail'])
self.assertRaises(ParserError, self.call_it,
'', parser=parser, no_leading_text=True)
@skipUnless(html5lib, 'html5lib is not installed')
def test_integration(self):
fragments = self.call_it('a<b>c</b>')
self.assertEqual(len(fragments), 2)
self.assertEqual(fragments[0], 'a')
self.assertEqual(fragments[1].tag, xhtml_tag('b'))
class Test_fragment_fromstring(unittest.TestCase):
def call_it(self, *args, **kwargs):
from lxml.html.html5parser import fragment_fromstring
return fragment_fromstring(*args, **kwargs)
def test_basic(self):
element = DummyElement()
parser = DummyParser(fragments=[element])
self.assertEqual(self.call_it('html', parser=parser), element)
def test_raises_type_error_on_nonstring_input(self):
not_a_string = None
self.assertRaises(TypeError, self.call_it, not_a_string)
def test_create_parent(self):
parser = DummyParser(fragments=['head', Element('child')])
elem = self.call_it('html', parser=parser, create_parent='parent')
self.assertEqual(elem.tag, 'parent')
self.assertEqual(elem.text, 'head')
self.assertEqual(elem[0].tag, 'child')
def test_create_parent_default_type_no_ns(self):
parser = DummyParser(fragments=[], namespaceHTMLElements=False)
elem = self.call_it('html', parser=parser, create_parent=True)
self.assertEqual(elem.tag, 'div')
def test_raises_error_on_leading_text(self):
parser = DummyParser(fragments=['leading text'])
self.assertRaises(ParserError, self.call_it, 'html', parser=parser)
def test_raises_error_if_no_elements_found(self):
parser = DummyParser(fragments=[])
self.assertRaises(ParserError, self.call_it, 'html', parser=parser)
def test_raises_error_if_multiple_elements_found(self):
parser = DummyParser(fragments=[DummyElement(), DummyElement()])
self.assertRaises(ParserError, self.call_it, 'html', parser=parser)
def test_raises_error_if_tail(self):
parser = DummyParser(fragments=[DummyElement(tail='tail')])
self.assertRaises(ParserError, self.call_it, 'html', parser=parser)
class Test_fromstring(unittest.TestCase):
def call_it(self, *args, **kwargs):
from lxml.html.html5parser import fromstring
return fromstring(*args, **kwargs)
def test_returns_whole_doc_if_input_contains_html_tag(self):
parser = DummyParser(root='the doc')
self.assertEqual(self.call_it('<html></html>', parser=parser),
'the doc')
def test_returns_whole_doc_if_input_contains_doctype(self):
parser = DummyParser(root='the doc')
self.assertEqual(self.call_it('<!DOCTYPE html>', parser=parser),
'the doc')
def test_returns_whole_doc_if_input_is_encoded(self):
parser = DummyParser(root='the doc')
input = '<!DOCTYPE html>'.encode('ascii')
self.assertEqual(self.call_it(input, parser=parser),
'the doc')
def test_returns_whole_doc_if_head_not_empty(self, use_ns=True):
E = HTMLElementMaker(namespaceHTMLElements=use_ns)
root = E.html(E.head(E.title()))
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), root)
def test_returns_whole_doc_if_head_not_empty_no_ns(self):
self.test_returns_whole_doc_if_head_not_empty(use_ns=False)
def test_returns_unwraps_body_if_single_element(self):
E = HTMLElementMaker()
elem = E.p('test')
root = E.html(E.head(), E.body(elem))
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), elem)
def test_returns_body_if_has_text(self):
E = HTMLElementMaker()
elem = E.p('test')
body = E.body('text', elem)
root = E.html(E.head(), body)
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), body)
def test_returns_body_if_single_element_has_tail(self):
E = HTMLElementMaker()
elem = E.p('test')
elem.tail = 'tail'
body = E.body(elem)
root = E.html(E.head(), body)
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), body)
def test_wraps_multiple_fragments_in_div_no_ns(self):
E = HTMLElementMaker(namespaceHTMLElements=False)
parser = DummyParser(root=E.html(E.head(), E.body(E.h1(), E.p())),
namespaceHTMLElements=False)
elem = self.call_it('', parser=parser)
self.assertEqual(elem.tag, 'div')
def test_wraps_multiple_fragments_in_span_no_ns(self):
E = HTMLElementMaker(namespaceHTMLElements=False)
parser = DummyParser(root=E.html(E.head(), E.body('foo', E.a('link'))),
namespaceHTMLElements=False)
elem = self.call_it('', parser=parser)
self.assertEqual(elem.tag, 'span')
def test_raises_type_error_on_nonstring_input(self):
not_a_string = None
self.assertRaises(TypeError, self.call_it, not_a_string)
@skipUnless(html5lib, 'html5lib is not installed')
def test_integration_whole_doc(self):
elem = self.call_it(XHTML_TEST_DOCUMENT)
self.assertEqual(elem.tag, xhtml_tag('html'))
@skipUnless(html5lib, 'html5lib is not installed')
def test_integration_single_fragment(self):
elem = self.call_it('<p></p>')
self.assertEqual(elem.tag, xhtml_tag('p'))
class Test_parse(unittest.TestCase):
def call_it(self, *args, **kwargs):
from lxml.html.html5parser import parse
return parse(*args, **kwargs)
def make_temp_file(self, contents=''):
tmpfile = NamedTemporaryFile(delete=False)
try:
tmpfile.write(contents.encode('utf8'))
tmpfile.flush()
tmpfile.seek(0)
return tmpfile
except Exception:
try:
tmpfile.close()
finally:
                os.unlink(tmpfile.name)
raise
def test_with_file_object(self):
parser = DummyParser(doc='the doc')
fp = open(__file__)
try:
self.assertEqual(self.call_it(fp, parser=parser), 'the doc')
self.assertEqual(parser.parse_args, (fp,))
finally:
fp.close()
def test_with_file_name(self):
parser = DummyParser(doc='the doc')
tmpfile = self.make_temp_file('data')
try:
data = tmpfile.read()
finally:
tmpfile.close()
try:
self.assertEqual(self.call_it(tmpfile.name, parser=parser), 'the doc')
fp, = parser.parse_args
try:
self.assertEqual(fp.read(), data)
finally:
fp.close()
finally:
os.unlink(tmpfile.name)
def test_with_url(self):
parser = DummyParser(doc='the doc')
tmpfile = self.make_temp_file('content')
try:
data = tmpfile.read()
finally:
tmpfile.close()
try:
url = path2url(tmpfile.name)
self.assertEqual(self.call_it(url, parser=parser), 'the doc')
fp, = parser.parse_args
try:
self.assertEqual(fp.read(), data)
finally:
fp.close()
finally:
os.unlink(tmpfile.name)
@skipUnless(html5lib, 'html5lib is not installed')
def test_integration(self):
doc = self.call_it(StringIO(XHTML_TEST_DOCUMENT))
root = doc.getroot()
self.assertEqual(root.tag, xhtml_tag('html'))
def test_suite():
loader = unittest.TestLoader()
return loader.loadTestsFromModule(sys.modules[__name__])
class HTMLElementMaker(ElementMaker):
def __init__(self, namespaceHTMLElements=True):
initargs = dict(makeelement=html_parser.makeelement)
if namespaceHTMLElements:
initargs.update(namespace=XHTML_NAMESPACE,
nsmap={None: XHTML_NAMESPACE})
ElementMaker.__init__(self, **initargs)
class DummyParser(object):
def __init__(self, doc=None, root=None,
fragments=None, namespaceHTMLElements=True):
self.doc = doc or DummyElementTree(root=root)
self.fragments = fragments
self.tree = DummyTreeBuilder(namespaceHTMLElements)
def parse(self, *args, **kwargs):
self.parse_args = args
self.parse_kwargs = kwargs
return self.doc
def parseFragment(self, *args, **kwargs):
self.parseFragment_args = args
self.parseFragment_kwargs = kwargs
return self.fragments
class DummyTreeBuilder(object):
def __init__(self, namespaceHTMLElements=True):
self.namespaceHTMLElements = namespaceHTMLElements
class DummyElementTree(object):
def __init__(self, root):
self.root = root
def getroot(self):
return self.root
class DummyElement(object):
def __init__(self, tag='tag', tail=None):
self.tag = tag
self.tail = tail
def xhtml_tag(tag):
return '{%s}%s' % (XHTML_NAMESPACE, tag)
XHTML_TEST_DOCUMENT = '''
<!DOCTYPE html>
<html>
<head><title>TITLE</title></head>
<body></body>
</html>
'''
| gpl-3.0 | -3,265,210,253,360,421,400 | 33.76009 | 82 | 0.620525 | false |
PaulMcMillan/2014_defcon_timing | hue/vis3.py | 1 | 4234 | import matplotlib.pyplot as plt
from collections import defaultdict
from itertools import combinations
from pprint import pprint
from scipy import stats
import random
from itertools import chain
class QueryResponse(object):
"""Class to make it easier to work with parsed data. Works with
everything natively in nanoseconds.
"""
# This offset is a convenience that makes it easier to avoid
# losing precision if we start using floats. Pick the right value
# for you.
OFFSET = 1405000000000000000
def __init__(self, *args):
if len(args) < 3:
print args
self.host = args[0]
self.path = args[1]
self.query = self._parse(args[2])
self.response = map(self._parse, args[3:])
def _parse(self, nano_time):
""" Parse a nansecond timestamp string into nanoseconds (integer) """
# If we accidentally mix microsecond time, fix it to nano.
seconds, nanoseconds = nano_time.split('.')
return int('{}{:<9}'.format(seconds, nanoseconds)) - self.OFFSET
def total(self):
""" Time from Request to complete response. """
return self.response[-1] - self.query
def first_response(self):
""" Time from request to first response. """
return self.response[0] - self.query
def total_response(self):
""" Delta first response packet to last. """
return self.response[-1] - self.response[0]
def last_delta(self):
""" Time from second to last packet, to last response packet. """
return self.response[-1] - self.response[-2]
def response_count(self):
""" How many packets were in the response? """
return len(self.response)
def _response_deltas(self):
for x in range(len(self.response) - 1):
yield self.response[x+1] - self.response[x]
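# Illustrative example (not part of the original script; the timestamps below
# are invented): query sent at OFFSET, first response 100000 ns later, last
# response 250000 ns after the query.
def _example_query_response():
    qr = QueryResponse("host", "/api/xyz",
                       "1405000000.000000000",   # query sent
                       "1405000000.000100000",   # first response packet
                       "1405000000.000250000")   # last response packet
    return qr.total(), qr.first_response(), qr.total_response()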
data = defaultdict(list)
with open('data/out.parsed') as f:
for line in f:
qr = QueryResponse(*line.strip().split(','))
if qr.path.startswith('/api/'):
if qr.response_count() <= 18:
data[qr.path.replace('/api/', '')[:24][-1]].append(
qr.total_response())
for k, v in data.items():
# resx = defaultdict(int)
# for x in v:
# resx[x] += 1
print k, len(v)
# pprint(dict(resx))
#START = 500
#END = 8100
START = 8000
END = 15000
# while True:
# data_roundup = defaultdict(int)
# a, b = random.choice(xrange(16500)), random.choice(xrange(16500))
# START = min([a,b])
# END = max([a,b])
# for s1, s2 in combinations(data.keys(), 2):
# d, p = stats.ks_2samp(data[s1][START:END],data[s2][START:END])
# if p < 0.01:
# data_roundup[s1] += 1
# data_roundup[s2] += 1
# #print s1, s2,
# #print ' D: %s p:%s' % (d, p)
# if data_roundup and max(dict(data_roundup).values()) >= 10:
# print END - START
# pprint(dict(data_roundup))
# import math
# length = 15000
# for key in data.keys():
# other_keys = set(data.keys())
# other_keys.remove(key)
# this_data = random.sample(data[key], length)
# other_data = random.sample(list(chain(*[data[x] for x in other_keys])), length)
# d, p = stats.ks_2samp(this_data, other_data)
# if p < 0.05:
# print
# print key, ' D:%s p:%s' % (d, p), max(this_data), max(other_data)
# def parse_data():
# results = defaultdict(list)
# for x in data:
# if len(x) <= 80:
# category = len(x[1])
# try:
# diff = float(x[-1]) - float(x[3])
# except IndexError:
# print x
# results[category].append(diff)
# print results.keys()
# return results
common_params = dict(
# bins=100,
# range=(29500000, 30400000),
# # histtype='step',
# alpha=0.6,
)
prev = 0
for x in range(3, 7):
n = 5000 * x
d = data[str(x)][1:5000]
plt.plot(sorted(d), label='%d' % (x), **common_params)
prev = n
# for key, value in parse_data().items():
# plt.plot(sorted(value[:1200]), label=str(key), **common_params)
# #plt.plot(sorted(parse_data('')), label='all', **common_params)
plt.legend()
plt.show()
| bsd-2-clause | -4,668,456,046,142,274,000 | 29.905109 | 85 | 0.573925 | false |
FunTimeCoding/directory-tools | setup.py | 1 | 1432 | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='directory-tools',
version='0.1.0',
description='Manage OpenLDAP users and groups.',
url='https://github.com/FunTimeCoding/directory-tools',
author='Alexander Reitzel',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: System :: Systems Administration :: Authentication/Directory'
' :: LDAP',
],
keywords='slapd openldap abstraction command line web service',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=['pyyaml', 'flask', 'ldap3'],
python_requires='>=3.2',
entry_points={
'console_scripts': [
'dt=directory_tools.directory_tools:'
'DirectoryTools.main',
],
},
)
| mit | 6,691,615,528,217,709,000 | 33.926829 | 79 | 0.60405 | false |
SheepDogInc/sheepdog_tables | sheepdog_tables/templatetags/urlbuilder.py | 1 | 1968 | from django import template
from django.core.urlresolvers import reverse
register = template.Library()
"""
urlbuilder is a template tag that takes a TableRowURL object
and an object to get data from, like a Participant or Connector.
It builds a list of arguments from the args parameter as found in
TableRowURL. For each one, it checks if the argument matches a
property of the passed object, and will use that property. Otherwise
it will just pass through the argument as is.
The result is a URL, like /myapp/myobject/1/, generated by
django.core.urlresolvers.reverse.
:params
url - A TableRowURL object or matching subclass
obj - A (normally) model backed object (like User, Participant, etc)
Usage for this template tag is as follows:
{% urlbuilder tablerowurl object %}
"""
class UrlBuilderNode(template.Node):
def __init__(self, url, obj):
self.url = url
self.obj = obj
def render(self, context):
try:
url = context.get(self.url, None)
obj = context.get(self.obj, None)
if url is None or obj is None:
return ''
arg_lists = [arg.split('.') for arg in url.args]
args = []
# TODO: Replace with resolve() when it gets implemented.
for arg_list in arg_lists:
chain = obj
for arg in arg_list:
chain = getattr(chain, arg) if hasattr(chain, arg) else arg
chain = chain() if callable(chain) else chain
args.append(chain)
return reverse(url.url, args=args)
except template.VariableDoesNotExist:
return ""
def urlbuilder(parser, token):
try:
tag_name, url, obj = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('%s requires 2 arguments' % tag_name)
return UrlBuilderNode(url, obj)
register.tag('urlbuilder', urlbuilder)
| bsd-3-clause | -6,577,632,244,761,753,000 | 30.238095 | 80 | 0.643801 | false |
jhpyle/docassemble | docassemble_demo/docassemble/demo/alembic/env.py | 1 | 2127 | from __future__ import with_statement
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
import json
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
connect_args = json.loads(config.get_main_option("connect_args"))
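# Example (assumption, not taken from this project's alembic.ini): the option
# is expected to hold a JSON object, e.g.
#   connect_args = {"sslmode": "require", "connect_timeout": 10}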
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
connect_args=connect_args
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit | -1,788,559,162,752,157,400 | 25.924051 | 69 | 0.706159 | false |
ojii/viceroy | viceroy/tests/test_django.py | 1 | 1064 | from __future__ import absolute_import
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'viceroy.tests.djangoapp.settings'
import django
from django.test.runner import setup_databases
from viceroy.api import build_test_case
from viceroy.contrib.django import ViceroyDjangoTestCase
from .utils import ViceroyScanner
root = os.path.abspath(os.path.dirname(__file__))
test_file = os.path.join(root, 'djangoapp', 'static', 'tests.js')
class DatabaseTestCase(ViceroyDjangoTestCase):
@classmethod
def setUpClass(cls):
django.setup()
cls.old_config = setup_databases(0, False)
super(DatabaseTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(DatabaseTestCase, cls).tearDownClass()
old_names = cls.old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, verbosity=0)
ViceroyDjangoTests = build_test_case(
'ViceroyDjangoTests',
test_file,
ViceroyScanner,
DatabaseTestCase,
)
| bsd-3-clause | -3,555,753,147,447,797,000 | 27 | 74 | 0.707707 | false |
guangyingjiang/Mapping_and_Navigation | src/select_goals.py | 1 | 2622 | #!/usr/bin/env python
######################################################################################
# --- select_goals.py Version 1.0
# --- This module reads and displays selected goals on the map.
# ---
# --- 11/02/16 GYJ Initial coding.
######################################################################################
import rospy
import numpy as np
import tf
import math
import geometry_msgs.msg
from geometry_msgs.msg import Point
from visualization_msgs.msg import Marker
######################################################################################
class waypoint(object):
def __init__(self):
self.path = Marker()
self.marker_id = 1
rospy.init_node('echoer')
# subscribe to "/move_base_simple/goal" to get picked way points using 2D Nav Goal in rviz
rospy.Subscriber("/move_base_simple/goal", geometry_msgs.msg.PoseStamped, self.get_way_point)
# display picked way points and path between way points in rviz
self.publisher = rospy.Publisher('visualization_marker', Marker, queue_size = 10)
# fetch clicked way points
def get_way_point(self, msg):
# display way points and path on the map
self.display_way_point(msg.pose.position.x,msg.pose.position.y)
self.display_path(msg.pose.position.x,msg.pose.position.y)
# print picked way points in terminal
# print msg.pose.position.x, msg.pose.position.y
# get orientationn and convert quternion to euler (roll pitch yaw)
quaternion = (
msg.pose.orientation.x,
msg.pose.orientation.y,
msg.pose.orientation.z,
msg.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = math.degrees(euler[2])
print "X " , msg.pose.position.x, "m Y ", msg.pose.position.y, " m Yaw ", yaw, "degrees"
# display way points on the map
def display_way_point(self,x,y):
points = Marker()
points.header.frame_id = "/map" # publish path in map frame
points.type = points.POINTS
points.action = points.ADD
points.lifetime = rospy.Duration(0)
points.id = self.marker_id
self.marker_id += 1
points.scale.x = 0.1
points.scale.y = 0.1
points.color.a = 1.0
points.color.r = 0.0
points.color.g = 0.0
points.color.b = 1.0
points.pose.orientation.w = 1.0
point = Point()
point.x = x
point.y = y
points.points.append(point);
        # Publish the Marker
self.publisher.publish(points)
def run(self):
rospy.spin()
######################################################################################
if __name__ == '__main__':
print "*********** selecte_goals.py: read and display way point on the map ***********"
waypoint().run() | gpl-3.0 | 8,439,585,836,310,511,000 | 33.973333 | 95 | 0.598398 | false |
Kobzol/debug-visualizer | debugger/lldbc/lldb_breakpoint_manager.py | 1 | 2794 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Jakub Beranek
#
# This file is part of Devi.
#
# Devi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Devi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Devi. If not, see <http://www.gnu.org/licenses/>.
#
import os
from debugger.debugee import Breakpoint
from debugger.enums import DebuggerState
from debugger import debugger_api
class LldbBreakpointManager(debugger_api.BreakpointManager):
def __init__(self, debugger):
super(LldbBreakpointManager, self).__init__(debugger)
def get_breakpoints(self):
bps = [self.debugger.target.GetBreakpointAtIndex(i)
for i in xrange(self.debugger.target.GetNumBreakpoints())]
breakpoints = []
for bp in bps:
if bp.num_locations > 0:
location = bp.GetLocationAtIndex(0)
address = location.GetAddress().line_entry
line = address.line
file = os.path.abspath(address.file.fullpath)
breakpoints.append(Breakpoint(bp.id, file, line))
return breakpoints
def toggle_breakpoint(self, location, line):
bp = self.find_breakpoint(location, line)
if bp:
return self.remove_breakpoint(location, line)
else:
return self.add_breakpoint(location, line)
def add_breakpoint(self, location, line):
self.debugger.require_state(DebuggerState.BinaryLoaded)
location = os.path.abspath(location)
bp = self.debugger.target.BreakpointCreateByLocation(location, line)
if bp.IsValid() and bp.num_locations > 0:
return True
else:
self.debugger.target.BreakpointDelete(bp.id)
return False
def find_breakpoint(self, location, line):
location = os.path.abspath(location)
bps = self.get_breakpoints()
for bp in bps:
if bp.location == location and bp.line == line:
return bp
return None
def remove_breakpoint(self, location, line):
self.debugger.require_state(DebuggerState.BinaryLoaded)
bp = self.find_breakpoint(location, line)
if bp:
self.debugger.target.BreakpointDelete(bp.number)
return True
else:
return False
| gpl-3.0 | -1,981,503,231,332,240,600 | 31.870588 | 76 | 0.64388 | false |
cdeboever3/WASP | CHT/chromstat.py | 1 | 4416 |
import sys
import numpy as np
import genome.db
class ChromStats(object):
def __init__(self):
self.n = 0
self.n_nan = 0
self.sum = 0
self.min = None
self.max = None
def mean(self):
"""Calculates mean of sites that are not nan
on this chromsome"""
n = self.n - self.n_nan
if n == 0:
return np.inf
return self.sum / float(n)
def set_from_vals(self, vals):
self.n = vals.size
if str(vals.dtype).startswith('float'):
nan_vals = np.isnan(vals)
self.n_nan = np.sum(nan_vals)
if self.n_nan < self.n:
self.min = np.min(vals[~nan_vals])
self.max = np.max(vals[~nan_vals])
self.sum = np.sum(vals[~nan_vals])
else:
self.min = np.min(vals)
self.max = np.max(vals)
self.sum = np.sum(vals)
def add(self, other):
self.n += other.n
self.n_nan += other.n_nan
self.sum += other.sum
if (self.min is None) or (other.min is not None and
self.min > other.min):
self.min = other.min
if (self.max is None) or (other.max is not None and
self.max < other.max):
self.max = other.max
def __str__(self):
return "n=%d n_nan=%s min=%s max=%s sum=%s" % \
(self.n, str(self.n_nan), str(self.min), str(self.max),
str(self.sum))
def calc_stats(h5f, chrom_list, verbose=False):
"""Calculates stats for each chromosome in provided list as well
as combined stats."""
combined = ChromStats()
for chrom in chrom_list:
chrom_stat = ChromStats()
node_name = "/%s" % chrom.name
if node_name in h5f:
node = h5f.getNode("/%s" % chrom.name)
vals = node[:]
chrom_stat.set_from_vals(vals)
if verbose:
sys.stderr.write("%s %s\n" % (str(chrom), str(chrom_stat)))
else:
sys.stderr.write("skipping chromosome %s because "
"not present in HDF5 file" % chrom.name)
combined.add(chrom_stat)
return combined
def set_stats(h5f, chrom_list, verbose=False):
"""Calculates stats for each chromosome and entire track and
stores them as attributes on the chromosome nodes. The
provided HDF5 file handle must have been opened in append mode"""
combined = ChromStats()
for chrom in chrom_list:
node_name = "/%s" % chrom.name
if node_name in h5f:
chrom_stat = ChromStats()
node = h5f.getNode(node_name)
chrom_stat.set_from_vals(node[:])
node.attrs.n = chrom_stat.n
node.attrs.n_nan = chrom_stat.n_nan
node.attrs.min = chrom_stat.min
node.attrs.max = chrom_stat.max
node.attrs.sum = chrom_stat.sum
node.flush()
if verbose:
sys.stderr.write("%s %s\n" % (str(chrom), str(chrom_stat)))
combined.add(chrom_stat)
else:
sys.stderr.write("skipping chromosome %s because "
"not present in HDF5 file\n" % chrom.name)
return combined
def get_stats(h5f, chrom_list, verbose=False):
"""Retrieves stats that are stored as attributes for the specified
set of chromosomes."""
combined = ChromStats()
chrom_stat = ChromStats()
for chrom in chrom_list:
node_name = "/%s" % chrom.name
if node_name in h5f:
node = h5f.getNode(node_name)
if 'n' not in node.attrs:
raise ValueError("Stat attributes are not set for track %s"
% track.name)
chrom_stat.n = node.attrs.n
chrom_stat.n_nan = node.attrs.n_nan
chrom_stat.min = node.attrs.min
chrom_stat.max = node.attrs.max
chrom_stat.sum = node.attrs.sum
if verbose:
sys.stderr.write("%s %s\n" % (str(chrom), str(chrom_stat)))
combined.add(chrom_stat)
else:
sys.stderr.write("skipping chromosome %s because "
"not present in HDF5 file\n" % chrom.name)
return combined
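# Illustrative sketch (not part of the original module).  The filename, the
# older tables.openFile() API and chromosome objects carrying a .name
# attribute are assumptions based on the calls used above.
def _example_calc(h5_filename, chrom_list):
    import tables
    h5f = tables.openFile(h5_filename, "r")
    try:
        return calc_stats(h5f, chrom_list, verbose=True)
    finally:
        h5f.close()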
| apache-2.0 | -6,585,973,737,630,896,000 | 28.245033 | 75 | 0.519248 | false |
speendo/Photocell | photocell.py | 1 | 2727 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'marcel'
import threading
import os
import select
import time
import atexit
class CheckLight(threading.Thread):
def __init__(self, light_on_method, light_off_method, pin=24, waiting_time=0.5):
threading.Thread.__init__(self)
self.pin_number = pin
# Making the pin available
# First of all, make sure, that the pin is unexported, once the script is terminated
atexit.register(self.unexport_pin)
# Now export the pin (if not already exported)
if not os.path.isdir("/sys/class/gpio/gpio{pin_number}".format(pin_number=self.pin_number)):
with open("/sys/class/gpio/export", "w") as export_pin_file:
export_pin_file.write(str(self.pin_number))
# Define pin as interrupt (with both edges)
# /sys/class/gpio/<pin number>/edge should be writeable
# However, it might be necessary to wait a bit until it is writeable
# Save the current time
start_time = time.time()
# If the questionable file is not writeable, wait ...
while not os.access("/sys/class/gpio/gpio{pin_number}/edge".format(pin_number=self.pin_number), os.W_OK):
			# ... but not longer than waiting_time
if waiting_time < time.time() - start_time:
raise ValueError("Waited for {waiting_time} seconds for \"/sys/class/gpio/gpio{pin_number}/edge\" to be writeable. "
"Either waiting_time is defined too short or there's something wrong with the GPIO-setup. "
.format(waiting_time = waiting_time, pin_number=self.pin_number))
# /sys/class/gpio/<pin number>/edge is here now. Set it to "both"
with open("/sys/class/gpio/gpio{pin_number}/edge".format(pin_number=self.pin_number), "w") as edge_pin_file:
edge_pin_file.write("both")
self.pin_fd = open("/sys/class/gpio/gpio{pin_number}/value".format(pin_number=self.pin_number))
self.epoll = select.epoll()
self.light_on_method = light_on_method
self.light_off_method = light_off_method
self.light_status = False # light is started off
def run(self):
with self.pin_fd:
self.epoll.register(self.pin_fd, select.EPOLLIN | select.EPOLLET)
while True:
events = self.epoll.poll()
if len(events) > 0:
current_light_status = not self.pin_fd.read(1) == "1" # 0 == ON, 1 == OFF
self.pin_fd.seek(0)
if current_light_status != self.light_status:
self.light_status = current_light_status
if self.light_status:
self.light_on_method()
else:
self.light_off_method()
# unexport the pin
def unexport_pin(self):
if os.path.isdir("/sys/class/gpio/gpio{pin_number}".format(pin_number=self.pin_number)):
with open("/sys/class/gpio/unexport", "w") as unexport_pin_file:
unexport_pin_file.write(str(self.pin_number))
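# Minimal demo of CheckLight (illustrative callbacks only; requires a sysfs GPIO
# interface under /sys/class/gpio and permission to export and poll the chosen pin).
if __name__ == "__main__":
	checker = CheckLight(lambda: print("light on"), lambda: print("light off"), pin=24)
	checker.start()
	checker.join()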
| gpl-2.0 | 9,150,732,193,831,011,000 | 35.36 | 120 | 0.686102 | false |
open-synergy/runbot-addons | runbot_secure/__openerp__.py | 1 | 1544 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Runbot Secure Links',
'category': 'Website',
'summary': 'Provide https links',
'version': '8.0.1.0.0',
'description': """
Runbot Secure Links
===================
Serve links to spawned Odoo instances with an https link instead of http
Contributors
------------
* Sandy Carter ([email protected])
""",
'author': "Savoir-faire Linux,Odoo Community Association (OCA)",
'depends': ['runbot'],
'data': [
'runbot_qweb.xml',
],
'installable': True,
}
| agpl-3.0 | -1,551,203,393,975,892,200 | 34.090909 | 78 | 0.599093 | false |
southampton/unimatrix | deskctl/app.py | 1 | 5053 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
from flask import Flask, request, session, abort, g, render_template, url_for
import logging
import binascii
import datetime
class DeskCtlFlask(Flask):
config_file = '/usr/lib/deskctl/deskctl.conf'
class FatalError(Exception):
pass
class DaemonConnectionError(Exception):
pass
################################################################################
def __init__(self, init_object_name):
"""Constructor for the application. Reads the config, sets
up logging, configures Jinja and Flask."""
# Call the superclass (Flask) constructor
super(DeskCtlFlask, self).__init__(init_object_name)
# CSRF exemption support
self._exempt_views = set()
self.before_request(self._csrf_protect)
# CSRF token function in templates
self.jinja_env.globals['csrf_token'] = self._generate_csrf_token
# Load the __init__.py config defaults
self.config.from_object("deskctl.defaultcfg")
# Check the config file exists, if it does not, create one instead
# with a random secret key in it which we generate
if not os.path.exists(self.config_file):
self.logger.info("No config file found; generating new config file")
try:
with open(self.config_file,'w') as fp:
fp.write('SECRET_KEY="' + self.token() + '"')
os.chmod(self.config_file,0700)
except Exception as ex:
raise self.FatalError("Could not create new config file: " + str(ex))
# Load the config file
		self.config.from_pyfile(self.config_file)
# Check all the necessary options have been defined
for cfg in ['SECRET_KEY']:
error = False
if not cfg in self.config:
error = True
else:
if len(self.config[cfg]) == 0:
error = True
if error:
raise ValueError("The configuration option '" + cfg + "' must be set")
# Set up the max log level
if self.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
# Output some startup info
self.logger.info('deskctl version ' + self.config['VERSION'] + ' initialised')
self.logger.info('debug status: ' + str(self.config['DEBUG']))
################################################################################
def token(self,bytes=64):
"""Generates a random token. This code was derived from the
proposed new 'token' functions in Python 3.6, see:
https://bitbucket.org/sdaprano/secrets/"""
return binascii.hexlify(os.urandom(bytes))
################################################################################
def _generate_csrf_token(self):
"""This function is used to generate a CSRF token for use in templates."""
if '_csrf_token' not in session:
session['_csrf_token'] = self.token()
return session['_csrf_token']
################################################################################
def _csrf_protect(self):
"""Performs the checking of CSRF tokens. This check is skipped for the
GET, HEAD, OPTIONS and TRACE methods within HTTP, and is also skipped
for any function that has been added to _exempt_views by use of the
disable_csrf_check decorator."""
## Throw away requests with methods we don't support
if request.method not in ('GET', 'HEAD', 'POST'):
abort(405)
# For methods that require CSRF checking
if request.method == 'POST':
view = self.view_functions.get(request.endpoint)
# Make sure we actually found a view function
if view is not None:
view_location = view.__module__ + '.' + view.__name__
# If the view is not exempt
if not view_location in self._exempt_views:
token = session.get('_csrf_token')
if not token or token != request.form.get('_csrf_token'):
if 'username' in session:
self.logger.warning('CSRF protection alert: %s failed to present a valid POST token', session['username'])
else:
self.logger.warning('CSRF protection alert: a non-logged in user failed to present a valid POST token')
# The user should not have accidentally triggered this so just throw a 400
abort(400)
else:
self.logger.debug('View ' + view_location + ' is exempt from CSRF checks')
################################################################################
def disable_csrf_check(self, view):
"""A decorator that can be used to exclude a view from CSRF validation.
Example usage of disable_csrf_check might look something like this:
@app.disable_csrf_check
@app.route('/some_view')
def some_view():
return render_template('some_view.html')
:param view: The view to be wrapped by the decorator.
"""
view_location = view.__module__ + '.' + view.__name__
self._exempt_views.add(view_location)
self.logger.debug('Added CSRF check exemption for ' + view_location)
return view
################################################################################
def strtime(self,when=None):
if when is None:
dt = datetime.datetime.now()
else:
dt = datetime.datetime.fromtimestamp(int(when))
return dt.strftime("%Y-%m-%d %H:%M:%S")
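# Usage sketch (illustrative; the real application object and its routes are
# created elsewhere in the deskctl package):
#
#   app = DeskCtlFlask(__name__)
#
#   @app.route('/')
#   def index():
#       return render_template('index.html')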
| gpl-3.0 | -4,498,089,863,996,088,300 | 31.6 | 113 | 0.622007 | false |
sniemi/SamPy | sandbox/src1/examples/interactive2.py | 1 | 10393 | #!/usr/bin/env python
# GTK Interactive Console
# (C) 2003, Jon Anderson
# See www.python.org/2.2/license.html for
# license details.
#
import gtk
import gtk.gdk
import code
import os, sys
import pango
import __builtin__
import __main__
banner = """GTK Interactive Python Console
Thanks to Jon Anderson
%s
""" % sys.version
banner += """
Welcome to matplotlib.
help(matplotlib) -- shows a list of all matlab(TM) compatible commands provided
help(plotting) -- shows a list of plot specific commands
"""
class Completer:
"""
Taken from rlcompleter, with readline references stripped, and a local dictionary to use.
"""
def __init__(self, locals):
self.locals = locals
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names
    currently defined in __main__ that match.
"""
import keyword
matches = []
n = len(text)
for list in [keyword.kwlist,__builtin__.__dict__.keys(),__main__.__dict__.keys(), self.locals.keys()]:
for word in list:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in the globals of __main__, it will be evaluated
and its attributes (as revealed by dir()) are used as possible
    completions. (For class instances, class members are also
considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, __main__.__dict__, self.locals)
words = dir(object)
if hasattr(object,'__class__'):
words.append('__class__')
words = words + get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
class OutputStream:
"""
A Multiplexing output stream.
  It can replace another stream, and tee output to the original stream and to
a GTK textview.
"""
def __init__(self,view,old_out,style):
self.view = view
self.buffer = view.get_buffer()
self.mark = self.buffer.create_mark("End",self.buffer.get_end_iter(), False )
self.out = old_out
self.style = style
self.tee = 1
def write(self,text):
if self.tee:
self.out.write(text)
end = self.buffer.get_end_iter()
if not self.view == None:
self.view.scroll_to_mark(self.mark, 0, True, 1, 1)
self.buffer.insert_with_tags(end,text,self.style)
class GTKInterpreterConsole(gtk.ScrolledWindow):
"""
An InteractiveConsole for GTK. It's an actual widget,
so it can be dropped in just about anywhere.
"""
def __init__(self):
gtk.ScrolledWindow.__init__(self)
self.set_policy (gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.text = gtk.TextView()
self.text.set_wrap_mode(True)
self.interpreter = code.InteractiveInterpreter()
self.completer = Completer(self.interpreter.locals)
self.buffer = []
self.history = []
self.banner = banner
self.ps1 = ">>> "
self.ps2 = "... "
self.text.add_events( gtk.gdk.KEY_PRESS_MASK )
self.text.connect( "key_press_event", self.key_pressed )
self.current_history = -1
self.mark = self.text.get_buffer().create_mark("End",self.text.get_buffer().get_end_iter(), False )
#setup colors
self.style_banner = gtk.TextTag("banner")
self.style_banner.set_property( "foreground", "saddle brown" )
self.style_ps1 = gtk.TextTag("ps1")
self.style_ps1.set_property( "foreground", "DarkOrchid4" )
self.style_ps1.set_property( "editable", False )
self.style_ps1.set_property("font", "courier" )
self.style_ps2 = gtk.TextTag("ps2")
self.style_ps2.set_property( "foreground", "DarkOliveGreen" )
self.style_ps2.set_property( "editable", False )
self.style_ps2.set_property("font", "courier" )
self.style_out = gtk.TextTag("stdout")
self.style_out.set_property( "foreground", "midnight blue" )
self.style_err = gtk.TextTag("stderr")
self.style_err.set_property( "style", pango.STYLE_ITALIC )
self.style_err.set_property( "foreground", "red" )
self.text.get_buffer().get_tag_table().add(self.style_banner)
self.text.get_buffer().get_tag_table().add(self.style_ps1)
self.text.get_buffer().get_tag_table().add(self.style_ps2)
self.text.get_buffer().get_tag_table().add(self.style_out)
self.text.get_buffer().get_tag_table().add(self.style_err)
self.stdout = OutputStream(self.text,sys.stdout,self.style_out)
self.stderr = OutputStream(self.text,sys.stderr,self.style_err)
sys.stderr = self.stderr
sys.stdout = self.stdout
self.current_prompt = None
self.write_line(self.banner, self.style_banner)
self.prompt_ps1()
self.add(self.text)
self.text.show()
def reset_history(self):
self.history = []
def reset_buffer(self):
self.buffer = []
def prompt_ps1(self):
self.current_prompt = self.prompt_ps1
self.write_line(self.ps1,self.style_ps1)
def prompt_ps2(self):
self.current_prompt = self.prompt_ps2
self.write_line(self.ps2,self.style_ps2)
def write_line(self,text,style=None):
start,end = self.text.get_buffer().get_bounds()
if style==None:
self.text.get_buffer().insert(end,text)
else:
self.text.get_buffer().insert_with_tags(end,text,style)
self.text.scroll_to_mark(self.mark, 0, True, 1, 1)
def push(self, line):
self.buffer.append(line)
if len(line) > 0:
self.history.append(line)
source = "\n".join(self.buffer)
more = self.interpreter.runsource(source, "<<console>>")
if not more:
self.reset_buffer()
return more
def key_pressed(self,widget,event):
if event.keyval == gtk.gdk.keyval_from_name('Return'):
return self.execute_line()
if event.keyval == gtk.gdk.keyval_from_name('Up'):
self.current_history = self.current_history - 1
if self.current_history < - len(self.history):
self.current_history = - len(self.history)
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name('Down'):
self.current_history = self.current_history + 1
if self.current_history > 0:
self.current_history = 0
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name( 'Home'):
l = self.text.get_buffer().get_line_count() - 1
start = self.text.get_buffer().get_iter_at_line_offset(l,4)
self.text.get_buffer().place_cursor(start)
return True
elif event.keyval == gtk.gdk.keyval_from_name( 'space') and event.state & gtk.gdk.CONTROL_MASK:
return self.complete_line()
return False
def show_history(self):
if self.current_history == 0:
return True
else:
self.replace_line( self.history[self.current_history] )
return True
def current_line(self):
start,end = self.current_line_bounds()
return self.text.get_buffer().get_text(start,end, True)
def current_line_bounds(self):
txt_buffer = self.text.get_buffer()
l = txt_buffer.get_line_count() - 1
start = txt_buffer.get_iter_at_line(l)
if start.get_chars_in_line() >= 4:
start.forward_chars(4)
end = txt_buffer.get_end_iter()
return start,end
def replace_line(self,txt):
start,end = self.current_line_bounds()
self.text.get_buffer().delete(start,end)
self.write_line(txt)
def execute_line(self, line=None):
if line is None:
line = self.current_line()
self.write_line("\n")
else:
self.write_line(line + "\n")
more = self.push(line)
self.text.get_buffer().place_cursor(self.text.get_buffer().get_end_iter())
if more:
self.prompt_ps2()
else:
self.prompt_ps1()
self.current_history = 0
self.window.raise_()
return True
def complete_line(self):
line = self.current_line()
tokens = line.split()
token = tokens[-1]
completions = []
p = self.completer.complete(token,len(completions))
while p != None:
completions.append(p)
p = self.completer.complete(token, len(completions))
if len(completions) != 1:
self.write_line("\n")
self.write_line("\n".join(completions), self.style_ps1)
self.write_line("\n")
self.current_prompt()
self.write_line(line)
else:
i = line.rfind(token)
line = line[0:i] + completions[0]
self.replace_line(line)
return True
def main():
w = gtk.Window()
console = GTKInterpreterConsole()
console.set_size_request(640,480)
w.add(console)
def destroy(arg=None):
gtk.main_quit()
def key_event(widget,event):
if gtk.gdk.keyval_name( event.keyval) == 'd' and \
event.state & gtk.gdk.CONTROL_MASK:
destroy()
return False
w.connect("destroy", destroy)
w.add_events( gtk.gdk.KEY_PRESS_MASK )
w.connect( 'key_press_event', key_event)
w.show_all()
console.execute_line('import matplotlib')
console.execute_line("matplotlib.use('GTKAgg')")
console.execute_line('matplotlib.interactive(1)')
console.execute_line('from pylab import *')
if len(sys.argv)>1:
fname = sys.argv[1]
if not os.path.exists(fname):
print >> sys.stderr, '%s does not exist' % fname
for line in file(fname):
line = line.strip()
console.execute_line(line)
gtk.main()
if __name__ == '__main__':
main()
| bsd-2-clause | -2,978,916,656,849,842,700 | 26.494709 | 106 | 0.638507 | false |
levi-rs/gifted | gifted/cli.py | 1 | 1486 | import os
import argparse
from gifted.gifted import load_images, write_gif
# Strings
PNG = 'PNG'
png = 'png'
JPG = 'JPG'
jpg = 'jpg'
GIF = 'GIF'
gif = 'gif'
OUTPUT_FILE = "output.gif"
DEFAULT_DURATION = 0.2
def get_args():
"""
Parses command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--directory',
type=str, required=True,
help="Folder to load images from"
)
parser.add_argument(
'-e', '--extension',
type=str, default=PNG, choices=[PNG, png, JPG, jpg, GIF, gif],
help="Image extension type"
)
parser.add_argument(
'-o', '--output-file',
type=str, default=OUTPUT_FILE,
help='The name of the output file. Defaults to {0}'.format(OUTPUT_FILE)
)
parser.add_argument(
'--duration',
type=float, default=DEFAULT_DURATION,
help="Duration between frames. Defaults to {0}".format(DEFAULT_DURATION)
)
parser.add_argument(
'--dither',
type=bool, default=False, choices=[True, False],
help="Use dither when creating GIF"
)
return parser.parse_args()
def main():
args = get_args()
if not os.path.isdir(args.directory):
raise ValueError("Cannot find directory {0}".format(args.directory))
imgs = load_images(args.directory, args.extension)
write_gif(args.output_file, imgs, args.duration, args.dither)
if __name__ == "__main__":
main()
| bsd-3-clause | 3,649,467,000,269,699,000 | 21.179104 | 80 | 0.603634 | false |
edwardpopoola/pypaystack | tests/test_02_customer.py | 1 | 1994 | from . import test_auth_key, uuid4, Customer, TestCase
class TestCustomer(TestCase):
def setUp(self):
super(TestCustomer, self).setUp()
self.assertNotEqual(test_auth_key, None)
self.customer = Customer(authorization_key=test_auth_key)
def test_customer_setup_and_update(self):
"""
Integration test for creating customer and updating created customer details
"""
# using random generator for email id to ensure email is unique, thus ensuring success on retests
user_email = f"{uuid4()}@mail.com"
user_details = {"email": user_email,
"first_name": "Test",
"last_name": "Customer",
"phone": "08012345678"}
updated_user_details = {
"email": user_email,
"first_name": "Updated",
"last_name": "Customer",
"phone": "080987654321"}
def create_customer():
(status_code, status, response_msg,
created_customer_data) = self.customer.create(**user_details)
self.assertEqual(status_code, 200)
self.assertEqual(status, True)
self.assertEqual(response_msg, 'Customer created')
# assert if subset
self.assertLessEqual(
user_details.items(), created_customer_data.items())
return created_customer_data
def update_customer():
(status_code, status, response_msg, updated_customer_data) = self.customer.update(
user_id=created_customer_data['id'], **updated_user_details)
self.assertEqual(status_code, 200)
self.assertEqual(status, True)
self.assertEqual(response_msg, 'Customer updated')
# assert if subset
self.assertLessEqual(
updated_user_details.items(), updated_customer_data.items())
created_customer_data = create_customer()
update_customer()
| mit | -9,124,287,658,959,089,000 | 40.541667 | 105 | 0.585757 | false |
tswast/google-cloud-python | billingbudgets/google/cloud/billing_budgets_v1beta1/types.py | 1 | 1563 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.cloud.billing_budgets_v1beta1.proto import budget_model_pb2
from google.cloud.billing_budgets_v1beta1.proto import budget_service_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.type import money_pb2
_shared_modules = [empty_pb2, field_mask_pb2, money_pb2]
_local_modules = [budget_model_pb2, budget_service_pb2]
names = []
for module in _shared_modules: # pragma: NO COVER
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.billing_budgets_v1beta1.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| apache-2.0 | 6,767,624,625,924,204,000 | 32.255319 | 74 | 0.731926 | false |
terasaur/seedbank | src/seedbank/messaging/server_control_handler.py | 1 | 3657 | #
# Copyright 2012 ibiblio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TODO: rename file to control_message_handler.py
from terasaur.messaging.rabbitmq_message_handler import ControlMessageHandler
from seedbank.server.libtorrent_session import LibtorrentSessionQueueItem
from seedbank.messaging.server_control_message import ServerPingResponseMessage
import seedbank.server.shared as seedbank_shared
class SeedbankControlMessageHandler(ControlMessageHandler):
def _handle_action(self, action, data):
if action == 'publish_stats':
self._handle_publish_stats(bool(data['enable']))
elif action == 'upload':
self._handle_upload(data)
elif action == 'ping_request':
self._handle_ping(data)
else:
self._log.warning('Control message received without valid action (%s)' % action)
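    # Example upload payload passed in as `data` (field values are illustrative;
    # the 'action' key itself is extracted and dispatched by the base handler,
    # and the real messages arrive from the seedbank web frontend over RabbitMQ):
    #   {'upload_action': 'start',
    #    'info_hash': '0123456789abcdef...',
    #    'torrent_file': '/tmp/example.torrent'}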
def _handle_publish_stats(self, enable):
if enable:
config = self._server._get_config()
item = LibtorrentSessionQueueItem('publish_stats', {'enable': True, 'config': config})
else:
item = LibtorrentSessionQueueItem('publish_stats', {'enable': False})
self._log.info('Received publish_stats control message (%s)' % enable)
seedbank_shared.session_manager.send('server', item)
def _handle_upload(self, data):
if not data.has_key('upload_action'):
self._log.error('Missing upload_action param in upload control message')
return
if not data.has_key('info_hash'):
self._log.error('Missing info_hash param in upload control message')
return
upload_action = data['upload_action']
upload_manager = seedbank_shared.upload_manager
if upload_action == 'start':
if data.has_key('torrent_file'):
torrent_file = data['torrent_file']
else:
torrent_file = None
upload_manager.start(info_hash=data['info_hash'], torrent_file=torrent_file)
elif upload_action == 'stop':
upload_manager.stop(info_hash=data['info_hash'])
elif upload_action == 'cancel':
upload_manager.cancel(info_hash=data['info_hash'])
elif upload_action == 'delete':
upload_manager.delete(info_hash=data['info_hash'])
elif upload_action == 'status':
upload_manager.status(info_hash=data['info_hash'])
else:
self._log.error('Invalid upload_action param in upload control message')
def _handle_torrent(self):
pass
def _handle_ping(self, data):
if self._verbose:
self._log.info('Sending ping reply (reply_to: %s, correlation_id: %s)' % (data['reply_to'], data['correlation_id']))
message = ServerPingResponseMessage(correlation_id=data['correlation_id'])
seedbank_shared.mq_out.publish(str(message), routing_key=data['reply_to'])
return
count = 0
while count < 100:
print 'sending ping reply (%i)' % count
seedbank_shared.mq_out.publish(str(message), routing_key=data['reply_to'])
count += 1
| apache-2.0 | 5,971,937,317,664,378,000 | 42.023529 | 128 | 0.648346 | false |
sigmunau/nav | tests/unittests/arnold/arnold_test.py | 1 | 1070 | """Basic tests for nav.arnold"""
import unittest
from nav.arnold import find_input_type
class TestArnold(unittest.TestCase):
"""Tests for nav.arnold"""
def test_find_input_type(self):
"""Test find_input_type"""
ip_address = '158.38.129.113'
mac = '5c:f9:dd:78:72:8a'
self.assertEqual(find_input_type(ip_address), 'IP')
self.assertEqual(find_input_type(mac), 'MAC')
self.assertEqual(find_input_type(123), 'SWPORTID')
def test_typo_not_accepted(self):
"""Tests for weakness in IPy library"""
ip_address = '158.38.129'
self.assertEqual(find_input_type(ip_address), 'UNKNOWN')
def test_end_on_zero(self):
"""Tests that IP-addresses that ends on zero are accepted"""
ip_address = '158.38.129.0'
self.assertEqual(find_input_type(ip_address), 'IP')
def test_ipv6(self):
"""Tests that a simple ipv6 address is recognized"""
ip_address = 'FE80:0000:0000:0000:0202:B3FF:FE1E:8329'
self.assertEqual(find_input_type(ip_address), 'IP')
| gpl-2.0 | -2,306,070,185,254,072,300 | 33.516129 | 68 | 0.630841 | false |
sherpya/archiver | utils.py | 1 | 3927 | #!/usr/bin/env python
# -*- Mode: Python; tab-width: 4 -*-
#
# Netfarm Mail Archiver - release 2
#
# Copyright (C) 2005-2007 Gianluigi Tiesi <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# ======================================================================
## @file utils.py
## Common utils
import re
from mimify import mime_decode
from base64 import decodestring
from rfc822 import parseaddr
from md5 import new as MD5
mime_head = re.compile('=\\?(.*?)\\?(\w)\\?([^? \t\n]+)\\?=', re.IGNORECASE)
encodings = { 'q': mime_decode, 'b': decodestring }
CHECKHEADERS = [ 'from', 'subject', 'date', 'message-id', 'x-archiver-id' ]
HASHHEADERS = [ 'message-id', 'from', 'to', 'cc', 'subject' ]
def mime_decode_header(line):
"""workaround to python mime_decode_header
The original code doesn't support base64"""
## TODO: check combined charsets headers
newline = ''
charset = 'latin-1'
pos = 0
while 1:
res = mime_head.search(line, pos)
if res is None:
break
charset = res.group(1)
enctype = res.group(2).lower()
match = res.group(3)
if encodings.has_key(enctype):
match = ' '.join(match.split('_'))
newline = newline + line[pos:res.start(0)] + encodings[enctype](match)
else:
newline = newline + line[pos:res.start(0)] + match
pos = res.end(0)
decoded = newline + line[pos:]
return decoded.decode(charset, 'replace')
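# Usage sketch (illustrative header value; the result is a unicode string
# decoded with the charset named in the encoded word):
#
#   subject = mime_decode_header('=?iso-8859-1?q?caf=E9?= menu')
#   # -> u'caf\xe9 menu'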
def unquote(text):
return ''.join(text.split('"'))
def split_hdr(header, value, dict):
""" Multiline headers splitting"""
hdr = '='.join([header, value]).replace('\r', '').replace('\n', '')
hdr_list = hdr.split(';')
for hdr in hdr_list:
hdr = hdr.strip()
if hdr.find('=') == -1: continue # invalid
key, value = hdr.split('=', 1)
if len(value) == 0: continue # empty
key = key.strip()
value = unquote(value).strip()
dict[key] = value
def parse_message(submsg):
"""Parse a sub message"""
found = None
if submsg.dict.has_key('content-type'):
ct = submsg.dict['content-type']
hd = {}
split_hdr('Content-Type', ct, hd)
if submsg.dict.has_key('content-disposition'):
cd = submsg.dict['content-disposition']
split_hdr('Content-Disposition', cd, hd)
### Hmm nice job clients, filename or name?
if not hd.has_key('name') and hd.has_key('filename'):
hd['name'] = hd['filename']
### Found an attachment
if hd.has_key('name'):
found = { 'name': hd['name'], 'content-type': hd['Content-Type'] }
return found
def dupe_check(headers):
"""Check for duplicate headers
Some headers should be unique"""
check = []
for hdr in headers:
hdr = hdr.strip()
if hdr.find(':') == -1: continue
key = hdr.split(':', 1)[0]
key = key.lower()
if key in check and key in CHECKHEADERS:
return key
check.append(key)
return None
def safe_parseaddr(address):
address = parseaddr(address)[1]
if address is None or (address.find('@') == -1):
return None
l, d = address.split('@', 1)
l = l.strip()
d = d.strip()
if (len(l) == 0) or (len(d) == 0):
return None
return address
def hash_headers(getter):
m = MD5()
for header in HASHHEADERS:
m.update(getter(header, ''))
return m.hexdigest()
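# Usage sketch (illustrative; any callable with a dict.get style signature can
# serve as the getter, e.g. the get method of an rfc822.Message, which would
# need an extra import here):
#
#   msg = rfc822.Message(open('mail.eml'))
#   digest = hash_headers(msg.get)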
| gpl-2.0 | -4,113,292,646,432,418,000 | 30.416 | 82 | 0.583142 | false |
trading-dev/trading-coin | qa/rpc-tests/multi_rpc.py | 1 | 4587 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to TDC.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "TDC.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| mit | -5,702,095,949,871,202,000 | 36.909091 | 129 | 0.641596 | false |
111pontes/ydk-py | openconfig/ydk/models/openconfig/openconfig_policy_types.py | 1 | 7027 | """ openconfig_policy_types
This module contains general data definitions for use in routing
policy. It can be imported by modules that contain protocol\-
specific policy conditions and actions.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class MatchSetOptionsRestrictedTypeEnum(Enum):
"""
MatchSetOptionsRestrictedTypeEnum
Options that govern the behavior of a match statement. The
default behavior is ANY, i.e., the given value matches any
of the members of the defined set. Note this type is a
restricted version of the match\-set\-options\-type.
.. data:: ANY = 0
match is true if given value matches any member
of the defined set
.. data:: INVERT = 1
match is true if given value does not match any
member of the defined set
"""
ANY = 0
INVERT = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['MatchSetOptionsRestrictedTypeEnum']
class MatchSetOptionsTypeEnum(Enum):
"""
MatchSetOptionsTypeEnum
Options that govern the behavior of a match statement. The
default behavior is ANY, i.e., the given value matches any
of the members of the defined set
.. data:: ANY = 0
match is true if given value matches any member
of the defined set
.. data:: ALL = 1
match is true if given value matches all
members of the defined set
.. data:: INVERT = 2
match is true if given value does not match any
member of the defined set
"""
ANY = 0
ALL = 1
INVERT = 2
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['MatchSetOptionsTypeEnum']
class AttributeComparisonIdentity(object):
"""
base type for supported comparison operators on route
attributes
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['AttributeComparisonIdentity']['meta_info']
class InstallProtocolTypeIdentity(object):
"""
Base type for protocols which can install prefixes into the
RIB
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['InstallProtocolTypeIdentity']['meta_info']
class AttributeLeIdentity(AttributeComparisonIdentity):
"""
<= comparison
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
AttributeComparisonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['AttributeLeIdentity']['meta_info']
class DirectlyConnectedIdentity(InstallProtocolTypeIdentity):
"""
A directly connected route
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['DirectlyConnectedIdentity']['meta_info']
class AttributeEqIdentity(AttributeComparisonIdentity):
"""
== comparison
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
AttributeComparisonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['AttributeEqIdentity']['meta_info']
class BgpIdentity(InstallProtocolTypeIdentity):
"""
BGP
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['BgpIdentity']['meta_info']
class AttributeGeIdentity(AttributeComparisonIdentity):
"""
>= comparison
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
AttributeComparisonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['AttributeGeIdentity']['meta_info']
class OspfIdentity(InstallProtocolTypeIdentity):
"""
OSPFv2
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['OspfIdentity']['meta_info']
class LocalAggregateIdentity(InstallProtocolTypeIdentity):
"""
Locally defined aggregate route
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['LocalAggregateIdentity']['meta_info']
class IsisIdentity(InstallProtocolTypeIdentity):
"""
IS\-IS
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['IsisIdentity']['meta_info']
class Ospf3Identity(InstallProtocolTypeIdentity):
"""
OSPFv3
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['Ospf3Identity']['meta_info']
class StaticIdentity(InstallProtocolTypeIdentity):
"""
Locally\-installed static route
"""
_prefix = 'ptypes'
_revision = '2015-10-09'
def __init__(self):
InstallProtocolTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_policy_types as meta
return meta._meta_table['StaticIdentity']['meta_info']
| apache-2.0 | 5,542,493,948,085,486,000 | 19.48688 | 80 | 0.64181 | false |
MechanisM/djangodash2011 | test_project/test_app/urls.py | 1 | 1313 | from django.conf.urls.defaults import *
from staste.charts.views import PieChart, TimeserieChart, LatestCountAndAverageChart
from staste.middleware import response_time_metrica
from .views import IndexView
from .metrics import gender_age_metrica
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name="index"),
url(r'^pie/$',
PieChart.as_view(metrica=gender_age_metrica,
axis_keyword='gender'),
name='gender_pie'),
url(r'^timeline/$',
TimeserieChart.as_view(metrica=gender_age_metrica),
name='gender_timeline'),
url(r'^requests/pie/$',
PieChart.as_view(metrica=response_time_metrica,
axis_keyword='view'),
name='requests_pie'),
url(r'^requests/$',
LatestCountAndAverageChart.as_view(metrica=response_time_metrica,
title='Requests count and average response time'),
name='requests_timeserie')
)
| bsd-3-clause | 8,659,286,452,042,558,000 | 41.354839 | 112 | 0.471439 | false |
alexeyshulzhenko/OBDZ_Project | OnlineAgecy/views.py | 1 | 18388 | #!python
#OnlineAgecy/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from .forms import *
@login_required(login_url="login/")
def home(request):
return render(request, "static_pages/home.html")
#--------------------Clients Views------------------------------#
#################################################################
@login_required(login_url="login/")
def clients(request):
    # fetch all clients from the database
clients = Client.objects.raw('SELECT * FROM OnlineAgecy_client')
# now return the rendered template
return render(request, "clients/clients_list.html", {'clients': clients})
@login_required(login_url="login/")
def client_new(request):
if request.method == "POST":
form = ClientForm(request.POST)
if form.is_valid():
client = form.save(commit=False)
client.save()
return redirect('clients')
else:
form = ClientForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def client_detail(request, id):
client = get_object_or_404(Client, id=id)
return render(request, 'clients/client_detail.html', {'client': client})
@login_required(login_url="login/")
def client_edit(request, id):
client = get_object_or_404(Client, id=id)
if request.method == "POST":
form = ClientForm(request.POST, instance=client)
if form.is_valid():
client = form.save(commit=False)
client.save()
return redirect('client_detail', id=id)
else:
form = ClientForm(instance=client)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def delete_client(request, id):
query = Client.objects.get(id=id)
query.delete()
return redirect('clients')
@login_required(login_url="login/")
def all_clients_contracts(request, id):
    # fetch all contracts for the given client
    items = Contract.objects.raw('SELECT * FROM OnlineAgecy_contract WHERE Client_id_id = %s', [id])
# now return the rendered template
return render(request, "clients/allUserContracts.html", {'items': items})
@login_required(login_url="login/")
def all_clients_briefs(request, id):
    # fetch all briefs for the given client
    items = Brief.objects.raw('SELECT * FROM OnlineAgecy_brief WHERE Client_id_id = %s', [id])
# now return the rendered template
return render(request, "clients/allUserBriefs.html", {'items': items})
@login_required(login_url="login/")
def clients_services_count(request):
items = Client.objects.raw('SELECT OnlineAgecy_client.Name, count(OnlineAgecy_contract_Services.id) AS num, OnlineAgecy_client.id, OnlineAgecy_contract.id '
'FROM (OnlineAgecy_client INNER JOIN OnlineAgecy_contract ON OnlineAgecy_client.id = OnlineAgecy_contract.Client_id_id)INNER JOIN OnlineAgecy_contract_Services ON OnlineAgecy_contract_Services.contract_id = OnlineAgecy_contract.id '
'GROUP BY OnlineAgecy_client.id '
'ORDER BY count(OnlineAgecy_contract_Services.id) DESC')
return render(request, 'clients/clients_services.html', {'items': items})
def all_clients_bills(request, id):
items = Client.objects.raw('SELECT OnlineAgecy_client.id, OnlineAgecy_client.Name AS Name, OnlineAgecy_contract.id, OnlineAgecy_act.id, OnlineAgecy_bill.Act_id_id AS Act_id '
'FROM ((OnlineAgecy_client INNER JOIN OnlineAgecy_contract ON OnlineAgecy_client.id = OnlineAgecy_contract.Client_id_id) INNER JOIN OnlineAgecy_act ON OnlineAgecy_contract.id = OnlineAgecy_act.Contract_id_id) INNER JOIN OnlineAgecy_bill ON OnlineAgecy_act.id=OnlineAgecy_bill.Act_id_id '
                               'WHERE OnlineAgecy_client.id = %s', [id])
return render(request, 'clients/allUserBiils.html', {'items': items})
def fresh_clients(request):
items = Client.objects.raw('SELECT id ,Name FROM OnlineAgecy_client WHERE id NOT IN (SELECT Client_id_id '
'FROM OnlineAgecy_contract WHERE NOT EXISTS (SELECT service_id '
'FROM OnlineAgecy_contract_Services WHERE service_id = OnlineAgecy_contract.id))')
return render(request, 'clients/blacklist.html', {'items': items})
#--------------------Contracts Views------------------------------#
###################################################################
@login_required(login_url="login/")
def contracts(request):
    # fetch all contracts from the database
contracts = Contract.objects.raw('SELECT * FROM OnlineAgecy_contract')
# now return the rendered template
return render(request, "contracts/contracts_list.html", {'contracts': contracts})
@login_required(login_url="login/")
def contract_new(request):
if request.method == "POST":
form = ContractForm(request.POST)
if form.is_valid():
contract = form.save(commit=False)
contract.save()
form.save_m2m()
return redirect('contracts')
else:
form = ContractForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def contract_detail(request, id):
contract = get_object_or_404(Contract, id=id)
return render(request, 'contracts/contract_detail.html', {'contract': contract})
@login_required(login_url="login/")
def contract_edit(request, id):
contract = get_object_or_404(Contract, id=id)
if request.method == "POST":
form = ContractForm(request.POST, instance=contract)
if form.is_valid():
contract = form.save(commit=False)
contract.save()
return redirect('contract_detail', id=id)
else:
form = ContractForm(instance=contract)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def contracts_by_date(request, Date):
    contracts = Contract.objects.raw('SELECT * FROM OnlineAgecy_contract WHERE Date = %s', [Date])
return render(request, "contracts/contracts_list.html", {'contracts': contracts})
@login_required(login_url="login/")
def delete_contract(request, id):
query = Contract.objects.get(id=id)
query.delete()
return redirect('contracts')
@login_required(login_url="login/")
def contracts_services(request):
items = Contract.objects.raw('SELECT OnlineAgecy_client.id, OnlineAgecy_client.Name AS clientName , OnlineAgecy_contract.id, OnlineAgecy_contract.Client_id_id, OnlineAgecy_contract_Services.contract_id, OnlineAgecy_service.id, OnlineAgecy_service.Name AS service, OnlineAgecy_contract_Services.service_id AS id '
'FROM ((OnlineAgecy_service INNER JOIN OnlineAgecy_contract_Services ON OnlineAgecy_contract_Services.service_id = OnlineAgecy_service.id) INNER JOIN OnlineAgecy_contract ON OnlineAgecy_contract_Services.contract_id = OnlineAgecy_contract.id) INNER JOIN OnlineAgecy_client ON OnlineAgecy_client.id = OnlineAgecy_contract.Client_id_id')
return render(request, "contracts/contracts_services.html", {'items': items})
#--------------------Manager Views------------------------------#
#################################################################
@login_required(login_url="login/")
def managers(request):
    # fetch all managers from the database
managers = Manager.objects.raw('SELECT * FROM OnlineAgecy_manager')
# now return the rendered template
return render(request, "manager/manager_list.html", {'managers': managers})
@login_required(login_url="login/")
def manager_new(request):
if request.method == "POST":
form = ManagerForm(request.POST)
if form.is_valid():
manager = form.save(commit=False)
manager.save()
return redirect('managers')
else:
form = ManagerForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def manager_detail(request, id):
manager = get_object_or_404(Manager, id=id)
return render(request, 'manager/manager_detail.html', {'manager': manager})
@login_required(login_url="login/")
def manager_edit(request, id):
manager = get_object_or_404(Manager, id=id)
if request.method == "POST":
form = ManagerForm(request.POST, instance=manager)
if form.is_valid():
manager = form.save(commit=False)
manager.save()
return redirect('manager_detail', id=id)
else:
form = ManagerForm(instance=manager)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def managers_clients_count(request):
items = Manager.objects.raw('SELECT OnlineAgecy_manager.Name, count(OnlineAgecy_client.id) AS num, OnlineAgecy_contract.id FROM (OnlineAgecy_manager INNER JOIN OnlineAgecy_contract ON OnlineAgecy_manager.id = OnlineAgecy_contract.Manager_id_id)INNER JOIN OnlineAgecy_client ON OnlineAgecy_contract.Manager_id_id = OnlineAgecy_client.id GROUP BY OnlineAgecy_client.id ORDER BY count(OnlineAgecy_client.id) DESC')
return render(request, 'manager/manager_clients.html', {'items': items})
@login_required(login_url="login/")
def delete_manager(request, id):
query = Manager.objects.get(id=id)
query.delete()
return redirect('managers')
#--------------------Brief Views------------------------------#
###############################################################
@login_required(login_url="login/")
def brief(request):
    # fetch all briefs from the database
briefs = Brief.objects.raw('SELECT * FROM OnlineAgecy_brief')
# now return the rendered template
return render(request, "briefs/briefs_list.html", {'briefs': briefs})
@login_required(login_url="login/")
def brief_new(request):
if request.method == "POST":
form = BriefForm(request.POST)
if form.is_valid():
brief = form.save(commit=False)
brief.save()
return redirect('briefs')
else:
form = BriefForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def brief_detail(request, id):
brief = get_object_or_404(Brief, id=id)
return render(request, 'briefs/brief_detail.html', {'brief': brief})
@login_required(login_url="login/")
def brief_edit(request, id):
brief = get_object_or_404(Brief, id=id)
if request.method == "POST":
form = BriefForm(request.POST, instance=brief)
if form.is_valid():
brief = form.save(commit=False)
brief.save()
return redirect('brief_detail', id=id)
else:
form = BriefForm(instance=brief)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def delete_brief(request, id):
query = Brief.objects.get(id=id)
query.delete()
return redirect('briefs')
#--------------------Services Views------------------------------#
##################################################################
@login_required(login_url="login/")
def services(request):
services = Service.objects.raw('SELECT * FROM OnlineAgecy_service')
return render(request, "services/services_list.html", {'services': services})
@login_required(login_url="login/")
def services_new(request):
if request.method == "POST":
form = ServiceForm(request.POST)
if form.is_valid():
services = form.save(commit=False)
services.save()
return redirect('services')
else:
form = ServiceForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def service_detail(request, id):
service = get_object_or_404(Service, id=id)
return render(request, 'services/service_detail.html', {'service': service})
@login_required(login_url="login/")
def service_edit(request, id):
service = get_object_or_404(Service, id=id)
if request.method == "POST":
form = ServiceForm(request.POST, instance=service)
if form.is_valid():
service = form.save(commit=False)
service.save()
return redirect('service_detail', id=id)
else:
form = ServiceForm(instance=service)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def service_all_clients(request, id):
    # fetch all contracts (with client and service names) that include the given service
    services = Contract.objects.raw('SELECT OnlineAgecy_contract.id, OnlineAgecy_contract.Client_id_id, OnlineAgecy_client.Name AS clientName, OnlineAgecy_service.Name AS service '
                                    'FROM ((OnlineAgecy_contract INNER JOIN OnlineAgecy_client ON OnlineAgecy_client.id = OnlineAgecy_contract.Client_id_id) INNER JOIN OnlineAgecy_contract_Services ON OnlineAgecy_contract_Services.contract_id = OnlineAgecy_contract.id) INNER JOIN OnlineAgecy_service ON OnlineAgecy_service.id = OnlineAgecy_contract_Services.service_id '
                                    'WHERE OnlineAgecy_contract_Services.service_id = %s', [id])
return render(request, "services/allClientServices.html", {'services': services})
@login_required(login_url="login/")
def delete_service(request, id):
query = Service.objects.get(id=id)
query.delete()
return redirect('services')
#--------------------contractors Views------------------------------#
#####################################################################
@login_required(login_url="login/")
def contractors(request):
contractors = Contractor.objects.raw('SELECT * FROM OnlineAgecy_contractor')
return render(request, "contractors/contractors_list.html", {'contractors': contractors})
@login_required(login_url="login/")
def contractors_new(request):
if request.method == "POST":
form = ContractorForm(request.POST)
if form.is_valid():
contractors = form.save(commit=False)
contractors.save()
return redirect('contractors')
else:
form = ContractorForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def contractor_detail(request, id):
contractor = get_object_or_404(Contractor, id=id)
return render(request, 'contractors/contractor_detail.html', {'contractor': contractor})
@login_required(login_url="login/")
def contractor_edit(request, id):
contractor = get_object_or_404(Contractor, id=id)
if request.method == "POST":
form = ContractorForm(request.POST, instance=contractor)
if form.is_valid():
contractor = form.save(commit=False)
contractor.save()
return redirect('contractor_detail', id=id)
else:
form = ContractorForm(instance=contractor)
return render(request, 'layouts/form.html', {'form': form})
def newest_contractors(request):
items = Contractor.objects.raw('SELECT id ,Name FROM OnlineAgecy_contractor WHERE id NOT in(SELECT id AS Serv_id FROM OnlineAgecy_service '
'WHERE NOT EXISTS (SELECT contract_id, service_id FROM OnlineAgecy_contract_Services '
'WHERE OnlineAgecy_contract_Services.service_id = OnlineAgecy_service.id))')
return render(request, 'contractors/new_contractors.html', {'items': items})
@login_required(login_url="login/")
def delete_contractor(request, id):
query = Contractor.objects.get(id=id)
query.delete()
return redirect('contractors')
#--------------------Act Views------------------------------#
#####################################################################
@login_required(login_url="login/")
def acts(request):
acts = Act.objects.raw('SELECT * FROM OnlineAgecy_act')
return render(request, "acts/act_list.html", {'acts': acts})
@login_required(login_url="login/")
def act_new(request):
if request.method == "POST":
form = ActForm(request.POST)
if form.is_valid():
acts = form.save(commit=False)
acts.save()
return redirect('acts')
else:
form = ActForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def act_detail(request, id):
act = get_object_or_404(Act, id=id)
return render(request, 'acts/act_detail.html', {'act': act})
@login_required(login_url="login/")
def act_edit(request, id):
act = get_object_or_404(Act, id=id)
if request.method == "POST":
form = ActForm(request.POST, instance=act)
if form.is_valid():
act = form.save(commit=False)
act.save()
return redirect('act_detail', id=id)
else:
form = ActForm(instance=act)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def delete_act(request, id):
query = Act.objects.get(id=id)
query.delete()
return redirect('acts')
#--------------------Bill Views------------------------------#
#####################################################################
@login_required(login_url="login/")
def bills(request):
bills = Bill.objects.raw('SELECT * FROM OnlineAgecy_bill')
return render(request, "bills/bills_list.html", {'bills': bills})
@login_required(login_url="login/")
def bills_new(request):
if request.method == "POST":
form = BillForm(request.POST)
if form.is_valid():
bills = form.save(commit=False)
bills.save()
return redirect('bills')
else:
form = BillForm()
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def bills_detail(request, id):
bill = get_object_or_404(Bill, id=id)
return render(request, 'bills/bill_detail.html', {'bill': bill})
@login_required(login_url="login/")
def bills_edit(request, id):
bill = get_object_or_404(Bill, id=id)
if request.method == "POST":
form = BillForm(request.POST, instance=bill)
if form.is_valid():
bill = form.save(commit=False)
bill.save()
return redirect('bills_detail', id=id)
else:
form = BillForm(instance=bill)
return render(request, 'layouts/form.html', {'form': form})
@login_required(login_url="login/")
def delete_bill(request, id):
query = Bill.objects.get(id=id)
query.delete()
return redirect('bills') | gpl-3.0 | -6,328,619,585,197,924,000 | 38.461373 | 416 | 0.637753 | false |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/Geo/__init__.py | 1 | 2770 | # Copyright 2001 by Katharine Lindner. All rights reserved.
# Copyright 2006 by PeterC. All rights reserved.
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for files from NCBI's Gene Expression Omnibus (GEO).
http://www.ncbi.nlm.nih.gov/geo/
"""
import Record
def _read_key_value(line):
words = line[1:].split("=", 1)
try:
key, value = words
value = value.strip()
except ValueError:
key = words[0]
value = ""
key = key.strip()
return key, value
def parse(handle):
record = None
for line in handle:
line = line.strip('\n').strip('\r')
if not line: continue # Ignore empty lines
c = line[0]
if c=='^':
if record: yield record
record = Record.Record()
record.entity_type, record.entity_id = _read_key_value(line)
elif c=='!':
if line in ('!Sample_table_begin',
'!Sample_table_end',
'!Platform_table_begin',
'!Platform_table_end'):
continue
key, value = _read_key_value(line)
if key in record.entity_attributes:
if type(record.entity_attributes[key])==list:
record.entity_attributes[key].append(value)
else:
existing = record.entity_attributes[key]
record.entity_attributes[key] = [existing, value]
else:
record.entity_attributes[key] = value
elif c=='#':
key, value = _read_key_value(line)
assert key not in record.col_defs
record.col_defs[key] = value
else:
row = line.split("\t")
record.table_rows.append(row)
yield record
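# Illustrative sketch of the line-oriented SOFT-style input parse() expects
# (the identifiers below are made up, not taken from a real GEO submission):
#
#   ^SAMPLE = GSM0
#   !Sample_title = an example sample
#   #ID_REF = name of the identifier column
#   AFFX-1<TAB>0.5
#
# Typical use:
#   for record in parse(open("example.soft")):
#       print record.entity_type, record.entity_id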
class Iterator:
"""Iterator interface to move over a file of Geo entries one at a time.
Uses the fact that each GEO record begins with a line starting ^ (caret).
"""
def __init__(self, handle, parser = None):
"""Initialize the iterator.
Arguments:
o handle - A handle with GEO entries to iterate through.
o parser - An optional parser to pass the entries through before
returning them. If None, then the raw entry will be returned.
"""
import warnings
warnings.warn("Bio.Geo.Iterator(handle, parser) is deprecated. Please use Bio.Geo.parse(handle) instead. It also returns an iterator.""",
DeprecationWarning)
self.records = parse(handle)
def next(self):
return self.records.next()
def __iter__(self):
return iter(self.next, None)
| apache-2.0 | -817,326,445,066,471,700 | 32.373494 | 145 | 0.57509 | false |
sdanzige/cmonkey-python | test/setenrichment_test.py | 1 | 1506 | """setenrichment_test.py - unit tests for set enrichment module
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import unittest
import cmonkey.set_enrichment as se
class DiscreteEnrichmentSetTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for DiscreteEnrichmentSet"""
def test_construct(self):
genes = {'gene1', 'gene2'}
aset = se.DiscreteEnrichmentSet(genes)
self.assertEquals(genes, aset.genes())
self.assertEquals(genes, aset.genes_above_cutoff())
class CutoffEnrichmentSetTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for CutoffEnrichmentSet"""
def test_construct(self):
genes = {('gene1', 0.1), ('gene2', 0.6)}
aset = se.CutoffEnrichmentSet(0.5, genes)
self.assertEquals({'gene1', 'gene2'}, aset.genes())
self.assertEquals({'gene2'}, aset.genes_above_cutoff())
class SetTypeTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for SetType"""
def test_construct(self):
genes1 = {'gene1', 'gene2'}
aset1 = se.DiscreteEnrichmentSet(genes1)
genes2 = {'gene3', 'gene4'}
aset2 = se.DiscreteEnrichmentSet(genes2)
set_type = se.SetType('mysettype', {'set1': aset1, 'set2': aset2}, 1.0)
self.assertEquals('mysettype', set_type.name)
self.assertEquals({'gene1', 'gene2', 'gene3', 'gene4'},
set_type.genes())
| lgpl-3.0 | 7,097,148,200,919,555,000 | 36.65 | 80 | 0.655378 | false |
svenstaro/uni-projekt | assembler/operations/pseudoOperation.py | 1 | 2713 | from ..operands import Zero, Register, AluOperand2, Opcodes, Const
from .operation import Operation
from .jumpOperation import JumpOperation
from .aluOperation import AluOperation, AluSOperation
import re
def PseudoOperations():
class PseudoOperation(Operation):
underlyingType = None
pseudo = ""
real = ""
@classmethod
def translate(cls, s, src, dest):
src = src.replace("$", "\\$")
srcPattern = "^" + re.sub(r'%(\d)', r'(?P<a\1>.*)', src) + "$"
destPattern = re.sub(r'%(\d)', r'\\g<a\1>', dest)
return re.sub(srcPattern, destPattern, s)
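# Illustrative example (not from the original source): with
# pseudo = "cmp %0, %1" and real = "subs $0, %0, %1" (see the ops table
# below), translate("cmp $1, $2", pseudo, real) yields "subs $0, $1, $2":
# each %N placeholder becomes a named regex group in the source pattern and
# is substituted back into the destination pattern.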
@classmethod
def fromReal(cls, arg):
try:
return cls.translate(arg, cls.pseudo, cls.real)
except:
return None
@classmethod
def isValidText(cls, arg):
realarg = cls.fromReal(arg)
if realarg:
return cls.underlyingType.isValidText(realarg)
return False
@classmethod
def fromText(cls, line, state):
realarg = cls.fromReal(line)
inner = cls.underlyingType.fromText(realarg, state)
return cls(line, inner.binary, inner)
@classmethod
def isValidBinary(cls, arg):
if not cls.underlyingType.isValidBinary(arg):
return False
inner = cls.underlyingType.fromBinary(arg, None)
text = cls.translate(inner.text, cls.real, cls.pseudo)
return inner.text != text
@classmethod
def fromBinary(cls, arg, state):
inner = cls.underlyingType.fromBinary(arg, state)
text = cls.translate(inner.text, cls.real, cls.pseudo)
return cls(text, inner.binary, inner)
ops = [("ret", "jmp $15", JumpOperation),
("halt", "jmp #-4", JumpOperation),
("nop", "add $0, $0, $0", AluOperation),
("cmp %0, %1", "subs $0, %0, %1", AluSOperation),
("tst %0, %1", "ands $0, %0, %1", AluSOperation),
("not %0, %1", "orn %0, $0, %1", AluOperation),
("nots %0, %1", "orns %0, $0, %1", AluSOperation),
("neg %0, %1", "sub %0, $0, %1", AluOperation),
("negs %0, %1", "subs %0, $0, %1", AluSOperation),
("mov %0, %1", "add %0, $0, %1", AluOperation),
("movs %0, %1", "adds %0, $0, %1", AluSOperation)]
result = []
for op in ops:
pseudo, real, underlyingType = op
newType = type("PseudoOperation", (PseudoOperation,),
dict(pseudo=pseudo, real=real, underlyingType=underlyingType))
result.append(newType)
return result
| gpl-3.0 | 3,973,601,288,656,858,600 | 31.686747 | 85 | 0.537044 | false |
beeftornado/sentry | tests/acceptance/test_performance_trends.py | 1 | 2855 | from __future__ import absolute_import
import pytz
from six.moves.urllib.parse import urlencode
from mock import patch
from django.db.models import F
from sentry.models import Project
from sentry.testutils import AcceptanceTestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.utils.samples import load_data
from .page_objects.base import BasePage
class PerformanceTrendsTest(AcceptanceTestCase, SnubaTestCase):
def make_trend(
self, name, durations, period_mins=60,
):
for index, duration in enumerate(durations):
time_between = period_mins / len(durations)
# distribute events over the period
minutes = period_mins - ((index + 1) * time_between) + (time_between / 2)
event = load_data("transaction")
event.update(
{
"transaction": name,
"event_id": "{:02x}".format(index).rjust(32, "0"),
"start_timestamp": iso_format(before_now(minutes=minutes, seconds=duration)),
"timestamp": iso_format(before_now(minutes=minutes)),
}
)
self.store_event(data=event, project_id=self.project.id)
def setUp(self):
super(PerformanceTrendsTest, self).setUp()
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.org, name="Mariachi Band", members=[self.user]
)
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.group = self.create_group(project=self.project)
self.login_as(self.user)
self.path = u"/organizations/{}/performance/?{}".format(
self.org.slug,
urlencode(
{
"view": "TRENDS",
"query": "transaction.duration:>0",
"statsPeriod": "1h",
"project": self.project.id,
}
),
)
self.page = BasePage(self.browser)
@patch("django.utils.timezone.now")
def test_with_data(self, mock_now):
mock_now.return_value = before_now().replace(tzinfo=pytz.utc)
values = range(1, 100, 5)
self.make_trend("improvement", [v for v in reversed(values)])
self.make_trend("regression", values)
self.project.update(flags=F("flags").bitor(Project.flags.has_transactions))
with self.feature("organizations:performance-view"):
self.browser.get(self.path)
self.page.wait_until_loaded()
trend_item = '[data-test-id="trends-list-item-regression"]'
self.browser.wait_until(trend_item)
self.browser.snapshot("performance trends - with data")
| bsd-3-clause | -5,551,760,831,722,706,000 | 37.581081 | 99 | 0.601051 | false |
dmrsouza/json2kml | json2kml.py | 1 | 1803 | #####################################################################################
# Python script to convert Google Maps JSON file to a KML file
# Copyright (C) 2017 Dimitri Souza
# https://github.com/dmrsouza/json2kml
#####################################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v3 as published by
# the Free Software Foundation.
#####################################################################################
import json
import simplekml
import sys
import codecs
inputFile = "Saved Places.json"
outputFile = "Saved Places.kml"
# JSON encoding is UTF-8. Change stdout to UTF-8 to prevent encoding errors
# when printing titles inside the loop
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
print ('Opening file "'+inputFile+'"')
with open (inputFile) as jsonFile:
data = json.load (jsonFile)
kml = simplekml.Kml ()
kml.document.name = outputFile
count = 0
for place in data["features"]:
if place["type"] == "Feature":
title = place["properties"]["Title"]
print ('Parsing place "'+title+'"')
placeLocation = place["properties"]["Location"]
lon = place["geometry"]["coordinates"][0]
lat = place["geometry"]["coordinates"][1]
if "Address" in placeLocation:
address = placeLocation ["Address"]
else:
address = "N/A"
kml.newpoint (name=title, coords=[(lon,lat)], address=address)
count += 1
print ('Saving file "'+outputFile+'"')
kml.save (outputFile)
print ('Done! Total of '+str(count)+' places saved to the KML file.')
| gpl-3.0 | -2,344,173,037,282,680,000 | 32.673077 | 85 | 0.54132 | false |
mcgraw-bb25/site-quality-crawler | crawler/crawler.py | 1 | 4832 | import time
import argparse
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from crawler.page_request import PageRequest
from crawler.page_report import PageReport
from crawler.report_builder import ReportBuilder
class Crawler(object):
'''
The main crawler object that the user interacts with
'''
crawled_urls = []
page_reports = []
def __init__(self, root_url, start_url=None, crawl_limit=5):
self.root_url = root_url
if start_url:
self.start_url = start_url
else:
self.start_url = root_url
self.crawl_limit = crawl_limit
def start_crawl(self):
'''
Begins crawling the given site at the initialized start_url
'''
self.url_queue = [self.start_url]
while self.url_queue and len(self.crawled_urls) < self.crawl_limit:
current_url = self.url_queue.pop(0)
if current_url in self.crawled_urls:
continue
if self.is_outbound_url(current_url):
print("Skipping outbound url ", current_url)
continue
try:
response = self.get_response(current_url)
except:
# TODO: Put malformed urls in page report
print('Skipping malformed URL - ', current_url)
continue
page_report = PageReport(
url=current_url,
root_url=self.root_url,
status_code=response.status_code,
redirects=response.history,
page_links=self.get_absolute_page_links(current_url, response))
self.url_queue += page_report.page_links
self.page_reports.append(page_report)
print(page_report)
self.crawled_urls.append(current_url)
self.sleep()
self.build_report()
def get_response(self, current_url):
''' Hides PageRequest which allows for mocking '''
return PageRequest(current_url).make_request()
def get_absolute_page_links(self, current_url, response):
'''
Parses a page and returns all links on the page in absolute form
'''
page_soup = BeautifulSoup(response.content, 'html.parser')
links = []
for tag in page_soup.find_all('a'):
if tag.has_attr('href'):
url = self.get_absolute_url(current_url, tag.get('href'))
if not self.is_outbound_url(url):
links.append(url)
return links
def get_absolute_url(self, base_url, link_url):
'''
Given a base url and a link url, returns the absolute url
'''
if link_url.startswith('http://') or link_url.startswith('https://'):
return link_url
return urljoin(base_url, link_url)
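# For example (hypothetical values): get_absolute_url('http://example.com/a/', 'b.html')
# returns 'http://example.com/a/b.html', while a link that already starts
# with http:// or https:// is returned unchanged.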
def is_outbound_url(self, url):
'''
Returns true when url is outside the domain of the root url
'''
return not url.startswith(self.root_url)
def sleep(self):
'''
Used to delay between requests while crawling
'''
time.sleep(2)
def build_report(self):
''' invokes ReportBuilder '''
ReportBuilder(self.page_reports).build_report()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--root", type=str,
help="The root url of pages to crawl")
parser.add_argument("--start", type=str,
help="The entry url for crawl.")
parser.add_argument("--limit", type=int,
help="Maximum number of pages to crawl")
args = parser.parse_args()
if args.root is None:
user_root = ""
else:
user_root = args.root
if args.start is None:
user_start = ""
else:
user_start = args.start
if args.limit is None:
user_limit = ""
else:
user_limit = args.limit
if not user_root.startswith('http'):
while not user_root.startswith('http'):
user_root = input("Please enter a valid root url:")
else:
user_root = args.root
if not user_start.startswith('http'):
while not user_start.startswith('http'):
user_start = input("Please enter a valid starting url:")
else:
user_start = args.start
if not isinstance(user_limit, int):
while not isinstance(user_limit, int):
user_limit = input("Please enter the maximum number of pages to crawl:")
try:
user_limit = int(user_limit)
except:
pass
else:
user_limit = args.limit
real_crawler = Crawler(
root_url=user_root,
start_url=user_start,
crawl_limit=user_limit)
real_crawler.start_crawl()
| bsd-2-clause | 24,238,746,664,563,644 | 29.974359 | 84 | 0.56726 | false |
qedsoftware/commcare-hq | corehq/apps/userreports/reports/view.py | 1 | 19536 | import json
import os
import tempfile
from StringIO import StringIO
from corehq.apps.domain.views import BaseDomainView
from corehq.apps.reports.util import \
DEFAULT_CSS_FORM_ACTIONS_CLASS_REPORT_FILTER
from corehq.apps.style.decorators import (
use_select2,
use_daterangepicker,
use_jquery_ui,
use_nvd3,
use_datatables,
)
from corehq.apps.userreports.const import REPORT_BUILDER_EVENTS_KEY, \
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE
from couchexport.shortcuts import export_response
from corehq.toggles import DISABLE_COLUMN_LIMIT_IN_UCR
from dimagi.utils.modules import to_function
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.utils.translation import ugettext as _, ugettext_noop
from braces.views import JSONResponseMixin
from corehq.apps.locations.permissions import conditionally_location_safe
from corehq.apps.reports.dispatcher import (
ReportDispatcher,
)
from corehq.apps.reports.models import ReportConfig
from corehq.apps.reports_core.exceptions import FilterException
from corehq.apps.userreports.exceptions import (
BadSpecError,
UserReportsError,
TableNotFoundWarning,
UserReportsFilterError,
DataSourceConfigurationNotFoundError)
from corehq.apps.userreports.models import (
CUSTOM_REPORT_PREFIX,
StaticReportConfiguration,
ReportConfiguration,
report_config_id_is_static,
)
from corehq.apps.userreports.reports.factory import ReportFactory
from corehq.apps.userreports.reports.util import (
get_expanded_columns,
has_location_filter,
)
from corehq.apps.userreports.util import (
default_language,
has_report_builder_trial,
can_edit_report,
)
from corehq.util.couch import get_document_or_404, get_document_or_not_found, \
DocumentNotFound
from couchexport.export import export_from_tables
from couchexport.models import Format
from dimagi.utils.couch.pagination import DatatablesParams
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import json_request
from no_exceptions.exceptions import Http403
from corehq.apps.reports.datatables import DataTablesHeader
UCR_EXPORT_TO_EXCEL_ROW_LIMIT = 1000
def get_filter_values(filters, request_dict, user=None):
"""
Return a dictionary mapping filter ids to specified values
:param filters: A list of corehq.apps.reports_core.filters.BaseFilter
objects (or subclasses)
:param request_dict: keyword arguments from the request
:return: a dict mapping each filter's css_id to its specified value
"""
try:
return {
filter.css_id: filter.get_value(request_dict, user)
for filter in filters
}
except FilterException, e:
raise UserReportsFilterError(unicode(e))
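# Illustrative sketch (ids and values are hypothetical): for a report with a
# single filter whose css_id is "date_span", this returns something like
# {"date_span": <whatever that filter's get_value() produced>}, one entry per
# filter in the list.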
def query_dict_to_dict(query_dict, domain):
"""
Transform the given QueryDict to a normal dict where each value has been
converted from a string to a dict (if the value is JSON).
Also add the domain to the dict.
:param query_dict: a QueryDict
:param domain:
:return: a dict
"""
request_dict = json_request(query_dict)
request_dict['domain'] = domain
return request_dict
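# Illustrative sketch (values are hypothetical): a QueryDict equivalent to
# {"filters": '{"location_id": "abc"}'} called with domain="demo" becomes
# {"filters": {"location_id": "abc"}, "domain": "demo"}: JSON-encoded values
# are decoded and the domain key is always added.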
class ConfigurableReport(JSONResponseMixin, BaseDomainView):
section_name = ugettext_noop("Reports")
template_name = 'userreports/configurable_report.html'
slug = "configurable"
prefix = slug
emailable = True
is_exportable = True
show_filters = True
_domain = None
@property
def domain(self):
if self._domain is not None:
return self._domain
return super(ConfigurableReport, self).domain
@use_select2
@use_daterangepicker
@use_jquery_ui
@use_datatables
@use_nvd3
@conditionally_location_safe(has_location_filter)
def dispatch(self, request, *args, **kwargs):
original = super(ConfigurableReport, self).dispatch(request, *args, **kwargs)
return original
@property
def section_url(self):
# todo what should the parent section url be?
return "#"
@property
def is_static(self):
return report_config_id_is_static(self.report_config_id)
@property
def is_custom_rendered(self):
return self.report_config_id.startswith(CUSTOM_REPORT_PREFIX)
@property
@memoized
def spec(self):
if self.is_static:
return StaticReportConfiguration.by_id(self.report_config_id)
else:
return get_document_or_not_found(ReportConfiguration, self.domain, self.report_config_id)
def get_spec_or_404(self):
try:
return self.spec
except DocumentNotFound:
raise Http404()
def has_viable_configuration(self):
try:
self.spec
except DocumentNotFound:
return False
else:
return True
@property
def title(self):
return self.spec.title
@property
def page_name(self):
return self.spec.title
@property
@memoized
def data_source(self):
report = ReportFactory.from_spec(self.spec, include_prefilters=True)
report.lang = self.lang
return report
@property
@memoized
def request_dict(self):
if self.request.method == 'GET':
return query_dict_to_dict(self.request.GET, self.domain)
elif self.request.method == 'POST':
return query_dict_to_dict(self.request.POST, self.domain)
@property
@memoized
def filter_values(self):
try:
user = self.request.couch_user
except AttributeError:
user = None
return get_filter_values(self.filters, self.request_dict, user=user)
@property
@memoized
def filter_context(self):
return {
filter.css_id: filter.context(self.filter_values[filter.css_id], self.lang)
for filter in self.filters
}
@property
@memoized
def filters(self):
return self.spec.ui_filters
_report_config_id = None
@property
def report_config_id(self):
if self._report_config_id is not None:
return self._report_config_id
return self.kwargs['subreport_slug']
_lang = None
@property
def lang(self):
if self._lang is not None:
return self._lang
return self.request.couch_user.language or default_language()
def get(self, request, *args, **kwargs):
if self.has_permissions(self.domain, request.couch_user):
self.get_spec_or_404()
if kwargs.get('render_as') == 'email':
return self.email_response
elif kwargs.get('render_as') == 'excel':
return self.excel_response
elif request.GET.get('format', None) == "export":
return self.export_response
elif request.GET.get('format', None) == 'export_size_check':
return self.export_size_check_response
elif request.is_ajax() or request.GET.get('format', None) == 'json':
return self.get_ajax(self.request.GET)
self.content_type = None
try:
self.add_warnings(self.request)
except UserReportsError as e:
details = ''
if isinstance(e, DataSourceConfigurationNotFoundError):
error_message = DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE
else:
error_message = _(
'It looks like there is a problem with your report. '
'You may need to delete and recreate the report. '
'If you believe you are seeing this message in error, please report an issue.'
)
details = unicode(e)
self.template_name = 'userreports/report_error.html'
context = {
'report_id': self.report_config_id,
'is_static': self.is_static,
'error_message': error_message,
'details': details,
}
context.update(self.main_context)
return self.render_to_response(context)
return super(ConfigurableReport, self).get(request, *args, **kwargs)
else:
raise Http403()
def post(self, request, *args, **kwargs):
if self.has_permissions(self.domain, request.couch_user):
self.get_spec_or_404()
if request.is_ajax():
return self.get_ajax(self.request.POST)
else:
return HttpResponseBadRequest()
else:
raise Http403()
def has_permissions(self, domain, user):
return True
def add_warnings(self, request):
for warning in self.data_source.column_warnings:
messages.warning(request, warning)
@property
def page_context(self):
context = {
'report': self,
'report_table': {'default_rows': 25},
'filter_context': self.filter_context,
'url': self.url,
'method': 'POST',
'headers': self.headers,
'can_edit_report': can_edit_report(self.request, self),
'has_report_builder_trial': has_report_builder_trial(self.request),
'report_filter_form_action_css_class': DEFAULT_CSS_FORM_ACTIONS_CLASS_REPORT_FILTER,
}
context.update(self.saved_report_context_data)
context.update(self.pop_report_builder_context_data())
if isinstance(self.spec, ReportConfiguration) and self.spec.report_meta.builder_report_type == 'map':
context['report_table']['default_rows'] = 100
return context
def pop_report_builder_context_data(self):
"""
Pop any report builder data stored on the session and return a dict to
be included in the template context.
"""
return {
'report_builder_events': self.request.session.pop(REPORT_BUILDER_EVENTS_KEY, [])
}
@property
def saved_report_context_data(self):
def _get_context_for_saved_report(report_config):
if report_config:
report_config_data = report_config.to_json()
report_config_data['filters'].update(report_config.get_date_range())
return report_config_data
else:
return ReportConfig.default()
saved_report_config_id = self.request.GET.get('config_id')
saved_report_config = get_document_or_404(ReportConfig, self.domain, saved_report_config_id) \
if saved_report_config_id else None
return {
'report_configs': [
_get_context_for_saved_report(saved_report)
for saved_report in ReportConfig.by_domain_and_owner(
self.domain, self.request.couch_user._id, report_slug=self.slug
)
],
'default_config': _get_context_for_saved_report(saved_report_config),
'datespan_filters': ReportConfig.datespan_filter_choices(self.datespan_filters, self.lang),
}
@property
def has_datespan(self):
return bool(self.datespan_filters)
@property
def datespan_filters(self):
return [
f for f in self.spec.filters
if f['type'] == 'date'
]
@property
def headers(self):
return DataTablesHeader(*[col.data_tables_column for col in self.data_source.inner_columns])
def get_ajax(self, params):
try:
data_source = self.data_source
if len(data_source.inner_columns) > 50 and not DISABLE_COLUMN_LIMIT_IN_UCR.enabled(self.domain):
raise UserReportsError(_("This report has too many columns to be displayed"))
data_source.set_filter_values(self.filter_values)
sort_column = params.get('iSortCol_0')
sort_order = params.get('sSortDir_0', 'ASC')
echo = int(params.get('sEcho', 1))
if sort_column and echo != 1:
data_source.set_order_by(
[(data_source.top_level_columns[int(sort_column)].column_id, sort_order.upper())]
)
datatables_params = DatatablesParams.from_request_dict(params)
page = list(data_source.get_data(start=datatables_params.start, limit=datatables_params.count))
total_records = data_source.get_total_records()
total_row = data_source.get_total_row() if data_source.has_total_row else None
except UserReportsError as e:
if settings.DEBUG:
raise
return self.render_json_response({
'error': e.message,
'aaData': [],
'iTotalRecords': 0,
'iTotalDisplayRecords': 0,
})
except TableNotFoundWarning:
if self.spec.report_meta.created_by_builder:
msg = _(
"The database table backing your report does not exist yet. "
"Please wait while the report is populated."
)
else:
msg = _(
"The database table backing your report does not exist yet. "
"You must rebuild the data source before viewing the report."
)
return self.render_json_response({
'warning': msg
})
json_response = {
'aaData': page,
"sEcho": params.get('sEcho', 0),
"iTotalRecords": total_records,
"iTotalDisplayRecords": total_records,
}
if total_row is not None:
json_response["total_row"] = total_row
return self.render_json_response(json_response)
def _get_initial(self, request, **kwargs):
pass
@classmethod
def url_pattern(cls):
from django.conf.urls import url
pattern = r'^{slug}/(?P<subreport_slug>[\w\-:]+)/$'.format(slug=cls.slug)
return url(pattern, cls.as_view(), name=cls.slug)
@property
def type(self):
"""
Used to populate ReportConfig.report_type
"""
return self.prefix
@property
def sub_slug(self):
"""
Used to populate ReportConfig.subreport_slug
"""
return self.report_config_id
@classmethod
def get_report(cls, domain, slug, report_config_id):
report = cls()
report._domain = domain
report._report_config_id = report_config_id
if not report.has_viable_configuration():
return None
report.name = report.title
return report
@property
def url(self):
return reverse(self.slug, args=[self.domain, self.report_config_id])
@property
@memoized
def export_table(self):
try:
data = self.data_source
data.set_filter_values(self.filter_values)
data.set_order_by([(o['field'], o['order']) for o in self.spec.sort_expression])
except UserReportsError as e:
return self.render_json_response({
'error': e.message,
})
raw_rows = list(data.get_data())
headers = [column.header for column in self.data_source.columns]
column_id_to_expanded_column_ids = get_expanded_columns(data.top_level_columns, data.config)
column_ids = []
for column in self.spec.report_columns:
column_ids.extend(column_id_to_expanded_column_ids.get(column.column_id, [column.column_id]))
rows = [[raw_row[column_id] for column_id in column_ids] for raw_row in raw_rows]
total_rows = [data.get_total_row()] if data.has_total_row else []
return [
[
self.title,
[headers] + rows + total_rows
]
]
@property
@memoized
def email_response(self):
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as temp:
export_from_tables(self.export_table, temp, Format.HTML)
with open(path) as f:
return HttpResponse(json.dumps({
'report': f.read(),
}))
@property
@memoized
def excel_response(self):
file = StringIO()
export_from_tables(self.export_table, file, Format.XLS_2007)
return file
@property
@memoized
def export_too_large(self):
data = self.data_source
data.set_filter_values(self.filter_values)
total_rows = data.get_total_records()
return total_rows > UCR_EXPORT_TO_EXCEL_ROW_LIMIT
@property
@memoized
def export_size_check_response(self):
try:
too_large = self.export_too_large
except UserReportsError as e:
if settings.DEBUG:
raise
return self.render_json_response({
'export_allowed': False,
'message': e.message,
})
if too_large:
return self.render_json_response({
'export_allowed': False,
'message': _(
"Report export is limited to {number} rows. "
"Please filter the data in your report to "
"{number} or fewer rows before exporting"
).format(number=UCR_EXPORT_TO_EXCEL_ROW_LIMIT),
})
return self.render_json_response({
"export_allowed": True,
})
@property
@memoized
def export_response(self):
if self.export_too_large:
# Frontend should check size with export_size_check_response()
# Before hitting this endpoint, but we check the size again here
# in case the user modifies the url manually.
return HttpResponseBadRequest()
temp = StringIO()
export_from_tables(self.export_table, temp, Format.XLS_2007)
return export_response(temp, Format.XLS_2007, self.title)
# Base class for classes that provide custom rendering for UCRs
class CustomConfigurableReport(ConfigurableReport):
# Ensures that links in saved reports will hit CustomConfigurableReportDispatcher
slug = 'custom_configurable'
class CustomConfigurableReportDispatcher(ReportDispatcher):
slug = prefix = 'custom_configurable'
map_name = 'CUSTOM_UCR'
@staticmethod
def _report_class(domain, config_id):
class_path = StaticReportConfiguration.report_class_by_domain_and_id(
domain, config_id
)
return to_function(class_path)
def dispatch(self, request, domain, subreport_slug, **kwargs):
report_config_id = subreport_slug
try:
report_class = self._report_class(domain, report_config_id)
except BadSpecError:
raise Http404
return report_class.as_view()(request, domain=domain, subreport_slug=report_config_id, **kwargs)
def get_report(self, domain, slug, config_id):
try:
report_class = self._report_class(domain, config_id)
except BadSpecError:
return None
return report_class.get_report(domain, slug, config_id)
@classmethod
def url_pattern(cls):
from django.conf.urls import url
pattern = r'^{slug}/(?P<subreport_slug>[\w\-:]+)/$'.format(slug=cls.slug)
return url(pattern, cls.as_view(), name=cls.slug)
| bsd-3-clause | 7,399,060,679,144,329,000 | 33.21366 | 109 | 0.609285 | false |
jnvandermeer/PythonFeedback | Thermo/Thermo.py | 1 | 6111 | # we use pygame for our nice little thermometer!
import pygame
# let's try it...
class Thermo(object):
"""
So... this class displays, using pygame, a nice thermometer on the
screen. You can set the height of the thing as a percentage, using:
set_percentage(some percentage)
This is really just as simple as possible, and later should be
integrated into pyff or pythonpy.
"""
## Just some base functions which I now overloaded. (French) Haha!
# def __init__(self):
# pass
# def __str__(self):
# pass
# def __len__(self):
# pass
def __init__(self):
"""
This function should do the initialization, set all the right
variables/screen size, etc etc.
screen_size = a tuple with two int parameters.
percentage = a float or int percentage number.
It will also draw the screen, creating the surface,
and load/resize the images, etc.
"""
screen_size = (1024, 768)
percentage = 50.
# self.screen_size = screen_size
# self.percentage = percentage
# get the screen surface!
self.screen = pygame.display.set_mode(screen_size)
# load my images...
self.image_temperature = pygame.image.load("colorbar_morered.jpg").convert()
self.image_thermometer = pygame.image.load("frame_greyblack_grad.bmp").convert()
self.image_thermometer.set_colorkey((255, 255, 255))
self.image_temperature.set_colorkey((255, 255, 255))
# resize the thermometer frame...
self.new_size_thermometer = (int(self.screen.get_size()[0]*0.2), int(self.screen.get_size()[1]*0.7))
self.new_image_thermometer = pygame.transform.scale(self.image_thermometer,self.new_size_thermometer)
# resize the temperature bar...
self.new_size_temperature = (int(self.new_size_thermometer[0]*0.85), int(self.new_size_thermometer[1]*0.96))
self.new_image_temperature = pygame.transform.scale(self.image_temperature,self.new_size_temperature)
# what is the center of my screen?
self.new_center = (self.screen.get_size()[0]*0.5, self.screen.get_size()[1]*0.5)
# define the rects of where we would like to draw them!
self.new_rect_thermometer = self.new_image_thermometer.get_rect()
self.new_rect_temperature = self.new_image_temperature.get_rect()
self.new_rect_thermometer.center=self.new_center
self.new_rect_temperature.center=self.new_center
# just do a small backup of our window for later use.
self.temperature_window = self.new_rect_temperature
# fill the screen...
self.screen.fill((125,125,125))
# get the background surface layer... the surface.. is the thing
# that's constantly being updated.
self.background=self.screen.copy()
# put the thermometer on the screen.
self.screen.blit(self.new_image_thermometer, self.new_rect_thermometer)
# put the temperature also -- no, we don't have to yet... just set
# it to some beginning position.
# screen.blit(new_image_thermometer, new_rect_thermometer)
# we don't update yet... that comes later.
# with our complete set_percentage function... let's try calling that!
self.set_percentage(percentage)
# return self
def set_percentage(self,percentage):
"""
This function should update the thermometer to some new value
and then update the screen.
"""
self.percentage = percentage
p=self.percentage
# print 'p = ' + str(p)
# so what's the rect position of the temperature??
oldpos = self.new_rect_temperature
# get the new position... copy + change it.
newpos = pygame.Rect(self.temperature_window)
# change the offset + height..
newpos[1]=self.temperature_window[1] + self.temperature_window[3] - self.temperature_window[3]/100.*p
newpos[3]=self.temperature_window[3]/100.*p
# we don't have to store the rects... but we CAN append them in an array..
#
dirty_rects = []
dirty_rects.append(oldpos)
dirty_rects.append(newpos)
# we also need to change the rect encompassing the temperature surface to match.. so do it!
# just use the temperature_window rect to change it... this
# is a NEW rect that's made every time.
sourcerect = pygame.Rect(self.temperature_window)
sourcerect[0] = 0 # put the x at 0, xheight needs not to be changed.
sourcerect[1] = self.temperature_window[3] - self.temperature_window[3]/100.*p
sourcerect[3] = self.temperature_window[3]/100.*p
# print 'oldpos = ' + str(oldpos)
# print 'newpos = ' + str(newpos)
# print 'sourcerect = ' + str(sourcerect)
# so we've defined all our nice rects. Now we remove the bar... by copying background
# material.
self.screen.blit(self.background, oldpos, oldpos)
# might as well draw also the thermometer ... again!!!.
self.screen.blit(self.new_image_thermometer, self.new_rect_thermometer)
# after that, we copy some stuff from the thermometer onto the screen...
self.screen.blit(self.new_image_temperature,newpos,sourcerect)
# we also update our temperature position...
self.new_rect_temperature = newpos
# and we update our screen while we're at it.
pygame.display.flip()
# we COULD have also done THIS:
# pygame.display.update(dirty_rects)
def stop(self):
"""
Luckily, the stop keyword hasn't been used yet in python.
I will use it for my class in order to stop the thing.
... only, I haven't got any idea yet how.
"""
pygame.display.quit()
def get_percentage(self):
"""
In case you were wondering where you set the bar to, this just returns
the percentage value!
"""
return self.percentage
| gpl-2.0 | -3,218,809,404,950,575,600 | 34.12069 | 116 | 0.62412 | false |
iamrajhans/FlaskBackend | migrations/versions/8eba94c1838c_.py | 1 | 1094 | """empty message
Revision ID: 8eba94c1838c
Revises: 59b25a9d6e7b
Create Date: 2017-02-24 00:38:20.074565
"""
# revision identifiers, used by Alembic.
revision = '8eba94c1838c'
down_revision = '59b25a9d6e7b'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user_session',
sa.Column('id', sa.String(length=255), nullable=False),
sa.Column('user_id', sa.String(length=100), nullable=False),
sa.Column('session_start', sa.TIMESTAMP(), nullable=False),
sa.Column('session_end', sa.TIMESTAMP(), nullable=False),
sa.Column('isValid', sa.Boolean(), nullable=False),
sa.Column('user_plan', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user_session')
### end Alembic commands ###
| mit | -4,994,312,549,992,917,000 | 29.388889 | 66 | 0.680987 | false |
punitvara/zephyr | scripts/gen_kobject_list.py | 1 | 8899 | #!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import sys
import argparse
import math
import os
import struct
from elf_helper import ElfHelper, kobject_to_enum
# Keys in this dictionary are structs which should be recognized as kernel
# objects. Values should either be None, or the name of a Kconfig that
# indicates the presence of this object's definition in case it is not
# available in all configurations.
kobjects = {
"k_alert": None,
"k_mem_slab": None,
"k_msgq": None,
"k_mutex": None,
"k_pipe": None,
"k_queue": None,
"k_poll_signal": None,
"k_sem": None,
"k_stack": None,
"k_thread": None,
"k_timer": None,
"_k_thread_stack_element": None,
"net_context": "CONFIG_NETWORKING",
"device": None
}
subsystems = [
"adc_driver_api",
"aio_cmp_driver_api",
"counter_driver_api",
"crypto_driver_api",
"dma_driver_api",
"flash_driver_api",
"gpio_driver_api",
"i2c_driver_api",
"i2s_driver_api",
"ipm_driver_api",
"led_driver_api",
"pinmux_driver_api",
"pwm_driver_api",
"entropy_driver_api",
"rtc_driver_api",
"sensor_driver_api",
"spi_driver_api",
"uart_driver_api",
]
header = """%compare-lengths
%define lookup-function-name _k_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct _k_object;
%%
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct _k_object *_k_object_gperf_find(void *obj)
{
return _k_object_lookup((const char *)obj, sizeof(void *));
}
void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct _k_object *_k_object_find(void *obj)
ALIAS_OF(_k_object_gperf_find);
void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(_k_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, eh, objs, static_begin, static_end):
fp.write(header)
# Setup variables for mapping thread indexes
syms = eh.get_symbols()
thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
thread_idx_map = {}
for i in range(0, thread_max_bytes):
thread_idx_map[i] = 0xFF
for obj_addr, ko in objs.items():
obj_type = ko.type_name
# pre-initialized objects fall within this memory range, they are
# either completely initialized at build time, or done automatically
# at boot during some PRE_KERNEL_* phase
initialized = obj_addr >= static_begin and obj_addr < static_end
byte_str = struct.pack("<I" if eh.little_endian else ">I", obj_addr)
fp.write("\"")
for byte in byte_str:
val = "\\x%02x" % byte
fp.write(val)
fp.write(
"\",{},%s,%s,%d\n" %
(obj_type,
"K_OBJ_FLAG_INITIALIZED" if initialized else "0",
ko.data))
if obj_type == "K_OBJ_THREAD":
idx = math.floor(ko.data / 8)
bit = ko.data % 8
thread_idx_map[idx] = thread_idx_map[idx] & ~(2**bit)
fp.write(footer)
# Generate the array of already mapped thread indexes
fp.write('\n')
fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))
for i in range(0, thread_max_bytes):
fp.write(' 0x%x, ' % (thread_idx_map[i]))
fp.write('};\n')
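# Worked example of the bitmap arithmetic above (numbers are illustrative):
# a thread object whose data value is 10 lands in byte 10 // 8 == 1 and bit
# 10 % 8 == 2, so thread_idx_map[1] goes from 0xff to 0xfb, marking that
# thread index as already allocated.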
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
fp.write("#define DRIVER_VALIDATION_GEN_H\n")
fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "")
fp.write(driver_macro_tpl % {
"driver_lower": subsystem.lower(),
"driver_upper": subsystem.upper(),
})
fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
fp.write("/* Core kernel objects */\n")
for kobj, dep in kobjects.items():
if kobj == "device":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write("%s,\n" % kobject_to_enum(kobj))
if dep:
fp.write("#endif\n")
fp.write("/* Driver subsystems */\n")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "").upper()
fp.write("K_OBJ_DRIVER_%s,\n" % subsystem)
def write_kobj_otype_output(fp):
fp.write("/* Core kernel objects */\n")
for kobj, dep in kobjects.items():
if kobj == "device":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write('case %s: ret = "%s"; break;\n' %
(kobject_to_enum(kobj), kobj))
if dep:
fp.write("#endif\n")
fp.write("/* Driver subsystems */\n")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "")
fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n' % (
subsystem.upper(),
subsystem
))
def write_kobj_size_output(fp):
fp.write("/* Non device/stack objects */\n")
for kobj, dep in kobjects.items():
# device handled by default case. Stacks are not currently handled,
# if they eventually are it will be a special case.
if kobj == "device" or kobj == "_k_thread_stack_element":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write('case %s: ret = sizeof(struct %s); break;\n' %
(kobject_to_enum(kobj), kobj))
if dep:
fp.write("#endif\n")
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-k", "--kernel", required=False,
help="Input zephyr ELF binary")
parser.add_argument(
"-g", "--gperf-output", required=False,
help="Output list of kernel object addresses for gperf use")
parser.add_argument(
"-V", "--validation-output", required=False,
help="Output driver validation macros")
parser.add_argument(
"-K", "--kobj-types-output", required=False,
help="Output k_object enum values")
parser.add_argument(
"-S", "--kobj-otype-output", required=False,
help="Output case statements for otype_to_str()")
parser.add_argument(
"-Z", "--kobj-size-output", required=False,
help="Output case statements for obj_size_get()")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print extra debugging information")
args = parser.parse_args()
if "VERBOSE" in os.environ:
args.verbose = 1
def main():
parse_args()
if args.gperf_output:
eh = ElfHelper(args.kernel, args.verbose, kobjects, subsystems)
syms = eh.get_symbols()
max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
objs = eh.find_kobjects(syms)
thread_counter = eh.get_thread_counter()
if thread_counter > max_threads:
sys.stderr.write("Too many thread objects (%d)\n" % thread_counter)
sys.stderr.write("Increase CONFIG_MAX_THREAD_BYTES to %d\n",
-(-thread_counter // 8))
sys.exit(1)
with open(args.gperf_output, "w") as fp:
write_gperf_table(fp, eh, objs,
syms["_static_kernel_objects_begin"],
syms["_static_kernel_objects_end"])
if args.validation_output:
with open(args.validation_output, "w") as fp:
write_validation_output(fp)
if args.kobj_types_output:
with open(args.kobj_types_output, "w") as fp:
write_kobj_types_output(fp)
if args.kobj_otype_output:
with open(args.kobj_otype_output, "w") as fp:
write_kobj_otype_output(fp)
if args.kobj_size_output:
with open(args.kobj_size_output, "w") as fp:
write_kobj_size_output(fp)
if __name__ == "__main__":
main()
| apache-2.0 | 8,422,010,048,923,392,000 | 27.986971 | 116 | 0.589167 | false |
habnabit/theresa-bot | twits.py | 1 | 8827 | # Copyright (c) Aaron Gallagher <[email protected]>
# See COPYING for details.
"I HATE TWITTER"
from twisted.application.service import Service
from twisted.protocols.policies import TimeoutMixin
from twisted.web.http_headers import Headers
from twisted.protocols.basic import LineOnlyReceiver
from twisted.internet.error import TimeoutError
from twisted.web.client import ResponseDone
from twisted.web.http import PotentialDataLoss
from twisted.internet import defer
from twisted.python import log, failure
import oauth2
import urlparse
import theresa
import urllib
import json
import re
defaultSignature = oauth2.SignatureMethod_HMAC_SHA1()
defaultTwitterAPI = 'https://api.twitter.com/1.1/'
defaultStreamingAPI = 'https://userstream.twitter.com/1.1/'
class UnexpectedHTTPStatus(Exception):
pass
def trapBadStatuses(response, goodStatuses=(200,)):
if response.code not in goodStatuses:
raise UnexpectedHTTPStatus(response.code, response.phrase)
return response
class OAuthAgent(object):
"An Agent wrapper that adds OAuth authorization headers."
def __init__(self, agent, consumer, token, signatureMethod=defaultSignature):
self.agent = agent
self.consumer = consumer
self.token = token
self.signatureMethod = signatureMethod
def request(self, method, uri, headers=None, bodyProducer=None, parameters=None, addAuthHeader=True):
"""Make a request, optionally signing it.
Any query string passed in `uri` will get clobbered by the urlencoded
version of `parameters`.
"""
if headers is None:
headers = Headers()
if parameters is None:
parameters = {}
if addAuthHeader:
req = oauth2.Request.from_consumer_and_token(
self.consumer, token=self.token,
http_method=method, http_url=uri, parameters=parameters)
req.sign_request(self.signatureMethod, self.consumer, self.token)
for header, value in req.to_header().iteritems():
# oauth2, for some bozotic reason, gives unicode header values
headers.addRawHeader(header, value.encode())
parsed = urlparse.urlparse(uri)
uri = urlparse.urlunparse(parsed._replace(query=urllib.urlencode(parameters)))
return self.agent.request(method, uri, headers, bodyProducer)
class TwitterStream(LineOnlyReceiver, TimeoutMixin):
"Receive a stream of JSON in twitter's weird streaming format."
def __init__(self, delegate, timeoutPeriod=60):
self.delegate = delegate
self.timeoutPeriod = timeoutPeriod
self.deferred = defer.Deferred(self._cancel)
self._done = False
def connectionMade(self):
"Start the timeout once the connection has been established."
self.setTimeout(self.timeoutPeriod)
LineOnlyReceiver.connectionMade(self)
def _cancel(self, ign):
"A Deferred canceler that drops the connection."
if self._done:
return
self._done = True
self.transport.stopProducing()
self.deferred.errback(defer.CancelledError())
def dataReceived(self, data):
"Reset the timeout and parse the received data."
self.resetTimeout()
LineOnlyReceiver.dataReceived(self, data)
def lineReceived(self, line):
"Ignoring empty-line keepalives, inform the delegate about new data."
if not line:
return
obj = json.loads(line)
try:
self.delegate(obj)
except:
log.err(None, 'error in stream delegate %r' % (self.delegate,))
def timeoutConnection(self):
"We haven't received data in too long, so drop the connection."
if self._done:
return
self._done = True
self.transport.stopProducing()
self.deferred.errback(TimeoutError())
def connectionLost(self, reason):
"Report back how the connection was lost unless we already did."
self.setTimeout(None)
if self._done:
return
self._done = True
if reason.check(ResponseDone, PotentialDataLoss):
self.deferred.callback(None)
else:
self.deferred.errback(reason)
class Twitter(object):
"Close to the most minimal twitter interface ever."
def __init__(self, agent, twitterAPI=defaultTwitterAPI, streamingAPI=defaultStreamingAPI):
self.agent = agent
self.twitterAPI = twitterAPI
self.streamingAPI = streamingAPI
def _makeRequest(self, whichAPI, method, resource, parameters):
d = self.agent.request(method, urlparse.urljoin(whichAPI, resource), parameters=parameters)
d.addCallback(trapBadStatuses)
return d
def request(self, resource, method='GET', **parameters):
"""Make a GET request from the twitter 1.1 API.
`resource` is the part of the resource URL not including the API URL,
e.g. 'statuses/show.json'. As everything gets decoded by `json.loads`,
this should always end in '.json'. Any parameters passed in as keyword
arguments will be added to the URL as the query string. The `Deferred`
returned will fire with the decoded JSON.
"""
d = self._makeRequest(self.twitterAPI, method, resource, parameters)
d.addCallback(theresa.receive, theresa.StringReceiver())
d.addCallback(json.loads)
return d
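# Illustrative usage (the resource and parameter are only examples):
#
#   d = twitter.request('statuses/show.json', id='123')
#   d.addCallback(lambda twit: log.msg(twit['text']))
#
# The Deferred fires with the already-decoded JSON.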
def stream(self, resource, delegate, **parameters):
"""Receive from the twitter 1.1 streaming API.
`resource` and keyword arguments are treated the same as in
`request`, and `delegate` will be called with each JSON object which is
received from the stream. The `Deferred` returned will fire when the
stream has ended.
"""
d = self._makeRequest(self.streamingAPI, 'GET', resource, parameters)
d.addCallback(theresa.receive, TwitterStream(delegate))
return d
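# Illustrative usage (the endpoint name is an example; Twitter has since
# retired the user streams API):
#
#   d = twitter.stream('user.json', on_json_object)
#
# on_json_object is called once per decoded JSON object until the stream
# ends, at which point the returned Deferred fires.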
class StreamPreserver(Service):
"Keep a stream connected as a service."
def __init__(self, twitter, resource, **parameters):
self.twitter = twitter
self.resource = resource
self.parameters = parameters
self._streamDone = None
self._delegates = set()
def __repr__(self):
return '<StreamPreserver %#x for %r/%r>' % (id(self), self.resource, self.parameters)
def _connectStream(self, r):
if isinstance(r, failure.Failure) and r.check(defer.CancelledError):
log.msg('not reconnecting twitter stream %r' % self)
return
log.msg('reconnecting twitter stream %r' % self)
d = self._streamDone = self.twitter.stream(self.resource, self._streamDelegate, **self.parameters)
d.addBoth(self._connectStream)
d.addErrback(log.err, 'error reading from twitter stream %r' % self)
return r
def _streamDelegate(self, data):
for delegate in self._delegates:
try:
delegate(data)
except Exception:
log.err('error calling delegate %r' % (delegate,))
def addDelegate(self, delegate):
"Add a delegate to receive stream data."
self._delegates.add(delegate)
def removeDelegate(self, delegate):
"Remove a previously-added stream data delegate."
self._delegates.discard(delegate)
def startService(self):
"Start reading from the stream."
if not self.running:
self._connectStream(None)
Service.startService(self)
def stopService(self):
"Stop reading from the stream."
ret = None
if self.running and self._streamDone is not None:
self._streamDone.cancel()
ret = self._streamDone
Service.stopService(self)
return ret
entityReplacements = [
('media', 'media_url_https'),
('urls', 'expanded_url'),
]
# SERIOUSLY why the FUCK do I have to do this
dumbCrapReplacements = {
'&amp;': '&',
'&lt;': '<',
'&gt;': '>',
}
dumbCrapRegexp = re.compile('|'.join(re.escape(s) for s in dumbCrapReplacements))
def extractRealTwitText(twit):
"Oh my god why is this necessary."
if 'retweeted_status' in twit:
rt = twit['retweeted_status']
return u'RT @%s: %s' % (rt['user']['screen_name'], extractRealTwitText(rt))
replacements = sorted(
(entity['indices'], entity[replacement])
for entityType, replacement in entityReplacements
if entityType in twit['entities']
for entity in twit['entities'][entityType])
mutableText = list(twit['text'])
for (l, r), replacement in reversed(replacements):
mutableText[l:r] = replacement
text = u''.join(mutableText)
return dumbCrapRegexp.sub(lambda m: dumbCrapReplacements[m.group()], text)
| isc | 4,725,299,947,152,811,000 | 36.244726 | 106 | 0.656282 | false |
rcarmo/crab | scikits/crab/recommenders/knn/tests/test_item_strategies.py | 1 | 3263 | import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises
from scikits.crab.models.classes import MatrixPreferenceDataModel
from scikits.crab.recommenders.knn.item_strategies import ItemsNeighborhoodStrategy, AllPossibleItemsStrategy
from scikits.crab.models.utils import UserNotFoundError
movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Luciana Nunes': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Penny Frewman': {'Snakes on a Plane': 4.5, 'You, Me and Dupree': 1.0, 'Superman Returns': 4.0},
'Maria Gabriela': {}}
def test_ItemsNeighborhoodStrategy():
#Empty Dataset
model = MatrixPreferenceDataModel({})
strategy = ItemsNeighborhoodStrategy()
assert_raises(UserNotFoundError, strategy.candidate_items, 'Lorena Abreu', model)
#Possible candidates
model = MatrixPreferenceDataModel(movies)
strategy = ItemsNeighborhoodStrategy()
assert_array_equal(np.array(['Lady in the Water']), strategy.candidate_items('Lorena Abreu', model))
#Empty candidates
model = MatrixPreferenceDataModel(movies)
strategy = ItemsNeighborhoodStrategy()
assert_array_equal(np.array([], dtype='|S'), strategy.candidate_items('Marcel Caraciolo', model))
#Empty candidates
model = MatrixPreferenceDataModel(movies)
strategy = ItemsNeighborhoodStrategy()
assert_array_equal(np.array([], dtype=bool), strategy.candidate_items('Maria Gabriela', model))
def test_AllPossibleItemsStrategy():
#Empty Dataset
model = MatrixPreferenceDataModel({})
strategy = AllPossibleItemsStrategy()
assert_raises(UserNotFoundError, strategy.candidate_items, 'Lorena Abreu', model)
#Possible candidates
model = MatrixPreferenceDataModel(movies)
strategy = AllPossibleItemsStrategy()
assert_array_equal(np.array(['Lady in the Water']), strategy.candidate_items('Lorena Abreu', model))
#Empty candidates
model = MatrixPreferenceDataModel(movies)
strategy = AllPossibleItemsStrategy()
assert_array_equal(np.array([], dtype='|S'), strategy.candidate_items('Marcel Caraciolo', model))
#Empty candidates
model = MatrixPreferenceDataModel(movies)
strategy = AllPossibleItemsStrategy()
assert_array_equal(np.array(['Just My Luck', 'Lady in the Water', 'Snakes on a Plane',
'Superman Returns', 'The Night Listener', 'You, Me and Dupree']), strategy.candidate_items('Maria Gabriela', model))
| bsd-3-clause | 6,464,256,378,812,769,000 | 44.957746 | 123 | 0.709776 | false |
matrumz/RPi_Custom_Files | Printing/hplip-3.15.2/ui/setupform_base.py | 1 | 26342 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/setupform_base.ui'
#
# Created: Thu Sep 20 11:45:16 2007
# by: The PyQt User Interface Compiler (pyuic) 3.17
#
# WARNING! All changes made in this file will be lost!
from qt import *
class SetupForm_base(QWizard):
def __init__(self,parent = None,name = None,modal = 0,fl = 0):
QWizard.__init__(self,parent,name,modal,fl)
if not name:
self.setName("SetupForm_base")
self.ConnectionPage = QWidget(self,"ConnectionPage")
ConnectionPageLayout = QGridLayout(self.ConnectionPage,1,1,11,6,"ConnectionPageLayout")
self.connectionTypeButtonGroup = QButtonGroup(self.ConnectionPage,"connectionTypeButtonGroup")
self.connectionTypeButtonGroup.setColumnLayout(0,Qt.Vertical)
self.connectionTypeButtonGroup.layout().setSpacing(6)
self.connectionTypeButtonGroup.layout().setMargin(11)
connectionTypeButtonGroupLayout = QGridLayout(self.connectionTypeButtonGroup.layout())
connectionTypeButtonGroupLayout.setAlignment(Qt.AlignTop)
self.usbRadioButton = QRadioButton(self.connectionTypeButtonGroup,"usbRadioButton")
connectionTypeButtonGroupLayout.addWidget(self.usbRadioButton,0,0)
self.netRadioButton = QRadioButton(self.connectionTypeButtonGroup,"netRadioButton")
connectionTypeButtonGroupLayout.addWidget(self.netRadioButton,1,0)
self.parRadioButton = QRadioButton(self.connectionTypeButtonGroup,"parRadioButton")
connectionTypeButtonGroupLayout.addWidget(self.parRadioButton,2,0)
ConnectionPageLayout.addMultiCellWidget(self.connectionTypeButtonGroup,1,1,0,1)
spacer12 = QSpacerItem(20,120,QSizePolicy.Minimum,QSizePolicy.Expanding)
ConnectionPageLayout.addItem(spacer12,2,0)
spacer18 = QSpacerItem(321,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
ConnectionPageLayout.addItem(spacer18,3,1)
self.searchFiltersPushButton2 = QPushButton(self.ConnectionPage,"searchFiltersPushButton2")
ConnectionPageLayout.addWidget(self.searchFiltersPushButton2,3,0)
self.addPage(self.ConnectionPage,QString(""))
self.ProbedDevicesPage = QWidget(self,"ProbedDevicesPage")
ProbedDevicesPageLayout = QGridLayout(self.ProbedDevicesPage,1,1,11,6,"ProbedDevicesPageLayout")
self.probedDevicesListView = QListView(self.ProbedDevicesPage,"probedDevicesListView")
self.probedDevicesListView.setAllColumnsShowFocus(1)
ProbedDevicesPageLayout.addMultiCellWidget(self.probedDevicesListView,1,1,0,3)
self.searchFiltersPushButton = QPushButton(self.ProbedDevicesPage,"searchFiltersPushButton")
ProbedDevicesPageLayout.addWidget(self.searchFiltersPushButton,2,0)
self.probeHeadingTextLabel = QLabel(self.ProbedDevicesPage,"probeHeadingTextLabel")
ProbedDevicesPageLayout.addMultiCellWidget(self.probeHeadingTextLabel,0,0,0,3)
self.manualFindPushButton = QPushButton(self.ProbedDevicesPage,"manualFindPushButton")
ProbedDevicesPageLayout.addWidget(self.manualFindPushButton,2,1)
spacer13 = QSpacerItem(101,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
ProbedDevicesPageLayout.addItem(spacer13,2,2)
self.probeUpdatePushButton = QPushButton(self.ProbedDevicesPage,"probeUpdatePushButton")
ProbedDevicesPageLayout.addWidget(self.probeUpdatePushButton,2,3)
self.addPage(self.ProbedDevicesPage,QString(""))
self.PPDPage = QWidget(self,"PPDPage")
PPDPageLayout = QGridLayout(self.PPDPage,1,1,11,6,"PPDPageLayout")
self.ppdListView = QListView(self.PPDPage,"ppdListView")
self.ppdListView.addColumn(self.__tr("PPD File"))
self.ppdListView.addColumn(self.__tr("Description"))
self.ppdListView.setAllColumnsShowFocus(1)
PPDPageLayout.addMultiCellWidget(self.ppdListView,1,1,0,2)
self.otherPPDPushButton = QPushButton(self.PPDPage,"otherPPDPushButton")
self.otherPPDPushButton.setEnabled(1)
PPDPageLayout.addWidget(self.otherPPDPushButton,2,0)
spacer9 = QSpacerItem(320,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
PPDPageLayout.addItem(spacer9,2,1)
self.ppdDefaultsPushButton = QPushButton(self.PPDPage,"ppdDefaultsPushButton")
PPDPageLayout.addWidget(self.ppdDefaultsPushButton,2,2)
self.textLabel1_5 = QLabel(self.PPDPage,"textLabel1_5")
self.textLabel1_5.setAlignment(QLabel.WordBreak | QLabel.AlignVCenter)
PPDPageLayout.addMultiCellWidget(self.textLabel1_5,0,0,0,2)
self.addPage(self.PPDPage,QString(""))
self.PrinterNamePage = QWidget(self,"PrinterNamePage")
PrinterNamePageLayout = QGridLayout(self.PrinterNamePage,1,1,11,6,"PrinterNamePageLayout")
self.groupBox4 = QGroupBox(self.PrinterNamePage,"groupBox4")
self.groupBox4.setColumnLayout(0,Qt.Vertical)
self.groupBox4.layout().setSpacing(6)
self.groupBox4.layout().setMargin(11)
groupBox4Layout = QGridLayout(self.groupBox4.layout())
groupBox4Layout.setAlignment(Qt.AlignTop)
self.printerNameLineEdit = QLineEdit(self.groupBox4,"printerNameLineEdit")
self.printerNameLineEdit.setMaxLength(50)
groupBox4Layout.addWidget(self.printerNameLineEdit,0,1)
self.defaultPrinterNamePushButton = QPushButton(self.groupBox4,"defaultPrinterNamePushButton")
self.defaultPrinterNamePushButton.setEnabled(0)
groupBox4Layout.addWidget(self.defaultPrinterNamePushButton,0,2)
self.textLabel1_2 = QLabel(self.groupBox4,"textLabel1_2")
groupBox4Layout.addWidget(self.textLabel1_2,1,0)
self.textLabel1 = QLabel(self.groupBox4,"textLabel1")
groupBox4Layout.addWidget(self.textLabel1,0,0)
self.printerDescriptionLineEdit = QLineEdit(self.groupBox4,"printerDescriptionLineEdit")
self.printerDescriptionLineEdit.setMaxLength(50)
groupBox4Layout.addWidget(self.printerDescriptionLineEdit,2,1)
self.printerLocationLineEdit = QLineEdit(self.groupBox4,"printerLocationLineEdit")
self.printerLocationLineEdit.setMaxLength(50)
groupBox4Layout.addWidget(self.printerLocationLineEdit,1,1)
self.textLabel2 = QLabel(self.groupBox4,"textLabel2")
groupBox4Layout.addWidget(self.textLabel2,2,0)
PrinterNamePageLayout.addWidget(self.groupBox4,0,0)
self.faxInfoGroupBox = QGroupBox(self.PrinterNamePage,"faxInfoGroupBox")
self.faxInfoGroupBox.setColumnLayout(0,Qt.Vertical)
self.faxInfoGroupBox.layout().setSpacing(6)
self.faxInfoGroupBox.layout().setMargin(11)
faxInfoGroupBoxLayout = QGridLayout(self.faxInfoGroupBox.layout())
faxInfoGroupBoxLayout.setAlignment(Qt.AlignTop)
self.faxNameLineEdit = QLineEdit(self.faxInfoGroupBox,"faxNameLineEdit")
faxInfoGroupBoxLayout.addWidget(self.faxNameLineEdit,1,1)
self.textLabel1_3 = QLabel(self.faxInfoGroupBox,"textLabel1_3")
faxInfoGroupBoxLayout.addWidget(self.textLabel1_3,1,0)
self.textLabel3 = QLabel(self.faxInfoGroupBox,"textLabel3")
faxInfoGroupBoxLayout.addWidget(self.textLabel3,3,0)
self.textLabel2_2 = QLabel(self.faxInfoGroupBox,"textLabel2_2")
faxInfoGroupBoxLayout.addWidget(self.textLabel2_2,2,0)
self.faxCheckBox = QCheckBox(self.faxInfoGroupBox,"faxCheckBox")
self.faxCheckBox.setChecked(1)
faxInfoGroupBoxLayout.addMultiCellWidget(self.faxCheckBox,0,0,0,2)
self.faxNumberLineEdit = QLineEdit(self.faxInfoGroupBox,"faxNumberLineEdit")
self.faxNumberLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxNumberLineEdit,2,1)
self.faxNameCoLineEdit = QLineEdit(self.faxInfoGroupBox,"faxNameCoLineEdit")
self.faxNameCoLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxNameCoLineEdit,3,1)
self.defaultFaxNamePushButton = QPushButton(self.faxInfoGroupBox,"defaultFaxNamePushButton")
self.defaultFaxNamePushButton.setEnabled(0)
faxInfoGroupBoxLayout.addWidget(self.defaultFaxNamePushButton,1,2)
self.textLabel1_2_2 = QLabel(self.faxInfoGroupBox,"textLabel1_2_2")
faxInfoGroupBoxLayout.addWidget(self.textLabel1_2_2,4,0)
self.textLabel2_4 = QLabel(self.faxInfoGroupBox,"textLabel2_4")
faxInfoGroupBoxLayout.addWidget(self.textLabel2_4,5,0)
self.faxLocationLineEdit = QLineEdit(self.faxInfoGroupBox,"faxLocationLineEdit")
self.faxLocationLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxLocationLineEdit,4,1)
self.faxDescriptionLineEdit = QLineEdit(self.faxInfoGroupBox,"faxDescriptionLineEdit")
self.faxDescriptionLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxDescriptionLineEdit,5,1)
PrinterNamePageLayout.addWidget(self.faxInfoGroupBox,1,0)
self.textLabel1_4 = QLabel(self.PrinterNamePage,"textLabel1_4")
PrinterNamePageLayout.addWidget(self.textLabel1_4,3,0)
spacer14 = QSpacerItem(20,20,QSizePolicy.Minimum,QSizePolicy.Expanding)
PrinterNamePageLayout.addItem(spacer14,2,0)
self.addPage(self.PrinterNamePage,QString(""))
self.FinishedPage = QWidget(self,"FinishedPage")
FinishedPageLayout = QGridLayout(self.FinishedPage,1,1,11,6,"FinishedPageLayout")
self.printTestPageCheckBox = QCheckBox(self.FinishedPage,"printTestPageCheckBox")
self.printTestPageCheckBox.setChecked(1)
FinishedPageLayout.addWidget(self.printTestPageCheckBox,4,0)
spacer7 = QSpacerItem(20,20,QSizePolicy.Minimum,QSizePolicy.Expanding)
FinishedPageLayout.addItem(spacer7,3,0)
self.faxGroupBox = QGroupBox(self.FinishedPage,"faxGroupBox")
self.faxGroupBox.setEnabled(0)
self.faxGroupBox.setColumnLayout(0,Qt.Vertical)
self.faxGroupBox.layout().setSpacing(6)
self.faxGroupBox.layout().setMargin(11)
faxGroupBoxLayout = QGridLayout(self.faxGroupBox.layout())
faxGroupBoxLayout.setAlignment(Qt.AlignTop)
self.textLabel7 = QLabel(self.faxGroupBox,"textLabel7")
faxGroupBoxLayout.addWidget(self.textLabel7,0,0)
self.lineEdit5 = QLineEdit(self.faxGroupBox,"lineEdit5")
self.lineEdit5.setFrameShape(QLineEdit.NoFrame)
self.lineEdit5.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit5,0,1)
self.lineEdit6 = QLineEdit(self.faxGroupBox,"lineEdit6")
self.lineEdit6.setFrameShape(QLineEdit.NoFrame)
self.lineEdit6.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit6,1,1)
self.textLabel6 = QLabel(self.faxGroupBox,"textLabel6")
faxGroupBoxLayout.addWidget(self.textLabel6,1,0)
self.textLabel8 = QLabel(self.faxGroupBox,"textLabel8")
faxGroupBoxLayout.addWidget(self.textLabel8,2,0)
self.textLabel8_2 = QLabel(self.faxGroupBox,"textLabel8_2")
faxGroupBoxLayout.addWidget(self.textLabel8_2,3,0)
self.lineEdit7 = QLineEdit(self.faxGroupBox,"lineEdit7")
self.lineEdit7.setFrameShape(QLineEdit.NoFrame)
self.lineEdit7.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit7,2,1)
self.textLabel8_3 = QLabel(self.faxGroupBox,"textLabel8_3")
faxGroupBoxLayout.addWidget(self.textLabel8_3,4,0)
self.lineEdit8 = QLineEdit(self.faxGroupBox,"lineEdit8")
self.lineEdit8.setFrameShape(QLineEdit.NoFrame)
self.lineEdit8.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit8,3,1)
self.lineEdit9 = QLineEdit(self.faxGroupBox,"lineEdit9")
self.lineEdit9.setFrameShape(QLineEdit.NoFrame)
self.lineEdit9.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit9,4,1)
FinishedPageLayout.addWidget(self.faxGroupBox,2,0)
self.groupBox3 = QGroupBox(self.FinishedPage,"groupBox3")
self.groupBox3.setColumnLayout(0,Qt.Vertical)
self.groupBox3.layout().setSpacing(6)
self.groupBox3.layout().setMargin(11)
groupBox3Layout = QGridLayout(self.groupBox3.layout())
groupBox3Layout.setAlignment(Qt.AlignTop)
self.textLabel4 = QLabel(self.groupBox3,"textLabel4")
groupBox3Layout.addWidget(self.textLabel4,2,0)
self.textLabel3_2 = QLabel(self.groupBox3,"textLabel3_2")
groupBox3Layout.addWidget(self.textLabel3_2,1,0)
self.lineEdit4 = QLineEdit(self.groupBox3,"lineEdit4")
self.lineEdit4.setFrameShape(QLineEdit.NoFrame)
self.lineEdit4.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit4,3,1)
self.textLabel2_3 = QLabel(self.groupBox3,"textLabel2_3")
groupBox3Layout.addWidget(self.textLabel2_3,0,0)
self.lineEdit3 = QLineEdit(self.groupBox3,"lineEdit3")
self.lineEdit3.setFrameShape(QLineEdit.NoFrame)
self.lineEdit3.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit3,2,1)
self.lineEdit2 = QLineEdit(self.groupBox3,"lineEdit2")
self.lineEdit2.setFrameShape(QLineEdit.NoFrame)
self.lineEdit2.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit2,1,1)
self.lineEdit1 = QLineEdit(self.groupBox3,"lineEdit1")
self.lineEdit1.setFrameShape(QLineEdit.NoFrame)
self.lineEdit1.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit1,0,1)
self.textLabel5 = QLabel(self.groupBox3,"textLabel5")
groupBox3Layout.addWidget(self.textLabel5,3,0)
FinishedPageLayout.addWidget(self.groupBox3,1,0)
self.textLabel2_5 = QLabel(self.FinishedPage,"textLabel2_5")
self.textLabel2_5.setAlignment(QLabel.WordBreak | QLabel.AlignVCenter)
FinishedPageLayout.addWidget(self.textLabel2_5,0,0)
self.addPage(self.FinishedPage,QString(""))
self.languageChange()
self.resize(QSize(754,456).expandedTo(self.minimumSizeHint()))
self.clearWState(Qt.WState_Polished)
self.connect(self.connectionTypeButtonGroup,SIGNAL("clicked(int)"),self.connectionTypeButtonGroup_clicked)
self.connect(self.probedDevicesListView,SIGNAL("currentChanged(QListViewItem*)"),self.probedDevicesListView_currentChanged)
self.connect(self.printerNameLineEdit,SIGNAL("textChanged(const QString&)"),self.printerNameLineEdit_textChanged)
self.connect(self.defaultPrinterNamePushButton,SIGNAL("clicked()"),self.defaultPrinterNamePushButton_clicked)
self.connect(self.ppdListView,SIGNAL("currentChanged(QListViewItem*)"),self.ppdListView_currentChanged)
self.connect(self.searchFiltersPushButton,SIGNAL("clicked()"),self.searchFiltersPushButton_clicked)
self.connect(self.searchFiltersPushButton2,SIGNAL("clicked()"),self.searchFiltersPushButton2_clicked)
self.connect(self.probeUpdatePushButton,SIGNAL("clicked()"),self.probeUpdatePushButton_clicked)
self.connect(self.manualFindPushButton,SIGNAL("clicked()"),self.manualFindPushButton_clicked)
self.connect(self.printerLocationLineEdit,SIGNAL("textChanged(const QString&)"),self.printerLocationLineEdit_textChanged)
self.connect(self.printerDescriptionLineEdit,SIGNAL("textChanged(const QString&)"),self.printerDescriptionLineEdit_textChanged)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxNameLineEdit.setEnabled)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxNumberLineEdit.setEnabled)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxNameCoLineEdit.setEnabled)
self.connect(self.faxNameLineEdit,SIGNAL("textChanged(const QString&)"),self.faxNameLineEdit_textChanged)
self.connect(self.faxNumberLineEdit,SIGNAL("textChanged(const QString&)"),self.faxNumberLineEdit_textChanged)
self.connect(self.faxNameCoLineEdit,SIGNAL("textChanged(const QString&)"),self.faxNameCoLineEdit_textChanged)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxCheckBox_toggled)
self.connect(self.printTestPageCheckBox,SIGNAL("toggled(bool)"),self.printTestPageCheckBox_toggled)
self.connect(self.defaultFaxNamePushButton,SIGNAL("clicked()"),self.defaultFaxNamePushButton_clicked)
self.connect(self.otherPPDPushButton,SIGNAL("clicked()"),self.otherPPDPushButton_clicked)
self.connect(self.ppdDefaultsPushButton,SIGNAL("clicked()"),self.ppdDefaultsPushButton_clicked)
self.connect(self.faxLocationLineEdit,SIGNAL("textChanged(const QString&)"),self.faxLocationLineEdit_textChanged)
self.connect(self.faxDescriptionLineEdit,SIGNAL("textChanged(const QString&)"),self.faxDescriptionLineEdit_textChanged)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxLocationLineEdit.setEnabled)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxDescriptionLineEdit.setEnabled)
self.setTabOrder(self.printerNameLineEdit,self.printerLocationLineEdit)
self.setTabOrder(self.printerLocationLineEdit,self.printerDescriptionLineEdit)
self.setTabOrder(self.printerDescriptionLineEdit,self.faxCheckBox)
self.setTabOrder(self.faxCheckBox,self.faxNameLineEdit)
self.setTabOrder(self.faxNameLineEdit,self.faxNumberLineEdit)
self.setTabOrder(self.faxNumberLineEdit,self.faxNameCoLineEdit)
self.setTabOrder(self.faxNameCoLineEdit,self.faxLocationLineEdit)
self.setTabOrder(self.faxLocationLineEdit,self.faxDescriptionLineEdit)
self.setTabOrder(self.faxDescriptionLineEdit,self.usbRadioButton)
self.setTabOrder(self.usbRadioButton,self.netRadioButton)
self.setTabOrder(self.netRadioButton,self.parRadioButton)
self.setTabOrder(self.parRadioButton,self.searchFiltersPushButton2)
self.setTabOrder(self.searchFiltersPushButton2,self.probedDevicesListView)
self.setTabOrder(self.probedDevicesListView,self.searchFiltersPushButton)
self.setTabOrder(self.searchFiltersPushButton,self.manualFindPushButton)
self.setTabOrder(self.manualFindPushButton,self.probeUpdatePushButton)
self.setTabOrder(self.probeUpdatePushButton,self.ppdListView)
self.setTabOrder(self.ppdListView,self.otherPPDPushButton)
self.setTabOrder(self.otherPPDPushButton,self.ppdDefaultsPushButton)
self.setTabOrder(self.ppdDefaultsPushButton,self.defaultPrinterNamePushButton)
self.setTabOrder(self.defaultPrinterNamePushButton,self.defaultFaxNamePushButton)
self.setTabOrder(self.defaultFaxNamePushButton,self.lineEdit4)
self.setTabOrder(self.lineEdit4,self.lineEdit3)
self.setTabOrder(self.lineEdit3,self.lineEdit2)
self.setTabOrder(self.lineEdit2,self.lineEdit1)
self.setTabOrder(self.lineEdit1,self.printTestPageCheckBox)
self.setTabOrder(self.printTestPageCheckBox,self.lineEdit5)
self.setTabOrder(self.lineEdit5,self.lineEdit6)
self.setTabOrder(self.lineEdit6,self.lineEdit7)
self.setTabOrder(self.lineEdit7,self.lineEdit8)
self.setTabOrder(self.lineEdit8,self.lineEdit9)
def languageChange(self):
        self.setCaption(self.__tr("HP Device Manager - Printer Setup Wizard"))
self.connectionTypeButtonGroup.setTitle(self.__tr("Connection (I/O) Type"))
self.usbRadioButton.setText(self.__tr("Universal Serial Bus (USB)"))
self.netRadioButton.setText(self.__tr("Network/Ethernet/Wireless (direct connection or JetDirect)"))
self.parRadioButton.setText(self.__tr("Parallel Port (LPT)"))
self.searchFiltersPushButton2.setText(self.__tr("Advanced..."))
self.setTitle(self.ConnectionPage,self.__tr("Choose Connection Type"))
self.searchFiltersPushButton.setText(self.__tr("Advanced..."))
self.probeHeadingTextLabel.setText(self.__tr("probeHeadingTextLabel"))
self.manualFindPushButton.setText(self.__tr("Find Manually..."))
self.probeUpdatePushButton.setText(self.__tr("Refresh"))
self.setTitle(self.ProbedDevicesPage,self.__tr("Select from Discovered Devices"))
self.ppdListView.header().setLabel(0,self.__tr("PPD File"))
self.ppdListView.header().setLabel(1,self.__tr("Description"))
self.otherPPDPushButton.setText(self.__tr("Select Other..."))
self.ppdDefaultsPushButton.setText(self.__tr("Defaults"))
self.textLabel1_5.setText(self.__tr("Please choose the PPD file (by name and description) that most closely matches your printer. <i>Note: The model name of the printer may vary somewhat from the PPD file name, for example, a Deskjet 5550 may have a PPD file with the model name of Deskjet_5500_series.</i>"))
self.setTitle(self.PPDPage,self.__tr("Select/Confirm PPD File"))
self.groupBox4.setTitle(self.__tr("Printer Information"))
self.defaultPrinterNamePushButton.setText(self.__tr("Default"))
self.textLabel1_2.setText(self.__tr("Location:"))
self.textLabel1.setText(self.__tr("Printer Name:"))
self.textLabel2.setText(self.__tr("Description:"))
self.faxInfoGroupBox.setTitle(self.__tr("Fax Information"))
self.textLabel1_3.setText(self.__tr("Fax Name:"))
self.textLabel3.setText(self.__tr("Name/Company:"))
self.textLabel2_2.setText(self.__tr("Fax Number:"))
self.faxCheckBox.setText(self.__tr("Setup PC send fax"))
self.defaultFaxNamePushButton.setText(self.__tr("Default"))
self.textLabel1_2_2.setText(self.__tr("Location:"))
self.textLabel2_4.setText(self.__tr("Description:"))
self.textLabel1_4.setText(self.__tr("Click \"Next >\" to install the printer on your system."))
self.setTitle(self.PrinterNamePage,self.__tr("Enter Printer Information"))
self.printTestPageCheckBox.setText(self.__tr("Send test page to printer"))
self.faxGroupBox.setTitle(self.__tr("Fax Summary"))
self.textLabel7.setText(self.__tr("Fax Number:"))
self.textLabel6.setText(self.__tr("Fax Name:"))
self.textLabel8.setText(self.__tr("Name/Company:"))
self.textLabel8_2.setText(self.__tr("Location:"))
self.textLabel8_3.setText(self.__tr("Description:"))
self.groupBox3.setTitle(self.__tr("Printer Summary"))
self.textLabel4.setText(self.__tr("Description:"))
self.textLabel3_2.setText(self.__tr("Location:"))
self.textLabel2_3.setText(self.__tr("Printer Name:"))
self.textLabel5.setText(self.__tr("PPD File:"))
self.textLabel2_5.setText(self.__tr("The printer has been successfully installed on your system."))
self.setTitle(self.FinishedPage,self.__tr("Finished Adding Printer"))
def connectionTypeButtonGroup_clicked(self,a0):
print("SetupForm_base.connectionTypeButtonGroup_clicked(int): Not implemented yet")
def probedDevicesListView_currentChanged(self,a0):
print("SetupForm_base.probedDevicesListView_currentChanged(QListViewItem*): Not implemented yet")
def printerNameLineEdit_textChanged(self,a0):
print("SetupForm_base.printerNameLineEdit_textChanged(const QString&): Not implemented yet")
def defaultPrinterNamePushButton_clicked(self):
print("SetupForm_base.defaultPrinterNamePushButton_clicked(): Not implemented yet")
def ppdBrowsePushButton_clicked(self):
print("SetupForm_base.ppdBrowsePushButton_clicked(): Not implemented yet")
def ppdFileLineEdit_textChanged(self,a0):
print("SetupForm_base.ppdFileLineEdit_textChanged(const QString&): Not implemented yet")
def ppdListView_currentChanged(self,a0):
print("SetupForm_base.ppdListView_currentChanged(QListViewItem*): Not implemented yet")
def probeUpdatePushButton_clicked(self):
print("SetupForm_base.probeUpdatePushButton_clicked(): Not implemented yet")
def searchFiltersPushButton_clicked(self):
print("SetupForm_base.searchFiltersPushButton_clicked(): Not implemented yet")
def searchFiltersPushButton2_clicked(self):
print("SetupForm_base.searchFiltersPushButton2_clicked(): Not implemented yet")
def manualFindPushButton_clicked(self):
print("SetupForm_base.manualFindPushButton_clicked(): Not implemented yet")
def printerLocationLineEdit_textChanged(self,a0):
print("SetupForm_base.printerLocationLineEdit_textChanged(const QString&): Not implemented yet")
def printerDescriptionLineEdit_textChanged(self,a0):
print("SetupForm_base.printerDescriptionLineEdit_textChanged(const QString&): Not implemented yet")
def faxNameLineEdit_textChanged(self,a0):
print("SetupForm_base.faxNameLineEdit_textChanged(const QString&): Not implemented yet")
def faxNumberLineEdit_textChanged(self,a0):
print("SetupForm_base.faxNumberLineEdit_textChanged(const QString&): Not implemented yet")
def faxNameCoLineEdit_textChanged(self,a0):
print("SetupForm_base.faxNameCoLineEdit_textChanged(const QString&): Not implemented yet")
def printTestPageCheckBox_clicked(self):
print("SetupForm_base.printTestPageCheckBox_clicked(): Not implemented yet")
def faxCheckBox_clicked(self):
print("SetupForm_base.faxCheckBox_clicked(): Not implemented yet")
def faxCheckBox_toggled(self,a0):
print("SetupForm_base.faxCheckBox_toggled(bool): Not implemented yet")
def printTestPageCheckBox_toggled(self,a0):
print("SetupForm_base.printTestPageCheckBox_toggled(bool): Not implemented yet")
def defaultFaxNamePushButton_clicked(self):
print("SetupForm_base.defaultFaxNamePushButton_clicked(): Not implemented yet")
def otherPPDPushButton_clicked(self):
print("SetupForm_base.otherPPDPushButton_clicked(): Not implemented yet")
def ppdDefaultsPushButton_clicked(self):
print("SetupForm_base.ppdDefaultsPushButton_clicked(): Not implemented yet")
def faxLocationLineEdit_textChanged(self,a0):
print("SetupForm_base.faxLocationLineEdit_textChanged(const QString&): Not implemented yet")
def faxDescriptionLineEdit_textChanged(self,a0):
print("SetupForm_base.faxDescriptionLineEdit_textChanged(const QString&): Not implemented yet")
def __tr(self,s,c = None):
return qApp.translate("SetupForm_base",s,c)
| gpl-2.0 | 80,808,447,214,767,820 | 47.781481 | 317 | 0.738289 | false |
bytedance/fedlearner | fedlearner/common/dfs_client.py | 1 | 5157 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""DFS client."""
import os
try:
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
except ImportError:
import tensorflow as tf
from tensorflow import gfile
from tensorflow.python.lib.io import file_io
from . import fl_logging
class DFSClient(object):
"""
support HDFS and NFS
"""
def __init__(self, base_dir):
self._meta_filename = "meta"
self._base_dir = base_dir
def get_data(self, key):
key_path = self._generate_path(key)
if not gfile.Exists(key_path):
return None
with gfile.Open(self._generate_path(key), 'rb') as file:
return file.read()
def set_data(self, key, data):
key_path = self._generate_path(key)
base_dir = os.path.dirname(key_path)
if not gfile.Exists(base_dir):
try:
gfile.MakeDirs(base_dir)
except tf.errors.OpError as e: # pylint: disable=broad-except
fl_logging.warning("create directory %s failed,"
" reason: %s", base_dir, str(e))
return False
file_io.atomic_write_string_to_file(key_path, data)
return True
def delete(self, key):
try:
gfile.Remove(self._generate_path(key))
return True
except tf.errors.OpError as e:
fl_logging.warning("delete key %s failed, reason: %s",
key, str(e))
return False
def delete_prefix(self, key):
try:
gfile.DeleteRecursively(self._generate_path(key, with_meta=False))
return True
except Exception as e: # pylint: disable=broad-except
fl_logging.warning("delete prefix with key %s failed,"
" reason: %s", key, str(e))
return False
def cas(self, key, old_data, new_data):
org_data = self.get_data(key)
if isinstance(org_data, bytes):
org_data = org_data.decode('utf-8')
if isinstance(old_data, bytes):
old_data = old_data.decode('utf-8')
if org_data != old_data:
fl_logging.warning("CAS failed. \norg data: %s old data: %s"
" new data: %s", org_data, old_data, new_data)
return False
return self.set_data(key, new_data)
def get_prefix_kvs(self, prefix, ignore_prefix=False):
kvs = []
target_path = self._generate_path(prefix, with_meta=False)
cur_paths = [target_path]
children_paths = []
while cur_paths:
for path in cur_paths:
filenames = []
try:
if gfile.IsDirectory(path):
filenames = gfile.ListDirectory(path)
except Exception as e: # pylint: disable=broad-except
fl_logging.warning("get prefix kvs %s failed, "
" reason: %s", path, str(e))
break
for filename in sorted(filenames):
file_path = "/".join([path, filename])
if gfile.IsDirectory(file_path):
children_paths.append(file_path)
else:
if ignore_prefix and path == target_path:
continue
nkey = self.normalize_output_key(
path, self._base_dir).encode()
with gfile.Open(file_path, 'rb') as file:
kvs.append((nkey, file.read()))
cur_paths = children_paths
children_paths = []
return kvs
def _generate_path(self, key, with_meta=True):
if with_meta:
return '/'.join([self._base_dir, self._normalize_input_key(key),
self._meta_filename])
return '/'.join([self._base_dir, self._normalize_input_key(key)])
@staticmethod
def _normalize_input_key(key):
skip_cnt = 0
while key[skip_cnt] == '.' or key[skip_cnt] == '/':
skip_cnt += 1
if skip_cnt > 0:
return key[skip_cnt:]
return key
@staticmethod
def normalize_output_key(key, base_dir):
        assert key.startswith(base_dir)
return key[len(base_dir)+1:]
@classmethod
def destroy_client_pool(cls):
pass
| apache-2.0 | 6,400,063,321,914,510,000 | 34.565517 | 78 | 0.54935 | false |
tkarna/cofs | examples/bottomFriction/steadyChannel.py | 1 | 6962 | """
Steady-state channel flow in 3D
===============================
Steady state flow in a channel subject to bottom friction.
This test reproduces the "channel" test case found in GOTM test suite [1]
and also [2].
This case tests the turbulence closure model, vertical viscosity and bottom
boundary layer. Water column is initially at rest. Circulation is driven by
a constant elevation gradient until it reaches a steady state. Here the
elevation gradient is replaced by an equivalent source term in the
momentum equation.
[1] http://www.gotm.net/
[2] Karna et al. (2012). Coupling of a discontinuous Galerkin finite element
marine model with a finite difference turbulence closure model.
Ocean Modelling, 47:55-64.
http://dx.doi.org/10.1016/j.ocemod.2012.01.001
"""
from thetis import *
import numpy
import os
depth = 15.0
surf_slope = -1.0e-5 # d elev/dx
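# The constant surface slope above is not applied through the free surface;
# further down it is converted into an equivalent body force
# F_x = -g * d(elev)/dx ~= 9.81 * 1.0e-5 m s**-2 assigned to
# options.momentum_source_2d (a sketch of the equivalence, assuming
# g = 9.81 m s**-2).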
def bottom_friction_test(layers=25, gls_closure='k-omega',
stability_func='Canuto B',
fast_convergence=False, verify=True,
iterate=True, load_export_ix=None, **model_options):
outputdir = 'outputs'
# set mesh resolution
dx = 2500.0
nx = 3 # nb elements in flow direction
lx = nx*dx
ny = 3 # nb elements in cross direction
ly = ny*dx
mesh2d = PeriodicRectangleMesh(nx, ny, lx, ly, direction='x', reorder=True)
dt = 25.0
t_end = 12 * 3600.0 # sufficient to reach ~steady state
if fast_convergence:
t_end = 5 * 3600.0
t_export = 400.0
u_mag = 1.0
if os.getenv('THETIS_REGRESSION_TEST') is not None:
t_end = 5*t_export
# bathymetry
p1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry2d = Function(p1_2d, name='Bathymetry')
bathymetry2d.assign(depth)
# create solver
solver_obj = solver.FlowSolver(mesh2d, bathymetry2d, layers)
options = solver_obj.options
options.element_family = 'dg-dg'
options.timestepper_type = 'SSPRK22'
options.solve_salinity = False
options.solve_temperature = False
options.use_implicit_vertical_diffusion = True
options.use_bottom_friction = True
options.bottom_roughness = Constant(1.5e-3)
options.use_turbulence = True
options.vertical_viscosity = Constant(1.3e-6) # background value
options.vertical_diffusivity = Constant(1.4e-7) # background value
# options.use_ale_moving_mesh = False
options.use_limiter_for_tracers = True
options.simulation_export_time = t_export
options.timestepper_options.use_automatic_timestep = False
options.timestep = dt
options.simulation_end_time = t_end
options.horizontal_velocity_scale = Constant(u_mag)
options.fields_to_export = ['uv_2d', 'elev_2d', 'elev_3d', 'uv_3d',
'uv_dav_2d',
'eddy_visc_3d', 'shear_freq_3d',
'tke_3d', 'psi_3d', 'eps_3d', 'len_3d', ]
options.fields_to_export_hdf5 = ['uv_2d', 'elev_2d', 'uv_3d',
'eddy_visc_3d', 'eddy_diff_3d',
'shear_freq_3d',
'tke_3d', 'psi_3d', 'eps_3d', 'len_3d', ]
options.update(model_options)
turb_options = options.turbulence_model_options
turb_options.apply_defaults(gls_closure)
turb_options.stability_function_name = stability_func
layer_str = 'nz{:}'.format(layers)
odir = '_'.join([outputdir, layer_str,
turb_options.closure_name.replace(' ', '-'),
turb_options.stability_function_name.replace(' ', '-')])
options.output_directory = odir
solver_obj.create_function_spaces()
# drive flow with momentum source term equivalent to constant surface slope
pressure_grad = -physical_constants['g_grav'] * surf_slope
options.momentum_source_2d = Constant((pressure_grad, 0))
solver_obj.create_equations()
xyz = SpatialCoordinate(solver_obj.mesh)
if fast_convergence:
        # speed up convergence by starting with u > 0
u_init_2d = 0.5
solver_obj.assign_initial_conditions(uv_2d=Constant((u_init_2d, 0)))
# consistent 3d velocity with slope
solver_obj.fields.uv_3d.project(as_vector((u_init_2d*0.3*(xyz[2]/depth + 0.5), 0, 0)))
if iterate:
print_output('Exporting to ' + options.output_directory)
solver_obj.iterate()
if verify and os.getenv('THETIS_REGRESSION_TEST') is None:
# compare against logarithmic velocity profile
# u = u_b / kappa * log((z + bath + z_0)/z_0)
# estimate bottom friction velocity from maximal u
u_max = 0.9 # max velocity in [2] Fig 2.
l2_tol = 0.05
kappa = solver_obj.options.turbulence_model_options.kappa
z_0 = options.bottom_roughness.dat.data[0]
u_b = u_max * kappa / np.log((depth + z_0)/z_0)
log_uv = Function(solver_obj.function_spaces.P1DGv, name='log velocity')
log_uv.project(as_vector((u_b / kappa * ln((xyz[2] + depth + z_0)/z_0), 0, 0)))
out = File(options.output_directory + '/log_uv.pvd')
out.write(log_uv)
uv_p1_dg = Function(solver_obj.function_spaces.P1DGv, name='velocity p1dg')
uv_p1_dg.project(solver_obj.fields.uv_3d + solver_obj.fields.uv_dav_3d)
volume = lx*ly*depth
uv_l2_err = errornorm(log_uv, uv_p1_dg)/numpy.sqrt(volume)
assert uv_l2_err < l2_tol, 'L2 error is too large: {:} > {:}'.format(uv_l2_err, l2_tol)
print_output('L2 error {:.4f} PASSED'.format(uv_l2_err))
elif load_export_ix is not None:
print_output('Loading state: {:}'.format(load_export_ix))
solver_obj.load_state(load_export_ix)
return solver_obj
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Run bottom friction test case',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-n', '--nlevels', type=int, default=50,
help='number of vertical levels')
parser.add_argument('-m', '--model', default='k-epsilon',
choices=['k-epsilon', 'k-omega', 'gls'],
help='GLS turbulence closure model')
parser.add_argument('-s', '--stability-func', default='Canuto-A',
choices=['Canuto-A', 'Canuto-B', 'Cheng'],
help='Stability function name')
parser.add_argument('-v', '--verify', action='store_true',
help='Verify correctness against log profile.')
args = parser.parse_args()
model = args.model
if model == 'gls':
model = 'Generic Length Scale'
bottom_friction_test(
layers=args.nlevels,
gls_closure=model,
stability_func=args.stability_func.replace('-', ' '),
verify=args.verify)
| mit | 6,528,286,562,185,436,000 | 39.952941 | 99 | 0.607584 | false |
lseman/pylspm | pylspm/bootstraping.py | 1 | 4635 | # EFRON, B.; TIBSHIRANI, R. J. An Introduction to the Bootstrap. 1993.
import pandas
import numpy as np
from numpy import inf
import pandas as pd
import scipy.stats
from scipy.stats import norm
from .pylspm import PyLSpm
from .boot import PyLSboot
def bootstrap(nrboot, cores, data_, lvmodel,
mvmodel, scheme, regression, h='0', maxit='100', method='percentile', boolen_stine=0):
if boolen_stine == 1:
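        # Bollen-Stine style transformation (as implemented here): the data
        # are centred, rotated by inv(chol(S)) and scaled by the element-wise
        # square root of the model-implied matrix, so that the resampled data
        # conform to the fitted model before the bootstrap replications.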
segmento = 'SEM'
data_boolen = data_.drop(segmento, axis=1)
colunas = data_boolen.columns
S = pd.DataFrame.cov(data_boolen)
chol = np.linalg.cholesky(S)
A = (pd.DataFrame(np.linalg.inv(chol)))
boolen = PyLSpm(data_boolen, lvmodel, mvmodel,
scheme, regression, 0, 100)
implied = np.sqrt(boolen.implied())
data_boolen = data_boolen - data_boolen.mean()
data_boolen = np.dot(np.dot(data_boolen, A), implied)
data_ = pd.DataFrame(data_boolen, columns=colunas)
tese = PyLSboot(nrboot, cores, data_, lvmodel,
mvmodel, scheme, regression, 0, 100)
resultados = tese.boot()
default = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, maxit).path_matrix.values
current = list(filter(None.__ne__, resultados))
current = np.sort(current, axis=0)
if (method == 'percentile'):
for i in range(len(current[0])):
current_ = [j[i] for j in current]
print('MEAN')
mean_ = (np.round(np.mean(current_, axis=0), 4))
print(mean_)
print('STD')
print(np.round(np.std(current_, axis=0, ddof=1), 4))
print('CI 2.5')
print(np.round(np.percentile(current_, 2.5, axis=0), 4))
print('CI 97.5')
print(np.round(np.percentile(current_, 97.5, axis=0), 4))
print('t-value')
tstat = np.nan_to_num(np.mean(current_, axis=0) /
np.std(current_, axis=0, ddof=1))
print(tstat)
print('p-value')
pvalue = np.round((scipy.stats.t.sf(
tstat, len(current_) - 1)), 5)
print(pvalue)
return pvalue, mean_
elif (method == 'bca'):
default = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, 0, 100).path_matrix.values
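        # Bias-corrected and accelerated (BCa) intervals, in the spirit of
        # Efron & Tibshirani (1993): the bias correction
        #   z0 = Phi^-1( #{theta* < theta_hat} / B )
        # and the acceleration
        #   a = sum((theta_bar - theta_i)^3)
        #       / (6 * (sum((theta_bar - theta_i)^2))^(3/2))
        # computed from the jackknife estimates theta_i are used below to
        # shift the percentile endpoints before reading off the bounds.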
for i in range(len(current[0])):
current_ = [j[i] for j in current]
alpha = 0.05
if np.iterable(alpha):
alphas = np.array(alpha)
else:
alphas = np.array([alpha / 2, 1 - alpha / 2])
# bias
z0 = norm.ppf(
(np.sum(current_ < default, axis=0)) / len(current_))
zs = z0 + \
norm.ppf(alphas).reshape(alphas.shape + (1,) * z0.ndim)
# acceleration and jackknife
jstat = PyLSboot(len(data_), cores, data_,
lvmodel, mvmodel, scheme, regression, 0, 100)
jstat = jstat.jk()
jstat = list(filter(None.__ne__, jstat))
jmean = np.mean(jstat, axis=0)
a = np.sum((jmean - jstat)**3, axis=0) / \
(6.0 * np.sum((jmean - jstat)**2, axis=0)**(3 / 2))
zs = z0 + \
norm.ppf(alphas).reshape(alphas.shape + (1,) * z0.ndim)
avals = norm.cdf(z0 + zs / (1 - a * zs))
nvals = np.round((len(current_) - 1) * avals)
nvals = np.nan_to_num(nvals).astype('int')
low_conf = np.zeros(shape=(len(current_[0]), len(current_[0])))
high_conf = np.zeros(
shape=(len(current_[0]), len(current_[0])))
for i in range(len(current_[0])):
for j in range(len(current_[0])):
low_conf[i][j] = (current_[nvals[0][i][j]][i][j])
            for i in range(len(current_[0])):
                for j in range(len(current_[0])):
high_conf[i][j] = (current_[nvals[1][i][j]][i][j])
print('MEAN')
print(np.round(np.mean(current_, axis=0), 4))
print('CI LOW')
print(avals[0])
print(low_conf)
print('CI HIGH')
print(avals[1])
print(high_conf)
print('t-value')
tstat = np.nan_to_num(np.mean(current_, axis=0) /
np.std(current_, axis=0, ddof=1))
print(tstat)
| mit | 4,771,311,085,366,402,000 | 33.381679 | 100 | 0.486731 | false |
joopert/home-assistant | homeassistant/components/bluetooth_tracker/device_tracker.py | 1 | 5963 | """Tracking for bluetooth devices."""
import asyncio
import logging
from typing import List, Set, Tuple, Optional
# pylint: disable=import-error
import bluetooth
from bt_proximity import BluetoothRSSI
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.components.device_tracker.const import (
CONF_SCAN_INTERVAL,
CONF_TRACK_NEW,
DEFAULT_TRACK_NEW,
SCAN_INTERVAL,
SOURCE_TYPE_BLUETOOTH,
)
from homeassistant.components.device_tracker.legacy import (
YAML_DEVICES,
async_load_config,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, SERVICE_UPDATE
_LOGGER = logging.getLogger(__name__)
BT_PREFIX = "BT_"
CONF_REQUEST_RSSI = "request_rssi"
CONF_DEVICE_ID = "device_id"
DEFAULT_DEVICE_ID = -1
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_TRACK_NEW): cv.boolean,
vol.Optional(CONF_REQUEST_RSSI): cv.boolean,
vol.Optional(CONF_DEVICE_ID, default=DEFAULT_DEVICE_ID): vol.All(
vol.Coerce(int), vol.Range(min=-1)
),
}
)
def is_bluetooth_device(device) -> bool:
"""Check whether a device is a bluetooth device by its mac."""
return device.mac and device.mac[:3].upper() == BT_PREFIX
def discover_devices(device_id: int) -> List[Tuple[str, str]]:
"""Discover Bluetooth devices."""
result = bluetooth.discover_devices(
duration=8,
lookup_names=True,
flush_cache=True,
lookup_class=False,
device_id=device_id,
)
_LOGGER.debug("Bluetooth devices discovered = %d", len(result))
return result
async def see_device(
hass: HomeAssistantType, async_see, mac: str, device_name: str, rssi=None
) -> None:
"""Mark a device as seen."""
attributes = {}
if rssi is not None:
attributes["rssi"] = rssi
await async_see(
mac=f"{BT_PREFIX}{mac}",
host_name=device_name,
attributes=attributes,
source_type=SOURCE_TYPE_BLUETOOTH,
)
async def get_tracking_devices(hass: HomeAssistantType) -> Tuple[Set[str], Set[str]]:
"""
Load all known devices.
We just need the devices so set consider_home and home range to 0
"""
yaml_path: str = hass.config.path(YAML_DEVICES)
devices = await async_load_config(yaml_path, hass, 0)
bluetooth_devices = [device for device in devices if is_bluetooth_device(device)]
devices_to_track: Set[str] = {
device.mac[3:] for device in bluetooth_devices if device.track
}
devices_to_not_track: Set[str] = {
device.mac[3:] for device in bluetooth_devices if not device.track
}
return devices_to_track, devices_to_not_track
def lookup_name(mac: str) -> Optional[str]:
"""Lookup a Bluetooth device name."""
_LOGGER.debug("Scanning %s", mac)
return bluetooth.lookup_name(mac, timeout=5)
async def async_setup_scanner(
hass: HomeAssistantType, config: dict, async_see, discovery_info=None
):
"""Set up the Bluetooth Scanner."""
device_id: int = config.get(CONF_DEVICE_ID)
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
request_rssi = config.get(CONF_REQUEST_RSSI, False)
update_bluetooth_lock = asyncio.Lock()
# If track new devices is true discover new devices on startup.
track_new: bool = config.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
_LOGGER.debug("Tracking new devices is set to %s", track_new)
devices_to_track, devices_to_not_track = await get_tracking_devices(hass)
if not devices_to_track and not track_new:
_LOGGER.debug("No Bluetooth devices to track and not tracking new devices")
if request_rssi:
_LOGGER.debug("Detecting RSSI for devices")
async def perform_bluetooth_update():
"""Discover Bluetooth devices and update status."""
_LOGGER.debug("Performing Bluetooth devices discovery and update")
tasks = []
try:
if track_new:
devices = await hass.async_add_executor_job(discover_devices, device_id)
for mac, device_name in devices:
if mac not in devices_to_track and mac not in devices_to_not_track:
devices_to_track.add(mac)
for mac in devices_to_track:
device_name = await hass.async_add_executor_job(lookup_name, mac)
if device_name is None:
# Could not lookup device name
continue
rssi = None
if request_rssi:
client = BluetoothRSSI(mac)
rssi = await hass.async_add_executor_job(client.request_rssi)
client.close()
tasks.append(see_device(hass, async_see, mac, device_name, rssi))
if tasks:
await asyncio.wait(tasks)
except bluetooth.BluetoothError:
_LOGGER.exception("Error looking up Bluetooth device")
async def update_bluetooth(now=None):
"""Lookup Bluetooth devices and update status."""
# If an update is in progress, we don't do anything
if update_bluetooth_lock.locked():
_LOGGER.debug(
"Previous execution of update_bluetooth is taking longer than the scheduled update of interval %s",
interval,
)
return
async with update_bluetooth_lock:
await perform_bluetooth_update()
async def handle_manual_update_bluetooth(call):
"""Update bluetooth devices on demand."""
await update_bluetooth()
hass.async_create_task(update_bluetooth())
async_track_time_interval(hass, update_bluetooth, interval)
hass.services.async_register(DOMAIN, SERVICE_UPDATE, handle_manual_update_bluetooth)
return True
| apache-2.0 | -5,821,608,202,752,087,000 | 30.550265 | 115 | 0.64917 | false |
mtury/scapy | scapy/layers/tls/crypto/suites.py | 1 | 30034 | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS cipher suites.
A comprehensive list of specified cipher suites can be consulted at:
https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
"""
from __future__ import absolute_import
from scapy.layers.tls.crypto.kx_algs import _tls_kx_algs
from scapy.layers.tls.crypto.hash import _tls_hash_algs
from scapy.layers.tls.crypto.h_mac import _tls_hmac_algs
from scapy.layers.tls.crypto.ciphers import _tls_cipher_algs
import scapy.modules.six as six
def get_algs_from_ciphersuite_name(ciphersuite_name):
"""
    Return the 5-tuple made of the Key Exchange Algorithm class, the Cipher
    class, the HMAC class, the Hash class and a TLS 1.3 boolean flag, through
    the parsing of the ciphersuite name.
"""
tls1_3 = False
if ciphersuite_name.startswith("TLS"):
s = ciphersuite_name[4:]
if s.endswith("CCM") or s.endswith("CCM_8"):
kx_name, s = s.split("_WITH_")
kx_alg = _tls_kx_algs.get(kx_name)
hash_alg = _tls_hash_algs.get("SHA256")
cipher_alg = _tls_cipher_algs.get(s)
hmac_alg = None
else:
if "WITH" in s:
kx_name, s = s.split("_WITH_")
kx_alg = _tls_kx_algs.get(kx_name)
else:
tls1_3 = True
kx_alg = _tls_kx_algs.get("TLS13")
hash_name = s.split('_')[-1]
hash_alg = _tls_hash_algs.get(hash_name)
cipher_name = s[:-(len(hash_name) + 1)]
if tls1_3:
cipher_name += "_TLS13"
cipher_alg = _tls_cipher_algs.get(cipher_name)
hmac_alg = None
if cipher_alg is not None and cipher_alg.type != "aead":
hmac_name = "HMAC-%s" % hash_name
hmac_alg = _tls_hmac_algs.get(hmac_name)
elif ciphersuite_name.startswith("SSL"):
s = ciphersuite_name[7:]
kx_alg = _tls_kx_algs.get("SSLv2")
cipher_name, hash_name = s.split("_WITH_")
cipher_alg = _tls_cipher_algs.get(cipher_name.rstrip("_EXPORT40"))
kx_alg.export = cipher_name.endswith("_EXPORT40")
hmac_alg = _tls_hmac_algs.get("HMAC-NULL")
hash_alg = _tls_hash_algs.get(hash_name)
return kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3
_tls_cipher_suites = {}
_tls_cipher_suites_cls = {}
class _GenericCipherSuiteMetaclass(type):
"""
Cipher suite classes are automatically registered through this metaclass.
    Their name attribute matches their respective class name.
We also pre-compute every expected length of the key block to be generated,
which may vary according to the current tls_version. The default is set to
the TLS 1.2 length, and the value should be set at class instantiation.
Regarding the AEAD cipher suites, note that the 'hmac_alg' attribute will
be set to None. Yet, we always need a 'hash_alg' for the PRF.
"""
def __new__(cls, cs_name, bases, dct):
cs_val = dct.get("val")
if cs_name != "_GenericCipherSuite":
kx, c, hm, h, tls1_3 = get_algs_from_ciphersuite_name(cs_name)
if c is None or h is None or (kx is None and not tls1_3):
dct["usable"] = False
else:
dct["usable"] = True
dct["name"] = cs_name
dct["kx_alg"] = kx
dct["cipher_alg"] = c
dct["hmac_alg"] = hm
dct["hash_alg"] = h
if not tls1_3:
kb_len = 2 * c.key_len
if c.type == "stream" or c.type == "block":
kb_len += 2 * hm.key_len
kb_len_v1_0 = kb_len
if c.type == "block":
kb_len_v1_0 += 2 * c.block_size
# no explicit IVs added for TLS 1.1+
elif c.type == "aead":
kb_len_v1_0 += 2 * c.fixed_iv_len
kb_len += 2 * c.fixed_iv_len
dct["_key_block_len_v1_0"] = kb_len_v1_0
dct["key_block_len"] = kb_len
_tls_cipher_suites[cs_val] = cs_name
the_class = super(_GenericCipherSuiteMetaclass, cls).__new__(cls,
cs_name,
bases,
dct)
if cs_name != "_GenericCipherSuite":
_tls_cipher_suites_cls[cs_val] = the_class
return the_class
class _GenericCipherSuite(six.with_metaclass(_GenericCipherSuiteMetaclass, object)): # noqa: E501
def __init__(self, tls_version=0x0303):
"""
Most of the attributes are fixed and have already been set by the
metaclass, but we still have to provide tls_version differentiation.
        For now, the key_block_len remains the only application of this.
Indeed for TLS 1.1+, when using a block cipher, there are no implicit
IVs derived from the master secret. Note that an overlong key_block_len
would not affect the secret generation (the trailing bytes would
simply be discarded), but we still provide this for completeness.
"""
super(_GenericCipherSuite, self).__init__()
if tls_version <= 0x301:
self.key_block_len = self._key_block_len_v1_0
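# Worked example of the pre-computed lengths (assuming the usual TLS sizes:
# 16-byte AES-128 keys, 20-byte HMAC-SHA1 keys, 16-byte AES blocks):
# TLS_RSA_WITH_AES_128_CBC_SHA has key_block_len = 2*16 + 2*20 = 72 bytes for
# TLS 1.1+, and 72 + 2*16 = 104 bytes when instantiated with
# tls_version <= 0x0301 (implicit IVs included).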
class TLS_NULL_WITH_NULL_NULL(_GenericCipherSuite):
val = 0x0000
class TLS_RSA_WITH_NULL_MD5(_GenericCipherSuite):
val = 0x0001
class TLS_RSA_WITH_NULL_SHA(_GenericCipherSuite):
val = 0x0002
class TLS_RSA_EXPORT_WITH_RC4_40_MD5(_GenericCipherSuite):
val = 0x0003
class TLS_RSA_WITH_RC4_128_MD5(_GenericCipherSuite):
val = 0x0004
class TLS_RSA_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0x0005
class TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5(_GenericCipherSuite):
val = 0x0006
class TLS_RSA_WITH_IDEA_CBC_SHA(_GenericCipherSuite):
val = 0x0007
class TLS_RSA_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x0008
class TLS_RSA_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x0009
class TLS_RSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x000A
class TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x000B
class TLS_DH_DSS_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x000C
class TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x000D
class TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x000E
class TLS_DH_RSA_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x000F
class TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x0010
class TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x0011
class TLS_DHE_DSS_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x0012
class TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x0013
class TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x0014
class TLS_DHE_RSA_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x0015
class TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x0016
class TLS_DH_anon_EXPORT_WITH_RC4_40_MD5(_GenericCipherSuite):
val = 0x0017
class TLS_DH_anon_WITH_RC4_128_MD5(_GenericCipherSuite):
val = 0x0018
class TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x0019
class TLS_DH_anon_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x001A
class TLS_DH_anon_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x001B
class TLS_KRB5_WITH_DES_CBC_SHA(_GenericCipherSuite):
val = 0x001E
class TLS_KRB5_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x001F
class TLS_KRB5_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0x0020
class TLS_KRB5_WITH_IDEA_CBC_SHA(_GenericCipherSuite):
val = 0x0021
class TLS_KRB5_WITH_DES_CBC_MD5(_GenericCipherSuite):
val = 0x0022
class TLS_KRB5_WITH_3DES_EDE_CBC_MD5(_GenericCipherSuite):
val = 0x0023
class TLS_KRB5_WITH_RC4_128_MD5(_GenericCipherSuite):
val = 0x0024
class TLS_KRB5_WITH_IDEA_CBC_MD5(_GenericCipherSuite):
val = 0x0025
class TLS_KRB5_EXPORT_WITH_DES40_CBC_SHA(_GenericCipherSuite):
val = 0x0026
class TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA(_GenericCipherSuite):
val = 0x0027
class TLS_KRB5_EXPORT_WITH_RC4_40_SHA(_GenericCipherSuite):
val = 0x0028
class TLS_KRB5_EXPORT_WITH_DES40_CBC_MD5(_GenericCipherSuite):
val = 0x0029
class TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5(_GenericCipherSuite):
val = 0x002A
class TLS_KRB5_EXPORT_WITH_RC4_40_MD5(_GenericCipherSuite):
val = 0x002B
class TLS_PSK_WITH_NULL_SHA(_GenericCipherSuite):
val = 0x002C
class TLS_DHE_PSK_WITH_NULL_SHA(_GenericCipherSuite):
val = 0x002D
class TLS_RSA_PSK_WITH_NULL_SHA(_GenericCipherSuite):
val = 0x002E
class TLS_RSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x002F
class TLS_DH_DSS_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0030
class TLS_DH_RSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0031
class TLS_DHE_DSS_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0032
class TLS_DHE_RSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0033
class TLS_DH_anon_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0034
class TLS_RSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0035
class TLS_DH_DSS_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0036
class TLS_DH_RSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0037
class TLS_DHE_DSS_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0038
class TLS_DHE_RSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0039
class TLS_DH_anon_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x003A
class TLS_RSA_WITH_NULL_SHA256(_GenericCipherSuite):
val = 0x003B
class TLS_RSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x003C
class TLS_RSA_WITH_AES_256_CBC_SHA256(_GenericCipherSuite):
val = 0x003D
class TLS_DH_DSS_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x003E
class TLS_DH_RSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x003F
class TLS_DHE_DSS_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x0040
class TLS_RSA_WITH_CAMELLIA_128_CBC_SHA(_GenericCipherSuite):
val = 0x0041
class TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA(_GenericCipherSuite):
val = 0x0042
class TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA(_GenericCipherSuite):
val = 0x0043
class TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA(_GenericCipherSuite):
val = 0x0044
class TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA(_GenericCipherSuite):
val = 0x0045
class TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA(_GenericCipherSuite):
val = 0x0046
class TLS_DHE_RSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x0067
class TLS_DH_DSS_WITH_AES_256_CBC_SHA256(_GenericCipherSuite):
val = 0x0068
class TLS_DH_RSA_WITH_AES_256_CBC_SHA256(_GenericCipherSuite):
val = 0x0069
class TLS_DHE_DSS_WITH_AES_256_CBC_SHA256(_GenericCipherSuite):
val = 0x006A
class TLS_DHE_RSA_WITH_AES_256_CBC_SHA256(_GenericCipherSuite):
val = 0x006B
class TLS_DH_anon_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x006C
class TLS_DH_anon_WITH_AES_256_CBC_SHA256(_GenericCipherSuite):
val = 0x006D
class TLS_RSA_WITH_CAMELLIA_256_CBC_SHA(_GenericCipherSuite):
val = 0x0084
class TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA(_GenericCipherSuite):
val = 0x0085
class TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA(_GenericCipherSuite):
val = 0x0086
class TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA(_GenericCipherSuite):
val = 0x0087
class TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA(_GenericCipherSuite):
val = 0x0088
class TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA(_GenericCipherSuite):
val = 0x0089
class TLS_PSK_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0x008A
class TLS_PSK_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x008B
class TLS_PSK_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x008C
class TLS_PSK_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x008D
class TLS_DHE_PSK_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0x008E
class TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x008F
class TLS_DHE_PSK_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0090
class TLS_DHE_PSK_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0091
class TLS_RSA_PSK_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0x0092
class TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0x0093
class TLS_RSA_PSK_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0x0094
class TLS_RSA_PSK_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0x0095
class TLS_RSA_WITH_SEED_CBC_SHA(_GenericCipherSuite):
val = 0x0096
class TLS_DH_DSS_WITH_SEED_CBC_SHA(_GenericCipherSuite):
val = 0x0097
class TLS_DH_RSA_WITH_SEED_CBC_SHA(_GenericCipherSuite):
val = 0x0098
class TLS_DHE_DSS_WITH_SEED_CBC_SHA(_GenericCipherSuite):
val = 0x0099
class TLS_DHE_RSA_WITH_SEED_CBC_SHA(_GenericCipherSuite):
val = 0x009A
class TLS_DH_anon_WITH_SEED_CBC_SHA(_GenericCipherSuite):
val = 0x009B
class TLS_RSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x009C
class TLS_RSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x009D
class TLS_DHE_RSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x009E
class TLS_DHE_RSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x009F
class TLS_DH_RSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00A0
class TLS_DH_RSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00A1
class TLS_DHE_DSS_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00A2
class TLS_DHE_DSS_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00A3
class TLS_DH_DSS_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00A4
class TLS_DH_DSS_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00A5
class TLS_DH_anon_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00A6
class TLS_DH_anon_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00A7
class TLS_PSK_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00A8
class TLS_PSK_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00A9
class TLS_DHE_PSK_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00AA
class TLS_DHE_PSK_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00AB
class TLS_RSA_PSK_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x00AC
class TLS_RSA_PSK_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x00AD
class TLS_PSK_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00AE
class TLS_PSK_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0x00AF
class TLS_PSK_WITH_NULL_SHA256(_GenericCipherSuite):
val = 0x00B0
class TLS_PSK_WITH_NULL_SHA384(_GenericCipherSuite):
val = 0x00B1
class TLS_DHE_PSK_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00B2
class TLS_DHE_PSK_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0x00B3
class TLS_DHE_PSK_WITH_NULL_SHA256(_GenericCipherSuite):
val = 0x00B4
class TLS_DHE_PSK_WITH_NULL_SHA384(_GenericCipherSuite):
val = 0x00B5
class TLS_RSA_PSK_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00B6
class TLS_RSA_PSK_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0x00B7
class TLS_RSA_PSK_WITH_NULL_SHA256(_GenericCipherSuite):
val = 0x00B8
class TLS_RSA_PSK_WITH_NULL_SHA384(_GenericCipherSuite):
val = 0x00B9
class TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00BA
class TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00BB
class TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00BC
class TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00BD
class TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00BE
class TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0x00BF
class TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256(_GenericCipherSuite):
val = 0x00C0
class TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256(_GenericCipherSuite):
val = 0x00C1
class TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256(_GenericCipherSuite):
val = 0x00C2
class TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256(_GenericCipherSuite):
val = 0x00C3
class TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256(_GenericCipherSuite):
val = 0x00C4
class TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256(_GenericCipherSuite):
val = 0x00C5
# class TLS_EMPTY_RENEGOTIATION_INFO_CSV(_GenericCipherSuite):
# val = 0x00FF
# class TLS_FALLBACK_SCSV(_GenericCipherSuite):
# val = 0x5600
class TLS_ECDH_ECDSA_WITH_NULL_SHA(_GenericCipherSuite):
val = 0xC001
class TLS_ECDH_ECDSA_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0xC002
class TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC003
class TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC004
class TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC005
class TLS_ECDHE_ECDSA_WITH_NULL_SHA(_GenericCipherSuite):
val = 0xC006
class TLS_ECDHE_ECDSA_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0xC007
class TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC008
class TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC009
class TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC00A
class TLS_ECDH_RSA_WITH_NULL_SHA(_GenericCipherSuite):
val = 0xC00B
class TLS_ECDH_RSA_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0xC00C
class TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC00D
class TLS_ECDH_RSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC00E
class TLS_ECDH_RSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC00F
class TLS_ECDHE_RSA_WITH_NULL_SHA(_GenericCipherSuite):
val = 0xC010
class TLS_ECDHE_RSA_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0xC011
class TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC012
class TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC013
class TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC014
class TLS_ECDH_anon_WITH_NULL_SHA(_GenericCipherSuite):
val = 0xC015
class TLS_ECDH_anon_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0xC016
class TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC017
class TLS_ECDH_anon_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC018
class TLS_ECDH_anon_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC019
class TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC01A
class TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC01B
class TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC01C
class TLS_SRP_SHA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC01D
class TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC01E
class TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC01F
class TLS_SRP_SHA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC020
class TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC021
class TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC022
class TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC023
class TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC024
class TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC025
class TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC026
class TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC027
class TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC028
class TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC029
class TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC02A
class TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC02B
class TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC02C
class TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC02D
class TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC02E
class TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC02F
class TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC030
class TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC031
class TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC032
class TLS_ECDHE_PSK_WITH_RC4_128_SHA(_GenericCipherSuite):
val = 0xC033
class TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA(_GenericCipherSuite):
val = 0xC034
class TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA(_GenericCipherSuite):
val = 0xC035
class TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA(_GenericCipherSuite):
val = 0xC036
class TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC037
class TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC038
class TLS_ECDHE_PSK_WITH_NULL_SHA(_GenericCipherSuite):
val = 0xC039
class TLS_ECDHE_PSK_WITH_NULL_SHA256(_GenericCipherSuite):
val = 0xC03A
class TLS_ECDHE_PSK_WITH_NULL_SHA384(_GenericCipherSuite):
val = 0xC03B
# suites 0xC03C-C071 use ARIA
class TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC072
class TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC073
class TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC074
class TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC075
class TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC076
class TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC077
class TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC078
class TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC079
class TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC07A
class TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC07B
class TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC07C
class TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC07D
class TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC07E
class TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC07F
class TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC080
class TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC081
class TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC082
class TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC083
class TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC084
class TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC085
class TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC086
class TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC087
class TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC088
class TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC089
class TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC08A
class TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC08B
class TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC08C
class TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC08D
class TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC08E
class TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC08F
class TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC090
class TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC091
class TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256(_GenericCipherSuite):
val = 0xC092
class TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384(_GenericCipherSuite):
val = 0xC093
class TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC094
class TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC095
class TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC096
class TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC097
class TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC098
class TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC099
class TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256(_GenericCipherSuite):
val = 0xC09A
class TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384(_GenericCipherSuite):
val = 0xC09B
class TLS_RSA_WITH_AES_128_CCM(_GenericCipherSuite):
val = 0xC09C
class TLS_RSA_WITH_AES_256_CCM(_GenericCipherSuite):
val = 0xC09D
class TLS_DHE_RSA_WITH_AES_128_CCM(_GenericCipherSuite):
val = 0xC09E
class TLS_DHE_RSA_WITH_AES_256_CCM(_GenericCipherSuite):
val = 0xC09F
class TLS_RSA_WITH_AES_128_CCM_8(_GenericCipherSuite):
val = 0xC0A0
class TLS_RSA_WITH_AES_256_CCM_8(_GenericCipherSuite):
val = 0xC0A1
class TLS_DHE_RSA_WITH_AES_128_CCM_8(_GenericCipherSuite):
val = 0xC0A2
class TLS_DHE_RSA_WITH_AES_256_CCM_8(_GenericCipherSuite):
val = 0xC0A3
class TLS_PSK_WITH_AES_128_CCM(_GenericCipherSuite):
val = 0xC0A4
class TLS_PSK_WITH_AES_256_CCM(_GenericCipherSuite):
val = 0xC0A5
class TLS_DHE_PSK_WITH_AES_128_CCM(_GenericCipherSuite):
val = 0xC0A6
class TLS_DHE_PSK_WITH_AES_256_CCM(_GenericCipherSuite):
val = 0xC0A7
class TLS_PSK_WITH_AES_128_CCM_8(_GenericCipherSuite):
val = 0xC0A8
class TLS_PSK_WITH_AES_256_CCM_8(_GenericCipherSuite):
val = 0xC0A9
class TLS_DHE_PSK_WITH_AES_128_CCM_8(_GenericCipherSuite):
val = 0xC0AA
class TLS_DHE_PSK_WITH_AES_256_CCM_8(_GenericCipherSuite):
val = 0xC0AB
class TLS_ECDHE_ECDSA_WITH_AES_128_CCM(_GenericCipherSuite):
val = 0xC0AC
class TLS_ECDHE_ECDSA_WITH_AES_256_CCM(_GenericCipherSuite):
val = 0xC0AD
class TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8(_GenericCipherSuite):
val = 0xC0AE
class TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8(_GenericCipherSuite):
val = 0xC0AF
# the next 3 suites are from draft-agl-tls-chacha20poly1305-04
class TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD(_GenericCipherSuite):
val = 0xCC13
class TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD(_GenericCipherSuite):
val = 0xCC14
class TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD(_GenericCipherSuite):
val = 0xCC15
class TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCA8
class TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCA9
class TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCAA
class TLS_PSK_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCAB
class TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCAC
class TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCAD
class TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0xCCAE
class TLS_AES_128_GCM_SHA256(_GenericCipherSuite):
val = 0x1301
class TLS_AES_256_GCM_SHA384(_GenericCipherSuite):
val = 0x1302
class TLS_CHACHA20_POLY1305_SHA256(_GenericCipherSuite):
val = 0x1303
class TLS_AES_128_CCM_SHA256(_GenericCipherSuite):
val = 0x1304
class TLS_AES_128_CCM_8_SHA256(_GenericCipherSuite):
val = 0x1305
class SSL_CK_RC4_128_WITH_MD5(_GenericCipherSuite):
val = 0x010080
class SSL_CK_RC4_128_EXPORT40_WITH_MD5(_GenericCipherSuite):
val = 0x020080
class SSL_CK_RC2_128_CBC_WITH_MD5(_GenericCipherSuite):
val = 0x030080
class SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5(_GenericCipherSuite):
val = 0x040080
class SSL_CK_IDEA_128_CBC_WITH_MD5(_GenericCipherSuite):
val = 0x050080
class SSL_CK_DES_64_CBC_WITH_MD5(_GenericCipherSuite):
val = 0x060040
class SSL_CK_DES_192_EDE3_CBC_WITH_MD5(_GenericCipherSuite):
val = 0x0700C0
_tls_cipher_suites[0x00ff] = "TLS_EMPTY_RENEGOTIATION_INFO_SCSV"
_tls_cipher_suites[0x5600] = "TLS_FALLBACK_SCSV"
def get_usable_ciphersuites(l, kx):
"""
From a list of proposed ciphersuites, this function returns a list of
usable cipher suites, i.e. for which key exchange, cipher and hash
    algorithms are known to be implemented and usable in the current version of
    the TLS extension. The order of the cipher suites in the returned list
    matches that of the proposal.
"""
res = []
for c in l:
if c in _tls_cipher_suites_cls:
ciph = _tls_cipher_suites_cls[c]
if ciph.usable:
# XXX select among RSA and ECDSA cipher suites
# according to the key(s) the server was given
if (ciph.kx_alg.anonymous or
kx in ciph.kx_alg.name or
ciph.kx_alg.name == "TLS13"):
res.append(c)
return res
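# Illustrative usage sketch (not part of the original module): filter a client
# proposal down to the suites this implementation can negotiate. The proposed
# codes and the "RSA" key-exchange hint below are assumptions for demo only.
def _usable_ciphersuites_example():
    proposal = [0x1301, 0xC02F, 0x00FF]   # client-offered suite codes
    return get_usable_ciphersuites(proposal, "RSA")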
| gpl-2.0 | 2,971,043,887,224,793,000 | 21.770281 | 98 | 0.689718 | false |
nttks/edx-platform | openedx/core/djangoapps/course_global/tests/test_models.py | 1 | 1567 | import unittest
from django.conf import settings
from django.test import TestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.djangoapps.course_global.models import CourseGlobalSetting
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseGlobalSettingTest(TestCase):
"""
Tests for the CourseGlobalSetting model.
"""
def setUp(self):
pass
def _create_global_setting(self, course_id, global_enabled=True):
"""
Create a new couse global setting model.
"""
return CourseGlobalSetting.objects.get_or_create(
course_id=course_id,
global_enabled=global_enabled
)
def test_all_global_courses(self):
"""
Tests all of global-course.
"""
course_id_1 = SlashSeparatedCourseKey('Test', 'TestCourse', 'TestCourseRun1')
course_id_2 = SlashSeparatedCourseKey('Test', 'TestCourse', 'TestCourseRun2')
course_id_3 = SlashSeparatedCourseKey('Test', 'TestCourse', 'TestCourseRun3')
# create test models
self._create_global_setting(course_id_1)
self._create_global_setting(course_id_2)
self._create_global_setting(course_id_3, False)
course_global_ids = CourseGlobalSetting.all_course_id()
self.assertEquals(2, len(course_global_ids))
self.assertTrue(course_id_1 in course_global_ids)
self.assertTrue(course_id_2 in course_global_ids)
self.assertFalse(course_id_3 in course_global_ids)
| agpl-3.0 | 1,442,189,915,585,723,600 | 34.613636 | 85 | 0.675175 | false |
ThomasChauve/aita | docs/source/conf.py | 1 | 5994 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx_rtd_theme
import os
import sys
#sys.path.insert(0, os.path.abspath('../AITAToolbox/'))
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'AITAToolbox'
copyright = 'CC-BY-CC'
author = 'Thomas Chauve'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '2.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AITAToolboxdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AITAToolbox.tex', 'AITAToolbox Documentation',
'Thomas Chauve', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aitatoolbox', 'AITAToolbox Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AITAToolbox', 'AITAToolbox Documentation',
author, 'AITAToolbox', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| gpl-3.0 | -4,977,427,064,826,604,000 | 29.120603 | 79 | 0.63964 | false |
nexdatas/recselector | test/TestPool2SetUp.py | 1 | 5146 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2014 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file TestPool2SetUp.py
# class with server settings
#
import os
import sys
import subprocess
import PyTango
import time
try:
import TestPool2
except Exception:
from . import TestPool2
# test fixture
class TestPool2SetUp(object):
# constructor
# \brief defines server parameters
def __init__(self, device="pooltestp09/testts/t1r228",
instance="POOLTESTS1"):
# information about tango writer
self.new_device_info_writer = PyTango.DbDevInfo()
# information about tango writer class
self.new_device_info_writer._class = "Pool"
# information about tango writer server
self.new_device_info_writer.server = "Pool/%s" % instance
# information about tango writer name
self.new_device_info_writer.name = device
# server instance
self.instance = instance
self._psub = None
# device proxy
self.dp = None
# device properties
# test starter
# \brief Common set up of Tango Server
def setUp(self):
print("\nsetting up...")
self.add()
self.start()
def add(self):
db = PyTango.Database()
db.add_device(self.new_device_info_writer)
db.add_server(self.new_device_info_writer.server,
self.new_device_info_writer)
# starts server
def start(self):
db = PyTango.Database()
path = os.path.dirname(TestPool2.__file__)
if not path:
path = '.'
if sys.version_info > (3,):
self._psub = subprocess.call(
"cd %s; python3 ./TestPool2.py %s &" %
(path, self.instance),
stdout=None,
stderr=None, shell=True)
else:
self._psub = subprocess.call(
"cd %s; python ./TestPool2.py %s &" %
(path, self.instance),
stdout=None,
stderr=None, shell=True)
sys.stdout.write("waiting for simple server")
found = False
cnt = 0
dvname = self.new_device_info_writer.name
while not found and cnt < 1000:
try:
sys.stdout.write(".")
sys.stdout.flush()
exl = db.get_device_exported(dvname)
if dvname not in exl.value_string:
time.sleep(0.01)
cnt += 1
continue
self.dp = PyTango.DeviceProxy(dvname)
time.sleep(0.01)
if self.dp.state() == PyTango.DevState.ON:
found = True
except Exception:
found = False
cnt += 1
print("")
# test closer
# \brief Common tear down of Tango Server
def tearDown(self):
print("tearing down ...")
self.delete()
self.stop()
def delete(self):
db = PyTango.Database()
db.delete_server(self.new_device_info_writer.server)
# stops server
def stop(self):
if sys.version_info > (3,):
with subprocess.Popen(
"ps -ef | grep 'TestPool2.py %s' | grep -v grep" %
self.instance,
stdout=subprocess.PIPE, shell=True) as proc:
pipe = proc.stdout
res = str(pipe.read(), "utf8").split("\n")
for r in res:
sr = r.split()
if len(sr) > 2:
subprocess.call(
"kill -9 %s" % sr[1], stderr=subprocess.PIPE,
shell=True)
pipe.close()
else:
pipe = subprocess.Popen(
"ps -ef | grep 'TestPool2.py %s' | grep -v grep" %
self.instance,
stdout=subprocess.PIPE, shell=True).stdout
res = str(pipe.read()).split("\n")
for r in res:
sr = r.split()
if len(sr) > 2:
subprocess.call(
"kill -9 %s" % sr[1], stderr=subprocess.PIPE,
shell=True)
pipe.close()
if __name__ == "__main__":
simps = TestPool2SetUp()
simps.setUp()
print(simps.dp.status())
simps.tearDown()
| gpl-3.0 | 4,812,012,494,212,713,000 | 30.765432 | 73 | 0.536922 | false |
ARamsey118/Reverse-Javadoc | ReverseDoc.py | 1 | 6710 | #!/usr/bin/python3
from bs4 import BeautifulSoup
import ClassName
import Fields
import Method
import Constructor
class Comment():
"""
comment stores a comment for later printing
"""
def __init__(self, indent):
"""
Make a new instance of the comment class
attributes:
comment_lines: list of lines in the comment that will have new line characters appended to facilitate readability
:param indent: whether or not to indent (used to make unindented comments at the top of class files
"""
self.indent = indent
self.comment_lines = list()
def __repr__(self):
"""
method __repr__(self)
returns:
/**
* self.comment_lines
Doesn't close the comment to allow addition of parameters and returns
post-condition: cursor is at end of comment line, no \n has been inserted
"""
if self.indent:
new_str = "\t/**\n"
for comment_line in self.comment_lines:
new_str += "\t * " + comment_line + "\n"
else:
new_str = "/**\n"
for comment_line in self.comment_lines:
new_str += " * " + comment_line + "\n"
return new_str[:len(new_str) - 1] # removes new line character from end to prevent gaps in comments
class WrittenClass(object):
"""
Stores class for later printing
attributes:
package: string containing the package location
head_text: the name of the method along with what it implements and extends
constructor: a constructor object for this class (not combined with method as it does some special guessing)
methods: a python list filled with type method used to store the methods of this class
fields: a python list filled with type fields used to store the fields of this class
"""
def __init__(self):
self.package = ""
self.head_text = ""
self.methods = list()
self.fields = list()
self.constructor = ""
def __repr__(self, interface):
"""
Go through each of the attribute and add it to the string if it exists. Special characters like "{" and "}"
as necessary. Head_text should always be present, but is in the if statement just in case.
"""
javaClass = ""
if self.package:
javaClass += "package " + str(self.package) + ";\n"
if self.head_text:
javaClass += str(self.head_text) + " {\n\n"
if self.fields:
javaClass += str_list_no_int(self.fields) + "\n\n"
if self.constructor:
javaClass += self.constructor.__repr__(interface) + "\n\n"
if self.methods:
javaClass += str_list(self.methods, interface)
return javaClass + "\n}"
def parameter_print(parameters_in):
"""
Takes a list of parameters and turns it into a single string (with line breaks)
The first item in the list is the parameter name and the second is the description.
pre-condition: cursor is at the end of the previous line
post-condition: cursor is at the end of the previous line
"""
parameters_out = ""
for parameter in parameters_in:
        parameter[1] = " ".join(
            str(parameter[1]).replace("\n", " ").split())  # collapses newlines in a parameter's description into single spaces
parameters_out += "\t * @param " + parameter[0] + " " + parameter[1] + "\n"
if parameters_out:
return "\n" + parameters_out[:len(parameters_out) - 1] # starts a new line for the first parameter to print
# removes last new line so cursor is at end of last line
else:
return ""
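# Illustrative sketch (assumption, not in the original script): how
# parameter_print() renders [name, description] pairs as javadoc @param lines.
def _parameter_print_example():
    params = [["width", "the width in pixels"], ["height", "the height in pixels"]]
    return parameter_print(params)
    # -> "\n\t * @param width the width in pixels\n\t * @param height the height in pixels"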
def str_list_no_int(pyList):
"""
Used when interface vs non-interface has no effect and repr doesn't take another parameter
(might be able to be combined with str_list)
:param pyList: list to be turned into a string
:return: new string
"""
new_str = ""
for list_item in pyList:
new_str += str(list_item.__repr__())
return new_str
def str_list(pyList, interface):
"""
Used when interface vs non-interface has an effect and repr takes another parameter to indicate interface or not
(might be able to be combined with str_list_no_int)
:param pyList: list to be turned into a string
:return: new string
"""
new_str = ""
for list_item in pyList:
new_str += str(list_item.__repr__(interface))
return new_str
def create_comment(comment_text, indent):
"""
Makes a new instance of the comment class
Removes the line breaks so it can be printed as one string
:param comment_text: text to be added. Should not contain parameters or returns
:param indent: whether or not to indent this comment. Used for class comments
:return: instance of the comment class
"""
new_comment = Comment(indent)
for line in comment_text.split("\n"):
new_comment.comment_lines.append(str(line))
return new_comment
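# Illustrative sketch (assumption, not in the original script): a two-line
# description becomes an open javadoc comment; callers append @param/@return
# lines and the closing "*/" afterwards.
def _create_comment_example():
    comment = create_comment("First line.\nSecond line.", indent=True)
    return repr(comment)
    # -> "\t/**\n\t * First line.\n\t * Second line."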
def find_package(soup):
"""
Finds the package of the class
:param soup: html of the class file
:return: string of the package the class is in
"""
package = soup.find("div", {"class": "subTitle"})
if package:
return str(package.text)
def ReverseDoc(html, location):
"""
Creates the class or interface file for the given class
:param html: html of class file
:param location: URL of top level documentation. Used so fields can look for constants
:return: Written class object that can be printed to a file
"""
my_class = WrittenClass()
soup = BeautifulSoup(html)
my_class.package = find_package(soup)
my_class.head_text = ClassName.find_class_name(soup)
my_class.fields = Fields.find_fields(soup, location)
my_class.methods = Method.find_methods(soup)
my_class.constructor = Constructor.find_constructor(soup, my_class.fields)
return my_class
def main(htmlfile=''):
# htmlfile = input("Enter file name with path: ")
# htmlfile = "/home/andrew/Documents/AJ-College/Projects/Reverse-Javadoc/tests/Mogwai.html"
# htmlfile = "/home/andrew/Documents/AJ-College/Projects/Reverse-Javadoc/tests/overview-tree.html"
interface_answer = input("Is this an interface? (y/n) ")
# interface_answer = 'n'
interface = interface_answer.upper() in {"YES", "Y"}
with open(htmlfile) as f:
htmltext = f.read()
java = ReverseDoc(htmltext, htmlfile)
with open(htmlfile.replace("html", "java"), "w") as f:
        f.write(java.__repr__(interface))
if __name__ == '__main__':
main()
| mit | -2,180,981,688,187,114,000 | 33.234694 | 125 | 0.625037 | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/assemblme_v1-4-0/functions/common/color_effects.py | 1 | 13577 | # Author: Christopher Gearhart
# System imports
from numba import cuda, jit, prange
import numpy as np
from colorsys import rgb_to_hsv, hsv_to_rgb
# Blender imports
# NONE!
# Module imports
# from .color_effects_cuda import *
from .images import *
def initialize_gradient_texture(width, height, quadratic=False):
pixels = np.empty((height, width))
for row in prange(height):
val = 1 - (height - 1 - row) / (height - 1)
if quadratic:
val = val ** 0.5
pixels[row, :] = val
pixels = get_1d_pixel_array(pixels)
return pixels
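# Minimal usage sketch (assumption, not from the original add-on): a 4x3
# vertical gradient; each row holds a constant value of 0.0, 0.5 and 1.0.
def _gradient_example():
    flat = initialize_gradient_texture(width=4, height=3)
    return flat.reshape(3, 4)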
def convert_channels(channels, old_pixels, old_channels, use_alpha=False):
assert channels != old_channels
old_pixels = get_2d_pixel_array(old_pixels, old_channels)
new_pixels = np.empty((len(old_pixels), channels))
if channels > old_channels:
if old_channels == 1:
for i in range(channels):
new_pixels[:, i] = old_pixels[:, 0]
elif old_channels == 3:
new_pixels[:, :3] = old_pixels[:, :3]
new_pixels[:, 3] = 1
elif channels < old_channels:
if channels == 1 and old_channels == 4 and use_alpha:
new_pixels[:, 0] = old_pixels[:, 3]
elif channels == 1:
new_pixels[:, 0] = 0.2126 * old_pixels[:, 0] + 0.7152 * old_pixels[:, 1] + 0.0722 * old_pixels[:, 2]
elif channels == 3:
            new_pixels[:, :3] = old_pixels[:, :3]
new_pixels = get_1d_pixel_array(new_pixels)
return new_pixels
def set_alpha_channel(num_pix, old_pixels, old_channels, value):
old_pixels = get_2d_pixel_array(old_pixels, old_channels)
new_pixels = np.empty((num_pix, 4))
new_pixels[:, :3] = old_pixels[:, :3]
new_pixels[:, 3] = value
new_pixels = get_1d_pixel_array(new_pixels)
return new_pixels
@jit(nopython=True, parallel=True)
def resize_pixels(size, channels, old_pixels, old_size):
new_pixels = np.empty(size[0] * size[1] * channels)
for col in prange(size[0]):
col1 = int((col / size[0]) * old_size[0])
for row in range(size[1]):
row1 = int((row / size[1]) * old_size[1])
pixel_number = (size[0] * row + col) * channels
pixel_number_ref = (old_size[0] * row1 + col1) * channels
for ch in range(channels):
new_pixels[pixel_number + ch] = old_pixels[pixel_number_ref + ch]
return new_pixels
@jit(nopython=True, parallel=True)
def resize_pixels_preserve_borders(size, channels, old_pixels, old_size):
new_pixels = np.empty(len(old_pixels))
offset_col = int((old_size[0] - size[0]) / 2)
offset_row = int((old_size[1] - size[1]) / 2)
for col in prange(old_size[0]):
col1 = int(((col - offset_col) / size[0]) * old_size[0])
for row in range(old_size[1]):
row1 = int(((row - offset_row) / size[1]) * old_size[1])
pixel_number = (old_size[0] * row + col) * channels
if 0 <= col1 < old_size[0] and 0 <= row1 < old_size[1]:
pixel_number_ref = (old_size[0] * row1 + col1) * channels
for ch in range(channels):
new_pixels[pixel_number + ch] = old_pixels[pixel_number_ref + ch]
else:
for ch in range(channels):
new_pixels[pixel_number + ch] = 0
return new_pixels
def crop_pixels(size, channels, old_pixels, old_size):
old_pixels = get_3d_pixel_array(old_pixels, old_size[0], old_size[1], channels)
offset_col = (old_size[0] - size[0]) // 2
offset_row = (old_size[1] - size[1]) // 2
new_pixels = old_pixels[offset_row:offset_row + size[1], offset_col:offset_col + size[0]]
new_pixels = get_1d_pixel_array(new_pixels)
return new_pixels
def pad_pixels(size, channels, old_pixels, old_size):
    old_pixels = get_3d_pixel_array(old_pixels, old_size[0], old_size[1], channels)
    new_pixels = np.zeros((size[1], size[0], channels))
offset_col = (size[0] - old_size[0]) // 2
offset_row = (size[1] - old_size[1]) // 2
new_pixels[offset_row:offset_row + old_size[1], offset_col:offset_col + old_size[0]] = old_pixels[:, :]
new_pixels = get_1d_pixel_array(new_pixels)
return new_pixels
def blend_pixels(im1_pixels, im2_pixels, width, height, channels, operation, use_clamp, factor):
new_pixels = np.empty((width * height, channels))
im1_pixels = get_2d_pixel_array(im1_pixels, channels)
im2_pixels = get_2d_pixel_array(im2_pixels, channels)
if isinstance(factor, np.ndarray):
new_factor = np.empty((len(factor), channels))
for i in range(channels):
new_factor[:, i] = factor
factor = new_factor
if operation == "MIX":
new_pixels = im1_pixels * (1 - factor) + im2_pixels * factor
elif operation == "ADD":
new_pixels = im1_pixels + im2_pixels * factor
elif operation == "SUBTRACT":
new_pixels = im1_pixels - im2_pixels * factor
elif operation == "MULTIPLY":
new_pixels = im1_pixels * ((1 - factor) + im2_pixels * factor)
elif operation == "DIVIDE":
new_pixels = im1_pixels / ((1 - factor) + im2_pixels * factor)
elif operation == "POWER":
new_pixels = im1_pixels ** ((1 - factor) + im2_pixels * factor)
# elif operation == "LOGARITHM":
# new_pixels = math.log(im1_pixels, im2_pixels)
elif operation == "SQUARE ROOT":
new_pixels = np.sqrt(im1_pixels)
elif operation == "ABSOLUTE":
new_pixels = abs(im1_pixels)
elif operation == "MINIMUM":
new_pixels = np.clip(im1_pixels, a_min=im2_pixels, a_max=im1_pixels)
elif operation == "MAXIMUM":
new_pixels = np.clip(im1_pixels, a_min=im1_pixels, a_max=im2_pixels)
elif operation == "LESS THAN":
new_pixels = (im1_pixels < im2_pixels).astype(int)
elif operation == "GREATER THAN":
new_pixels = (im1_pixels > im2_pixels).astype(int)
elif operation == "ROUND":
new_pixels = np.round(im1_pixels)
elif operation == "FLOOR":
new_pixels = np.floor(im1_pixels)
elif operation == "CEIL":
new_pixels = np.ceil(im1_pixels)
# elif operation == "FRACT":
# new_pixels =
elif operation == "MODULO":
new_pixels = im1_pixels % im2_pixels
new_pixels = get_1d_pixel_array(new_pixels)
if use_clamp:
np.clip(new_pixels, 0, 1, new_pixels)
return new_pixels
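# Illustrative sketch (assumption, not from the original add-on): MIX two
# single-channel 2x1 buffers halfway between each other (result: [0.5, 0.5]).
def _blend_example():
    a = np.array([0.0, 1.0])
    b = np.array([1.0, 0.0])
    return blend_pixels(a, b, 2, 1, 1, "MIX", True, 0.5)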
def math_operation_on_pixels(pixels, operation, value, clamp=False):
new_pixels = np.empty(pixels.size)
if operation == "ADD":
new_pixels = pixels + value
elif operation == "SUBTRACT":
new_pixels = pixels - value
elif operation == "MULTIPLY":
new_pixels = pixels * value
elif operation == "DIVIDE":
new_pixels = pixels / value
elif operation == "POWER":
new_pixels = pixels ** value
# elif operation == "LOGARITHM":
# for i in prange(new_pixels.size):
# new_pixels = math.log(pixels, value)
elif operation == "SQUARE ROOT":
new_pixels = np.sqrt(pixels)
elif operation == "ABSOLUTE":
new_pixels = abs(pixels)
elif operation == "MINIMUM":
new_pixels = np.clip(pixels, a_min=value, a_max=pixels)
elif operation == "MAXIMUM":
new_pixels = np.clip(pixels, a_min=pixels, a_max=value)
elif operation == "LESS THAN":
new_pixels = (pixels < value).astype(int)
elif operation == "GREATER THAN":
new_pixels = (pixels > value).astype(int)
elif operation == "ROUND":
new_pixels = np.round(pixels)
elif operation == "FLOOR":
new_pixels = np.floor(pixels)
elif operation == "CEIL":
new_pixels = np.ceil(pixels)
# elif operation == "FRACT":
# new_pixels =
elif operation == "MODULO":
new_pixels = pixels % value
elif operation == "SINE":
new_pixels = np.sin(pixels)
elif operation == "COSINE":
new_pixels = np.cos(pixels)
elif operation == "TANGENT":
new_pixels = np.tan(pixels)
elif operation == "ARCSINE":
new_pixels = np.arcsin(pixels)
elif operation == "ARCCOSINE":
new_pixels = np.arccos(pixels)
elif operation == "ARCTANGENT":
new_pixels = np.arctan(pixels)
elif operation == "ARCTAN2":
        new_pixels = np.arctan2(pixels, value)
if clamp:
np.clip(new_pixels, 0, 1, new_pixels)
return new_pixels
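# Illustrative sketch (assumption): halve a flat pixel buffer and clamp the
# result into [0, 1]; yields [0.1, 0.4, 0.8] for the sample below.
def _math_operation_example():
    pix = np.array([0.2, 0.8, 1.6])
    return math_operation_on_pixels(pix, "MULTIPLY", 0.5, clamp=True)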
def clamp_pixels(pixels, minimum, maximum):
return np.clip(pixels, minimum, maximum)
def adjust_bright_contrast(pixels, bright, contrast):
return contrast * (pixels - 0.5) + 0.5 + bright
def adjust_hue_saturation_value(pixels, hue, saturation, value, channels=3):
assert channels in (3, 4)
pixels = get_2d_pixel_array(pixels, channels)
hue_adjust = hue - 0.5
pixels[:, 0] = (pixels[:, 0] + hue_adjust) % 1
pixels[:, 1] = pixels[:, 1] * saturation
pixels[:, 2] = pixels[:, 2] * value
return pixels
def invert_pixels(pixels, factor, channels):
pixels = get_2d_pixel_array(pixels, channels)
inverted_factor = 1 - factor
if channels == 4:
pixels[:, :3] = (inverted_factor * pixels[:, :3]) + (factor * (1 - pixels[:, :3]))
else:
pixels = (inverted_factor * pixels) + (factor * (1 - pixels))
pixels = get_1d_pixel_array(pixels)
return pixels
@jit(nopython=True, parallel=True)
def dilate_pixels_dist(old_pixels, pixel_dist, width, height):
mult = 1 if pixel_dist[0] > 0 else -1
new_pixels = np.empty(len(old_pixels))
# for i in prange(width * height):
# x = i / height
# row = round((x % 1) * height)
# col = round(x - (x % 1))
for col in prange(width):
for row in prange(height):
pixel_number = width * row + col
max_val = old_pixels[pixel_number]
for c in range(-pixel_dist[0], pixel_dist[0] + 1):
for r in range(-pixel_dist[1], pixel_dist[1] + 1):
if not (0 < col + c < width and 0 < row + r < height):
continue
width_amt = abs(c) / pixel_dist[0]
height_amt = abs(r) / pixel_dist[1]
ratio = (width_amt - height_amt) / 2 + 0.5
weighted_dist = pixel_dist[0] * ratio + ((1 - ratio) * pixel_dist[1])
dist = ((abs(c)**2 + abs(r)**2) ** 0.5)
if dist > weighted_dist + 0.5:
continue
pixel_number1 = width * (row + r) + (col + c)
cur_val = old_pixels[pixel_number1]
if cur_val * mult > max_val * mult:
max_val = cur_val
new_pixels[pixel_number] = max_val
return new_pixels
@jit(nopython=True, parallel=True)
def dilate_pixels_step(old_pixels, pixel_dist, width, height):
mult = 1 if pixel_dist[0] > 0 else -1
new_pixels = np.empty(len(old_pixels))
# for i in prange(width * height):
# x = i / height
# row = round((x % 1) * height)
# col = round(x - (x % 1))
for col in prange(width):
for row in range(height):
pixel_number = width * row + col
max_val = old_pixels[pixel_number]
for c in range(-pixel_dist[0], pixel_dist[0] + 1):
if not 0 < col + c < width:
continue
pixel_number1 = width * row + (col + c)
cur_val = old_pixels[pixel_number1]
if cur_val * mult > max_val * mult:
max_val = cur_val
new_pixels[pixel_number] = max_val
old_pixels = new_pixels
new_pixels = np.empty(len(old_pixels))
for col in prange(width):
for row in range(height):
pixel_number = width * row + col
max_val = old_pixels[pixel_number]
for r in range(-pixel_dist[1], pixel_dist[1] + 1):
if not 0 < row + r < height:
continue
pixel_number1 = width * (row + r) + col
cur_val = old_pixels[pixel_number1]
if cur_val * mult > max_val * mult:
max_val = cur_val
new_pixels[pixel_number] = max_val
return new_pixels
def flip_pixels(old_pixels, flip_x, flip_y, width, height, channels):
old_pixels = get_3d_pixel_array(old_pixels, width, height, channels)
    if flip_x and not flip_y:
        new_pixels = old_pixels[:, ::-1]
    elif not flip_x and flip_y:
        new_pixels = old_pixels[::-1, :]
    elif flip_x and flip_y:
        new_pixels = old_pixels[::-1, ::-1]
    else:
        new_pixels = old_pixels
new_pixels = get_1d_pixel_array(new_pixels)
return new_pixels
def translate_pixels(old_pixels, translate_x, translate_y, wrap_x, wrap_y, width, height, channels):
    new_pixels = np.zeros((height, width, channels))  # zeros so non-wrapped edges stay blank rather than uninitialized
# reshape
old_pixels = get_3d_pixel_array(old_pixels, width, height, channels)
# translate x
if translate_x > 0 or (wrap_x and translate_x != 0):
new_pixels[:, translate_x:] = old_pixels[:, :-translate_x]
if translate_x < 0 or (wrap_x and translate_x != 0):
new_pixels[:, :translate_x] = old_pixels[:, -translate_x:]
# reset old pixels if translating on both axes
if translate_x != 0 and translate_y != 0:
old_pixels = new_pixels.copy()
# translate y
if translate_y > 0 or (wrap_y and translate_y != 0):
new_pixels[translate_y:, :] = old_pixels[:-translate_y, :]
if translate_y < 0 or (wrap_y and translate_y != 0):
new_pixels[:translate_y, :] = old_pixels[-translate_y:, :]
# reshape
new_pixels = get_1d_pixel_array(new_pixels)
return new_pixels
| gpl-3.0 | 4,217,206,529,779,193,300 | 37.902579 | 112 | 0.574133 | false |
amitsaha/apigatewaydemo | webapp-1/app2.py | 1 | 2163 | #!/usr/bin/env python
# Reflects the requests from HTTP methods GET, POST, PUT, and DELETE
# Written by Nathan Hamiel (2010)
# Copied from https://gist.github.com/huyng/814831
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
import consulate
import os
import signal
import sys
port = int(os.environ.get('PORT', 5000))
service_id = 'projects_%s' % port
consul = consulate.Consul()
# Add "projects" service to the local agent
consul.agent.service.register('projects', service_id=service_id, port=port)
def signal_handler(signal, frame):
consul.agent.service.deregister(service_id)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
request_path = self.path
print("\n----- Request Start ----->\n")
print(request_path)
print(self.headers)
print("<----- Request End -----\n")
        self.send_response(200)
        self.send_header("Set-Cookie", "foo=bar")
        self.end_headers()
def do_POST(self):
request_path = self.path
print("\n----- Request Start ----->\n")
print(request_path)
request_headers = self.headers
#content_length = request_headers.getheaders('content-length')
content_length = request_headers.getheaders('Content-Length')
length = int(content_length[0]) if content_length else 0
print(request_headers)
print("Bytes received: %s\n" % length)
print(self.rfile.read(length))
print("<----- Request End -----\n")
        self.send_response(200)
        self.end_headers()
do_PUT = do_POST
do_DELETE = do_GET
def main():
print('Listening on localhost:%s' % port)
server = HTTPServer(('', port), RequestHandler)
server.serve_forever()
if __name__ == "__main__":
parser = OptionParser()
parser.usage = ("Creates an http-server that will echo out any GET or POST parameters\n"
"Run:\n\n"
" reflect")
(options, args) = parser.parse_args()
main()
| mit | -7,872,632,249,936,792,000 | 27.090909 | 92 | 0.606103 | false |
FuelCellUAV/FC_datalogger | quick2wire/quick2wire/i2c_ctypes.py | 1 | 1711 | # Warning: not part of the published Quick2Wire API.
#
# Converted from i2c.h and i2c-dev.h
# I2C only, no SMB definitions
from ctypes import c_int, c_uint16, c_ushort, c_short, c_char, POINTER, Structure
# /usr/include/linux/i2c-dev.h: 38
class i2c_msg(Structure):
"""<linux/i2c-dev.h> struct i2c_msg"""
_fields_ = [
('addr', c_uint16),
('flags', c_ushort),
('len', c_short),
('buf', POINTER(c_char))]
__slots__ = [name for name, type in _fields_]
# i2c_msg flags
I2C_M_TEN = 0x0010 # this is a ten bit chip address
I2C_M_RD = 0x0001 # read data, from slave to master
I2C_M_NOSTART = 0x4000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_REV_DIR_ADDR = 0x2000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_IGNORE_NAK = 0x1000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_NO_RD_ACK = 0x0800 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_RECV_LEN = 0x0400 # length will be first received byte
# /usr/include/linux/i2c-dev.h: 155
class i2c_rdwr_ioctl_data(Structure):
"""<linux/i2c-dev.h> struct i2c_rdwr_ioctl_data"""
_fields_ = [
('msgs', POINTER(i2c_msg)),
('nmsgs', c_int)]
__slots__ = [name for name, type in _fields_]
I2C_FUNC_I2C = 0x00000001
I2C_FUNC_10BIT_ADDR = 0x00000002
I2C_FUNC_PROTOCOL_MANGLING = 0x00000004 # I2C_M_NOSTART etc.
# ioctls
I2C_SLAVE = 0x0703 # Change slave address
# Attn.: Slave address is 7 or 10 bits
I2C_SLAVE_FORCE = 0x0706 # Change slave address
# Attn.: Slave address is 7 or 10 bits
# This changes the address, even if it
# is already taken!
I2C_TENBIT = 0x0704 # 0 for 7 bit addrs, != 0 for 10 bit
I2C_FUNCS = 0x0705 # Get the adapter functionality
I2C_RDWR = 0x0707 # Combined R/W transfer (one stop only)
| cc0-1.0 | 2,400,107,093,059,200,000 | 29.553571 | 81 | 0.665108 | false |
lfalvarez/votai | backend_candidate/tests/help_finding_these_candidates_tests.py | 1 | 4140 | # coding=utf-8
from backend_candidate.tests import SoulMateCandidateAnswerTestsBase
from elections.models import Candidate
from django.contrib.auth.models import User
from django.urls import reverse
from backend_candidate.models import (Candidacy,
is_candidate,
CandidacyContact,
send_candidate_a_candidacy_link,
add_contact_and_send_mail,
send_candidate_username_and_password)
from backend_candidate.forms import get_form_for_election
from backend_candidate.tasks import (let_candidate_now_about_us,
send_candidate_username_and_pasword_task,
send_candidates_their_username_and_password)
from django.template import Template, Context
from elections.models import Election, Area
from candidator.models import TakenPosition
from django.core import mail
from django.test import override_settings
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import Site
from django.core.management import call_command
from popular_proposal.models import (Commitment,
PopularProposal,
)
from popolo.models import ContactDetail
from django.test import override_settings
class HelpFindingCandidatesTestCase(SoulMateCandidateAnswerTestsBase):
def setUp(self):
super(HelpFindingCandidatesTestCase, self).setUp()
self.feli = User.objects.get(username='feli')
self.candidate1 = Candidate.objects.get(pk=1)
self.candidate2 = Candidate.objects.get(pk=2)
self.candidate3 = Candidate.objects.get(pk=3)
self.candidate4 = Candidate.objects.get(pk=4)
self.candidate5 = Candidate.objects.get(pk=5)
self.candidate6 = Candidate.objects.get(pk=6)
def test_page_listing_candidates(self):
url = reverse('help')
self.assertEquals(self.client.get(url).status_code, 200)
Candidacy.objects.create(user=self.feli,
candidate=self.candidate1
)
self.client.login(username=self.feli,
password='alvarez')
        # Should not be in because the candidate has a user that has logged in and has completed the 1/2 naranja
response = self.client.get(url)
self.assertNotIn(self.candidate1, response.context['candidates'])
        # Should be here because we have a contact detail and the candidate hasn't logged in
self.feli.last_login = None
self.feli.save()
Candidacy.objects.create(user=self.feli,
candidate=self.candidate2)
self.candidate2.taken_positions.all().delete()
self.candidate2.add_contact_detail(contact_type='TWITTER', value='perrito', label='perrito')
response = self.client.get(url)
self.assertIn(self.candidate2, response.context['candidates'])
        # candidate 3 should also be listed: the candidate hasn't logged in, has no answers,
        # and we add a contact detail below, so there is a way to contact her/him
self.feli.last_login = None
self.feli.save()
Candidacy.objects.create(user=self.feli,
candidate=self.candidate3)
self.candidate3.taken_positions.all().delete()
self.candidate3.add_contact_detail(contact_type='TWITTER', value='perrito', label='perrito')
response = self.client.get(url)
self.assertIn(self.candidate3, response.context['candidates'])
@override_settings(PRIORITY_CANDIDATES=[2,])
def test_only_priority_candidates(self):
self.candidate2.add_contact_detail(contact_type='TWITTER', value='perrito', label='perrito')
self.candidate3.add_contact_detail(contact_type='TWITTER', value='gatito', label='gatito')
url = reverse('help')
response = self.client.get(url)
self.assertIn(self.candidate2, response.context['candidates'])
self.assertEquals(len(response.context['candidates']), 1)
| gpl-3.0 | -5,023,670,798,739,782,000 | 49.487805 | 100 | 0.645652 | false |
Siecje/asphalt | tests/test_command.py | 1 | 4411 | from unittest.mock import patch, Mock
import pytest
import yaml
from asphalt.core import command
DerivedApplication = None
def test_quickstart_application(monkeypatch, tmpdir, capsys):
def mock_input(text):
if text == 'Project name: ':
return 'Example Project'
elif text == 'Top level package name: ':
return 'example'
raise ValueError('Unexpected input: ' + text)
get_distribution = Mock()
get_distribution('asphalt').parsed_version.public = '1.0.0'
monkeypatch.setattr('pkg_resources.get_distribution', get_distribution)
monkeypatch.setattr('builtins.input', mock_input)
tmpdir.chdir()
command.quickstart_application()
# Check that the project directory and the top level package were created
projectdir = tmpdir.join('Example Project')
assert projectdir.check(dir=True)
assert projectdir.join('example').join('__init__.py').check(file=1)
# Check that example/application.py was properly generated
with projectdir.join('example').join('application.py').open() as f:
assert f.read() == """\
from asphalt.core.application import Application
from asphalt.core.context import ApplicationContext
class ExampleProjectApplication(Application):
@coroutine
def start(app_ctx: ApplicationContext):
pass # IMPLEMENT CUSTOM LOGIC HERE
"""
with projectdir.join('config.yml').open() as f:
config_data = f.read()
assert isinstance(yaml.load(config_data), dict)
assert config_data == """\
---
application: example:ExampleProjectApplication
components:
foo: {} # REPLACE ME
settings:
bar: 1 # REPLACE ME
logging:
version: 1
disable_existing_loggers: false
handlers:
console:
class: logging.StreamHandler
formatter: generic
formatters:
generic:
format: "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
root:
handlers: [console]
level: INFO
"""
# Check that setup.py was properly generated
with projectdir.join('setup.py').open() as f:
assert f.read() == """\
from setuptools import setup
setup(
name='example',
version='1.0.0',
description='Example Project',
long_description='FILL IN HERE',
author='FILL IN HERE',
author_email='FILL IN HERE',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3'
],
zip_safe=True,
packages=[
'example'
],
install_requires=[
'asphalt >= 1.0.0, < 2.0.0'
]
)
"""
# Check that another run will raise an error because the directory exists already
pytest.raises(SystemExit, command.quickstart_application)
out, err = capsys.readouterr()
assert err == 'Error: the directory "Example Project" already exists.\n'
@pytest.mark.parametrize('unsafe', [False, True], ids=['safe', 'unsafe'])
def test_run_from_config_file(tmpdir, unsafe):
if unsafe:
app_class = '!!python/name:{}.DerivedApplication'.format(__spec__.name)
else:
app_class = '{}:DerivedApplication'.format(__spec__.name)
with patch('{}.DerivedApplication'.format(__spec__.name)) as cls:
path = tmpdir.join('test.yaml')
path.write("""\
---
application: {}
components:
foo: {{}}
bar: {{}}
settings:
setting: blah
logging:
version: 1
disable_existing_loggers: false
""".format(app_class))
command.run_from_config_file(str(path), unsafe)
components = {'foo': {}, 'bar': {}}
logging = {'version': 1, 'disable_existing_loggers': False}
settings = {'setting': 'blah'}
cls.assert_called_once_with(components=components, logging=logging, settings=settings)
cls().run.assert_called_once_with()
@pytest.mark.parametrize('args, exits', [
(['asphalt', '--help'], True),
(['asphalt'], False)
], ids=['help', 'noargs'])
def test_main_help(capsys, args, exits):
with patch('sys.argv', args):
pytest.raises(SystemExit, command.main) if exits else command.main()
out, err = capsys.readouterr()
assert out.startswith('usage: asphalt [-h]')
def test_main_run():
args = ['/bogus/path', '--unsafe']
patch1 = patch('sys.argv', ['asphalt', 'run'] + args)
patch2 = patch.object(command, 'run_from_config_file')
with patch1, patch2 as run_from_config_file:
command.main()
assert run_from_config_file.called_once_with(args)
| apache-2.0 | -4,736,903,730,325,377,000 | 28.604027 | 94 | 0.647472 | false |
esacosta/u-mooc | tests/integration/pageobjects.py | 1 | 17116 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Page objects used in functional tests for U-MOOC."""
__author__ = 'John Orr ([email protected])'
from selenium.common import exceptions
from selenium.webdriver.common import action_chains
from selenium.webdriver.common import by
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support import select
from selenium.webdriver.support import wait
class PageObject(object):
"""Superclass to hold shared logic used by page objects."""
def __init__(self, tester):
self._tester = tester
def find_element_by_css_selector(self, selector):
return self._tester.driver.find_element_by_css_selector(selector)
def find_element_by_id(self, elt_id):
return self._tester.driver.find_element_by_id(elt_id)
def find_element_by_link_text(self, text):
return self._tester.driver.find_element_by_link_text(text)
def find_element_by_name(self, name):
return self._tester.driver.find_element_by_name(name)
def expect_status_message_to_be(self, value):
wait.WebDriverWait(self._tester.driver, 15).until(
ec.text_to_be_present_in_element(
(by.By.ID, 'formStatusMessage'), value))
class EditorPageObject(PageObject):
"""Page object for pages which wait for the editor to finish loading."""
def __init__(self, tester):
super(EditorPageObject, self).__init__(tester)
def successful_butter_bar(driver):
form_status_message = driver.find_element_by_id('formStatusMessage')
return 'Success.' in form_status_message.text or (
not form_status_message.is_displayed())
wait.WebDriverWait(self._tester.driver, 15).until(successful_butter_bar)
def set_status(self, status):
select.Select(self.find_element_by_name(
'is_draft')).select_by_visible_text(status)
return self
def click_save(self, link_text='Save', status_message='Saved'):
self.find_element_by_link_text(link_text).click()
self.expect_status_message_to_be(status_message)
return self
def _close_and_return_to(self, continue_page):
self.find_element_by_link_text('Close').click()
return continue_page(self._tester)
class DashboardEditor(EditorPageObject):
"""A base class for the editors accessed from the Dashboard."""
def click_close(self):
return self._close_and_return_to(DashboardPage)
class RootPage(PageObject):
"""Page object to model the interactions with the root page."""
def load(self, base_url):
self._tester.driver.get(base_url + '/')
return self
def click_login(self):
self.find_element_by_link_text('Login').click()
return LoginPage(self._tester)
def click_dashboard(self):
self.find_element_by_link_text('Dashboard').click()
return DashboardPage(self._tester)
def click_admin(self):
self.find_element_by_link_text('Admin').click()
return AdminPage(self._tester)
def click_announcements(self):
self.find_element_by_link_text('Announcements').click()
return AnnouncementsPage(self._tester)
def click_register(self):
self.find_element_by_link_text('Register').click()
return RegisterPage(self._tester)
class RegisterPage(PageObject):
"""Page object to model the registration page."""
def enroll(self, name):
enroll = self.find_element_by_name('form01')
enroll.send_keys(name)
enroll.submit()
return RegisterPage(self._tester)
def verify_enrollment(self):
self._tester.assertTrue(
'Thank you for registering' in self.find_element_by_css_selector(
'p.top_content').text)
return self
def click_course(self):
self.find_element_by_link_text('Course').click()
return RootPage(self._tester)
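def _example_registration_flow(tester, base_url):
    """Illustrative sketch (not part of the original suite) of chaining the
    page objects above in an integration test; the student name is an
    assumption for demonstration only."""
    root = RootPage(tester).load(base_url)
    return root.click_register().enroll('Test Student').verify_enrollment()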
class AnnouncementsPage(PageObject):
"""Page object to model the announcements page."""
def click_add_new(self):
self.find_element_by_css_selector(
'#gcb-add-announcement > button').click()
return AnnouncementsEditorPage(self._tester)
def verify_announcement(self, title=None, date=None, body=None):
"""Verify that the announcement has the given fields."""
if title:
self._tester.assertEquals(
title, self._tester.driver.find_elements_by_css_selector(
'div.gcb-aside h2')[0].text)
if date:
self._tester.assertEquals(
date, self._tester.driver.find_elements_by_css_selector(
'div.gcb-aside p')[0].text)
if body:
self._tester.assertEquals(
body, self._tester.driver.find_elements_by_css_selector(
'div.gcb-aside p')[1].text)
return self
class AnnouncementsEditorPage(EditorPageObject):
"""Page to model the announcements editor."""
def enter_fields(self, title=None, date=None, body=None):
"""Enter title, date, and body into the announcement form."""
if title:
title_el = self.find_element_by_name('title')
title_el.clear()
title_el.send_keys(title)
if date:
date_el = self.find_element_by_name('date')
date_el.clear()
date_el.send_keys(date)
if body:
body_el = self.find_element_by_name('html')
body_el.clear()
body_el.send_keys(body)
return self
def click_close(self):
return self._close_and_return_to(AnnouncementsPage)
class LoginPage(PageObject):
"""Page object to model the interactions with the login page."""
def login(self, login, admin=False):
email = self._tester.driver.find_element_by_id('email')
email.clear()
email.send_keys(login)
if admin:
self.find_element_by_id('admin').click()
self.find_element_by_id('submit-login').click()
return RootPage(self._tester)
class DashboardPage(PageObject):
"""Page object to model the interactions with the dashboard landing page."""
def load(self, base_url, name):
self._tester.driver.get('/'.join([base_url, name, 'dashboard']))
return self
def verify_read_only_course(self):
self._tester.assertEquals(
'Read-only course.',
self.find_element_by_id('gcb-alerts-bar').text)
return self
def verify_selected_tab(self, tab_text):
tab = self.find_element_by_link_text(tab_text)
self._tester.assertEquals('selected', tab.get_attribute('class'))
def verify_not_publicly_available(self):
self._tester.assertEquals(
'The course is not publicly available.',
self.find_element_by_id('gcb-alerts-bar').text)
return self
def click_import(self):
self.find_element_by_css_selector('#import_course').click()
return Import(self._tester)
def click_add_unit(self):
self.find_element_by_css_selector('#add_unit > button').click()
return AddUnit(self._tester)
def click_add_assessment(self):
self.find_element_by_css_selector('#add_assessment > button').click()
return AddAssessment(self._tester)
def click_add_link(self):
self.find_element_by_css_selector('#add_link > button').click()
return AddLink(self._tester)
def click_add_lesson(self):
self.find_element_by_css_selector('#add_lesson > button').click()
return AddLesson(self._tester)
def click_organize(self):
self.find_element_by_css_selector('#edit_unit_lesson').click()
return Organize(self._tester)
def click_assets(self):
self.find_element_by_link_text('Assets').click()
return AssetsPage(self._tester)
def verify_course_outline_contains_unit(self, unit_title):
self.find_element_by_link_text(unit_title)
return self
class AssetsPage(PageObject):
"""Page object for the dashboard's assets tab."""
def click_upload(self):
self.find_element_by_link_text('Upload').click()
return AssetsEditorPage(self._tester)
def verify_image_file_by_name(self, name):
self.find_element_by_link_text(name) # throw exception if not found
return self
    def verify_no_image_file_by_name(self, name):
        # assert the file is no longer listed; find_elements returns [] if absent
        self._tester.assertFalse(
            self._tester.driver.find_elements_by_link_text(name))
        return self
def click_edit_image(self, name):
self.find_element_by_link_text(
name).parent.find_element_by_link_text('[Edit]').click()
return ImageEditorPage(self._tester)
class AssetsEditorPage(DashboardEditor):
"""Page object for upload image page."""
def select_file(self, path):
self.find_element_by_name('file').send_keys(path)
return self
def click_upload_and_expect_saved(self):
self.find_element_by_link_text('Upload').click()
self.expect_status_message_to_be('Saved.')
# Page automatically redirects after successful save.
wait.WebDriverWait(self._tester.driver, 15).until(
ec.title_contains('Assets'))
return AssetsPage(self._tester)
class ImageEditorPage(EditorPageObject):
"""Page object for the dashboard's view/delete image page."""
def click_delete(self):
self.find_element_by_link_text('Delete').click()
return self
def confirm_delete(self):
self._tester.driver.switch_to_alert().accept()
return AssetsPage(self._tester)
class AddUnit(DashboardEditor):
"""Page object to model the dashboard's add unit editor."""
def __init__(self, tester):
super(AddUnit, self).__init__(tester)
self.expect_status_message_to_be('New unit has been created and saved.')
def set_title(self, title):
title_el = self.find_element_by_name('title')
title_el.clear()
title_el.send_keys(title)
return self
class Import(DashboardEditor):
"""Page object to model the dashboard's unit/lesson organizer."""
pass
class AddAssessment(DashboardEditor):
"""Page object to model the dashboard's assessment editor."""
def __init__(self, tester):
super(AddAssessment, self).__init__(tester)
self.expect_status_message_to_be(
'New assessment has been created and saved.')
class AddLink(DashboardEditor):
"""Page object to model the dashboard's link editor."""
def __init__(self, tester):
super(AddLink, self).__init__(tester)
self.expect_status_message_to_be(
'New link has been created and saved.')
class AddLesson(DashboardEditor):
"""Page object to model the dashboard's lesson editor."""
RTE_EDITOR_ID = 'gcbRteField-0_editor'
RTE_TEXTAREA_ID = 'gcbRteField-0'
def __init__(self, tester):
super(AddLesson, self).__init__(tester)
self.expect_status_message_to_be(
'New lesson has been created and saved.')
def click_rich_text(self):
el = self.find_element_by_css_selector('div.rte-control')
self._tester.assertEqual('Rich Text', el.text)
el.click()
wait.WebDriverWait(self._tester.driver, 15).until(
ec.element_to_be_clickable((by.By.ID, AddLesson.RTE_EDITOR_ID)))
return self
def click_plain_text(self):
el = self.find_element_by_css_selector('div.rte-control')
self._tester.assertEqual('<HTML>', el.text)
el.click()
return self
def send_rte_text(self, text):
self.find_element_by_id('gcbRteField-0_editor').send_keys(text)
return self
def select_rte_custom_tag_type(self, option_text):
"""Select the given option from the custom content type selector."""
self._ensure_rte_iframe_ready_and_switch_to_it()
select_tag = self.find_element_by_name('tag')
for option in select_tag.find_elements_by_tag_name('option'):
if option.text == option_text:
option.click()
break
else:
self._tester.fail('No option "%s" found' % option_text)
wait.WebDriverWait(self._tester.driver, 15).until(
ec.element_to_be_clickable(
(by.By.PARTIAL_LINK_TEXT, 'Close')))
self._tester.driver.switch_to_default_content()
return self
def click_rte_add_custom_tag(self):
self.find_element_by_link_text(
'Insert Google U-MOOC component').click()
return self
def doubleclick_rte_element(self, elt_css_selector):
self._tester.driver.switch_to_frame(AddLesson.RTE_EDITOR_ID)
target = self.find_element_by_css_selector(elt_css_selector)
action_chains.ActionChains(
self._tester.driver).double_click(target).perform()
self._tester.driver.switch_to_default_content()
return self
def _ensure_rte_iframe_ready_and_switch_to_it(self):
wait.WebDriverWait(self._tester.driver, 15).until(
ec.frame_to_be_available_and_switch_to_it('modal-editor-iframe'))
# Ensure inputEx has initialized too
wait.WebDriverWait(self._tester.driver, 15).until(
ec.element_to_be_clickable(
(by.By.PARTIAL_LINK_TEXT, 'Close')))
def set_rte_lightbox_field(self, field_css_selector, value):
self._ensure_rte_iframe_ready_and_switch_to_it()
field = self.find_element_by_css_selector(field_css_selector)
field.clear()
field.send_keys(value)
self._tester.driver.switch_to_default_content()
return self
def ensure_rte_lightbox_field_has_value(self, field_css_selector, value):
self._ensure_rte_iframe_ready_and_switch_to_it()
self._tester.assertEqual(
value,
self.find_element_by_css_selector(
field_css_selector).get_attribute('value'))
self._tester.driver.switch_to_default_content()
return self
def click_rte_save(self):
self._ensure_rte_iframe_ready_and_switch_to_it()
self.find_element_by_link_text('Save').click()
self._tester.driver.switch_to_default_content()
return self
def ensure_objectives_textarea_matches(self, text):
self._tester.assertEqual(text, self.find_element_by_id(
AddLesson.RTE_TEXTAREA_ID).get_attribute('value'))
return self
class Organize(DashboardEditor):
"""Page object to model the dashboard's unit/lesson organizer."""
pass
class AdminPage(PageObject):
"""Page object to model the interactions with the admimn landing page."""
def click_add_course(self):
self.find_element_by_id('add_course').click()
return AddCourseEditorPage(self._tester)
def click_settings(self):
self.find_element_by_link_text('Settings').click()
return AdminSettingsPage(self._tester)
class AdminSettingsPage(PageObject):
"""Page object for the admin settings."""
def click_override_admin_user_emails(self):
        self._tester.driver.find_elements_by_css_selector(
            'button.btn.btn-primary')[0].click()
return ConfigPropertyOverridePage(self._tester)
def verify_admin_user_emails_contains(self, email):
self._tester.assertTrue(
email in self._tester.driver.find_elements_by_css_selector(
'table.gcb-config tr')[1].find_elements_by_css_selector(
'td')[1].text)
class ConfigPropertyOverridePage(EditorPageObject):
"""Page object for the admin property override editor."""
def set_value(self, value):
self.find_element_by_name('value').send_keys(value)
return self
def click_close(self):
return self._close_and_return_to(AdminSettingsPage)
class AddCourseEditorPage(EditorPageObject):
"""Page object for the dashboards' add course page."""
def set_fields(self, name=None, title=None, email=None):
"""Populate the fields in the add course page."""
name_el = self.find_element_by_name('name')
title_el = self.find_element_by_name('title')
email_el = self.find_element_by_name('admin_email')
name_el.clear()
title_el.clear()
email_el.clear()
if name:
name_el.send_keys(name)
if title:
title_el.send_keys(title)
if email:
email_el.send_keys(email)
return self
def click_close(self):
return self._close_and_return_to(AdminPage)
| apache-2.0 | -6,051,078,908,374,405,000 | 33.369478 | 80 | 0.640512 | false |
cyanfish/heltour | heltour/tournament/tasks.py | 1 | 26748 | from heltour.tournament.models import *
from heltour.tournament import lichessapi, slackapi, pairinggen, \
alternates_manager, signals, uptime
from heltour.celery import app
from celery.utils.log import get_task_logger
from datetime import datetime
from django.core.cache import cache
from heltour import settings
import reversion
from django.contrib import messages
from math import ceil
from django.urls import reverse
from heltour.tournament.workflows import RoundTransitionWorkflow
from django.dispatch.dispatcher import receiver
from django.db.models.signals import post_save
from django.contrib.sites.models import Site
import time
import textwrap
logger = get_task_logger(__name__)
@app.task(bind=True)
def update_player_ratings(self):
usernames = [p.lichess_username for p in Player.objects.all()]
try:
updated = 0
for user_meta in lichessapi.enumerate_user_metas(usernames, priority=1):
p = Player.objects.get(lichess_username__iexact=user_meta['id'])
p.update_profile(user_meta)
updated += 1
logger.info('Updated ratings for %d/%d players' % (updated, len(usernames)))
except Exception as e:
logger.warning('Error getting ratings: %s' % e)
@app.task(bind=True)
def populate_historical_ratings(self):
pairings_that_should_have_ratings = PlayerPairing.objects.exclude(game_link='',
result='').exclude(white=None,
black=None).nocache()
pairings_that_need_ratings = pairings_that_should_have_ratings.filter(
white_rating=None) | pairings_that_should_have_ratings.filter(black_rating=None)
api_poll_count = 0
for p in pairings_that_need_ratings.exclude(game_link=''):
# Poll ratings for the game from the lichess API
if p.game_id() is None:
continue
p.refresh_from_db()
game_meta = lichessapi.get_game_meta(p.game_id(), priority=0, timeout=300)
p.white_rating = game_meta['players']['white']['rating']
p.black_rating = game_meta['players']['black']['rating']
p.save(update_fields=['white_rating', 'black_rating'])
api_poll_count += 1
if api_poll_count >= 100:
# Limit the processing per task execution
return
for p in pairings_that_need_ratings.filter(game_link=''):
round_ = p.get_round()
if round_ is None:
continue
season = round_.season
league = season.league
p.refresh_from_db()
if not round_.is_completed:
p.white_rating = p.white.rating_for(league)
p.black_rating = p.black.rating_for(league)
else:
# Look for ratings from a close time period
p.white_rating = _find_closest_rating(p.white, round_.end_date, season)
p.black_rating = _find_closest_rating(p.black, round_.end_date, season)
p.save(update_fields=['white_rating', 'black_rating'])
for b in PlayerBye.objects.filter(player_rating=None, round__publish_pairings=True).nocache():
b.refresh_from_db()
if not b.round.is_completed:
b.player_rating = b.player.rating_for(b.round.season.league)
else:
b.player_rating = _find_closest_rating(b.player, b.round.end_date, b.round.season)
b.save(update_fields=['player_rating'])
for tm in TeamMember.objects.filter(player_rating=None,
team__season__is_completed=True).nocache():
tm.refresh_from_db()
tm.player_rating = _find_closest_rating(tm.player, tm.team.season.end_date(),
tm.team.season)
tm.save(update_fields=['player_rating'])
for alt in Alternate.objects.filter(player_rating=None,
season_player__season__is_completed=True).nocache():
alt.refresh_from_db()
alt.player_rating = _find_closest_rating(alt.season_player.player,
alt.season_player.season.end_date(),
alt.season_player.season)
alt.save(update_fields=['player_rating'])
for sp in SeasonPlayer.objects.filter(final_rating=None, season__is_completed=True).nocache():
sp.refresh_from_db()
sp.final_rating = _find_closest_rating(sp.player, sp.season.end_date(), sp.season)
sp.save(update_fields=['final_rating'])
def _find_closest_rating(player, date, season):
if player is None:
return None
if season.league.competitor_type == 'team':
season_pairings = TeamPlayerPairing.objects.filter(
team_pairing__round__season=season).exclude(white_rating=None,
black_rating=None).nocache()
else:
season_pairings = LonePlayerPairing.objects.filter(round__season=season).exclude(
white_rating=None, black_rating=None).nocache()
pairings = season_pairings.filter(white=player) | season_pairings.filter(black=player)
def pairing_date(p):
if season.league.competitor_type == 'team':
return p.team_pairing.round.end_date
else:
return p.round.end_date
def rating(p):
if p.white == player:
return p.white_rating
else:
return p.black_rating
pairings_by_date = sorted([(pairing_date(p), p) for p in pairings], key=lambda p: p[0])
if len(pairings_by_date) == 0:
# Try to find the seed rating
sp = SeasonPlayer.objects.filter(season=season, player=player).first()
if sp is not None and sp.seed_rating is not None:
return sp.seed_rating
# Default to current rating
return player.rating_for(season.league)
pairings_by_date_lt = [p for p in pairings_by_date if p[0] <= date]
pairings_by_date_gt = [p for p in pairings_by_date if p[0] > date]
if len(pairings_by_date_lt) > 0:
# Get the rating AFTER the game
p = pairings_by_date_lt[-1][1]
if p.game_id() is not None:
game_meta = lichessapi.get_game_meta(p.game_id(), priority=0, timeout=300)
player_meta = game_meta['players']['white'] if p.white == player else \
game_meta['players']['black']
if 'ratingDiff' in player_meta:
return player_meta['rating'] + player_meta['ratingDiff']
return rating(p)
else:
return rating(pairings_by_date_gt[0][1])
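# Illustrative example (not part of the original module); the player and dates
# are assumptions. For a player whose games ended Jan 1 and Jan 15, a query at
# Jan 10:
#
#   _find_closest_rating(player, date=jan_10, season=season)
#
# picks the Jan 1 pairing (the latest pairing on or before the date) and, when
# the lichess game metadata carries a 'ratingDiff', returns the post-game
# rating (rating + ratingDiff); otherwise it falls back to the rating recorded
# on the pairing itself.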
@app.task(bind=True)
def update_tv_state(self):
games_starting = PlayerPairing.objects.filter(result='', game_link='',
scheduled_time__lt=timezone.now()).nocache()
games_starting = games_starting.filter(loneplayerpairing__round__end_date__gt=timezone.now()) | \
games_starting.filter(
teamplayerpairing__team_pairing__round__end_date__gt=timezone.now())
games_in_progress = PlayerPairing.objects.filter(result='', tv_state='default').exclude(
game_link='').nocache()
for game in games_starting:
try:
league = game.get_round().season.league
for meta in lichessapi.get_latest_game_metas(game.white.lichess_username, 5, priority=1,
timeout=300):
try:
if meta['players']['white']['user'][
'id'].lower() == game.white.lichess_username.lower() and \
meta['players']['black']['user'][
'id'].lower() == game.black.lichess_username.lower() and \
meta['clock']['initial'] == league.time_control_initial() and \
meta['clock']['increment'] == league.time_control_increment() and \
meta['rated'] == True:
game.game_link = get_gamelink_from_gameid(meta['id'])
game.save()
except KeyError:
pass
except Exception as e:
logger.warning('Error updating tv state for %s: %s' % (game, e))
for game in games_in_progress:
gameid = get_gameid_from_gamelink(game.game_link)
if gameid is not None:
try:
meta = lichessapi.get_game_meta(gameid, priority=1, timeout=300)
if 'status' not in meta or meta['status'] != 'started':
game.tv_state = 'hide'
if 'status' in meta and meta['status'] == 'draw':
game.result = '1/2-1/2'
elif 'winner' in meta and meta[
'status'] != 'timeout': # timeout = claim victory (which isn't allowed)
if meta['winner'] == 'white':
game.result = '1-0'
elif meta['winner'] == 'black':
game.result = '0-1'
game.save()
except Exception as e:
logger.warning('Error updating tv state for %s: %s' % (game.game_link, e))
@app.task(bind=True)
def update_lichess_presence(self):
games_starting = PlayerPairing.objects.filter( \
result='', game_link='', \
scheduled_time__lt=timezone.now() + timedelta(minutes=5), \
scheduled_time__gt=timezone.now() - timedelta(minutes=22)) \
.exclude(white=None).exclude(black=None).select_related('white', 'black').nocache()
games_starting = games_starting.filter(loneplayerpairing__round__end_date__gt=timezone.now()) | \
games_starting.filter(
teamplayerpairing__team_pairing__round__end_date__gt=timezone.now())
users = {}
for game in games_starting:
users[game.white.lichess_username.lower()] = game.white
users[game.black.lichess_username.lower()] = game.black
for status in lichessapi.enumerate_user_statuses(list(users.keys()), priority=1, timeout=60):
if status.get('online'):
user = users[status.get('id').lower()]
for g in games_starting:
if user in (g.white, g.black):
presence = g.get_player_presence(user)
presence.online_for_game = True
presence.save()
@app.task(bind=True)
def update_slack_users(self):
slack_users = {u.id: u for u in slackapi.get_user_list()}
for p in Player.objects.all():
u = slack_users.get(p.slack_user_id)
if u != None and u.tz_offset != (p.timezone_offset and p.timezone_offset.total_seconds()):
p.timezone_offset = None if u.tz_offset is None else timedelta(seconds=u.tz_offset)
p.save()
# How late an event is allowed to run before it's discarded instead
_max_lateness = timedelta(hours=1)
@app.task(bind=True)
def run_scheduled_events(self):
now = timezone.now()
with cache.lock('run_scheduled_events'):
future_event_time = None
for event in ScheduledEvent.objects.all():
# Determine a range of times to search
# If the comparison point (e.g. round start) is in the range, we run the event
upper_bound = now - event.offset
lower_bound = max(event.last_run or event.date_created,
now - _max_lateness) - event.offset
# Determine an upper bound for events that should be run before the next task execution
# The idea is that we want events to be run as close to their scheduled time as possible,
# not just at whatever interval this task happens to be run
future_bound = upper_bound + settings.CELERYBEAT_SCHEDULE['run_scheduled_events'][
'schedule']
def matching_rounds(**kwargs):
result = Round.objects.filter(**kwargs).filter(season__is_active=True)
if event.league is not None:
result = result.filter(season__league=event.league)
if event.season is not None:
result = result.filter(season=event.season)
return result
def matching_pairings(**kwargs):
team_result = PlayerPairing.objects.filter(**kwargs).filter(
teamplayerpairing__team_pairing__round__season__is_active=True)
lone_result = PlayerPairing.objects.filter(**kwargs).filter(
loneplayerpairing__round__season__is_active=True)
if event.league is not None:
team_result = team_result.filter(
teamplayerpairing__team_pairing__round__season__league=event.league)
lone_result = lone_result.filter(
loneplayerpairing__round__season__league=event.league)
if event.season is not None:
team_result = team_result.filter(
teamplayerpairing__team_pairing__round__season=event.season)
lone_result = lone_result.filter(loneplayerpairing__round__season=event.season)
return team_result | lone_result
if event.relative_to == 'round_start':
for obj in matching_rounds(start_date__gt=lower_bound, start_date__lte=upper_bound):
event.run(obj)
for obj in matching_rounds(start_date__gt=upper_bound,
start_date__lte=future_bound):
future_event_time = obj.start_date + event.offset if future_event_time is None else min(
future_event_time, obj.start_date + event.offset)
elif event.relative_to == 'round_end':
for obj in matching_rounds(end_date__gt=lower_bound, end_date__lte=upper_bound):
event.run(obj)
for obj in matching_rounds(end_date__gt=upper_bound, end_date__lte=future_bound):
future_event_time = obj.end_date + event.offset if future_event_time is None else min(
future_event_time, obj.end_date + event.offset)
elif event.relative_to == 'game_scheduled_time':
for obj in matching_pairings(scheduled_time__gt=lower_bound,
scheduled_time__lte=upper_bound):
event.run(obj)
for obj in matching_pairings(scheduled_time__gt=upper_bound,
scheduled_time__lte=future_bound):
future_event_time = obj.scheduled_time + event.offset if future_event_time is None else min(
future_event_time, obj.scheduled_time + event.offset)
# Run ScheduledNotifications now
upper_bound = now
lower_bound = now - _max_lateness
future_bound = upper_bound + settings.CELERYBEAT_SCHEDULE['run_scheduled_events'][
'schedule']
for n in ScheduledNotification.objects.filter(notification_time__gt=lower_bound,
notification_time__lte=upper_bound):
n.run()
for n in ScheduledNotification.objects.filter(notification_time__gt=upper_bound,
notification_time__lte=future_bound):
future_event_time = n.notification_time if future_event_time is None else min(
future_event_time, n.notification_time)
# Schedule this task to be run again at the next event's scheduled time
# Note: This could potentially lead to multiple tasks running at the same time. That's why we have a lock
if future_event_time is not None:
run_scheduled_events.apply_async(args=[], eta=future_event_time)
@app.task(bind=True)
def round_transition(self, round_id):
season = Round.objects.get(pk=round_id).season
workflow = RoundTransitionWorkflow(season)
warnings = workflow.warnings
if len(warnings) > 0:
signals.no_round_transition.send(sender=round_transition, season=season, warnings=warnings)
else:
msg_list = workflow.run(complete_round=True, complete_season=True, update_board_order=True,
generate_pairings=True, background=True)
signals.starting_round_transition.send(sender=round_transition, season=season,
msg_list=msg_list)
@receiver(signals.do_round_transition, dispatch_uid='heltour.tournament.tasks')
def do_round_transition(sender, round_id, **kwargs):
round_transition.apply_async(args=[round_id])
@app.task(bind=True)
def generate_pairings(self, round_id, overwrite=False):
round_ = Round.objects.get(pk=round_id)
pairinggen.generate_pairings(round_, overwrite)
round_.publish_pairings = False
with reversion.create_revision():
reversion.set_comment('Generated pairings.')
round_.save()
signals.pairings_generated.send(sender=generate_pairings, round_=round_)
@receiver(signals.do_generate_pairings, dispatch_uid='heltour.tournament.tasks')
def do_generate_pairings(sender, round_id, overwrite=False, **kwargs):
generate_pairings.apply_async(args=[round_id, overwrite], countdown=1)
@app.task(bind=True)
def validate_registration(self, reg_id):
reg = Registration.objects.get(pk=reg_id)
fail_reason = None
warnings = []
try:
user_meta = lichessapi.get_user_meta(reg.lichess_username, 1)
player, _ = Player.objects.get_or_create(lichess_username__iexact=reg.lichess_username,
defaults={
'lichess_username': reg.lichess_username})
player.update_profile(user_meta)
reg.classical_rating = player.rating_for(reg.season.league)
reg.peak_classical_rating = lichessapi.get_peak_rating(reg.lichess_username,
reg.season.league.rating_type)
reg.has_played_20_games = not player.provisional_for(reg.season.league)
if player.account_status != 'normal':
fail_reason = 'The lichess user "%s" has the "%s" mark.' % (
reg.lichess_username, player.account_status)
if reg.already_in_slack_group and not player.slack_user_id:
reg.already_in_slack_group = False
except lichessapi.ApiWorkerError:
fail_reason = 'The lichess user "%s" could not be found.' % reg.lichess_username
if not reg.has_played_20_games:
warnings.append('Has a provisional rating.')
if not reg.can_commit and (
reg.season.league.competitor_type != 'team' or reg.alternate_preference != 'alternate'):
warnings.append('Can\'t commit to a game per week.')
if not reg.agreed_to_rules:
warnings.append('Didn\'t agree to rules.')
if fail_reason:
reg.validation_ok = False
reg.validation_warning = False
comment_text = 'Validation error: %s' % fail_reason
elif warnings:
reg.validation_ok = True
reg.validation_warning = True
comment_text = 'Validation warning: %s' % ' '.join(warnings)
else:
reg.validation_ok = True
reg.validation_warning = False
comment_text = 'Validated.'
add_system_comment(reg, comment_text)
with reversion.create_revision():
reversion.set_comment('Validated registration.')
reg.save()
@receiver(post_save, sender=Registration, dispatch_uid='heltour.tournament.tasks')
def registration_saved(instance, created, **kwargs):
if not created:
return
validate_registration.apply_async(args=[instance.pk], countdown=1)
@receiver(signals.do_validate_registration, dispatch_uid='heltour.tournament.tasks')
def do_validate_registration(reg_id, **kwargs):
validate_registration.apply_async(args=[reg_id], countdown=1)
@app.task(bind=True)
def pairings_published(self, round_id, overwrite=False):
round_ = Round.objects.get(pk=round_id)
season = round_.season
league = season.league
if round_.number == season.rounds and season.registration_open and league.get_leaguesetting().close_registration_at_last_round:
with reversion.create_revision():
reversion.set_comment('Close registration')
season.registration_open = False
season.save()
slackapi.send_control_message('refresh pairings %s' % league.tag)
alternates_manager.round_pairings_published(round_)
signals.notify_mods_pairings_published.send(sender=pairings_published, round_=round_)
signals.notify_players_round_start.send(sender=pairings_published, round_=round_)
signals.notify_mods_round_start_done.send(sender=pairings_published, round_=round_)
@receiver(signals.do_pairings_published, dispatch_uid='heltour.tournament.tasks')
def do_pairings_published(sender, round_id, **kwargs):
pairings_published.apply_async(args=[round_id], countdown=1)
@app.task(bind=True)
def schedule_publish(self, round_id):
with cache.lock('schedule_publish'):
round_ = Round.objects.get(pk=round_id)
if round_.publish_pairings:
# Already published
return
round_.publish_pairings = True
round_.save()
# Update ranks in case of manual edits
rank_dict = lone_player_pairing_rank_dict(round_.season)
for lpp in round_.loneplayerpairing_set.all().nocache():
lpp.refresh_ranks(rank_dict)
with reversion.create_revision():
reversion.set_comment('Published pairings.')
lpp.save()
for bye in round_.playerbye_set.all():
bye.refresh_rank(rank_dict)
with reversion.create_revision():
reversion.set_comment('Published pairings.')
bye.save()
@receiver(signals.do_schedule_publish, dispatch_uid='heltour.tournament.tasks')
def do_schedule_publish(sender, round_id, eta, **kwargs):
schedule_publish.apply_async(args=[round_id], eta=eta)
if eta > timezone.now():
signals.publish_scheduled.send(sender=do_schedule_publish, round_id=round_id, eta=eta)
@app.task(bind=True)
def notify_slack_link(self, lichess_username):
player = Player.get_or_create(lichess_username)
email = slackapi.get_user(player.slack_user_id).email
msg = 'Your lichess account has been successfully linked with the Slack account "%s".' % email
lichessapi.send_mail(lichess_username, 'Slack Account Linked', msg)
@receiver(signals.slack_account_linked, dispatch_uid='heltour.tournament.tasks')
def do_notify_slack_link(lichess_username, **kwargs):
notify_slack_link.apply_async(args=[lichess_username], countdown=1)
@app.task(bind=True)
def create_team_channel(self, team_ids):
intro_message = textwrap.dedent("""
Welcome! This is your private team channel. Feel free to chat, study, discuss strategy, or whatever you like!
You need to pick a team captain and a team name by {season_start}.
Once you've chosen (or if you need help with anything), contact one of the moderators:
{mods}
Here are some useful links for your team:
- <{pairings_url}|View your team pairings>
- <{calendar_url}|Import your team pairings to your calendar>""")
for team in Team.objects.filter(id__in=team_ids).select_related('season__league').nocache():
pairings_url = abs_url(reverse('by_league:by_season:pairings_by_team',
args=[team.season.league.tag, team.season.tag, team.number]))
calendar_url = abs_url(reverse('by_league:by_season:pairings_by_team_icalendar',
args=[team.season.league.tag, team.season.tag,
team.number])).replace('https:', 'webcal:')
mods = team.season.league.leaguemoderator_set.filter(is_active=True)
mods_str = ' '.join(('<@%s>' % lm.player.lichess_username.lower() for lm in mods))
season_start = '?' if team.season.start_date is None else team.season.start_date.strftime(
'%b %-d')
intro_message_formatted = intro_message.format(mods=mods_str, season_start=season_start,
pairings_url=pairings_url,
calendar_url=calendar_url)
team_members = team.teammember_set.select_related('player').nocache()
user_ids = [tm.player.slack_user_id for tm in team_members]
channel_name = 'team-%d-s%s' % (team.number, team.season.tag)
topic = "Team Pairings: %s | Team Calendar: %s" % (pairings_url, calendar_url)
try:
group = slackapi.create_group(channel_name)
time.sleep(1)
except slackapi.NameTaken:
logger.error('Could not create slack team, name taken: %s' % channel_name)
continue
channel_ref = '#%s' % group.name
for user_id in user_ids:
if user_id:
try:
slackapi.invite_to_group(group.id, user_id)
except slackapi.SlackError:
logger.exception('Could not invite %s to slack' % user_id)
time.sleep(1)
slackapi.invite_to_group(group.id, settings.CHESSTER_USER_ID)
time.sleep(1)
with reversion.create_revision():
reversion.set_comment('Creating slack channel')
team.slack_channel = group.id
team.save()
slackapi.set_group_topic(group.id, topic)
time.sleep(1)
slackapi.leave_group(group.id)
time.sleep(1)
slackapi.send_message(channel_ref, intro_message_formatted)
time.sleep(1)
@receiver(signals.do_create_team_channel, dispatch_uid='heltour.tournament.tasks')
def do_create_team_channel(sender, team_ids, **kwargs):
create_team_channel.apply_async(args=[team_ids], countdown=1)
@app.task(bind=True)
def alternates_manager_tick(self):
for season in Season.objects.filter(is_active=True, is_completed=False):
if season.alternates_manager_enabled():
alternates_manager.tick(season)
@app.task(bind=True)
def celery_is_up(self):
uptime.celery.is_up = True
@receiver(post_save, sender=PlayerPairing, dispatch_uid='heltour.tournament.tasks')
def pairing_changed(instance, created, **kwargs):
if instance.game_link != '' and instance.result == '':
game_id = get_gameid_from_gamelink(instance.game_link)
if game_id:
lichessapi.add_watch(game_id)
| mit | -4,893,947,234,258,996,000 | 45.518261 | 131 | 0.606999 | false |
funkotron/django-lean | setup.py | 1 | 2630 | # -*- coding: utf-8 -*-
"""A/B Testing for Django
django-lean allows you to perform split-test experiments on your users.
In brief, this involves exposing 50% of your users to one implementation
and 50% to another, then comparing the performance of these two groups
with regards to certain metrics.
"""
from distutils.core import setup
description, long_description = __doc__.split('\n\n', 1)
setup(
name='django-lean',
version='0.15',
author='Akoha, Inc.',
author_email='[email protected]',
description=('A framework for performing and analyzing split-test ' +
'experiments in Django applications.'),
long_description=('django-lean aims to be a collection of tools for ' +
'Lean Startups using the Django platform. Currently ' +
'it provides a framework for implementing split-test ' +
'experiments in JavaScript, Python, or Django template ' +
'code along with administrative views for analyzing ' +
'the results of those experiments.'),
license='BSD',
platforms=['any'],
url='http://bitbucket.org/akoha/django-lean/wiki/Home',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
package_dir = {'': 'src'},
packages=[
'django_lean',
'django_lean.experiments',
'django_lean.experiments.management',
'django_lean.experiments.management.commands',
'django_lean.experiments.migrations',
'django_lean.experiments.templatetags',
'django_lean.experiments.tests',
'django_lean.lean_analytics',
'django_lean.lean_retention',
'django_lean.lean_retention.migrations',
'django_lean.lean_retention.tests',
'django_lean.lean_segments',
'django_lean.lean_segments.management',
'django_lean.lean_segments.management.commands',
],
package_data={
'django_lean.experiments': ['media/experiments/*.js',
'templates/experiments/*.html',
'templates/experiments/include/*.html'],
'django_lean.experiments.tests': ['data/*.json'],
},
install_requires=['django >= 1.0'],
tests_require=['BeautifulSoup', 'mox'],
)
| bsd-3-clause | -1,679,957,604,122,841,600 | 39.461538 | 80 | 0.609886 | false |
mr-tim/sandpit | sandpit_app/webapp/admin.py | 1 | 1110 | from flask import Blueprint, redirect, render_template
import json
import docker
import security
admin = Blueprint('admin', __name__)
@admin.route('/admin')
@security.logged_in
@security.admin
def admin_home():
images = sorted(docker.images(), key=lambda i: i['repository'])
processes = sorted(docker.ps(), key=lambda p: p['image'])
return render_template('admin.html', images=images, processes=processes, current_tab='admin')
@admin.route('/admin/image/<image_id>')
@security.logged_in
@security.admin
def image(image_id):
image = json.dumps(docker.inspect(image_id), indent=2)
return render_template('image.html', image=image, current_tab='admin')
@admin.route('/admin/process/<process_id>')
@security.logged_in
@security.admin
def process(process_id):
process = json.dumps(docker.inspect(process_id), indent=2)
return render_template('process.html', process=process, current_tab='admin')
@admin.route('/admin/process/<process_id>/stop', methods=['POST'])
@security.logged_in
@security.admin
def stop_process(process_id):
docker.stop(process_id)
return redirect('/admin')
| mit | 6,534,282,961,573,552,000 | 29.833333 | 97 | 0.725225 | false |
stonebig/numba | numba/core/errors.py | 1 | 22392 | """
Numba-specific errors and warnings.
"""
import abc
import contextlib
import os
import sys
import warnings
import numba.core.config
import numpy as np
from collections import defaultdict
from numba.core.utils import chain_exception
from functools import wraps
from abc import abstractmethod
# Filled at the end
__all__ = []
class NumbaWarning(Warning):
"""
Base category for all Numba compiler warnings.
"""
def __init__(self, msg, loc=None, highlighting=True, ):
self.msg = msg
self.loc = loc
if highlighting:
highlight = termcolor().errmsg
else:
def highlight(x):
return x
if loc:
super(NumbaWarning, self).__init__(
highlight("%s\n%s\n" % (msg, loc.strformat())))
else:
super(NumbaWarning, self).__init__(highlight("%s" % (msg,)))
class NumbaPerformanceWarning(NumbaWarning):
"""
Warning category for when an operation might not be
as fast as expected.
"""
class NumbaDeprecationWarning(NumbaWarning):
"""
Warning category for use of a deprecated feature.
"""
class NumbaPendingDeprecationWarning(NumbaWarning):
"""
Warning category for use of a feature that is pending deprecation.
"""
class NumbaParallelSafetyWarning(NumbaWarning):
"""
Warning category for when an operation in a prange
might not have parallel semantics.
"""
class NumbaTypeSafetyWarning(NumbaWarning):
"""
Warning category for unsafe casting operations.
"""
class NumbaExperimentalFeatureWarning(NumbaWarning):
"""
Warning category for using an experimental feature.
"""
class NumbaInvalidConfigWarning(NumbaWarning):
"""
Warning category for using an invalid configuration.
"""
# These are needed in the color formatting of errors setup
class _ColorScheme(metaclass=abc.ABCMeta):
@abstractmethod
def code(self, msg):
pass
@abstractmethod
def errmsg(self, msg):
pass
@abstractmethod
def filename(self, msg):
pass
@abstractmethod
def indicate(self, msg):
pass
@abstractmethod
def highlight(self, msg):
pass
@abstractmethod
def reset(self, msg):
pass
class _DummyColorScheme(_ColorScheme):
def __init__(self, theme=None):
pass
def code(self, msg):
pass
def errmsg(self, msg):
pass
def filename(self, msg):
pass
def indicate(self, msg):
pass
def highlight(self, msg):
pass
def reset(self, msg):
pass
# holds reference to the instance of the terminal color scheme in use
_termcolor_inst = None
try:
import colorama
# If the colorama version is < 0.3.9 it can break stdout/stderr in some
# situations, as a result if this condition is met colorama is disabled and
# the user is warned. Note that early versions did not have a __version__.
colorama_version = getattr(colorama, '__version__', '0.0.0')
if tuple([int(x) for x in colorama_version.split('.')]) < (0, 3, 9):
msg = ("Insufficiently recent colorama version found. "
"Numba requires colorama >= 0.3.9")
# warn the user
warnings.warn(msg)
# trip the exception to disable color errors
raise ImportError
# If Numba is running in testsuite mode then do not use error message
# coloring so CI system output is consistently readable without having
# to read between shell escape characters.
if os.environ.get('NUMBA_DISABLE_ERROR_MESSAGE_HIGHLIGHTING', None):
raise ImportError # just to trigger the exception handler below
except ImportError:
class NOPColorScheme(_DummyColorScheme):
def __init__(self, theme=None):
if theme is not None:
raise ValueError("specifying a theme has no effect")
_DummyColorScheme.__init__(self, theme=theme)
def code(self, msg):
return msg
def errmsg(self, msg):
return msg
def filename(self, msg):
return msg
def indicate(self, msg):
return msg
def highlight(self, msg):
return msg
def reset(self, msg):
return msg
def termcolor():
global _termcolor_inst
if _termcolor_inst is None:
_termcolor_inst = NOPColorScheme()
return _termcolor_inst
else:
from colorama import init, reinit, deinit, Fore, Style
class ColorShell(object):
_has_initialized = False
def __init__(self):
init()
self._has_initialized = True
def __enter__(self):
if self._has_initialized:
reinit()
def __exit__(self, *exc_detail):
Style.RESET_ALL
deinit()
class reset_terminal(object):
def __init__(self):
self._buf = bytearray(b'')
def __enter__(self):
return self._buf
def __exit__(self, *exc_detail):
self._buf += bytearray(Style.RESET_ALL.encode('utf-8'))
# define some default themes, if more are added, update the envvars docs!
themes = {}
# No color added, just bold weighting
themes['no_color'] = {'code': None,
'errmsg': None,
'filename': None,
'indicate': None,
'highlight': None,
'reset': None, }
# suitable for terminals with a dark background
themes['dark_bg'] = {'code': Fore.BLUE,
'errmsg': Fore.YELLOW,
'filename': Fore.WHITE,
'indicate': Fore.GREEN,
'highlight': Fore.RED,
'reset': Style.RESET_ALL, }
# suitable for terminals with a light background
themes['light_bg'] = {'code': Fore.BLUE,
'errmsg': Fore.BLACK,
'filename': Fore.MAGENTA,
'indicate': Fore.BLACK,
'highlight': Fore.RED,
'reset': Style.RESET_ALL, }
# suitable for terminals with a blue background
themes['blue_bg'] = {'code': Fore.WHITE,
'errmsg': Fore.YELLOW,
'filename': Fore.MAGENTA,
'indicate': Fore.CYAN,
'highlight': Fore.RED,
'reset': Style.RESET_ALL, }
# suitable for use in jupyter notebooks
themes['jupyter_nb'] = {'code': Fore.BLACK,
'errmsg': Fore.BLACK,
'filename': Fore.GREEN,
'indicate': Fore.CYAN,
'highlight': Fore.RED,
'reset': Style.RESET_ALL, }
default_theme = themes['no_color']
class HighlightColorScheme(_DummyColorScheme):
def __init__(self, theme=default_theme):
self._code = theme['code']
self._errmsg = theme['errmsg']
self._filename = theme['filename']
self._indicate = theme['indicate']
self._highlight = theme['highlight']
self._reset = theme['reset']
_DummyColorScheme.__init__(self, theme=theme)
def _markup(self, msg, color=None, style=Style.BRIGHT):
features = ''
if color:
features += color
if style:
features += style
with ColorShell():
with reset_terminal() as mu:
mu += features.encode('utf-8')
mu += (msg).encode('utf-8')
return mu.decode('utf-8')
def code(self, msg):
return self._markup(msg, self._code)
def errmsg(self, msg):
return self._markup(msg, self._errmsg)
def filename(self, msg):
return self._markup(msg, self._filename)
def indicate(self, msg):
return self._markup(msg, self._indicate)
def highlight(self, msg):
return self._markup(msg, self._highlight)
def reset(self, msg):
return self._markup(msg, self._reset)
def termcolor():
global _termcolor_inst
if _termcolor_inst is None:
scheme = themes[numba.core.config.COLOR_SCHEME]
_termcolor_inst = HighlightColorScheme(scheme)
return _termcolor_inst
feedback_details = """
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
"""
unsupported_error_info = """
Unsupported functionality was found in the code Numba was trying to compile.
If this functionality is important to you please file a feature request at:
https://github.com/numba/numba/issues/new
"""
interpreter_error_info = """
Unsupported Python functionality was found in the code Numba was trying to
compile. This error could be due to invalid code, does the code work
without Numba? (To temporarily disable Numba JIT, set the `NUMBA_DISABLE_JIT`
environment variable to non-zero, and then rerun the code).
If the code is valid and the unsupported functionality is important to you
please file a feature request at: https://github.com/numba/numba/issues/new
To see Python/NumPy features supported by the latest release of Numba visit:
https://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
https://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
"""
constant_inference_info = """
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
however please first check that your code is valid for compilation,
particularly with respect to string interpolation (not supported!) and
the requirement of compile time constants as arguments to exceptions:
https://numba.pydata.org/numba-doc/latest/reference/pysupported.html?highlight=exceptions#constructs
If the code is valid and the unsupported functionality is important to you
please file a feature request at: https://github.com/numba/numba/issues/new
If you think your code should work with Numba. %s
""" % feedback_details
typing_error_info = """
This is not usually a problem with Numba itself but instead often caused by
the use of unsupported features or an issue in resolving types.
To see Python/NumPy features supported by the latest release of Numba visit:
https://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
https://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
For more information about typing errors and how to debug them visit:
https://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-doesn-t-compile
If you think your code should work with Numba, please report the error message
and traceback, along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
"""
reportable_issue_info = """
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
You are currently using Numba version %s.
%s
""" % (numba.__version__, feedback_details)
error_extras = dict()
error_extras['unsupported_error'] = unsupported_error_info
error_extras['typing'] = typing_error_info
error_extras['reportable'] = reportable_issue_info
error_extras['interpreter'] = interpreter_error_info
error_extras['constant_inference'] = constant_inference_info
def deprecated(arg):
"""Define a deprecation decorator.
An optional string should refer to the new API to be used instead.
Example:
@deprecated
def old_func(): ...
@deprecated('new_func')
def old_func(): ..."""
subst = arg if isinstance(arg, str) else None
def decorator(func):
def wrapper(*args, **kwargs):
msg = "Call to deprecated function \"{}\"."
if subst:
msg += "\n Use \"{}\" instead."
warnings.warn(msg.format(func.__name__, subst),
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return wraps(func)(wrapper)
if not subst:
return decorator(arg)
else:
return decorator
class WarningsFixer(object):
"""
An object "fixing" warnings of a given category caught during
certain phases. The warnings can have their filename and lineno fixed,
and they are deduplicated as well.
"""
def __init__(self, category):
self._category = category
# {(filename, lineno, category) -> messages}
self._warnings = defaultdict(set)
@contextlib.contextmanager
def catch_warnings(self, filename=None, lineno=None):
"""
Store warnings and optionally fix their filename and lineno.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always', self._category)
yield
for w in wlist:
msg = str(w.message)
if issubclass(w.category, self._category):
# Store warnings of this category for deduplication
filename = filename or w.filename
lineno = lineno or w.lineno
self._warnings[filename, lineno, w.category].add(msg)
else:
# Simply emit other warnings again
warnings.warn_explicit(msg, w.category,
w.filename, w.lineno)
def flush(self):
"""
Emit all stored warnings.
"""
def key(arg):
# It is possible through codegen to create entirely identical
# warnings, this leads to comparing types when sorting which breaks
# on Python 3. Key as str() and if the worse happens then `id`
# creates some uniqueness
return str(arg) + str(id(arg))
for (filename, lineno, category), messages in sorted(
self._warnings.items(), key=key):
for msg in sorted(messages):
warnings.warn_explicit(msg, category, filename, lineno)
self._warnings.clear()
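# Illustrative usage sketch (not part of the original module); the filename,
# line number, and message are assumptions:
#
#   fixer = WarningsFixer(NumbaWarning)
#   with fixer.catch_warnings(filename='kernel.py', lineno=12):
#       warnings.warn(NumbaWarning('example message'))
#   fixer.flush()   # emits the deduplicated warnings with the fixed location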
class NumbaError(Exception):
def __init__(self, msg, loc=None, highlighting=True):
self.msg = msg
self.loc = loc
if highlighting:
highlight = termcolor().errmsg
else:
def highlight(x):
return x
if loc:
new_msg = "%s\n%s\n" % (msg, loc.strformat())
else:
new_msg = "%s" % (msg,)
super(NumbaError, self).__init__(highlight(new_msg))
@property
def contexts(self):
try:
return self._contexts
except AttributeError:
self._contexts = lst = []
return lst
def add_context(self, msg):
"""
Add contextual info. The exception message is expanded with the new
contextual information.
"""
self.contexts.append(msg)
f = termcolor().errmsg('{0}\n') + termcolor().filename('During: {1}')
newmsg = f.format(self, msg)
self.args = (newmsg,)
return self
def patch_message(self, new_message):
"""
Change the error message to the given new message.
"""
self.args = (new_message,) + self.args[1:]
class UnsupportedError(NumbaError):
"""
Numba does not have an implementation for this functionality.
"""
pass
class UnsupportedRewriteError(UnsupportedError):
"""UnsupportedError from rewrite passes
"""
pass
class IRError(NumbaError):
"""
An error occurred during Numba IR generation.
"""
pass
class RedefinedError(IRError):
"""
An error occurred during interpretation of IR due to variable redefinition.
"""
pass
class NotDefinedError(IRError):
"""
An undefined variable is encountered during interpretation of IR.
"""
def __init__(self, name, loc=None):
self.name = name
msg = ("The compiler failed to analyze the bytecode. "
"Variable '%s' is not defined." % name)
super(NotDefinedError, self).__init__(msg, loc=loc)
class VerificationError(IRError):
"""
An error occurred during IR verification. Once Numba's internal
representation (IR) is constructed it is then verified to ensure that
terminators are both present and in the correct places within the IR. If
it is the case that this condition is not met, a VerificationError is
raised.
"""
pass
class DeprecationError(NumbaError):
"""
Functionality is deprecated.
"""
pass
class LoweringError(NumbaError):
"""
An error occurred during lowering.
"""
def __init__(self, msg, loc=None):
super(LoweringError, self).__init__(msg, loc=loc)
class UnsupportedParforsError(NumbaError):
"""
    An error occurred because parfors is not supported on the platform.
"""
pass
class ForbiddenConstruct(LoweringError):
"""
A forbidden Python construct was encountered (e.g. use of locals()).
"""
pass
class TypingError(NumbaError):
"""
A type inference failure.
"""
pass
class UntypedAttributeError(TypingError):
def __init__(self, value, attr, loc=None):
module = getattr(value, 'pymod', None)
if module is not None and module == np:
# unsupported numpy feature.
msg = ("Use of unsupported NumPy function 'numpy.%s' "
"or unsupported use of the function.") % attr
else:
msg = "Unknown attribute '{attr}' of type {type}"
msg = msg.format(type=value, attr=attr)
super(UntypedAttributeError, self).__init__(msg, loc=loc)
class ByteCodeSupportError(NumbaError):
"""
Failure to extract the bytecode of the user's function.
"""
def __init__(self, msg, loc=None):
super(ByteCodeSupportError, self).__init__(msg, loc=loc)
class CompilerError(NumbaError):
"""
Some high-level error in the compiler.
"""
pass
class ConstantInferenceError(NumbaError):
"""
Failure during constant inference.
"""
def __init__(self, value, loc=None):
super(ConstantInferenceError, self).__init__(value, loc=loc)
class InternalError(NumbaError):
"""
    For wrapping an internal error that occurred within the compiler
"""
def __init__(self, exception):
super(InternalError, self).__init__(str(exception))
self.old_exception = exception
class RequireLiteralValue(TypingError):
"""
For signalling that a function's typing requires a constant value for
some of its arguments.
"""
pass
class ForceLiteralArg(NumbaError):
"""A Pseudo-exception to signal the dispatcher to type an argument literally
Attributes
----------
requested_args : frozenset[int]
requested positions of the arguments.
"""
def __init__(self, arg_indices, fold_arguments=None, loc=None):
"""
Parameters
----------
arg_indices : Sequence[int]
requested positions of the arguments.
fold_arguments: callable
A function ``(tuple, dict) -> tuple`` that binds and flattens
the ``args`` and ``kwargs``.
loc : numba.ir.Loc or None
"""
super(ForceLiteralArg, self).__init__(
"Pseudo-exception to force literal arguments in the dispatcher",
loc=loc,
)
self.requested_args = frozenset(arg_indices)
self.fold_arguments = fold_arguments
def bind_fold_arguments(self, fold_arguments):
"""Bind the fold_arguments function
"""
e = ForceLiteralArg(self.requested_args, fold_arguments,
loc=self.loc)
return chain_exception(e, self)
def combine(self, other):
"""Returns a new instance by or'ing the requested_args.
"""
if not isinstance(other, ForceLiteralArg):
m = '*other* must be a {} but got a {} instead'
raise TypeError(m.format(ForceLiteralArg, type(other)))
return ForceLiteralArg(self.requested_args | other.requested_args)
def __or__(self, other):
"""Same as self.combine(other)
"""
return self.combine(other)
class LiteralTypingError(TypingError):
"""
Failure in typing a Literal type
"""
pass
def _format_msg(fmt, args, kwargs):
return fmt.format(*args, **kwargs)
_numba_path = os.path.dirname(__file__)
loc_info = {}
@contextlib.contextmanager
def new_error_context(fmt_, *args, **kwargs):
"""
    A contextmanager that prepends contextual information to any exception
    raised within. If the exception type is not an instance of NumbaError,
    it will be wrapped into an InternalError. The exception class can be
changed by providing a "errcls_" keyword argument with the exception
constructor.
The first argument is a message that describes the context. It can be a
format string. If there are additional arguments, it will be used as
``fmt_.format(*args, **kwargs)`` to produce the final message string.
"""
errcls = kwargs.pop('errcls_', InternalError)
loc = kwargs.get('loc', None)
if loc is not None and not loc.filename.startswith(_numba_path):
loc_info.update(kwargs)
try:
yield
except NumbaError as e:
e.add_context(_format_msg(fmt_, args, kwargs))
raise
except AssertionError:
# Let assertion error pass through for shorter traceback in debugging
raise
except Exception as e:
newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
raise newerr.with_traceback(tb)
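# Illustrative usage sketch (not part of the original module); the message,
# `callee_type`, and `loc` are assumptions:
#
#   with new_error_context("resolving callee type: {0}", callee_type, loc=loc):
#       ...  # any exception raised here gains a "During: ..." context line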
__all__ += [name for (name, value) in globals().items()
if not name.startswith('_') and isinstance(value, type)
and issubclass(value, (Exception, Warning))]
| bsd-2-clause | -8,959,926,455,638,270,000 | 28.501976 | 100 | 0.610129 | false |
spradeepv/dive-into-python | hackerrank/domain/algorithms/sorting/correctness-loop-invariant.py | 1 | 2946 | """
Problem Statement
In the previous challenge, you wrote code to perform an Insertion Sort on an
unsorted array. But how would you prove that the code is correct? I.e. how
do you show that for any input your code will provide the right output?
Loop Invariant
In computer science, you could prove it formally with a loop invariant,
where you state that a desired property is maintained in your loop. Such a
proof is broken down into the following parts:
Initialization: It is true (in a limited sense) before the loop runs.
Maintenance: If it's true before an iteration of a loop, it remains true
before the next iteration.
Termination: It will terminate in a useful way once it is finished.
Insertion Sort's Invariant
Say, you have some InsertionSort code, where the outer loop goes through the
whole array A:
for(int i = 1; i < A.length; i++){
//insertion sort code
You could then state the following loop invariant:
At the start of every iteration of the outer loop (indexed with i),
the subarray until ar[i] consists of the original elements that were there,
but in sorted order.
To prove Insertion Sort is correct, you will then demonstrate it for the
three stages:
Initialization - The subarray starts with the first element of the array,
and it is (obviously) sorted to begin with.
Maintenance - Each iteration of the loop expands the subarray, but keeps the
sorted property. An element V gets inserted into the array only when it is
greater than the element to its left. Since the elements to its left have
already been sorted, it means V is greater than all the elements to its
left, so the array remains sorted. (In Insertion Sort 2 we saw this by
printing the array each time an element was properly inserted.)
Termination - The code will terminate after i has reached the last element
in the array, which means the sorted subarray has expanded to encompass the
entire array. The array is now fully sorted.
Loop Invariant Chart
You can often use a similar process to demonstrate the correctness of many
algorithms. You can see these notes for more information.
Challenge
In the InsertionSort code below, there is an error. Can you fix it? Print
the array only once, when it is fully sorted.
Details
The Input format and the constraints are the same as in the previous
challenges and are presented below.
Input Format
There will be two lines of input:
s - the size of the array
ar - the list of numbers that makes up the array
Output Format
Output the numbers in order, space-separated.
Constraints
1 <= s <= 1000
-1500 <= V <= 1500, where V is each element of ar
Sample Input
6
1 4 3 5 6 2
Sample Output
1 2 3 4 5 6
"""
def insertion_sort(l):
    # Invariant: before each outer iteration, l[0:i] holds the original
    # elements of that prefix in sorted order.
    for i in range(1, len(l)):
        j = i - 1
        key = l[i]
        # Shift larger elements right until the insertion point for key is found.
        while (j >= 0) and (l[j] > key):
            l[j + 1] = l[j]
            j -= 1
        l[j + 1] = key
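# Illustrative spot-check (not part of the original submission):
#   data = [1, 4, 3, 5, 6, 2]
#   insertion_sort(data)
#   assert data == [1, 2, 3, 4, 5, 6]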
m = int(input().strip())
ar = [int(i) for i in input().strip().split()]
insertion_sort(ar)
print(" ".join(map(str, ar)))
| mit | 7,067,585,179,489,689,000 | 30.340426 | 76 | 0.741005 | false |
GoodiesHQ/PyPad | pypad/tests/test_iso_10126.py | 1 | 2249 | """
Unit tests for the ISO 10126 padding algorithm
"""
from pypad import iso_10126
from pypad.exceptions import InvalidBlockSize, InvalidMessage
import pytest
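# Quick reference (not part of the original test module): ISO 10126 fills the
# padding with arbitrary/random bytes and stores the pad length in the final
# byte, so for a 7-byte message and a 10-byte block the layout is assumed to be:
#
#   b"Testing" + <2 random bytes> + b"\x03"
#
# which is why the tests below only check the prefix and the final byte.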
def test_iso_10126_sample():
"""Testing the ISO 10126 implementation with some generic test data"""
original = b"Testing"
expected = b"Testing", b"\x03"
block_sz = 10
padded = iso_10126.pad(original, block_sz)
assert padded.startswith(expected[0]) and padded.endswith(expected[1])
unpadded = iso_10126.unpad(padded)
assert unpadded == original
def test_iso_10126_aligned():
"""Testing the ISO 10126 implementation with aligned data"""
original = b"Testing"
expected = b"Testing", b"\x07"
block_sz = 7
padded = iso_10126.pad(original, block_sz)
assert padded.startswith(expected[0]) and padded.endswith(expected[1])
unpadded = iso_10126.unpad(padded)
assert unpadded == original
def test_iso_10126_empty():
"""Testing the ISO 10126 with an empty buffer with a small block size"""
original = b""
expected = b"", b"\x0a"
block_sz = 10
padded = iso_10126.pad(original, block_sz)
assert padded.startswith(expected[0]) and padded.endswith(expected[1])
unpadded = iso_10126.unpad(padded)
assert unpadded == original
def test_iso_10126_empty_max():
"""Testing the ISO 10126 with an empty buffer with the maximum block size"""
original = b""
expected = b"\x00"
padded = iso_10126.pad(original)
assert padded.endswith(expected)
unpadded = iso_10126.unpad(padded)
assert unpadded == original
def test_iso_10126_invalid_block_size():
"""Testing the ISO 10126 with an invalid block size"""
original = b"Testing"
block_sz = iso_10126.MAX_BLOCK_SIZE + 1
with pytest.raises(InvalidBlockSize):
iso_10126.pad(original, block_sz)
def test_iso_10126_invalid_message():
"""Testing the ISO 10126 with an invalid message"""
bad_msg = b"Testing\x09"
with pytest.raises(InvalidMessage):
iso_10126.unpad(bad_msg)
def test_iso_10126_invalid_type():
"""Testing the ISO 10126 with an invalid message type"""
bad_msg = ['T', 'e', 's', 't', 'i', 'n', 'g']
with pytest.raises(TypeError):
iso_10126.pad(bad_msg)
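# Note (added for exposition): these are plain pytest test functions; a typical
# invocation, assuming the pypad package is importable, is
#   pytest pypad/tests/test_iso_10126.py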
| mit | -6,792,367,685,178,103,000 | 27.1125 | 80 | 0.673633 | false |
our-city-app/oca-backend | src/solutions/common/bizz/city_vouchers.py | 1 | 3297 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
from typing import List
from mcfw.rpc import returns, arguments
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.to.service import SendApiCallCallbackResultTO, UserDetailsTO
from rogerthat.utils.app import get_app_user_tuple
from solutions import translate
from solutions.common.dal import get_solution_settings
from solutions.common.models.loyalty import CustomLoyaltyCard
def _create_resolve_result(result_type, url, email, app_id):
return {
'type': result_type,
'url': url,
'content': url,
'userDetails': {
'appId': app_id,
'email': email,
'name': email
}
}
@returns(dict)
@arguments(service_user=users.User, service_identity=unicode, url=unicode)
def _resolve_voucher(service_user, service_identity, url):
"""Lookup the provided URL. It will be treated as a custom loyalty card."""
# 1/ Check if a custom loyalty card already exists for this URL
custom_loyalty_card = CustomLoyaltyCard.get_by_url(url)
if custom_loyalty_card and custom_loyalty_card.app_user:
human_user, app_id = get_app_user_tuple(custom_loyalty_card.app_user)
return _create_resolve_result(CustomLoyaltyCard.TYPE, url, human_user.email(), app_id)
logging.debug('Unknown QR code scanned: %s. Loyalty device will create custom paper loyalty card.', url)
return _create_resolve_result(u'unknown', url, u'dummy', u'dummy')
@returns(SendApiCallCallbackResultTO)
@arguments(service_user=users.User, email=unicode, method=unicode, params=unicode, tag=unicode,
service_identity=unicode,
user_details=[UserDetailsTO])
def solution_voucher_resolve(service_user, email, method, params, tag, service_identity, user_details):
# type: (users.User, unicode, unicode, unicode, unicode, unicode, List[UserDetailsTO]) -> SendApiCallCallbackResultTO
logging.debug("Received voucher resolve call with params: %s", params)
r = SendApiCallCallbackResultTO()
r.result = None
r.error = None
try:
jsondata = json.loads(params)
r_dict = _resolve_voucher(service_user, service_identity, jsondata['url'])
result = json.dumps(r_dict)
r.result = result if isinstance(result, unicode) else result.decode("utf8")
except BusinessException as be:
r.error = be.message
except:
logging.error("solutions.voucher.resolve exception occurred", exc_info=True)
sln_settings = get_solution_settings(service_user)
r.error = translate(sln_settings.main_language, 'error-occured-unknown')
return r
| apache-2.0 | 3,162,790,153,654,418,000 | 39.703704 | 121 | 0.715499 | false |
choderalab/openpathsampling | openpathsampling/pathsimulators/path_simulator.py | 1 | 3675 | import abc
import sys
import logging
from future.utils import with_metaclass
import openpathsampling as paths
from openpathsampling.netcdfplus import StorableNamedObject, StorableObject
from ..ops_logging import initialization_logging
logger = logging.getLogger(__name__)
init_log = logging.getLogger('openpathsampling.initialization')
class MCStep(StorableObject):
"""
A monte-carlo step in the main PathSimulation loop
    It references all objects created and used in an MC step: the mover and
    simulator used, the initial and final sampleset, the step number, and the
    generated movechange.
Attributes
----------
simulation : PathSimulation
the running pathsimulation responsible for generating the step
mccycle : int
the step number counting from the root sampleset
previous : SampleSet
the initial (pre) sampleset
active : SampleSet
the final (post) sampleset
change : MoveChange
the movechange describing the transition from pre to post
"""
def __init__(self, simulation=None, mccycle=-1, previous=None,
active=None, change=None):
super(MCStep, self).__init__()
self.simulation = simulation
self.previous = previous
self.active = active
self.change = change
self.mccycle = mccycle
class PathSimulator(with_metaclass(abc.ABCMeta, StorableNamedObject)):
"""Abstract class for the "main" function of a simulation.
Parameters
----------
storage : :class:`.Storage`
Storage file for results
Attributes
----------
save_frequency : int
Results should be sync'd (saved to disk) after every
``save_frequency`` steps. Note: subclasses must directly implement
this, the attribute is just a placeholder.
output_stream : file
Subclasses should write output to this, allowing a standard way to
redirect any output.
allow_refresh : bool
Whether to allow the output to refresh an ipynb cell; default True.
This is likely to be overridden when a pathsimulator is wrapped in
another simulation.
"""
#__metaclass__ = abc.ABCMeta
calc_name = "PathSimulator"
_excluded_attr = ['sample_set', 'step', 'save_frequency',
'output_stream']
def __init__(self, storage):
super(PathSimulator, self).__init__()
self.storage = storage
# self.engine = engine
self.save_frequency = 1
self.step = 0
initialization_logging(
logger=init_log, obj=self,
entries=['storage']#, 'engine']
)
self.sample_set = None
self.output_stream = sys.stdout # user can change to file handler
self.allow_refresh = True
def sync_storage(self):
"""
Will sync all collective variables and the storage to disk
"""
if self.storage is not None:
self.storage.sync_all()
@abc.abstractmethod
def run(self, n_steps):
"""
Run the simulator for a number of steps
Parameters
----------
n_steps : int
number of step to be run
"""
pass
def save_initial_step(self):
"""
Save the initial state as an MCStep to the storage
"""
mcstep = MCStep(
simulation=self,
mccycle=self.step,
active=self.sample_set,
change=paths.AcceptedSampleMoveChange(self.sample_set.samples)
)
if self.storage is not None:
self.storage.steps.save(mcstep)
self.storage.sync_all()
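# --- Illustrative sketch added for exposition; not part of openpathsampling ---
# A concrete simulator only has to implement `run`; the step counter, output
# stream, save frequency and storage syncing used below are all inherited.
class _ToyCountingSimulator(PathSimulator):
    """Toy example: advance the step counter and periodically sync storage."""
    calc_name = "ToyCountingSimulator"
    def run(self, n_steps):
        for _ in range(n_steps):
            self.step += 1
            self.output_stream.write("step %d\n" % self.step)
            if self.storage is not None and self.step % self.save_frequency == 0:
                self.sync_storage()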
| lgpl-2.1 | -4,037,917,422,712,159,700 | 29.122951 | 76 | 0.623129 | false |
guitarmanusa/metau | metau_lib/AboutDialog.py | 1 | 1914 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 <Kyle Francis> <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from gi.repository import Gtk # pylint: disable=E0611
from . helpers import get_builder
class AboutDialog(Gtk.AboutDialog):
__gtype_name__ = "AboutDialog"
def __new__(cls):
"""Special static method that's automatically called by Python when
constructing a new instance of this class.
Returns a fully instantiated AboutDialog object.
"""
builder = get_builder('AboutMetauDialog')
new_object = builder.get_object("about_metau_dialog")
new_object.finish_initializing(builder)
return new_object
def finish_initializing(self, builder):
"""Called while initializing this instance in __new__
        finish_initializing should be called after parsing the UI definition
        and creating an AboutDialog object with it, in order to finish
        initializing the new AboutMetauDialog instance.
Put your initialization code in here and leave __init__ undefined.
"""
# Get a reference to the builder and set up the signals.
self.builder = builder
self.ui = builder.get_ui(self)
| gpl-3.0 | 8,972,623,279,978,186,000 | 38.875 | 76 | 0.691745 | false |
cjaymes/pyscap | src/scap/model/xal_2_0/PostBoxType.py | 1 | 1725 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class PostBoxType(Model):
MODEL_MAP = {
'tag_name': 'PostBox',
'elements': [
{'tag_name': 'AddressLine', 'list': 'address_lines', 'class': 'AddressLineType'},
{'tag_name': 'PostBoxNumber', 'in': 'post_box_number', 'class': 'PostBoxNumberType'},
{'tag_name': 'PostBoxNumberPrefix', 'in': 'post_box_number_prefix', 'class': 'PostBoxNumberPrefixType'},
{'tag_name': 'PostBoxNumberSuffix', 'in': 'post_box_number_suffix', 'class': 'PostBoxNumberSuffixType'},
{'tag_name': 'PostBoxNumberExtension', 'in': 'post_box_number_extension', 'class': 'PostBoxNumberExtensionType'},
{'tag_name': 'Firm', 'in': 'firm', 'class': 'FirmType'},
{'tag_name': 'PostalCode', 'in': 'postal_code', 'class': 'PostalCodeType'},
{'tag_name': '*'},
],
'attributes': {
'Type': {},
'Indicator': {},
'*': {},
}
}
| gpl-3.0 | 940,065,220,103,556,200 | 42.125 | 125 | 0.628406 | false |
butala/pyrsss | pyrsss/signal/convmtx.py | 1 | 6852 | from __future__ import division
import numpy as NP
import scipy.linalg
import scipy.signal
import scipy.sparse as sparse
from util import zero_pad
class Convmtx(sparse.coo_matrix):
def __new__(cls, n, H, mode='full'):
"""
Construct sparse convolution matrix to operate on vector of
dimension *n* with the kernel *H*. The *mode* parameter can be
one of:
- full: standard convolution, i.e., zero-padding at the edges.
        - valid: convolution where only those portions with complete
overlap, i.e., no zero-padding, are considered.
- circ: circular convolution, i.e., periodic boundary
condition at the edges.
"""
def toeplitz_mapper_full(h):
if (h == 0).all():
return sparse.coo_matrix((k[-1], n[-1]))
else:
c = h
r = NP.array([c[0]] + [0]*(n[-1]-1))
return sparse.coo_matrix(scipy.linalg.toeplitz(c, r))
def toeplitz_mapper_valid(h):
if (h == 0).all():
return sparse.coo_matrix((k[-1], n[-1]))
else:
r = NP.zeros(n[-1])
r[:len(h)] = h
c = NP.zeros(k[-1])
c[0] = r[0]
return sparse.coo_matrix(scipy.linalg.toeplitz(c, r))
def toeplitz_mapper_circ(h):
if (h == 0).all():
return sparse.coo_matrix((k[-1], n[-1]))
else:
c = h
r = NP.zeros(n[-1])
r[0] = c[0]
r[1:] = h[:0:-1]
return sparse.coo_matrix(scipy.linalg.toeplitz(c, r))
def block_mapper_full(n, k, blocks):
c = [blocks[i] for i in range(k)]
r = [c[0]] + [None]*(n-1)
return sparse.bmat(scipy.linalg.toeplitz(c, r).tolist(), format='coo')
def block_mapper_valid(n, k, blocks):
r = []
for i in range(n):
if (n - k - i < 0):
r.append(None)
else:
r.append(blocks[n - k - i])
c = []
for i in range(n-k, n):
c.append(blocks[i])
return sparse.bmat(scipy.linalg.toeplitz(c, r).tolist(), format='coo')
def block_mapper_circ(n, k, blocks):
c = [blocks[i] for i in range(k)]
r = []
r.append(blocks[0])
r.extend(blocks[:0:-1])
return sparse.bmat(scipy.linalg.toeplitz(c, r).tolist(), format='coo')
m = H.shape
if mode == 'full':
k = tuple(NP.array(n) + NP.array(m) - 1)
toeplitz_mapper = toeplitz_mapper_full
block_mapper = block_mapper_full
H_zp = zero_pad(H, k)
c_list = NP.split(H_zp.flatten(), NP.prod(k[:-1]))
elif mode == 'valid':
k = tuple(NP.array(n) - NP.array(m) + 1)
toeplitz_mapper = toeplitz_mapper_valid
block_mapper = block_mapper_valid
H_zp = zero_pad(H[...,::-1], n)
c_list = NP.split(H_zp.flatten(), NP.prod(n[:-1]))
elif mode == 'circ':
assert((NP.array(m) <= NP.array(n)).all())
k = n
toeplitz_mapper = toeplitz_mapper_circ
block_mapper = block_mapper_circ
H_zp = zero_pad(H, k)
c_list = NP.split(H_zp.flatten(), NP.prod(k[:-1]))
else:
raise ValueError('Unknown mode {0}'.format(mode))
blocks = map(toeplitz_mapper, c_list)
for n_i, k_i in zip(n[-2::-1], k[-2::-1]):
if mode == 'full' or mode == 'circ':
blocks = map(lambda x: block_mapper(n_i, k_i, x),
NP.split(NP.array(blocks), len(blocks)/k_i))
elif mode =='valid':
blocks = map(lambda x: block_mapper(n_i, k_i, x),
NP.split(NP.array(blocks), len(blocks)/n_i))
else:
raise ValueError('Unknown mode {0}'.format(mode))
return blocks[0]
def ndcircconv(x, h):
"""
Compute the circular convolution of real, n-dimensional vectors
*x* and *h*.
"""
n = x.shape
m = h.shape
k = NP.array(n) + NP.array(m) - 1
return NP.real(NP.fft.ifftn(NP.fft.fftn(h, s=k) * NP.fft.fftn(x, s=k))).flat
def random_validation(N,
n_dim_max=4,
n_max=10,
m_max=10):
"""
Validate the :class:`Convmtx` implementation by comparing *N*
direct circular and full / valid convolutions with those computed
using :class:`Convmtx`. Limit the kernel and signal vector
dimension to *n_dim_max*, and the length per dimension to *n_max*
for the signal and *m_max* for the convolution kernel.
"""
print('Testing circ mode')
for i in range(1, N + 1):
n_dim = NP.random.random_integers(1, n_dim_max)
n = NP.random.random_integers(1, n_max, n_dim)
m = NP.random.random_integers(1, m_max, n_dim)
x = NP.random.randn(*n)
h = NP.arange(NP.prod(m)) + 1
h.shape = m
k = NP.array(n) + NP.array(m) - 1
print('{} of {} (n={} k={} mode=circ)'.format(i, N, n, k))
H = Convmtx(k, h, mode='circ')
y_true = ndcircconv(x, h)
x_zp = zero_pad(x, k)
y_mtx = H * x_zp.flat
assert(NP.allclose(y_true, y_mtx))
print('')
print('Testing full and valid modes')
mode_list = ['full', 'valid']
for i in range(1, N + 1):
n_dim = NP.random.random_integers(1, n_dim_max)
n = NP.random.random_integers(1, n_max, n_dim)
m = NP.random.random_integers(1, m_max, n_dim)
mode = mode_list[NP.random.random_integers(0, 1)]
if mode == 'full':
k = tuple(NP.array(n) + NP.array(m) - 1)
elif mode == 'valid':
k = tuple(NP.array(n) - NP.array(m) + 1)
else:
assert(False)
x = NP.random.randn(*n)
h = NP.arange(NP.prod(m)) + 1
h.shape = m
if (NP.array(k) <= 0).any():
assert(mode == 'valid')
n, m = m, n
x, h = h, x
k = tuple(NP.array(n) - NP.array(m) + 1)
print('{} of {} (n={} k={} mode={})'.format(i, N, n, k, mode))
if (NP.array(k) > 0).all():
H = Convmtx(n, h, mode=mode)
y_true = scipy.signal.convolve(x, h, mode=mode)
y_mtx = H * x.flat
y_mtx.shape = k
assert(NP.allclose(y_true, y_mtx))
else:
try:
y_true = scipy.signal.convolve(x, h, mode=mode)
except ValueError:
pass
else:
assert(NP.prod(y_true.shape) == 0)
return True
if __name__ == '__main__':
random_validation(100)
| mit | 5,486,634,381,309,362,000 | 32.42439 | 82 | 0.484092 | false |
agriggio/pysmt | pysmt/test/test_walkers.py | 1 | 7279 | #
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from six.moves import xrange
import pysmt.operators as op
from pysmt.shortcuts import FreshSymbol, Symbol, Int, Bool, ForAll
from pysmt.shortcuts import And, Or, Iff, Not, Function, Real
from pysmt.shortcuts import LT, GT, Plus, Minus, Equals
from pysmt.shortcuts import get_env, substitute, TRUE
from pysmt.typing import INT, BOOL, REAL, FunctionType
from pysmt.walkers import TreeWalker, DagWalker, IdentityDagWalker
from pysmt.test import TestCase, main
from pysmt.formula import FormulaManager
from pysmt.test.examples import get_example_formulae
from pysmt.exceptions import UnsupportedOperatorError, PysmtTypeError
from pysmt.substituter import MSSubstituter
class TestWalkers(TestCase):
def test_subst(self):
varA = Symbol("At", INT)
varB = Symbol("Bt", INT)
f = And(LT(varA, Plus(varB, Int(1))),
GT(varA, Minus(varB, Int(1))))
g = Equals(varA, varB)
h = Iff(f, g)
res = substitute(h, subs={varA:varB})
self.assertEqual(res, h.substitute({varA:varB}))
res = substitute(h, subs={varA:Int(1)})
self.assertEqual(res, h.substitute({varA:Int(1)}))
def test_substituter_conditions(self):
x = Symbol("x")
y = Symbol("y")
and_x_x = And(x, x)
ftype = FunctionType(BOOL, [BOOL])
f = Symbol("f", ftype)
# 1. All arguments must be terms
args_good = {x:y}
args_bad = {x:f}
substitute(and_x_x, args_good)
with self.assertRaisesRegex(PysmtTypeError, " substitutions"):
substitute(and_x_x, args_bad)
# 2. All arguments belong to the manager of the substituter.
new_mgr = FormulaManager(get_env())
new_x = new_mgr.Symbol("x")
self.assertNotEqual(x, new_x)
args_1 = {x: new_x}
args_2 = {new_x: new_x}
with self.assertRaisesRegex(PysmtTypeError, "Formula Manager" ):
substitute(and_x_x, args_1)
with self.assertRaisesRegex(PysmtTypeError, "Formula Manager."):
substitute(and_x_x, args_2)
with self.assertRaisesRegex(PysmtTypeError, "substitute()"):
substitute(f, {x:x})
def test_undefined_node(self):
varA = Symbol("At", INT)
dag_walker = DagWalker()
with self.assertRaises(UnsupportedOperatorError):
dag_walker.walk(varA)
tree_walker = TreeWalker()
with self.assertRaises(UnsupportedOperatorError):
tree_walker.walk(varA)
def test_walker_is_complete(self):
op.ALL_TYPES.append(-1)
with self.assertRaises(AssertionError):
TreeWalker()
op.ALL_TYPES.remove(-1)
def test_identity_walker_simple(self):
def walk_and_to_or(formula, args, **kwargs):
return Or(args)
def walk_or_to_and(formula, args, **kwargs):
return And(args)
walker = IdentityDagWalker(env=get_env())
walker.set_function(walk_and_to_or, op.AND)
walker.set_function(walk_or_to_and, op.OR)
x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
cnf = And(Or(x,y,z), Or(z, Not(y)))
fake_dnf = Or(And(x,y,z), And(z, Not(y)))
result = walker.walk(cnf)
self.assertEqual(result, fake_dnf)
alternation = Or(cnf, Not(cnf))
expected = And(fake_dnf, Not(fake_dnf))
result = walker.walk(alternation)
self.assertEqual(result, expected)
def test_identity_dag_walker(self):
idw = IdentityDagWalker()
for (f, _, _, _) in get_example_formulae():
rebuilt = idw.walk(f)
self.assertTrue(rebuilt == f, "Rebuilt formula is not identical")
def test_substitution_on_quantifiers(self):
x, y = FreshSymbol(), FreshSymbol()
# y /\ Forall x. x /\ y.
f = And(y, ForAll([x], And(x, y)))
subs = {y: Bool(True)}
f_subs = substitute(f, subs).simplify()
self.assertEqual(f_subs, ForAll([x], x))
subs = {x: Bool(True)}
f_subs = substitute(f, subs).simplify()
self.assertEqual(f_subs, f)
def test_substitution_complex(self):
x, y = FreshSymbol(REAL), FreshSymbol(REAL)
# y = 0 /\ (Forall x. x > 3 /\ y < 2)
f = And(Equals(y, Real(0)),
ForAll([x], And(GT(x, Real(3)), LT(y, Real(2)))))
subs = {y: Real(0),
ForAll([x], And(GT(x, Real(3)), LT(y, Real(2)))): TRUE()}
f_subs = substitute(f, subs).simplify()
self.assertEqual(f_subs, TRUE())
def test_substitution_complex_mss(self):
x, y = FreshSymbol(REAL), FreshSymbol(REAL)
# y = 0 /\ (Forall x. x > 3 /\ y < 2)
f = And(Equals(y, Real(0)),
ForAll([x], And(GT(x, Real(3)), LT(y, Real(2)))))
subs = {y: Real(0),
ForAll([x], And(GT(x, Real(3)), LT(Real(0), Real(2)))): TRUE()}
f_subs = MSSubstituter(env=self.env).substitute(f, subs).simplify()
self.assertEqual(f_subs, TRUE())
def test_substitution_term(self):
x, y = FreshSymbol(REAL), FreshSymbol(REAL)
# y = 0 /\ Forall x. x > 3
f = And(Equals(y, Real(0)), ForAll([x], GT(x, Real(3))))
subs = {GT(x, Real(3)): TRUE()}
f_subs = substitute(f, subs)
# Since 'x' is quantified, we cannot replace the term
# therefore the substitution does not yield any result.
self.assertEqual(f_subs, f)
def test_substitution_on_functions(self):
i, r = FreshSymbol(INT), FreshSymbol(REAL)
f = Symbol("f", FunctionType(BOOL, [INT, REAL]))
phi = Function(f, [Plus(i, Int(1)), Minus(r, Real(2))])
phi_sub = substitute(phi, {i: Int(0)}).simplify()
self.assertEqual(phi_sub, Function(f, [Int(1), Minus(r, Real(2))]))
phi_sub = substitute(phi, {r: Real(0)}).simplify()
self.assertEqual(phi_sub, Function(f, [Plus(i, Int(1)), Real(-2)]))
phi_sub = substitute(phi, {r: Real(0), i: Int(0)}).simplify()
self.assertEqual(phi_sub, Function(f, [Int(1), Real(-2)]))
def test_iterative_get_free_variables(self):
f = Symbol("x")
for _ in xrange(1000):
f = And(f, f)
cone = f.get_free_variables()
self.assertEqual(cone, set([Symbol("x")]))
def test_walk_error(self):
"""All walk methods by default call walk_error."""
from pysmt.walkers import DagWalker
x = Symbol("x")
w = DagWalker()
for o in op.ALL_TYPES:
with self.assertRaises(UnsupportedOperatorError):
w.functions[o](x)
if __name__ == '__main__':
main()
| apache-2.0 | -8,793,883,856,465,397,000 | 33.173709 | 79 | 0.592801 | false |
OmkarPathak/pygorithm | pygorithm/data_structures/trie.py | 1 | 3224 | """
Author: MrDupin
"""
class Node:
def __init__(self, v, p=None, w=False):
self.word = w #If the node represents the end of a word or not
self.parent = p
self.value = v
self.children = {}
class Trie:
def __init__(self):
self.root = Node('') #The root of the trie is always empty
def insert(self, word):
"""
Inserts a word in the trie. Starting from the root, move down the trie
following the path of characters in the word. If the nodes for the word
characters end, add them. When the last char is added, mark it as a
word-ending node.
"""
l = len(word)
curr = self.root
for i, c in enumerate(word):
last = False
if(i == l-1):
#The last char of the word
last = True
if(c not in curr.children):
curr.children[c] = Node(c, curr, last)
elif(last):
#c already exists, but as it is the last char of word,
#it should now be flagged as a word in the trie.
curr.children[c].word = True
curr = curr.children[c]
def search(self, word):
"""
        Searches for the given word in the trie. We want to find the last node
        for the word, and that node must be flagged as a complete word;
        otherwise the word is not in the trie (it may only be a prefix).
        """
        node = self.find_final_node(word)
        #Only report a hit if the final node exists and ends a complete word.
        return bool(node and node.word)
def find_words(self, prefix):
"""
Find all words with the given prefix
"""
v = self.find_final_node(prefix)
wList = self.build_word_list(v, prefix)
if(v and v.word):
#v exists and the prefix is itself a word; add it to the list.
wList.append(prefix)
return wList
def find_final_node(self, word):
"""
        Returns the node reached by following the chars of the given word.
        The process goes like this: start from the root and, for every char
        in word, go down one level. If we can't go down a level, the word
        doesn't exist, so return None. If we reach the last char, return that
        node; callers (search, find_words) check its word flag to decide
        whether it actually ends a complete word.
"""
curr = self.root
l = len(word)
for i, c in enumerate(word):
if(c not in curr.children):
                #No child for c: the trie contains no word along this path
return None
if(i == l-1):
#Last char of word
return curr.children[c]
curr = curr.children[c]
return None
def build_word_list(self, v, cWord):
"""
Recursively builds the list of words.
* v: Node to check
* cWord : The word built up to v
"""
if(not v):
return None
wList = []
for i, k in v.children.items():
tempWord = cWord + i
if(k.word):
#If the iterated prefix is a word, add it to the list
wList.append(tempWord)
#The list of words under tWord
wList.extend(self.build_word_list(k, tempWord))
return wList
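# Illustrative usage sketch (added for exposition; not part of the original module).
if __name__ == '__main__':
    t = Trie()
    for w in ("car", "cart", "care", "dog"):
        t.insert(w)
    print(t.search("car"))      # True: "car" was inserted as a complete word
    print(t.search("ca"))       # False: "ca" is only a prefix
    print(t.find_words("car"))  # ['cart', 'care', 'car'] (child order may vary)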
| mit | 4,062,834,501,498,244,000 | 28.851852 | 79 | 0.526055 | false |
liurenqiu520/AutobahnPython | examples/twisted/wamp/basic/client.py | 1 | 3070 | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
if __name__ == '__main__':
import sys, argparse
from twisted.python import log
from twisted.internet.endpoints import clientFromString
## parse command line arguments
##
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action = "store_true",
help = "Enable debug output.")
parser.add_argument("-c", "--component", type = str,
help = "Start WAMP-WebSocket client with this application component, e.g. 'timeservice.TimeServiceFrontend'")
parser.add_argument("--websocket", type = str, default = "tcp:127.0.0.1:8080",
help = 'WebSocket client Twisted endpoint descriptor, e.g. "tcp:127.0.0.1:8080" or "unix:/tmp/mywebsocket".')
parser.add_argument("--wsurl", type = str, default = "ws://127.0.0.1:8080/ws",
help = 'WebSocket URL (must suit the endpoint), e.g. "ws://127.0.0.1:8080/ws".')
args = parser.parse_args()
## start Twisted logging to stdout
##
if args.debug:
log.startLogging(sys.stdout)
## we use an Autobahn utility to import the "best" available Twisted reactor
##
from autobahn.twisted.choosereactor import install_reactor
reactor = install_reactor()
if args.debug:
print("Running on reactor {}".format(reactor))
## create a WAMP application session factory
##
from autobahn.twisted.wamp import ApplicationSessionFactory
session_factory = ApplicationSessionFactory()
## dynamically load the application component ..
##
import importlib
c = args.component.split('.')
mod, klass = '.'.join(c[:-1]), c[-1]
app = importlib.import_module(mod)
## .. and set the session class on the factory
##
session_factory.session = getattr(app, klass)
## create a WAMP-over-WebSocket transport client factory
##
from autobahn.twisted.websocket import WampWebSocketClientFactory
transport_factory = WampWebSocketClientFactory(session_factory, args.wsurl, debug = args.debug)
transport_factory.setProtocolOptions(failByDrop = False)
## start a WebSocket client from an endpoint
##
client = clientFromString(reactor, args.websocket)
client.connect(transport_factory)
## now enter the Twisted reactor loop
##
reactor.run()
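   ## Illustrative invocation (added for exposition; assumes a module named
   ## "timeservice" defining TimeServiceFrontend is importable, as suggested by
   ## the --component help text above):
   ##
   ##   python client.py -c timeservice.TimeServiceFrontend \
   ##       --websocket tcp:127.0.0.1:8080 --wsurl ws://127.0.0.1:8080/ws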
| apache-2.0 | 6,052,565,354,310,117,000 | 31.659574 | 132 | 0.640391 | false |
Ladsgroup/MP3-cleaner | mp3.py | 1 | 1167 | import fnmatch
import os
import eyed3
import shutil
path_to_clean = u'/media/amir/Files/Download/'
path_to_move = u'/media/amir/Files/Music/'
matches = []
for root, dirnames, filenames in os.walk(path_to_clean):
for filename in fnmatch.filter(filenames, u'*.mp3'):
matches.append(os.path.join(root, filename))
print len(matches)
for file in matches:
file = eval("u\"%s\"" % file)
try:
audiofile = eyed3.load(file)
artist = audiofile.tag.artist.strip()
album = audiofile.tag.album.strip()
try:
os.mkdir('%s%s' % (path_to_move, artist))
except OSError:
pass
try:
os.mkdir('%s%s/%s' % (path_to_move, artist, album))
except OSError:
shutil.move(
file, u'%s%s/%s/%s' % (
path_to_move, artist, album, file.split("/")[-1]))
print "moved"
except:
print "Not moved"
pass
else:
shutil.move(
file, u'%s%s/%s/%s' % (
path_to_move, artist, album, file.split("/")[-1]))
print "Moved"
except:
pass
| apache-2.0 | -5,187,029,414,978,394,000 | 29.710526 | 70 | 0.520137 | false |
aino/django-pgindex | pgindex/management/commands/reindex.py | 1 | 1687 | import pgindex
import sys
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
from optparse import make_option
from pgindex.models import Index
class Command(BaseCommand):
help = _('Reindex for django-pgindex')
option_list = BaseCommand.option_list + (
make_option('--apps',
action='store',
dest='apps',
default='',
help=_('specify apps to reindex for.'),
),
make_option('--all',
action='store_true',
dest='all',
default=False,
help=_('reindex all apps.'),
)
)
def handle(self, *args, **options):
registry = pgindex.helpers._registry
if options['all']:
Index._default_manager.all().delete()
elif options['apps']:
apps = [ app.strip() for app in options['apps'].split(',') ]
Index._default_manager.filter(obj_app_label__in=apps).delete()
else:
raise CommandError(_('No apps to reindex.'))
for model, idx_classes in registry.iteritems():
opts = model._meta
if options['all'] or opts.app_label in apps:
sys.stdout.write(_('Reindexing %s.%s') % (
opts.app_label, opts.object_name
))
for obj in model._default_manager.all():
for idx_cls in idx_classes:
idx = idx_cls(obj)
idx.update()
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('OK\n')
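# Illustrative invocations (added for exposition; run through Django's manage.py):
#   python manage.py reindex --all
#   python manage.py reindex --apps blog,shop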
| bsd-3-clause | 4,848,674,826,842,466,000 | 34.145833 | 74 | 0.515708 | false |
sonidosmutantes/apicultor | examples/prototypes/SMComposition.py | 1 | 11643 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import pykov # Markov chains helpers
import time
import random
import urllib2
import OSC
import sys
import os.path
import json
from pyo import *
import signal
#TODO: add/use formal logging
# import logging
# import logging.handlers
from mir.db.FreesoundDB import FreesoundDB
from mir.db.RedPanalDB import RedPanalDB
import platform
#from __future__ import print_function
DATA_PATH = "data"
SAMPLES_PATH = "samples"
# OSC Server
osc_client = OSC.OSCClient()
sc_Port = 57120
sc_IP = '127.0.0.1' #Local SC server
#sc_IP = '10.142.39.109' #Remote server
# Virtual Box: Network device config not in bridge or NAT mode
# Select 'Network host-only adapter' (Name=vboxnet0)
sc_IP = '192.168.56.1' # Remote server is the host of the VM
osc_client.connect( ( sc_IP, sc_Port ) )
### Signal handler (ctrl+c)
def signal_handler(signal, frame):
global log
print('Ctrl+C')
log.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
### Pyo Sound Server ###
if platform.system() == "Darwin" or platform.system() == "Windows":
### Default
s = Server().boot()
# s = Server(duplex=0).boot()
# s = Server(audio='portaudio').boot()
s = Server().boot()
else: #Linux
### JACK ###
# or export PYO_SERVER_AUDIO=jack (~/.bashrc)
s = Server(audio='jack')
# s.setJackAuto(False, False) #some linux bug workaround (not needed with jackd compiled without dbus, when X system is not running)
s.boot()
s.setJackAutoConnectOutputPorts(['system:playback_1', 'system:playback_2'])
s.start() #no s.gui(locals())
"""
ETL of the sound database
=========================
Audio normalization performed offline to save realtime resources (raspberry pi implementation)
see ../helper scripts
TODO: remove "silence" sounds from db (actually checking the file length)
"""
# sffade = Fader(fadein=0.05, fadeout=1, dur=0, mul=0.5).play()
# Mixer
# 3 outputs mixer, 1 second of amplitude fade time
#mm = Mixer(outs=3, chnls=2, time=1)
dry_val = 1
wet_val = 0.5 #check which reverb algorithm is in use
# dry_val = 0.7
# dry_val = 0.3
a = Sine(freq=10, mul=0.3) #start signal
VOL_ADJUST = 6
c = Clip(a, mul=VOL_ADJUST)
#d = c.mix(2).out() #full dry output
out = c.mix(2).out() #dry output
# Reverb
# b1 = Allpass(out, delay=[.0204,.02011], feedback=0.35) # wet output
# b2 = Allpass(b1, delay=[.06653,.06641], feedback=0.41)
# b3 = Allpass(b2, delay=[.035007,.03504], feedback=0.5)
# b4 = Allpass(b3, delay=[.023021 ,.022987], feedback=0.65)
# c1 = Tone(b1, 5000, mul=0.2).out()
# c2 = Tone(b2, 3000, mul=0.2).out()
# c3 = Tone(b3, 1500, mul=0.2).out()
# c4 = Tone(b4, 500, mul=0.2).out()
#Another reverb
# comb1 = Delay(out, delay=[0.0297,0.0277], feedback=0.65)
# comb2 = Delay(out, delay=[0.0371,0.0393], feedback=0.51)
# comb3 = Delay(out, delay=[0.0411,0.0409], feedback=0.5)
# comb4 = Delay(out, delay=[0.0137,0.0155], feedback=0.73)
# combsum = out + comb1 + comb2 + comb3 + comb4
# all1 = Allpass(combsum, delay=[.005,.00507], feedback=0.75)
# all2 = Allpass(all1, delay=[.0117,.0123], feedback=0.61)
# lowp = Tone(all2, freq=3500, mul=wet_val).out()
#buggy? segmentation fault
"""
8 delay lines FDN (Feedback Delay Network) reverb, with feedback matrix based upon physical modeling scattering junction of 8 lossless waveguides of equal characteristic impedance.
"""
pan = SPan(out, pan=[.25, .4, .6, .75]).mix(2)
rev = WGVerb(pan, feedback=.65, cutoff=3500, bal=.2)
# rev.out()
gt = Gate(rev, thresh=-24, risetime=0.005, falltime=0.01, lookahead=5, mul=.4)
gt.out()
# Loads the sound file in RAM. Beginning and ending points
# can be controlled with "start" and "stop" arguments.
# t = SndTable(path)
# #FIXME: test purposes
# #hardcoded sound files
# A_snd = "../samples/1194_sample0.wav"
# B_snd = "../samples/Solo_guitar_solo_sample1.wav"
# C_snd = "../samples/Cuesta_caminar_batero_sample3.wav"
# snd_dict = dict()
# snd_dict["A"] = A_snd
# snd_dict["B"] = B_snd
# snd_dict["C"] = C_snd
# snd_dict["D"] = C_snd
# snd_dict["E"] = C_snd
# snd_dict["F"] = C_snd
# snd_dict["G"] = C_snd
# snd_dict["H"] = C_snd
# def freesound_search(api_key="", id=""):
# call = """curl -H "Authorization: Token %(api_key)s" 'http://www.freesound.org/apiv2/sounds/%(id)s/'"""%locals()
# response = urllib2.urlopen(call).read()
# print(response)
# #freesound_search()
def external_synth(new_file):
"""
Sends OSC
Sends OSC to external synthesis engine like SuperCollider or pd
"""
print("\tPlaying %s"%new_file)
msg = OSC.OSCMessage()
msg.setAddress("/play")
#mac os #FIXME
msg.append( "/Users/hordia/Documents/apicultor"+new_file.split('.')[1]+'.wav' )
try:
osc_client.send(msg)
except Exception,e:
print(e)
#TODO: get duration from msg (via API)
time.sleep(duration)
#external_synth()
def pyo_synth(new_file, dry_value):
#Phase Vocoder
sfplay = SfPlayer(new_file, loop=True, mul=dry_value)
pva = PVAnal(sfplay, size=1024, overlaps=4, wintype=2)
pvs = PVAddSynth(pva, pitch=1., num=500, first=10, inc=10).mix(2)#.out()
# pvs = PVAddSynth(pva, pitch=notes['pitch'], num=500, first=10, inc=10, mul=p).mix(2).out()
c.setInput(pvs, fadetime=.25)
# c = c.mix(2).out()
#pyo_synth()
def granular_synth(new_file):
"""
Granulator sound
"""
pass
# snd = SndTable(file_chosen)
# env = HannTable()
# # note_in_pitch = 62
# # posx = Port( Midictl(ctlnumber=[78], minscale=0, maxscale=snd.getSize()), 0.02)
# # posf = Port( Midictl(ctlnumber=[16], minscale=0, maxscale=snd.getSize()), 0.02)
# #porta = Midictl(ctlnumber=[79], minscale=0., maxscale=60.)
# # posxx = (note_in_pitch-48.)/(96.-48.)*posf+posx
# # pos = SigTo(posxx)
# # tf = TrigFunc(Change(porta), function=set_ramp_time)
# # pitch = Port(Midictl(ctlnumber=[17], minscale=0.0, maxscale=2.0),0.02)
# # noisemul = Midictl(ctlnumber=[18], minscale=0.0, maxscale=0.2)
# # noiseadd = Port(Midictl(ctlnumber=[19], minscale=0.0, maxscale=1.0),0.02)
# # dur = Noise(mul=noisemul)+noiseadd
# pitch = 62
# dur = 3
# pos = 1
# g = Granulator(snd, env, pitch*0.1/dur, pos , dur, 16, mul=.3).mix(2).out()
#granulator_synth()
#TODO: check whether this is still used
def set_ramp_time():
pos.time = porta.get()
Usage = "./StateMachine.py [StateComposition.json]"
if __name__ == '__main__':
if len(sys.argv) < 2:
print("\nBad amount of input arguments\n\t", Usage, "\n")
sys.exit(1)
logfile = "apicultor.log"
try:
log = open(logfile, "a") #append? or overwrite ('w')
except:
print("Log file error")
sys.exit(2)
# JSON config file
config = ""
try:
config = json.load( open(".apicultor_config.json",'r') )
except Exception, e:
print(e)
print("No json config file or error.")
sys.exit(3)
api_type = config["api"]
if api_type=="redpanal":
db_url = config["RedPanal.org"][0]["url"]
api = RedPanalDB(db_url)
elif api_type=="freesound":
freesound_api_key = config["Freesound.org"][0]["API_KEY"]
api = FreesoundDB()
api.set_api_key(freesound_api_key)
else:
print("Bad api key config")
sys.exit(4)
print("Using "+api_type+" API")
api.av_conv = config["av_conv"]
#JSON composition file
json_data = ""
try:
json_comp_file = sys.argv[1]
# with open(json_file,'r') as file:
# json_data = json.load( file )
json_data = json.load( open(json_comp_file,'r') )
except Exception, e:
print(e)
print("JSON composition file error.")
sys.exit(2)
print("Starting MIR state machine")
log.write("Starting MIR state machine: "+json_comp_file+"\n") #WARNING: bad realtime practice (writing file) TODO: add to a memory buffer and write before exit
states_dict = dict() # id to name conversion
states_dur = dict() #states duration
states_mirdef = dict() #mir state definition
start_state = json_data['statesArray'][0]['text'] #TODO: add as property (start: True)
for st in json_data['statesArray']:
states_dict[ st['id'] ] = st['text'] # 'text' is the name of the state
try:
states_mirdef[ st['text'] ] = st['mir'][0]
except:
states_mirdef[ st['text'] ] = {"sfx.duration": "* TO 3", "sfx.inharmonicity.mean": "0.1" } #default value
try:
states_dur[ st['text'] ] = st['duration'] #default value
except:
states_dur[ st['text'] ] = 1. # default duration
sd = states_dict
T = pykov.Matrix()
for st in json_data['linkArray']:
# print( float(st['text']) )
T[ sd[st['from']], sd[st['to']] ] = float( st['text'] )
try:
T.stochastic() #check
except Exception,e:
print(e)
exit(1)
#########################
#FIXME: Time
# duration = 1 #FIXME: hardcoded (default duration)
# time_bt_states = 1 # (delay within states...)
#########################
#########################
# Init conditions
#state = 'idle' #start state
# state = "A" #start state
state = start_state
previous_state = "H"
#Fixed amount or infinite with while(1 ) ()
# events = 10 # or loop with while(1)
# for i in range(events):
while(1):
print( "State: %s"%state ) # TODO: call the right method for the state here
#(optional) change sound in the same state or not (add as json config file)
if state!=previous_state:
#retrieve new sound
# call = '/list/samples' #gets only wav files because SuperCollider
# response = urllib2.urlopen(URL_BASE + call).read()
# audioFiles = list()
# for file in response.split('\n'):
# if len(file)>0: #avoid null paths
# audioFiles.append(file)
# # print file
mir_state = states_mirdef[ state ]
print("MIR State: "+str(mir_state))
file_chosen, autor, sound_id = api.get_one_by_mir(mir_state)
print( os.path.getsize(file_chosen) )
if os.path.exists( file_chosen ) and os.path.getsize(file_chosen)>1000: #FIXME: prior remove 'silence' sounds from DB (ETL)
print(file_chosen)
log.write(file_chosen+" by "+ autor + " - id: "+str(sound_id)+"\n") #WARNING: bad realtime practice (writing file) TODO: add to a memory buffer and write before exit. FIXME
pyo_synth(file_chosen, dry_val)
# Hardcoded sound for each MIR state
# file_chosen = snd_dict[state]
# granular_synth(file_chosen)
# external_synth(file_chosen)
time_bt_states = states_dur[ state ]
# time_between_notes = random.uniform(0.,2.) #in seconds
#time.sleep(time_between_notes)
#TODO: add random variation time?
#TODO: transpose all to the same pitch
# MIDI
# notes = Notein(poly=10, scale=1, mul=.5)
# p = Port(notes['velocity'], .001, .5)
# # Add inputs to the mixer
# mm.addInput(voice=new_voice, input=sfplay)
#mm.addInput(voice=new_voice, input=pvs)
# Delay within states
time.sleep(time_bt_states)
#next state
previous_state = state
state = T.succ(state).choose() #new state
# if state==end_state: break
log.close()
#end
| gpl-3.0 | 8,775,023,752,194,463,000 | 31.07438 | 188 | 0.597355 | false |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/oom/__init__.py | 1 | 4084 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import socket
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.config import GPDBConfig
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility
from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby
from mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify
from mpp.gpdb.tests.storage.walrepl.lib.standby import Standby
config = GPDBConfig()
class OOMClass(object):
'''Class for methods required for OOM testcase'''
standby_port = '5433'
standby_dirname = 'newstandby'
def __init__(self):
self.gpinit = GpinitStandby()
self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
self.config = GPDBConfig()
self.pgutil = GpUtility()
self.verify = StandbyVerify()
self.host = socket.gethostname()
self.standby_loc = os.path.join(os.path.split(self.mdd)[0],
self.standby_dirname)
self.standby = Standby(self.standby_loc, self.standby_port)
def create_standby(self):
self.pgutil.clean_dir(self.host,self.standby_loc)
self.gpinit.run(option = '-P %s -s %s -F pg_system:%s' % (self.standby_port, self.host, self.standby_loc))
def setup_oom(self):
# Build it before testing.
thisdir = os.path.dirname(__file__)
builddir = os.path.join(thisdir, 'lib')
subprocess.check_call(['make', '-C', builddir, 'install'])
#Copy oom_malloc.so and wrapper.sh to all the segment nodes
for host in config.get_hosts() :
if host.strip() == self.host :
continue
cmd = "gpssh -h %s -e 'mkdir -p %s; scp %s/oom_malloc.so %s:%s/;scp %s/wrapper.sh %s:%s/'" % (host.strip(), builddir, builddir, host.strip(), builddir, builddir, host.strip(), builddir)
self.pgutil.run(cmd)
def touch_malloc(self):
# Touch file oom_malloc in standby directory
cmd = 'touch %s/oom_malloc' % self.standby_loc
self.pgutil.run(cmd)
def startdb(self):
(rc, result) = self.pgutil.run('gpstart -a --wrapper %s' % (local_path('lib/wrapper.sh')))
if rc != 0 and 'Could not start standby master' in result :
return False
return True
def restartdb(self):
# Remove file oom_malloc from standby
cmd = 'rm %s/oom_malloc' % self.standby_loc
self.pgutil.run(cmd)
(rc, result) = self.pgutil.run('gpstop -ar')
if rc == 0 and (self.verify.check_pg_stat_replication()):
return True
return False
def psql_and_oom(self):
#Touch oom_malloc in standby_dir and issue PSQL : Check if processes are gone
self.touch_malloc()
PSQL.run_sql_command('Drop table if exists wal_oomt1;Create table wal_oomt1(a1 int, a2 text) with(appendonly=true);')
sleep(2)
if not (self.verify.check_standby_processes()):
return True
return False
def start_standby(self):
# Remove oom_malloc and start standby : Check if all processes are back
cmd = 'rm %s/oom_malloc' % self.standby_loc
self.pgutil.run(cmd)
res = self.standby.start()
sleep(2)
if (self.verify.check_standby_processes()) :
return True
return False
| apache-2.0 | 1,188,264,735,477,348,900 | 37.895238 | 198 | 0.65573 | false |
Spinmob/spinmob | _pylab_colormap.py | 1 | 18278 | import os as _os
import matplotlib as _mpl
import pylab as _pylab
from functools import partial as _partial
try: from . import _pylab_tweaks
except: import _pylab_tweaks
import spinmob as _s
_qtw = _s._qtw
_qt = _s._qt
_qtcore = _s._qtc
# make sure we have an application
if __name__ == '__main__':
_qtapp = _qtcore.QCoreApplication.instance()
if not _qtapp:
print("_pylab_colormap.py: Creating QApplication")
_qtapp = _qtw.QApplication(_os.sys.argv)
from . import _settings
_settings = _settings.settings()
class colormap():
# Each element of the list contains the position on the colorbar,
# and the bottom/top colors
_colorpoint_list = [[0.0, [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[0.5, [0.0, 0.0, 1.0], [0.0, 0.0, 1.0]],
[1.0, [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]] ]
# name of the colormap
_name = "Last Used"
# pylab image object
_image=None
def __init__(self, name="Last Used", image='auto'):
"""
This object is responsible for loading and saving all the spinmob
colormaps, and converting them to a cmap for pylab.
"""
self.set_name(name)
self.set_image(image)
self.load_colormap()
return
def __repr__(self):
s = "\n"+self.get_name()
n = 0
for x in self._colorpoint_list:
s = s+"\n"+str(n)+": " + str(x[0])+" "+str(x[1])+" "+str(x[2])
n += 1
return s+"\n"
def __getitem__(self, n): return self._colorpoint_list[n]
def load_colormap(self, name=None):
"""
Loads a colormap of the supplied name. None means used the internal
name. (See self.get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: Bad name."
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, "colormaps", name+".cmap")
# make sure the file exists
if not _os.path.exists(path):
print("load_colormap(): Colormap '"+name+"' does not exist. Creating.")
self.save_colormap(name)
return
# open the file and get the lines
f = open(path, 'r')
x = f.read()
f.close()
try:
self._colorpoint_list = eval(x)
except:
print("Invalid colormap. Overwriting.")
self.save_colormap()
# update the image
self.update_image()
return self
def save_colormap(self, name=None):
"""
Saves the colormap with the specified name. None means use internal
name. (See get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: invalid name."
# get the colormaps directory
colormaps = _os.path.join(_settings.path_home, 'colormaps')
# make sure we have the colormaps directory
_settings.make_dir(colormaps)
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, 'colormaps', name+".cmap")
# open the file and overwrite
f = open(path, 'w')
f.write(str(self._colorpoint_list))
f.close()
return self
def delete_colormap(self, name=None):
"""
Deletes the colormap with the specified name. None means use the internal
name (see get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: invalid name."
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, 'colormaps', name+".cmap")
_os.unlink(path)
return self
def set_name(self, name="My Colormap"):
"""
Sets the name.
Make sure the name is something your OS could name a file.
"""
if not type(name)==str:
print("set_name(): Name must be a string.")
return
self._name = name
return self
def get_name(self):
"""
Returns the name of the current colormap.
"""
return self._name
def set_image(self, image='auto'):
"""
Set which pylab image to tweak.
"""
if image=="auto": image = _pylab.gca().images[0]
self._image=image
self.update_image()
def update_image(self):
"""
        Sets the image's cmap.
"""
if self._image:
self._image.set_cmap(self.get_cmap())
_pylab.draw()
def pop_colorpoint(self, n=0):
"""
Removes and returns the specified colorpoint. Will always leave two behind.
"""
# make sure we have more than 2; otherwise don't pop it, just return it
if len(self._colorpoint_list) > 2:
# do the popping
x = self._colorpoint_list.pop(n)
# make sure the endpoints are 0 and 1
self._colorpoint_list[0][0] = 0.0
self._colorpoint_list[-1][0] = 1.0
# update the image
self.update_image()
return x
# otherwise just return the indexed item
else: return self[n]
def insert_colorpoint(self, position=0.5, color1=[1.0,1.0,0.0], color2=[1.0,1.0,0.0]):
"""
Inserts the specified color into the list.
"""
L = self._colorpoint_list
# if position = 0 or 1, push the end points inward
if position <= 0.0:
L.insert(0,[0.0,color1,color2])
elif position >= 1.0:
L.append([1.0,color1,color2])
# otherwise, find the position where it belongs
else:
# loop over all the points
for n in range(len(self._colorpoint_list)):
# check if it's less than the next one
if position <= L[n+1][0]:
# found the place to insert it
L.insert(n+1,[position,color1,color2])
break
# update the image with the new cmap
self.update_image()
return self
def modify_colorpoint(self, n, position=0.5, color1=[1.0,1.0,1.0], color2=[1.0,1.0,1.0]):
"""
Changes the values of an existing colorpoint, then updates the colormap.
"""
if n==0.0 : position = 0.0
elif n==len(self._colorpoint_list)-1: position = 1.0
else: position = max(self._colorpoint_list[n-1][0], position)
self._colorpoint_list[n] = [position, color1, color2]
self.update_image()
self.save_colormap("Last Used")
def get_cmap(self):
"""
Generates a pylab cmap object from the colorpoint data.
"""
# now generate the colormap from the ordered list
r = []
g = []
b = []
for p in self._colorpoint_list:
r.append((p[0], p[1][0]*1.0, p[2][0]*1.0))
g.append((p[0], p[1][1]*1.0, p[2][1]*1.0))
b.append((p[0], p[1][2]*1.0, p[2][2]*1.0))
# store the formatted dictionary
c = {'red':r, 'green':g, 'blue':b}
# now set the dang thing
return _mpl.colors.LinearSegmentedColormap('custom', c)
class colormap_interface(colormap):
def __init__(self, name="Last Used", image="auto"):
"""
This is the graphical interface for interacting with a
pylab image.
"""
colormap.__init__(self,name,image)
# create the main window
self._window = _qtw.QMainWindow()
self._window.setWindowTitle("Colormap")
# main widget inside window
self._central_widget = _qtw.QWidget()
self._window.setCentralWidget(self._central_widget)
# add all the controls
self._build_gui()
# disable the save (just loaded)
self._button_save.setEnabled(False)
# set the location
pos, size = _pylab_tweaks.get_figure_window_geometry()
self._window.move(pos[0]+size[0],pos[1])
self.show()
def _build_gui(self):
"""
Removes all existing sliders and rebuilds them based on the colormap.
"""
# remove all widgets (should destroy all children too)
self._central_widget.deleteLater()
# remove all references to other controls
self._sliders = []
self._buttons_top_color = []
self._buttons_bottom_color = []
self._checkboxes = []
self._buttons_plus = []
self._buttons_minus = []
self._color_dialogs_top = []
self._color_dialogs_bottom = []
# create the new central widget
self._central_widget = _qtw.QWidget()
self._window.setCentralWidget(self._central_widget)
# layout for main widget
self._layout = _qtw.QGridLayout(self._central_widget)
self._central_widget.setLayout(self._layout)
# add the list of cmaps
self._combobox_cmaps = _qtw.QComboBox(self._central_widget)
self._combobox_cmaps.setEditable(True)
self._load_cmap_list()
# add the save and delete buttons
self._button_save = _qtw.QPushButton("Save", self._central_widget)
self._button_delete = _qtw.QPushButton("Delete", self._central_widget)
self._button_save.setFixedWidth(70)
self._button_delete.setFixedWidth(70)
# layouts
self._layout.addWidget(self._combobox_cmaps, 1,1, 1,3, _qtcore.Qt.Alignment(0))
self._layout.addWidget(self._button_save, 1,5, 1,1, _qtcore.Qt.Alignment(1))
self._layout.addWidget(self._button_delete, 1,6, 1,2, _qtcore.Qt.Alignment(1))
# actions
self._combobox_cmaps.currentIndexChanged.connect(self._signal_load)
self._button_save .clicked.connect(self._button_save_clicked)
self._button_delete.clicked.connect(self._button_delete_clicked)
# ensmallen the window
self._window.resize(10,10)
# now create a control set for each color point
for n in range(len(self._colorpoint_list)):
c1 = self._colorpoint_list[n][1]
c2 = self._colorpoint_list[n][2]
# create a top-color button
self._buttons_top_color.append(_qtw.QPushButton(self._central_widget))
self._buttons_top_color[-1].setStyleSheet("background-color: rgb("+str(int(c2[0]*255))+","+str(int(c2[1]*255))+","+str(int(c2[2]*255))+"); border-radius: 3px;")
# create a bottom-color button
self._buttons_bottom_color.append(_qtw.QPushButton(self._central_widget))
self._buttons_bottom_color[-1].setStyleSheet("background-color: rgb("+str(int(c1[0]*255))+","+str(int(c1[1]*255))+","+str(int(c1[2]*255))+"); border-radius: 3px;")
# create color dialogs
self._color_dialogs_top.append(_qtw.QColorDialog(self._central_widget))
self._color_dialogs_top[-1].setCurrentColor(self._buttons_top_color[-1].palette().color(1))
self._color_dialogs_bottom.append(_qtw.QColorDialog(self._central_widget))
self._color_dialogs_bottom[-1].setCurrentColor(self._buttons_top_color[-1].palette().color(1))
# create link checkboxes
self._checkboxes.append(_qtw.QCheckBox(self._central_widget))
self._checkboxes[-1].setChecked(c1==c2)
# create a slider
self._sliders.append(_qtw.QSlider(self._central_widget))
self._sliders[-1].setOrientation(_qtcore.Qt.Horizontal)
self._sliders[-1].setMaximum(1000)
self._sliders[-1].setValue(int(self._colorpoint_list[n][0]*1000))
self._sliders[-1].setFixedWidth(250)
# create + and - buttons
self._buttons_plus.append(_qtw.QPushButton(self._central_widget))
self._buttons_plus[-1].setText("+")
self._buttons_plus[-1].setFixedWidth(25)
self._buttons_minus.append(_qtw.QPushButton(self._central_widget))
self._buttons_minus[-1].setText("-")
self._buttons_minus[-1].setFixedWidth(25)
# layout
self._layout.addWidget(self._buttons_bottom_color[-1], n+3,1, _qtcore.Qt.AlignCenter)
self._layout.addWidget(self._checkboxes[-1], n+3,2, 1,1, _qtcore.Qt.AlignCenter)
self._layout.addWidget(self._buttons_top_color[-1], n+3,3, _qtcore.Qt.AlignCenter)
self._layout.addWidget(self._sliders[-1], n+3,4, 1,2, _qtcore.Qt.AlignCenter)
self._layout.setColumnStretch(5,100)
self._layout.addWidget(self._buttons_minus[-1], n+3,7, _qtcore.Qt.AlignCenter)
self._layout.addWidget(self._buttons_plus[-1], n+3,6, _qtcore.Qt.AlignCenter)
# connect the buttons and slider actions to the calls
self._buttons_bottom_color[-1] .clicked.connect(_partial(self._color_button_clicked, n, 0))
self._buttons_top_color[-1] .clicked.connect(_partial(self._color_button_clicked, n, 1))
self._color_dialogs_bottom[-1].currentColorChanged.connect(_partial(self._color_dialog_changed, n, 0))
self._color_dialogs_top[-1] .currentColorChanged.connect(_partial(self._color_dialog_changed, n, 1))
self._buttons_plus[-1] .clicked.connect(_partial(self._button_plus_clicked, n))
self._buttons_minus[-1] .clicked.connect(_partial(self._button_minus_clicked, n))
self._sliders[-1] .valueChanged.connect(_partial(self._slider_changed, n))
# disable the appropriate sliders
self._sliders[0] .setDisabled(True)
self._sliders[-1].setDisabled(True)
def _signal_load(self):
"""
Load the selected cmap.
"""
# set our name
self.set_name(str(self._combobox_cmaps.currentText()))
# load the colormap
self.load_colormap()
# rebuild the interface
self._build_gui()
self._button_save.setEnabled(False)
def _button_save_clicked(self):
"""
Save the selected cmap.
"""
self.set_name(str(self._combobox_cmaps.currentText()))
self.save_colormap()
self._button_save.setEnabled(False)
self._load_cmap_list()
def _button_delete_clicked(self):
"""
Save the selected cmap.
"""
name = str(self._combobox_cmaps.currentText())
self.delete_colormap(name)
self._combobox_cmaps.setEditText("")
self._load_cmap_list()
def _color_dialog_changed(self, n, top, c):
"""
Updates the color of the slider.
"""
self._button_save.setEnabled(True)
cp = self._colorpoint_list[n]
# if they're linked, set both
if self._checkboxes[n].isChecked():
self.modify_colorpoint(n, cp[0], [c.red()/255.0, c.green()/255.0, c.blue()/255.0],
[c.red()/255.0, c.green()/255.0, c.blue()/255.0])
            self._buttons_top_color [n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.blue())+"); border-radius: 3px;")
            self._buttons_bottom_color[n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.blue())+"); border-radius: 3px;")
elif top:
self.modify_colorpoint(n, cp[0], cp[1], [c.red()/255.0, c.green()/255.0, c.blue()/255.0])
            self._buttons_top_color [n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.blue())+"); border-radius: 3px;")
else:
self.modify_colorpoint(n, cp[0], [c.red()/255.0, c.green()/255.0, c.blue()/255.0], cp[2])
            self._buttons_bottom_color[n].setStyleSheet("background-color: rgb("+str(c.red())+","+str(c.green())+","+str(c.blue())+"); border-radius: 3px;")
def _button_plus_clicked(self, n):
"""
Create a new colorpoint.
"""
self._button_save.setEnabled(True)
self.insert_colorpoint(self._colorpoint_list[n][0],
self._colorpoint_list[n][1],
self._colorpoint_list[n][2])
self._build_gui()
def _button_minus_clicked(self, n):
"""
Remove a new colorpoint.
"""
self._button_save.setEnabled(True)
self.pop_colorpoint(n)
self._build_gui()
def _slider_changed(self, n):
"""
updates the colormap / plot
"""
self._button_save.setEnabled(True)
self.modify_colorpoint(n, self._sliders[n].value()*0.001, self._colorpoint_list[n][1], self._colorpoint_list[n][2])
def _color_button_clicked(self, n,top):
"""
Opens the dialog.
"""
self._button_save.setEnabled(True)
if top: self._color_dialogs_top[n].open()
else: self._color_dialogs_bottom[n].open()
def _load_cmap_list(self):
"""
Searches the colormaps directory for all files, populates the list.
"""
# store the current name
name = self.get_name()
# clear the list
self._combobox_cmaps.blockSignals(True)
self._combobox_cmaps.clear()
# list the existing contents
paths = _settings.list_dir('colormaps')
# loop over the paths and add the names to the list
for path in paths:
self._combobox_cmaps.addItem(_os.path.splitext(path)[0])
# try to select the current name
self._combobox_cmaps.setCurrentIndex(self._combobox_cmaps.findText(name))
self._combobox_cmaps.blockSignals(False)
def close(self):
"""
Closes the window.
"""
self._window.close()
#_qt.QtWidgets.qApp.processEvents()
def show(self):
"""
Shows the window.
"""
self._window.show()
#_qt.QtWidgets.qApp.processEvents()
######################
## Example Code
######################
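# The block below is not part of the original file; it is a hedged usage
# sketch. The enclosing editor class is defined earlier in the file and is not
# visible in this excerpt, so the name `colormap_editor` is an assumption, as
# is the idea that constructing it builds the GUI and show() displays it.
#
# if __name__ == "__main__":
#     editor = colormap_editor()   # hypothetical constructor name (assumed)
#     editor.show()                # display the window built by _build_gui()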
| gpl-3.0 | 6,873,766,403,259,576,000 | 32.232727 | 175 | 0.55679 | false |
stechu/myria-commandline | scalability_queries.py | 1 | 1515 | worker_2 = '''
R = scan(chushumo:worker_2:twitter_1m);
-- query(x,y,z):-R(x,y),S(y,z),T(z,x)
query = [from R, R as S, R as T
where R.$1 = S.$0 and
S.$1 = T.$0 and
T.$1 = R.$0
emit R.$0 as x, S.$0 as y, S.$1 as z];
store(query,chushumo:worker_2:triangle);
'''
worker_4 = '''
R = scan(chushumo:worker_4:twitter_1m);
-- query(x,y,z):-R(x,y),S(y,z),T(z,x)
query = [from R, R as S, R as T
where R.$1 = S.$0 and
S.$1 = T.$0 and
T.$1 = R.$0
emit R.$0 as x, S.$0 as y, S.$1 as z];
store(query,chushumo:worker_4:triangle);
'''
worker_8 = '''
R = scan(chushumo:worker_8:twitter_1m);
-- query(x,y,z):-R(x,y),S(y,z),T(z,x)
query = [from R, R as S, R as T
where R.$1 = S.$0 and
S.$1 = T.$0 and
T.$1 = R.$0
emit R.$0 as x, S.$0 as y, S.$1 as z];
store(query,chushumo:worker_8:triangle);
'''
worker_16 = '''
R = scan(chushumo:worker_16:twitter_1m);
-- query(x,y,z):-R(x,y),S(y,z),T(z,x)
query = [from R, R as S, R as T
where R.$1 = S.$0 and
S.$1 = T.$0 and
T.$1 = R.$0
emit R.$0 as x, S.$0 as y, S.$1 as z];
store(query,chushumo:worker_16:triangle);
'''
worker_32 = '''
R = scan(chushumo:worker_32:twitter_1m);
-- query(x,y,z):-R(x,y),S(y,z),T(z,x)
query = [from R, R as S, R as T
where R.$1 = S.$0 and
S.$1 = T.$0 and
T.$1 = R.$0
emit R.$0 as x, S.$0 as y, S.$1 as z];
store(query,chushumo:worker_32:triangle);
'''
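# ---------------------------------------------------------------------------
# The five strings above differ only in the relation prefix.  The helper below
# is a sketch that is not part of the original file: it rebuilds the same
# MyriaL triangle query for an arbitrary worker count, assuming the
# chushumo:worker_<n>:twitter_1m naming scheme used above.
def triangle_query(workers):
    """Return the MyriaL triangle query for the given worker count."""
    prefix = "chushumo:worker_%d" % workers
    return """
R = scan(%s:twitter_1m);
-- query(x,y,z):-R(x,y),S(y,z),T(z,x)
query = [from R, R as S, R as T
         where R.$1 = S.$0 and
               S.$1 = T.$0 and
               T.$1 = R.$0
         emit R.$0 as x, S.$0 as y, S.$1 as z];
store(query,%s:triangle);
""" % (prefix, prefix)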
| mit | 8,279,889,561,414,007,000 | 27.055556 | 45 | 0.489109 | false |
jdemel/gnuradio | gnuradio-runtime/python/pmt/pmt_to_python.py | 1 | 5374 | # Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from __future__ import unicode_literals
from . import pmt_python as pmt
import numpy
# SWIG isn't taking in the #define PMT_NIL;
# getting the singleton locally.
PMT_NIL = pmt.get_PMT_NIL()
#define missing
def pmt_to_tuple(p):
elems = list()
for i in range(pmt.length(p)):
elem = pmt.tuple_ref(p, i)
elems.append(pmt_to_python(elem))
return tuple(elems)
def pmt_from_tuple(p):
args = list(map(python_to_pmt, p))
return pmt.make_tuple(*args)
def pmt_to_vector(p):
v = list()
for i in range(pmt.length(p)):
elem = pmt.vector_ref(p, i)
v.append(pmt_to_python(elem))
return v
def pmt_from_vector(p):
v = pmt.make_vector(len(p), PMT_NIL)
for i, elem in enumerate(p):
pmt.vector_set(v, i, python_to_pmt(elem))
return v
def pmt_to_dict(p):
d = dict()
items = pmt.dict_items(p)
for i in range(pmt.length(items)):
pair = pmt.nth(i, items)
k = pmt.car(pair)
v = pmt.cdr(pair)
d[pmt_to_python(k)] = pmt_to_python(v)
return d
def pmt_from_dict(p):
d = pmt.make_dict()
for k, v in list(p.items()):
#dict is immutable -> therefore pmt_dict_add returns the new dict
d = pmt.dict_add(d, python_to_pmt(k), python_to_pmt(v))
return d
numpy_mappings = {
numpy.dtype(numpy.float32): (pmt.init_f32vector, float, pmt.f32vector_elements, pmt.is_f32vector),
numpy.dtype(numpy.float64): (pmt.init_f64vector, float, pmt.f64vector_elements, pmt.is_f64vector),
numpy.dtype(numpy.complex64): (pmt.init_c32vector, complex, pmt.c32vector_elements, pmt.is_c32vector),
numpy.dtype(numpy.complex128): (pmt.init_c64vector, complex, pmt.c64vector_elements, pmt.is_c64vector),
numpy.dtype(numpy.int8): (pmt.init_s8vector, int, pmt.s8vector_elements, pmt.is_s8vector),
numpy.dtype(numpy.int16): (pmt.init_s16vector, int, pmt.s16vector_elements, pmt.is_s16vector),
numpy.dtype(numpy.int32): (pmt.init_s32vector, int, pmt.s32vector_elements, pmt.is_s32vector),
# numpy.dtype(numpy.int64): (pmt.init_s64vector, int, pmt.s64vector_elements, pmt.is_s64vector),
numpy.dtype(numpy.uint8): (pmt.init_u8vector, int, pmt.u8vector_elements, pmt.is_u8vector),
numpy.dtype(numpy.uint16): (pmt.init_u16vector, int, pmt.u16vector_elements, pmt.is_u16vector),
numpy.dtype(numpy.uint32): (pmt.init_u32vector, int, pmt.u32vector_elements, pmt.is_u32vector),
# numpy.dtype(numpy.uint64): (pmt.init_u64vector, int, pmt.u64vector_elements, pmt.is_u64vector),
numpy.dtype(numpy.byte): (pmt.init_u8vector, int, pmt.u8vector_elements, pmt.is_u8vector),
}
uvector_mappings = dict([ (numpy_mappings[key][3], (numpy_mappings[key][2], key)) for key in numpy_mappings ])
def numpy_to_uvector(numpy_array):
try:
mapping = numpy_mappings[numpy_array.dtype]
pc = list(map(mapping[1], numpy.ravel(numpy_array)))
return mapping[0](numpy_array.size, pc)
except KeyError:
raise ValueError("unsupported numpy array dtype for conversion to pmt %s"%(numpy_array.dtype))
def uvector_to_numpy(uvector):
match = None
for test_func in list(uvector_mappings.keys()):
if test_func(uvector):
match = uvector_mappings[test_func]
return numpy.array(match[0](uvector), dtype = match[1])
else:
raise ValueError("unsupported uvector data type for conversion to numpy array %s"%(uvector))
type_mappings = ( #python type, check pmt type, to python, from python
(None, pmt.is_null, lambda x: None, lambda x: PMT_NIL),
(bool, pmt.is_bool, pmt.to_bool, pmt.from_bool),
(str, pmt.is_symbol, pmt.symbol_to_string, pmt.string_to_symbol),
(str, lambda x: False, None, lambda x: pmt.string_to_symbol(x.encode('utf-8'))),
(int, pmt.is_integer, pmt.to_long, pmt.from_long),
(int, pmt.is_uint64, lambda x: int(pmt.to_uint64(x)), pmt.from_uint64),
(float, pmt.is_real, pmt.to_double, pmt.from_double),
(complex, pmt.is_complex, pmt.to_complex, pmt.from_complex),
(tuple, pmt.is_tuple, pmt_to_tuple, pmt_from_tuple),
(list, pmt.is_vector, pmt_to_vector, pmt_from_vector),
(dict, pmt.is_dict, pmt_to_dict, pmt_from_dict),
(tuple, pmt.is_pair, lambda x: (pmt_to_python(pmt.car(x)), pmt_to_python(pmt.cdr(x))), lambda x: pmt.cons(python_to_pmt(x[0]), python_to_pmt(x[1]))),
(numpy.ndarray, pmt.is_uniform_vector, uvector_to_numpy, numpy_to_uvector),
)
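# Example round trip through the mappings above (a hedged sketch, not part of
# the original module; it assumes the compiled pmt bindings are importable):
#
#   >>> p = python_to_pmt({'freq': 915e6, 'count': 3})
#   >>> d = pmt_to_python(p)
#   >>> d['freq'], d['count']
#   (915000000.0, 3)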
def pmt_to_python(p):
for python_type, pmt_check, to_python, from_python in type_mappings:
if pmt_check(p):
try:
return to_python(p)
except (RuntimeError, TypeError, ValueError): # TODO: make pybind11 handle wrong_type, convert to type error
# This exception will be handled by the general failure case
pass
raise ValueError("can't convert %s type to pmt (%s)"%(type(p),p))
def python_to_pmt(p):
for python_type, pmt_check, to_python, from_python in type_mappings:
if python_type is None:
if p is None: return from_python(p)
elif isinstance(p, python_type): return from_python(p)
raise ValueError("can't convert %s type to pmt (%s)"%(type(p),p))
| gpl-3.0 | 6,369,554,458,698,232,000 | 41.992 | 153 | 0.653331 | false |
pculture/unisubs | apps/videos/templatetags/widget.py | 1 | 1178 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django import template
from django.conf import settings
register = template.Library()
@register.inclusion_tag('videos/_widget.html')
def widget(widget_params, div_id='widget_div'):
return {
'div_id': div_id,
'widget_params': widget_params
}
@register.inclusion_tag('videos/_get_counter.html')
def get_counter():
domain = settings.HOSTNAME
return {
'domain': domain
}
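# Hedged usage sketch (not part of the original file): after `{% load widget %}`
# in a Django template, the inclusion tags above are rendered as
#
#   {% widget widget_params "my_widget_div" %}
#   {% get_counter %}
#
# where `widget_params` is a template context variable and "my_widget_div"
# overrides the default div id.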
| agpl-3.0 | 6,860,369,454,018,029,000 | 31.722222 | 74 | 0.732598 | false |
informatik-mannheim/Moduro-CC3D | Simulation/SpaBpaPcdiInDa.py | 1 | 1241 | # Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Julain Debatin"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "[email protected]"
__status__ = "Production"
# Example for Simulation.
# Important to have it here, otherwise an error occurs: CC3D uses a special
# module loader that cannot directly instantiate classes. (Wish I knew more
# about Python.)
import sys
from os import environ
import CompuCellSetup
sys.path.append(environ["PYTHON_MODULE_PATH"])
sim, simthread = CompuCellSetup.getCoreSimulationObjects()
# Now load the model to simulate!
from ModuroModel.Spa.SpaBpaPcdiInDa import SpaBpaPcdiInDa
model = SpaBpaPcdiInDa(sim, simthread)
| apache-2.0 | 2,450,295,381,788,317,700 | 33.472222 | 79 | 0.74859 | false |
fredreichbier/genie | genie/slp/__init__.py | 1 | 10209 | """
A modular decoder for the SLP image format as used in the Genie engine.
Thanks to http://alexander-jenkins.co.uk/blog/?p=9 and to
http://www.digitization.org/wiki/index.php/SLP for the great documentation!
"""
import struct
import construct as cons
class FrameAdapter(cons.Adapter):
def _decode(self, obj, context):
return Frame(context['_']['slp_file'], obj)
FRAME = cons.Struct('frames',
cons.ULInt32('cmd_table_offset'),
cons.ULInt32('outline_table_offset'),
cons.ULInt32('palette_offset'),
cons.ULInt32('properties'),
cons.SLInt32('width'),
cons.SLInt32('height'),
cons.SLInt32('hotspot_x'),
cons.SLInt32('hotspot_y'),
)
HEADER = cons.Struct('header',
cons.String('version', 4),
cons.ULInt32('num_frames'),
cons.String('comment', 24),
cons.Array(lambda ctx: ctx['num_frames'], FrameAdapter(FRAME)),
)
class ImageAdapter(object):
"""
A generic image writer. Could be used with PIL, cairo, ...
"""
def __init__(self, frame):
"""
Create a new image with the dimensions given by the frame object.
"""
raise NotImplementedError()
def draw_pixels(self, x, y, amount, color):
"""
Draw *amount* pixels, horizontally, starting at *x*, *y*.
*color* is a 3-tuple (R, G, B) or None for transparency.
"""
raise NotImplementedError()
def get_image(self):
"""
Return the finished image object.
"""
raise NotImplementedError()
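# A minimal concrete adapter, given as a hedged sketch rather than as part of
# the original module: it satisfies the ImageAdapter interface documented above
# by storing rows of (R, G, B) tuples, with None marking transparent pixels.
class ListImageAdapter(ImageAdapter):
    def __init__(self, frame):
        # allocate a frame.width x frame.height grid of transparent pixels
        self.rows = [[None] * frame.width for _ in xrange(frame.height)]
    def draw_pixels(self, x, y, amount, color):
        # fill `amount` pixels horizontally starting at (x, y)
        for i in xrange(amount):
            self.rows[y][x + i] = color
    def get_image(self):
        return self.rows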
class Frame(object):
def __init__(self, slp_file, structure):
self.slp_file = slp_file
self.structure = structure
self.width = self.structure.width
self.height = self.structure.height
self.hotspot_x = self.structure.hotspot_x
self.hotspot_y = self.structure.hotspot_y
def parse_stream(self, player=1, image_adapter_cls=None):
"""
Use the image adapter class to create an image.
"""
stream = self.slp_file.stream
width, height = self.structure.width, self.structure.height
if image_adapter_cls is None:
image_adapter_cls = self.slp_file.image_adapter_cls
adapter = image_adapter_cls(self)
# First, the boundaries.
stream.seek(self.structure.outline_table_offset)
left_boundaries = []
for y in xrange(height):
left, right = struct.unpack('=HH', stream.read(4))
if (left == 0x8000 or right == 0x8000):
# fully transparent row
adapter.draw_pixels(0, y, width, None)
# this will tell the parser to skip this line later.
left_boundaries.append(None)
else:
# draw transparent pixels.
left_boundaries.append(left)
adapter.draw_pixels(0, y, left, None)
adapter.draw_pixels(width - right, y, right, None)
# The command offsets.
command_offsets = []
for y in xrange(height):
command_offsets.append(struct.unpack('=I', stream.read(4))[0])
# Now, the actual commands.
stream.seek(command_offsets[0])
x = left_boundaries[0]
y = 0
while x is None:
# maybe the first row is transparent already?
y += 1
x = left_boundaries[y]
def _get_byte():
""" take a byte from the stream. """
return struct.unpack('=B', stream.read(1))[0]
def _get_4ornext(opcode):
"""
either return the 4 most significant bits from the opcode
or the complete next byte if the former is 0.
"""
return (opcode >> 4) or _get_byte()
def _get_bigbig(opcode):
""" right-shift 4 bits to the right + next byte """
return ((opcode & 0xf0) << 4) + _get_byte()
def _draw_pixels(amount, palette_index):
assert x + amount <= width
if palette_index is None:
color = None
else:
color = self.slp_file.palette[palette_index]
adapter.draw_pixels(x, y, amount, color)
def _get_palette_index(player, relindex):
return player * 16 + relindex
def _draw_special_color(amount, index):
"""
index = 2: player color
index = 1: black. (or so?)
This contradicts Bryce's SLP.rtf, but it looks pretty strange
if 1 is the player color. TODO?
"""
if index == 2:
palette_index = _get_palette_index(player, 0)
else:
palette_index = 0
_draw_pixels(amount, palette_index)
while y < height:
opcode = _get_byte()
twobit = opcode & 0b11
fourbit = opcode & 0b1111
if x > width:
raise Exception('%d > %d' % (x, width))
if fourbit == 0x0f:
y += 1
if y < height:
x = left_boundaries[y]
while x is None:
# fully transparent line! (skip this line)
y += 1
x = left_boundaries[y]
if stream.tell() != command_offsets[y]:
# not an error, but excessive padding might suggest something is slightly wrong!
print "Warning: line %d has %d bytes of air after commands" % (y - 1,
command_offsets[y] - stream.tell())
# get ourselves aligned again
stream.seek(command_offsets[y])
elif fourbit == 0x06:
# player colors
amount = _get_4ornext(opcode)
#print 'player colors', amount
for _ in xrange(amount):
relindex = _get_byte()
_draw_pixels(1, _get_palette_index(player, relindex))
x += 1
elif fourbit == 0x0e:
# Extended command (shadows etc.)
# get the high 4 bits for the extended command
# I only found information about this opcode in
# the slp.rtf file (see README).
# For now, this doesn't actually do anything apart
# from reading the correct number of bytes from the
# stream to parse the image data correctly.
extended = opcode >> 4
#print 'Extended command!', extended
if extended == 0:
# woho! this should only be drawn if the
# sprite is not x-flipped. TODO.
pass
elif extended == 1:
# this should only be drawn if the sprite
# is x-flipped. TODO.
pass
elif extended in (2, 3):
# do some fiddling with transform color tables.
# see documentation.
pass
elif extended in (4, 6):
# special color 1/2, but only 1 byte.
_draw_special_color(1, {4: 1, 6: 2}[extended])
x += 1
elif extended in (5, 7):
# special color 1/2, read amount from stream.
amount = _get_byte()
_draw_special_color(amount, {5: 1, 7: 2}[extended])
x += amount
else:
raise NotImplementedError('Unknown extended opcode: %r' % extended)
elif fourbit == 0x07:
# fill
amount = _get_4ornext(opcode)
#print 'fill', amount
palette_index = _get_byte()
_draw_pixels(amount, palette_index)
x += amount
elif fourbit == 0x0a:
amount = _get_4ornext(opcode)
#print 'player fill', amount
# TODO: this is not really correct
_draw_pixels(amount, _get_palette_index(player, _get_byte()))
x += amount
elif fourbit == 0x0b:
amount = _get_4ornext(opcode)
#print 'Ignoring 0x0b opcode for %d pixels' % amount
x += amount
elif fourbit in (0x4e, 0x5e):
raise NotImplementedError('The 0x%x opcode is not yet implemented.' % fourbit)
elif twobit == 0:
# draw
amount = opcode >> 2
#print 'draw', amount
for _ in xrange(amount):
_draw_pixels(1, _get_byte())
x += 1
elif twobit == 1:
# skip pixels
# 2ornext
amount = opcode >> 2
#print 'skip', amount
if amount == 0:
amount = _get_byte()
_draw_pixels(amount, None)
x += amount
elif twobit == 2:
amount = _get_bigbig(opcode)
#print 'big draw', amount
for _ in xrange(amount):
_draw_pixels(1, _get_byte())
x += 1
elif twobit == 3:
amount = _get_bigbig(opcode)
#print 'big skip', amount
_draw_pixels(amount, None)
x += amount
else:
raise Exception()
return adapter.get_image()
class SLPFile(object):
"""
A SLP file containing multiple `Frame` objects. You need to specify
an `ImageAdapter` subclass (or factory function) to build images, also,
a palette dictionary (AOE1 is the default).
"""
def __init__(self, stream, palette, image_adapter_cls):
self.header = None
self.palette = palette
self.image_adapter_cls = image_adapter_cls
self.stream = stream
self.header = HEADER._parse(stream, cons.Container(slp_file=self))
self.frames = self.header.frames
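# Hedged usage sketch (not part of the original module): decode the first frame
# of an SLP file with an adapter such as ListImageAdapter above.  `palette` is
# assumed to be a mapping from palette index to an (R, G, B) tuple.
#
# with open('graphics.slp', 'rb') as stream:
#     slp = SLPFile(stream, palette, ListImageAdapter)
#     rows = slp.frames[0].parse_stream(player=1)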
| bsd-2-clause | 229,002,700,896,511,780 | 36.259124 | 104 | 0.50142 | false |
passy/glashammer-rdrei | examples/notes/notes.py | 1 | 2761 | # -*- coding: utf-8 -*-
"""
glashammer.examples.notes
~~~~~~~~~~~~~~~~~~~~~~~~~
A simple note application.
:copyright: 2010 Glashammer Developers
:license: MIT
"""
from os.path import dirname
from glashammer.application import make_app
from glashammer.utils import run_very_simple, render_response
from glashammer.bundles.sqlalchdb import session, metadata, setup_sqlalchdb
from glashammer.bundles.sqlalchdb import get_engine
from sqlalchemy import Table, Column, types
from sqlalchemy.orm import mapper
FOLDER = dirname(__file__)
def index_view(req):
return render_response('notes_index.jinja',
notes=session.query(Note).order_by(Note.id))
def add_view(req):
# validate form
title = req.form.get('title', 'kT')
text = req.form.get('text')
prio = req.form.get('importance')
if not text:
text = "kT"
note = Note(title, text, prio)
session.add(note)
session.commit()
return render_response('notes_success.jinja')
def edit_view(req, nid):
# find note
note = session.query(Note).get(nid)
# TODO: check if note exists
return render_response('notes_edit.jinja', note=note)
def edit_submit_view(req, nid):
# find note
note = session.query(Note).get(nid)
# TODO: check if note exists
note.title = req.form.get('title')
note.note = req.form.get('text')
note.importance = req.form.get('importance')
session.add(note)
session.commit()
return render_response('notes_success.jinja')
# The SQLA tables
notes = Table('notes', metadata,
Column('id', types.Integer, primary_key=True),
Column('title', types.Unicode),
Column('note', types.Unicode),
Column('importance', types.Unicode))
# The ORM mapped class
class Note(object):
"""Represents a note"""
def __init__(self, title, text, importance=None):
self.title = title
self.note = text
self.importance = importance
# Make a mapper which gives you the objects manager
mapper(Note, notes)
def setup(app):
# Use the sqlalchdb bundle
app.add_setup(setup_sqlalchdb)
# Function to be run during data setup phase
app.add_data_func(init_data)
# Add the template searchpath
app.add_template_searchpath(FOLDER)
# Urls
app.add_url('/', 'example/index', view=index_view)
app.add_url('/add', 'example/add', view=add_view)
app.add_url('/edit/<int:nid>', 'example/edit', view=edit_view)
app.add_url('/edit/<int:nid>/submit', 'example/edit_submit',
view=edit_submit_view)
def init_data(app):
engine = get_engine()
metadata.create_all(engine)
# Used by gh-admin
def create_app():
return make_app(setup, FOLDER)
if __name__ == '__main__':
app = create_app()
run_very_simple(app)
| mit | 790,866,502,890,790,700 | 23.873874 | 75 | 0.659906 | false |
hall1467/wikidata_usage_tracking | wbc_usage/utilities/entity_page_views.py | 1 | 4539 | """
Prints page views for each entity_aspect_wikidb. Merges page view and entity
usage data.
Usage:
entity_page_views (-h|--help)
entity_page_views <aggregated-entity-usage-file> --page-view-file=<location>
[<dbname-file>]
[--output=<location>]
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<aggregated-entity-usage-file> Path to aggregated entity usage file.
--page-view-file=<location> Path to bzip tsv file to process.
<dbname-file> Path to json file to process. If no file is
provided, uses stdin
--output=<location> Where results will be writen.
[default: <stdout>]
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import logging
import requests
import sys
import json
import bz2
import re
from collections import defaultdict
import docopt
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
aggregated_entity_usage_file = open(args['<aggregated-entity-usage-file>'])
page_view_file = args['--page-view-file']
if args['<dbname-file>']:
dbname_file = args['<dbname-file>']
else:
logger.info("Reading from <stdin>")
dbname_file = sys.stdin
if args['--output'] == '<stdout>':
output = sys.stdout
else:
output = open(args['--output'], "w")
verbose = args['--verbose']
run(aggregated_entity_usage_file, dbname_file, page_view_file, output,
verbose)
def run(aggregated_entity_usage_file, dbname_file, page_view_file, output,
verbose):
view_dict = defaultdict(lambda: defaultdict(dict))
wikidb_dict = {}
entity_values = {}
f = bz2.open(page_view_file)
if verbose:
sys.stderr.write("Inserting page views into Python dictionary")
sys.stderr.flush()
for i, entry in enumerate(f):
if i == 0:
continue
entry_list = entry.decode().strip().split("\t")
if len(entry_list) != 3:
logger.warn(" Page view entry \"{0}\" improperly formatted"
.format(entry.decode().strip()))
continue
project, page, views = entry_list
view_dict[project][page] = int(views)
if verbose and i % 1000000 == 0:
sys.stderr.write(".")
sys.stderr.flush()
if verbose:
sys.stderr.write("inserting complete\n")
sys.stderr.flush()
for line in dbname_file:
json_line = json.loads(line)
wikidb_dict[json_line['dbname']] =\
re.findall(r"https://(www\.)?(.*)\.org",json_line['wikiurl'])[0][1]
if verbose:
sys.stderr.write("Checking each line in aggregated entity usage file " +
"against page views")
sys.stderr.flush()
for i, line in enumerate(aggregated_entity_usage_file):
json_line = json.loads(line)
entity_page_view_count = 0
proj = json_line["wikidb"]
entity = json_line["entity_id"]
aspect = json_line["aspect"]
if verbose and i % 1000000 == 0:
sys.stderr.write(".")
sys.stderr.flush()
for page_id in json_line['unique_page_list']:
page_id = str(page_id)
page_views = 0
if wikidb_dict[proj] not in view_dict:
logger.warn("Project \"{0}\" does not have a page view entry"
.format(wikidb_dict[proj]))
break
elif page_id not in view_dict[wikidb_dict[proj]]:
logger.warn("Page id \"{0}\" for project \"{1}\" does not have"
.format(page_id, wikidb_dict[proj])
+ " a page view entry")
else:
page_views = view_dict[wikidb_dict[proj]][page_id]
output.write(json.dumps({
'project' : proj,
'entity_id': entity,
'page_views': page_views,
'page_id': page_id,
'aspect': aspect
}) + "\n")
if verbose:
sys.stderr.write("checking complete\n")
sys.stderr.flush()
| mit | 7,453,180,173,170,969,000 | 27.910828 | 80 | 0.544173 | false |
koljonen/pgcli | tests/test_fuzzy_completion.py | 1 | 2866 | from __future__ import unicode_literals
import pytest
@pytest.fixture
def completer():
import pgcli.pgcompleter as pgcompleter
return pgcompleter.PGCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
matches = completer.find_matches(text, collection)
assert len(matches) == 2
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
matches = completer.find_matches(text, collection)
assert matches[1].priority > matches[0].priority
@pytest.mark.parametrize('collection', [
['user_action', 'user'],
['user_group', 'user'],
['user_group', 'user_action'],
])
def test_should_break_ties_using_lexical_order(completer, collection):
"""Fuzzy result rank should use lexical order to break ties.
When fuzzy matching, if multiple matches have the same match length and
start position, present them in lexical (rather than arbitrary) order. For
example, if we have tables 'user', 'user_action', and 'user_group', a
search for the text 'user' should present these tables in this order.
The input collections to this test are out of order; each run checks that
the search text 'user' results in the input tables being reordered
lexically.
"""
text = 'user'
matches = completer.find_matches(text, collection)
assert matches[1].priority > matches[0].priority
def test_matching_should_be_case_insensitive(completer):
"""Fuzzy matching should keep matches even if letter casing doesn't match.
This test checks that variations of the text which have different casing
are still matched.
"""
text = 'foo'
collection = ['Foo', 'FOO', 'fOO']
matches = completer.find_matches(text, collection)
assert len(matches) == 3
| bsd-3-clause | 4,966,568,848,455,134,000 | 31.568182 | 78 | 0.710049 | false |
irin4eto/Bar-Management | orders/migrations/0010_initial.py | 1 | 3480 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Stock'
db.create_table('orders_stock', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('price', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
))
db.send_create_signal('orders', ['Stock'])
# Adding model 'Order'
db.create_table('orders_order', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('items', self.gf('django.db.models.fields.CharField')(max_length=30)),
('amount', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal('orders', ['Order'])
# Adding model 'StatusOrders'
db.create_table('orders_statusorders', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('items', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, blank=True)),
('date_and_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(max_length=7)),
('waiter', self.gf('django.db.models.fields.CharField')(max_length=30)),
('bartender', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
))
db.send_create_signal('orders', ['StatusOrders'])
def backwards(self, orm):
# Deleting model 'Stock'
db.delete_table('orders_stock')
# Deleting model 'Order'
db.delete_table('orders_order')
# Deleting model 'StatusOrders'
db.delete_table('orders_statusorders')
models = {
'orders.order': {
'Meta': {'object_name': 'Order'},
'amount': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'orders.statusorders': {
'Meta': {'object_name': 'StatusOrders'},
'bartender': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'date_and_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'waiter': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'orders.stock': {
'Meta': {'object_name': 'Stock'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
}
}
complete_apps = ['orders'] | gpl-3.0 | -9,072,361,152,304,698,000 | 46.040541 | 122 | 0.568966 | false |
devdattakulkarni/test-solum | solum/api/handlers/camp/assembly_handler.py | 1 | 1647 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from solum.api.handlers import assembly_handler as solum_assem_handler
from solum import objects
class AssemblyHandler(solum_assem_handler.AssemblyHandler):
def create_from_plan(self, plan_obj):
"""Create an application using a plan resource as a template."""
db_obj = objects.registry.Assembly()
db_obj.uuid = str(uuid.uuid4())
db_obj.user_id = self.context.user
db_obj.project_id = self.context.tenant
db_obj.trigger_id = str(uuid.uuid4())
db_obj.username = self.context.user_name
# use the plan name as the name of this application
db_obj.name = plan_obj.name + "_application"
db_obj.plan_id = plan_obj.id
db_obj.plan_uuid = plan_obj.uuid
db_obj.status = solum_assem_handler.ASSEMBLY_STATES.QUEUED
db_obj.create(self.context)
artifacts = plan_obj.raw_content.get('artifacts', [])
# build each artifact in the plan
for arti in artifacts:
self._build_artifact(assem=db_obj, artifact=arti)
return db_obj
| apache-2.0 | -6,993,427,364,563,070,000 | 36.431818 | 75 | 0.684274 | false |
appleseedhq/gaffer | python/GafferImageUI/FlatToDeepUI.py | 1 | 4334 | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferImage
import IECore
Gaffer.Metadata.registerNode(
GafferImage.FlatToDeep,
"description",
"""
Sets the deep flag on a flat image, and makes sure that it has a Z channel ( and optionally a ZBack channel )
so that it can be used in deep compositing.
""",
"layout:activator:zConstant", lambda node : node["zMode"].getValue() == GafferImage.FlatToDeep.ZMode.Constant,
"layout:activator:zChannel", lambda node : node["zMode"].getValue() == GafferImage.FlatToDeep.ZMode.Channel,
"layout:activator:zBackThickness", lambda node : node["zBackMode"].getValue() == GafferImage.FlatToDeep.ZBackMode.Thickness,
"layout:activator:zBackChannel", lambda node : node["zBackMode"].getValue() == GafferImage.FlatToDeep.ZBackMode.Channel,
plugs = {
"zMode" : [
"description",
"""
Deep images must have a Z channel - it can be set either as a fixed depth, or using a channel.
""",
"preset:Constant", GafferImage.FlatToDeep.ZMode.Constant,
"preset:Channel", GafferImage.FlatToDeep.ZMode.Channel,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"depth" : [
"description",
"""
A constant depth value to place the whole image at.
""",
"layout:visibilityActivator", "zConstant",
],
"zChannel" : [
"description",
"""
Uses this channel as a Z channel, defining the depth each pixel is at.
""",
"plugValueWidget:type", "GafferImageUI.ChannelPlugValueWidget",
"layout:visibilityActivator", "zChannel",
],
"zBackMode" : [
"description",
"""
Deep images may optionally have a ZBack channel - for transparent samples, this specifies
the depth range over which the opacity gradually increases from 0 to the alpha value.
""",
"preset:None", GafferImage.FlatToDeep.ZBackMode.None,
"preset:Thickness", GafferImage.FlatToDeep.ZBackMode.Thickness,
"preset:Channel", GafferImage.FlatToDeep.ZBackMode.Channel,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"thickness" : [
"description",
"""
A constant thickness value for the whole image. Transparent images will be
interpreted as fog where the density increases over this range.
""",
"layout:visibilityActivator", "zBackThickness",
],
"zBackChannel" : [
"description",
"""
Uses this channel as a ZBack channel, defining the end of the depth range for each
pixel.
""",
"plugValueWidget:type", "GafferImageUI.ChannelPlugValueWidget",
"layout:visibilityActivator", "zBackChannel",
],
}
)
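# Hedged usage sketch (not part of the original registration file): the plugs
# documented above are driven from Python roughly like this, assuming the
# standard Gaffer plug setValue() API.
#
# node = GafferImage.FlatToDeep()
# node["zMode"].setValue( GafferImage.FlatToDeep.ZMode.Channel )
# node["zChannel"].setValue( "Z" )
# node["zBackMode"].setValue( GafferImage.FlatToDeep.ZBackMode.Thickness )
# node["thickness"].setValue( 0.1 )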
| bsd-3-clause | 5,892,267,718,167,482,000 | 32.859375 | 125 | 0.695201 | false |
danieltahara/indivisible | indivisible/datasources/propublica_test.py | 1 | 1343 | import unittest
from propublica import ProPublica
class TestPropublica(unittest.TestCase):
def setUp(self):
self.pp = ProPublica()
def test_get_member_by_id(self):
member = self.pp.get_member_by_id("A000360")
self.assertEqual(member['first_name'], "Lamar")
member = self.pp.get_member_by_id("FOOBARBAZ")
self.assertIsNone(member)
def test_get_members_by_location(self):
members = self.pp.get_members_by_location("house", "ny", 17)
self.assertEqual(len(members), 1)
member = members[0]
self.assertEqual(member['name'], "Nita M. Lowey")
# Converts state name
members = self.pp.get_members_by_location("house", "new york", 17)
self.assertEqual(len(members), 1)
member = members[0]
self.assertEqual(member['name'], "Nita M. Lowey")
# Gets senators
members = self.pp.get_members_by_location("senate", "ny")
self.assertEqual(len(members), 2)
member = members[0]
self.assertEqual(member['name'], "Kirsten E. Gillibrand")
# District doesn't matter for senate
members = self.pp.get_members_by_location("senate", "ny", 201293812810)
self.assertEqual(len(members), 2)
if __name__ == '__main__':
    ProPublica.load_api_key()
unittest.main()
| mit | -8,586,940,404,357,357,000 | 31.756098 | 79 | 0.623232 | false |