repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
d1b/python-nmap-xml-output-parser | nmap_xml_to_sqlite.py | 1 | 4251 | #!/usr/bin/env python
from lxml import etree
import sqlite3
import os
import datetime
from shows_hosts_with_open_port_and_service_desc import parse_opts
__program__ = 'python_convert_nmap_xml_to_sqlite_db'
__author__ = 'dave b. <[email protected]>'
__license__ = 'GPL v2'
class nmap_xml_to_sqlite:
def __init__(self, filename, store_p=os.path.expanduser('~/.nmap_pdb/')):
self.filename = filename
self.store_p = store_p
self._db_name = "nmap.db"
self.conn = None
self.cursor = None
def create_store_dir(self):
""" create the store directory if it doesn't exist """
if not os.path.exists(self.store_p):
os.mkdir(self.store_p, 16832)
def connect_to_db(self):
""" connect to the database """
self.conn = sqlite3.connect(self.store_p + self._db_name)
self.cursor = self.conn.cursor()
def create_db(self):
""" create the database tables if they don't exist """
self.cursor.execute("""create table if not exists
hosts(addr text, hostname text, scan_time datetime,
unique(addr, hostname, scan_time))""")
self.cursor.execute("""create table if not exists
open_port (addr text, port integer, product text,
protocol text, scan_time datetime, name text,
servicefp text, version text,
unique(protocol, port, addr, scan_time))""")
self.cursor.execute("""create table if not exists scan
(scan_time datetime, args text, unique (scan_time, args))""")
def insert_scan_into_db(self, time_of_scan, args):
""" insert a scan into the database """
sql_statement = """insert or ignore into scan (scan_time, args) VALUES (?, ?) """
self.cursor.execute(sql_statement, (time_of_scan, args))
def insert_host_into_db(self, addr, hostname, time_of_scan):
""" insert a host into the database """
sql_statement = """insert or ignore into hosts (addr, hostname, scan_time) VALUES (?, ?, ?) """
self.cursor.execute(sql_statement, (addr, hostname, time_of_scan))
def insert_port_into_db(self, addr, protocol, serv_d, time_of_scan):
""" insert a port into the database """
sql_statement = """insert or ignore into open_port (addr, port, product, protocol, scan_time,
name, servicefp, version) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
self.cursor.execute(sql_statement, (addr, serv_d["portid"], serv_d["product"], \
protocol, time_of_scan, serv_d["name"], serv_d["servicefp"], serv_d["version"] ))
def insert_all_scan_info_into_db(self):
"""
XXX: make this method cleaner!
insert every host that has open ports in the nmap xml file and
a description for it (the port) into the database
"""
self._doc = etree.parse(self.filename)
time_of_scan, args = "", ""
for x in self._doc.xpath("//nmaprun"):
time_of_scan = datetime.datetime.fromtimestamp(float(x.attrib['start']))
args = x.attrib['args']
self.insert_scan_into_db(time_of_scan, args)
for x in self._doc.xpath("//host"):
hostname = "" #this will be the value of the last hostname node's name element
address = ""
desc = ""
protocol = ""
for host_n in x.xpath("hostnames/hostname/@name"):
hostname = host_n
for addr in x.xpath("address/@addr[@addrtype!='mac']"):
address = addr
break
self.insert_host_into_db(address, hostname, time_of_scan)
for open_p in x.xpath("ports/port[state[@state='open']]"):
protocol = open_p.attrib['protocol']
wrap_service_dict = self._service_wrap_attrib(list(open_p)[1].attrib)
wrap_service_dict["portid"] = open_p.attrib["portid"]
self.insert_port_into_db(address, protocol, wrap_service_dict, time_of_scan)
def _service_wrap_attrib(self, child_attrib):
""" some fields are optional - so enter a blank value for a key if it doesn't exist """
wrapped_dict_result = {}
for key in ["version", "product", "name", "servicefp"]:
if key in child_attrib.keys():
wrapped_dict_result[key] = child_attrib[key]
else:
wrapped_dict_result[key] = ""
return wrapped_dict_result
def close_and_commit_to_db(self):
""" commit to the database and close the cursor """
self.conn.commit()
self.cursor.close()
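# Illustrative note (not part of the original script): once populated, the resulting
# database can be inspected directly with sqlite3, e.g.
#   conn = sqlite3.connect(os.path.expanduser('~/.nmap_pdb/') + 'nmap.db')
#   for row in conn.execute("select addr, port, name from open_port order by addr"):
#       print(row)
# Table and column names follow the create_db() statements above.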
def main():
filename = parse_opts()
s = nmap_xml_to_sqlite(filename)
s.create_store_dir()
s.connect_to_db()
s.create_db()
s.insert_all_scan_info_into_db()
s.close_and_commit_to_db()
if __name__ == "__main__":
main()
| bsd-2-clause | -8,696,299,061,923,030,000 | 35.025424 | 97 | 0.669019 | false |
mne-tools/mne-python | mne/inverse_sparse/tests/test_mxne_inverse.py | 1 | 18421 | # Author: Alexandre Gramfort <[email protected]>
# Daniel Strohmeier <[email protected]>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_array_less, assert_array_equal)
import pytest
import mne
from mne.datasets import testing
from mne.label import read_label
from mne import (read_cov, read_forward_solution, read_evokeds,
convert_forward_solution)
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles, _split_gof
from mne.inverse_sparse.mxne_inverse import _compute_mxne_sure
from mne.inverse_sparse.mxne_optim import norm_l2inf
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.minimum_norm.tests.test_inverse import \
assert_var_exp_log, assert_stc_res
from mne.utils import assert_stcs_equal, catch_logging
from mne.dipole import Dipole
from mne.source_estimate import VolSourceEstimate
from mne.simulation import simulate_sparse_stc, simulate_evoked
data_path = testing.data_path(download=False)
# NOTE: These use the ave and cov from sample dataset (no _trunc)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
@pytest.fixture(scope='module', params=[testing._pytest_param])
def forward():
"""Get a forward solution."""
# module scope it for speed (but don't overwrite in use!)
return read_forward_solution(fname_fwd)
@testing.requires_testing_data
@pytest.mark.timeout(150) # ~30 sec on Travis Linux
@pytest.mark.slowtest
def test_mxne_inverse_standard(forward):
"""Test (TF-)MxNE inverse computation."""
# Read noise covariance matrix
cov = read_cov(fname_cov)
# Handling average file
loose = 0.0
depth = 0.9
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
label = read_label(fname_label)
assert label.hemi == 'rh'
forward = convert_forward_solution(forward, surf_ori=True)
# Reduce source space to make test computation faster
inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
loose=loose, depth=depth,
fixed=True, use_cps=True)
stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
method='dSPM')
stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
weights_min = 0.5
# MxNE tests
alpha = 70 # spatial regularization parameter
stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
active_set_size=10, weights=stc_dspm,
weights_min=weights_min, solver='prox')
with pytest.warns(None): # CD
stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
active_set_size=10, weights=stc_dspm,
weights_min=weights_min, solver='cd')
stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='bcd')
assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
assert stc_prox.vertices[1][0] in label.vertices
assert stc_cd.vertices[1][0] in label.vertices
assert stc_bcd.vertices[1][0] in label.vertices
# vector
with pytest.warns(None): # no convergence
stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2)
with pytest.warns(None): # no convergence
stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2,
pick_ori='vector')
assert_stcs_equal(stc_vec.magnitude(), stc)
with pytest.warns(None), pytest.raises(ValueError, match='pick_ori='):
mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2,
pick_ori='vector')
with pytest.warns(None), catch_logging() as log: # CD
dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True, verbose=True)
stc_dip = make_stc_from_dipoles(dips, forward['src'])
assert isinstance(dips[0], Dipole)
assert stc_dip.subject == "sample"
assert_stcs_equal(stc_cd, stc_dip)
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
# Single time point things should match
with pytest.warns(None), catch_logging() as log:
dips = mixed_norm(evoked_l21.copy().crop(0.081, 0.081),
forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True, verbose=True)
assert_var_exp_log(log.getvalue(), 37.8, 38.0) # 37.9
gof = sum(dip.gof[0] for dip in dips) # these are now partial exp vars
assert_allclose(gof, 37.9, atol=0.1)
with pytest.warns(None), catch_logging() as log:
stc, res = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
weights=stc_dspm, # gh-6382
active_set_size=10, return_residual=True,
solver='cd', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert stc.data.min() < -1e-9 # signed
assert_stc_res(evoked_l21, stc, forward, res)
# irMxNE tests
with pytest.warns(None), catch_logging() as log: # CD
stc, residual = mixed_norm(
evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
solver='cd', return_residual=True, pick_ori='vector', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert stc.vertices == [[63152], [79017]]
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert_stc_res(evoked_l21, stc, forward, residual)
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, forward, cov,
loose=loose, depth=depth, maxit=100, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, return_residual=True,
alpha=alpha, l1_ratio=l1_ratio)
assert_array_almost_equal(stc.times, evoked.times, 5)
assert stc.vertices[1][0] in label.vertices
# vector
stc_nrm = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio)
stc_vec, residual = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio,
pick_ori='vector', return_residual=True)
assert_stcs_equal(stc_vec.magnitude(), stc_nrm)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=101, l1_ratio=0.03)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
"""Test (TF-)MxNE with a sphere forward and volumic source space."""
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
cov = read_cov(fname_cov)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
info = evoked.info
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
sphere=(0.0, 0.0, 0.0, 0.08),
bem=None, mindist=5.0,
exclude=2.0, sphere_units='m')
fwd = mne.make_forward_solution(info, trans=None, src=src,
bem=sphere, eeg=False, meg=True)
alpha = 80.
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.0, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.2, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
# irMxNE tests
with catch_logging() as log:
stc = mixed_norm(evoked_l21, fwd, cov, alpha,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, verbose=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert_var_exp_log(log.getvalue(), 9, 11) # 10.2
# Compare orientation obtained using fit_dipole and mixed_norm
# for a simulated evoked containing a single dipole
stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
vertices=[stc.vertices[0][:1]],
tmin=stc.tmin,
tstep=stc.tstep)
evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
use_cps=True)
dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, return_as_dipoles=True)
amp_max = [np.max(d.amplitude) for d in dip_mxne]
dip_mxne = dip_mxne[np.argmax(amp_max)]
assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]
dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
assert dist < 4. # within 4 mm
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
tstep=16, wsize=32, window=0.1, alpha=alpha,
l1_ratio=l1_ratio, return_residual=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked.times, 5)
@pytest.mark.parametrize('mod', (
None, 'mult', 'augment', 'sign', 'zero', 'less'))
def test_split_gof_basic(mod):
"""Test splitting the goodness of fit."""
# first a trivial case
gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T
M = np.ones((3, 1))
X = np.ones((2, 1))
M_est = gain @ X
assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate
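# Worked out for clarity: gain has rows [0, 1], [1, 1], [1, 0], so with X all ones
# the product gain @ X is [[1], [2], [1]], which is what the assertion checks.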
if mod == 'mult':
gain *= [1., -0.5]
X[1] *= -2
elif mod == 'augment':
gain = np.concatenate((gain, np.zeros((3, 1))), axis=1)
X = np.concatenate((X, [[1.]]))
elif mod == 'sign':
gain[1] *= -1
M[1] *= -1
M_est[1] *= -1
elif mod in ('zero', 'less'):
gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T
if mod == 'zero':
X[:, 0] = [1., 0.]
else:
X[:, 0] = [1., 0.5]
M_est = gain @ X
else:
assert mod is None
res = M - M_est
gof = 100 * (1. - (res * res).sum() / (M * M).sum())
gof_split = _split_gof(M, X, gain)
assert_allclose(gof_split.sum(), gof)
want = gof_split[[0, 0]]
if mod == 'augment':
want = np.concatenate((want, [[0]]))
if mod in ('mult', 'less'):
assert_array_less(gof_split[1], gof_split[0])
elif mod == 'zero':
assert_allclose(gof_split[0], gof_split.sum(0))
assert_allclose(gof_split[1], 0., atol=1e-6)
else:
assert_allclose(gof_split, want, atol=1e-12)
@testing.requires_testing_data
@pytest.mark.parametrize('idx, weights', [
# empirically determined approximately orthogonal columns: 0, 15157, 19448
([0], [1]),
([0, 15157], [1, 1]),
([0, 15157], [1, 3]),
([0, 15157], [5, -1]),
([0, 15157, 19448], [1, 1, 1]),
([0, 15157, 19448], [1e-2, 1, 5]),
])
def test_split_gof_meg(forward, idx, weights):
"""Test GOF splitting on MEG data."""
gain = forward['sol']['data'][:, idx]
# close to orthogonal
norms = np.linalg.norm(gain, axis=0)
triu = np.triu_indices(len(idx), 1)
prods = np.abs(np.dot(gain.T, gain) / np.outer(norms, norms))[triu]
assert_array_less(prods, 5e-3) # approximately orthogonal
# first, split across time (one dipole per time point)
M = gain * weights
gof_split = _split_gof(M, np.diag(weights), gain)
assert_allclose(gof_split.sum(0), 100., atol=1e-5) # all sum to 100
assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1) # loc
# next, summed to a single time point (all dipoles active at one time pt)
weights = np.array(weights)[:, np.newaxis]
x = gain @ weights
assert x.shape == (gain.shape[0], 1)
gof_split = _split_gof(x, weights, gain)
want = (norms * weights.T).T ** 2
want = 100 * want / want.sum()
assert_allclose(gof_split, want, atol=1e-3, rtol=1e-2)
assert_allclose(gof_split.sum(), 100, rtol=1e-5)
@pytest.mark.parametrize('n_sensors, n_dipoles, n_times', [
(10, 15, 7),
(20, 60, 20),
])
@pytest.mark.parametrize('nnz', [2, 4])
@pytest.mark.parametrize('corr', [0.75])
@pytest.mark.parametrize('n_orient', [1, 3])
def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr,
n_orient, snr=4):
"""Tests SURE criterion for automatic alpha selection on synthetic data."""
rng = np.random.RandomState(0)
sigma = np.sqrt(1 - corr ** 2)
U = rng.randn(n_sensors)
# generate gain matrix
G = np.empty([n_sensors, n_dipoles], order='F')
G[:, :n_orient] = np.expand_dims(U, axis=-1)
n_dip_per_pos = n_dipoles // n_orient
for j in range(1, n_dip_per_pos):
U *= corr
U += sigma * rng.randn(n_sensors)
G[:, j * n_orient:(j + 1) * n_orient] = np.expand_dims(U, axis=-1)
# generate coefficient matrix
support = rng.choice(n_dip_per_pos, nnz, replace=False)
X = np.zeros((n_dipoles, n_times))
for k in support:
X[k * n_orient:(k + 1) * n_orient, :] = rng.normal(
size=(n_orient, n_times))
# generate measurement matrix
M = G @ X
noise = rng.randn(n_sensors, n_times)
sigma = 1 / np.linalg.norm(noise) * np.linalg.norm(M) / snr
M += sigma * noise
# inverse modeling with sure
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
alpha_grid = np.geomspace(alpha_max, alpha_max / 10, num=15)
_, active_set, _ = _compute_mxne_sure(M, G, alpha_grid, sigma=sigma,
n_mxne_iter=5, maxit=3000, tol=1e-4,
n_orient=n_orient,
active_set_size=10, debias=True,
solver="auto", dgap_freq=10,
random_state=0, verbose=False)
assert np.count_nonzero(active_set, axis=-1) == n_orient * nnz
@pytest.mark.slowtest # slow on Azure
@testing.requires_testing_data
def test_mxne_inverse_sure():
"""Tests SURE criterion for automatic alpha selection on MEG data."""
def data_fun(times):
data = np.zeros(times.shape)
data[times >= 0] = 50e-9
return data
n_dipoles = 2
raw = mne.io.read_raw_fif(fname_raw)
info = mne.io.read_info(fname_data)
info['projs'] = []
noise_cov = mne.make_ad_hoc_cov(info)
label_names = ['Aud-lh', 'Aud-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_types_forward(forward, meg="grad", eeg=False,
exclude=raw.info['bads'])
times = np.arange(100, dtype=np.float64) / raw.info['sfreq'] - 0.1
stc = simulate_sparse_stc(forward['src'], n_dipoles=n_dipoles, times=times,
random_state=1, labels=labels, data_fun=data_fun)
nave = 30
evoked = simulate_evoked(forward, stc, info, noise_cov, nave=nave,
use_cps=False, iir_filter=None)
evoked = evoked.crop(tmin=0, tmax=10e-3)
stc_ = mixed_norm(evoked, forward, noise_cov, loose=0.9, n_mxne_iter=5,
depth=0.9)
assert_array_equal(stc_.vertices, stc.vertices)
| bsd-3-clause | 22,355,592,693,327,612 | 43.17506 | 79 | 0.588079 | false |
hyphaltip/cndtools | util/genbank2fa.py | 1 | 1140 | #!/usr/bin/env python
# Copyright (c) 2006
# Colin Dewey (University of Wisconsin-Madison)
# [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
from Bio import GenBank
from Bio import Fasta
parser = GenBank.RecordParser()
iterator = GenBank.Iterator(sys.stdin, parser)
farec = Fasta.Record()
while 1:
gbrec = iterator.next()
if gbrec is None:
break
farec.sequence = gbrec.sequence
farec.title = gbrec.locus
print farec
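# Illustrative usage (the script reads GenBank records on stdin and writes FASTA to stdout):
#   python genbank2fa.py < sequences.gb > sequences.fa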
| gpl-2.0 | -2,270,592,912,808,029,700 | 26.142857 | 75 | 0.742982 | false |
cgrebeld/pymel | maya/app/startup/basic.py | 1 | 4322 | """
This module is always imported during Maya's startup. It is imported from
both the maya.app.startup.batch and maya.app.startup.gui scripts
"""
import atexit
import os.path
import sys
import traceback
import maya
import maya.app
import maya.app.commands
from maya import cmds, utils
def setupScriptPaths():
"""
Add Maya-specific directories to sys.path
"""
# Extra libraries
#
try:
# Tkinter libraries are included in the zip, add that subfolder
p = [p for p in sys.path if p.endswith('.zip')][0]
sys.path.append( os.path.join(p,'lib-tk') )
except:
pass
# Per-version prefs scripts dir (eg .../maya8.5/prefs/scripts)
#
prefsDir = cmds.internalVar( userPrefDir=True )
sys.path.append( os.path.join( prefsDir, 'scripts' ) )
# Per-version scripts dir (eg .../maya8.5/scripts)
#
scriptDir = cmds.internalVar( userScriptDir=True )
sys.path.append( os.path.dirname(scriptDir) )
# User application dir (eg .../maya/scripts)
#
appDir = cmds.internalVar( userAppDir=True )
sys.path.append( os.path.join( appDir, 'scripts' ) )
def executeSetup(filename):
"""
Look for the given file name in the search path and execute it in the "__main__"
namespace
"""
try:
for path in sys.path:
scriptPath = os.path.join( path, filename )
if os.path.isfile( scriptPath ):
import __main__
execfile( scriptPath, __main__.__dict__ )
except Exception, err:
# err contains the stack of everything leading to execfile,
# while sys.exc_info returns the stack of everything after execfile
try:
# extract the stack trace for the current exception
etype, value, tb = sys.exc_info()
tbStack = traceback.extract_tb(tb)
finally:
del tb # see warning in sys.exc_type docs for why this is deleted here
sys.stderr.write("Failed to execute %s\n" % filename)
sys.stderr.write("Traceback (most recent call last):\n")
# format the traceback, excluding our current level
result = traceback.format_list( tbStack[1:] ) + traceback.format_exception_only(etype, value)
sys.stderr.write(''.join(result))
def executeUserSetup():
executeSetup('userSetup.py')
def executeSiteSetup():
executeSetup('siteSetup.py')
# Set up sys.path to include Maya-specific user script directories.
setupScriptPaths()
# Set up string table instance for application
maya.stringTable = utils.StringTable()
# Set up auto-load stubs for Maya commands implemented in libraries which are not yet loaded
maya.app.commands.processCommandList()
# Set up the maya logger before userSetup.py runs, so that any custom scripts that
# use the logger will have it available
utils.shellLogHandler()
if not os.environ.has_key('MAYA_SKIP_USERSETUP_PY'):
# Run the user's userSetup.py if it exists
executeSiteSetup()
executeUserSetup()
# Register code to be run on exit
atexit.register( maya.app.finalize )
# Copyright (C) 1997-2010 Autodesk, Inc., and/or its licensors.
# All rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its licensors,
# which is protected by U.S. and Canadian federal copyright law and by
# international treaties.
#
# The Data is provided for use exclusively by You. You have the right to use,
# modify, and incorporate this Data into other products for purposes authorized
# by the Autodesk software license agreement, without fee.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND. AUTODESK
# DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTIES
# INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR ARISING FROM A COURSE
# OF DEALING, USAGE, OR TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS
# LICENSORS BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK AND/OR ITS
# LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY OR PROBABILITY OF SUCH DAMAGES.
| bsd-3-clause | 1,507,193,115,536,711,700 | 36.912281 | 101 | 0.696668 | false |
loxodes/fairbanks_hackathon_landsat_viewer | landsat_theater.py | 1 | 6073 | # jon klein, [email protected]
# utility to display landsat-8 images for the decision theater north
# created during fairbanks 2015 hackathon
# mit license
import datetime
import time
import subprocess
import pdb
import re
import ast
import os
import argparse
from PIL import Image, ImageDraw, ImageFont
from geopy.geocoders import Nominatim
LATITUDE = 0
LONGITUDE = 1
LANDSAT_DATA_PATH = "/home/kleinjt/landsat/"
LABEL_COLOR = '#FFFFFF'
LABEL_FONT = 'FreeMono.ttf'
LABEL_BASE = (500, 500)
LABEL_SIZE = 400
PROCESSED_DIR = 'processed'
ANNOTATED_DIR = 'annotated'
TILE_DIR = '/home/kleinjt/repos/fairbanks_hackathon_landsat_viewer/Leaflet.Zoomify/tile_data'
ansi_escape = re.compile(r'\x1b[^m]*m') # regular expression to strip coloring from landsat return
# return (latitude, longitude) tuple from an address or place name
def get_latlong(place):
geolocator = Nominatim()
location = geolocator.geocode(place)
return (location.latitude, location.longitude)
# search for landsat records centered on a location
def landsat_search(location, startdate = None, enddate = None, maxcloud = None, maxreturns = 16, matchpath = True):
latlong_tuple = get_latlong(location)
latitude = str(latlong_tuple[LATITUDE])
longitude = str(latlong_tuple[LONGITUDE])
command = ['landsat', 'search']
command.append('--lat')
command.append(latitude)
command.append('--lon')
command.append(longitude)
if maxcloud:
command.append('--cloud')
command.append(str(maxcloud))
if maxreturns:
command.append('--limit')
command.append(str(maxreturns))
if startdate:
command.append('--start')
startdate = startdate.strftime("%m/%d/%Y")
command.append(startdate)
if enddate:
command.append('--end')
enddate = enddate.strftime("%m/%d/%Y")
command.append(enddate)
print ' '.join(command)
search = subprocess.check_output(command)
search = ansi_escape.sub('', search)
scene_dict = ast.literal_eval('\n'.join(search.split('\n')[1:-4]))
assert scene_dict['status'] == 'SUCCESS'
landsat_results = scene_dict['results']
landsat_result_dates = [time.strptime(lr['date'], "%Y-%m-%d") for lr in landsat_results]
# sort landsat results by date
landsat_records = [landsat_results for landsat_result_dates, landsat_results in sorted(zip(landsat_result_dates, landsat_results))]
# the landsat may fly over a spot using different paths, we might want to limit the search to records that use the same path
if matchpath:
path_matches = []
latest_path = landsat_records[-1]['path']
for record in landsat_records:
if record['path'] == latest_path:
path_matches.append(record)
landsat_records = path_matches
print('finished search')
return landsat_records
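# For reference (lat/lon and option values below are illustrative only), the command
# assembled above resembles:
#   landsat search --lat 64.84 --lon -147.72 --cloud 20 --limit 16 --start 01/01/2014
# Its coloured output is stripped with ansi_escape and the result dictionary is
# recovered with ast.literal_eval.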
def landsat_download(landsat_records, bands = None, process = True, pansharpen = False):
command = ['landsat', 'download']
if process:
command.append('--process')
if pansharpen:
command.append('--pansharpen')
if bands:
command.append('--bands')
command.append(bands)
for record in landsat_records:
print('adding sceneID {} to download list'.format(record['sceneID']))
command.append(record['sceneID'])
print ' '.join(command)
print('starting download and processing, this may take some time...')
download = subprocess.check_output(command)
print('download and processing complete')
# find filename for landsat record image, create directory structure if it doesn't exist
def record_image_filename(record, imgdir, band = '432'):
ext = 'TIF'
if imgdir == ANNOTATED_DIR:
ext = 'PNG'
filename = '{}_bands_{}.{}'.format(record['sceneID'], band, ext)
directory = os.path.join(LANDSAT_DATA_PATH, imgdir, record['sceneID'])
if not os.path.exists(directory):
os.makedirs(directory)
full_filename = os.path.join(directory, filename)
return full_filename
# annotate processed images with date and location, then save them to ANNOTATED_DIR
def annotate_landsat_images(landsat_records, bands = '432', location = '', downsize = False, tile = False):
for record in landsat_records:
print('annotating {}'.format(record['date']))
filename = record_image_filename(record, PROCESSED_DIR)
outfile = record_image_filename(record, ANNOTATED_DIR)
record_file = open(filename, 'rb')
record_image = Image.open(filename)
draw = ImageDraw.Draw(record_image)
font = ImageFont.truetype(LABEL_FONT, 144)
label = 'Landsat {}\n{}, Band {}\n{}'.format(record['sat_type'], record['date'], bands, location)
draw.text(LABEL_BASE, label, fill = LABEL_COLOR, font = font)
# resize image for less memory usage..
if downsize:
newsize = (int(record_image.width * downsize), int(record_image.height * downsize))
record_image = record_image.resize(newsize) # resize() returns a new image rather than modifying in place
record_image.save(outfile, 'png')
if tile:
tilename = record['sceneID']
tiledir = os.path.join(TILE_DIR, tilename)
if not os.path.exists(tiledir):
os.makedirs(tiledir)
command = ['tileup', '--in', outfile, '--output-dir', tiledir, '--prefix', tilename, '--verbose', '--auto-zoom', '6']
output = subprocess.check_output(command)
if __name__ == '__main__':
# see https://pyglet.readthedocs.org/en/latest/programming_guide/windowing.html
#display = platform.get_display(display_name)
#window = pyglet.window.Window(display = display)
#screens = display.get_screens()
#img = pyglet.image.load('test.jpg')
location = 'Chiniak, AK'
startdate = datetime.datetime(2014, 1, 1)
records = landsat_search(location, startdate = startdate, maxreturns = 20)
landsat_download(records)
annotate_landsat_images(records, location = location)
pdb.set_trace()
| mit | 6,940,265,505,476,265,000 | 33.117978 | 135 | 0.65503 | false |
SweetPalma/Perver | perver.py | 1 | 18147 | #!/usr/bin/python
# coding: utf-8
# Perver - tiny Python 3 server for perverts.
# Check README and LICENSE for details.
from sys import platform as os_platform
from hashlib import sha1 as hash_id
from urllib.parse import unquote
from mimetypes import guess_type
from traceback import format_exc
from functools import wraps
import threading as thread
import concurrent.futures
import logging as log
import asyncio
import base64
import time
import sys
import os
import re
# Version control:
__author__ = 'SweetPalma'
__version__ = '0.25'
# Custom internal exceptions:
class PerverException(Exception):
def __init__(self, message):
self.message = str(message)
# Handling HTTP requests:
class PerverHandler:
# Path substitution pattern:
path_pattern = re.compile(r'(\{.+?\})')
# Making server link:
def __init__(self, server):
self.server = server
# Handling requests:
@asyncio.coroutine
def handle_request(self, reader, writer):
# Preparing basic values:
peername = writer.get_extra_info('peername')
ip, port = peername[0], peername[1]
# Client basic values:
self.ip = ip
self.port = port
self.reader = reader
self.writer = writer
self.time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# Client info, used in logging:
client_info = ' '.join([
self.time,
self.ip,
])
# Terminator shortcut:
killer = PerverException
# Handling:
try:
# Reading header until EOF:
header, length = b'', 0
while True:
try:
# Reading:
line = yield from reader.readline()
# Setting request type and maximal request size at start:
if len(header) == 0:
if line.startswith(b'POST'):
request_type = b'POST'
request_max = self.server.post_max
else:
request_type = b'GET'
request_max = self.server.get_max
# Setting break:
if line == b'\r\n' or not line:
break
# Reading content length:
if line.startswith(b'Content-Length'):
length = int(line.split(b':')[1])
# Reading header:
header = header + line
# Some spooky errors during reading:
except:
break
# Reading content:
content = b''
if 0 < length < request_max:
content = yield from reader.readexactly(length)
# Close connection in case of big file:
elif length > request_max:
self.writer.close()
raise killer('REQUEST IS TOO BIG')
# Parsing data:
self.client = yield from self.build_client(header, content)
client = self.client
# In case of disconnection:
if not client:
self.writer.close()
raise killer('CLIENT CLOSED CONNECTION')
# Logging full information:
client_info = client_info + ' ' + ' '.join([
client.type,
client.path,
])
# Checking routing:
route_post = self.check_route(client.path, self.server.route_post)
route_get = self.check_route(client.path, self.server.route_get)
if client.type == 'POST' and route_post:
raise killer((yield from self.respond_script(*route_post)))
if client.type == 'GET' and route_get:
raise killer((yield from self.respond_script(*route_get)))
# Checking static files:
for dir, real in self.server.route_static.items():
if client.path.startswith(dir):
filepath = client.path.replace(dir, real, 1)
raise killer((yield from self.respond_file(filepath[1:])))
# Routing 404 error:
raise killer((yield from self.respond_error(404)))
# Timeout/Cancelled:
except concurrent.futures._base.CancelledError:
yield from self.respond_error(500)
log.info(client_info + ' TIMED OUT')
# Terminator:
except killer as exception:
log.info(client_info + ' ' + exception.message)
# Sending file:
@asyncio.coroutine
def respond_file(self, path):
try:
with open(path, "rb") as file:
size = os.path.getsize(path)
return (yield from self.respond(
status = 200,
content = file.read(),
type = self.get_mime(path),
length = size
))
# No file found:
except IOError:
return (yield from self.respond_error(404))
# Sending error message:
@asyncio.coroutine
def respond_error(self, number, custom=None):
error = {
400: 'Bad Request',
404: 'Not Found',
500: 'Internal Error',
}
error_text = number in error and error[number] or 'Unknown Error'
error_cont = str(number) + ' ' + error_text
return (yield from self.respond(number, error_cont))
# Executing client script and sending it response:
@asyncio.coroutine
def respond_script(self, script, keys={}):
script_result = (yield from script(self.client, **keys)) or b''
return (yield from self.respond(
status = self.client.status,
content = script_result,
header = self.client.header,
type = self.client.mime
))
# Pure data response:
@asyncio.coroutine
def respond(self, status, content=b'', type='text/html', length=None, header={}):
# Forming header:
encoding = self.server.encoding
self.header = 'HTTP/1.1 ' + str(status) + '\r\n'
self.form_header('Accept-Charset', encoding)
self.form_header('Server', 'Perver/' + __version__)
# Setting mime type (and encoding for text):
if type.startswith('text/'):
ctype = type + ';charset=' + encoding
else:
ctype = type
self.form_header('Content-Type', ctype)
# Working with custom headers:
for key, value in header.items():
self.form_header(key, value)
# Encoding unicode content:
if not isinstance(content, bytes):
content = content.encode(encoding)
# Forming content length:
length = length or len(content)
self.form_header('Content-Length', str(length))
# Forming response:
header = self.header.encode(encoding)
response = header + b'\r\n' + content + b'\r\n'
# Go:
self.writer.write(response)
self.writer.write_eof()
# Done:
return status
# Making client ID using cut SHA hash on client IP and User-Agent:
def get_id(self, clnt):
ident = str(clnt.ip) + str(clnt.agent)
ident_encoded = ident.encode(self.server.encoding)
hashed = hash_id(ident_encoded).digest()[:self.server.length_id]
cooked = base64.urlsafe_b64encode(hashed).decode(self.server.encoding)
return cooked[:-2] # Strip the two trailing '=' padding characters for readability.
# Power of regexp!
def check_route(self, path, map):
# Pure path:
if path in map:
return (map[path], {})
# Path with substitutions:
right_path, groups = None, sys.maxsize
for route in map:
# Removing retarded slash in the end of path:
path = path.endswith('/') and path[:-1] or path
# Patterns:
path_pattern = '^' + self.path_pattern.sub('([^/]+)', route) + '$'
matched = re.match(path_pattern, path)
# Testing route:
if matched:
keys = [key[1:-1] for key in self.path_pattern.findall(route)]
values = list(matched.groups())
if len(values) < groups:
groups = len(values)
right_path = (map[route], dict(zip(keys, values)))
# In case of fail:
return right_path
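# Illustrative example (hypothetical route): with '/user/{id}/posts' registered, a request
# for '/user/42/posts' is matched against the pattern '^/user/([^/]+)/posts$' and
# check_route returns (handler, {'id': '42'}).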
# Appending certain header lines:
def form_header(self, arg, var):
self.header = self.header + arg + ': ' + var + '\r\n'
# Retrieving type:
def get_mime(self, path):
fname, extension = os.path.splitext(path)
if extension == '':
return guess_type(path)[0] or 'text/html'
else:
return guess_type(path)[0] or 'application'
# Parsing GET and COOKIES:
@asyncio.coroutine
def parse(self, path):
# Preparing %key%=%value% regex:
get_word = '[^=;&?]'
pattern = '(%s+)=(%s+)' % (get_word, get_word)
# Unquoting map:
unq = lambda x: map(unquote, x)
# Replacing retarded pluses to spaces in path:
path = path.replace('+', ' ')
# Working:
matched = [unq(x) for x in re.findall(pattern, path)]
return dict(matched)
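# Illustrative example: parsing 'a=1&b=hello+world' yields {'a': '1', 'b': 'hello world'}
# (pluses become spaces and both keys and values are URL-unquoted).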
# Parsing POST multipart:
@asyncio.coroutine
def parse_post(self, content, type, boundary):
# Establishing default encoding:
encoding = self.server.encoding
# Parsing multipart:
if type == 'multipart/form-data':
# Splitting request to fields:
fields = content.split(boundary)
fields_dict = {}
# Turning `em to dictionary:
for field in fields:
# Checking:
field_rows = field.split(b'\r\n\r\n')
if len(field_rows) == 2:
header, value = field_rows
value = value[:-2]
# Decoding key:
key = re.findall(b';[ ]*name="([^;]+)"', header)[0]
key = key.decode(encoding)
# Checking content-type:
ctype = re.search(b'Content-Type: ([^;]+)$', header)
# File upload field:
if ctype:
if value == b'' or value == b'\r\n':
continue
ctype = ctype.group()
fname = re.findall(b';[ ]*filename="([^;]+)"', header)
fname = len(fname) == 1 and fname[0] or b'unknown'
fields_dict[key] = {
'filename': fname.decode(encoding),
'mime': ctype.decode(encoding),
'file': value,
}
# Text field:
else:
fields_dict[key] = value.decode(encoding)
return fields_dict
# Parsing average urlencoded:
else:
if isinstance(content, bytes):
content = content.decode(encoding)
return self.parse(content)
# Parsing client data:
@asyncio.coroutine
def build_client(self, header_raw, content_raw=b''):
# Safe dict values:
def safe_dict(dictionary, value, default):
if value in dictionary:
return dictionary[value]
else:
return default
# Decoding:
try:
# Decoding header:
header_decoded = header_raw.decode(self.server.encoding)
# Three basic values: request type, path and version:
pattern = r'^(GET|POST) ([A-Za-z0-9_.~?&%/\-]+) (HTTP/1.1|HTTP/1.0)'
unpacked = re.findall(pattern, header_decoded)
if len(unpacked) > 0:
type, path, version = re.findall(pattern, header_decoded)[0]
else:
raise PerverException('WRONG CLIENT HEAD')
# Splitting GET and PATH:
if '?' in path:
path, GET = path.split('?')
else:
GET = ''
# Raw header to header dictionary:
pattern = '([^:]+):[ ]*(.+)\r\n'
header = dict(re.findall(pattern, header_decoded))
# Basic client variables:
client = PerverClient()
client.version = version
client.type, client.path = type, unquote(path)
client.path_dir = '/'.join(unquote(path).split('/')[:-1])
# Client header:
client.header_raw, client.content_raw = header_raw, content_raw
client.content_type = safe_dict(header, 'Content-Type', '')
client.content_length = safe_dict(header, 'Content-Length', 0)
client.agent = safe_dict(header, 'User-Agent', 'Unknown')
client.mime = self.get_mime(client.path)
client.form_type = client.content_type.split(';')[0]
# Server client values:
client.ip, client.port, client.time = self.ip, self.port, self.time
client.id = self.get_id(client)
# POST boundary:
boundary = re.findall('boundary=(-*[0-9]*)', client.content_type)
if len(boundary) > 0:
boundary = boundary[0].encode(self.server.encoding)
else:
boundary = b''
# POST/GET/COOKIES:
client.get = yield from self.parse(GET)
client.post = yield from self.parse_post(content_raw, client.form_type, boundary)
client.cookie = yield from self.parse(safe_dict(header, 'Cookie', ''))
# Client ID cookie, can be overrided later:
client.header['Set-Cookie'] = 'id=' + client.id
# Client server-side container:
if not client.id in self.server.client:
self.server.client[client.id] = {}
client.container = self.server.client[client.id]
# Fixing client path dir:
if client.path_dir == '':
client.path_dir = '/'
# Done!
return client
# In case of fail:
except BaseException as exc:
log.warning('Error parsing user request.')
yield from self.respond_error(400)
raise exc
# Script client:
class PerverClient:
# GET/POST arguments:
get = {}
post = {}
# Client headers:
status = 200
header = {}
cookie = {}
mime = 'text/html'
# Redirection:
def redirect(self, page):
""" Redirects client to a certain page using 302 status code. """
self.header['Location'] = page
self.status = 302
return 'Redirecting...'
# Templating:
def template(self, text, **replace):
""" Used in templating - works same as str.format. """
return text.format(**replace)
# Rendering page:
def render(self, filename, **replace):
""" Same as template, but used in files. Returns templated file. """
file = open(filename, 'r')
return self.template(file.read(), **replace)
# Retrieving file:
def file(self, filename):
""" Simply returns file contents, binary. """
self.mime = guess_type(filename)[0]
file = open(filename, 'rb')
return file.read()
# Own header:
def set_header(self, key, value):
""" Sets custom client HTTP header. """
self.header[key] = value
# Cookies:
def set_cookie(self, name, value):
""" Sets custom client cookie, overriding default Perver ID Cookie. """
self.header['Set-Cookie'] = name + '=' + value +';'
# Status:
def set_status(self, status):
""" Sets custom response status, overriding default 200. """
self.status = status
# Mime:
def set_mime(self, mime):
""" Sets custom mime response. """
self.mime = mime
# Making HTML template:
def html(self, body, head='', doctype='html'):
""" HTML-correct template for nice pages. """
doctype = '<!DOCTYPE %s>' % doctype
head = '\r\n'.join(['<head>', head, '</head>'])
body = '\r\n'.join(['<body>', body, '</body>'])
return '\r\n'.join([doctype, head, body])
# Making forms:
def form(self, action, method, *inputs, id='', multipart=False):
""" Used for building forms. """
if multipart:
enctype='multipart/form-data'
else:
enctype='application/x-www-form-urlencoded'
form_desc = (action, method, id, enctype)
html = '<form action="%s" method="%s" id="%s" enctype="%s">' % form_desc
inputs = [list(inp.items()) for inp in inputs]
for input in inputs:
args = ' '.join('%s="%s"' % arg for arg in input)
html = '\r\n'.join([html, '<input %s><br>' % args])
return ''.join([html, '</form>'])
# Multipart form:
def form_multipart(self, *args, **kargs):
""" Works same as previous, but with multipart argument set to True."""
kargs['multipart'] = True
return self.form(*args, **kargs)
# Part of the previous function:
def input(self, name, **kargs):
""" Single form input. """
return dict(name=name, **kargs)
# Input submit:
def input_submit(self, value='Submit', **kargs):
""" Form submit button. """
return dict(type='submit', value=value, **kargs)
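# Illustrative example (hypothetical field names): inside a handler,
#   client.form('/login', 'POST', client.input('user'), client.input_submit())
# expands to an HTML <form> with a single input named "user" and a submit button,
# urlencoded unless form_multipart is used instead.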
# Perver Server itself:
class Perver:
# PARAMETERS:
# Main server values:
encoding = 'utf-8'
backlog = 5
timeout = 30
# Maximal requests length:
get_max = 1024 * 8
post_max = 1024 * 1024 * 100
# Client ID length:
length_id = 10
# I highly recommend not to change this value.
# Routing paths:
route_get = {}
route_post = {}
route_static = {}
# Active clients list:
client = {}
# METHODS:
# Routing GET:
# DECORATOR:
def get(self, path):
""" Binds all GET requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_get[path] = wrapper
return wrapper
return decorator
# Routing POST:
# DECORATOR:
def post(self, path):
""" Binds all POST requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_post[path] = wrapper
return wrapper
return decorator
# Global routing:
# DECORATOR:
def route(self, path):
""" Binds all POST/GET requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_post[path] = wrapper
self.route_get[path] = wrapper
return wrapper
return decorator
# Adding static route:
def static(self, web, local):
""" Uses local path for serving static files for web requests. """
local = local.replace('\\', '/')
if not (local.startswith('/') and os.path.isabs(local)):
local = '/' + local
if not local.endswith('/'):
local = local + '/'
self.route_static[web] = local
# Starting:
def start(self, host='', port=80):
""" Starts the (mostly) infinite loop of server. """
# Configuring output:
self.host, self.port = host, port
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
# Nice header for Windows:
if os_platform == 'win32':
os.system('title Perver v' + __version__)
# Trying running:
try:
self._loop = asyncio.get_event_loop()
self._server = asyncio.start_server(
self.handler,
host=host,
port=port,
backlog=self.backlog,
reuse_address=True,
)
self._server = self._loop.run_until_complete(self._server)
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
log.info('Perver has started at ' + start_time + '.')
self._loop.run_forever()
# In case of Skype on 80 port, access denials and other socket errors:
except OSError:
log.error('OS error, probably server is already running at that port \
or user is not sudoer.')
# Stop?
def stop(self):
""" Stops the Perver. """
self._server.close()
self._loop.stop()
# HTTP request handler:
@asyncio.coroutine
def handler(self, reader, writer):
try:
handler = PerverHandler(self)
yield from asyncio.wait_for(
handler.handle_request(reader, writer),
timeout=self.timeout
)
except KeyboardInterrupt:
log.warning('Interrupted by user.')
self.stop()
except SystemExit:
self.stop()
except asyncio.TimeoutError:
pass
except:
log.warning('Exception caught! \r\n' + format_exc())
# Pythonic async database
class PerverDB:
# Initialization:
def __init__(self, filename):
pass
# Not standalone:
if __name__ == '__main__':
print('Perver is not a standalone application. Use it as framework.')
print('Check "github.com/SweetPalma/Perver" for details.')
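# A minimal usage sketch (assuming the module is importable as `perver`; the route path
# and markup are illustrative, not part of this file):
#
#   from perver import Perver
#   app = Perver()
#
#   @app.get('/hello/{name}')
#   def hello(client, name):
#       return client.html('Hello, ' + name)
#
#   app.start(port=8080)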
| mit | 1,527,089,193,175,442,400 | 25.686765 | 87 | 0.639885 | false |
sigven/pcgr | src/pcgr/pcgr_vcfanno.py | 1 | 17190 | #!/usr/bin/env python
import argparse
import subprocess
from cyvcf2 import VCF
import random
import annoutils
import os
import re
import sys
logger = annoutils.getlogger('pcgr-vcfanno')
global debug
def __main__():
parser = argparse.ArgumentParser(description='Run brentp/vcfanno - annotate a VCF file against multiple VCF files in parallel', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('query_vcf', help='Bgzipped input VCF file with query variants (SNVs/InDels)')
parser.add_argument('out_vcf', help='Output VCF file with appended annotations from multiple VCF files')
parser.add_argument('pcgr_db_dir', help='PCGR assembly-specific data directory')
parser.add_argument('--num_processes', help="Number of processes vcfanno can use during annotation", default=4)
parser.add_argument("--docm",action = "store_true", help="Annotate VCF with annotations from Database of Curated Mutations")
parser.add_argument("--clinvar",action = "store_true", help="Annotate VCF with annotations from ClinVar")
parser.add_argument("--ncer",action = "store_true", help="Annotate VCF with ranking of variant deleteriousness in non-coding regions (ncER)")
parser.add_argument('--dbmts',action = "store_true", help="Annotate VCF file with variants predicted to cause loss/gain of miRNA target sites in 3'UTR regions")
parser.add_argument('--gerp',action = "store_true", help="Annotate VCF file with GERP RS scores (cancer predisposition gene/SF/GWAS loci only)")
parser.add_argument("--dbnsfp",action = "store_true", help="Annotate VCF with annotations from database of non-synonymous functional predictions")
parser.add_argument("--tcga",action = "store_true", help="Annotate VCF with variant frequencies from the The Cancer Genome Atlas")
parser.add_argument("--tcga_pcdm",action = "store_true", help="Annotate VCF with putative cancer driver mutations from The Cancer Genome Atlas")
parser.add_argument("--chasmplus", action="store_true",help="Annotate VCF with putative cancer driver mutations from CHASMplus algorithm")
parser.add_argument("--civic",action = "store_true", help="Annotate VCF with annotations from the Clinical Interpretation of Variants in Cancer database")
parser.add_argument("--cgi",action = "store_true", help="Annotate VCF with annotations from the Cancer bioMarkers database")
parser.add_argument("--icgc",action = "store_true", help="Annotate VCF with known variants found in the ICGC-PCAWG sequencing project")
parser.add_argument("--cancer_hotspots",action = "store_true", help="Annotate VCF with mutation hotspots from cancerhotspots.org")
parser.add_argument("--uniprot",action = "store_true", help="Annotate VCF with protein functional features from the UniProt Knowledgebase")
parser.add_argument("--pcgr_onco_xref",action = "store_true", help="Annotate VCF with transcript annotations from PCGR (targeted drugs, protein complexes, cancer gene associations, etc)")
parser.add_argument("--gwas",action = "store_true", help="Annotate VCF against known loci associated with cancer, as identified from genome-wide association studies (GWAS)")
parser.add_argument("--rmsk",action = "store_true", help="Annotate VCF against known sequence repeats, as identified by RepeatMasker (rmsk)")
parser.add_argument("--simplerepeats",action = "store_true", help="Annotate VCF against known sequence repeats, as identified by Tandem Repeats Finder (simplerepeats)")
parser.add_argument("--winmsk",action = "store_true", help="Annotate VCF against known sequence repeats, as identified by Windowmasker (winmsk)")
parser.add_argument("--gnomad_cpsr",action = "store_true",help="Annotate VCF with population-specific allelic counts and frequencies in cancer predisposition genes (gnomAD non-cancer subset)")
parser.add_argument("--panel_normal_vcf",dest="panel_normal_vcf",help="Annotate VCF with germline calls from panel of normals")
parser.add_argument("--keep_logs",action = "store_true")
parser.add_argument('--debug',action='store_true',default=False, help='Print full docker commands to log')
args = parser.parse_args()
global debug
debug = args.debug
query_info_tags = get_vcf_info_tags(args.query_vcf)
vcfheader_file = args.out_vcf + '.tmp.' + str(random.randrange(0,10000000)) + '.header.txt'
conf_fname = args.out_vcf + '.tmp.conf.toml'
print_vcf_header(args.query_vcf, vcfheader_file, chromline_only = False)
run_vcfanno(args.num_processes, args.query_vcf, args.panel_normal_vcf, query_info_tags, vcfheader_file,
args.pcgr_db_dir, conf_fname, args.out_vcf, args.docm, args.clinvar, args.ncer, args.dbmts, args.gerp, args.tcga, args.tcga_pcdm,
args.chasmplus, args.dbnsfp, args.civic, args.cgi, args.icgc, args.uniprot, args.cancer_hotspots,
args.pcgr_onco_xref, args.gwas, args.rmsk, args.simplerepeats, args.winmsk, args.gnomad_cpsr, args.keep_logs)
def prepare_vcfanno_configuration(vcfanno_data_directory, conf_fname, vcfheader_file, logger, datasource_info_tags, query_info_tags, datasource):
for t in datasource_info_tags:
if t in query_info_tags:
logger.warning("Query VCF has INFO tag " + str(t) + ' - this is also present in the ' + str(datasource) + ' VCF/BED annotation file. This tag will be overwritten if not renamed in the query VCF')
append_to_conf_file(datasource, datasource_info_tags, vcfanno_data_directory, conf_fname)
append_to_vcf_header(vcfanno_data_directory, datasource, vcfheader_file)
def run_vcfanno(num_processes, query_vcf, panel_normal_vcf, query_info_tags, vcfheader_file, pcgr_db_directory, conf_fname,
output_vcf, docm, clinvar, ncer, dbmts, gerp, tcga, tcga_pcdm, chasmplus, dbnsfp, civic, cgi, icgc, uniprot, cancer_hotspots,
pcgr_onco_xref, gwas, rmsk, simplerepeats, winmsk, gnomad_cpsr, keep_logs):
"""
Function that annotates a VCF file with vcfanno against a user-defined set of germline and somatic VCF files
"""
civic_info_tags = ["CIVIC_ID","CIVIC_ID_SEGMENT"]
cgi_info_tags = ["CGI_ID","CGI_ID_SEGMENT"]
icgc_info_tags = ["ICGC_PCAWG_OCCURRENCE","ICGC_PCAWG_AFFECTED_DONORS"]
docm_info_tags = ["DOCM_PMID"]
tcga_info_tags = ["TCGA_FREQUENCY","TCGA_PANCANCER_COUNT"]
tcga_pcdm_info_tags = ["PUTATIVE_DRIVER_MUTATION"]
chasmplus_info_tags = ["CHASMPLUS_DRIVER","CHASMPLUS_TTYPE","CHASMPLUS_PANCAN"]
ncer_info_tags = ["NCER_PERCENTILE"]
clinvar_info_tags = ["CLINVAR_MSID","CLINVAR_PMID","CLINVAR_CLNSIG","CLINVAR_VARIANT_ORIGIN","CLINVAR_CONFLICTED","CLINVAR_UMLS_CUI","CLINVAR_HGVSP",
"CLINVAR_UMLS_CUI_SOMATIC","CLINVAR_CLNSIG_SOMATIC","CLINVAR_PMID_SOMATIC","CLINVAR_ALLELE_ID","CLINVAR_MOLECULAR_EFFECT",
"CLINVAR_REVIEW_STATUS_STARS","CLINVAR_CLASSIFICATION","CLINVAR_ENTREZGENE"]
cancer_hotspots_info_tags = ["MUTATION_HOTSPOT","MUTATION_HOTSPOT_TRANSCRIPT","MUTATION_HOTSPOT_CANCERTYPE"]
dbnsfp_info_tags = ["DBNSFP"]
uniprot_info_tags = ["UNIPROT_FEATURE"]
pcgr_onco_xref_info_tags = ["PCGR_ONCO_XREF"]
gwas_info_tags = ["GWAS_HIT"]
rmsk_info_tags = ["RMSK_HIT"]
simplerepeats_info_tags = ["SIMPLEREPEATS_HIT"]
winmsk_info_tags = ["WINMASKER_HIT"]
panel_normal_tags = ["PANEL_OF_NORMALS"]
dbmts_info_tags = ["DBMTS"]
gerp_info_tags = ['GERP_SCORE']
gnomad_cpsr_tags = []
gnomad_cpsr_tags.append('NON_CANCER_AC_GLOBAL')
gnomad_cpsr_tags.append('NON_CANCER_NHOMALT_GLOBAL')
gnomad_cpsr_tags.append('NON_CANCER_AN_GLOBAL')
gnomad_cpsr_tags.append('NON_CANCER_AF_GLOBAL')
for pop in ['ASJ','NFE','SAS','FIN','EAS','AMR','AFR','OTH']:
gnomad_cpsr_tags.append('NON_CANCER_AC_' + str(pop))
gnomad_cpsr_tags.append('NON_CANCER_AN_' + str(pop))
gnomad_cpsr_tags.append('NON_CANCER_AF_' + str(pop))
gnomad_cpsr_tags.append('NON_CANCER_NHOMALT_' + str(pop))
if icgc is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, icgc_info_tags, query_info_tags, "icgc")
if clinvar is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, clinvar_info_tags, query_info_tags, "clinvar")
if ncer is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, ncer_info_tags, query_info_tags, "ncer")
if gerp is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, gerp_info_tags, query_info_tags, "gerp")
if dbmts is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, dbmts_info_tags, query_info_tags, "dbmts")
if dbnsfp is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, dbnsfp_info_tags, query_info_tags, "dbnsfp")
if cgi is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, cgi_info_tags, query_info_tags, "cgi")
if tcga is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, tcga_info_tags, query_info_tags, "tcga")
if tcga_pcdm is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, tcga_pcdm_info_tags, query_info_tags, "tcga_pcdm")
if chasmplus is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, chasmplus_info_tags, query_info_tags, "chasmplus")
if civic is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, civic_info_tags, query_info_tags, "civic")
if cancer_hotspots is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, cancer_hotspots_info_tags, query_info_tags, "cancer_hotspots")
if uniprot is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, uniprot_info_tags, query_info_tags, "uniprot")
if docm is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, docm_info_tags, query_info_tags, "docm")
if pcgr_onco_xref is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, pcgr_onco_xref_info_tags, query_info_tags, "pcgr_onco_xref")
if gwas is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, gwas_info_tags, query_info_tags, "gwas")
if rmsk is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, rmsk_info_tags, query_info_tags, "rmsk")
if simplerepeats is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, simplerepeats_info_tags, query_info_tags, "simplerepeats")
if winmsk is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, winmsk_info_tags, query_info_tags, "winmsk")
if gnomad_cpsr is True:
prepare_vcfanno_configuration(pcgr_db_directory, conf_fname, vcfheader_file, logger, gnomad_cpsr_tags, query_info_tags, "gnomad_cpsr")
if not panel_normal_vcf is None:
if "PANEL_OF_NORMALS" in query_info_tags:
logger.warning("Query VCF has INFO tag \"PANEL_OF_NORMALS\" - this is also present in the panel of normal VCF file. This tag will be overwritten if not renamed in the query VCF")
append_to_vcf_header(pcgr_db_directory, "panel_of_normals", vcfheader_file)
fh = open(conf_fname,'a')
fh.write('[[annotation]]\n')
fh.write('file="' + str(panel_normal_vcf) + '"\n')
fields_string = 'fields = ["' + '","'.join(panel_normal_tags) + '"]'
ops = ['self'] * len(panel_normal_tags)
ops_string = 'ops=["' + '","'.join(ops) + '"]'
fh.write(fields_string + '\n')
fh.write(ops_string + '\n\n')
fh.close()
out_vcf_vcfanno_unsorted1 = output_vcf + '.tmp.unsorted.1'
query_prefix = re.sub(r'\.vcf.gz$','',query_vcf)
print_vcf_header(query_vcf, vcfheader_file, chromline_only = True)
command1 = "vcfanno -p=" + str(num_processes) + " " + str(conf_fname) + " " + str(query_vcf) + " > " + str(out_vcf_vcfanno_unsorted1) + " 2> " + str(query_prefix) + '.vcfanno.log'
check_subprocess(command1)
check_subprocess('cat ' + str(vcfheader_file) + ' > ' + str(output_vcf))
check_subprocess('cat ' + str(out_vcf_vcfanno_unsorted1) + ' | grep -v \'^#\' >> ' + str(output_vcf))
if not keep_logs is True:
check_subprocess('rm -f ' + str(output_vcf) + '.tmp*')
check_subprocess('bgzip -f ' + str(output_vcf))
check_subprocess('tabix -f -p vcf ' + str(output_vcf) + '.gz')
def append_to_vcf_header(pcgr_db_directory, datasource, vcfheader_file):
"""
Function that appends the VCF header information for a given 'datasource' (containing INFO tag formats/descriptions, and datasource version)
"""
vcf_info_tags_file = str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.vcfanno.vcf_info_tags.txt'
check_subprocess('cat ' + str(vcf_info_tags_file) + ' >> ' + str(vcfheader_file))
def append_to_conf_file(datasource, datasource_info_tags, pcgr_db_directory, conf_fname):
"""
   Function that appends data to a vcfanno conf file ('conf_fname') according to the user-defined 'datasource'.
   The datasource defines the set of tags that will be appended during annotation
"""
fh = open(conf_fname,'a')
if datasource == 'ncer' or datasource == 'gerp':
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.bed.gz"\n')
fh.write('columns=[4]\n')
names_string = 'names=["' + '","'.join(datasource_info_tags) + '"]'
fh.write(names_string +'\n')
fh.write('ops=["mean"]\n\n')
elif datasource == 'pcgr_onco_xref' or datasource == 'uniprot' or datasource == 'rmsk':
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.bed.gz"\n')
fh.write('columns=[4]\n')
names_string = 'names=["' + '","'.join(datasource_info_tags) + '"]'
fh.write(names_string +'\n')
fh.write('ops=["concat"]\n\n')
elif datasource == 'simplerepeats' or datasource == 'winmsk':
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.bed.gz"\n')
fh.write('columns=[4]\n')
names_string = 'names=["' + '","'.join(datasource_info_tags) + '"]'
fh.write(names_string +'\n')
fh.write('ops=["flag"]\n\n')
elif datasource == 'civic':
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.bed.gz"\n')
fh.write('columns=[4]\n')
fh.write('names=["CIVIC_ID_SEGMENT"]\n')
fh.write('ops=["concat"]\n\n')
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.vcf.gz"\n')
fh.write('fields = ["CIVIC_ID"]\n')
fh.write('ops=["concat"]\n\n')
elif datasource == 'cgi':
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.bed.gz"\n')
fh.write('columns=[4]\n')
fh.write('names=["CGI_ID_SEGMENT"]\n')
fh.write('ops=["concat"]\n\n')
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.vcf.gz"\n')
fh.write('fields = ["CGI_ID"]\n')
fh.write('ops=["concat"]\n\n')
else:
fh.write('[[annotation]]\n')
fh.write('file="' + str(pcgr_db_directory) + '/' + str(datasource) + '/' + str(datasource) + '.vcf.gz"\n')
fields_string = 'fields = ["' + '","'.join(datasource_info_tags) + '"]'
ops = ['concat'] * len(datasource_info_tags)
ops_string = 'ops=["' + '","'.join(ops) + '"]'
fh.write(fields_string + '\n')
fh.write(ops_string + '\n\n')
fh.close()
return
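# Illustrative sketch of what append_to_conf_file produces (the paths and file
# names below are assumptions, not part of the pipeline itself): calling
#
#   append_to_conf_file('ncer', ['NCER_PERCENTILE'], '/data/pcgr/db', 'conf.toml')
#
# would append a block like the following to conf.toml:
#
#   [[annotation]]
#   file="/data/pcgr/db/ncer/ncer.bed.gz"
#   columns=[4]
#   names=["NCER_PERCENTILE"]
#   ops=["mean"]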
def get_vcf_info_tags(vcffile):
vcf = VCF(vcffile)
info_tags = {}
for e in vcf.header_iter():
header_element = e.info()
if 'ID' in header_element.keys() and 'HeaderType' in header_element.keys():
if header_element['HeaderType'] == 'INFO':
info_tags[str(header_element['ID'])] = 1
return info_tags
def print_vcf_header(query_vcf, vcfheader_file, chromline_only = False):
if chromline_only == True:
check_subprocess('bgzip -dc ' + str(query_vcf) + ' | egrep \'^#\' | egrep \'^#CHROM\' >> ' + str(vcfheader_file))
else:
check_subprocess('bgzip -dc ' + str(query_vcf) + ' | egrep \'^#\' | egrep -v \'^#CHROM\' > ' + str(vcfheader_file))
def check_subprocess(command):
if debug:
logger.info(command)
try:
output = subprocess.check_output(str(command), stderr=subprocess.STDOUT, shell=True)
if len(output) > 0:
print (str(output.decode()).rstrip())
except subprocess.CalledProcessError as e:
print (e.output.decode())
exit(0)
if __name__=="__main__": __main__()
| mit | 3,088,626,634,943,032,300 | 60.834532 | 204 | 0.676905 | false |
maku77/contest | codejam/2014_Round1A/A-ChargingChaos.py | 1 | 2170 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Contest: Google Code Jam - 2014 Round A [2014-04-26]
# Problem: A. Charging Chaos
# URL: https://code.google.com/codejam/contest/2984486/dashboard
# Author: Masatoshi Ohta
# Strategy:
#   Taking the xor of two bit strings reveals exactly where they differ.
#   Xor-ing the first initial configuration init[0] with each desired
#   configuration desired[i] yields a candidate switch pattern, so there
#   are N candidates (one per desired[] entry), but at this point we do
#   not yet know whether a candidate is actually correct.
#   Applying (xor-ing) a candidate to the remaining init[1..N-1] and
#   checking that every result matches some desired[] entry confirms
#   that the switch combination is valid.
#   Among the valid patterns, pick the one with the fewest 1 bits.
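#   Illustrative example (not from the problem statement): with
#     init    = [0b0011, 0b0101]
#     desired = [0b0110, 0b0000]
#   the candidate patterns are 0b0011 ^ 0b0110 = 0b0101 and
#   0b0011 ^ 0b0000 = 0b0011. Applying 0b0101 to init[1] gives 0b0000 and
#   applying 0b0011 to init[1] gives 0b0110, both of which are in desired,
#   so both patterns are valid and the answer is min(2, 2) = 2 switches.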
import sys
def read_int(): return int(sys.stdin.readline())
def read_ints(): return [int(x) for x in sys.stdin.readline().split()]
def read_strs(): return sys.stdin.readline().split()
INF = float('inf')
def count_bits(val):
count = 0
while val > 0:
if val & 1 == 1:
count += 1
val >>= 1
return count
def solve():
N, L = read_ints()
inits = [int(x, 2) for x in read_strs()]
desired = [int(x, 2) for x in read_strs()]
patterns = map(lambda x: x ^ inits[0], desired)
min_change = INF
for p in patterns:
for i in range(1, N):
if not (p ^ inits[i] in desired):
# pattern p is not acceptable by inits[i]
break
else:
# pattern p seems acceptable
c = count_bits(p)
if c < min_change:
min_change = c
if min_change == INF:
return 'NOT POSSIBLE'
else:
return min_change
if __name__ == '__main__':
T = read_int()
for i in range(T):
print('Case #{}: {}'.format(i+1, str(solve())))
| mit | -1,554,268,937,330,754,300 | 28.37931 | 70 | 0.589202 | false |
MSeifert04/nddata | nddata/nddata/mixins/ndreduce.py | 1 | 16463 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...utils.copyutils import do_copy
import numpy as np
from ..nduncertainty_stddev import StdDevUncertainty
from ..nduncertainty_var import VarianceUncertainty
from ...utils.inputvalidation import as_unsigned_integer
__all__ = ['NDReduceMixin']
class NDReduceMixin(object):
"""Mixin to provide methods for `~nddata.nddata.NDDataBase` which are \
applied along one dimension (axis) of the data.
    These methods take the ``mask`` besides the ``data`` into account and
    also calculate the error of the result.
.. note::
The ``unit`` and ``meta`` of the result will be a copy of the original
        `~nddata.nddata.NDDataBase` instance. ``wcs`` and ``flags`` are copied
        as well, but this might change because they **should** be subject to a
        reduction themselves, depending on the type of attribute.
"""
def _reduce_get_others(self):
# Meta and unit should stay the same for the reduce functions.
kwargs = {'meta': do_copy(self.meta),
'unit': self.unit,
'wcs': do_copy(self.wcs),
'flags': do_copy(self.flags)}
# TODO: WCS and Flags may also be subject to changes because of the
# reduction, but currently just copy them.
return kwargs
def reduce_average(self, axis=0, weights=None):
"""Compute the average along an axis with specified weights.
Parameters
----------
axis: positive `int`, optional
The axis (dimension) along which to compute the average. Must not
            be ``None``. If you are looking for overall statistics use:
:meth:`~nddata.nddata.mixins.NDStatsMixin.stats`.
Default is ``0``.
weights : `numpy.ndarray`-like or None, optional
The weights for averaging. Must be scalar or have the same shape as
the ``data`` or the same length as ``data.shape[axis]``. If the
weights are ``None`` it will call :meth:`reduce_mean`.
Default is ``None``.
Returns
-------
ndd : `~nddata.nddata.NDDataBase`-like
The result will have the same class as the instance this method was
called on. The results ``data`` contains the average of the
calculation while the ``mask`` is set in case any element had no
values to average and the ``uncertainty`` will be the variance of
the average (already corrected by the number of valid elements).
Examples
--------
Calculate the weighted mean of a 2 x 5 array along the first axis::
>>> import numpy as np
>>> from nddata.nddata import NDData
>>> ndd = NDData([[3, 2, 1, 1, 4], [2, 2, 2, 2, 2]],
... mask=np.array([[0, 1, 0, 1, 0], [0, 1, 0, 0, 0]],
... dtype=bool))
>>> avg = ndd.reduce_average(axis=0, weights=[1, 1.5])
>>> avg
NDData([ 2.4, 0. , 1.6, 2. , 2.8])
>>> avg.mask
array([False, True, False, False, False], dtype=bool)
>>> avg.uncertainty
VarianceUncertainty([ 0.096, 0. , 0.096, 0. , 0.384])
.. note::
The correction for the resulting uncertainty is the total number of
valid values **without** taking any degrees of freedom into
account.
"""
# If no weights are given this is essentially a mean reduce. So return
# the mean reduction result.
if weights is None:
return self.reduce_mean(axis=axis)
# To allow also list-like weights convert them to a numpy array here.
        # Since this doesn't copy existing np.arrays this is relatively cheap if
# it's already an array.
weights = np.asarray(weights)
# The axis must be integer and because of later restrictions it also
# needs to be positive.
axis = as_unsigned_integer(axis)
# Get the data and the mask from the instance attributes
data = self.data
mask = self._get_mask_numpylike()
# Setup the masked array based on the data and mask saved in the
# instance. Important profiling information about this np.any is
# described in reduce_mean. This should stay the way it is.
if np.any(mask):
marr = np.ma.array(data, mask=mask, copy=False)
avg_func = np.ma.average
else:
marr = np.array(data, copy=False)
avg_func = np.average
# Abort the call in case the array is 1D, for 1D statistics see the
# NDStatsMixin.
if marr.ndim < 2:
raise ValueError('reduce functions need the data to have more '
'than one dimension.')
# Calculate the reduced data with np.average. The weights will be
# checked in here and an appropriate exception is raised if the shape
# does not match.
red_data = avg_func(marr, axis=axis, weights=weights)
# There is no builtin ufunc to calculate the weighted standard
# deviation so we need to do use the average again. This will
# calculate the variance of the average, but we have a
# VarianceUncertainty and the user can convert it later if he wants
# standard deviations.
# To calculate the difference we need to expand the reduced dimension
# of the reduced data again otherwise broadcasting could fail.
diff = (marr - np.expand_dims(red_data, axis=axis)) ** 2
red_uncert, eff_weights = avg_func(diff, axis=axis, weights=weights,
returned=True)
# To get the variance of the mean we need to divide this reduced
# variance by the number of valid values. This number of valid values
# are contained in the "eff_weights".
# So we don't end up with division by 0 problems set the values where
# we have no valid value to 1. Since the average of the uncertainty
# contains zeros where no valid element was present - the corrected
# variance will be calculated there as 0/1 = 0 which is exactly what
# we would expect. And not the 0/0 = nan we would otherwise have.
no_valid_value = (eff_weights == 0)
eff_weights[no_valid_value] = 1
# To get the variance of the mean we divide by the number of valid
# elements.
red_uncert = VarianceUncertainty(red_uncert / eff_weights)
# TODO: In theory it could be that we need some bias (dof) correction
# here. So either allow a ddof parameter here or clearly state that
# this isn't done here!
# TODO: The number of valid elements would make a good flag array
# maybe include it?
# The "red_data" is a masked array so the resulting class should
# split data and mask by itself.
return self.__class__(red_data, uncertainty=red_uncert,
**self._reduce_get_others())
def reduce_mean(self, axis=0):
"""Compute the mean along an axis.
Parameters
----------
axis: positive `int`, optional
The axis (dimension) along which to compute the mean. Must not
            be ``None``. If you are looking for overall statistics use:
            :meth:`~nddata.nddata.mixins.NDStatsMixin.stats`.
            Default is ``0``.
Returns
-------
ndd : `~nddata.nddata.NDDataBase`-like
The result will have the same class as the instance this method was
            called on. The result's ``data`` contains the mean of the
            calculation while the ``mask`` is set in case any element had no
            values to average and the ``uncertainty`` will be the variance of
the mean (already corrected by the number of valid elements).
Examples
--------
Calculate the mean of a 2 x 5 array along the first axis::
>>> import numpy as np
>>> from nddata.nddata import NDData
>>> ndd = NDData([[3, 2, 1, 1, 4], [2, 2, 2, 2, 2]],
... mask=np.array([[0, 1, 0, 1, 0], [0, 1, 0, 0, 0]],
... dtype=bool))
>>> avg = ndd.reduce_mean(axis=0)
>>> avg
NDData([ 2.5, 0. , 1.5, 2. , 3. ])
>>> avg.mask
array([False, True, False, False, False], dtype=bool)
>>> avg.uncertainty
VarianceUncertainty([ 0.125, 0. , 0.125, 0. , 0.5 ])
.. note::
This method is identical to :meth:`reduce_average` with
``weights=None``.
.. note::
The correction for the resulting uncertainty is the total number of
valid values **without** taking any degrees of freedom into
account.
"""
# Much the same as average but without weights and instead of average
# with mean and std
axis = as_unsigned_integer(axis)
data = self.data
mask = self._get_mask_numpylike()
# np.mean and np.var work on masked arrays so can create a normal numpy
# array if no value is masked. This will probably be a lot faster.
# IMPORTANT: Line profiling shows that in case of big arrays the
# _reduce_get_mask() function takes only 0.1% of the total run-time and
# the np.any() 0-3% so this could make a difference if we special cased
# the case when no mask is present but NOT much.
        # On the other hand the np.mean on a plain numpy array is approximately
# 6-10 times faster than on masked arrays so that actually makes a huge
# difference. So even if we have a mask it could be wise to check if
# there are any masked values at all.
# Therefore: This should stay as is!
if np.any(mask):
marr = np.ma.array(data, mask=mask, copy=False)
marr_is_masked = True
else:
marr = np.array(data, copy=False)
marr_is_masked = False
# Abort the call in case the array is 1D, for 1D statistics see the
# NDStatsMixin.
if marr.ndim < 2:
raise ValueError('reduce functions need the data to have more '
'than one dimension.')
red_data = np.mean(marr, axis=axis)
# np.var and np.std have the same runtime but since we would need to
# take the square root of the number of valid values calculating the
# variance and then just dividing by the number of valid pixel is much
# faster than calculating the std and then diving by the SQRT of the
# number of valid pixel. In case someone wants the resulting
# uncertainty in standard deviations he can cast it to one!
red_uncertainty = np.var(marr, axis=axis)
        # We need to determine the number of valid pixels ourselves; fortunately
        # this number is just the sum of unmasked values along the specified
# axis. With the correction for cases where no valid values is. This
# correction is described in reduce_average.
if marr_is_masked:
n_values = (~marr.mask).sum(axis=axis)
no_valid_value = (n_values == 0)
n_values[no_valid_value] = 1
else:
# In case no values were masked the number of valid values is just
# the length of the array along the given axis.
n_values = marr.shape[axis]
red_uncertainty = VarianceUncertainty(red_uncertainty / n_values)
return self.__class__(red_data, uncertainty=red_uncertainty,
**self._reduce_get_others())
def reduce_median(self, axis=0):
"""Compute the median along an axis.
Parameters
----------
axis: positive `int`, optional
The axis (dimension) along which to compute the median. Must not
            be ``None``. If you are looking for overall statistics use:
            :meth:`~nddata.nddata.mixins.NDStatsMixin.stats`.
            Default is ``0``.
Returns
-------
ndd : `~nddata.nddata.NDDataBase`-like
The result will have the same class as the instance this method was
            called on. The result's ``data`` contains the median of the
calculation while the ``mask`` is set in case any element had no
values for the computation and the ``uncertainty`` will be the
median absolute standard deviation of the median (already corrected
by the number of valid elements).
Examples
--------
Calculate the median of a 2 x 4 array along the first axis::
>>> import numpy as np
>>> from nddata.nddata import NDData
>>> ndd = NDData([[3, 2, 1, 1], [2, 2, 2, 2]],
... mask=np.array([[0, 1, 0, 1], [0, 1, 0, 0]],
... dtype=bool))
>>> avg = ndd.reduce_median(axis=0)
>>> avg
NDData([ 2.5, 0. , 1.5, 2. ])
>>> avg.mask
array([False, True, False, False], dtype=bool)
>>> avg.uncertainty
StdDevUncertainty([ 0.52417904, 0. , 0.52417904, 0. \
])
.. note::
The correction for the resulting uncertainty is the total number of
valid values **without** taking any degrees of freedom into
account.
"""
# This method is some hybrid from average and mean reduce. Only the
# real differences are commented upon. For further details on the
# rationale see these other methods.
axis = as_unsigned_integer(axis)
data = self.data
mask = self._get_mask_numpylike()
if np.any(mask):
marr = np.ma.array(data, mask=mask, copy=False)
# np.median doesn't work on masked arrays so we need to use
# np.ma.median here
med_func = np.ma.median
marr_is_masked = True
else:
marr = np.array(data, copy=False)
med_func = np.median
marr_is_masked = False
if marr.ndim < 2:
raise ValueError('reduce functions need the data to have more '
'than one dimension.')
red_data = med_func(marr, axis=axis)
# Constant is taken from astropy mad_std
# IMPORTANT: Using the astropy.stats.mad_std would calculate the median
# again, since we already have the median along the axis we can omit
# this expensive recalculation - but then we cannot reuse mad_std. But
# especially for large masked arrays the speed gain is huge.
diff = np.abs(marr - np.expand_dims(red_data, axis=axis))
red_uncertainty = 1.482602218505602 * med_func(diff, axis=axis)
if marr_is_masked:
n_values = (~marr.mask).sum(axis=axis)
no_valid_value = (n_values == 0)
n_values[no_valid_value] = 1
else:
n_values = marr.shape[axis]
# This time we work with standard deviations because that's what
# the median absolute deviation approximates so we need to take the
# square root of the n_values correction factor
n_values = np.sqrt(n_values)
red_uncertainty = StdDevUncertainty(red_uncertainty / n_values)
        # FIXME: Strangely the result has an uncertainty different from 0 when
# all values are masked here. This is not the case for average or mean
# but it seems to be a problem with the median. I guess this is because
# the np.expand_dims doesn't preserve the mask and something weird
# happens so that the median of the "diff" doesn't realize it's all
# masked and returns something. Maybe this could be a numpy Bug but for
# now I just make it work by replacing them manually:
if marr_is_masked:
red_uncertainty.data[no_valid_value] = 0
return self.__class__(red_data, uncertainty=red_uncertainty,
**self._reduce_get_others())
| bsd-3-clause | 6,610,416,277,689,173,000 | 42.323684 | 79 | 0.586892 | false |
diophantus7/plugin.video.romwod | resources/lib/wistia.py | 1 | 1879 | import re
import requests
import xbmc
import json
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
_JSON_URL = "http://fast.wistia.com/embed/medias/%s.json"
_IFRAME_URL = "http://fast.wistia.net/embed/iframe/%s"
class ResolveError(Exception):
def __init__(self, message):
self.message = message
class WistiaExtractor:
def __init__(self, html_page, format):
self.html_page = html_page
self.video_id = self._extract_video_id()
self._format = format
def _extract_video_id(self):
bs = BeautifulSoup(self.html_page)
video_block = json.loads(bs.find('div', {'data-react-class':'VideoView'})['data-react-props'])
return video_block['video']['external_id']
#return re.search('wistia_async_([0-9a-z]*) ', str(bs)).group(1)
def _download_json(self):
s = requests.Session()
s.headers.update({'referer':_IFRAME_URL % self.video_id})
req = s.get(_JSON_URL % self.video_id)
return req.json()
def get_video_url(self):
json_data = self._download_json()
try:
url = next(d['url'] for d in json_data['media']['unnamed_assets']
if d['display_name'] == self._format and d['ext'] == 'm3u8')
except:
video_data = [d for d in json_data['media']['unnamed_assets']
if d['status'] == 2 and 'opt_vbitrate' in d
and 'display_name' in d and
'p' in d['display_name']]
if not video_data:
raise ResolveError("No video found.")
url = max(video_data,
key=lambda d: int(d['display_name'].strip('p')))['url']
xbmc.log("Fallback to url: %s" % url)
return url
| gpl-3.0 | 3,844,811,310,062,879,000 | 31.396552 | 102 | 0.548164 | false |
geobricks/geobricks_data_manager | geobricks_data_manager/config/config.py | 1 | 1641 | import logging
config = {
"settings": {
# To be used by Flask: DEVELOPMENT ONLY
"debug": True,
# Flask host: DEVELOPMENT ONLY
"host": "localhost",
# Flask port: DEVELOPMENT ONLY
"port": 5904,
# Logging configurations
"logging": {
"level": logging.INFO,
"format": "%(asctime)s | %(levelname)-8s | %(name)-20s | Line: %(lineno)-5d | %(message)s",
"datefmt": "%d-%m-%Y | %H:%M:%s"
},
# Metadata
"metadata": {
"url_create_metadata": "http://localhost:7788/v2/msd/resources/metadata",
"url_get_metadata_uid": "http://localhost:7788/v2/msd/resources/metadata/uid/<uid>",
# delete metadata
"url_delete_metadata": "http://localhost:7788/v2/msd/resources/metadata/uid/<uid>",
# get metadata
"url_get_metadata": "http://localhost:7788/v2/msd/resources/find",
"url_get_metadata_full": "http://localhost:7788/v2/msd/resources/metadata/uid/<uid>?full=true&dsd=true",
# coding system
"url_create_coding_system": "http://localhost:7788/v2/msd/resources",
"url_data_coding_system": "http://localhost:7788/v2/msd/resources/data/uid/<uid>",
# DSD
"url_overwrite_dsd_rid": "http://localhost:7788/v2/msd/resources/dsd"
},
# geoserver settings
"geoserver": {
"geoserver_master": "http://localhost:9090/geoserver/rest",
"geoserver_slaves": [],
"username": "admin",
"password": "geoserver",
}
}
}
| gpl-2.0 | 2,782,221,596,152,575,500 | 31.82 | 116 | 0.540524 | false |
open-io/oio-swift | tests/unit/common/middleware/test_hashedcontainer.py | 1 | 2935 | # Copyright (C) 2016-2020 OpenIO SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
import unittest
from swift.common import swob, utils
from swift.common.swob import Request
from oioswift.common.middleware import hashedcontainer
from oio.cli.common import clientmanager
# Hack PYTHONPATH so "test" is swift's test directory
sys.path.insert(1, os.path.abspath(os.path.join(__file__, '../../../../..'))) # noqa
from test.unit.common.middleware.helpers import FakeSwift # noqa: E402
class TestHashedContainer(unittest.TestCase):
GLOBAL_CONF = {
'sds_namespace': 'OPENIO',
'sds_default_account': 'OPENIO',
'sds_proxy_url': '127.0.0.1:666'
}
def setUp(self):
self.app = FakeSwift()
# prevent a call to oio-proxy
clientmanager.ClientManager.nsinfo = {
'options': {'flat_bitlength': '17'}}
self.hc = hashedcontainer.filter_factory(self.GLOBAL_CONF)(self.app)
def call_app(self, req, app=None):
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
req.headers.setdefault("User-Agent", "Melted Cheddar")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
with utils.closing_if_possible(body_iter):
body = b''.join(body_iter)
return status[0], headers[0], body
def _check_conversion(self, path_in, path_out):
self.app.register('PUT', path_out, swob.HTTPCreated, {})
req = Request.blank(path_in, method='PUT')
resp = self.call_app(req, app=self.hc)
self.assertEqual(resp[0], "201 Created")
self.assertEqual(self.app.calls, [('PUT', path_out)])
def test_default_config(self):
self._check_conversion(
'/prefix/229/358493922_something',
'/v1/OPENIO/6C800/prefix/229/358493922_something')
def test_custom_bits(self):
self.hc = hashedcontainer.filter_factory(
self.GLOBAL_CONF, bits=12)(self.app)
self._check_conversion(
'/prefix/229/358493922_something',
'/v1/OPENIO/6C8/prefix/229/358493922_something')
| apache-2.0 | 5,798,349,336,250,656,000 | 32.352273 | 85 | 0.643271 | false |
andreas-h/pybtex | pybtex/style/sorting/invyear_author_title.py | 1 | 2418 | # Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pybtex.style.sorting import BaseSortingStyle
from datetime import datetime
import locale
class SortingStyle(BaseSortingStyle):
name = 'invyear_author_title'
def sorting_key(self, entry):
if entry.type in ('book', 'inbook'):
author_key = self.author_editor_key(entry)
else:
author_key = self.persons_key(entry.persons['author'])
time = int(entry.fields.get('year', '')) * 100
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
try:
time += datetime.strptime(entry.fields.get('month', '').strip(), "%B").month
except:
pass
return (-time, author_key, entry.fields.get('title', ''))
def persons_key(self, persons):
return ' '.join(self.person_key(person) for person in persons)
def person_key(self, person):
return ' '.join((
' '.join(person.prelast() + person.last()),
' '.join(person.first() + person.middle()),
' '.join(person.lineage()),
)).lower()
def author_editor_key(self, entry):
if entry.persons.get('author'):
return self.persons_key(entry.persons['author'])
elif entry.persons.get('editor'):
return self.persons_key(entry.persons['editor'])
| mit | 2,218,421,661,150,355,000 | 41.421053 | 88 | 0.674938 | false |
borntyping/python-colorlog | colorlog/tests/test_escape_codes.py | 1 | 1571 | """Test the colorlog.escape_codes module."""
from colorlog.escape_codes import escape_codes, esc, parse_colors
import pytest
def test_esc():
assert esc(1, 2, 3) == "\033[1;2;3m"
def test_reset():
assert escape_codes["reset"] == "\033[0m"
def test_bold_color():
assert escape_codes["bold_red"] == "\033[1;31m"
def test_fg_color():
assert escape_codes["fg_bold_yellow"] == "\033[1;33m"
def test_bg_color():
assert escape_codes["bg_bold_blue"] == "\033[104m"
def test_rainbow(create_and_test_logger):
"""Test *all* escape codes, useful to ensure backwards compatibility."""
create_and_test_logger(
"%(log_color)s%(levelname)s%(reset)s:%(bold_black)s%(name)s:"
"%(message)s%(reset)s:"
"%(bold_red)sr%(red)sa%(yellow)si%(green)sn%(bold_blue)sb"
"%(blue)so%(purple)sw%(reset)s "
"%(fg_bold_red)sr%(fg_red)sa%(fg_yellow)si%(fg_green)sn"
"%(fg_bold_blue)sb%(fg_blue)so%(fg_purple)sw%(reset)s "
"%(bg_red)sr%(bg_bold_red)sa%(bg_yellow)si%(bg_green)sn"
"%(bg_bold_blue)sb%(bg_blue)so%(bg_purple)sw%(reset)s "
)
def test_parse_colors():
assert parse_colors("reset") == "\033[0m"
def test_parse_multiple_colors():
assert parse_colors("bold_red,bg_bold_blue") == "\033[1;31m\033[104m"
def test_parse_invalid_colors():
with pytest.raises(KeyError):
parse_colors("false")
def test_256_colors():
for i in range(256):
assert parse_colors("fg_%d" % i) == "\033[38;5;%dm" % i
assert parse_colors("bg_%d" % i) == "\033[48;5;%dm" % i
| mit | 564,843,401,368,212,200 | 26.086207 | 76 | 0.60471 | false |
sdiehl/rpygtk | rpygtk/lib/rsession.py | 1 | 16192 | # Copyright 2009-2010 Stephen Diehl
#
# This file is part of RPyGTK and distributed under the terms
# of the GPLv3 license. See the file LICENSE in the RPyGTK
# distribution for full details.
import rpy2.robjects as robjects
from rpy2 import rinterface
import time
import numpy
from ui import prefs
import threading
import thread
ri = rinterface
ro = robjects
r = robjects.r
env = robjects.r.globalenv()
# R Convenience Wrappers
summary = robjects.r.summary
plot = robjects.r.plot
ls = robjects.r.ls
rm = robjects.r.rm
png = r.png
svg = r.svg
postscript = r.postscript
pdf = r.pdf
devoff = r['dev.off']
devcur = r['dev.cur']
devlist = r['dev.list']
X11 = r['X11']
typeof = lambda obj: (r['typeof'](obj))[0]
null = r['as.null']()
#TODO: We should convert these so that they actually return booleans, aka
# is_ts = lambda obj: r['is.ts'](obj)[0]
is_ts = r['is.ts']
is_array = r['is.array']
is_null = r['is.null']
#Using 'time' messes with the threads
times = r['time']
sapply = r['sapply']
df = r['data.frame']
library = r['library']
#Initialize the R Interface
rinterface.initr()
gdata_is_available = False
xtable_is_available = False
nlstools_is_available = False
#Check what libraries we have installed
try:
library('gdata')
gdata_is_available = True
except rinterface.RRuntimeError,RError:
print('Could not load the gdata library, importing of Excel spreadsheets is disabled.')
try:
library('xtable')
xtable_is_available = True
except rinterface.RRuntimeError,RError:
print('Could not load the xtable library, exporting LaTeX is disabled.')
try:
library('nlstools')
nlstools_is_available = True
except rinterface.RRuntimeError,RError:
print('Could not load the nlstools library, summary of nls is disabled.')
class rdict(dict):
'''A dictionary type that does not permit None types or empty strings in values'''
def __setitem__(self, key, value):
if value != None and value != '':
#Fetch the parent class (dict) and invoke __setitem__ just like normal but with the
#condition that we don't allow empty values
super(rdict, self).__setitem__(key, value)
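# Minimal usage sketch for rdict: None and empty-string values are silently
# dropped, which keeps optional R arguments out of the call.
#   args = rdict()
#   args['start'] = 1990
#   args['end'] = None      # ignored
#   args['frequency'] = ''  # ignored
#   sorted(args.keys())     # -> ['start']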
def arguments_to_string(d):
    '''Take a dictionary of arguments and return a string of comma-separated key=value pairs'''
#If we end using the low-level iterface (which we do too much), then we need a way of
#passing arguments
argument_str = ''
for key,value in d.iteritems():
if type(value) is str:
value = '"' + str(value) + '"'
#R likes capitalized logicals
if type(value) is bool:
value = str(value).upper()
argument_str += ',' + str(key)+'='+str(value)
return argument_str
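# Minimal usage sketch for arguments_to_string: strings are quoted and
# booleans become R-style upper-case logicals; note the leading comma and
# that the key order follows the dictionary's iteration order.
#   arguments_to_string({'header': True, 'sep': ';'})
#   # -> ',header=TRUE,sep=";"'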
def translate_types(type, reverse=False):
    '''Translate between R types and Python types and vice versa; reverse=False implies translation to Python'''
#r -> python
if not reverse:
if type == 'double':
return float
elif type == 'string':
return str
elif type == 'integer':
return int
elif type == 'character':
return str
elif type == 'logical':
return bool
elif type == int:
return type
elif type == float:
return type
elif type == str:
return type
else:
            raise ValueError('Cannot cast')
#python -> r
if reverse:
if type == int:
return 'integer'
elif type == float:
return 'double'
elif type == str:
return 'character'
elif type == bool:
return 'logical'
elif type == 'double':
return 'double'
elif type == 'character':
return 'character'
elif type == 'integer':
return 'integer'
else:
            raise ValueError('Cannot cast')
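# Minimal usage sketch for translate_types:
#   translate_types('double')             # -> float     (R -> Python)
#   translate_types(float, reverse=True)  # -> 'double'  (Python -> R)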
def translate_to_vector(column):
    '''Take a vanilla list or numpy column array and return the "equivalent" R vector form'''
if (type(column) is not numpy.ndarray) and (type(column) is not list):
print('Cannot translate non-numpy or list object to R Vector')
return
if type(column) is list:
return ro.FloatVector(column)
if column.dtype is numpy.dtype(int):
return ro.IntVector(column)
elif column.dtype is numpy.dtype(float):
return ro.FloatVector(column)
elif column.dtype is numpy.dtype(bool):
return ro.BoolVector(column)
elif column.dtype is numpy.dtype(str):
return ro.StrVector(column)
else:
print 'Mismatched (or strange) datatype in numpy array'
return
def column_extractor(data,output='rvector'):
'''Take any object (R Dataframe, List of Lists, Numpy Array, Python Array)
and return an iterator on its columns which yields either a numpy array,
RVector or a vanilla list
output='rvector'|'list'|'numpy'
'''
if type(data) == type(r['data.frame']):
for i in range(data.ncol()):
column = numpy.array(data.rx2(i))
yield translate_to_vector(column)
elif type(data) is list:
for i in range(len(data[0])):
column = lambda n: [x[n] for x in data]
yield robjects.FloatVector(column(i))
elif type(data) is numpy.ndarray:
#Check to see if we have a column
if len(data.shape)<2:
yield robjects.FloatVector(data)
else:
for i in range(data.shape[1]):
column = data[:,i]
yield robjects.FloatVector(column)
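# Minimal usage sketch for column_extractor, assuming a two-column numpy
# array: iterating yields one R vector per column, ready to be wrapped in
# r['data.frame'] and friends.
#   arr = numpy.array([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])
#   vectors = list(column_extractor(arr))  # two FloatVectors of length 3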
#This is a 'Borg' design pattern, all items appended get stored
#in the master shared state. We'll use this for threads so that
#we can kill every thread from the main thread
class ThreadHandler(list):
#Shared state
shared = []
def append(self,value):
super(ThreadHandler,self).append(value)
self.shared.append(value)
def remove(self,value):
super(ThreadHandler,self).remove(value)
self.shared.remove(value)
def get_shared(self):
return self.shared
def iter_shared(self):
for item in self.shared:
yield item
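# Minimal usage sketch for ThreadHandler: every instance sees appends made
# through any other instance, which is what lets the main thread stop all
# plot threads at once.
#   handlers_a = ThreadHandler()
#   handlers_b = ThreadHandler()
#   handlers_a.append('plot-thread-1')
#   list(handlers_b.iter_shared())  # -> ['plot-thread-1']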
class RPlotThread( threading.Thread ):
halt = False
args = {}
data = {}
cmd_stack = []
#use f(**dict) to pass a dictionary of args to func
def __init__(self,data=dict(),args=dict(),type=None,export=None,par_mode=False):
self.args = args
self.data = data
self.type = type
self.export = export
self.par_mode = par_mode
threading.Thread.__init__(self)
def run (self):
#Plot types
plot = r.plot
hist = r.hist
barplot = r.barplot
pie = r.pie
qqnorm = r.qqnorm
qqplot= r.qqplot
#Export types
if self.export:
#Shut down all other plots before we do anything involving exporting
r['graphics.off']()
filename,extension = self.export
if extension not in filename:
extension += filename
if extension == '.svg':
svg(filename)
elif extension == '.png':
png(filename)
elif extension == '.ps':
postscript(filename)
elif extension == '.pdf':
#There is a rather strange bug where points get rendered as
#letters unless we toggle useDingbats=False
pdf(filename,useDingbats=False)
if self.par_mode:
rows, columns = self.par_mode
r('par(mfrow=c(%s,%s))' % (rows,columns))
else:
#Don't bother opening a new window if there already is an open one
#unless the user has specified that every new plot should open in a
#new window
if thereArePlotWindowsOpen() and prefs.get_pref('single_plot'):
#Clear the previous plots, unless we're in par mode
if not self.par_mode:
r('plot.new()')
else:
rows, columns = self.par_mode
r('par(mfrow=c(%s,%s))' % (rows,columns))
else:
X11()
if self.type=='scatter':
x = self.data['x']
y = self.data['y']
plot(x,y,**self.args)
if self.type=='scatter.smooth':
x = self.data['x']
y = self.data['y']
r['scatter.smooth'](x=x,y=y,**self.args)
if self.type=='matplot':
df = robjects.r['data.frame'](**self.data)
plot(df,**self.args)
if self.type=='histogram':
x = self.data['x']
hist(x,**self.args)
if self.type=='bar':
x = self.data['x']
barplot(x,**self.args)
if self.type=='pie':
x = self.data['x']
pie(x,**self.args)
if self.type=='qqnorm':
x = self.data['x']
qqnorm(x,**self.args)
if self.type=='qqplot':
x = self.data['x']
y = self.data['y']
qqplot(x,y,**self.args)
if self.type=='boxplot':
data = r['data.frame'](**self.data)
r['boxplot'](data,**self.args)
if self.type=='general':
'''data is passed directly to plot'''
plot(self.data,**self.args)
if self.export:
            # Run through any secondary commands before we save the image
for c in self.cmd_stack:
cmd,args,kwargs = c
apply(cmd,args,kwargs)
self.cmd_stack.remove(c)
devoff()
return
self.t = threading.Timer(0.1, self.refresh)
self.t.start()
def add_cmd(self,command,*args,**kwargs):
'''Add a command to be executed after the plot is created'''
        # Since this is a separate thread we have to have a stack to handle
        # commands passed after the timer is started
self.cmd_stack.append((command,args,kwargs))
def refresh(self):
while self.halt == False:
for c in self.cmd_stack:
cmd,args,kwargs = c
apply(cmd,args,kwargs)
self.cmd_stack.remove(c)
rinterface.process_revents()
time.sleep(0.1)
if self.halt == True:
self.t.cancel()
#----------------------------------------
# RPy Wrapper Classes
#----------------------------------------
class robject(object):
'The base class for all robjects'
#Human readable name of object
type = None
#Where the object should be viewed 'frame' or 'output'
outputsTo = None
#Reference to the RPy2 object
object = None
#Label of the object, should be identical to the name of the object
#in the globalEnv of RPy2
label = None
#Icon to show in object sidebar
icon = None
def __init__(self,*args,**kwargs):
apply(self.construct,args,kwargs)
def construct(self):
pass
def refresh(self):
pass
#---------------------------------
# Data Storing Objects
#---------------------------------
class dataframe(robject):
'''We store the data in three ways
columns -- a dictionary {column name, column type}
column_data -- a dictionary {column name, rvector}
rawdata -- a numpy array of the data
object -- the reference to the actual robject in the rsession
'''
# Ok to summarize this non-intuitive code...
# Say we have a dataframe in R, when we bring it in to python
# we store the data in a couple of ways
#
# V1 V2
# 1 0.1 5
# 2 0.2 6
# 3 0.3 7
# 4 0.4 8
# 5 0.5 9
#
# rawdata would hold the numpy array [ [0.1,0.2,0.3,0.4,0.5] , [5,6,7,8,9] ]
# rownames would hold the array [1,2,3,4,5]
# columns would hold column labels and their types {'V1':float , 'V2':int}
# column_data would hold {'V1': [0.1,0.2,0.3,0.4,0.5] , 'V2':[5,6,7,8,9]}
columns = {}
column_data = {}
rawdata = None
rownames = None
object = None
isColumn = False
outputsTo = 'frame'
icon = './ui/icons/dataframe.png'
type = 'Data Frame'
def construct(self,data,columns=None,label=None,rownames=None):
'''Take an array of data and dict of columns and create a dataframe class with
self.object as a reference to the rpy2 object
'''
self.rawdata = data
self.label = label
self.rownames = rownames
self.columns = columns
if len(data.shape)==1:
self.isColumn = True
d = dict()
for i,col in enumerate(column_extractor(data)):
column_name = columns.keys()[i]
#This gets passed to R
d[column_name] = col
#This is stored on the python side
self.column_data[column_name] = col
self.object = r['data.frame'](**d)
def refresh(self):
'''Rebuild the R object from the internal numpy array and return the R object'''
self.construct(data=self.rawdata,columns=self.columns,label=self.label,rownames=self.rownames)
return self.object
def __getitem__(self,key):
'''Returns a string containing the R code to access the column
data.frame $ column
'''
return self.label + '$' + key
class timeseries(robject):
start = None
end = None
frequency = None
times = None
deltat = None
times = None
columns = {}
column_data = {}
rawdata = None
rownames = None
object = None
isColumn = False
outputsTo = 'frame'
icon = './ui/icons/timeseries.png'
def construct(self,data,label=None,start=None,end=None,frequency=None,deltat=None):
self.rawdata = data
self.label = label
self.columns = {'V1':float}
data=translate_to_vector(data)
self.column_data = {'V1':data}
args = rdict({
'start':start,
'end':end,
'frequency':frequency,
'deltat':deltat,
})
self.type = "Time Series"
self.object = r['ts'](data,**args)
self.times = times(self.object)
self.rownames = self.times
#This is a 'hidden' variable that doesn't show up in the frame view
#but is still accessible if called directly by plots, stat tests, etc...
self.column_data['(Time)'] = r['as.numeric'](self.times)
def __getitem__(self,key):
if key == '(Time)':
return self.times
class dist(robject):
def construct(self):
pass
class matrix(robject):
def construct(self):
pass
class linear_model(robject):
type = 'Linear Model'
icon = './ui/icons/description.png'
outputsTo = 'output'
def construct(self,fit,label=None):
self.object = fit
self.label = label
#This isn't going to make it into this release
#numpy.array(r['coef'](fit))
#self.coefficents = dataframe(coefs,columns={'Residuals':float},label=label+'$'+'coefficents')
self.coefs = r['coef'](fit)
self.residuals = r['resid'](fit)
self.fitted = r['fitted'](fit)
self.text = str(fit)
class description(robject):
    '''A text description of some statistical function: mean, anova...'''
object = None
outputsTo = 'output'
label = None
text = None
type = 'Description'
icon = './ui/icons/description.png'
    '''XTable can't handle some data types so we need to run table()'''
tabelize = False
def construct(self,object,label=None,tabelize=False):
self.object = object
self.label = label
self.tabelize = tabelize
#Cache the output text so we aren't calling it constantly
self.text = str(object)
def thereArePlotWindowsOpen():
#dev.list is a vector, so is_null returns a vector apparently
window_list_is_empty = is_null(r['dev.list']())[0]
if window_list_is_empty:
return False
else:
return True
| gpl-3.0 | 519,104,289,013,916,160 | 27.862745 | 112 | 0.573617 | false |
cechrist/cardoon | cardoon/devices/memductor.py | 1 | 6848 | """
:mod:`memductor` -- Basic (nonlinear) memductor
-----------------------------------------------
.. module:: memductor
.. moduleauthor:: Carlos Christoffersen
"""
import numpy as np
from cardoon.globalVars import const, glVar
import cardoon.circuit as cir
import cppaddev as ad
class Device(cir.Element):
r"""
Memductor
---------
Connection diagram::
+ Vin - Iin
_______________ ---->
0 |_ _ _ _| | 1
o----| |_| |_| |_| | |-------o External view
|_____________|_|
Device equation:
.. math::
q(t) = q(\varphi(t))
\frac{dq}{dt} = \frac{dq}{d\varphi} \frac{d\varphi}{dt}
I_{in} = W(\varphi) V_{in}
:math:`W(\varphi)` is the memductance function.
Netlist example::
memd:m1 1 0 w = '1e3 * (np.cosh(1e6 * phi)-1.)'
Notes:
* the memductance function (``W(phi)``) is given as an
expression in the ``w`` parameter. The independent variable is
the memductor flux: ``phi``. Constants and mathematical
functions can also be used in the definition of ``w``.
* The initial flux can be adjusted with the ``phi0`` parameter
* the memductor loses its memory as the capacitor discharges
through Rleak (Rleak is necessary to ensure a unique DC
solution). The values of C and Rleak can be adjusted to change
the time constant
* The capacitor value has no effect on the memductance, but has
an effect in the internal model: a larger capacitor will
produce lower voltages at vc.
Internal Topology
+++++++++++++++++
The internal implementation uses a gyrator and adds one internal
node: ``vc``. The voltage at ``vc`` is equal to ``(gyr/C) * phi``,
where ``gyr`` is a global variable that can be changed with the
``.options`` keyword::
--> Iin
0 o---------+
|
+ /|\ i = w(phi) * Vin
Vin ( | )
- \V/ phi = (C/gyr) * vc
|
1 o---------+
Term: vc
+ +----------------+--------+---------,
| | | |
/^\ ----- / /^\
vc ( | ) gyr Vin ----- C \ Rleak ( | ) phi0 * gyr / C / Rleak
\|/ | / \|/
| | | |
- +----------------+--------+---------'
|
--- tref
-
"""
# Device category
category = "Basic components"
# devtype is the 'model' name
devType = "memd"
# Number of terminals. If numTerms is set here, the parser knows
# in advance how many external terminals to expect. By default the
# parser makes no assumptions and allows any number of connections
#
numTerms = 2
isNonlinear = True
paramDict = dict(
w = ('Memductance function W(phi)', 'Siemens', str, 'abs(1e-3*phi)'),
phi0 = ('Initial flux', 'Vs', float, 0.),
c = ('Auxiliary capacitance', 'F', float, 10e-6),
        rleak = ('Leakage resistance', 'Ohms', float, 1e9)
)
def __init__(self, instanceName):
# Here the Element constructor must be called. Do not connect
# internal nodes here.
cir.Element.__init__(self, instanceName)
def process_params(self):
# Called once the external terminals have been connected and
# the non-default parameters have been set. Make sanity checks
# here. Internal terminals/devices should also be defined
# here. Raise cir.CircuitError if a fatal error is found.
# remove any existing internal connections
self.clean_internal_terms()
# Test parameters
if not self.rleak:
raise cir.CircuitError(self.instanceName
                                   + ': leakage resistance cannot be zero')
if not self.c:
raise cir.CircuitError(self.instanceName
+ ': capacitance can not be zero')
# test m expression to make sure it is valid
try:
phi = .5
result = eval(self.w)
except Exception as e:
raise cir.CircuitError(
'{0}: Invalid expression: {1} ({2})'.format(self.instanceName,
self.w, e))
try:
abs(result)
except TypeError:
raise cir.CircuitError(
'{0}: Invalid expression: {1} (result not a number)'.format(
self.instanceName, self.w))
# Connect internal terminal
tvc = self.add_internal_term('vc', 'V')
tref = self.add_reference_term()
# Set up source if phi0 is given
if self.phi0 != 0.:
self.isDCSource = True
self.sourceOutput = (tref, tvc)
self._i0 = self.phi0 * glVar.gyr / self.c / self.rleak
# Setup gyrator
# Access to global variables is through the glVar
self.linearVCCS = [((0,1), (tref, tvc), glVar.gyr),
((tvc, tref), (tvc, tref), 1./self.rleak)]
self.linearVCQS = [((tvc, tref), (tvc, tref), self.c)]
self.controlPorts = [(0,1), (tvc, tref)]
self.csOutPorts = [(0,1)]
self.qsOutPorts = []
def eval_cqs(self, vPort, getOP = False):
"""
Returns memductor current given input voltage. Charge vector
is empty
vPort[0] = memductor voltage
vPort[1] = internal cap voltage
iout[0] = memductor current
If getOP == True, a dictionary with OP variables is returned
"""
phi = self.c * vPort[1] / glVar.gyr
W = eval(self.w)
iout = np.array([W * vPort[0]])
if getOP:
return {'v': vPort[0],
'i': iout[0],
'phi': phi,
'W': W}
else:
return (iout, np.array([]))
# Use automatic differentiation for eval and deriv function
eval_and_deriv = ad.eval_and_deriv
eval = ad.eval
def get_OP(self, vPort):
"""
Calculates operating point information
vPort[0] = memductor voltage
vPort[1] = internal cap voltage
"""
return self.eval_cqs(vPort, getOP=True)
def get_DCsource(self):
return self._i0
| gpl-3.0 | 7,188,436,473,438,162,000 | 32.242718 | 79 | 0.475905 | false |
Tomographer/tomographer | test/pytest_t_tools_densedm.py | 1 | 1933 | #!/usr/bin/env python
from __future__ import print_function
import re
import numpy as np
import numpy.testing as npt
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
# import the module
import tomographer.tools.densedm
import tomographer.tools.densedm.mle
import tomographer
import tomographer.densedm
class SimulateMeasurements(unittest.TestCase):
def test_sim(self):
rho_sim = np.array([[0.9, 0], [0, 0.1]])
Mk = tomographer.tools.densedm.PauliMeasEffectsQubit
n = 1000
d = tomographer.tools.densedm.simulate_measurements(rho_sim, Mk, n)
self.assertEqual(d.Nm[0] + d.Nm[1], n)
self.assertEqual(d.Nm[2] + d.Nm[3], n)
self.assertEqual(d.Nm[4] + d.Nm[5], n)
# Hoeffding's inequality: Prob( |N(+) - p*n| > eps*n ) < 2*exp(-2*eps^2*n)
#
# --> so the probability to deviate by more than 0.1 fraction is bounded by
# 2*exp(-2 * 0.1**2 * n) ~ 4e-9 (for n=1000)
self.assertLessEqual( (d.Nm[0] - 0.5*n) , 0.1*n )
self.assertLessEqual( (d.Nm[2] - 0.5*n) , 0.1*n )
self.assertLessEqual( (d.Nm[4] - 0.9*n) , 0.1*n )
class Mle(unittest.TestCase):
def test_mle(self):
Emn = sum(tomographer.tools.densedm.PauliMeasEffectsQubit, [])
Nm = np.array([250, 250, 250, 250, 500, 0]) # really extreme example
llh = tomographer.densedm.IndepMeasLLH(tomographer.densedm.DMTypes(2))
llh.setMeas(Emn, Nm)
(rho_MLE, d) = tomographer.tools.densedm.mle.find_mle(llh)
# we know the exact solution, rho_MLE = |0><0|
npt.assert_array_almost_equal(rho_MLE,
np.array([[1, 0], [0, 0]]))
# normally, this is not needed as we are being run via pyruntest.py, but it might be
# useful if we want to run individually picked tests
if __name__ == '__main__':
unittest.main()
| mit | 5,261,438,763,180,251,000 | 27.426471 | 84 | 0.606829 | false |
unball/strategy | simple_strategy/potential_fields.py | 1 | 3902 | #!/usr/bin/env python
import numpy as np
#Convert vector from cartesian to polar coordinate
#@vector = [x, y]
def cart2polar(vector):
x = vector[0]
y = vector[1]
r = np.sqrt(x*x + y*y)
th = np.arctan2(y, x)
return np.array([r, th])
#Convert vector from polar to cartesian coordinate
#@vector = [r, th]
def polar2cart(vector):
r = vector[0]
th = vector[1]
x = r*np.cos(th)
y = r*np.sin(th)
return np.array([x,y])
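# Minimal round-trip sketch: polar2cart(cart2polar(v)) recovers v up to
# floating point error.
#   cart2polar(np.array([1.0, 1.0]))                   # -> [sqrt(2), pi/4]
#   polar2cart(np.array([np.sqrt(2.0), np.pi / 4.0]))  # -> [1.0, 1.0]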
class AttractivePotentialField:
"""Radial attractive potential field
@origin Point that starts the field - Cartesian coordinates
@magnitude Radius of field """
def __init__(self, origin, magnitude, min_magnitude=1):
self.origin = origin
self.magnitude = magnitude
self.min_magnitude = min_magnitude
def calculate_force(self, position):
return self.origin
#difference = difference*np.array([self.magnitude, 1])
#if(difference[0] < self.min_magnitude):
# difference[0] = self.min_magnitude
class RepulsivePotentialField:
"""Radial repulsive potential field
@origin Point that starts the field - Cartesian coordinates
@range_field Distance from origin that fields act
@magnitude_weight Weight that fields act"""
def __init__(self, origin, range_field, magnitude_weight):
self.origin = origin
self.range_field = range_field
self.magnitude_weight = magnitude_weight
def calculate_force(self, position):
difference = cart2polar(position - self.origin)
if(difference[0] < self.range_field):
difference[0] = (self.range_field - difference[0])/(self.range_field/self.magnitude_weight)
else:
difference = np.array([0,0])
return polar2cart(difference)
class TangencialPotentialField:
"""Tangencial potential field
@origin Point that starts the field - Cartesian coordinates
"""
def __init__(self, origin, magnitude):
self.origin = origin
self.magnitude = magnitude
def calculate_force(self, position):
difference = cart2polar(self.origin - position)
difference[0] = self.magnitude
difference[1] += np.pi/2.5
print difference[1]
return polar2cart(difference)
class SelectivePotentialField:
"""Selective Potential field
set a combination of fields thats allows to kick the ball inside
of a conic region
@origin Point that starts the field - Cartesian coordinates
@direction Vector that indicates the direction
@magnitude
x"""
def __init__(self, origin, width, range_field, direction, goal,
mag_attractive_field, mag_tangencial_field):
self.origin = origin
self.width = width
self.range_field = range_field
self.direction = direction
self.mag_attractive_field = mag_attractive_field
self.mag_tangencial_field = mag_tangencial_field
self.goal = goal
def calculate_force(self, position):
angle = cart2polar(self.direction)[1]
difference = position - self.origin
force = np.array([0, 0])
weight = 1.0
if((np.fabs(angle - cart2polar(difference)[1]) <= weight*self.width) and (cart2polar(difference)[0] <= 0.4)):
attractive_field = AttractivePotentialField(self.goal, self.mag_attractive_field)
force = attractive_field.calculate_force(position)
print 'ME SEGURA TO INDO'
else:
tangencial_field = TangencialPotentialField(self.origin, self.mag_tangencial_field)
force = tangencial_field.calculate_force(position)
print 'RODA A ROLETA'
return force
class ConstantPotentialField:
def __init__(self, field_force):
self.field_force = field_force
def calculate_force(self):
return self.field_force
| mit | -7,559,067,572,238,686,000 | 32.350427 | 117 | 0.644541 | false |
cmpitg/blutkit | blutkit/gui/keycombination.py | 1 | 8206 | #
# Copyright 2013 © Nguyễn Hà Dương (cmpitgATgmailDOTcom)
#
# This file is part of Blutkit.
#
# Blutkit is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blutkit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blutkit. If not, see <http://www.gnu.org/licenses/>.
#
import builtins
from blutkit.gui.keyconstants import *
from blutkit.gui.keyutils import *
class SingleKeyCombination:
"""
This class takes care of a single key/mouse combination and should NOT
be used directly. Every key/mouse combination should be constructed
using KeyCombination class.
"""
def __init__(self, key=None, mods=set(), mouse=[]):
self.key = key
self.mods = mods
self.mouse = mouse
def __key(self):
"""
The __key function serves the purpose of equality test and
hash.
"""
return (self.key,
frozenset(sorted(self.mods)), # Do we need to sort?
frozenset(self.mouse))
def __eq__(x, y):
"""Equality test."""
return x.__key() == y.__key()
def __hash__(self):
"""Quick and easy hash function."""
return hash(self.__key())
def fromString(keycombstr):
"""
Constructor, new SingleKeyCombination from string. The string format
belongs to one of the following examples:
SingleKeyCombination.fromString("Mouse_Left")
SingleKeyCombination.fromString("Mouse_Left-Mouse_Right")
SingleKeyCombination.fromString("Control-Shift-c")
SingleKeyCombination.fromString("Alt-Control-Shift-h")
This class serves as a helper class of KeyCombination.
"""
if keycombstr.strip() == "":
return SingleKeyCombination()
key = None
mods = set()
mouse = []
keycombstr = keycombstr.lower()
# Special case: bind the "-" button
if keycombstr == "-":
return SingleKeyCombination(toQtKey(keycombstr),
mods,
mouse)
for part in keycombstr.split('-'):
# Special case: if "-" is a part of the combination, then
# part is ""
if part == "":
currentKey = "-"
else:
currentKey = part
if isKeymod(currentKey):
mods.add(toKeymod(currentKey))
elif isMouseButton(currentKey):
mouse.append(toMouseButton(currentKey))
else:
key = toQtKey(currentKey)
return SingleKeyCombination(key, mods, mouse)
def keyToString(self):
"""Return key as string."""
        if self.key:
            return toKeyStr(self.key)
else:
return ""
def modsToString(self):
"""Return key mods as string."""
res = [capitalizeStr(toKeyStr(mod)) for mod in self.mods]
return "-".join(sorted(res))
def mouseToString(self):
"""Return mouse chords as string."""
res = [capitalizeStr(MouseButtonToStr[button])
for button in self.mouse]
return "-".join(res)
def toString(self):
"""Return the string representation of a SingleKeyCombination."""
formatStr = self.modsToString()
if self.key:
# Key event
formatStr += "-" + capitalizeStr(toKeyStr(self.key))
else:
# Mouse chord event
formatStr += "-" + capitalizeStr(self.mouseToString())
# Strip the redundant "-" if there's no modifier
if self.mods == set():
formatStr = formatStr[1:]
return formatStr.strip()
def __str__(self):
"""Return the string representation of a SingleKeyCombination."""
return self.toString()
def __repr__(self):
"""Return the repr form of a SingleKeyCombination."""
return self.toString()
class KeyCombination(builtins.tuple):
"""
This class takes care of complete key/mouse combination, including modal
ones. This class has no constructor since it's a subclass of
builtins.tuple. To construct programmatically, use its static methods
fromString, fromSingleKeyCombination, or fromKeys. Best practice:
use kbd and pass a string.
The string argument is case-insensitive.
E.g.
kbd('Control-S')
kbd('Control-C Control-Z')
kbd('Mouse_Left-Mouse_Right')
"""
    def __init__(self, *args):
        # tuple contents are already set by tuple.__new__, so do not forward
        # the args to object.__init__ (that raises TypeError on Python 3)
        super(KeyCombination, self).__init__()
def isModal(keystr):
"""
Determine whether or not the string representing a key combination
is modal.
"""
return len(keystr) >= 2 and keystr.find(" ") != -1
def fromString(keystr):
"""
Construct a new Combination from string. The string format belongs
to one of the following examples:
KeyCombination.fromString("-")
KeyCombination.fromString("Control-S")
KeyCombination.fromString("Control-&")
KeyCombination.fromString("Control-X Control-Q")
KeyCombination.fromString("Mouse_Left-Mouse_Right")
Modal key combination is supported. Every key combination should
be constructed using this class, not SingleKeyCombination.
"""
if KeyCombination.isModal(keystr):
keycombList = map(lambda k: SingleKeyCombination.fromString(k),
keystr.split(" "))
return KeyCombination(keycombList)
else:
return KeyCombination((SingleKeyCombination.fromString(keystr),))
def fromKeys(key, mods, mouse):
return KeyCombination((SingleKeyCombination(key, mods, mouse),))
def fromSingleKeyCombination(keycomb):
return KeyCombination((keycomb,))
def sharePrefix(self, keyCombination):
"""Determine whether 2 key combinations share prefix."""
minLength = min(len(self), len(keyCombination))
for i in range(minLength):
if self[i] != keyCombination[i]:
return False
return True
def toString(self):
"""
Return the string representation of a key combination, which can be
used to reconstruct the key combination by using
KeyCombination.fromString.
"""
return "[" + " ".join(map(lambda x: str(x), self)) + "]"
def __add__(self, keys):
"""
Add a key combination to the current combination.
E.g.
kbd("Control-] Control-Q") + kbd("Control-W")
"""
if type(keys) != KeyCombination:
keys = KeyCombination.fromSingleKeyCombination(keys)
if self == kbd(""):
return keys
return KeyCombination(list(self) + list(keys))
def __str__(self):
"""Aliased to self.toString()"""
return self.toString()
def __repr__(self):
"""
Return the string representation of a key combination. See doc of
the __str__ method for more info.
"""
return self.__str__()
def kbd(keystr):
"""
Construct a KeyCombination from string by calling
KeyCombination.fromString. The string argument is case-insensitive.
E.g.
kbd('Control-S')
kbd('Control-C Control-Z')
kbd('Mouse_Left-Mouse_Right')
"""
return KeyCombination.fromString(keystr)
# print(kbd(','))
# print(kbd('Control-S'))
# print(kbd('Control-C Control-Z'))
# print(kbd('Mouse_Left-Mouse_Right'))
# print(kbd('Control-C Shift-Mouse_Left'))
# print(kbd('Control-C Control-Z').sharePrefix(kbd('Control-C'))) # True
# print(kbd('Control-C Control-Z').sharePrefix(kbd('Control-A'))) # False
# print(kbd('Control-C Control-Z').sharePrefix(kbd(''))) # False
| gpl-3.0 | -4,797,116,071,737,052,000 | 30.417625 | 77 | 0.602073 | false |
mazi-project/back-end | lib/sht11.py | 1 | 1426 | #!/usr/bin/python
import sys
import warnings
def help_message():
print ' '
print 'sht11'
print ' --help Displays this usage message '
print ' --detect Displays if the sensor is connected on Raspberry Pi'
print ' -h , --humidity Displays the Humidity '
print ' -t , --temperature Displays the Temperature'
def sht11( sensor ):
try:
warnings.filterwarnings("ignore")
from sht1x.Sht1x import Sht1x as SHT1x
dataPin = 5
clkPin = 3
sht1x = SHT1x(dataPin, clkPin, SHT1x.GPIO_BOARD)
if (sensor == "humidity"):
mesurement = sht1x.read_humidity()
elif (sensor == "temperature"):
mesurement = sht1x.read_temperature_C()
return mesurement
except:
return "false"
def detect():
var = sht11("temperature")
if (type(var) == int or type(var) == float):
print 'sht11'
if __name__ == '__main__':
args = len(sys.argv)
while ( args > 1):
args -= 1
if(sys.argv[args] == "--help"):
help_message()
elif(sys.argv[args] == "--detect"):
detect()
elif(sys.argv[args] == "-t" or sys.argv[args] == "--temperature"):
temperature = sht11("temperature")
print ("temperature %.1f" % temperature)
elif(sys.argv[args] == "-h" or sys.argv[args] == "--humidity"):
humidity = sht11("humidity")
print ("humidity %.1f" % humidity)
| mit | -2,510,886,870,512,368,600 | 26.960784 | 97 | 0.570827 | false |
syuu1228/seastar | test.py | 1 | 6467 | #!/usr/bin/env python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import argparse
import subprocess
import signal
import re
boost_tests = [
'alloc_test',
'futures_test',
'thread_test',
'memcached/test_ascii_parser',
'sstring_test',
'unwind_test',
'defer_test',
'output_stream_test',
'httpd',
'fstream_test',
'foreign_ptr_test',
'semaphore_test',
'expiring_fifo_test',
'shared_ptr_test',
'weak_ptr_test',
'fileiotest',
'packet_test',
'tls_test',
'rpc_test',
'connect_test',
'json_formatter_test',
'execution_stage_test',
'lowres_clock_test',
'program_options_test',
'tuple_utils_test',
'noncopyable_function_test',
'abort_source_test',
]
other_tests = [
'smp_test',
'timertest',
'directory_test',
'thread_context_switch',
'fair_queue_test',
]
last_len = 0
def print_status_short(msg):
global last_len
print('\r' + ' '*last_len, end='')
last_len = len(msg)
print('\r' + msg, end='')
print_status_verbose = print
class Alarm(Exception):
pass
def alarm_handler(signum, frame):
raise Alarm
if __name__ == "__main__":
all_modes = ['debug', 'release']
parser = argparse.ArgumentParser(description="Seastar test runner")
parser.add_argument('--fast', action="store_true", help="Run only fast tests")
parser.add_argument('--name', action="store", help="Run only test whose name contains given string")
parser.add_argument('--mode', choices=all_modes, help="Run only tests for given build mode")
parser.add_argument('--timeout', action="store",default="300",type=int, help="timeout value for test execution")
parser.add_argument('--jenkins', action="store",help="jenkins output file prefix")
parser.add_argument('--verbose', '-v', action = 'store_true', default = False,
help = 'Verbose reporting')
args = parser.parse_args()
print_status = print_status_verbose if args.verbose else print_status_short
# Run on 2 shard - it should be enough
cpu_count = 2
test_to_run = []
modes_to_run = all_modes if not args.mode else [args.mode]
for mode in modes_to_run:
prefix = os.path.join('build', mode, 'tests')
for test in other_tests:
test_to_run.append((os.path.join(prefix, test),'other'))
for test in boost_tests:
test_to_run.append((os.path.join(prefix, test),'boost'))
test_to_run.append(('tests/memcached/test.py --mode ' + mode + (' --fast' if args.fast else ''),'other'))
test_to_run.append((os.path.join(prefix, 'distributed_test'),'other'))
allocator_test_path = os.path.join(prefix, 'allocator_test')
if args.fast:
if mode == 'debug':
test_to_run.append((allocator_test_path + ' --iterations 5','other'))
else:
test_to_run.append((allocator_test_path + ' --time 0.1','other'))
else:
test_to_run.append((allocator_test_path,'other'))
if args.name:
test_to_run = [t for t in test_to_run if args.name in t[0]]
all_ok = True
n_total = len(test_to_run)
env = os.environ
# disable false positive due to new (with_alignment(...)) ...
env['ASAN_OPTIONS'] = 'alloc_dealloc_mismatch=0'
for n, test in enumerate(test_to_run):
path = test[0]
prefix = '[%d/%d]' % (n + 1, n_total)
print_status('%s RUNNING %s' % (prefix, path))
signal.signal(signal.SIGALRM, alarm_handler)
if args.jenkins and test[1] == 'boost':
mode = 'release'
if test[0].startswith(os.path.join('build','debug')):
mode = 'debug'
xmlout = args.jenkins+"."+mode+"."+os.path.basename(test[0])+".boost.xml"
path = path + " --output_format=XML --log_level=all --report_level=no --log_sink=" + xmlout
print(path)
if os.path.isfile('tmp.out'):
os.remove('tmp.out')
outf=open('tmp.out','w')
# Limit shards count
if test[1] == 'boost':
path = path + " -- --smp={}".format(cpu_count)
else:
if not re.search("tests/memcached/test.py", path):
if re.search("allocator_test", path) or re.search("fair_queue_test", path):
path = path + " -- --smp={}".format(cpu_count)
else:
path = path + " --smp={}".format(cpu_count)
proc = subprocess.Popen(path.split(' '), stdout=outf, stderr=subprocess.PIPE, env=env,preexec_fn=os.setsid)
signal.alarm(args.timeout)
err = None
out = None
try:
out,err = proc.communicate()
signal.alarm(0)
except:
os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
proc.kill()
proc.returncode = -1
finally:
            outf.close()
if proc.returncode:
print_status('FAILED: %s\n' % (path))
if proc.returncode == -1:
print_status('TIMED OUT\n')
else:
print_status(' with error code {code}\n'.format(code=proc.returncode))
print('=== stdout START ===')
with open('tmp.out') as outf:
for line in outf:
print(line)
print('=== stdout END ===')
if err:
print('=== stderr START ===')
print(err.decode())
print('=== stderr END ===')
all_ok = False
else:
print_status('%s PASSED %s' % (prefix, path))
if all_ok:
print('\nOK.')
else:
print_status('')
sys.exit(1)
| apache-2.0 | 4,200,851,341,556,707,000 | 33.036842 | 116 | 0.570898 | false |
dvu4/Data-Wrangling-with-MongoDB | Lesson_4_Working_with_MongoDB/23-Using_$in_Operator/find_cars.py | 1 | 1106 | #!/usr/bin/env python
""" Your task is to write a query that will return all cars manufactured by "Ford Motor Company"
that are assembled in Germany, United Kingdom, or Japan.
Please modify only 'in_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
"""
def get_db():
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client.examples
return db
def in_query():
# Write the query
query = {"manufacturer" : "Ford Motor Company", "assembly" :{"$in" : ["Germany", "United Kingdom", "Japan"]}}
return query
if __name__ == "__main__":
db = get_db()
query = in_query()
autos = db.autos.find(query, {"name":1, "manufacturer":1, "assembly": 1, "_id":0})
print "Found autos:", autos.count()
import pprint
for a in autos:
pprint.pprint(a)
| agpl-3.0 | 656,383,715,339,374,500 | 30.6 | 113 | 0.679928 | false |
cerndb/wls-cli | wls_rest/src/wlscli/service/security/data/netrc.py | 1 | 1165 | #*******************************************************************************
# Copyright (C) 2015, CERN
# This software is distributed under the terms of the GNU General Public
# License version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as Intergovernmental Organization
# or submit itself to any jurisdiction.
#
#
#*******************************************************************************
'''
Created on Nov 4, 2015
@author: Konrad Kaczkowski
'''
from wlscli.service.security import AuthenticationService
import os
class NetrcAuthenticationService(AuthenticationService):
def __init__(self, data_storage):
''' Constructor '''
self.data_storage = data_storage
def get_data(self):
self.check_netrc_exists()
self.set_netrc()
def set_netrc(self):
self.data_storage.netrc = 1
def check_netrc_exists(self):
home_directory = os.path.expanduser("~")
netrc_path = home_directory + "/.netrc"
        return os.path.isfile(netrc_path)
| gpl-3.0 | 2,624,321,533,661,844,500 | 32.285714 | 80 | 0.583691 | false |
TartuNLP/nazgul | nmtnazgul.py | 1 | 6225 | #!/usr/bin/python3
import sock
import translator
import sys
import html
import json
from time import time
from nltk import sent_tokenize
from constraints import getPolitenessConstraints as getCnstrs
from log import log
# IP and port for the server
MY_IP = '172.17.66.215'
MY_PORT = 12346
supportedStyles = { 'fml', 'inf', 'auto' }
styleToDomain = { 'fml': 'ep', 'inf': 'os', 'auto': 'pc' }
#supportedStyles = { "os", "un", "dg", "jr", "ep", "pc", "em", "nc" }
supportedOutLangs = { 'et', 'lv', 'en', 'ru', 'fi', 'lt', 'de' }
extraSupportedOutLangs = { 'est': 'et', 'lav': 'lv', 'eng': 'en', 'rus': 'ru', 'fin': 'fi', 'lit': 'lt', 'ger': 'de' }
defaultStyle = 'auto'
defaultOutLang = 'en'
USAGE_MSG = """\nUsage: nmtnazgul.py translation_model truecaser_model segmenter_model [output_lang [output_style]]
translation_model: path to a trained Sockeye model folder
truecaser_model: path to a trained TartuNLP truecaser model file
segmenter_model: path to a trained Google SentencePiece model file
Without the output language and any further parameters an NMT server is started; otherwise the script translates STDIN
output_lang: output language (one of the following: {0})
output_style: output style (one of the following: {1}; default: {2})
Further info: http://github.com/tartunlp/nazgul\n\n""".format(", ".join(list(supportedOutLangs)), ", ".join(list(supportedStyles)), defaultStyle)
#############################################################################################
###################################### STDIN and Server #####################################
#############################################################################################
def getConf(rawConf):
style = 'auto'
outlang = 'en'
for field in rawConf.split(','):
if field in supportedStyles:
style = field
if field in supportedOutLangs:
outlang = field
if field in extraSupportedOutLangs:
outlang = extraSupportedOutLangs[field]
return style, outlang
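# Illustrative behaviour of getConf (values follow the tables above):
#   getConf("inf,est") -> ('inf', 'et')
#   getConf("fml,ru")  -> ('fml', 'ru')
#   getConf("")        -> ('auto', 'en')   # falls back to the defaults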
def parseInput(rawText):
global supportedStyles, defaultStyle, supportedOutLangs, defaultOutLang
try:
fullText = rawText['src']
rawStyle, rawOutLang = getConf(rawText['conf'])
livesubs = "|" in fullText
sentences = fullText.split("|") if livesubs else sent_tokenize(fullText)
delim = "|" if livesubs else " "
except KeyError:
sentences = rawText['sentences']
rawStyle = rawText['outStyle']
rawOutLang = rawText['outLang']
delim = False
if rawStyle not in supportedStyles:
#raise ValueError("style bad: " + rawStyle)
rawStyle = defaultStyle
if rawOutLang not in supportedOutLangs:
#raise ValueError("out lang bad: " + rawOutLang)
rawOutLang = defaultOutLang
outputLang = rawOutLang
outputStyle = styleToDomain[rawStyle]
return sentences, outputLang, outputStyle, delim
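# Two request shapes are accepted by parseInput (illustrative payloads; the field
# values are examples only):
#   {"src": "Tere. Kuidas läheb?", "conf": "inf,en"}
#   {"sentences": ["Tere.", "Kuidas läheb?"], "outStyle": "inf", "outLang": "en"}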
def decodeRequest(rawMessage):
struct = json.loads(rawMessage.decode('utf-8'))
segments, outputLang, outputStyle, delim = parseInput(struct)
return segments, outputLang, outputStyle, delim
def encodeResponse(translationList, delim):
translationText = delim.join(translationList)
result = json.dumps({'raw_trans': ['-'],
'raw_input': ['-'],
'final_trans': translationText})
return bytes(result, 'utf-8')
def serverTranslationFunc(rawMessage, models):
segments, outputLang, outputStyle, delim = decodeRequest(rawMessage)
translations, _, _, _ = translator.translate(models, segments, outputLang, outputStyle, getCnstrs())
return encodeResponse(translations, delim)
def startTranslationServer(models, ip, port):
log("started server")
# start listening as a socket server; apply serverTranslationFunc to incoming messages to genereate the response
sock.startServer(serverTranslationFunc, (models,), port = port, host = ip)
def translateStdinInBatches(models, outputLang, outputStyle):
"""Read lines from STDIN and treat each as a segment to translate;
translate them and print out tab-separated scores (decoder log-prob)
and the translation outputs"""
#read STDIN as a list of segments
lines = [line.strip() for line in sys.stdin]
#translate segments and get translations and scores
translations, scores, _, _ = translator.translate(models, lines, outputLang, outputStyle, getCnstrs())
#print each score and translation, separated with a tab
for translation, score in zip(translations, scores):
print("{0}\t{1}".format(score, translation))
#############################################################################################
################################## Cmdline and main block ###################################
#############################################################################################
def readCmdlineModels():
"""Read translation, truecaser and segmenter model paths from cmdline;
show usage info if failed"""
#This is a quick hack for reading cmdline args, should use argparse instead
try:
translationModelPath = sys.argv[1]
truecaserModelPath = sys.argv[2]
segmenterModelPath = sys.argv[3]
except IndexError:
sys.stderr.write(USAGE_MSG)
sys.exit(-1)
return translationModelPath, truecaserModelPath, segmenterModelPath
def readLangAndStyle():
"""Read output language and style off cmdline.
Language is optional -- if not given, a server is started.
Style is optional -- if not given, default (auto) is used."""
# EAFP
try:
outputLanguage = sys.argv[4]
try:
outputStyle = sys.argv[5]
except IndexError:
outputStyle = defaultStyle
except IndexError:
outputLanguage = None
outputStyle = None
return outputLanguage, outputStyle
if __name__ == "__main__":
# read translation and preprocessing model paths off cmdline
modelPaths = readCmdlineModels()
# read output language and style off cmdline -- both are optional and will be "None" if not given
olang, ostyle = readLangAndStyle()
# load translation and preprocessing models using paths
models = translator.loadModels(*modelPaths)
# if language is given, STDIN is translated; otherwise a server is started
if olang:
translateStdinInBatches(models, olang, ostyle)
else:
# when argparse is finally used, set MY_IP and MY_PORT to cmdline arguments
startTranslationServer(models, MY_IP, MY_PORT)
| mit | -6,915,478,788,811,677,000 | 29.970149 | 145 | 0.667952 | false |
ctalbert/mozharness | test/test_base_config.py | 1 | 7710 | import os
import subprocess
import sys
import unittest
JSON_TYPE = None
try:
import simplejson as json
except ImportError:
import json
JSON_TYPE = 'json'
else:
JSON_TYPE = 'simplejson'
import mozharness.base.config as config
class TestParseConfigFile(unittest.TestCase):
def _get_json_config(self, filename="configs/test/test.json",
output='dict'):
fh = open(filename)
contents = json.load(fh)
fh.close()
        if output == 'dict':
return dict(contents)
else:
return contents
def _get_python_config(self, filename="configs/test/test.py",
output='dict'):
global_dict = {}
local_dict = {}
execfile(filename, global_dict, local_dict)
return local_dict['config']
def test_json_config(self):
c = config.BaseConfig(initial_config_file='test/test.json')
content_dict = self._get_json_config()
for key in content_dict.keys():
self.assertEqual(content_dict[key], c._config[key])
def test_python_config(self):
c = config.BaseConfig(initial_config_file='test/test.py')
config_dict = self._get_python_config()
for key in config_dict.keys():
self.assertEqual(config_dict[key], c._config[key])
def test_illegal_config(self):
self.assertRaises(IOError, config.parse_config_file, "this_file_does_not_exist.py", search_path="yadda")
def test_illegal_suffix(self):
self.assertRaises(RuntimeError, config.parse_config_file, "test/test.illegal_suffix")
def test_malformed_json(self):
if JSON_TYPE == 'simplejson':
self.assertRaises(json.decoder.JSONDecodeError, config.parse_config_file, "test/test_malformed.json")
else:
self.assertRaises(ValueError, config.parse_config_file, "test/test_malformed.json")
    def test_malformed_python(self):
self.assertRaises(SyntaxError, config.parse_config_file, "test/test_malformed.py")
class TestReadOnlyDict(unittest.TestCase):
control_dict = {
'b':'2',
'c':{'d': '4'},
'e':['f', 'g'],
}
def get_unlocked_ROD(self):
r = config.ReadOnlyDict(self.control_dict)
return r
def get_locked_ROD(self):
r = config.ReadOnlyDict(self.control_dict)
r.lock()
return r
def test_create_ROD(self):
r = self.get_unlocked_ROD()
self.assertEqual(r, self.control_dict,
msg="can't transfer dict to ReadOnlyDict")
def test_pop_item(self):
r = self.get_unlocked_ROD()
r.popitem()
self.assertEqual(len(r), len(self.control_dict) - 1,
msg="can't popitem() ReadOnlyDict when unlocked")
def test_pop(self):
r = self.get_unlocked_ROD()
r.pop('e')
self.assertEqual(len(r), len(self.control_dict) - 1,
msg="can't pop() ReadOnlyDict when unlocked")
def test_set(self):
r = self.get_unlocked_ROD()
r['e'] = 'yarrr'
self.assertEqual(r['e'], 'yarrr',
msg="can't set var in ReadOnlyDict when unlocked")
def test_del(self):
r = self.get_unlocked_ROD()
del r['e']
self.assertEqual(len(r), len(self.control_dict) - 1,
msg="can't del in ReadOnlyDict when unlocked")
def test_clear(self):
r = self.get_unlocked_ROD()
r.clear()
self.assertEqual(r, {},
msg="can't clear() ReadOnlyDict when unlocked")
def test_set_default(self):
r = self.get_unlocked_ROD()
for key in self.control_dict.keys():
r.setdefault(key, self.control_dict[key])
self.assertEqual(r, self.control_dict,
msg="can't setdefault() ReadOnlyDict when unlocked")
def test_locked_set(self):
r = self.get_locked_ROD()
# TODO use |with self.assertRaises(AssertionError):| if/when we're
# all on 2.7.
try:
r['e'] = 2
except:
pass
else:
self.assertEqual(0, 1, msg="can set r['e'] when locked")
def test_locked_del(self):
r = self.get_locked_ROD()
try:
del r['e']
except:
pass
else:
self.assertEqual(0, 1, "can del r['e'] when locked")
def test_locked_popitem(self):
r = self.get_locked_ROD()
self.assertRaises(AssertionError, r.popitem)
def test_locked_update(self):
r = self.get_locked_ROD()
self.assertRaises(AssertionError, r.update, {})
def test_locked_set_default(self):
r = self.get_locked_ROD()
self.assertRaises(AssertionError, r.setdefault, {})
def test_locked_pop(self):
r = self.get_locked_ROD()
self.assertRaises(AssertionError, r.pop)
def test_locked_clear(self):
r = self.get_locked_ROD()
self.assertRaises(AssertionError, r.clear)
class TestActions(unittest.TestCase):
all_actions=['a', 'b', 'c', 'd', 'e']
default_actions = ['b', 'c', 'd']
def test_verify_actions(self):
c = config.BaseConfig(initial_config_file='test/test.json')
try:
c.verify_actions(['not_a_real_action'])
except:
pass
else:
self.assertEqual(0, 1, msg="verify_actions() didn't die on invalid action")
c = config.BaseConfig(initial_config_file='test/test.json')
returned_actions = c.verify_actions(c.all_actions)
self.assertEqual(c.all_actions, returned_actions,
msg="returned actions from verify_actions() changed")
def test_default_actions(self):
c = config.BaseConfig(default_actions=self.default_actions,
all_actions=self.all_actions,
initial_config_file='test/test.json')
self.assertEqual(self.default_actions, c.get_actions(),
msg="default_actions broken")
def test_no_action1(self):
c = config.BaseConfig(default_actions=self.default_actions,
all_actions=self.all_actions,
initial_config_file='test/test.json')
c.parse_args(args=['foo', '--no-action', 'a'])
self.assertEqual(self.default_actions, c.get_actions(),
msg="--no-ACTION broken")
def test_no_action2(self):
c = config.BaseConfig(default_actions=self.default_actions,
all_actions=self.all_actions,
initial_config_file='test/test.json')
c.parse_args(args=['foo', '--no-c'])
self.assertEqual(['b', 'd'], c.get_actions(),
msg="--no-ACTION broken")
def test_add_action(self):
c = config.BaseConfig(default_actions=self.default_actions,
all_actions=self.all_actions,
initial_config_file='test/test.json')
c.parse_args(args=['foo', '--add-action', 'e'])
self.assertEqual(['b', 'c', 'd', 'e'], c.get_actions(),
msg="--add-action ACTION broken")
def test_only_action(self):
c = config.BaseConfig(default_actions=self.default_actions,
all_actions=self.all_actions,
initial_config_file='test/test.json')
c.parse_args(args=['foo', '--only-a', '--only-e'])
self.assertEqual(['a', 'e'], c.get_actions(),
msg="--only-ACTION broken")
if __name__ == '__main__':
unittest.main()
| mpl-2.0 | -1,693,769,947,599,084,000 | 33.72973 | 113 | 0.563165 | false |
Flamacue/pretix | src/tests/multidomain/test_urlreverse.py | 2 | 3969 | import pytest
from django.conf import settings
from django.test import override_settings
from django.utils.timezone import now
from tests import assert_num_queries
from pretix.base.models import Event, Organizer
from pretix.multidomain.models import KnownDomain
from pretix.multidomain.urlreverse import build_absolute_uri, eventreverse
@pytest.fixture
def env():
o = Organizer.objects.create(name='MRMCD', slug='mrmcd')
event = Event.objects.create(
organizer=o, name='MRMCD2015', slug='2015',
date_from=now()
)
settings.SITE_URL = 'http://example.com'
event.get_cache().clear()
return o, event
@pytest.mark.django_db
def test_event_main_domain_front_page(env):
assert eventreverse(env[1], 'presale:event.index') == '/mrmcd/2015/'
assert eventreverse(env[0], 'presale:organizer.index') == '/mrmcd/'
@pytest.mark.django_db
def test_event_custom_domain_kwargs(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.checkout', {'step': 'payment'}) == 'http://foobar/2015/checkout/payment/'
@pytest.mark.django_db
def test_event_main_domain_kwargs(env):
assert eventreverse(env[1], 'presale:event.checkout', {'step': 'payment'}) == '/mrmcd/2015/checkout/payment/'
@pytest.mark.django_db
def test_event_custom_domain_front_page(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.index') == 'http://foobar/2015/'
assert eventreverse(env[0], 'presale:organizer.index') == 'http://foobar/'
@pytest.mark.django_db
def test_event_custom_domain_keep_port(env):
settings.SITE_URL = 'http://example.com:8081'
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.index') == 'http://foobar:8081/2015/'
@pytest.mark.django_db
def test_event_custom_domain_keep_scheme(env):
settings.SITE_URL = 'https://example.com'
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.index') == 'https://foobar/2015/'
@pytest.mark.django_db
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
})
def test_event_main_domain_cache(env):
env[0].get_cache().clear()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
with assert_num_queries(0):
eventreverse(env[1], 'presale:event.index')
@pytest.mark.django_db
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
})
def test_event_custom_domain_cache(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
env[0].get_cache().clear()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
with assert_num_queries(0):
eventreverse(env[1], 'presale:event.index')
@pytest.mark.django_db
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
})
def test_event_custom_domain_cache_clear(env):
kd = KnownDomain.objects.create(domainname='foobar', organizer=env[0])
env[0].get_cache().clear()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
kd.delete()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
@pytest.mark.django_db
def test_event_main_domain_absolute(env):
assert build_absolute_uri(env[1], 'presale:event.index') == 'http://example.com/mrmcd/2015/'
@pytest.mark.django_db
def test_event_custom_domain_absolute(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert build_absolute_uri(env[1], 'presale:event.index') == 'http://foobar/2015/'
| apache-2.0 | 7,996,995,170,991,871,000 | 32.635593 | 120 | 0.693878 | false |
xaedes/canopen_301_402 | src/canopen_301_402/datatypes.py | 1 | 7126 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import struct
from canopen_301_402.utils import collect_all_leaf_subclasses
from canopen_301_402.utils import parseIntAutoBase
from canopen_301_402.constants import CanOpenBasicDatatypes
class CanDatatype(object):
def __init__(self):
'''
@summary: abstract base class for all can datatypes
        @raises: NotImplementedError
        '''
        raise NotImplementedError
def identifier(self):
'''
@summary: return standard data type identifier
@param self:
@result: uint16 containing data type identifier
@see http://atlas.web.cern.ch/Atlas/GROUPS/DAQTRIG/DCS/LMB/PROFILE/cano-eds.htm
'''
        raise NotImplementedError
def number_of_bits(self):
'''
@summary: returns number of bits for one value encoded with this datatype
@param self:
@result: number of bits
'''
        raise NotImplementedError
def decode(self, data):
'''
@summary: returns value of decoded data
@param self:
@param data: byte array
@result: value
'''
        raise NotImplementedError
def encode(self, value):
'''
@summary: returns encoded value
@param self:
@param value: value to be encoded
@result: data byte array
'''
        raise NotImplementedError
def decode_string(self, string):
'''
@summary: returns value of human readable representation
@param self:
@param string: human readable representation of value as string
@result: value
'''
        raise NotImplementedError
def encode_string(self, value):
'''
@summary: returns human readable representation
@param self:
@param value: value to be encoded
@result: human readable representation of value as string
'''
        raise NotImplementedError
class CanDatatypeStruct(CanDatatype):
def __init__(self, identifier, struct_data_format):
'''
@summary: Can data type base class using python 'struct' module for data coding
@param identifier: specifies can datatype identifier
@param struct_data_format: specifies data format for struct.pack and struct.unpack
example data_format "<i"
'<' little endian
'i' 32 bit signed integer
'''
self._identifier = identifier
self._data_format = struct_data_format
self._number_of_bits = struct.calcsize(self.data_format)*8
def identifier(self):
return self._identifier
def number_of_bits(self):
return self._number_of_bits
@property
def data_format(self):
# '<' : little endian
return '<' + self._data_format
def decode(self, data):
result = struct.unpack_from(self.data_format, data)
# unpack value of length-1 tuples
        if len(result) == 1:
            result, = result
        return result
def encode(self, value):
return bytearray(struct.pack(self.data_format, value))
def decode_string(self, string):
# default implementation tries to interprete as integer number
return parseIntAutoBase(string)
def encode_string(self, value):
return str(value)
class CanDatatypeFloat32(CanDatatypeStruct):
def __init__(self):
super(CanDatatypeFloat32,self).__init__(CanOpenBasicDatatypes.float32,"f")
def decode_string(self, string):
num_value = float(string)
return num_value
class CanDatatypeBoolean(CanDatatypeStruct):
def __init__(self):
super(CanDatatypeBoolean,self).__init__(CanOpenBasicDatatypes.boolean,"?")
def decode_string(self, string):
# look for true/false keywords
if str.lower(string).strip() == "true":
return True
elif str.lower(string).strip() == "false":
return False
# try to interprete as integer number
num_value = parseIntAutoBase(string)
if num_value is None: # interpretation failed
return None
else:
return num_value != 0 # c interpretation of bool
class CanDatatypePDOMapping(CanDatatype):
    def __init__(self, node, identifier, num_mapped=0, mappings=list()):
        '''
        @summary: Can data type representing a specific pdo mapping
        @param node: can node this pdo mapping belongs to
        @param identifier: specifies can datatype identifier
        @param num_mapped: number of currently mapped objects
        @param mappings: list of currently mapped object identifiers
            max_num_mappings will be constant after initialization
            num_mapped & mappings can still be updated (to remap the pdo)
        '''
        self.node = node
        self.canopen = node.canopen
        self._identifier = identifier
        self._num_mapped = num_mapped
        self.max_num_mappings = 64 # max 64 mappings 301_v04020005_cor3.pdf pg. 93
        self.mappings = [0]*self.max_num_mappings
        for k,mapping in enumerate(mappings):
            self.mappings[k] = mapping
def identifier(self):
return self._identifier
    def number_of_bits(self):
        return struct.calcsize(self.data_format)*8
@property
def num_mapped(self):
return self._num_mapped
@num_mapped.setter
def num_mapped(self,v):
if 0 <= v <= self.max_num_mappings:
self._num_mapped = v
else:
raise ValueError()
@property
def data_format(self):
result = ""
for obj_id in self.mappings[:self.num_mapped]:
datatype = self.node.obj_dict.objects[obj_id].datatype
if not hasattr(datatype,"_data_format"):
raise RuntimeError()
result += datatype._data_format
return "<" + result
def decode(self, data):
obj_values = struct.unpack_from(self.data_format, data)
return obj_values
def encode(self, obj_values):
        return bytearray(struct.pack(self.data_format, *obj_values))
def decode_string(self, string):
raise RuntimeError()
def encode_string(self, value):
raise RuntimeError()
class CanDatatypes(object):
def __init__(self):
# generate basic datatypes
self.all_datatypes = list()
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.int8,"b"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.int16,"h"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.int32,"i"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.uint8,"B"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.uint16,"H"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.uint32,"I"))
self.all_datatypes.append(CanDatatypeFloat32())
self.all_datatypes.append(CanDatatypeBoolean())
# add datatypes to dictionary mapping from its identifiers
self.datatypes = dict()
for datatype in self.all_datatypes:
self.datatypes[datatype.identifier().value] = datatype
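# Minimal usage sketch (illustrative, not part of the original module; assumes
# CanOpenBasicDatatypes.uint16 is an enum member as in canopen_301_402.constants):
#
#   registry = CanDatatypes()
#   dt = registry.datatypes[CanOpenBasicDatatypes.uint16.value]
#   data = dt.encode(0x1234)          # -> bytearray(b'\x34\x12'), little endian
#   assert dt.decode(data) == 0x1234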
| mit | -3,884,830,007,540,805,600 | 30.396476 | 90 | 0.621807 | false |
ccxt/ccxt | examples/py/kraken-fetch-my-trades-pagination.py | 1 | 1540 | # -*- coding: utf-8 -*-
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.kraken({
'apiKey': 'YOUR_API_KEY',
'secret': 'YOUR_API_SECRET',
'enableRateLimit': True, # required by the Manual https://github.com/ccxt/ccxt/wiki/Manual#rate-limit
})
exchange.load_markets()
# exchange.verbose = True # uncomment for verbose debug output
exchange.rateLimit = 10000 # set a higher value if you get rate-limiting errors
all_trades = []
offset = 0
while True:
trades = exchange.fetch_my_trades(symbol=None, since=None, limit=None, params={'ofs': offset})
print('-----------------------------------------------------------------')
print(exchange.iso8601(exchange.milliseconds()), 'Fetched', len(trades), 'trades')
if len(trades) < 1:
break
else:
first = exchange.safe_value(trades, 0)
last = exchange.safe_value(trades, len(trades) - 1)
print('From:', first['datetime'])
print('To:', last['datetime'])
all_trades = trades + all_trades
offset += len(trades)
print(len(all_trades), 'trades fetched in total')
print('-----------------------------------------------------------------')
print(len(all_trades), 'trades fetched')
first = exchange.safe_value(all_trades, 0)
if first:
last = exchange.safe_value(all_trades, len(all_trades) - 1)
print('First:', first['datetime'])
print('Last:', last['datetime'])
| mit | -830,446,503,497,661,400 | 31.765957 | 106 | 0.596104 | false |
lindong28/kafka | tests/kafkatest/services/security/security_config.py | 1 | 20652 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
from tempfile import mkdtemp
from shutil import rmtree
from ducktape.template import TemplateRenderer
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.listener_security_config import ListenerSecurityConfig
import itertools
from kafkatest.utils.remote_account import java_version
class SslStores(object):
def __init__(self, local_scratch_dir, logger=None):
self.logger = logger
self.ca_crt_path = os.path.join(local_scratch_dir, "test.ca.crt")
self.ca_jks_path = os.path.join(local_scratch_dir, "test.ca.jks")
self.ca_passwd = "test-ca-passwd"
self.truststore_path = os.path.join(local_scratch_dir, "test.truststore.jks")
self.truststore_passwd = "test-ts-passwd"
self.keystore_passwd = "test-ks-passwd"
# Zookeeper TLS (as of v3.5.6) does not support a key password different than the keystore password
self.key_passwd = self.keystore_passwd
# Allow upto one hour of clock skew between host and VMs
self.startdate = "-1H"
for file in [self.ca_crt_path, self.ca_jks_path, self.truststore_path]:
if os.path.exists(file):
os.remove(file)
def generate_ca(self):
"""
Generate CA private key and certificate.
"""
self.runcmd("keytool -genkeypair -alias ca -keyalg RSA -keysize 2048 -keystore %s -storetype JKS -storepass %s -keypass %s -dname CN=SystemTestCA -startdate %s --ext bc=ca:true" % (self.ca_jks_path, self.ca_passwd, self.ca_passwd, self.startdate))
self.runcmd("keytool -export -alias ca -keystore %s -storepass %s -storetype JKS -rfc -file %s" % (self.ca_jks_path, self.ca_passwd, self.ca_crt_path))
def generate_truststore(self):
"""
Generate JKS truststore containing CA certificate.
"""
self.runcmd("keytool -importcert -alias ca -file %s -keystore %s -storepass %s -storetype JKS -noprompt" % (self.ca_crt_path, self.truststore_path, self.truststore_passwd))
def generate_and_copy_keystore(self, node):
"""
Generate JKS keystore with certificate signed by the test CA.
The generated certificate has the node's hostname as a DNS SubjectAlternativeName.
"""
ks_dir = mkdtemp(dir="/tmp")
ks_path = os.path.join(ks_dir, "test.keystore.jks")
csr_path = os.path.join(ks_dir, "test.kafka.csr")
crt_path = os.path.join(ks_dir, "test.kafka.crt")
self.runcmd("keytool -genkeypair -alias kafka -keyalg RSA -keysize 2048 -keystore %s -storepass %s -storetype JKS -keypass %s -dname CN=systemtest -ext SAN=DNS:%s -startdate %s" % (ks_path, self.keystore_passwd, self.key_passwd, self.hostname(node), self.startdate))
self.runcmd("keytool -certreq -keystore %s -storepass %s -storetype JKS -keypass %s -alias kafka -file %s" % (ks_path, self.keystore_passwd, self.key_passwd, csr_path))
self.runcmd("keytool -gencert -keystore %s -storepass %s -storetype JKS -alias ca -infile %s -outfile %s -dname CN=systemtest -ext SAN=DNS:%s -startdate %s" % (self.ca_jks_path, self.ca_passwd, csr_path, crt_path, self.hostname(node), self.startdate))
self.runcmd("keytool -importcert -keystore %s -storepass %s -storetype JKS -alias ca -file %s -noprompt" % (ks_path, self.keystore_passwd, self.ca_crt_path))
self.runcmd("keytool -importcert -keystore %s -storepass %s -storetype JKS -keypass %s -alias kafka -file %s -noprompt" % (ks_path, self.keystore_passwd, self.key_passwd, crt_path))
node.account.copy_to(ks_path, SecurityConfig.KEYSTORE_PATH)
# generate ZooKeeper client TLS config file for encryption-only (no client cert) use case
str = """zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=true
zookeeper.ssl.truststore.location=%s
zookeeper.ssl.truststore.password=%s
""" % (SecurityConfig.TRUSTSTORE_PATH, self.truststore_passwd)
node.account.create_file(SecurityConfig.ZK_CLIENT_TLS_ENCRYPT_ONLY_CONFIG_PATH, str)
# also generate ZooKeeper client TLS config file for mutual authentication use case
str = """zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
zookeeper.ssl.client.enable=true
zookeeper.ssl.truststore.location=%s
zookeeper.ssl.truststore.password=%s
zookeeper.ssl.keystore.location=%s
zookeeper.ssl.keystore.password=%s
""" % (SecurityConfig.TRUSTSTORE_PATH, self.truststore_passwd, SecurityConfig.KEYSTORE_PATH, self.keystore_passwd)
node.account.create_file(SecurityConfig.ZK_CLIENT_MUTUAL_AUTH_CONFIG_PATH, str)
rmtree(ks_dir)
def hostname(self, node):
""" Hostname which may be overridden for testing validation failures
"""
return node.account.hostname
def runcmd(self, cmd):
if self.logger:
self.logger.log(logging.DEBUG, cmd)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Command '%s' returned non-zero exit status %d: %s" % (cmd, proc.returncode, stdout))
class SecurityConfig(TemplateRenderer):
PLAINTEXT = 'PLAINTEXT'
SSL = 'SSL'
SASL_PLAINTEXT = 'SASL_PLAINTEXT'
SASL_SSL = 'SASL_SSL'
SASL_SECURITY_PROTOCOLS = [SASL_PLAINTEXT, SASL_SSL]
SSL_SECURITY_PROTOCOLS = [SSL, SASL_SSL]
SASL_MECHANISM_GSSAPI = 'GSSAPI'
SASL_MECHANISM_PLAIN = 'PLAIN'
SASL_MECHANISM_SCRAM_SHA_256 = 'SCRAM-SHA-256'
SASL_MECHANISM_SCRAM_SHA_512 = 'SCRAM-SHA-512'
SCRAM_CLIENT_USER = "kafka-client"
SCRAM_CLIENT_PASSWORD = "client-secret"
SCRAM_BROKER_USER = "kafka-broker"
SCRAM_BROKER_PASSWORD = "broker-secret"
CONFIG_DIR = "/mnt/security"
KEYSTORE_PATH = "/mnt/security/test.keystore.jks"
TRUSTSTORE_PATH = "/mnt/security/test.truststore.jks"
ZK_CLIENT_TLS_ENCRYPT_ONLY_CONFIG_PATH = "/mnt/security/zk_client_tls_encrypt_only_config.properties"
ZK_CLIENT_MUTUAL_AUTH_CONFIG_PATH = "/mnt/security/zk_client_mutual_auth_config.properties"
JAAS_CONF_PATH = "/mnt/security/jaas.conf"
# allows admin client to connect with broker credentials to create User SCRAM credentials
ADMIN_CLIENT_AS_BROKER_JAAS_CONF_PATH = "/mnt/security/admin_client_as_broker_jaas.conf"
KRB5CONF_PATH = "/mnt/security/krb5.conf"
KEYTAB_PATH = "/mnt/security/keytab"
# This is initialized only when the first instance of SecurityConfig is created
ssl_stores = None
def __init__(self, context, security_protocol=None, interbroker_security_protocol=None,
client_sasl_mechanism=SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SASL_MECHANISM_GSSAPI,
zk_sasl=False, zk_tls=False, template_props="", static_jaas_conf=True, jaas_override_variables=None,
listener_security_config=ListenerSecurityConfig(), tls_version=None,
serves_controller_sasl_mechanism=None, # Raft Controller does this
serves_intercontroller_sasl_mechanism=None, # Raft Controller does this
uses_controller_sasl_mechanism=None, # communication to Raft Controller (broker and controller both do this)
raft_tls=False):
"""
Initialize the security properties for the node and copy
keystore and truststore to the remote node if the transport protocol
is SSL. If security_protocol is None, the protocol specified in the
template properties file is used. If no protocol is specified in the
template properties either, PLAINTEXT is used as default.
"""
self.context = context
if not SecurityConfig.ssl_stores:
# This generates keystore/trustore files in a local scratch directory which gets
# automatically destroyed after the test is run
# Creating within the scratch directory allows us to run tests in parallel without fear of collision
SecurityConfig.ssl_stores = SslStores(context.local_scratch_dir, context.logger)
SecurityConfig.ssl_stores.generate_ca()
SecurityConfig.ssl_stores.generate_truststore()
if security_protocol is None:
security_protocol = self.get_property('security.protocol', template_props)
if security_protocol is None:
security_protocol = SecurityConfig.PLAINTEXT
elif security_protocol not in [SecurityConfig.PLAINTEXT, SecurityConfig.SSL, SecurityConfig.SASL_PLAINTEXT, SecurityConfig.SASL_SSL]:
raise Exception("Invalid security.protocol in template properties: " + security_protocol)
if interbroker_security_protocol is None:
interbroker_security_protocol = security_protocol
self.interbroker_security_protocol = interbroker_security_protocol
serves_raft_sasl = []
if serves_controller_sasl_mechanism is not None:
serves_raft_sasl += [serves_controller_sasl_mechanism]
if serves_intercontroller_sasl_mechanism is not None:
serves_raft_sasl += [serves_intercontroller_sasl_mechanism]
self.serves_raft_sasl = set(serves_raft_sasl)
uses_raft_sasl = []
if uses_controller_sasl_mechanism is not None:
uses_raft_sasl += [uses_controller_sasl_mechanism]
self.uses_raft_sasl = set(uses_raft_sasl)
self.zk_sasl = zk_sasl
self.zk_tls = zk_tls
self.static_jaas_conf = static_jaas_conf
self.listener_security_config = listener_security_config
self.properties = {
'security.protocol' : security_protocol,
'ssl.keystore.location' : SecurityConfig.KEYSTORE_PATH,
'ssl.keystore.password' : SecurityConfig.ssl_stores.keystore_passwd,
'ssl.key.password' : SecurityConfig.ssl_stores.key_passwd,
'ssl.truststore.location' : SecurityConfig.TRUSTSTORE_PATH,
'ssl.truststore.password' : SecurityConfig.ssl_stores.truststore_passwd,
'ssl.endpoint.identification.algorithm' : 'HTTPS',
'sasl.mechanism' : client_sasl_mechanism,
'sasl.mechanism.inter.broker.protocol' : interbroker_sasl_mechanism,
'sasl.kerberos.service.name' : 'kafka'
}
self.raft_tls = raft_tls
if tls_version is not None:
self.properties.update({'tls.version' : tls_version})
self.properties.update(self.listener_security_config.client_listener_overrides)
self.jaas_override_variables = jaas_override_variables or {}
self.calc_has_sasl()
self.calc_has_ssl()
def calc_has_sasl(self):
self.has_sasl = self.is_sasl(self.properties['security.protocol']) \
or self.is_sasl(self.interbroker_security_protocol) \
or self.zk_sasl \
or self.serves_raft_sasl or self.uses_raft_sasl
def calc_has_ssl(self):
self.has_ssl = self.is_ssl(self.properties['security.protocol']) \
or self.is_ssl(self.interbroker_security_protocol) \
or self.zk_tls \
or self.raft_tls
def client_config(self, template_props="", node=None, jaas_override_variables=None,
use_inter_broker_mechanism_for_client = False):
# If node is not specified, use static jaas config which will be created later.
# Otherwise use static JAAS configuration files with SASL_SSL and sasl.jaas.config
# property with SASL_PLAINTEXT so that both code paths are tested by existing tests.
# Note that this is an arbitrary choice and it is possible to run all tests with
# either static or dynamic jaas config files if required.
static_jaas_conf = node is None or (self.has_sasl and self.has_ssl)
if use_inter_broker_mechanism_for_client:
client_sasl_mechanism_to_use = self.interbroker_sasl_mechanism
else:
# csv is supported here, but client configs only supports a single mechanism,
# so arbitrarily take the first one defined in case it has multiple values
client_sasl_mechanism_to_use = self.client_sasl_mechanism.split(',')[0].strip()
return SecurityConfig(self.context, self.security_protocol,
client_sasl_mechanism=client_sasl_mechanism_to_use,
template_props=template_props,
static_jaas_conf=static_jaas_conf,
jaas_override_variables=jaas_override_variables,
listener_security_config=self.listener_security_config,
tls_version=self.tls_version)
def enable_sasl(self):
self.has_sasl = True
def enable_ssl(self):
self.has_ssl = True
def enable_security_protocol(self, security_protocol):
self.has_sasl = self.has_sasl or self.is_sasl(security_protocol)
self.has_ssl = self.has_ssl or self.is_ssl(security_protocol)
def setup_ssl(self, node):
node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
node.account.copy_to(SecurityConfig.ssl_stores.truststore_path, SecurityConfig.TRUSTSTORE_PATH)
SecurityConfig.ssl_stores.generate_and_copy_keystore(node)
def setup_sasl(self, node):
node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
jaas_conf_file = "jaas.conf"
java_version = node.account.ssh_capture("java -version")
jaas_conf = None
if 'sasl.jaas.config' not in self.properties:
jaas_conf = self.render_jaas_config(
jaas_conf_file,
{
'node': node,
'is_ibm_jdk': any('IBM' in line for line in java_version),
'SecurityConfig': SecurityConfig,
'client_sasl_mechanism': self.client_sasl_mechanism,
'enabled_sasl_mechanisms': self.enabled_sasl_mechanisms
}
)
else:
jaas_conf = self.properties['sasl.jaas.config']
if self.static_jaas_conf:
node.account.create_file(SecurityConfig.JAAS_CONF_PATH, jaas_conf)
node.account.create_file(SecurityConfig.ADMIN_CLIENT_AS_BROKER_JAAS_CONF_PATH,
self.render_jaas_config(
"admin_client_as_broker_jaas.conf",
{
'node': node,
'is_ibm_jdk': any('IBM' in line for line in java_version),
'SecurityConfig': SecurityConfig,
'client_sasl_mechanism': self.client_sasl_mechanism,
'enabled_sasl_mechanisms': self.enabled_sasl_mechanisms
}
))
elif 'sasl.jaas.config' not in self.properties:
self.properties['sasl.jaas.config'] = jaas_conf.replace("\n", " \\\n")
if self.has_sasl_kerberos:
node.account.copy_to(MiniKdc.LOCAL_KEYTAB_FILE, SecurityConfig.KEYTAB_PATH)
node.account.copy_to(MiniKdc.LOCAL_KRB5CONF_FILE, SecurityConfig.KRB5CONF_PATH)
def render_jaas_config(self, jaas_conf_file, config_variables):
"""
Renders the JAAS config file contents
:param jaas_conf_file: name of the JAAS config template file
:param config_variables: dict of variables used in the template
:return: the rendered template string
"""
variables = config_variables.copy()
variables.update(self.jaas_override_variables) # override variables
return self.render(jaas_conf_file, **variables)
def setup_node(self, node):
if self.has_ssl:
self.setup_ssl(node)
if self.has_sasl:
self.setup_sasl(node)
if java_version(node) <= 11 and self.properties.get('tls.version') == 'TLSv1.3':
self.properties.update({'tls.version': 'TLSv1.2'})
def clean_node(self, node):
if self.security_protocol != SecurityConfig.PLAINTEXT:
node.account.ssh("rm -rf %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
def get_property(self, prop_name, template_props=""):
"""
Get property value from the string representation of
a properties file.
"""
value = None
for line in template_props.split("\n"):
items = line.split("=")
if len(items) == 2 and items[0].strip() == prop_name:
value = str(items[1].strip())
return value
def is_ssl(self, security_protocol):
return security_protocol in SecurityConfig.SSL_SECURITY_PROTOCOLS
def is_sasl(self, security_protocol):
return security_protocol in SecurityConfig.SASL_SECURITY_PROTOCOLS
def is_sasl_scram(self, sasl_mechanism):
return sasl_mechanism == SecurityConfig.SASL_MECHANISM_SCRAM_SHA_256 or sasl_mechanism == SecurityConfig.SASL_MECHANISM_SCRAM_SHA_512
@property
def security_protocol(self):
return self.properties['security.protocol']
@property
def tls_version(self):
return self.properties.get('tls.version')
@property
def client_sasl_mechanism(self):
return self.properties['sasl.mechanism']
@property
def interbroker_sasl_mechanism(self):
return self.properties['sasl.mechanism.inter.broker.protocol']
@property
def enabled_sasl_mechanisms(self):
sasl_mechanisms = []
if self.is_sasl(self.security_protocol):
sasl_mechanisms += [self.client_sasl_mechanism]
if self.is_sasl(self.interbroker_security_protocol):
sasl_mechanisms += [self.interbroker_sasl_mechanism]
if self.serves_raft_sasl:
sasl_mechanisms += list(self.serves_raft_sasl)
if self.uses_raft_sasl:
sasl_mechanisms += list(self.uses_raft_sasl)
return set(sasl_mechanisms)
@property
def has_sasl_kerberos(self):
return self.has_sasl and (SecurityConfig.SASL_MECHANISM_GSSAPI in self.enabled_sasl_mechanisms)
@property
def kafka_opts(self):
if self.has_sasl:
if self.static_jaas_conf:
return "\"-Djava.security.auth.login.config=%s -Djava.security.krb5.conf=%s\"" % (SecurityConfig.JAAS_CONF_PATH, SecurityConfig.KRB5CONF_PATH)
else:
return "\"-Djava.security.krb5.conf=%s\"" % SecurityConfig.KRB5CONF_PATH
else:
return ""
def props(self, prefix=''):
"""
Return properties as string with line separators, optionally with a prefix.
This is used to append security config properties to
a properties file.
:param prefix: prefix to add to each property
:return: a string containing line-separated properties
"""
if self.security_protocol == SecurityConfig.PLAINTEXT:
return ""
if self.has_sasl and not self.static_jaas_conf and 'sasl.jaas.config' not in self.properties:
raise Exception("JAAS configuration property has not yet been initialized")
config_lines = (prefix + key + "=" + value for key, value in self.properties.items())
# Extra blank lines ensure this can be appended/prepended safely
return "\n".join(itertools.chain([""], config_lines, [""]))
def __str__(self):
"""
Return properties as a string with line separators.
"""
return self.props()
| apache-2.0 | 4,421,555,070,726,050,300 | 48.171429 | 274 | 0.649816 | false |
MadMac/PyTetris | src/main/main.py | 1 | 1650 | import pygame, sys, os, random
from classes import *
from pygame.locals import *
blocksFile = "blocks.txt"
thisBlock = ""
allBlocks = []
boardWidth = 15
boardHeight = 20
gameOver = False
# Make all the blocks which are in file "blocks.txt"
file = open(blocksFile, "r")
while file:
line = file.readline()
if line.find("END") >= 0:
break
if line.find("/") >= 0:
allBlocks.append(blockStyle(thisBlock))
thisBlock = ""
continue
thisBlock = thisBlock + line
# Make board
gameBoard = board(boardWidth, boardHeight)
# All pygame init
pygame.init()
gameWindow = pygame.display.set_mode((640, 480))
pygame.display.set_caption('PyTetris')
clock = pygame.time.Clock()
playerBlock = block(boardWidth, boardHeight, allBlocks[random.randrange(len(allBlocks))].getStyle(), gameBoard)
pygame.time.Clock()
pygame.time.set_timer(pygame.USEREVENT + 1, 150)
pygame.time.set_timer(pygame.USEREVENT + 2, 1000)
#Game loop
while gameOver == False:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = True
elif event.type == KEYDOWN and event.key == K_ESCAPE:
gameOver = True
elif event.type == pygame.USEREVENT + 1:
playerBlock.handlePlayerInput()
elif event.type == pygame.USEREVENT + 2:
playerBlock.updatePlayer()
if playerBlock.isDown == True:
playerBlock.changeStyle(allBlocks[random.randrange(len(allBlocks))].getStyle())
gameWindow.fill((0,0,0))
gameBoard.drawBoard()
gameBoard.update()
playerBlock.drawBlock()
pygame.display.flip()
pygame.quit()
| mit | -2,331,694,246,887,766,000 | 23.626866 | 111 | 0.664242 | false |
OpenNews/opennews-source | source/base/feeds.py | 1 | 7562 | from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from source.articles.models import Article, Section, Category
from source.code.models import Code
from source.guides.models import Guide
from source.jobs.models import Job
from source.tags.models import TechnologyTag, ConceptTag
from source.tags.utils import get_validated_tag_list, get_tag_filtered_queryset
from taggit.models import Tag
ONE_DAY_AGO = datetime.now() - timedelta(hours=24)
class ObjectWithTagsFeed(Feed):
'''common get_object for Article and Code feeds to handle tag queries'''
def get_object(self, request, *args, **kwargs):
self.section = kwargs.get('section', None)
if self.section:
self.section = get_object_or_404(Section, slug=self.section)
self.category = kwargs.get('category', None)
if self.category:
self.category = get_object_or_404(Category, slug=self.category)
self.tag_slugs = kwargs.get('tag_slugs', None)
if self.tag_slugs:
self.tag_slug_list = self.tag_slugs.split('+')
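            # e.g. a URL segment like "data+maps" (hypothetical tag slugs) becomes ["data", "maps"]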
self.tags = get_validated_tag_list(self.tag_slug_list, tags=[])
return ''
class ArticleFeed(ObjectWithTagsFeed):
description_template = "feeds/article_description.html"
def title(self, obj):
if self.section:
return "Source: %s" % self.section.name
elif self.category:
return "Source: Articles in the category %s" % self.category.name
elif self.tag_slugs:
return "Source: Articles tagged with '%s'" % "+".join([tag.name for tag in self.tags])
return "Source"
def link(self, obj):
if self.section:
return reverse('article_list')
#return reverse('article_list_by_section', kwargs={'section': self.section.slug})
elif self.category:
return reverse('article_list_by_category', kwargs={'category': self.category.slug})
elif self.tag_slugs:
return reverse('article_list_by_tag', kwargs={'tag_slugs': self.tag_slugs})
return reverse('homepage')
def description(self, obj):
identifier = 'from Source'
if self.section:
identifier = "in the %s section" % self.section.name
elif self.category:
identifier = "in the %s category" % self.category.name
elif self.tag_slugs:
identifier = "tagged with '%s'" % "+".join([tag.name for tag in self.tags])
return "Recent articles %s" % identifier
def item_title(self, item):
_title = item.title
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_title = "THIS IS A TEST ARTICLE ON THE STAGING SITE: " + _title
return _title
def item_pubdate(self, item):
return item.pubdate
def item_author_name(self, item):
if item.get_live_author_set().exists():
return ','.join([author.name() for author in item.get_live_author_set()])
return ''
def item_categories(self, item):
if item.category:
return [item.category.name]
return ''
def items(self, obj):
queryset = Article.live_objects.filter(show_in_lists=True)
if self.section:
queryset = queryset.filter(category__section=self.section)
elif self.category:
queryset = queryset.filter(category=self.category)
elif self.tag_slugs:
queryset = get_tag_filtered_queryset(queryset, self.tag_slug_list)
return queryset[:20]
class CodeFeed(ObjectWithTagsFeed):
def title(self, obj):
identifier = ""
if self.tag_slugs:
identifier = " tagged '%s'" % "+".join([tag.name for tag in self.tags])
return "Source: Code%s" % identifier
def link(self, obj):
if self.tag_slugs:
return reverse('code_list_by_tag', kwargs={'tag_slugs': self.tag_slugs})
return reverse('code_list')
def description(self, obj):
identifier = " from Source"
if self.tag_slugs:
identifier = " tagged '%s'" % "+".join([tag.name for tag in self.tags])
return "Recent code index pages%s" % identifier
def item_title(self, item):
_name = item.name
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_description(self, item):
return item.description
def items(self, obj):
queryset = Code.live_objects.order_by('-created')
if self.tag_slugs:
queryset = get_tag_filtered_queryset(queryset, self.tag_slug_list)
return queryset[:20]
class JobFeed(Feed):
def title(self, obj):
return "Source: Jobs"
def link(self, obj):
return reverse('job_list')
def description(self, obj):
return 'Recent jobs listed on Source'
def item_title(self, item):
_name = item.name
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_description(self, item):
return 'Job posting from %s' % item.organization
def item_link(self, item):
'''
We don't have individual detail pages, so use item.url
or fall back to jobs list page.
'''
return item.url or reverse('job_list')
def items(self, obj):
queryset = Job.live_objects.order_by('-created')
return queryset[:20]
class GuideFeed(Feed):
def title(self, obj):
return "Source: Guides"
def link(self, obj):
return reverse('guide_list')
def description(self, obj):
return 'Recent guides from Source'
def item_title(self, item):
_name = item.title
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_description(self, item):
return item.summary_or_description
def items(self, obj):
queryset = Guide.live_objects.order_by('-pubdate')
return queryset[:20]
class RecentArticleSummaryFeed(Feed):
description_template = "feeds/article_summary_only.html"
def title(self, obj):
return "Source: Latest Article Summaries"
def link(self, obj):
return reverse('article_list')
def description(self, obj):
return 'Recent articles from Source'
def item_title(self, item):
_name = item.title
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_pubdate(self, item):
return item.pubdate
def item_author_name(self, item):
if item.get_live_author_set().exists():
return ','.join([author.name() for author in item.get_live_author_set()])
return ''
def item_description(self, item):
return item.safe_summary
def items(self, obj):
queryset = Article.live_objects.filter(show_in_lists=True)
queryset = queryset.filter(pubdate__gte=ONE_DAY_AGO)
return queryset
| mit | -2,670,830,463,679,315,500 | 32.758929 | 98 | 0.618752 | false |
openslack/openslack-web | openslack/apps/company/views.py | 1 | 1072 | # encoding:utf-8
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from apps.company.models import Company, Comment
from django.shortcuts import get_object_or_404
from utils.page import paginator_objects
def index(request):
return render_to_response('index.html', {}, RequestContext(request))
def company_list(request, template, page=1):
"""
    Company category list view.
"""
page = int(page)
companies = Company.objects.filter(status=True)
print companies
page_range, companies = paginator_objects(page, companies)
dt_view = {
"companies": companies,
"page_range": page_range,
"page": page
}
return render_to_response(template, dt_view, context_instance=RequestContext(request))
# @silk_profile(name='Get Detail')
def company_detail(request, template, pk):
company = get_object_or_404(Company, pk=pk, status=True)
dt_view = {
"company": company,
}
return render_to_response(template, dt_view, context_instance=RequestContext(request))
| apache-2.0 | -7,157,730,011,399,489,000 | 29.285714 | 90 | 0.701887 | false |
localhuman/neo-python | examples/json-rpc-api-server.py | 1 | 2783 | #!/usr/bin/env python3
"""
This example provides a JSON-RPC API to query blockchain data, implementing `neo.api.JSONRPC.JsonRpcApi`
"""
import argparse
import os
from logzero import logger
from twisted.internet import reactor, task
from neo import __version__
from neo.Core.Blockchain import Blockchain
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB
from neo.api.JSONRPC.JsonRpcApi import JsonRpcApi
from neo.Network.NodeLeader import NodeLeader
from neo.Settings import settings, DIR_PROJECT_ROOT
from neo.UserPreferences import preferences
# Logfile settings & setup
LOGFILE_FN = os.path.join(DIR_PROJECT_ROOT, 'json-rpc.log')
LOGFILE_MAX_BYTES = 5e7 # 50 MB
LOGFILE_BACKUP_COUNT = 3 # 3 logfiles history
settings.set_logfile(LOGFILE_FN, LOGFILE_MAX_BYTES, LOGFILE_BACKUP_COUNT)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mainnet", action="store_true", default=False,
help="Use MainNet instead of the default TestNet")
parser.add_argument("-p", "--privnet", action="store_true", default=False,
help="Use PrivNet instead of the default TestNet")
parser.add_argument("-c", "--config", action="store", help="Use a specific config file")
parser.add_argument('--version', action='version',
version='neo-python v{version}'.format(version=__version__))
args = parser.parse_args()
if args.config and (args.mainnet or args.privnet):
print("Cannot use both --config and --mainnet/--privnet arguments, please use only one.")
exit(1)
if args.mainnet and args.privnet:
print("Cannot use both --mainnet and --privnet arguments")
exit(1)
# Setup depending on command line arguments. By default, the testnet settings are already loaded.
if args.config:
settings.setup(args.config)
elif args.mainnet:
settings.setup_mainnet()
elif args.privnet:
settings.setup_privnet()
# Instantiate the blockchain and subscribe to notifications
blockchain = LevelDBBlockchain(settings.LEVELDB_PATH)
Blockchain.RegisterBlockchain(blockchain)
dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
dbloop.start(.1)
settings.set_log_smart_contract_events(False)
ndb = NotificationDB.instance()
ndb.start()
# Run
reactor.suggestThreadPoolSize(15)
NodeLeader.Instance().Start()
host = "0.0.0.0"
port = settings.RPC_PORT
logger.info("Starting json-rpc api server on http://%s:%s" % (host, port))
api_server = JsonRpcApi(port)
api_server.app.run(host, port)
if __name__ == "__main__":
main()
| mit | 3,520,455,970,852,346,400 | 34.227848 | 104 | 0.701761 | false |
ehua7365/RibbonOperators | TEBD/mpstest8.py | 1 | 5968 | """
mpstest8.py
A test of manipulating matrix product states with numpy.
2014-08-25
"""
import numpy as np
import matplotlib.pyplot as plt
from cmath import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def main():
#test3()
getMPS(randomState(2,5),3).shape
def test3():
""" Test MPS conversion functions by computing fidelity between
    generated MPS and original, with new and old bond dimensions
chi0 and chi1 varied.
"""
print("*** Started testing MPS ***")
N = 5
d = 2
# Points to plot on 3d graph
(X,Y,Z) = ([],[],[])
for chi0 in xrange(1,5):
for chi1 in xrange(1,5):
F = 0
            # Run random test for 10 trials and take average fidelity
for i in xrange(10):
mps0 = randomMPS(N,chi0,d) # Make random MPS
state0 = getState(mps0) # Convert to state
mps1 = getMPS(state0,chi1) # Convert back to MPS with new bond dimension
state1 = getState(mps1) # Convert back to state
F += fidelityMPS(mps0,mps1) # Compute fidelity and add to sum
# F += fidelity(state0,state1) # Uncomment this to try with vectors
X.append(chi0)
Y.append(chi1)
            Z.append(F/10)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
# Plot the surface
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)
ax.set_xlabel('chi0')
ax.set_ylabel('chi1')
ax.set_zlabel('fidelity')
plt.show()
print("*** Finished testing MPS ***")
def fidelityMPS(A,B):
""" Fidelity of two MPS representations
f = <A|B><B|A>/(<A|A><B|B>).
"""
return innerProduct(A,B)*innerProduct(B,A)\
/innerProduct(A,A)/innerProduct(B,B)
def fidelity(a,b):
""" Fidelity of two state vectors
f = <a|b><b|a>/(<a|a><b|b>).
"""
return np.inner(np.conj(a),b)*np.inner(np.conj(b),a)\
/np.inner(np.conj(a),a)/np.inner(np.conj(b),b)
def randomMPS(N,chi,d):
""" Returns a random MPS given parameters N, chi, d."""
A = []
for i in xrange(N):
# Each real part of each value varies between -0.5 and 0.5.
A.append((np.random.rand(chi,d,chi)-.5)+1j*(np.random.rand(chi,d,chi)-.5))
return np.array(A)
def getState(A):
""" State vector of a MPS by contracting MPS."""
N = len(A) # Number of spins
chi = A[0].shape[0] # Bond dimension
d = A[0].shape[1] # d = 2 for qubits
c = A[0]
for i in xrange(1,N):
c = np.tensordot(c,A[i],axes=(-1,0))
c = np.trace(c,axis1=0,axis2=-1)
return np.reshape(c,d**N)
def getMPS(state,chi):
""" MPS of a state."""
d = 2 # Qubits have 2 states each
N = int(np.log2(len(state))) # Number of qubits
c = np.reshape(state,cShape(d,N)) # State amplitudes tensor c.
A = [] # List of N matrices of MPS, each of shape (chi,d,chi)
# Start left end with a vector of size (d,chi)
c = np.reshape(c,(d,d**(N-1))) # Reshape c
(ap,sv,c) = np.linalg.svd(c) # Apply SVD
s = np.zeros((d,chi),dtype=complex) # Construct singular value matrix shape
s[:d,:d] = np.diag(sv[:chi]) # Fill s with singular values
# Trim c or fill rest of c with zeros
newc = np.zeros((chi,d**(N-1)),dtype=complex)
newc[:min(chi,d**(N-1)),:] = c[:chi,:]
c = newc
A.append(np.dot(ap,s)) # Contract and append to A
# Sweep through the middle, creating matrix products each with
# shape (chi,d,chi)
for i in xrange(1,N-2):
c = np.reshape(c,(d*chi,d**(N-i-1)))
(ap,sv,c) = np.linalg.svd(c)
s = np.zeros((d*chi,chi),dtype=complex)
s[:min(chi,len(sv)),:min(chi,len(sv))] = np.diag(sv[:chi])
A.append(np.reshape(np.dot(ap,s),(chi,d,chi)))
newc = np.zeros((chi,d**(N-i-1)),dtype=complex)
newc[:min(chi,len(sv)),:] = c[:chi,:]
c = newc
# Finish right end with the remaining vector
c = np.reshape(c,(d*chi,d))
(ap,sv,c) = np.linalg.svd(c)
s = np.zeros((chi,d),dtype=complex)
s[:d,:d] = np.diag(sv[:chi])
A.append(np.reshape(ap[:chi,:],(chi,d,chi)))
c = np.dot(s,c)
A.append(c)
prod = A[0]
for i in xrange(1,N):
prod = np.tensordot(prod,A[i],axes=(-1,0))
print(prod-np.reshape(state,cShape(d,N)))
# Fix up ends by filling first row of correctly shaped zeros with
# end vectors such that the trace is preserved.
start = np.zeros((chi,d,chi),dtype=complex)
start[0,:,:] = A[0]
A[0] = start
finish = np.zeros((chi,d,chi),dtype=complex)
finish[:,:,0] = A[-1]
A[-1] = finish
# Return MPS as numpy array with shape (N,chi,d,chi)
return np.array(A)
def innerProduct(A,B):
""" Inner product <A|B> using transfer matrices
    where A and B are MPS representations of |A> and |B>.
"""
N = len(A) # Number of qubits
chiA = A.shape[1] # bond dimension of MPS in A
chiB = B.shape[1] # bond dimension of MPS in B
d = A.shape[2] # d = 2 for qubits
# Take adjoint of |A> to get <A|
A = np.conj(A)
# Construct list of transfer matrices by contracting pairs of
# tensors from A and B.
transfer = []
for i in xrange(N):
t = np.tensordot(A[i],B[i],axes=(1,1))
t = np.transpose(t,axes=(0,2,1,3))
t = np.reshape(t,(chiA*chiB,chiA*chiB))
transfer.append(t)
# Contract the transfer matrices.
prod = transfer[0]
for i in xrange(1,len(transfer)):
prod = np.tensordot(prod,transfer[i],axes=(-1,0))
return np.trace(prod)
def randomState(d,N):
state = (np.random.rand(d**N)-.5) + (np.random.rand(d**N)-.5)*1j
state = state/np.linalg.norm(state)
return state
def cShape(d,N):
""" Returns the shape of c tensor representation.
I.e. simply just (d,d,...,d) N times.
"""
return tuple([d for i in xrange(N)])
if __name__ == "__main__":
main()
| mit | -5,615,611,594,896,353,000 | 31.972376 | 88 | 0.575737 | false |
PolyCortex/pyMuse | pymuse/signal.py | 1 | 1235 | from dataclasses import dataclass
from threading import Event
from pymuse.utils.stoppablequeue import StoppableQueue
@dataclass
class SignalData():
"""
    Dataclass for a signal data point. The event_marker attribute is optional.
"""
time: float
values: list
event_marker: list = None
class Signal():
"""Represents the accumulated signal that is store in a queue. It tag every sample with a time"""
def __init__(self, length: int, acquisition_frequency: float):
self._shutdown_event = Event()
self._signal_queue: StoppableQueue = StoppableQueue(length, self._shutdown_event)
self._signal_period: float = (1 / acquisition_frequency)
self._data_counter: int = 0
@property
def signal_queue(self) -> StoppableQueue:
return self._signal_queue
def push(self, data_list: list):
time = self._data_counter * self._signal_period
signal_data: SignalData = SignalData(time, data_list)
self._signal_queue.put(signal_data, True, self._signal_period)
self._data_counter += 1
def pop(self, timeout=None) -> SignalData:
return self._signal_queue.get(True, timeout)
def shutdown(self):
self._shutdown_event.set()
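# Example usage (illustrative; a 2-channel signal sampled at 256 Hz):
#   signal = Signal(length=1024, acquisition_frequency=256.0)
#   signal.push([42.0, 13.5])        # stored as a SignalData tagged with time 0.0
#   sample = signal.pop(timeout=1.0) # -> SignalData(time=0.0, values=[42.0, 13.5])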
| mit | 1,287,241,640,009,721,900 | 31.5 | 101 | 0.670445 | false |
p-hofmann/ConfigParserWrapper | configparserwrapper.py | 1 | 10557 | __author__ = 'Peter Hofmann'
__version__ = '0.1.3'
import os
import sys
from collections import Iterable
from io import StringIO
if sys.version_info < (3,):
from ConfigParser import SafeConfigParser as ConfigParser
else:
from configparser import ConfigParser
from scripts.loggingwrapper import DefaultLogging
class ConfigParserWrapper(DefaultLogging):
"""
@type _config: ConfigParser
"""
_boolean_states = {
'yes': True, 'true': True, 'on': True,
'no': False, 'false': False, 'off': False,
'y': True, 't': True, 'n': False, 'f': False}
def __init__(self, logfile=None, verbose=True):
"""
Wrapper for the SafeConfigParser class for easy use.
@attention: config_file argument may be file path or stream.
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | None
@param verbose: No stdout or stderr messages. Warnings and errors will be only logged to a file, if one is given
@type verbose: bool
@return: None
@rtype: None
"""
super(ConfigParserWrapper, self).__init__(
label="ConfigParserWrapper", logfile=logfile, verbose=verbose)
self._config = ConfigParser()
self._config_file_path = None
def read(self, config_file):
"""
Read a configuration file in ini format
@attention: config_file argument may be file path or stream.
@param config_file: file handler or file path to a config file
@type config_file: file | FileIO | StringIO
@rtype: None
"""
assert isinstance(config_file, str) or self.is_stream(config_file), "Invalid config file path: {}".format(config_file)
if isinstance(config_file, str) and not os.path.isfile(config_file):
self._logger.error("Config file does not exist: '{}'".format(config_file))
raise Exception("File does not exist")
if isinstance(config_file, str):
self._config.read(config_file)
self._config_file_path = config_file
elif self.is_stream(config_file):
if sys.version_info < (3,):
self._config.readfp(config_file)
else:
self._config.read_file(config_file)
self._config_file_path = config_file.name
else:
self._logger.error("Invalid config file argument '{}'".format(config_file))
raise Exception("Unknown argument")
def write(self, file_path):
"""
Write config file
@param file_path: Output file path
@type file_path: str
@rtype: None
"""
with open(file_path, "w") as write_handler:
self._config.write(write_handler)
def set_value(self, option, value, section=None):
"""
@param section:
@type section: str
@param value:
@type value: any
@rtype: None
"""
if not self._config.has_section(section):
self._config.add_section(section)
self._config.set(section, option, value)
def validate_sections(self, list_sections):
"""
Validate a list of section names for availability.
@param list_sections: list of section names
@type list_sections: list of str
@return: None if all valid, otherwise list of invalid sections
@rtype: None | list[str]
"""
assert isinstance(list_sections, Iterable), "Invalid, not a list: '{}'".format(list_sections)
invalid_sections = []
for section in list_sections:
if not self._config.has_section(section):
invalid_sections.append(section)
if len(invalid_sections) > 0:
return invalid_sections
return None
def log_invalid_sections(self, list_sections):
"""
print out a list of invalid section names to log.
@param list_sections: list of section names
@type list_sections: list[str]
@return: None
@rtype: None
"""
assert isinstance(list_sections, Iterable), "Invalid, not a list: '{}'".format(list_sections)
for section in list_sections:
self._logger.warning("Invalid section '{}'".format(section))
def get_value(self, option, section=None, is_digit=False, is_boolean=False, is_path=False, silent=False):
"""
get a value of an option in a specific section of the config file.
		@attention: Set silent to True if a missing section or option should not be reported as an error.
@param option: name of option in a section
@type option: str
@param section: name of section
@type section: str
@param is_digit: value is a number and will be returned as such
@type is_digit: bool
@param is_boolean: value is bool and will be returned as True or False
@type is_boolean: bool
@param is_path: value is a path and will be returned as absolute path
@type is_path: bool
		@param silent: If True, missing sections or options are not logged as errors
@type silent: bool
@return: None if not available or ''. Else: depends on given arguments
@rtype: None | str | int | float | bool
"""
assert section is None or isinstance(section, str), "Invalid section: '{}'".format(section)
assert isinstance(option, str), "Invalid option: '{}'".format(option)
assert isinstance(is_digit, bool), "Invalid argument, 'is_digit' must be boolean, but got: '{}'".format(type(is_digit))
assert isinstance(is_boolean, bool), "Invalid argument, 'is_boolean' must be boolean, but got: '{}'".format(type(is_boolean))
assert isinstance(silent, bool), "Invalid argument, 'silent' must be boolean, but got: '{}'".format(type(silent))
assert isinstance(is_path, bool), "Invalid argument, 'is_path' must be boolean, but got: '{}'".format(type(is_path))
if section is None:
section = self._get_section_of_option(option)
if not self._config.has_section(section):
if not silent:
if section is None:
self._logger.error("Missing option '{}'".format(option))
else:
self._logger.error("Missing section '{}'".format(section))
return None
if not self._config.has_option(section, option):
if not silent:
self._logger.error("Missing option '{}' in section '{}'".format(option, section))
return None
value = self._config.get(section, option)
if value == '':
if not silent:
self._logger.warning("Empty value in '{}': '{}'".format(section, option))
return None
if is_digit:
return self._string_to_digit(value)
if is_boolean:
return self._is_true(value)
if is_path:
return self._get_full_path(value)
return value
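	# Example usage (illustrative; the file, section and option names are made up):
	#   wrapper = ConfigParserWrapper(verbose=False)
	#   wrapper.read("pipeline.cfg")
	#   max_processors = wrapper.get_value("max_processors", section="Main", is_digit=True)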
def _get_section_of_option(self, option):
"""
get the section of a unique option
@param option: name of option in a section
@type option: str
@return: Section name. None if not available
@rtype: None | str
"""
assert isinstance(option, str), "Invalid argument, 'option' must be string, but got: '{}'".format(type(option))
for section in self._config.sections():
if self._config.has_option(section, option):
return section
return None
def search_sections_of(self, option):
"""
get the section of a unique option
@param option: name of option in a section
@type option: str
@return: Section name. None if not available
@rtype: set[str]
"""
assert isinstance(option, str), "Invalid argument, 'option' must be string, but got: '{}'".format(type(option))
result = set()
for section in self._config.sections():
if self._config.has_option(section, option):
result.add(section)
return result
def _string_to_digit(self, value):
"""
parse string to an int or float.
@param value: some string to be converted
@type value: str
@return: None if invalid, otherwise int or float
@rtype: None | int | float
"""
assert isinstance(value, str), "Invalid argument, 'value' must be string, but got: '{}'".format(type(value))
try:
if '.' in value:
return float(value)
return int(value)
except ValueError:
self._logger.error("Invalid digit value '{}'".format(value))
return None
def _is_true(self, value):
"""
parse string to True or False.
@param value: some string to be converted
@type value: str
@return: None if invalid, otherwise True or False
@rtype: None | bool
"""
assert isinstance(value, str), "Invalid argument, 'value' must be string, but got: '{}'".format(type(value))
if value.lower() not in ConfigParserWrapper._boolean_states:
self._logger.error("Invalid bool value '{}'".format(value))
return None
return ConfigParserWrapper._boolean_states[value.lower()]
@staticmethod
def _get_full_path(value):
"""
convert string to absolute normpath.
@param value: some string to be converted
@type value: str
@return: absolute normpath
@rtype: str
"""
assert isinstance(value, str), "Invalid argument, 'value' must be string, but got: '{}'".format(type(value))
parent_directory, filename = os.path.split(value)
if not parent_directory and not os.path.isfile(value):
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, filename)
if os.path.isfile(exe_file):
value = exe_file
break
value = os.path.expanduser(value)
value = os.path.normpath(value)
value = os.path.abspath(value)
return value
| gpl-2.0 | -7,490,994,053,260,148,000 | 35.912587 | 133 | 0.575637 | false |
abcdef123/stem | stem/response/events.py | 1 | 30484 | import datetime
import re
import StringIO
import stem
import stem.control
import stem.descriptor.router_status_entry
import stem.response
import stem.version
from stem.util import connection, log, str_tools, tor_tools
# Matches keyword=value arguments. This can't be a simple "(.*)=(.*)" pattern
# because some positional arguments, like circuit paths, can have an equal
# sign.
KW_ARG = re.compile("^(.*) ([A-Za-z0-9_]+)=(\S*)$")
QUOTED_KW_ARG = re.compile("^(.*) ([A-Za-z0-9_]+)=\"(.*)\"$")
class Event(stem.response.ControlMessage):
"""
Base for events we receive asynchronously, as described in section 4.1 of the
`control-spec
<https://gitweb.torproject.org/torspec.git/blob/HEAD:/control-spec.txt>`_.
:var str type: event type
:var int arrived_at: unix timestamp for when the message arrived
:var list positional_args: positional arguments of the event
:var dict keyword_args: key/value arguments of the event
"""
_POSITIONAL_ARGS = () # attribute names for recognized positional arguments
_KEYWORD_ARGS = {} # map of 'keyword => attribute' for recognized attributes
_QUOTED = () # positional arguments that are quoted
_SKIP_PARSING = False # skip parsing contents into our positional_args and keyword_args
_VERSION_ADDED = stem.version.Version('0.1.1.1-alpha') # minimum version with control-spec V1 event support
def _parse_message(self, arrived_at):
if not str(self).strip():
raise stem.ProtocolError("Received a blank tor event. Events must at the very least have a type.")
self.type = str(self).split().pop(0)
self.arrived_at = arrived_at
# if we're a recognized event type then translate ourselves into that subclass
if self.type in EVENT_TYPE_TO_CLASS:
self.__class__ = EVENT_TYPE_TO_CLASS[self.type]
self.positional_args = []
self.keyword_args = {}
if not self._SKIP_PARSING:
self._parse_standard_attr()
self._parse()
def _parse_standard_attr(self):
"""
Most events are of the form...
650 *( positional_args ) *( key "=" value )
This parses this standard format, populating our **positional_args** and
**keyword_args** attributes and creating attributes if it's in our event's
**_POSITIONAL_ARGS** and **_KEYWORD_ARGS**.
"""
# Tor events contain some number of positional arguments followed by
# key/value mappings. Parsing keyword arguments from the end until we hit
# something that isn't a key/value mapping. The rest are positional.
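    # For example (illustrative, not captured from a live tor instance), content
    # of 'CIRC 4 LAUNCHED PURPOSE=GENERAL' ends up with positional_args of
    # ['4', 'LAUNCHED'] and keyword_args of {'PURPOSE': 'GENERAL'}.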
content = str(self)
while True:
match = QUOTED_KW_ARG.match(content)
if not match:
match = KW_ARG.match(content)
if match:
content, keyword, value = match.groups()
self.keyword_args[keyword] = value
else:
break
# Setting attributes for the fields that we recognize.
self.positional_args = content.split()[1:]
positional = list(self.positional_args)
for attr_name in self._POSITIONAL_ARGS:
attr_value = None
if positional:
if attr_name in self._QUOTED:
attr_values = [positional.pop(0)]
if not attr_values[0].startswith('"'):
raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self))
while True:
if not positional:
raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self))
attr_values.append(positional.pop(0))
if attr_values[-1].endswith('"'): break
attr_value = " ".join(attr_values)[1:-1]
else:
attr_value = positional.pop(0)
setattr(self, attr_name, attr_value)
for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
# method overwritten by our subclasses for special handling that they do
def _parse(self):
pass
def _log_if_unrecognized(self, attr, attr_enum):
"""
Checks if an attribute exists in a given enumeration, logging a message if
it isn't. Attributes can either be for a string or collection of strings
:param str attr: name of the attribute to check
:param stem.util.enum.Enum enum: enumeration to check against
"""
attr_values = getattr(self, attr)
if attr_values:
if isinstance(attr_values, str):
attr_values = [attr_values]
for value in attr_values:
if not value in attr_enum:
log_id = "event.%s.unknown_%s.%s" % (self.type.lower(), attr, value)
unrecognized_msg = "%s event had an unrecognized %s (%s). Maybe a new addition to the control protocol? Full Event: '%s'" % (self.type, attr, value, self)
log.log_once(log_id, log.INFO, unrecognized_msg)
class AddrMapEvent(Event):
"""
Event that indicates a new address mapping.
:var str hostname: address being resolved
  :var str destination: destination of the resolution, this is usually an ip,
but could be a hostname if TrackHostExits is enabled or **NONE** if the
resolution failed
:var datetime expiry: expiration time of the resolution in local time
:var str error: error code if the resolution failed
:var datetime utc_expiry: expiration time of the resolution in UTC
The ADDRMAP event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("hostname", "destination", "expiry")
_KEYWORD_ARGS = {
"error": "error",
"EXPIRES": "utc_expiry",
}
_QUOTED = ("expiry")
def _parse(self):
if self.destination == "<error>":
self.destination = None
if self.expiry is not None:
self.expiry = datetime.datetime.strptime(self.expiry, "%Y-%m-%d %H:%M:%S")
if self.utc_expiry is not None:
self.utc_expiry = datetime.datetime.strptime(self.utc_expiry, "%Y-%m-%d %H:%M:%S")
class AuthDirNewDescEvent(Event):
"""
Event specific to directory authorities, indicating that we just received new
descriptors. The descriptor type contained within this event is unspecified
so the descriptor contents are left unparsed.
:var stem.AuthDescriptorAction action: what is being done with the descriptor
:var str message: explanation of why we chose this action
:var str descriptor: content of the descriptor
The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.1.1.10-alpha')
def _parse(self):
lines = str(self).split('\n')
if len(lines) < 5:
raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'")
elif not lines[-1] == "OK":
raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'")
self.action = lines[1]
self.message = lines[2]
self.descriptor = '\n'.join(lines[3:-1])
class BandwidthEvent(Event):
"""
Event emitted every second with the bytes sent and received by tor.
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
The BW event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("read", "written")
def _parse(self):
if not self.read:
raise stem.ProtocolError("BW event is missing its read value")
elif not self.written:
raise stem.ProtocolError("BW event is missing its written value")
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = long(self.read)
self.written = long(self.written)
class BuildTimeoutSetEvent(Event):
"""
Event indicating that the timeout value for a circuit has changed. This was
first added in tor version 0.2.2.7.
:var stem.TimeoutSetType set_type: way in which the timeout is changing
:var int total_times: circuit build times tor used to determine the timeout
:var int timeout: circuit timeout value in milliseconds
:var int xm: Pareto parameter Xm in milliseconds
:var float alpha: Pareto parameter alpha
:var float quantile: CDF quantile cutoff point
  :var float timeout_rate: ratio of circuits that have timed out
:var int close_timeout: duration to keep measurement circuits in milliseconds
:var float close_rate: ratio of measurement circuits that are closed
The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha.
"""
_POSITIONAL_ARGS = ("set_type",)
_KEYWORD_ARGS = {
"TOTAL_TIMES": "total_times",
"TIMEOUT_MS": "timeout",
"XM": "xm",
"ALPHA": "alpha",
"CUTOFF_QUANTILE": "quantile",
"TIMEOUT_RATE": "timeout_rate",
"CLOSE_MS": "close_timeout",
"CLOSE_RATE": "close_rate",
}
_VERSION_ADDED = stem.version.Version('0.2.2.7-alpha')
def _parse(self):
# convert our integer and float parameters
for param in ('total_times', 'timeout', 'xm', 'close_timeout'):
param_value = getattr(self, param)
if param_value is not None:
try:
setattr(self, param, int(param_value))
except ValueError:
raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be an integer: %s" % (param, self))
for param in ('alpha', 'quantile', 'timeout_rate', 'close_rate'):
param_value = getattr(self, param)
if param_value is not None:
try:
setattr(self, param, float(param_value))
except ValueError:
raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be a float: %s" % (param, self))
self._log_if_unrecognized('set_type', stem.TimeoutSetType)
class CircuitEvent(Event):
"""
Event that indicates that a circuit has changed.
The fingerprint or nickname values in our 'path' may be **None** if the
VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
version 0.1.2.2, and on by default after 0.2.2.1.
:var str id: circuit identifier
:var stem.CircStatus status: reported status for the circuit
:var tuple path: relays involved in the circuit, these are
**(fingerprint, nickname)** tuples
:var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
governing how the circuit is built
:var stem.CircPurpose purpose: purpose that the circuit is intended for
:var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
:var str rend_query: circuit's rendezvous-point if this is hidden service related
:var datetime created: time when the circuit was created or cannibalized
:var stem.CircClosureReason reason: reason for the circuit to be closed
:var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed
The CIRC event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("id", "status", "path")
_KEYWORD_ARGS = {
"BUILD_FLAGS": "build_flags",
"PURPOSE": "purpose",
"HS_STATE": "hs_state",
"REND_QUERY": "rend_query",
"TIME_CREATED": "created",
"REASON": "reason",
"REMOTE_REASON": "remote_reason",
}
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if self.created is not None:
try:
self.created = str_tools.parse_iso_timestamp(self.created)
except ValueError, exc:
raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('status', stem.CircStatus)
self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
self._log_if_unrecognized('purpose', stem.CircPurpose)
self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
self._log_if_unrecognized('reason', stem.CircClosureReason)
self._log_if_unrecognized('remote_reason', stem.CircClosureReason)
class CircMinorEvent(Event):
"""
Event providing information about minor changes in our circuits. This was
first added in tor version 0.2.3.11.
:var str id: circuit identifier
:var stem.CircEvent event: type of change in the circuit
:var tuple path: relays involved in the circuit, these are
**(fingerprint, nickname)** tuples
:var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
governing how the circuit is built
:var stem.CircPurpose purpose: purpose that the circuit is intended for
:var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
:var str rend_query: circuit's rendezvous-point if this is hidden service related
:var datetime created: time when the circuit was created or cannibalized
:var stem.CircPurpose old_purpose: prior purpose for the circuit
:var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit
The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha.
"""
_POSITIONAL_ARGS = ("id", "event", "path")
_KEYWORD_ARGS = {
"BUILD_FLAGS": "build_flags",
"PURPOSE": "purpose",
"HS_STATE": "hs_state",
"REND_QUERY": "rend_query",
"TIME_CREATED": "created",
"OLD_PURPOSE": "old_purpose",
"OLD_HS_STATE": "old_hs_state",
}
_VERSION_ADDED = stem.version.Version('0.2.3.11-alpha')
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if self.created is not None:
try:
self.created = str_tools.parse_iso_timestamp(self.created)
except ValueError, exc:
raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('event', stem.CircEvent)
self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
self._log_if_unrecognized('purpose', stem.CircPurpose)
self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
self._log_if_unrecognized('old_purpose', stem.CircPurpose)
self._log_if_unrecognized('old_hs_state', stem.HiddenServiceState)
class ClientsSeenEvent(Event):
"""
Periodic event on bridge relays that provides a summary of our users.
:var datetime start_time: time in UTC that we started collecting these stats
:var dict locales: mapping of country codes to a rounded count for the number of users
:var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha.
"""
_KEYWORD_ARGS = {
"TimeStarted": "start_time",
"CountrySummary": "locales",
"IPVersions": "ip_versions",
}
_VERSION_ADDED = stem.version.Version('0.2.1.10-alpha')
def _parse(self):
if self.start_time is not None:
self.start_time = datetime.datetime.strptime(self.start_time, "%Y-%m-%d %H:%M:%S")
if self.locales is not None:
locale_to_count = {}
for entry in self.locales.split(','):
if not '=' in entry:
raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '<locale>=<count>' mappings: %s" % self)
locale, count = entry.split('=', 1)
if len(locale) != 2:
raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self))
elif not count.isdigit():
raise stem.ProtocolError("Locale count was non-numeric (%s): %s" % (count, self))
elif locale in locale_to_count:
raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self))
locale_to_count[locale] = int(count)
self.locales = locale_to_count
if self.ip_versions is not None:
protocol_to_count = {}
for entry in self.ip_versions.split(','):
if not '=' in entry:
raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % self)
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError("IP protocol count was non-numeric (%s): %s" % (count, self))
protocol_to_count[protocol] = int(count)
self.ip_versions = protocol_to_count
class ConfChangedEvent(Event):
"""
Event that indicates that our configuration changed, either in response to a
SETCONF or RELOAD signal.
:var dict config: mapping of configuration options to their new values
(**None** if the option is being unset)
The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.2.3.3-alpha')
def _parse(self):
self.config = {}
# Skip first and last line since they're the header and footer. For
# instance...
#
# 650-CONF_CHANGED
# 650-ExitNodes=caerSidi
# 650-ExitPolicy
# 650-MaxCircuitDirtiness=20
# 650 OK
for line in str(self).splitlines()[1:-1]:
if '=' in line:
key, value = line.split('=', 1)
else:
key, value = line, None
self.config[key] = value
class DescChangedEvent(Event):
"""
Event that indicates that our descriptor has changed.
The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha.
"""
_VERSION_ADDED = stem.version.Version('0.1.2.2-alpha')
pass
class GuardEvent(Event):
"""
Event that indicates that our guard relays have changed.
:var stem.GuardType guard_type: purpose the guard relay is for
:var str name: nickname or fingerprint of the guard relay
:var stem.GuardStatus status: status of the guard relay
The GUARD event was introduced in tor version 0.1.2.5-alpha.
"""
_VERSION_ADDED = stem.version.Version('0.1.2.5-alpha')
# TODO: We should replace the 'name' field with a fingerprint or nickname
# attribute once we know what it can be...
#
# https://trac.torproject.org/7619
_POSITIONAL_ARGS = ("guard_type", "name", "status")
class LogEvent(Event):
"""
Tor logging event. These are the most visible kind of event since, by
default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout.
:var stem.Runlevel runlevel: runlevel of the logged message
:var str message: logged message
The logging events were some of the first Control Protocol V1 events
and were introduced in tor version 0.1.1.1-alpha.
"""
_SKIP_PARSING = True
def _parse(self):
self.runlevel = self.type
self._log_if_unrecognized('runlevel', stem.Runlevel)
# message is our content, minus the runlevel and ending "OK" if a
# multi-line message
self.message = str(self)[len(self.runlevel) + 1:].rstrip("\nOK")
class NetworkStatusEvent(Event):
"""
Event for when our copy of the consensus has changed. This was introduced in
tor version 0.1.2.3.
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
The NS event was introduced in tor version 0.1.2.3-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.1.2.3-alpha')
def _parse(self):
content = str(self).lstrip("NS\n")
self.desc = list(stem.descriptor.router_status_entry.parse_file(
StringIO.StringIO(content),
True,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
class NewConsensusEvent(Event):
"""
Event for when we have a new consensus. This is similar to
:class:`~stem.response.events.NetworkStatusEvent`, except that it contains
the whole consensus so anything not listed is implicitly no longer
recommended.
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.2.1.13-alpha')
def _parse(self):
content = str(self).lstrip("NEWCONSENSUS\n")
self.desc = list(stem.descriptor.router_status_entry.parse_file(
StringIO.StringIO(content),
True,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
class NewDescEvent(Event):
"""
Event that indicates that a new descriptor is available.
The fingerprint or nickname values in our 'relays' may be **None** if the
VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
version 0.1.2.2, and on by default after 0.2.2.1.
:var tuple relays: **(fingerprint, nickname)** tuples for the relays with
new descriptors
The NEWDESC event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
def _parse(self):
self.relays = tuple([stem.control._parse_circ_entry(entry) for entry in str(self).split()[1:]])
class ORConnEvent(Event):
"""
Event that indicates a change in a relay connection. The 'endpoint' could be
any of several things including a...
* fingerprint
* nickname
* 'fingerprint=nickname' pair
* address:port
The derived 'endpoint_*' attributes are generally more useful.
:var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint if it was provided
:var str endpoint_nickname: endpoint's nickname if it was provided
:var str endpoint_address: endpoint's address if it was provided
:var int endpoint_port: endpoint's port if it was provided
:var stem.ORStatus status: state of the connection
:var stem.ORClosureReason reason: reason for the connection to be closed
:var int circ_count: number of established and pending circuits
The ORCONN event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("endpoint", "status")
_KEYWORD_ARGS = {
"REASON": "reason",
"NCIRCS": "circ_count",
}
def _parse(self):
self.endpoint_fingerprint = None
self.endpoint_nickname = None
self.endpoint_address = None
self.endpoint_port = None
try:
self.endpoint_fingerprint, self.endpoint_nickname = \
stem.control._parse_circ_entry(self.endpoint)
except stem.ProtocolError:
if not ':' in self.endpoint:
raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)
address, port = self.endpoint.split(':', 1)
if not connection.is_valid_port(port):
raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)
self.endpoint_address = address
self.endpoint_port = int(port)
if self.circ_count is not None:
if not self.circ_count.isdigit():
raise stem.ProtocolError("ORCONN event got a non-numeric circuit count (%s): %s" % (self.circ_count, self))
self.circ_count = int(self.circ_count)
self._log_if_unrecognized('status', stem.ORStatus)
self._log_if_unrecognized('reason', stem.ORClosureReason)
class SignalEvent(Event):
"""
Event that indicates that tor has received and acted upon a signal being sent
to the process. As of tor version 0.2.4.6 the only signals conveyed by this
event are...
* RELOAD
* DUMP
* DEBUG
* NEWNYM
* CLEARDNSCACHE
:var stem.Signal signal: signal that tor received
The SIGNAL event was introduced in tor version 0.2.3.1-alpha.
"""
_POSITIONAL_ARGS = ("signal",)
_VERSION_ADDED = stem.version.Version('0.2.3.1-alpha')
def _parse(self):
    # log if we received an unrecognized signal
expected_signals = (
stem.Signal.RELOAD,
stem.Signal.DUMP,
stem.Signal.DEBUG,
stem.Signal.NEWNYM,
stem.Signal.CLEARDNSCACHE,
)
self._log_if_unrecognized('signal', expected_signals)
class StatusEvent(Event):
"""
Notification of a change in tor's state. These are generally triggered for
the same sort of things as log messages of the NOTICE level or higher.
However, unlike :class:`~stem.response.events.LogEvent` these contain well
formed data.
:var stem.StatusType status_type: category of the status event
:var stem.Runlevel runlevel: runlevel of the logged message
:var str message: logged message
The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
in tor version 0.1.2.3-alpha.
"""
_POSITIONAL_ARGS = ("runlevel", "action")
_VERSION_ADDED = stem.version.Version('0.1.2.3-alpha')
def _parse(self):
if self.type == 'STATUS_GENERAL':
self.status_type = stem.StatusType.GENERAL
elif self.type == 'STATUS_CLIENT':
self.status_type = stem.StatusType.CLIENT
elif self.type == 'STATUS_SERVER':
self.status_type = stem.StatusType.SERVER
else:
raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)
self._log_if_unrecognized('runlevel', stem.Runlevel)
class StreamEvent(Event):
"""
Event that indicates that a stream has changed.
:var str id: stream identifier
:var stem.StreamStatus status: reported status for the stream
:var str circ_id: circuit that the stream is attached to
:var str target: destination of the stream
:var str target_address: destination address (ip or hostname)
:var int target_port: destination port
:var stem.StreamClosureReason reason: reason for the stream to be closed
:var stem.StreamClosureReason remote_reason: remote side's reason for the stream to be closed
:var stem.StreamSource source: origin of the REMAP request
:var str source_addr: requester of the connection
:var str source_address: requester address (ip or hostname)
:var int source_port: requester port
:var stem.StreamPurpose purpose: purpose for the stream
The STREAM event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("id", "status", "circ_id", "target")
_KEYWORD_ARGS = {
"REASON": "reason",
"REMOTE_REASON": "remote_reason",
"SOURCE": "source",
"SOURCE_ADDR": "source_addr",
"PURPOSE": "purpose",
}
def _parse(self):
if self.target is None:
raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
else:
if not ':' in self.target:
raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)
address, port = self.target.split(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Target location's port is invalid: %s" % self)
self.target_address = address
self.target_port = int(port)
if self.source_addr is None:
self.source_address = None
self.source_port = None
else:
if not ':' in self.source_addr:
raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)
address, port = self.source_addr.split(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Source location's port is invalid: %s" % self)
self.source_address = address
self.source_port = int(port)
# spec specifies a circ_id of zero if the stream is unattached
if self.circ_id == "0":
self.circ_id = None
self._log_if_unrecognized('reason', stem.StreamClosureReason)
self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
self._log_if_unrecognized('purpose', stem.StreamPurpose)
class StreamBwEvent(Event):
"""
Event (emitted approximately every second) with the bytes sent and received
by the application since the last such event on this stream.
:var str id: stream identifier
:var long written: bytes sent by the application
:var long read: bytes received by the application
The STREAM_BW event was introduced in tor version 0.1.2.8-beta.
"""
_POSITIONAL_ARGS = ("id", "written", "read")
_VERSION_ADDED = stem.version.Version('0.1.2.8-beta')
def _parse(self):
if not tor_tools.is_valid_stream_id(self.id):
raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
elif not self.written:
raise stem.ProtocolError("STREAM_BW event is missing its written value")
elif not self.read:
raise stem.ProtocolError("STREAM_BW event is missing its read value")
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = long(self.read)
self.written = long(self.written)
EVENT_TYPE_TO_CLASS = {
"ADDRMAP": AddrMapEvent,
"AUTHDIR_NEWDESCS": AuthDirNewDescEvent,
"BUILDTIMEOUT_SET": BuildTimeoutSetEvent,
"BW": BandwidthEvent,
"CIRC": CircuitEvent,
"CIRC_MINOR": CircMinorEvent,
"CLIENTS_SEEN": ClientsSeenEvent,
"CONF_CHANGED": ConfChangedEvent,
"DEBUG": LogEvent,
"DESCCHANGED": DescChangedEvent,
"ERR": LogEvent,
"GUARD": GuardEvent,
"INFO": LogEvent,
"NEWCONSENSUS": NewConsensusEvent,
"NEWDESC": NewDescEvent,
"NOTICE": LogEvent,
"NS": NetworkStatusEvent,
"ORCONN": ORConnEvent,
"SIGNAL": SignalEvent,
"STATUS_CLIENT": StatusEvent,
"STATUS_GENERAL": StatusEvent,
"STATUS_SERVER": StatusEvent,
"STREAM": StreamEvent,
"STREAM_BW": StreamBwEvent,
"WARN": LogEvent,
# accounting for a bug in tor 0.2.0.22
"STATUS_SEVER": StatusEvent,
}
| lgpl-3.0 | -4,938,717,986,826,051,000 | 34.653801 | 164 | 0.673468 | false |
caseyc37/pygame_cffi | conformance/conf_tests/test_shapes.py | 1 | 4836 | # Test shapes
from pygame import draw, rect
def test_rect(test_surf):
"""Draw several rectangles."""
for y in range(10, 200, 45):
x = 10
for width in range(10, 30, 3):
for height in range(10, 40, 7):
r = rect.Rect(x, y, width, height)
x += width + 2
draw.rect(test_surf, (255, 255, 255, 255), r)
def test_polygon(test_surf):
"""Draw several polygons."""
# triangle
draw.polygon(test_surf, (255, 255, 255, 255), [(10, 10), (30, 30),
(45, 30)])
draw.polygon(test_surf, (255, 0, 255, 255), [(50, 10), (80, 30), (95, 20)],
3)
# overlap
draw.polygon(test_surf, (255, 0, 128, 255), [(50, 15), (80, 25), (105, 25)],
0)
# square
draw.polygon(test_surf, (255, 255, 255, 255), [(150, 10), (150, 50),
(190, 50), (190, 10)])
draw.polygon(test_surf, (0, 255, 255, 255), [(220, 10), (220, 50),
(260, 50), (260, 10)], 1)
# hexagon
draw.polygon(test_surf, (0, 0, 255, 255), [(310, 10), (320, 25), (320, 50),
(310, 65), (300, 50), (300, 25)])
# Funky
draw.polygon(test_surf, (255, 255, 255, 255), [(246,134), (268, 146),
(262, 144), (271, 81),
(204, 116), (243, 102),
(275, 150), (234, 82),
(231, 94), (217, 134),
(212, 99), (237, 96)], 2)
draw.polygon(test_surf, (255, 255, 255, 255), [(369, 158), (342, 193),
(319, 205), (316, 217),
(356, 183), (312, 169),
(333, 212), (358, 200),
(310, 168), (301, 151),
(300, 145), (307, 214)], 0)
def test_hollow_circles(test_surf):
"""Draw several circles of different thicknesses and sizes"""
for thickness in range(1, 7):
cent_x = 100 + thickness * 50
cent_y = 10
for radius in range(10,200,10):
cent_y += radius + 1
draw.circle(test_surf, (255, 255, 255, 255), (cent_x, cent_y),
radius, thickness)
def test_filled_circles(test_surf):
"""Draw several filled circles"""
for cent_x, color in ((100, (0, 0, 255, 255)), (400, (0, 255, 255, 255)),
(600, (255, 0, 255, 255))):
cent_y = 10
for radius in range(10,100,10):
cent_y += radius + 1
draw.circle(test_surf, color, (cent_x, cent_y),
radius)
def test_filled_ellipses_1(test_surf):
"""Draw several filled circles"""
for cent_x, color in ((100, (0, 0, 255, 255)), (300, (0, 255, 255, 255)),
(500, (255, 0, 255, 255))):
cent_y = 10
div = 8
offset = 0
for radius in range(10,100,10):
if div > 2:
div = div // 2
else:
div = div * 2
cent_y += radius // div + 1
offset += 35
e_rect = rect.Rect(cent_x - radius + offset, cent_y - radius // div,
radius, div * radius)
draw.ellipse(test_surf, color, e_rect)
def test_filled_ellipses_2(test_surf):
"""Draw several filled circles"""
for cent_x, color in ((100, (0, 0, 255, 255)), (400, (0, 255, 255, 255)),
(600, (255, 0, 255, 255))):
cent_y = 10
div = 9
for radius in range(10,100,10):
cent_y += radius + 1
if div > 3:
div = div // 3
else:
div = div * 3
e_rect = rect.Rect(cent_x - radius // div, cent_y - radius,
div * radius, radius)
draw.ellipse(test_surf, color, e_rect)
def test_hollow_ellipses(test_surf):
for cent_x, cent_y, color in ((70, 130, (255, 0, 0, 255)),
(150, 450, (255, 255, 255, 255)),
(200, 200, (0, 255, 0, 255)),
(500, 500, (255, 128, 128, 255))):
for r1, r2 in ((30, 20), (50, 10), (10, 40), (15, 90)):
for thickness in range(1, 9, 3):
e_rect = (cent_x - r1 + 30 * thickness,
cent_y - r2 - 30 * thickness,
2 * r1, 2 * r2)
draw.ellipse(test_surf, color, e_rect, thickness)
| lgpl-2.1 | 2,379,209,523,506,091,500 | 39.983051 | 80 | 0.400744 | false |
henriquebastos/fixofx | bin/ofxfix.py | 1 | 9764 | #!/usr/bin/env python
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# fixofx.py - canonicalize all recognized upload formats to OFX 2.0
#
import os
import os.path
import sys
from fixofx.ofx import Response, FileTyper
from fixofx.ofxtools.ofc_converter import OfcConverter
from fixofx.ofxtools.qif_converter import QifConverter
def fixpath(filename):
mypath = os.path.dirname(sys._getframe(1).f_code.co_filename)
return os.path.normpath(os.path.join(mypath, filename))
from optparse import OptionParser
from pyparsing import ParseException
__doc__ = \
"""Canonicalizes files from several supported data upload formats (currently
OFX 1.02, OFX 1.5, OFX 1.6, OFX 2.0, OFC, and QIF) to OFX 2.0 (which is a
standard XML 1.0 file). Since it is easiest for the database loader to use a
single, XML-based format, and since users might prefer an XML document to OFX
1.02 or other formats for export, this script essentially removes the need for
any other code to know about all of the variations in data formats. By
default, the converter will read a single file of any supported format from
standard input and write the converted OFX 2.0 file to standard output. A
command line option also allows reading a single file, and other options allow
you to insert data into the output file not available in the source file (for
instance, QIF does not contain the account number, so an option allows you to
specify that for insertion into the OFX output)."""
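# Illustrative invocations (a sketch only; the file names and account values
# below are invented, while the options themselves are defined further down):
#
#   # convert a local QIF download, supplying data QIF cannot carry itself
#   python ofxfix.py -f checking.qif --acctid 123456 --accttype CHECKING > checking.ofx
#
#   # report the detected type of a statement piped on standard input
#   cat statement.dat | python ofxfix.py --type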
# Import Psyco if available, for speed.
try:
import psyco
psyco.full()
except ImportError:
pass
def convert(filecontent, filetype, verbose=False, fid="UNKNOWN", org="UNKNOWN",
bankid="UNKNOWN", accttype="UNKNOWN", acctid="UNKNOWN",
balance="UNKNOWN", curdef=None, lang="ENG", dayfirst=False,
debug=False):
text = os.linesep.join(s for s in filecontent.splitlines() if s)
# This finishes a verbosity message started by the caller, where the
# caller explains the source command-line option and this explains the
# source format.
if verbose:
sys.stderr.write("Converting from %s format.\n" % filetype)
    if debug and (filetype in ["OFC", "QIF"] or filetype.startswith("OFX")):
        sys.stderr.write("Starting work on raw text:\n")
        sys.stderr.write(text + "\n\n")
if filetype.startswith("OFX/2"):
if verbose: sys.stderr.write("No conversion needed; returning unmodified.\n")
# The file is already OFX 2 -- return it unaltered, ignoring
# any of the parameters passed to this method.
return text
elif filetype.startswith("OFX"):
if verbose: sys.stderr.write("Converting to OFX/2.0...\n")
# This will throw a ParseException if it is unable to recognize
# the source format.
response = Response(text, debug=debug)
return response.as_xml(original_format=filetype)
elif filetype == "OFC":
if verbose: sys.stderr.write("Beginning OFC conversion...\n")
converter = OfcConverter(text, fid=fid, org=org, curdef=curdef,
lang=lang, debug=debug)
# This will throw a ParseException if it is unable to recognize
# the source format.
if verbose:
sys.stderr.write("Converting to OFX/1.02...\n\n%s\n\n" %
converter.to_ofx102())
sys.stderr.write("Converting to OFX/2.0...\n")
return converter.to_xml()
elif filetype == "QIF":
if verbose: sys.stderr.write("Beginning QIF conversion...\n")
converter = QifConverter(text, fid=fid, org=org,
bankid=bankid, accttype=accttype,
acctid=acctid, balance=balance,
curdef=curdef, lang=lang, dayfirst=dayfirst,
debug=debug)
# This will throw a ParseException if it is unable to recognize
# the source format.
if verbose:
sys.stderr.write("Converting to OFX/1.02...\n\n%s\n\n" %
converter.to_ofx102())
sys.stderr.write("Converting to OFX/2.0...\n")
return converter.to_xml()
else:
raise TypeError("Unable to convert source format '%s'." % filetype)
parser = OptionParser(description=__doc__)
parser.add_option("-d", "--debug", action="store_true", dest="debug",
default=False, help="spit out gobs of debugging output during parse")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="be more talkative, social, outgoing")
parser.add_option("-t", "--type", action="store_true", dest="type",
default=False, help="print input file type and exit")
parser.add_option("-f", "--file", dest="filename", default=None,
help="source file to convert (writes to STDOUT)")
parser.add_option("--fid", dest="fid", default="UNKNOWN",
help="(OFC/QIF only) FID to use in output")
parser.add_option("--org", dest="org", default="UNKNOWN",
help="(OFC/QIF only) ORG to use in output")
parser.add_option("--curdef", dest="curdef", default=None,
help="(OFC/QIF only) Currency identifier to use in output")
parser.add_option("--lang", dest="lang", default="ENG",
help="(OFC/QIF only) Language identifier to use in output")
parser.add_option("--bankid", dest="bankid", default="UNKNOWN",
help="(QIF only) Routing number to use in output")
parser.add_option("--accttype", dest="accttype", default="UNKNOWN",
help="(QIF only) Account type to use in output")
parser.add_option("--acctid", dest="acctid", default="UNKNOWN",
help="(QIF only) Account number to use in output")
parser.add_option("--balance", dest="balance", default="UNKNOWN",
help="(QIF only) Account balance to use in output")
parser.add_option("--dayfirst", action="store_true", dest="dayfirst", default=False,
help="(QIF only) Parse dates day first (UK format)")
parser.add_option("-s", "--string", dest="string", default=None,
help="string to convert")
(options, args) = parser.parse_args()
#
# Check the python environment for minimum sanity levels.
#
if options.verbose and not hasattr(open, 'newlines'):
# Universal newlines are generally needed to deal with various QIF downloads.
sys.stderr.write('Warning: universal newline support NOT available.\n')
if options.verbose: print("Options: %s" % options)
#
# Load up the raw text to be converted.
#
rawtext = None
if options.filename:
if os.path.isfile(options.filename):
if options.verbose:
sys.stderr.write("Reading from '%s'\n." % options.filename)
try:
srcfile = open(options.filename, 'rU')
rawtext = srcfile.read()
srcfile.close()
except Exception as detail:
print("Exception during file read:\n%s" % detail)
print("Exiting.")
sys.stderr.write("fixofx failed with error code 1\n")
sys.exit(1)
else:
print("'%s' does not appear to be a file. Try --help." % options.filename)
sys.stderr.write("fixofx failed with error code 2\n")
sys.exit(2)
elif options.string:
if options.verbose:
sys.stderr.write("Reading from string\n")
rawtext = options.string.replace('\r','')
else:
if options.verbose:
sys.stderr.write("Reading from standard input.\n")
stdin_universal = os.fdopen(os.dup(sys.stdin.fileno()), "rU")
rawtext = stdin_universal.read()
if rawtext == "" or rawtext is None:
print("No input. Pipe a file to convert to the script,\n" + \
"or call with -f. Call with --help for more info.")
sys.stderr.write("fixofx failed with error code 3\n")
sys.exit(3)
#
# Convert the raw text to OFX 2.0.
#
try:
# Determine the type of file contained in 'text', using a quick guess
# rather than parsing the file to make sure. (Parsing will fail
# below if the guess is wrong on OFX/1 and QIF.)
filetype = FileTyper(rawtext).trust()
if options.type:
print("Input file type is %s." % filetype)
sys.exit(0)
elif options.debug:
sys.stderr.write("Input file type is %s.\n" % filetype)
converted = convert(rawtext, filetype, verbose=options.verbose,
fid=options.fid, org=options.org, bankid=options.bankid,
accttype=options.accttype, acctid=options.acctid,
balance=options.balance, curdef=options.curdef,
lang=options.lang, dayfirst=options.dayfirst,
debug=options.debug)
print(converted)
sys.exit(0)
except ParseException as detail:
print("Parse exception during '%s' conversion:\n%s" % (filetype, detail))
print("Exiting.")
sys.stderr.write("fixofx failed with error code 4\n")
sys.exit(4)
except TypeError as detail:
print(detail)
print("Exiting.")
sys.stderr.write("fixofx failed with error code 5\n")
sys.exit(5)
| apache-2.0 | -8,566,761,918,685,850,000 | 38.530364 | 87 | 0.643589 | false |
gostevehoward/absimulation | simulation_test.py | 1 | 4663 | #!/usr/bin/env python
import unittest
import simulation
def state(visitors_per_bucket, baseline_conversions, treatment_conversions):
return simulation.ExperimentState(
baseline_conversions,
visitors_per_bucket - baseline_conversions,
treatment_conversions,
visitors_per_bucket - treatment_conversions,
)
class ChisqDecisionTest(unittest.TestCase):
def test_sample_size_calculation(self):
# test values from http://www.stat.ubc.ca/~rollin/stats/ssize/b2.html
self.assertEqual(
14751,
simulation.ChisqDecision(0.95, 0.1).necessary_sample_size_per_bucket(0.1),
)
self.assertEqual(
9780,
simulation.ChisqDecision(0.85, 0.1).necessary_sample_size_per_bucket(0.1),
)
self.assertEqual(
2507,
simulation.ChisqDecision(0.95, 0.25).necessary_sample_size_per_bucket(0.1),
)
self.assertEqual(
6510,
simulation.ChisqDecision(0.95, 0.1).necessary_sample_size_per_bucket(0.2),
)
def test_decision(self):
baseline_rate = 0.5
chisq_decision = simulation.ChisqDecision(0.95, 0.1)
# sanity checks
self.assertEqual('keep running', chisq_decision.decision(state(20, 7, 10), baseline_rate))
self.assertEqual(
'baseline',
chisq_decision.decision(state(10000, 5000, 5000), baseline_rate),
)
self.assertEqual(
'baseline',
chisq_decision.decision(state(10000, 6000, 4000), baseline_rate),
)
self.assertEqual(
'treatment',
chisq_decision.decision(state(10000, 4000, 6000), baseline_rate),
)
# some close calls, using Chi-squared values from
# http://www.graphpad.com/quickcalcs/contingency1.cfm
self.assertEqual(
'baseline',
chisq_decision.decision(state(10000, 5000, 5100), baseline_rate),
)
self.assertEqual(
'treatment',
chisq_decision.decision(state(10000, 5000, 5150), baseline_rate),
)
class BayesianDecisionTest(unittest.TestCase):
def setUp(self):
self.decision = simulation.BayesianDecision(0.01)
def test_posterior_probability_treatment_is_better(self):
# sanity checks
self.assertAlmostEqual(
1,
self.decision.posterior_probability_treatment_is_better(state(1000, 1, 999)),
)
self.assertAlmostEqual(
0,
self.decision.posterior_probability_treatment_is_better(state(1000, 999, 1)),
)
self.assertAlmostEqual(
0.5,
self.decision.posterior_probability_treatment_is_better(state(100, 50, 50)),
)
self.assertGreater(
self.decision.posterior_probability_treatment_is_better(state(100, 50, 51)),
0.5,
)
self.assertLess(
self.decision.posterior_probability_treatment_is_better(state(100, 50, 49)),
0.5,
)
# some less obvious ones which might be wrong (generated using my own implementation), but
# useful for catching unintended changes at least
self.assertAlmostEqual(
0.92318343,
self.decision.posterior_probability_treatment_is_better(state(1000, 100, 120)),
)
self.assertAlmostEqual(
0.22343071,
self.decision.posterior_probability_treatment_is_better(state(1000, 100, 90)),
)
def test_expected_loss_from_choosing_treatment(self):
# sanity checks
self.assertAlmostEqual(
0.9,
self.decision.expected_loss_from_choosing_treatment(state(1000, 950, 50)),
places=2,
)
self.assertAlmostEqual(
0,
self.decision.expected_loss_from_choosing_treatment(state(1000, 1, 999)),
)
# some values from Chris Stucchio's numerical integration code
# https://gist.github.com/stucchio/9090456
# see stucchio.py in this repository
self.assertAlmostEqual(
0.017,
self.decision.expected_loss_from_choosing_treatment(state(100, 10, 10)),
places=3,
)
self.assertAlmostEqual(
0.0005,
self.decision.expected_loss_from_choosing_treatment(state(100, 10, 20)),
places=4,
)
self.assertAlmostEqual(
0.1,
self.decision.expected_loss_from_choosing_treatment(state(100, 20, 10)),
places=1,
)
if __name__ == '__main__':
unittest.main()
| mit | 8,198,010,060,414,050,000 | 33.540741 | 98 | 0.596826 | false |
samdroid-apps/browse | pdfviewer.py | 1 | 21028 | # Copyright (C) 2012, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
import tempfile
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import WebKit
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.icon import Icon
from sugar3.graphics.progressicon import ProgressIcon
from sugar3.graphics import style
from sugar3.datastore import datastore
from sugar3.activity import activity
from sugar3.bundle.activitybundle import ActivityBundle
class EvinceViewer(Gtk.Overlay):
"""PDF viewer with a toolbar overlay for basic navigation and an
option to save to Journal.
"""
__gsignals__ = {
'save-to-journal': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
'open-link': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
}
def __init__(self, uri):
GObject.GObject.__init__(self)
self._uri = uri
# delay Evince import until is needed to improve activity startup time
from gi.repository import EvinceDocument
from gi.repository import EvinceView
# Create Evince objects to handle the PDF in the URI:
EvinceDocument.init()
self._doc = EvinceDocument.Document.factory_get_document(uri)
self._view = EvinceView.View()
self._model = EvinceView.DocumentModel()
self._model.set_document(self._doc)
self._view.set_model(self._model)
self._EVINCE_MODE_FREE = EvinceView.SizingMode.FREE
self._view.connect('external-link', self.__handle_link_cb)
self._model.connect('page-changed', self.__page_changed_cb)
self._back_page_button = None
self._forward_page_button = None
self._toolbar_box = self._create_toolbar()
self._update_nav_buttons()
self._toolbar_box.set_halign(Gtk.Align.FILL)
self._toolbar_box.set_valign(Gtk.Align.END)
self.add_overlay(self._toolbar_box)
self._toolbar_box.show()
scrolled_window = Gtk.ScrolledWindow()
self.add(scrolled_window)
scrolled_window.show()
scrolled_window.add(self._view)
self._view.show()
def _create_toolbar(self):
toolbar_box = ToolbarBox()
zoom_out_button = ToolButton('zoom-out')
zoom_out_button.set_tooltip(_('Zoom out'))
zoom_out_button.connect('clicked', self.__zoom_out_cb)
toolbar_box.toolbar.insert(zoom_out_button, -1)
zoom_out_button.show()
zoom_in_button = ToolButton('zoom-in')
zoom_in_button.set_tooltip(_('Zoom in'))
zoom_in_button.connect('clicked', self.__zoom_in_cb)
toolbar_box.toolbar.insert(zoom_in_button, -1)
zoom_in_button.show()
zoom_original_button = ToolButton('zoom-original')
zoom_original_button.set_tooltip(_('Actual size'))
zoom_original_button.connect('clicked', self.__zoom_original_cb)
toolbar_box.toolbar.insert(zoom_original_button, -1)
zoom_original_button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
toolbar_box.toolbar.insert(separator, -1)
separator.show()
self._back_page_button = ToolButton('go-previous-paired')
self._back_page_button.set_tooltip(_('Previous page'))
self._back_page_button.props.sensitive = False
self._back_page_button.connect('clicked', self.__go_back_page_cb)
toolbar_box.toolbar.insert(self._back_page_button, -1)
self._back_page_button.show()
self._forward_page_button = ToolButton('go-next-paired')
self._forward_page_button.set_tooltip(_('Next page'))
self._forward_page_button.props.sensitive = False
self._forward_page_button.connect('clicked', self.__go_forward_page_cb)
toolbar_box.toolbar.insert(self._forward_page_button, -1)
self._forward_page_button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
toolbar_box.toolbar.insert(separator, -1)
separator.show()
self._save_to_journal_button = ToolButton('save-to-journal')
self._save_to_journal_button.set_tooltip(_('Save PDF to Journal'))
self._save_to_journal_button.connect('clicked',
self.__save_to_journal_button_cb)
toolbar_box.toolbar.insert(self._save_to_journal_button, -1)
self._save_to_journal_button.show()
return toolbar_box
def disable_journal_button(self):
self._save_to_journal_button.props.sensitive = False
def __handle_link_cb(self, widget, url):
self.emit('open-link', url.get_uri())
def __page_changed_cb(self, model, page_from, page_to):
self._update_nav_buttons()
def __zoom_out_cb(self, widget):
self.zoom_out()
def __zoom_in_cb(self, widget):
self.zoom_in()
def __zoom_original_cb(self, widget):
self.zoom_original()
def __go_back_page_cb(self, widget):
self._view.previous_page()
def __go_forward_page_cb(self, widget):
self._view.next_page()
def __save_to_journal_button_cb(self, widget):
self.emit('save-to-journal')
self._save_to_journal_button.props.sensitive = False
def _update_nav_buttons(self):
current_page = self._model.props.page
self._back_page_button.props.sensitive = current_page > 0
self._forward_page_button.props.sensitive = \
current_page < self._doc.get_n_pages() - 1
def zoom_original(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._model.props.scale = 1.0
def zoom_in(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._view.zoom_in()
def zoom_out(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._view.zoom_out()
def get_pdf_title(self):
return self._doc.get_title()
class DummyBrowser(GObject.GObject):
"""Has the same interface as browser.Browser ."""
__gsignals__ = {
'new-tab': (GObject.SignalFlags.RUN_FIRST, None, ([str])),
'tab-close': (GObject.SignalFlags.RUN_FIRST, None, ([object])),
'selection-changed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'security-status-changed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
}
__gproperties__ = {
"title": (object, "title", "Title", GObject.PARAM_READWRITE),
"uri": (object, "uri", "URI", GObject.PARAM_READWRITE),
"progress": (object, "progress", "Progress", GObject.PARAM_READWRITE),
"load-status": (object, "load status", "a WebKit LoadStatus",
GObject.PARAM_READWRITE),
}
def __init__(self, tab):
GObject.GObject.__init__(self)
self._tab = tab
self._title = ""
self._uri = ""
self._progress = 0.0
self._load_status = WebKit.LoadStatus.PROVISIONAL
self.security_status = None
def do_get_property(self, prop):
if prop.name == 'title':
return self._title
elif prop.name == 'uri':
return self._uri
elif prop.name == 'progress':
return self._progress
elif prop.name == 'load-status':
return self._load_status
else:
raise AttributeError, 'Unknown property %s' % prop.name
def do_set_property(self, prop, value):
if prop.name == 'title':
self._title = value
elif prop.name == 'uri':
self._uri = value
elif prop.name == 'progress':
self._progress = value
elif prop.name == 'load-status':
self._load_status = value
else:
raise AttributeError, 'Unknown property %s' % prop.name
def get_title(self):
return self._title
def get_uri(self):
return self._uri
def get_progress(self):
return self._progress
def get_load_status(self):
return self._load_status
def emit_new_tab(self, uri):
self.emit('new-tab', uri)
def emit_close_tab(self):
self.emit('tab-close', self._tab)
def get_history(self):
return [{'url': self.props.uri, 'title': self.props.title}]
def can_undo(self):
return False
def can_redo(self):
return False
def can_go_back(self):
return False
def can_go_forward(self):
return False
def can_copy_clipboard(self):
return False
def can_paste_clipboard(self):
return False
def set_history_index(self, index):
pass
def get_history_index(self):
return 0
def set_zoom_level(self, zoom_level):
pass
def get_zoom_level(self):
return 0
def stop_loading(self):
self._tab.close_tab()
def reload(self):
pass
def load_uri(self, uri):
pass
def grab_focus(self):
pass
class PDFProgressMessageBox(Gtk.EventBox):
def __init__(self, message, button_callback):
Gtk.EventBox.__init__(self)
self.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
self.add(alignment)
alignment.show()
box = Gtk.VBox()
alignment.add(box)
box.show()
icon = ProgressIcon(icon_name='book',
pixel_size=style.LARGE_ICON_SIZE,
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_SELECTION_GREY.get_svg())
self.progress_icon = icon
box.pack_start(icon, expand=True, fill=False, padding=0)
icon.show()
label = Gtk.Label()
color = style.COLOR_BUTTON_GREY.get_html()
label.set_markup('<span weight="bold" color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(message)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.CENTER)
box.pack_start(button_box, False, True, 0)
button_box.show()
button = Gtk.Button(label=_('Cancel'))
button.connect('clicked', button_callback)
button.props.image = Icon(icon_name='dialog-cancel',
pixel_size=style.SMALL_ICON_SIZE)
button_box.pack_start(button, expand=True, fill=False, padding=0)
button.show()
class PDFErrorMessageBox(Gtk.EventBox):
def __init__(self, title, message, button_callback):
Gtk.EventBox.__init__(self)
self.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
self.add(alignment)
alignment.show()
box = Gtk.VBox()
alignment.add(box)
box.show()
# Get the icon of this activity through the bundle path.
bundle_path = activity.get_bundle_path()
activity_bundle = ActivityBundle(bundle_path)
icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
file=activity_bundle.get_icon(),
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
box.pack_start(icon, expand=True, fill=False, padding=0)
icon.show()
color = style.COLOR_BUTTON_GREY.get_html()
label = Gtk.Label()
label.set_markup('<span weight="bold" color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(title)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
label = Gtk.Label()
label.set_markup('<span color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(message)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.CENTER)
box.pack_start(button_box, False, True, 0)
button_box.show()
button = Gtk.Button(label=_('Try again'))
button.connect('clicked', button_callback)
button.props.image = Icon(icon_name='entry-refresh',
pixel_size=style.SMALL_ICON_SIZE,
stroke_color=style.COLOR_WHITE.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
button_box.pack_start(button, expand=True, fill=False, padding=0)
button.show()
class PDFTabPage(Gtk.HBox):
"""Shows a basic PDF viewer, download the file first if the PDF is
in a remote location.
When the file is remote, display a message while downloading.
"""
def __init__(self):
GObject.GObject.__init__(self)
self._browser = DummyBrowser(self)
self._message_box = None
self._evince_viewer = None
self._pdf_uri = None
self._requested_uri = None
def setup(self, requested_uri, title=None):
self._requested_uri = requested_uri
# The title may be given from the Journal:
if title is not None:
self._browser.props.title = title
self._browser.props.uri = requested_uri
self._browser.props.load_status = WebKit.LoadStatus.PROVISIONAL
# show PDF directly if the file is local (from the system tree
# or from the journal)
if requested_uri.startswith('file://'):
self._pdf_uri = requested_uri
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self._show_pdf()
elif requested_uri.startswith('journal://'):
self._pdf_uri = self._get_path_from_journal(requested_uri)
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self._show_pdf(from_journal=True)
# download first if file is remote
elif requested_uri.startswith('http://'):
self._download_from_http(requested_uri)
def _get_browser(self):
return self._browser
browser = GObject.property(type=object, getter=_get_browser)
def _show_pdf(self, from_journal=False):
self._evince_viewer = EvinceViewer(self._pdf_uri)
self._evince_viewer.connect('save-to-journal',
self.__save_to_journal_cb)
self._evince_viewer.connect('open-link',
self.__open_link_cb)
# disable save to journal if the PDF is already loaded from
# the journal:
if from_journal:
self._evince_viewer.disable_journal_button()
self._evince_viewer.show()
self.pack_start(self._evince_viewer, True, True, 0)
# If the PDF has a title, set it as the browse page title,
# otherwise use the last part of the URI. Only when the title
# was not set already from the Journal.
if from_journal:
self._browser.props.title = self._browser.props.title
return
pdf_title = self._evince_viewer.get_pdf_title()
if pdf_title is not None:
self._browser.props.title = pdf_title
else:
self._browser.props.title = os.path.basename(self._requested_uri)
def _get_path_from_journal(self, journal_uri):
"""Get the system tree URI of the file for the Journal object."""
journal_id = self.__journal_id_from_uri(journal_uri)
jobject = datastore.get(journal_id)
return 'file://' + jobject.file_path
def _download_from_http(self, remote_uri):
"""Download the PDF from a remote location to a temporal file."""
# Display a message
self._message_box = PDFProgressMessageBox(
message=_("Downloading document..."),
button_callback=self.close_tab)
self.pack_start(self._message_box, True, True, 0)
self._message_box.show()
# Figure out download URI
temp_path = os.path.join(activity.get_activity_root(), 'instance')
if not os.path.exists(temp_path):
os.makedirs(temp_path)
fd, dest_path = tempfile.mkstemp(dir=temp_path)
self._pdf_uri = 'file://' + dest_path
network_request = WebKit.NetworkRequest.new(remote_uri)
self._download = WebKit.Download.new(network_request)
self._download.set_destination_uri('file://' + dest_path)
# FIXME: workaround for SL #4385
# self._download.connect('notify::progress', self.__download_progress_cb)
self._download.connect('notify::current-size',
self.__current_size_changed_cb)
self._download.connect('notify::status', self.__download_status_cb)
self._download.connect('error', self.__download_error_cb)
self._download.start()
def __current_size_changed_cb(self, download, something):
current_size = download.get_current_size()
total_size = download.get_total_size()
progress = current_size / float(total_size)
self._browser.props.progress = progress
self._message_box.progress_icon.update(progress)
def __download_progress_cb(self, download, data):
progress = download.get_progress()
self._browser.props.progress = progress
self._message_box.progress_icon.update(progress)
def __download_status_cb(self, download, data):
status = download.get_status()
if status == WebKit.DownloadStatus.STARTED:
self._browser.props.load_status = WebKit.LoadStatus.PROVISIONAL
elif status == WebKit.DownloadStatus.FINISHED:
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self.remove(self._message_box)
self._message_box = None
self._show_pdf()
elif status == WebKit.DownloadStatus.CANCELLED:
logging.debug('Download PDF canceled')
def __download_error_cb(self, download, err_code, err_detail, reason):
logging.debug('Download error! code %s, detail %s: %s' % \
(err_code, err_detail, reason))
title = _('This document could not be loaded')
self._browser.props.title = title
if self._message_box is not None:
self.remove(self._message_box)
self._message_box = PDFErrorMessageBox(
title=title,
message=_('Please make sure you are connected to the Internet.'),
button_callback=self.reload)
self.pack_start(self._message_box, True, True, 0)
self._message_box.show()
def reload(self, button=None):
self.remove(self._message_box)
self._message_box = None
self.setup(self._requested_uri)
def close_tab(self, button=None):
self._browser.emit_close_tab()
def cancel_download(self):
self._download.cancel()
def __journal_id_to_uri(self, journal_id):
"""Return an URI for a Journal object ID."""
return "journal://" + journal_id + ".pdf"
def __journal_id_from_uri(self, journal_uri):
"""Return a Journal object ID from an URI."""
return journal_uri[len("journal://"):-len(".pdf")]
def __save_to_journal_cb(self, widget):
"""Save the PDF in the Journal.
Put the PDF title as the title, or if the PDF doesn't have
one, use the filename instead. Put the requested uri as the
description.
"""
jobject = datastore.create()
jobject.metadata['title'] = self._browser.props.title
jobject.metadata['description'] = _('From: %s') % self._requested_uri
jobject.metadata['mime_type'] = "application/pdf"
jobject.file_path = self._pdf_uri[len("file://"):]
datastore.write(jobject)
# display the new URI:
self._browser.props.uri = self.__journal_id_to_uri(jobject.object_id)
def __open_link_cb(self, widget, uri):
"""Open the external link of a PDF in a new tab."""
self._browser.emit_new_tab(uri)
| gpl-2.0 | 9,211,314,109,004,162,000 | 33.872305 | 81 | 0.606097 | false |
web-masons/pyramid-zappa-api-boilerplate | PyZAPI/pyzapi/tests.py | 1 | 1710 | import unittest
import transaction
from pyramid import testing
def dummy_request(dbsession):
return testing.DummyRequest(dbsession=dbsession)
class BaseTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp(settings={
'sqlalchemy.url': 'sqlite:///:memory:'
})
self.config.include('.models')
settings = self.config.get_settings()
from .models import (
get_engine,
get_session_factory,
get_tm_session,
)
self.engine = get_engine(settings)
session_factory = get_session_factory(self.engine)
self.session = get_tm_session(session_factory, transaction.manager)
def init_database(self):
from .models.meta import Base
Base.metadata.create_all(self.engine)
def tearDown(self):
from .models.meta import Base
testing.tearDown()
transaction.abort()
Base.metadata.drop_all(self.engine)
class TestMyViewSuccessCondition(BaseTest):
def setUp(self):
super(TestMyViewSuccessCondition, self).setUp()
self.init_database()
from .models import MyModel
model = MyModel(name='one', value=55)
self.session.add(model)
def test_passing_view(self):
from .views.default import my_view
info = my_view(dummy_request(self.session))
self.assertEqual(info['one'].name, 'one')
self.assertEqual(info['project'], 'PyZAPI')
class TestMyViewFailureCondition(BaseTest):
def test_failing_view(self):
from .views.default import my_view
info = my_view(dummy_request(self.session))
self.assertEqual(info.status_int, 500)
| mit | 183,657,782,567,541,060 | 25.307692 | 75 | 0.638596 | false |
gamechanger/dusty | tests/unit/systems/known_hosts/init_test.py | 1 | 2430 | import os
import tempfile
from mock import patch
import dusty.constants
from dusty.systems.known_hosts import ensure_known_hosts
from ....testcases import DustyTestCase
@patch('dusty.systems.known_hosts._get_known_hosts_path')
@patch('dusty.systems.known_hosts.check_output')
class TestKnownHostsSystem(DustyTestCase):
def setUp(self):
super(TestKnownHostsSystem, self).setUp()
self.temp_hosts_path = tempfile.mkstemp()[1]
def tearDown(self):
super(TestKnownHostsSystem, self).tearDown()
os.remove(self.temp_hosts_path)
def test_preserves_existing_content(self, fake_check_output, fake_get_known_hosts):
fake_get_known_hosts.return_value = self.temp_hosts_path
fake_check_output.return_value = 'dusty.host:SOMESHA'
initial_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'
with open(self.temp_hosts_path, 'w') as f:
f.write(initial_content)
expected_result_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA\ndusty.host:SOMESHA'
ensure_known_hosts(['dusty.host'])
with open(self.temp_hosts_path, 'r') as f:
self.assertEqual(f.read(), expected_result_content)
def test_not_modified(self, fake_check_output, fake_get_known_hosts):
fake_get_known_hosts.return_value = self.temp_hosts_path
fake_check_output.return_value = 'prev.known.host.1:SOMESHA'
initial_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'
with open(self.temp_hosts_path, 'w') as f:
f.write(initial_content)
ensure_known_hosts(['prev.known.host.1'])
with open(self.temp_hosts_path, 'r') as f:
self.assertEqual(f.read(), initial_content)
def test_redundant_additions(self, fake_check_output, fake_get_known_hosts):
fake_get_known_hosts.return_value = self.temp_hosts_path
fake_check_output.return_value = 'dusty.host:SOMESHA'
initial_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'
with open(self.temp_hosts_path, 'w') as f:
f.write(initial_content)
expected_result_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA\ndusty.host:SOMESHA'
ensure_known_hosts(['dusty.host', 'dusty.host', 'dusty.host'])
with open(self.temp_hosts_path, 'r') as f:
self.assertEqual(f.read(), expected_result_content)
| mit | -5,612,656,755,419,461,000 | 41.631579 | 108 | 0.677366 | false |
mucow24/statusboard | darksky.py | 1 | 1582 | import time
import urllib2
import json
import sys
Default_Lat = 40.697017
Default_Lon = -73.995267
Request_Url = "https://api.forecast.io/forecast"
def getWeather(key, lat = Default_Lat, lon = Default_Lon):
request = "%s/%s/%s,%s" % (Request_Url, key, lat, lon)
u = urllib2.urlopen(request)
return json.loads(u.read())
def makeRainPlot(data):
# Find max precip:
Inch_to_MM = 25.4
max_rain_mm = 5
for e in data['minutely']['data']:
if e['precipIntensity'] * Inch_to_MM > max_rain_mm:
max_rain_mm = e['precipIntensity'] * Inch_to_MM
ret = {}
ret['graph'] = {}
graph = ret['graph']
graph['title'] = "Dark Sky Next Hour"
graph['type'] = "bar"
graph['yAxis'] = { 'minValue' : 0, 'maxValue' : max_rain_mm }
graph['datasequences'] = []
graph['refreshEveryNSeconds'] = 15
dataseq = graph['datasequences']
dataseq.append({})
seq = dataseq[0]
seq['title'] = "Rain (mm/hr)"
seq['color'] = 'aqua'
seq['datapoints'] = []
ctr = 0
for e in data['minutely']['data']:
ctr = ctr + 1
if ctr % 2 == 0:
continue
time_str = time.strftime("%H:%M", time.localtime(e['time']))
precip = e['precipIntensity'] * Inch_to_MM
seq['datapoints'].append({'title' : time_str, 'value' : precip})
return ret
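# For reference, makeRainPlot() returns a Status-Board-style payload shaped
# roughly like this (the numbers and times below are invented):
#   {"graph": {"title": "Dark Sky Next Hour", "type": "bar",
#              "yAxis": {"minValue": 0, "maxValue": 5},
#              "refreshEveryNSeconds": 15,
#              "datasequences": [{"title": "Rain (mm/hr)", "color": "aqua",
#                                 "datapoints": [{"title": "14:05", "value": 0.3},
#                                                ...]}]}}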
def main(argv):
  # argv: forecast.io API key, refresh interval in seconds, output file path.
  # getWeather() requires an API key, so one is expected as the first argument.
  api_key = argv[0]
  refresh_interval = int(argv[1])
  output_file = argv[2]
  while True:
    d = getWeather(api_key)
    p = makeRainPlot(d)
    f = open(output_file, 'w')
    f.write(json.dumps(p, indent = 2, separators = (',', ': ')))
    f.close()
    time.sleep(refresh_interval)
if __name__ == "__main__":
  main(sys.argv[1:])
| gpl-2.0 | 5,671,711,468,693,888,000 | 25.366667 | 68 | 0.600506 | false |
PaddlePaddle/models | PaddleAudio/examples/panns/parse_result.py | 1 | 2990 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ast
import os
from typing import Dict, List
import numpy as np
from paddleaudio.utils import logger
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('--tagging_file', type=str, required=True, help='')
parser.add_argument('--top_k', type=int, default=10, help='Get top k predicted results of audioset labels.')
parser.add_argument('--smooth', type=ast.literal_eval, default=True, help='Set "True" to apply posterior smoothing.')
parser.add_argument('--smooth_size', type=int, default=5, help='Window size of posterior smoothing.')
parser.add_argument('--label_file', type=str, default='./assets/audioset_labels.txt', help='File of audioset labels.')
parser.add_argument('--output_dir', type=str, default='./output_dir', help='Directory to save tagging labels.')
args = parser.parse_args()
# yapf: enable
def smooth(results: np.ndarray, win_size: int):
"""
Execute posterior smoothing in-place.
"""
for i in range(len(results) - 1, -1, -1):
if i < win_size - 1:
left = 0
else:
left = i + 1 - win_size
results[i] = np.sum(results[left:i + 1], axis=0) / (i - left + 1)
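# A small worked example of the smoothing above (illustrative only): with
# win_size=2 and a 1-D input [1, 2, 3, 4], the loop runs from the end, so each
# frame is averaged with its original predecessor:
#   i=3 -> mean(3, 4) = 3.5
#   i=2 -> mean(2, 3) = 2.5
#   i=1 -> mean(1, 2) = 1.5
#   i=0 -> mean(1)    = 1.0
# giving [1.0, 1.5, 2.5, 3.5]; for the real 2-D scores the same average is
# taken element-wise over the time axis.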
def generate_topk_label(k: int, label_map: Dict, result: np.ndarray):
"""
Return top k result.
"""
result = np.asarray(result)
topk_idx = (-result).argsort()[:k]
ret = ''
for idx in topk_idx:
label, score = label_map[idx], result[idx]
ret += f'{label}: {score}\n'
return ret
if __name__ == "__main__":
label_map = {}
with open(args.label_file, 'r') as f:
for i, l in enumerate(f.readlines()):
label_map[i] = l.strip()
results = np.load(args.tagging_file, allow_pickle=True)
times, scores = results['time'], results['scores']
if args.smooth:
logger.info('Posterior smoothing...')
smooth(scores, win_size=args.smooth_size)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
output_file = os.path.join(
args.output_dir,
os.path.basename(args.tagging_file).split('.')[0] + '.txt')
with open(output_file, 'w') as f:
for time, score in zip(times, scores):
f.write(f'{time}\n')
f.write(generate_topk_label(args.top_k, label_map, score) + '\n')
logger.info(f'Saved tagging labels to {output_file}')
| apache-2.0 | 3,274,352,965,856,468,500 | 34.595238 | 118 | 0.652508 | false |
zsiki/realcentroid | realcentroid_dialog.py | 1 | 4135 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
RealCentroidDialog
A QGIS plugin
Create internal point for a polygon layer
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-12-01
git sha : $Format:%H$
copyright : (C) 2018 by Zotan Siki
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5 import uic
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from qgis.core import QgsMapLayerProxyModel, QgsSettings
from qgis.gui import QgsEncodingFileDialog
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'realcentroid_dialog_base.ui'))
class RealCentroidDialog(QtWidgets.QDialog, FORM_CLASS):
""" dialog class for realcentroid QGIS3 plugin """
def __init__(self, parent=None):
"""Constructor."""
super(RealCentroidDialog, self).__init__(parent)
self.setupUi(self)
self.encoding = None
self.layerBox.currentIndexChanged.connect(self.sel)
self.browseButton.clicked.connect(self.browse)
self.cancelBtn.clicked.connect(self.reject)
self.okBtn.clicked.connect(self.ok)
def showEvent(self, event):
""" initialize dialog widgets """
# filter polygonlayers
self.layerBox.setFilters(QgsMapLayerProxyModel.PolygonLayer)
# clear previous pointlayer
self.pointEdit.clear()
self.sel()
def sel(self):
""" check/uncheck selectBox if selected layer changed """
l = self.layerBox.currentLayer()
try:
sf = l.selectedFeatures()
except:
sf = None
if sf: # is not None and len(sf):
self.selectedBox.setEnabled(True)
self.selectedBox.setCheckState(QtCore.Qt.Checked)
else:
self.selectedBox.setEnabled(False)
self.selectedBox.setCheckState(QtCore.Qt.Unchecked)
def browse(self):
""" open save layer dialog """
settings = QgsSettings()
dirName = settings.value("/UI/lastShapefileDir")
encode = settings.value("/UI/encoding")
fileDialog = QgsEncodingFileDialog(self, "Output shape file", dirName,
"Shape file (*.shp)", encode)
fileDialog.setDefaultSuffix("shp")
fileDialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
#fileDialog.setConfirmOverwrite(True)
if not fileDialog.exec_() == QtWidgets.QDialog.Accepted:
return
files = fileDialog.selectedFiles()
self.pointEdit.setText(files[0])
self.encoding = fileDialog.encoding()
def ok(self):
""" check widgets """
if len(self.layerBox.currentText()) == 0:
QtWidgets.QMessageBox.warning(self, "Realcentroid", \
QtWidgets.QApplication.translate("RealCentroid", \
"No polygon layer selected", None))
return
if len(self.pointEdit.text()) == 0:
QtWidgets.QMessageBox.warning(self, "Realcentroid", \
QtWidgets.QApplication.translate("RealCentroid", \
"No point layer given", None))
return
self.accept()
| gpl-2.0 | -666,068,943,287,623,000 | 39.539216 | 78 | 0.536397 | false |
morucci/repoxplorer | repoxplorer/index/__init__.py | 1 | 4623 | # Copyright 2016, Fabien Boucher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import time
import pytz
import datetime
from pecan import conf
from Crypto.Hash import SHA
from elasticsearch import client
from jsonschema import validate as schema_validate
from repoxplorer.index.yamlbackend import YAMLBackend
def date2epoch(date):
d = datetime.datetime.strptime(date, "%Y-%m-%d")
d = d.replace(tzinfo=pytz.utc)
epoch = (d - datetime.datetime(1970, 1, 1,
tzinfo=pytz.utc)).total_seconds()
return int(epoch)
def get_elasticsearch_version(es):
version = es.info()['version']['number']
return int(version.split('.')[0])
def add_params(es):
if get_elasticsearch_version(es) >= 7:
return {'include_type_name': 'true'}
else:
return {}
# From https://stackoverflow.com/a/27974027/1966658
def clean_empty(d):
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (clean_empty(v) for v in d) if v]
return {k: v for k, v in ((k, clean_empty(v)) for k, v in d.items()) if (
v or v == False)} # noqa: E712
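# Illustrative behaviour of clean_empty (example values are made up): empty
# strings, lists and dicts are pruned recursively, while an explicit False is
# kept because of the "v == False" escape hatch:
#   clean_empty({'a': '', 'b': {'c': []}, 'd': 1, 'e': False})
#   => {'d': 1, 'e': False}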
class Connector(object):
def __init__(self, host=None, port=None, index=None, index_suffix=None):
self.host = (host or
getattr(conf, 'elasticsearch_host', None) or
'localhost')
self.port = (port or
getattr(conf, 'elasticsearch_port', None) or
9200)
self.index = (index or
getattr(conf, 'elasticsearch_index', None) or
'repoxplorer')
if index_suffix:
self.index += "-%s" % index_suffix
if (getattr(conf, 'elasticsearch_user', None) and
getattr(conf, 'elasticsearch_password', None)):
self.http_auth = "%s:%s" % (
getattr(conf, 'elasticsearch_user', None),
getattr(conf, 'elasticsearch_password', None))
# NOTE(dpawlik) Opendistro is using self signed certs,
# so verify_certs is set to False.
self.es = client.Elasticsearch(
[{"host": self.host,
"port": self.port,
"http_auth": self.http_auth,
"use_ssl": True,
"verify_certs": False,
"ssl_show_warn": True}], timeout=60)
else:
self.es = client.Elasticsearch(
[{"host": self.host, "port": self.port}],
timeout=60)
self.ic = client.IndicesClient(self.es)
if not self.ic.exists(index=self.index):
self.ic.create(index=self.index)
# Give some time to have the index fully created
time.sleep(1)
class YAMLDefinition(object):
def __init__(self, db_path=None, db_default_file=None,
db_cache_path=None):
db_cache_path = db_cache_path or conf.get('db_cache_path') or db_path
self.yback = YAMLBackend(
db_path or conf.get('db_path'),
db_default_file=db_default_file or conf.get('db_default_file'),
db_cache_path=db_cache_path)
self.yback.load_db()
self.hashes_str = SHA.new(
"".join(self.yback.hashes).encode(errors='ignore')).hexdigest()
self.default_data, self.data = self.yback.get_data()
self._merge()
def _check_basic(self, key, schema, identifier):
""" Verify schema and no data duplicated
"""
issues = []
ids = set()
for d in self.data:
data = d.get(key, {})
try:
schema_validate({key: data},
yaml.load(schema))
except Exception as e:
issues.append(e.message)
duplicated = set(data.keys()) & ids
if duplicated:
issues.append("%s IDs [%s,] are duplicated" % (
identifier, ",".join(duplicated)))
ids.update(set(data.keys()))
return ids, issues
| apache-2.0 | 8,594,167,975,764,877,000 | 34.561538 | 77 | 0.567597 | false |
maczniak/emberjs | website_rebase.py | 1 | 3151 | #!/usr/bin/python
#
# The original Ember.js website is under http://emberjs.com/, while the
# translated website lives under http://maczniak.github.io/emberjs/, so all
# absolute urls were broken. I could not find a generic html rebase tool, so I
# wrote this simple Python script as a specific solution to this problem.
# see also: http://a3nm.net/blog/htmlrebase.html
#-- configuration start --
BUILD_ROOT = 'build/'
PREFIX = 'emberjs/' # must include a trailing slash only
#-- configuration end --
import os
import os.path
import re
# <link href="/stylesheets/fonts/fontello-ie7.css" media="screen" rel="stylesheet" type="text/css" />
html_link_str = '<link.*?href="/'
html_link_pattern = re.compile(html_link_str)
# _gaq.push(['_setAccount', 'UA-27675533-1']);
# from layout.erb
html_ga_str = 'UA-27675533-1'
html_ga_pattern = re.compile(html_ga_str)
# <script type="text/javascript" src="/javascripts/common-old-ie.js"></script>
html_script_str = '<script.*?src="/(?=[^/])'
html_script_pattern = re.compile(html_script_str)
# <a id="logo" href="/">
# <a href="/guides">
html_a_str = '<a .*?href="/'
html_a_pattern = re.compile(html_a_str)
# <img src="/images/about/mhelabs.png">
# exclude src="//ssl.gstatic.com/images/icons/gplus-32.png"
html_img_str = '<img.*?src="/(?=[^/])'
html_img_pattern = re.compile(html_img_str)
# var i=r.map(function(e){return $.ajax("/javascripts/app/examples/"+n+"/
# from javascripts/app/about/inline-examples.js
# <div class="example-app example-loading" data-name="loading" data-files="app.js templates/application.hbs">
js_ajax_str = '[$][.]ajax[(]"/'
js_ajax_pattern = re.compile(js_ajax_str)
# background-image:url("/images/background-shades.svg")
css_url_str = 'url[(]"/'
css_url_pattern = re.compile(css_url_str)
# url("../../fonts -> url("../fonts
css_font_str = 'url[(]"../../'
css_font_pattern = re.compile(css_font_str)
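# The handlers below apply the patterns above to rewrite site-absolute
# references so they live under PREFIX, for example (illustrative inputs):
#   <a href="/guides"> -> <a href="/emberjs/guides">
#   url("/images/background-shades.svg") -> url("/emberjs/images/background-shades.svg")
#   url("../../fonts/fontello.woff") -> url("../fonts/fontello.woff")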
def read(filename):
f = open(filename, 'r')
content = f.read()
f.close()
return content
def write(filename, content):
f = open(filename, 'w')
content = f.write(content)
f.close()
def handle_html(filename):
content = read(filename)
content = html_link_pattern.sub('\g<0>' + PREFIX, content)
content = html_ga_pattern.sub('UA-45832618-1', content)
content = html_script_pattern.sub('\g<0>' + PREFIX, content)
content = html_a_pattern.sub('\g<0>' + PREFIX, content)
content = html_img_pattern.sub('\g<0>' + PREFIX, content)
write(filename, content)
def handle_js(filename):
content = read(filename)
content = js_ajax_pattern.sub('\g<0>' + PREFIX, content)
write(filename, content)
def handle_css(filename):
content = read(filename)
content = css_url_pattern.sub('\g<0>' + PREFIX, content)
content = css_font_pattern.sub('url("../', content)
write(filename, content)
def extension(filename):
idx = filename.rfind('.')
if idx == -1:
return ''
else:
return filename[idx:]
for root, dirs, files in os.walk(BUILD_ROOT):
for file in files:
ext = extension(file)
if ext == '.html':
handle_html(os.path.join(root, file))
elif ext == '.js':
handle_js(os.path.join(root, file))
elif ext == '.css':
handle_css(os.path.join(root, file))
| mit | -3,186,537,805,941,783,600 | 29.892157 | 109 | 0.675341 | false |
google/glazier | glazier/lib/actions/domain.py | 1 | 1479 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions for interacting with the company domain."""
from glazier.lib import domain_join
from glazier.lib.actions.base import ActionError
from glazier.lib.actions.base import BaseAction
from glazier.lib.actions.base import ValidationError
class DomainJoin(BaseAction):
"""Create an imaging timer."""
def Run(self):
method = str(self._args[0])
domain = str(self._args[1])
ou = None
if len(self._args) > 2:
ou = str(self._args[2])
joiner = domain_join.DomainJoin(method, domain, ou)
try:
joiner.JoinDomain()
except domain_join.DomainJoinError as e:
raise ActionError('Unable to complete domain join. %s' % str(e))
def Validate(self):
self._ListOfStringsValidator(self._args, length=2, max_length=3)
if self._args[0] not in domain_join.AUTH_OPTS:
raise ValidationError('Invalid join method: %s' % self._args[0])
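# Config sketch (hedged): in a task list this action takes two or three string
# arguments -- join method, domain and an optional OU. The method must be one
# of domain_join.AUTH_OPTS; the exact YAML layout and the values below are
# assumptions for illustration only:
#   DomainJoin: ['interactive', 'corp.example.com', 'OU=Imaging,DC=corp,DC=example,DC=com']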
| apache-2.0 | 2,751,999,170,932,907,000 | 35.073171 | 74 | 0.721433 | false |
markgw/jazzparser | lib/nltk/parse/earleychart.py | 1 | 18301 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: An Incremental Earley Chart Parser
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Peter Ljunglöf <[email protected]>
# Rob Speer <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Jean Mark Gawron <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: chart.py 8144 2009-06-01 22:27:39Z edloper $
"""
Data classes and parser implementations for I{incremental} chart
parsers, which use dynamic programming to efficiently parse a text.
A X{chart parser} derives parse trees for a text by iteratively adding
\"edges\" to a \"chart\". Each X{edge} represents a hypothesis about the tree
structure for a subsequence of the text. The X{chart} is a
\"blackboard\" for composing and combining these hypotheses.
A parser is X{incremental}, if it guarantees that for all i, j where i < j,
all edges ending at i are built before any edges ending at j.
This is appealing for, say, speech recognizer hypothesis filtering.
The main parser class is L{EarleyChartParser}, which is a top-down
algorithm, originally formulated by Jay Earley (1970).
"""
from nltk.grammar import *
from api import *
from chart import *
from featurechart import *
#////////////////////////////////////////////////////////////
# Incremental Chart
#////////////////////////////////////////////////////////////
class IncrementalChart(Chart):
def initialize(self):
# A sequence of edge lists contained in this chart.
self._edgelists = tuple([] for x in self._positions())
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
def edges(self):
return list(self.iteredges())
def iteredges(self):
return (edge for edgelist in self._edgelists for edge in edgelist)
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = restrictions.keys()
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError, 'Bad restriction: %s' % key
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(getattr(edge, key)() for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
def _append_edge(self, edge):
self._edgelists[edge.end()].append(edge)
def _positions(self):
return xrange(self.num_leaves() + 1)
class FeatureIncrementalChart(IncrementalChart, FeatureChart):
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = restrictions.keys()
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(self._get_type_if_possible(restrictions[key])
for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError, 'Bad restriction: %s' % key
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Incremental CFG Rules
#////////////////////////////////////////////////////////////
class CompleteFundamentalRule(SingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.next()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class CompleterRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply_iter(self, chart, grammar, edge):
if not isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply_iter(chart, grammar, edge):
yield new_edge
class ScannerRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply_iter(self, chart, grammar, edge):
if isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply_iter(chart, grammar, edge):
yield new_edge
class PredictorRule(CachedTopDownPredictRule):
pass
class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
def apply_iter(self, chart, grammar, edge):
# Since the Filtered rule only works for grammars without empty productions,
# we only have to bother with complete edges here.
if edge.is_complete():
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Incremental FCFG Rules
#////////////////////////////////////////////////////////////
class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
fr = self._fundamental_rule
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.next()):
for new_edge in fr.apply_iter(chart, grammar, left_edge, right_edge):
yield new_edge
class FeatureCompleterRule(CompleterRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeatureScannerRule(ScannerRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeaturePredictorRule(FeatureTopDownPredictRule):
pass
#////////////////////////////////////////////////////////////
# Incremental CFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CompleterRule(),
ScannerRule(),
PredictorRule()]
TD_INCREMENTAL_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
CompleteFundamentalRule()]
BU_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
CompleteFundamentalRule()]
BU_LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
CompleteFundamentalRule()]
LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredCompleteFundamentalRule()]
class IncrementalChartParser(ChartParser):
"""
An I{incremental} chart parser implementing Jay Earley's
parsing algorithm:
- For each index I{end} in [0, 1, ..., N]:
- For each I{edge} s.t. I{edge}.end = I{end}:
- If I{edge} is incomplete, and I{edge}.next is not a part
of speech:
- Apply PredictorRule to I{edge}
- If I{edge} is incomplete, and I{edge}.next is a part of
speech:
- Apply ScannerRule to I{edge}
- If I{edge} is complete:
- Apply CompleterRule to I{edge}
- Return any complete parses in the chart
"""
def __init__(self, grammar, strategy=BU_LC_INCREMENTAL_STRATEGY,
trace=0, trace_chart_width=50,
chart_class=IncrementalChart):
"""
Create a new Earley chart parser, that uses C{grammar} to
parse texts.
@type grammar: C{ContextFreeGrammar}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
@type trace_chart_width: C{int}
@param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
@param chart_class: The class that should be used to create
the charts used by this parser.
"""
self._grammar = grammar
self._trace = trace
self._trace_chart_width = trace_chart_width
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
raise ValueError("Incremental inference rules must have "
"NUM_EDGES == 0 or 1")
def chart_parse(self, tokens, trace=None):
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width / (chart.num_leaves() + 1)
if trace: print chart.pp_leaves(trace_edge_width)
for axiom in self._axioms:
new_edges = axiom.apply(chart, grammar)
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
for end in range(chart.num_leaves()+1):
if trace > 1: print "\n* Processing queue:", end, "\n"
agenda = list(chart.select(end=end))
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = rule.apply_iter(chart, grammar, edge)
if trace:
new_edges = list(new_edges)
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
for new_edge in new_edges:
if new_edge.end()==end:
agenda.append(new_edge)
return chart
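# Illustrative usage sketch (added; not part of the original module). It shows the
# typical way to drive the incremental parsing loop described in the class docstring
# above; the grammar and sentence are assumptions -- any CFG usable with
# nltk.parse.chart would do.
#
#     grammar = nltk.parse.chart.demo_grammar()
#     parser = IncrementalChartParser(grammar)
#     chart = parser.chart_parse('I saw John with a dog'.split())
#     trees = chart.parses(grammar.start())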
class EarleyChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
class IncrementalTopDownChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("IncrementalLeftCornerParser only works for grammars "
"without empty productions.")
IncrementalChartParser.__init__(self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Incremental FCFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureCompleterRule(),
FeatureScannerRule(),
FeaturePredictorRule()]
TD_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureTopDownPredictRule(),
FeatureCompleteFundamentalRule()]
BU_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictRule(),
FeatureCompleteFundamentalRule()]
BU_LC_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictCombineRule(),
FeatureCompleteFundamentalRule()]
class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
def __init__(self, grammar,
strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
trace_chart_width=20,
chart_class=FeatureIncrementalChart,
**parser_args):
IncrementalChartParser.__init__(self, grammar,
strategy=strategy,
trace_chart_width=trace_chart_width,
chart_class=chart_class,
**parser_args)
class FeatureEarleyChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Demonstration
#////////////////////////////////////////////////////////////
def demo(should_print_times=True, should_print_grammar=False,
should_print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the Earley parsers.
"""
import sys, time
# The grammar for ChartParser and SteppingChartParser:
grammar = nltk.parse.chart.demo_grammar()
if should_print_grammar:
print "* Grammar"
print grammar
# Tokenize the sample sentence.
print "* Sentence:"
print sent
tokens = sent.split()
print tokens
print
# Do the parsing.
earley = EarleyChartParser(grammar, trace=trace)
t = time.clock()
chart = earley.chart_parse(tokens)
parses = chart.parses(grammar.start())
t = time.clock()-t
# Print results.
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if should_print_trees:
for tree in parses: print tree
else:
print "Nr trees:", len(parses)
if should_print_times:
print "Time:", t
if __name__ == '__main__': demo()
| gpl-3.0 | 8,186,104,699,162,732,000 | 39.939597 | 112 | 0.575847 | false |
sassoftware/rpath-product-definition | doc/example.py | 1 | 5275 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Example code for interacting with rPath product definition xml files.
"""
from rpath_proddef import api1 as proddef
import sys
# This is an example of how this module would be used to generate the XML
# for the proddef source trove.
#
# This should produce an xml file equivalent to example.xml
baseFlavor = """
~MySQL-python.threadsafe, ~X, ~!alternatives, !bootstrap,
~builddocs, ~buildtests, !cross, ~desktop, ~!dietlibc, ~!dom0, ~!domU,
~emacs, ~gcj, ~gnome, ~grub.static, ~gtk, ~ipv6, ~kde, ~!kernel.debug,
~kernel.debugdata, ~!kernel.numa, ~kernel.smp, ~krb, ~ldap, ~nptl,
~!openssh.smartcard, ~!openssh.static_libcrypto, pam, ~pcre, ~perl,
~!pie, ~!postfix.mysql, ~python, ~qt, ~readline, ~!sasl, ~!selinux,
~sqlite.threadsafe, ssl, ~tcl, tcpwrappers, ~tk, ~uClibc, !vmware,
~!xen, ~!xfce, ~!xorg-x11.xprint
"""
productDescription = """
This here is my awesome appliance.
Is it not nifty?
Worship the appliance.
"""
productVersionDescription = """
Version 1.0 features "stability" and "usefulness", which is a
vast improvement over our pre-release code.
"""
prodDef = proddef.ProductDefinition()
prodDef.setProductName("My Awesome Appliance")
prodDef.setProductShortname("awesome")
prodDef.setProductDescription(productDescription)
prodDef.setProductVersion("1.0")
prodDef.setProductVersionDescription(productVersionDescription)
prodDef.setConaryRepositoryHostname("product.example.com")
prodDef.setConaryNamespace("exm")
prodDef.setImageGroup("group-awesome-dist")
prodDef.setBaseFlavor(baseFlavor)
# Don't use addPromoteMap unless you know what you're doing; see
# https://issues.rpath.com/browse/RPCL-17 for more information on
# how to use them. These maps cause packages in devel groups to
# be flattened into the main label on promote to QA and promotes
# from example to be flattened into an alternate label.
prodDef.addStage(name='devel', labelSuffix='-devel',
promoteMaps = [('contrib', 'contrib.rpath.org@rpl:2'),
('other', 'example.rpath.org@rpl:2')])
prodDef.addStage(name='qa', labelSuffix='-qa',
promoteMaps = [('contrib', '/product.example.com@exm:group-awesome-dist-1-qa'),
('other', '/product.example.com@exm:other-1-qa') ])
prodDef.addStage(name='release', labelSuffix='',
promoteMaps = [('other', '/product.example.com@exm:other-1')])
prodDef.addSearchPath(troveName='group-rap-standard',
label='rap.rpath.com@rpath:linux-1')
prodDef.addSearchPath(troveName='group-postgres',
label='products.rpath.com@rpath:postgres-8.2')
prodDef.addFactorySource(troveName='group-factories',
label='products.rpath.com@rpath:factories-1')
prodDef.addBuildDefinition(name='x86 Installable ISO Build',
baseFlavor='is: x86',
imageType=prodDef.imageType('installableIsoImage'),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='x86-64 Installable ISO Build',
baseFlavor='is: x86 x86_64',
imageType=prodDef.imageType('installableIsoImage'),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='x86 Citrix Xenserver Virtual Appliance',
baseFlavor='~xen, ~domU is: x86',
imageType=prodDef.imageType('xenOvaImage'),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='Another Xen Build',
baseFlavor='~xen, ~domU is: x86',
imageType=prodDef.imageType('rawHdImage',
dict(autoResolve="true",
baseFileName="/poo/moo/foo")),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='VMWare build',
baseFlavor='~vmware is: x86 x86_64',
imageType=prodDef.imageType('vmwareImage',
dict(autoResolve="true",
baseFileName="foobar")),
stages = ['devel', 'qa'])
prodDef.addBuildDefinition(name='Totally VMware optional build from a different group',
baseFlavor='~vmware is: x86 x86_64',
imageGroup='group-foo-dist',
imageType=prodDef.imageType('vmwareImage'))
# Don't use addSecondaryLabel unless you know what you're doing
prodDef.addSecondaryLabel('Xen', '-xen')
prodDef.addSecondaryLabel('VMware', 'my@label:vmware')
prodDef.serialize(sys.stdout)
sys.stdout.flush()
sys.exit(0)
| apache-2.0 | 2,042,442,910,359,023,400 | 42.595041 | 87 | 0.647583 | false |
sunny94/temp | sympy/functions/combinatorial/tests/test_comb_factorials.py | 1 | 8372 | from sympy import (Symbol, symbols, factorial, factorial2, binomial,
rf, ff, gamma, polygamma, EulerGamma, O, pi, nan,
oo, zoo, simplify, expand_func)
from sympy.functions.combinatorial.factorials import subfactorial
from sympy.utilities.pytest import XFAIL, raises
def test_rf_eval_apply():
x, y = symbols('x,y')
assert rf(nan, y) == nan
assert rf(x, y) == rf(x, y)
assert rf(oo, 0) == 1
assert rf(-oo, 0) == 1
assert rf(oo, 6) == oo
assert rf(-oo, 7) == -oo
assert rf(oo, -6) == oo
assert rf(-oo, -7) == oo
assert rf(x, 0) == 1
assert rf(x, 1) == x
assert rf(x, 2) == x*(x + 1)
assert rf(x, 3) == x*(x + 1)*(x + 2)
assert rf(x, 5) == x*(x + 1)*(x + 2)*(x + 3)*(x + 4)
assert rf(x, -1) == 1/(x - 1)
assert rf(x, -2) == 1/((x - 1)*(x - 2))
assert rf(x, -3) == 1/((x - 1)*(x - 2)*(x - 3))
assert rf(1, 100) == factorial(100)
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
m = Symbol('m', integer=True, nonnegative=True)
assert rf(x, m).is_integer is None
assert rf(n, k).is_integer is None
assert rf(n, m).is_integer is True
assert rf(n, k + pi).is_integer is False
assert rf(n, m + pi).is_integer is False
assert rf(pi, m).is_integer is False
def test_ff_eval_apply():
x, y = symbols('x,y')
assert ff(nan, y) == nan
assert ff(x, y) == ff(x, y)
assert ff(oo, 0) == 1
assert ff(-oo, 0) == 1
assert ff(oo, 6) == oo
assert ff(-oo, 7) == -oo
assert ff(oo, -6) == oo
assert ff(-oo, -7) == oo
assert ff(x, 0) == 1
assert ff(x, 1) == x
assert ff(x, 2) == x*(x - 1)
assert ff(x, 3) == x*(x - 1)*(x - 2)
assert ff(x, 5) == x*(x - 1)*(x - 2)*(x - 3)*(x - 4)
assert ff(x, -1) == 1/(x + 1)
assert ff(x, -2) == 1/((x + 1)*(x + 2))
assert ff(x, -3) == 1/((x + 1)*(x + 2)*(x + 3))
assert ff(100, 100) == factorial(100)
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
m = Symbol('m', integer=True, nonnegative=True)
assert ff(x, m).is_integer is None
assert ff(n, k).is_integer is None
assert ff(n, m).is_integer is True
assert ff(n, k + pi).is_integer is False
assert ff(n, m + pi).is_integer is False
assert ff(pi, m).is_integer is False
def test_factorial():
x = Symbol('x')
n = Symbol('n', integer=True)
k = Symbol('k', integer=True, positive=True)
r = Symbol('r', integer=False)
assert factorial(-2) == zoo
assert factorial(0) == 1
assert factorial(7) == 5040
assert factorial(n).func == factorial
assert factorial(2*n).func == factorial
assert factorial(x).is_integer is None
assert factorial(n).is_integer
assert factorial(r).is_integer is None
assert factorial(n).is_positive is None
assert factorial(k).is_positive
assert factorial(oo) == oo
def test_factorial_diff():
n = Symbol('n', integer=True)
assert factorial(n).diff(n) == \
gamma(1 + n)*polygamma(0, 1 + n)
assert factorial(n**2).diff(n) == \
2*n*gamma(1 + n**2)*polygamma(0, 1 + n**2)
def test_factorial_series():
n = Symbol('n', integer=True)
assert factorial(n).series(n, 0, 3) == \
1 - n*EulerGamma + n**2*(EulerGamma**2/2 + pi**2/12) + O(n**3)
def test_factorial_rewrite():
n = Symbol('n', integer=True)
assert factorial(n).rewrite(gamma) == gamma(n + 1)
def test_factorial2():
n = Symbol('n', integer=True)
assert factorial2(-1) == 1
assert factorial2(0) == 1
assert factorial2(7) == 105
assert factorial2(8) == 384
assert factorial2(n).func == factorial2
# The following is exhaustive
tt = Symbol('tt', integer=True, nonnegative=True)
tf = Symbol('tf', integer=True, nonnegative=False)
ft = Symbol('ft', integer=False, nonnegative=True)
ff = Symbol('ff', integer=False, nonnegative=False)
fn = Symbol('fn', integer=False)
nt = Symbol('nt', nonnegative=True)
nf = Symbol('nf', nonnegative=False)
nn = Symbol('nn')
assert factorial2(tt - 1).is_integer
assert factorial2(tf - 1).is_integer is False
assert factorial2(n).is_integer is None
assert factorial2(ft - 1).is_integer is False
assert factorial2(ff - 1).is_integer is False
assert factorial2(fn).is_integer is False
assert factorial2(nt - 1).is_integer is None
assert factorial2(nf - 1).is_integer is False
assert factorial2(nn).is_integer is None
assert factorial2(tt - 1).is_positive
assert factorial2(tf - 1).is_positive is False
assert factorial2(n).is_positive is None
assert factorial2(ft - 1).is_positive is False
assert factorial2(ff - 1).is_positive is False
assert factorial2(fn).is_positive is False
assert factorial2(nt - 1).is_positive is None
assert factorial2(nf - 1).is_positive is False
assert factorial2(nn).is_positive is None
def test_binomial():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
u = Symbol('v', negative=True)
v = Symbol('m', positive=True)
assert binomial(0, 0) == 1
assert binomial(1, 1) == 1
assert binomial(10, 10) == 1
assert binomial(1, 2) == 0
assert binomial(1, -1) == 0
assert binomial(-1, 1) == -1
assert binomial(-10, 1) == -10
assert binomial(-10, 7) == -11440
assert binomial(n, -1) == 0
assert binomial(n, 0) == 1
assert expand_func(binomial(n, 1)) == n
assert expand_func(binomial(n, 2)) == n*(n - 1)/2
assert expand_func(binomial(n, n - 2)) == n*(n - 1)/2
assert expand_func(binomial(n, n - 1)) == n
assert binomial(n, 3).func == binomial
assert binomial(n, 3).expand(func=True) == n**3/6 - n**2/2 + n/3
assert expand_func(binomial(n, 3)) == n*(n - 2)*(n - 1)/6
assert binomial(n, n) == 1
assert binomial(n, n + 1) == 0
assert binomial(n, u) == 0
assert binomial(n, v).func == binomial
assert binomial(n, k).func == binomial
assert binomial(n, n + v) == 0
assert expand_func(binomial(n, n-3)) == n*(n - 2)*(n - 1)/6
assert binomial(n, k).is_integer
def test_binomial_diff():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
assert binomial(n, k).diff(n) == \
(-polygamma(0, 1 + n - k) + polygamma(0, 1 + n))*binomial(n, k)
assert binomial(n**2, k**3).diff(n) == \
2*n*(-polygamma(
0, 1 + n**2 - k**3) + polygamma(0, 1 + n**2))*binomial(n**2, k**3)
assert binomial(n, k).diff(k) == \
(-polygamma(0, 1 + k) + polygamma(0, 1 + n - k))*binomial(n, k)
assert binomial(n**2, k**3).diff(k) == \
3*k**2*(-polygamma(
0, 1 + k**3) + polygamma(0, 1 + n**2 - k**3))*binomial(n**2, k**3)
def test_binomial_rewrite():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
assert binomial(n, k).rewrite(
factorial) == factorial(n)/(factorial(k)*factorial(n - k))
assert binomial(
n, k).rewrite(gamma) == gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
@XFAIL
def test_factorial_simplify_fail():
# simplify(factorial(x + 1).diff(x) - ((x + 1)*factorial(x)).diff(x))) == 0
from sympy.abc import x
assert simplify(x*polygamma(0, x + 1) - x*polygamma(0, x + 2) +
polygamma(0, x + 1) - polygamma(0, x + 2) + 1) == 0
def test_subfactorial():
assert all(subfactorial(i) == ans for i, ans in enumerate(
[1, 0, 1, 2, 9, 44, 265, 1854, 14833, 133496]))
raises(ValueError, lambda: subfactorial(0.1))
raises(ValueError, lambda: subfactorial(-2))
tt = Symbol('tt', integer=True, nonnegative=True)
tf = Symbol('tf', integer=True, nonnegative=False)
tn = Symbol('tf', integer=True)
ft = Symbol('ft', integer=False, nonnegative=True)
ff = Symbol('ff', integer=False, nonnegative=False)
fn = Symbol('ff', integer=False)
nt = Symbol('nt', nonnegative=True)
nf = Symbol('nf', nonnegative=False)
nn = Symbol('nf')
assert subfactorial(tt).is_integer
assert subfactorial(tf).is_integer is False
assert subfactorial(tn).is_integer is None
assert subfactorial(ft).is_integer is False
assert subfactorial(ff).is_integer is False
assert subfactorial(fn).is_integer is False
assert subfactorial(nt).is_integer is None
assert subfactorial(nf).is_integer is False
assert subfactorial(nn).is_integer is None
| bsd-3-clause | 5,140,877,816,998,110,000 | 30.954198 | 79 | 0.588629 | false |
janmtl/drift_qec | drift_qec/A_old.py | 1 | 4165 | """
Exposes the 5 parameter unital channel.
"""
import numpy as np
import scipy as sp
from scipy.linalg import polar
PDIAG = np.zeros((9, 9))
for esi in np.eye(3):
one = np.kron(esi, esi)
PDIAG = PDIAG + np.outer(one, one)
PDIAG = PDIAG.astype(np.int)
FIXEDQ = np.array([[-0.1911, 0.3136, -0.9301],
[-0.8547, 0.4128, 0.3148],
[ 0.4826, 0.8551, 0.1891]])
def o(Q, D):
return np.dot(np.dot(Q, D), Q.T)
def Ls(d1=0.1, d2=0.1, d3=0.1):
L1 = np.array([[np.cos(d1), -np.sin(d1), 0],
[np.sin(d1), np.cos(d1), 0],
[0, 0, 1]])
L2 = np.array([[np.cos(d2), 0, -np.sin(d2)],
[0, 1, 0],
[np.sin(d2), 0, np.cos(d2)]])
L3 = np.array([[1, 0, 0],
[0, np.cos(d3), -np.sin(d3)],
[0, np.sin(d3), np.cos(d3)]])
return L1, L2, L3
def SENSOR(d1=0.1, d2=0.1, d3=0.1):
L1, L2, L3 = Ls(d1, d2, d3)
LL1 = np.dot(PDIAG, np.kron(L1, L1))
LL2 = np.dot(PDIAG, np.kron(L2, L2))
LL3 = np.dot(PDIAG, np.kron(L3, L3))
SENSOR = np.r_[LL1[[0, 4, 8], :], LL2[[0, 4, 8], :], LL3[[0, 4, 8], :]]
return SENSOR
class Channel(object):
def __init__(self, kx, ky, kz, **kwargs):
# Ground truth variables
self.kx, self.ky, self.kz = kx, ky, kz
self.n = kwargs.get("n", 1e6)
self.Q = kwargs.get("Q", np.eye(3))
self.C = np.dot(np.dot(self.Q,
np.diag([self.kx, self.ky, self.kz])),
self.Q.T)
self.Q = np.linalg.svd(self.C)[0]
# Sensor parameters
self.d1 = kwargs.get("d1", 0.01)
self.d2 = kwargs.get("d2", 0.01)
self.d3 = kwargs.get("d3", 0.01)
# Estimators
self.at = np.zeros(9)
self.Vt = np.zeros((9, 9))
self.Qc = np.linalg.qr(np.random.randn(3, 3))[0]
self.M = np.zeros((3, 3))
self.cycle = 1
def sample_data(self):
QcQc = np.kron(self.Qc, self.Qc)
cvec = np.dot(QcQc, np.reshape(self.C, (9,)))
rates = np.dot(SENSOR(self.d1, self.d2, self.d3), cvec)
# Get samples for each L_i
D1 = np.random.multinomial(self.n, rates[0:3]) / float(self.n)
D2 = np.random.multinomial(self.n, rates[3:6]) / float(self.n)
D3 = np.random.multinomial(self.n, rates[6:9]) / float(self.n)
data = np.r_[D1, D2, D3]
return data
def update(self):
# Get new data at this effective orientation
x = self.sample_data()
# Recover the vectorized process matrix and its covariance through a
# linear inversion
a, Sa = self.recover_a(x)
# Update the running mean of the covariance matrix and of the linear
# inversion channel estimate
self.Vt = self.Vt + np.linalg.pinv(Sa)
self.at = np.dot(np.linalg.pinv(self.Vt),
self.at + np.dot(np.linalg.pinv(Sa), a))
# Recover the physical process matrix from the linear inversion
A = np.reshape(self.at, (3, 3))
self.M = self.recoverM(A)
# Get the estimated channel Pauli-basis
self.Qc = np.linalg.svd(self.M)[0]
# Update the process matrices
self.cycle = self.cycle + 1
def recover_a(self, x):
# Initiate the sensor and basis matrices
L = SENSOR(self.d1, self.d2, self.d3)
Linv = np.linalg.pinv(L)
QcQc = np.kron(self.Qc, self.Qc)
# Calculate the data covariance
Sx = sp.linalg.block_diag(
1.0 / self.n * np.outer(x[0:3], x[0:3]),
1.0 / self.n * np.outer(x[3:6], x[3:6]),
1.0 / self.n * np.outer(x[6:9], x[6:9])
)
Sx[np.diag_indices(9)] = 1.0 / self.n * x * (1.0 - x)
# Perform the linear inversion and transform to the standard basis
ac = np.dot(Linv, x)
Sac = o(Linv, Sx)
a = np.dot(QcQc.T, ac)
Sa = o(QcQc.T, Sac)
return a, Sa
@staticmethod
def recoverM(A):
B = 0.5 * (A + A.T)
H = polar(B)[1]
M = 0.5 * (B+H)
M = M / np.trace(M)
return M
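# Minimal usage sketch (added for illustration; not part of the original module).
# The Pauli rates kx, ky, kz (chosen here to sum to 1), the sample size n and the
# number of estimation cycles are assumptions.
if __name__ == "__main__":
    channel = Channel(0.6, 0.3, 0.1, n=100000)
    for _ in range(10):
        channel.update()
    # Running estimate of the normalized process matrix and its Pauli basis.
    print(channel.M)
    print(channel.Qc)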
| isc | 7,062,491,971,050,405,000 | 29.625 | 76 | 0.507803 | false |
IECS/MansOS | tools/IDE/src/upload_module.py | 1 | 5090 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import wx
from newMote import NewMote
from motelist import Motelist
from Translater import localize
class UploadModule(wx.Panel):
def __init__(self, parent, API):
super(UploadModule, self).__init__(parent = parent)
self.API = API
self.editorManager = self.API.tabManager.GetCurrentPage()
self.filename = self.editorManager.fileName
Motelist.addUpdateCallback(self.updateMotelist)
self.tmpDir = self.API.path + '/temp/'
self.haveMote = False
self.platform = "telosb"
self.moteOrder = list()
# this is path from /mansos/tools/IDE
self.pathToMansos = self.API.path + "/../.."
self.motes = []
self.main = wx.BoxSizer(wx.VERTICAL)
self.controls = wx.GridBagSizer(10, 10)
#self.source = wx.ComboBox(self, choices = ["USB", "Shell"])
#self.source.SetValue("USB")
self.upload = wx.Button(self, label = localize("Upload"))
self.platforms = wx.ComboBox(self, choices = self.API.getPlatforms())
self.refresh = wx.Button(self, label = localize("Refresh"))
self.compile = wx.Button(self, label = localize("Compile"))
self.newMote = wx.Button(self, label = localize("Add mote"))
self.platforms.SetValue(self.API.getActivePlatform())
if self.API.platformOnly != None:
self.platforms.Enable(False)
self.controls.Add(self.compile, (0, 0), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.platforms, (0, 1), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.upload, (0, 2), span = (2, 2),
flag = wx.EXPAND | wx.ALL)
#self.controls.Add(self.source, (1, 1), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.newMote, (1, 1), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.refresh, (1, 0), flag = wx.EXPAND | wx.ALL)
self.list = wx.CheckListBox(self, wx.ID_ANY, style = wx.MULTIPLE)
self.main.Add(self.controls, 0, wx.EXPAND | wx.ALL, 3);
self.main.Add(self.list, 0, wx.EXPAND | wx.ALL, 3);
self.Bind(wx.EVT_BUTTON, self.API.doCompile, self.compile)
self.Bind(wx.EVT_BUTTON, self.API.doUpload, self.upload)
self.Bind(wx.EVT_BUTTON, self.updateMotelist, self.refresh)
#self.Bind(wx.EVT_COMBOBOX, self.populateMotelist, self.source)
self.Bind(wx.EVT_BUTTON, self.openNewMoteDialog, self.newMote)
self.Bind(wx.EVT_COMBOBOX, self.API.changePlatform, self.platforms)
self.Bind(wx.EVT_CHECKLISTBOX, self.modifyTargets, self.list)
self.SetSizerAndFit(self.main)
self.SetAutoLayout(1)
self.Show()
self.updateMotelist()
def __del__(self):
Motelist.removeUpdateCallback(self.updateMotelist)
def updateMotelist(self, event = None):
old = self.list.GetCheckedStrings()
pos = 0
self.list.Clear()
for mote in Motelist.getMotelist(False):
self.list.Enable(True)
self.list.Insert(mote.getNiceName(), pos)
if mote.getNiceName() in old:
self.list.Check(pos)
pos += 1
if self.list.GetCount() == 0:
self.list.Enable(False)
self.list.Insert(localize("No devices found!"), 0)
def modifyTargets(self, event):
temp = list()
for target in self.list.GetCheckedStrings():
if target.count("(") != 0:
temp.append(target.split("(")[1].split(")")[0])
self.API.targets = temp
def openNewMoteDialog(self, event):
dialog = NewMote(self, self.API)
dialog.ShowModal()
dialog.Destroy()
self.updateMotelist()
| mit | 1,440,115,819,273,884,700 | 39.07874 | 77 | 0.654813 | false |
ajing/clusterVis | LigandsPlot.py | 1 | 2385 | """
Display a list of ligand structures in file
"""
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from TreeParser import *
from NodeReference import *
IMAGE_DIR = "./Image"
def ReturnFileDir(ligandname):
return os.path.join(IMAGE_DIR, ligandname)
def IndexOfLigands():
infile = "tmp"
liganddict = dict()
for line in open(infile):
content = line.strip().split(" : ")
liganddict[content[1]] = content[0]
print liganddict
return liganddict
def PlotLigandStructures(ligands, nodesize):
N = len(ligands)
col_num = 3
row_num = N/col_num + 1
liganddict = LigandDict()
plt.figure(figsize = (40,40))
## This is for getting numbering mapping for ligand name, can be deleted later
index_dict = IndexOfLigands()
########################
for i in range(N):
plt.subplot(row_num, col_num, i + 1)
a_ligand = ligands[i]
proteinname = liganddict.GetProteinName(a_ligand)
liganddir = ReturnFileDir(a_ligand)
img=mpimg.imread(liganddir)
imgplot = plt.imshow(img)
plt.title(a_ligand + "," + index_dict[a_ligand] + "\n" + proteinname + "," + str(nodesize[i]), fontsize=35)
plt.axis('off')
plt.savefig( "./Data/" + str(ligands[0]) + ".pdf", format = 'pdf')
plt.show()
if __name__ == "__main__":
IndexOfLigands()
tree_file = "./Data/all_0.9.gv"
#ligandname = "ASD01911150" # 40
#ligandname = "ASD01910794" # 47
#ligandname = "ASD01910452" # 14
#ligandname = "CHEMBL106917" # 60
#ligandname = "ASD03540047" # 32
ligandname = "CHEMBL347077" # 0
#ligandname = "CHEMBL566469" # 29
#ligandname = "ASD01150884" # 43
#ligandname = "ASD02900007" # 49 this ligand branch is kind of mixture of everything
#ligandname = "ASD01410309" # 5
#ligandname = "ASD03720006" # 42 mixed with different receptors
#ligandname = "ASD01410309" # 42
#ligandname = "ASD00170564" # 54
#ligandname = "ASD01150113" # 21
#ligandname = "ASD01120069" # 4
#ligandname = "ASD01120153" # 59
#ligandname = "ASD03910042" # 26
#ligandname = "CHEMBL596211" # 16
#ligandname = "ASD03090737" # 37
ligandlist, node_size = GetBranchLargeCluster(ligandname, tree_file)
PlotLigandStructures(ligandlist, node_size)
print ligandlist
print node_size
| apache-2.0 | 2,718,396,933,683,163,000 | 32.591549 | 115 | 0.630608 | false |
CiscoSystems/nova-solver-scheduler | nova/tests/scheduler/test_solver_scheduler_host_manager.py | 1 | 7720 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For SolverSchedulerHostManager
"""
from nova.openstack.common import timeutils
from nova.scheduler import solver_scheduler_host_manager as host_manager
from nova import test
from nova.tests.scheduler import solver_scheduler_fakes as fakes
class SolverSchedulerHostManagerTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
def setUp(self):
super(SolverSchedulerHostManagerTestCase, self).setUp()
self.host_manager = host_manager.SolverSchedulerHostManager()
self.fake_hosts = [host_manager.SolverSchedulerHostState(
'fake_host%s' % x, 'fake-node') for x in xrange(1, 5)]
self.fake_hosts += [host_manager.SolverSchedulerHostState(
'fake_multihost', 'fake-node%s' % x) for x in xrange(1, 5)]
self.addCleanup(timeutils.clear_time_override)
def _verify_result(self, info, result):
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost']}
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force(self):
fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
'fake_host5']}
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
info = {'expected_objs': [],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
info = {'expected_objs': [],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
info = {'expected_objs': [],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
class SolverSchedulerHostManagerChangedNodesTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
# reserved for future uses
pass
class SolverSchedulerHostStateTestCase(test.NoDBTestCase):
"""Test case for SolverSchedulerHostState class."""
# reserved for future uses
pass
| apache-2.0 | -4,972,781,149,865,470,000 | 41.888889 | 78 | 0.587565 | false |
waaaaargh/katzenblog | katzenblog/model.py | 1 | 2234 | from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from katzenblog import db
from katzenblog.util import slugify
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String)
passwordhash = db.Column(db.String)
email = db.Column(db.String)
screenname = db.Column(db.String)
bio = db.Column(db.String)
def __init__(self, username, email, password, screenname, bio):
self.username = username
self.email = email
self.screenname = screenname
self.bio = bio
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
def __init__(self, name):
self.name = name
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
text = db.Column(db.String)
published = db.Column(db.Boolean)
slug = db.Column(db.String)
create_time = db.Column(db.DateTime)
last_edit_time = db.Column(db.DateTime)
owner = db.relationship('User', backref=db.backref('posts',
lazy='dynamic'))
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
category = db.relationship('Category', backref=db.backref('posts',
lazy='dynamic'))
category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
def __init__(self, title, text, owner):
self.title = title
self.text = text
self.owner = owner
self.slug = slugify(title)
self.create_time = datetime.now()
self.last_edit_time = datetime.now()
def edit(self, title, text):
self.title = title
self.text = text
self.slug = slugify(title)
self.last_edit_time = datetime.now()
| gpl-3.0 | 453,065,405,940,258,600 | 30.464789 | 78 | 0.601164 | false |
Kwentar/ImageDownloader | Internet.py | 1 | 4529 | import os
import shutil
from threading import Thread
import urllib
import requests
from tqdm import tqdm
class Internet:
@staticmethod
def write_to_failed_image_urls_file(file_name, image_url, failed_image_urls_file):
"""
        Check whether the image is already recorded in the file and add it if needed
:param file_name: image file name
:param image_url: image URL
:param failed_image_urls_file: name of file with fails
:return: None
"""
with open(failed_image_urls_file, 'a+') as need_reload:
need_reload.seek(0)
lines = need_reload.readlines()
founded = False
for line in lines:
if line.startswith(image_url):
print('File is here')
founded = True
break
if not founded:
need_reload.write(image_url + "," + file_name + '\n')
@staticmethod
def write_response_to_file(response, file_name):
with open(file_name, 'wb') as f:
for chunk in response.iter_content(chunk_size=2048):
f.write(chunk)
@staticmethod
def load_image_chunk(image_url, file_name, dir_):
"""
Loading image by URL
:param image_url: URL of image
:param file_name: destination file name
:param dir_: destination directory
:return: None
"""
r = requests.get(image_url, stream=True)
if r.status_code == requests.codes.ok:
try:
Internet.write_response_to_file(r, file_name)
except OSError as err_:
print(err_.__str__(), 'try redownload...')
index = 0
while True:
file_name = os.path.join(dir_, index.__str__() + '.jpg')
if not os.path.exists(file_name):
break
index += 1
Internet.write_response_to_file(r, file_name)
else:
print(r)
@staticmethod
def load_image2(image, file_name, need_reload_file):
r = requests.get(image, stream=True)
if r.status_code == 200:
with open(file_name, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
print(r)
@staticmethod
def load_image(image, file_name, need_reload_file):
try:
if os.path.exists(file_name):
print("file ", file_name, " exist now")
else:
urllib.request.urlretrieve(image, file_name)
print("".join(['downloaded ', image]))
except urllib.error.ContentTooShortError as err_:
print("".join(['ERROR ', err_.__str__()]))
if need_reload_file is not None:
Internet.write_to_failed_image_urls_file(file_name, image, need_reload_file)
except urllib.error.URLError as err_:
print("".join(['ERROR ', err_.__str__()]))
if need_reload_file is not None:
Internet.write_to_failed_image_urls_file(file_name, image, need_reload_file)
@staticmethod
def load_images(image_url_list, dir_, failed_image_urls_file, number, delay=5):
"""
Loading list of images
        :param number: index of the current user out of the total number of users
:param image_url_list: list of image urls
:param dir_: destination dir
:param failed_image_urls_file: name of file with unsuccessful urls
:param delay: delay for thread
:return:None
"""
abs_failed_image_urls_file = os.path.join(dir_, failed_image_urls_file)
if not os.path.exists(abs_failed_image_urls_file):
with open(abs_failed_image_urls_file, 'w') as _:
pass
for index, image in tqdm(enumerate(image_url_list), total=len(image_url_list), desc=str(number)):
f = os.path.join(dir_, image.split('/')[-1])
if os.path.exists(f):
print("file ", f, " exist now")
else:
# print('downloading {}: {}...'.format(index, f))
t = Thread(target=Internet.load_image_chunk, args=(image, f, dir_))
t.start()
t.join(delay)
if t.isAlive():
print('Bad, bad thread!')
if abs_failed_image_urls_file is not None:
Internet.write_to_failed_image_urls_file(f, image, abs_failed_image_urls_file)
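# Illustrative usage sketch (added; not part of the original module). The image
# URLs and the destination directory are assumptions.
if __name__ == '__main__':
    os.makedirs('downloads', exist_ok=True)
    sample_urls = ['https://example.com/images/photo1.jpg',
                   'https://example.com/images/photo2.jpg']
    Internet.load_images(sample_urls, 'downloads', 'failed_urls.txt', number=1)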
| mit | -2,301,983,070,617,782,500 | 38.043103 | 105 | 0.539633 | false |
lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/safety_check_wrapper.py | 1 | 1297 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SafetyCheckWrapper(Model):
"""A wrapper for the safety check object. Safety checks are performed by
service fabric before continuing with the operations. These checks ensure
the availability of the service and the reliability of the state.
:param safety_check: Represents a safety check performed by service fabric
before continuing with the operations. These checks ensure the
availability of the service and the reliability of the state.
:type safety_check: ~azure.servicefabric.models.SafetyCheck
"""
_attribute_map = {
'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'},
}
def __init__(self, safety_check=None):
super(SafetyCheckWrapper, self).__init__()
self.safety_check = safety_check
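# Usage sketch (illustrative; not part of the generated client). `seed_node_check`
# stands in for any concrete SafetyCheck instance obtained elsewhere.
#
#     wrapper = SafetyCheckWrapper(safety_check=seed_node_check)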
| mit | 7,484,912,101,823,494,000 | 39.53125 | 78 | 0.642251 | false |
annahs/atmos_research | util_migrate_sqlite_table_to_mysql.py | 1 | 2193 | import sys
import os
import numpy as np
import sqlite3
import mysql.connector
#connect to sqlite database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
#connect to mysql database
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
add_stats = ('INSERT INTO polar6_coating_2015'
'(sp2b_file,file_index,instrument,instrument_locn,particle_type,particle_dia,UNIX_UTC_ts,actual_scat_amp,actual_peak_posn,actual_zero_x_posn,FF_scat_amp,FF_peak_posn,FF_gauss_width,incand_amp,LF_scat_amp,LF_baseline_pct_diff,rBC_mass_fg,coat_thickness_nm)'
'VALUES (%(sp2b_file)s,%(file_index)s,%(instr)s,%(instr_locn)s,%(particle_type)s,%(particle_dia)s,%(unix_ts_utc)s,%(actual_scat_amp)s,%(actual_peak_pos)s,%(zero_crossing_posn)s,%(FF_scat_amp)s,%(FF_peak_pos)s,%(FF_gauss_width)s,%(incand_amp)s,%(LF_scat_amp)s,%(LF_baseline_pct_diff)s,%(rBC_mass_fg)s,%(coat_thickness_nm)s)')
errs =0
instrument = 'UBCSP2'
instrument_locn = 'POLAR6'
for row in c.execute('''SELECT
sp2b_file,
file_index,
instr,
instr_locn,
particle_type,
particle_dia,
unix_ts_utc,
actual_scat_amp,
actual_peak_pos,
zero_crossing_posn,
FF_scat_amp,
FF_peak_pos,
FF_gauss_width,
incand_amp,
LF_scat_amp,
LF_baseline_pct_diff,
rBC_mass_fg,
coat_thickness_nm
FROM SP2_coating_analysis WHERE instr = ? and instr_locn=?
ORDER BY unix_ts_utc''',
(instrument, instrument_locn)):
stats = {
'sp2b_file': row[0],
'file_index': row[1],
'instr' : row[2],
'instr_locn': row[3],
'particle_type': row[4],
'particle_dia': row[5],
'unix_ts_utc': row[6],
'actual_scat_amp': row[7],
'actual_peak_pos': row[8],
'zero_crossing_posn': row[9],
'FF_scat_amp': row[10],
'FF_peak_pos': row[11],
'FF_gauss_width': row[12],
'incand_amp': row[13],
'LF_scat_amp': row[14],
'LF_baseline_pct_diff': row[15],
'rBC_mass_fg': row[16],
'coat_thickness_nm': row[17],
}
try:
cursor.execute(add_stats, stats)
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err))
errs += 1
cnx.commit()
print 'errors', errs
conn.close()
cnx.close() | mit | 8,911,286,735,066,189,000 | 25.433735 | 338 | 0.670315 | false |
yeraydiazdiaz/nonrel-blog | django_mongodb_engine/base.py | 1 | 8661 | import copy
import datetime
import decimal
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.signals import connection_created
from django.db.utils import DatabaseError
from pymongo.collection import Collection
from pymongo.connection import Connection
# handle pymongo backward compatibility
try:
from bson.objectid import ObjectId
from bson.errors import InvalidId
except ImportError:
from pymongo.objectid import ObjectId, InvalidId
from djangotoolbox.db.base import (
NonrelDatabaseClient,
NonrelDatabaseFeatures,
NonrelDatabaseIntrospection,
NonrelDatabaseOperations,
NonrelDatabaseValidation,
NonrelDatabaseWrapper
)
from djangotoolbox.db.utils import decimal_to_string
from .creation import DatabaseCreation
from .utils import CollectionDebugWrapper
class DatabaseFeatures(NonrelDatabaseFeatures):
supports_microsecond_precision = False
supports_long_model_names = False
class DatabaseOperations(NonrelDatabaseOperations):
compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
def max_name_length(self):
return 254
def check_aggregate_support(self, aggregate):
import aggregations
try:
getattr(aggregations, aggregate.__class__.__name__)
except AttributeError:
raise NotImplementedError("django-mongodb-engine doesn't support "
"%r aggregates." % type(aggregate))
def sql_flush(self, style, tables, sequence_list, allow_cascade=False):
"""
Returns a list of SQL statements that have to be executed to
drop all `tables`. No SQL in MongoDB, so just clear all tables
here and return an empty list.
"""
for table in tables:
if table.startswith('system.'):
# Do not try to drop system collections.
continue
self.connection.database[table].remove()
return []
def validate_autopk_value(self, value):
"""
Mongo uses ObjectId-based AutoFields.
"""
if value is None:
return None
return unicode(value)
def _value_for_db(self, value, field, field_kind, db_type, lookup):
"""
Allows parent to handle nonrel fields, convert AutoField
        keys to ObjectIds, and dates and times to datetimes.
Let everything else pass to PyMongo -- when the value is used
the driver will raise an exception if it got anything
unacceptable.
"""
if value is None:
return None
# Parent can handle iterable fields and Django wrappers.
value = super(DatabaseOperations, self)._value_for_db(
value, field, field_kind, db_type, lookup)
# Convert decimals to strings preserving order.
if field_kind == 'DecimalField':
value = decimal_to_string(
value, field.max_digits, field.decimal_places)
# Anything with the "key" db_type is converted to an ObjectId.
if db_type == 'key':
try:
return ObjectId(value)
# Provide a better message for invalid IDs.
except (TypeError, InvalidId):
if isinstance(value, (str, unicode)) and len(value) > 13:
value = value[:10] + '...'
msg = "AutoField (default primary key) values must be " \
"strings representing an ObjectId on MongoDB (got " \
"%r instead)." % value
if field.model._meta.db_table == 'django_site':
# Also provide some useful tips for (very common) issues
# with settings.SITE_ID.
msg += " Please make sure your SITE_ID contains a " \
"valid ObjectId string."
raise DatabaseError(msg)
        # PyMongo can only process datetimes.
elif db_type == 'date':
return datetime.datetime(value.year, value.month, value.day)
elif db_type == 'time':
return datetime.datetime(1, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
return value
def _value_from_db(self, value, field, field_kind, db_type):
"""
Deconverts keys, dates and times (also in collections).
"""
# It is *crucial* that this is written as a direct check --
# when value is an instance of serializer.LazyModelInstance
# calling its __eq__ method does a database query.
if value is None:
return None
# All keys have been turned into ObjectIds.
if db_type == 'key':
value = unicode(value)
# We've converted dates and times to datetimes.
elif db_type == 'date':
value = datetime.date(value.year, value.month, value.day)
elif db_type == 'time':
value = datetime.time(value.hour, value.minute, value.second,
value.microsecond)
# Revert the decimal-to-string encoding.
if field_kind == 'DecimalField':
value = decimal.Decimal(value)
return super(DatabaseOperations, self)._value_from_db(
value, field, field_kind, db_type)
class DatabaseClient(NonrelDatabaseClient):
pass
class DatabaseValidation(NonrelDatabaseValidation):
pass
class DatabaseIntrospection(NonrelDatabaseIntrospection):
def table_names(self, cursor=None):
return self.connection.database.collection_names()
def sequence_list(self):
# Only required for backends that use integer primary keys.
pass
class DatabaseWrapper(NonrelDatabaseWrapper):
"""
Public API: connection, database, get_collection.
"""
def __init__(self, *args, **kwargs):
self.collection_class = kwargs.pop('collection_class', Collection)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
self.connected = False
del self.connection
def get_collection(self, name, **kwargs):
if (kwargs.pop('existing', False) and
name not in self.connection.database.collection_names()):
return None
collection = self.collection_class(self.database, name, **kwargs)
if settings.DEBUG:
collection = CollectionDebugWrapper(collection, self.alias)
return collection
def __getattr__(self, attr):
if attr in ['connection', 'database']:
assert not self.connected
self._connect()
return getattr(self, attr)
raise AttributeError(attr)
def _connect(self):
settings = copy.deepcopy(self.settings_dict)
def pop(name, default=None):
return settings.pop(name) or default
db_name = pop('NAME')
host = pop('HOST')
port = pop('PORT')
user = pop('USER')
password = pop('PASSWORD')
options = pop('OPTIONS', {})
self.operation_flags = options.pop('OPERATIONS', {})
if not any(k in ['save', 'delete', 'update']
for k in self.operation_flags):
# Flags apply to all operations.
flags = self.operation_flags
self.operation_flags = {'save': flags, 'delete': flags,
'update': flags}
# Lower-case all OPTIONS keys.
for key in options.iterkeys():
options[key.lower()] = options.pop(key)
try:
self.connection = Connection(host=host, port=port, **options)
self.database = self.connection[db_name]
except TypeError:
exc_info = sys.exc_info()
raise ImproperlyConfigured, exc_info[1], exc_info[2]
if user and password:
if not self.database.authenticate(user, password):
raise ImproperlyConfigured("Invalid username or password.")
self.connected = True
connection_created.send(sender=self.__class__, connection=self)
def _reconnect(self):
if self.connected:
del self.connection
del self.database
self.connected = False
self._connect()
def _commit(self):
pass
def _rollback(self):
pass
def close(self):
pass
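# Illustrative use of the public API noted in the DatabaseWrapper docstring
# (connection, database, get_collection); the 'default' alias and the collection
# name are assumptions.
#
#     from django.db import connections
#     wrapper = connections['default']
#     collection = wrapper.get_collection('myapp_post')
#     document = collection.find_one()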
| bsd-3-clause | -4,339,837,880,499,807,000 | 32.700389 | 78 | 0.608013 | false |
seppi91/CouchPotatoServer | couchpotato/core/media/movie/providers/info/themoviedb.py | 1 | 11383 | import random
import traceback
import itertools
from base64 import b64decode as bd
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider):
http_time_between_calls = .35
configuration = {
'images': {
'secure_base_url': 'https://image.tmdb.org/t/p/',
},
}
ak = ['ZjdmNTE3NzU4NzdlMGJiNjcwMzUyMDk1MmIzYzc4NDA=', 'ZTIyNGZlNGYzZmVjNWY3YjU1NzA2NDFmN2NkM2RmM2E=',
'YTNkYzExMWU2NjEwNWY2Mzg3ZTk5MzkzODEzYWU0ZDU=', 'ZjZiZDY4N2ZmYTYzY2QyODJiNmZmMmM2ODc3ZjI2Njk=']
languages = [ 'en' ]
default_language = 'en'
def __init__(self):
addEvent('info.search', self.search, priority = 1)
addEvent('movie.search', self.search, priority = 1)
addEvent('movie.info', self.getInfo, priority = 1)
addEvent('movie.info_by_tmdb', self.getInfo)
addEvent('app.load', self.config)
def config(self):
# Reset invalid key
if self.conf('api_key') == '9b939aee0aaafc12a65bf448e4af9543':
self.conf('api_key', '')
languages = self.getLanguages()
# languages should never be empty, the first language is the default language used for all the description details
self.default_language = languages[0]
# en is always downloaded and it is the fallback
if 'en' in languages:
languages.remove('en')
        # the default language is handled separately
languages.remove(self.default_language)
self.languages = languages
configuration = self.request('configuration')
if configuration:
self.configuration = configuration
def search(self, q, limit = 3):
""" Find movie by name """
if self.isDisabled():
return False
log.debug('Searching for movie: %s', q)
raw = None
try:
name_year = fireEvent('scanner.name_year', q, single = True)
raw = self.request('search/movie', {
'query': name_year.get('name', q),
'year': name_year.get('year'),
'search_type': 'ngram' if limit > 1 else 'phrase'
}, return_key = 'results')
except:
log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc()))
results = []
if raw:
try:
nr = 0
for movie in raw:
parsed_movie = self.parseMovie(movie, extended = False)
if parsed_movie:
results.append(parsed_movie)
nr += 1
if nr == limit:
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results
def getInfo(self, identifier = None, extended = True, **kwargs):
if not identifier:
return {}
result = self.parseMovie({
'id': identifier
}, extended = extended)
return result or {}
def parseMovie(self, movie, extended = True):
# Do request, append other items
movie = self.request('movie/%s' % movie.get('id'), {
'language': self.conf('preferred_language').upper(),
'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''),
})
if not movie:
return
movie_default = movie if self.default_language == 'en' else self.request('movie/%s' % movie.get('id'), {
'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''),
'language': self.default_language
})
movie_default = movie_default or movie
movie_others = [ self.request('movie/%s' % movie.get('id'), {
'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''),
'language': language
}) for language in self.languages] if self.languages else []
# Images
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else []
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {},
'extra_thumbs': extra_thumbs
}
# Genres
try:
genres = [genre.get('name') for genre in movie.get('genres', [])]
except:
genres = []
# 1900 is the same as None
year = str(movie.get('release_date') or '')[:4]
if not movie.get('release_date') or year == '1900' or year.lower() == 'none':
year = None
# Gather actors data
actors = {}
if extended:
# Full data
cast = movie.get('casts', {}).get('cast', [])
for cast_item in cast:
try:
actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character'))
images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.get('id'),
'alternate_titles': [m['title'] for m in movie['alternative_titles']['titles']],
'titles': [toUnicode(movie_default.get('title') or movie.get('title'))],
'original_title': movie.get('original_title'),
'images': images,
'imdb': movie.get('imdb_id'),
'runtime': movie.get('runtime'),
'released': str(movie.get('release_date')),
'year': tryInt(year, None),
'plot': movie_default.get('overview') or movie.get('overview'),
'genres': genres,
'collection': getattr(movie.get('belongs_to_collection'), 'name', None),
'actor_roles': actors
}
movie_data = dict((k, v) for k, v in movie_data.items() if v)
# Add alternative names
movies = [ movie ] + movie_others if movie == movie_default else [ movie, movie_default ] + movie_others
movie_titles = [ self.getTitles(movie) for movie in movies ]
all_titles = sorted(list(itertools.chain.from_iterable(movie_titles)))
alternate_titles = movie_data['titles']
for title in all_titles:
if title and title not in alternate_titles and title.lower() != 'none' and title is not None:
alternate_titles.append(title)
movie_data['titles'] = alternate_titles
return movie_data
def getImage(self, movie, type = 'poster', size = 'poster'):
image_url = ''
try:
path = movie.get('%s_path' % type)
if path:
image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path)
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original'):
image_urls = []
try:
for image in movie.get('images', {}).get(type, [])[1:5]:
image_urls.append(self.getImage(image, 'file', size))
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_urls
def request(self, call = '', params = {}, return_key = None):
params = dict((k, v) for k, v in params.items() if v)
params = tryUrlencode(params)
try:
url = 'https://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.getApiKey(), '&%s' % params if params else '')
data = self.getJsonData(url, show_error = False)
except:
log.debug('Movie not found: %s, %s', (call, params))
data = None
if data and return_key and return_key in data:
data = data.get(return_key)
return data
def isDisabled(self):
if self.getApiKey() == '':
log.error('No API key provided.')
return True
return False
def getApiKey(self):
key = self.conf('api_key')
return bd(random.choice(self.ak)) if key == '' else key
def getLanguages(self):
languages = splitString(Env.setting('languages', section = 'core'))
if len(languages):
return languages
return [ 'en' ]
def getTitles(self, movie):
# add the title to the list
title = toUnicode(movie.get('title'))
titles = [title] if title else []
# add the original_title to the list
alternate_title = toUnicode(movie.get('original_title'))
if alternate_title and alternate_title not in titles:
titles.append(alternate_title)
# Add alternative titles
alternate_titles = movie.get('alternative_titles', {}).get('titles', [])
for alt in alternate_titles:
alt_name = toUnicode(alt.get('title'))
if alt_name and alt_name not in titles and alt_name.lower() != 'none' and alt_name is not None:
titles.append(alt_name)
return titles;
config = [{
'name': 'themoviedb',
'groups': [
{
'tab': 'searcher',
'name': 'searcher',
'options': [
{
'name': 'preferred_language',
'label': 'Preferred langauge code',
'description': 'Please provide your language code. It will be used for providers supporting altnerate title searching.',
'default': 'en',
'placeholder': 'en|de|fr...',
},
],
}, {
'tab': 'providers',
'name': 'tmdb',
'label': 'TheMovieDB',
'hidden': True,
'description': 'Used for all calls to TheMovieDB.',
'options': [
{
'name': 'api_key',
'default': '',
'label': 'Api Key',
},
],
},
],
}]
| gpl-3.0 | -8,751,652,352,963,587,000 | 33.917178 | 140 | 0.53615 | false |
jmesteve/saas3 | openerp/addons_extra/l10n_es_payment_order/remesas.py | 1 | 6715 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2006 ACYSOS S.L.. (http://acysos.com) All Rights Reserved.
# Pedro Tarrafeta <[email protected]>
#
# Corregido para instalación TinyERP estándar 4.2.0: Zikzakmedia S.L. 2008
# Jordi Esteve <[email protected]>
#
# Añadidas cuentas de remesas y tipos de pago. 2008
# Pablo Rocandio <[email protected]>
#
# Corregido para instalación OpenERP 5.0.0 sobre account_payment_extension: Zikzakmedia S.L. 2009
# Jordi Esteve <[email protected]>
#
# Adaptación para instalación OpenERP 6.0.0 sobre account_payment_extension: Zikzakmedia S.L. 2010
# Jordi Esteve <[email protected]>
#
# Añadidos conceptos extras del CSB 19: Acysos S.L. 2011
# Ignacio Ibeas <[email protected]>
#
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class payment_mode(osv.osv):
_name= 'payment.mode'
_inherit = 'payment.mode'
def onchange_partner(self, cr, uid, ids, partner_id):
if partner_id:
#pool = self.pooler.get_pool(cr.dbname)
obj = self.pool.get('res.partner')
field = ['name']
ids = [partner_id]
filas = obj.read(cr, uid, ids, field)
return {'value':{'nombre': filas[0]["name"][:40]}}
return {'value':{'nombre': ""}}
_columns = {
'tipo': fields.selection([('none','None'),('csb_19','CSB 19'),('csb_32','CSB 32'),('csb_34','CSB 34'),('34_01','CSB 34-01'),('csb_58','CSB 58')], 'Type of payment file', size=6, select=True, required=True),
'sufijo': fields.char('suffix',size=3, select=True),
'partner_id': fields.many2one('res.partner', 'Partner', select=True),
'nombre': fields.char('Company name in file', size=40),
'cif': fields.related('partner_id','vat', type='char', string='VAT code', select=True),
# Código INE (9 dígitos)
'ine': fields.char('INE code',size=9),
'cedente': fields.char('Cedente', size=15),
# Incluir registro obligatorio de domicilio (para no domiciliados)
'inc_domicile': fields.boolean('Include domicile', help='Add partner domicile records to the exported file (CSB 58)'),
# Usar formato alternativo para el registro de domicilio
'alt_domicile_format': fields.boolean('Alt. domicile format', help='Alternative domicile record format'),
# Require bank account?
'require_bank_account': fields.boolean('Require bank account', help='If your bank allows you to send orders without the bank account info, you may disable this option'),
'csb34_type': fields.selection([('transfer', 'Transfer'),('promissory_note', 'Promissory Note'),('cheques', 'Cheques'),('certified_payments', 'Certified Payments')], 'Type of CSB 34 payment'),
'text1': fields.char('Line 1', size=36, help='Enter text and/or select a field of the invoice to include as a description in the letter. The possible values are: ${amount}, ${communication}, {communication2}, {date}, {ml_maturity_date}, {create_date}, {ml_date_created}'),
'text2': fields.char('Line 2', size=36, help='Enter text and/or select a field of the invoice to include as a description in the letter. The possible values are: ${amount}, ${communication}, {communication2}, {date}, {ml_maturity_date}, {create_date}, {ml_date_created}'),
'text3': fields.char('Line 3', size=36, help='Enter text and/or select a field of the invoice to include as a description in the letter. The possible values are: ${amount}, ${communication}, {communication2}, {date}, {ml_maturity_date}, {create_date}, {ml_date_created}'),
'payroll_check': fields.boolean('Payroll Check', help='Check it if you want to add the 018 data type in the file (the vat of the recipient is added in the 018 data type).'),
'add_date': fields.boolean('Add Date', help='Check it if you want to add the 910 data type in the file to include the payment date.'),
'send_type':fields.selection([
('mail','Ordinary Mail'),
('certified_mail','Certified Mail'),
('other','Other'),
],'Send Type', help="The sending type of the payment file"),
'not_to_the_order':fields.boolean('Not to the Order'),
'barred':fields.boolean('Barred'),
'cost_key':fields.selection([
('payer','Expense of the Payer'),
('recipient','Expense of the Recipient'),
],'Cost Key'),
'concept':fields.selection([
('payroll','Payroll'),
('pension','Pension'),
('other','Other'),
],'Concept of the Order', help="Concept of the Order."),
'direct_pay_order':fields.boolean('Direct Pay Order', help="By default 'Not'."),
'csb19_extra_concepts': fields.boolean('Extra Concepts', help='Check it if you want to add the invoice lines to the extra concepts (Max. 15 lines)'),
}
_defaults = {
'tipo': lambda *a: 'none',
'sufijo': lambda *a: '000',
'inc_domicile': lambda *a: False,
'alt_domicile_format': lambda *a: False,
# Override default: We want to be safe so we require bank account by default
'require_bank_account': lambda *a: True,
'csb34_type': lambda *a: 'transfer',
'text1': lambda self,cr,uid,context: _('Dear Sir'),
'text2': lambda self,cr,uid,context: _('Payment ref.')+' ${communication}',
'text3': lambda self,cr,uid,context: _('Total:')+' ${amount}',
'send_type': lambda *a: 'mail',
'not_to_the_order': lambda *a: True,
'barred': lambda *a: True,
'cost_key': lambda *a: 'payer',
'concept': lambda *a: 'other',
'direct_pay_order': lambda *a: False,
}
payment_mode()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,191,690,641,885,414,000 | 53.422764 | 282 | 0.626681 | false |
tomasdubec/openstack-cinder | cinder/tests/test_volume_configuration.py | 1 | 2385 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the configuration wrapper in volume drivers."""
from oslo.config import cfg
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration
from cinder.volume import driver
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
volume_opts = [
cfg.StrOpt('str_opt', default='STR_OPT'),
cfg.BoolOpt('bool_opt', default=False)
]
more_volume_opts = [
cfg.IntOpt('int_opt', default=1),
]
FLAGS.register_opts(volume_opts)
FLAGS.register_opts(more_volume_opts)
class VolumeConfigurationTest(test.TestCase):
def setUp(self):
super(VolumeConfigurationTest, self).setUp()
def tearDown(self):
super(VolumeConfigurationTest, self).tearDown()
def test_group_grafts_opts(self):
c = configuration.Configuration(volume_opts, config_group='foo')
self.assertEquals(c.str_opt, FLAGS.foo.str_opt)
self.assertEquals(c.bool_opt, FLAGS.foo.bool_opt)
def test_opts_no_group(self):
c = configuration.Configuration(volume_opts)
self.assertEquals(c.str_opt, FLAGS.str_opt)
self.assertEquals(c.bool_opt, FLAGS.bool_opt)
def test_grafting_multiple_opts(self):
c = configuration.Configuration(volume_opts, config_group='foo')
c.append_config_values(more_volume_opts)
self.assertEquals(c.str_opt, FLAGS.foo.str_opt)
self.assertEquals(c.bool_opt, FLAGS.foo.bool_opt)
self.assertEquals(c.int_opt, FLAGS.foo.int_opt)
def test_safe_get(self):
c = configuration.Configuration(volume_opts, config_group='foo')
self.assertEquals(c.safe_get('none_opt'), None)
| apache-2.0 | 942,622,923,200,035,300 | 32.125 | 78 | 0.706918 | false |
fieldOfView/pyQNodesEditor | qneport.py | 1 | 4898 | # Copyright (c) 2014, ALDO HOEBEN
# Copyright (c) 2012, STANISLAW ADASZEWSKI
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of STANISLAW ADASZEWSKI nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL STANISLAW ADASZEWSKI BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from PySide.QtCore import (Qt)
from PySide.QtGui import (QBrush, QColor, QPainter, QPainterPath, QPen)
from PySide.QtGui import (QGraphicsItem, QGraphicsPathItem, QGraphicsTextItem)
class QNEPort(QGraphicsPathItem):
(NamePort, TypePort) = (1, 2)
(Type) = (QGraphicsItem.UserType +1)
def __init__(self, parent):
super(QNEPort, self).__init__(parent)
self.label = QGraphicsTextItem(self)
self.radius_ = 4
self.margin = 3
path = QPainterPath()
path.addEllipse(-self.radius_, -self.radius_, 2*self.radius_, 2*self.radius_);
self.setPath(path)
self.setPen(QPen(Qt.darkRed))
self.setBrush(Qt.red)
self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges)
self.m_portFlags = 0
self.isOutput_ = False
self.m_block = None
self.m_connections = []
def __del__(self):
#print("Del QNEPort %s" % self.name)
pass
def delete(self):
for connection in self.m_connections:
connection.delete()
self.scene().removeItem(self)
self.m_block = None
self.m_connections = []
def setName(self, name):
self.name = name
self.label.setPlainText(name)
def setIsOutput(self, isOutput):
self.isOutput_ = isOutput
if self.isOutput_:
self.label.setPos(-self.radius_ - self.margin - self.label.boundingRect().width(),
-self.label.boundingRect().height()/2);
else:
self.label.setPos(self.radius_ + self.margin,
-self.label.boundingRect().height()/2);
def setNEBlock(self, block):
self.m_block = block
def setPortFlags(self, flags):
self.m_portFlags = flags
if self.m_portFlags & self.TypePort:
font = self.scene().font()
font.setItalic(True)
self.label.setFont(font)
self.setPath(QPainterPath())
elif self.m_portFlags & self.NamePort:
font = self.scene().font()
font.setBold(True)
self.label.setFont(font)
self.setPath(QPainterPath())
def setPtr(self, ptr):
self.m_ptr = ptr
def type(self):
return self.Type
def radius(self):
return self.radius_
def portName(self):
return self.name
def isOutput(self):
return self.isOutput_
def block(self):
return self.m_block
def portFlags(self):
return self.m_portFlags
def ptr(self):
return self.m_ptr;
def addConnection(self, connection):
self.m_connections.append(connection)
def removeConnection(self, connection):
try:
self.m_connections.remove(connection)
except: pass
def connections(self):
return self.m_connections
def isConnected(self, other):
for connection in self.m_connections:
if connection.port1() == other or connection.port2() == other:
return True
return False
def itemChange(self, change, value):
if change == QGraphicsItem.ItemScenePositionHasChanged:
for connection in self.m_connections:
connection.updatePosFromPorts()
connection.updatePath()
return value
| bsd-3-clause | 2,503,830,618,461,107,700 | 28.154762 | 94 | 0.652511 | false |
norling/metlab | metlab/external.py | 1 | 2186 | #!/usr/bin/env python2.7
import os
import time
import logging
import threading
from subprocess import Popen, PIPE
class External(threading.Thread):
def __init__(self, name="", args = [], log_name = "external", pid = 0, log_level = logging.INFO, wd=None):
threading.Thread.__init__(self)
self.name = name
self.args = args
self.pid = pid
self.log = logging.getLogger( log_name )
self.log.setLevel( log_level )
self.status = "idle"
self.retval = None
self._stop = threading.Event()
self.started = False
self.wd = wd if wd else os.getcwd()
self.log.info("External: %s" % name)
self.log.info(" args: %s" % args)
def run(self):
try:
self.status = "running"
self.log.info("Starting %s" % self.name)
self.log.info("cmd: %s" % ([self.name] + self.args))
try:
self.started = True
if self.args[-1].startswith(">"):
self.process = Popen([self.name] + self.args[:-1], stdout=open(self.args[-1][1:], "w"), stderr=PIPE, cwd=self.wd)
else:
self.process = Popen([self.name] + self.args, stdout=PIPE, cwd=self.wd)
self.retval = self.process.communicate()[0]
self.retval = self.retval.strip() if self.retval else self.retval
except Exception as e:
self.log.error(e)
if self._stop.isSet():
self.log.warning("%s aborted" % self.name)
self.process.kill()
self.status = "aborted"
elif self.process.returncode != 0:
self.log.error("Failed Running %s, retval %s" % (self.name, self.process.returncode))
self.status = "failed"
else:
self.log.info("Finished Running %s" % self.name)
self.status = "completed"
except Exception as e:
self.log.warning(e)
self.status = "failed"
return self.retval
def stop(self):
self.process.kill()
self._stop.set()
| gpl-3.0 | -6,979,680,684,814,166,000 | 35.433333 | 133 | 0.521043 | false |
kelceydamage/raspi | raspi/sensors/grove/i2c/color.py | 1 | 15702 | import smbus2 as smbus
import time
import math
import RPi.GPIO
"""
## License
The MIT License (MIT)
Copyright (c) 2016 Frederic Aguiard
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class GroveI2CColorSensor:
""" Provides access to the Grove I2C color sensor from Seeedstudio.
This library supports 2 of the operating modes of the sensor:
- Continuous, back-to-back color measures ('integrations') of pre-defined durations
- Single measure of arbitrary duration
The other sensor operating modes (using an external SYNC pin, interrupts...) which are not supported by this
library.
Usage:
1. Use either use_continuous_integration() or use_manual_integration() to select operating mode
2. If necessary, adjust gain and prescaler to obtain a color measure of sufficient precision without saturating the
sensor.
3. Start integration using start_integration()
4. In manual integration mode: use stop_integration() after the desired duration
5. Use one of the read functions to get the measured color value
Reference documentation:
- Seeedstudio wiki: http://www.seeedstudio.com/wiki/index.php?title=Twig_-_I2C_Color_Sensor_v0.9b
- TCS3414-A Datasheet: http://www.seeedstudio.com/wiki/File:TCS3404_TCS3414-A.pdf
"""
# Common colors coordinates (CIE xy and RGB)
COLOR_TABLE = {"Red": {"x": 0.64, "y": 0.33, "r": 255, "g": 0, "b": 0},
"Green": {"x": 0.3, "y": 0.6, "r": 0, "g": 255, "b": 0},
"Blue": {"x": 0.15, "y": 0.06, "r": 0, "g": 0, "b": 255},
"Yellow": {"x": 0.419, "y": 0.505, "r": 255, "g": 255, "b": 0},
"Magenta": {"x": 0.321, "y": 0.154, "r": 255, "g": 0, "b": 255},
"Cyan": {"x": 0.225, "y": 0.329, "r": 0, "g": 255, "b": 255},
"Deep pink": {"x": 0.466, "y": 0.238, "r": 255, "g": 20, "b": 147},
"Orange": {"x": 0.5, "y": 0.441, "r": 255, "g": 165, "b": 0},
"Saddle brown": {"x": 0.526, "y": 0.399, "r": 139, "g": 69, "b": 19},
"Grey / White": {"x": 0.313, "y": 0.329, "r": 255, "g": 255, "b": 255},
"Black": {"x": 0, "y": 0, "r": 0, "g": 0, "b": 0}}
# Sensor address on SMBus / I2C bus
_I2C_SENSOR_ADDRESS = 0X39
# Sensor registers addresses
_REGISTER_COMMAND = 0X80
_REGISTER_CONTROL = _REGISTER_COMMAND | 0X00
_REGISTER_TIMING = _REGISTER_COMMAND | 0X01
_REGISTER_INTERRUPT_CONTROL = _REGISTER_COMMAND | 0X02
_REGISTER_INT_SOURCE = _REGISTER_COMMAND | 0X03
_REGISTER_ID = _REGISTER_COMMAND | 0X04
_REGISTER_GAIN = _REGISTER_COMMAND | 0X07
_REGISTER_INTERRUPT_LOW_THRESH_LOW_BYTE = _REGISTER_COMMAND | 0X08
_REGISTER_INTERRUPT_LOW_THRESH_HIGH_BYTE = _REGISTER_COMMAND | 0X09
_REGISTER_INTERRUPT_HIGH_THRESH_LOW_BYTE = _REGISTER_COMMAND | 0X0A
_REGISTER_INTERRUPT_HIGH_THRESH_HIGH_BYTE = _REGISTER_COMMAND | 0X0B
_REGISTER_DATA_GREEN_LOW = _REGISTER_COMMAND | 0X10
_REGISTER_DATA_GREEN_HIGH = _REGISTER_COMMAND | 0X11
_REGISTER_DATA_RED_LOW = _REGISTER_COMMAND | 0X012
_REGISTER_DATA_RED_HIGH = _REGISTER_COMMAND | 0X13
_REGISTER_DATA_BLUE_LOW = _REGISTER_COMMAND | 0X14
_REGISTER_DATA_BLUE_HIGH = _REGISTER_COMMAND | 0X15
_REGISTER_DATA_CLEAR_LOW = _REGISTER_COMMAND | 0X16
_REGISTER_DATA_CLEAR_HIGH = _REGISTER_COMMAND | 0X17
_REGISTER_INTERRUPT_CLEAR = _REGISTER_COMMAND | 0X60
# Values for control register
_CONTROL_ADC_IS_VALID = 0X10
_CONTROL_ADC_ENABLE = 0X02
_CONTROL_ADC_DISABLE = 0X00
_CONTROL_ADC_POWER_ON = 0X01
_CONTROL_ADC_POWER_OFF = 0X00
# Values for timing register
_TIMING_SYNC_EDGE = 0X40
_TIMING_INTEGRATION_MODE_CONTINUOUS = 0X00
_TIMING_INTEGRATION_MODE_MANUAL = 0X10
_TIMING_INTEGRATION_MODE_SYNC_SINGLE_PULSE = 0X20
_TIMING_INTEGRATION_MODE_SYNC_MULTIPLE_PULSE = 0X30
_TIMING_PARAM_INTEGRATION_TIME_12MS = 0X00
_TIMING_PARAM_INTEGRATION_TIME_100MS = 0X01
_TIMING_PARAM_INTEGRATION_TIME_400MS = 0X02
_TIMING_PARAM_SYNC_PULSE_COUNT_1 = 0X00
_TIMING_PARAM_SYNC_PULSE_COUNT_2 = 0X01
_TIMING_PARAM_SYNC_PULSE_COUNT_4 = 0X02
_TIMING_PARAM_SYNC_PULSE_COUNT_8 = 0X03
_TIMING_PARAM_SYNC_PULSE_COUNT_16 = 0X04
_TIMING_PARAM_SYNC_PULSE_COUNT_32 = 0X05
_TIMING_PARAM_SYNC_PULSE_COUNT_64 = 0X06
_TIMING_PARAM_SYNC_PULSE_COUNT_128 = 0X07
_TIMING_PARAM_SYNC_PULSE_COUNT_256 = 0X08
# Values for interrupt control register
_INTERRUPT_CONTROL_MODE_DISABLE = 0X00
_INTERRUPT_CONTROL_MODE_LEVEL = 0X10
_INTERRUPT_CONTROL_MODE_SMB_ALERT = 0x20
_INTERRUPT_CONTROL_PERSIST_EVERY_CYCLE = 0X00
_INTERRUPT_CONTROL_PERSIST_OUTSIDE_RANGE_ONCE = 0X01
_INTERRUPT_CONTROL_PERSIST_OUTSIDE_RANGE_100MS = 0X02
_INTERRUPT_CONTROL_PERSIST_OUTSIDE_RANGE_1000MS = 0X03
# Values for interrupt source register
_INTERRUPT_SOURCE_GREEN = 0X00
_INTERRUPT_SOURCE_RED = 0X01
_INTERRUPT_SOURCE_BLUE = 0X10
_INTERRUPT_SOURCE_CLEAR = 0X03
# Values for gain register
_GAIN_1X = 0X00
_GAIN_4X = 0X10
_GAIN_16X = 0X20
_GAIN_64X = 0X30
_PRESCALER_1 = 0X00
_PRESCALER_2 = 0X01
_PRESCALER_4 = 0X02
_PRESCALER_8 = 0X03
_PRESCALER_16 = 0X04
_PRESCALER_32 = 0X05
_PRESCALER_64 = 0X06
# Wait time introduced after each register write (except integration start)
_SLEEP_VALUE = 0.05
def __init__(self, bus_number=None):
"""Initialize i2c communication with the sensor and sets default parameters.
Default parameters: continuous integration (not started) with 12ms cycles, gain 1x, pre-scale 1.
:param bus_number: the i2c bus number (usually 0 or 1, depending on the hardware). Use the i2cdetect command
line tool to identify the right bus. If set to None, will use the Raspberry Pi revision number to guess which
bus to use.
"""
if bus_number is None:
# Use Rasbperry Pi revision to choose bus number
board_revision = RPi.GPIO.RPI_REVISION
if board_revision == 2 or board_revision == 3:
bus_number = 1
else:
bus_number = 0
self.bus = smbus.SMBus(bus_number)
self.use_continuous_integration()
self.set_gain_and_prescaler(1, 1)
def use_continuous_integration(self, integration_time_in_ms=12):
"""Configure the sensor to perform continuous, back-to-back integrations of pre-defined duration.
Continuous integration will begin after calling start_integration() and will stop after calling
stop_integration().
:param integration_time_in_ms: supported values in ms are 12, 100 and 400.
"""
assert integration_time_in_ms == 12 \
or integration_time_in_ms == 100 \
or integration_time_in_ms == 400, \
"Continuous integration supports only 12ms, 100ms or 400ms integration durations"
# Convert integration time value into the corresponding byte values expected by the sensor.
if integration_time_in_ms == 12:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_12MS
elif integration_time_in_ms == 100:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_100MS
elif integration_time_in_ms == 400:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_400MS
else:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_12MS
self.bus.write_i2c_block_data(self._I2C_SENSOR_ADDRESS,
self._REGISTER_TIMING,
[self._TIMING_INTEGRATION_MODE_CONTINUOUS | integration_time_reg])
time.sleep(self._SLEEP_VALUE)
def use_manual_integration(self):
"""Configure the sensor to perform a single integration manually started and stopped.
Manual integration will begin after calling start_integration(), and will stop after calling stop_integration().
"""
self.bus.write_i2c_block_data(self._I2C_SENSOR_ADDRESS,
self._REGISTER_TIMING,
[self._TIMING_INTEGRATION_MODE_MANUAL])
time.sleep(self._SLEEP_VALUE)
def set_gain_and_prescaler(self, gain_multiplier=1, prescaler_divider=1):
"""Configure the sensor gain and prescaler.
:param gain_multiplier: Gain sets the sensibility of the sensor, effectively extending the dynamic range of the
sensor but eventually inducing saturation. Supported values are 1, 4, 16 and 64.
:param prescaler_divider: Prescaler scales the values by dividing them before storage in the output registers,
hence reducing saturation at the cost of reducing measurement precision. Supported prescaler dividers are 1, 2,
4, 8, 16, 32 and 64.
"""
assert gain_multiplier == 1 or gain_multiplier == 4 or gain_multiplier == 16 or gain_multiplier == 64, \
"Supported gain multipliers: 1, 4, 16 and 64"
assert prescaler_divider == 1 \
or prescaler_divider == 2 \
or prescaler_divider == 4 \
or prescaler_divider == 8 \
or prescaler_divider == 16 \
or prescaler_divider == 32 \
or prescaler_divider == 64, \
"Supported prescaler dividers: 1, 2, 4, 8, 16, 32 and 64"
# Convert gain multiplier into the corresponding byte values expected by the sensor.
if gain_multiplier == 1:
gain_reg = self._GAIN_1X
elif gain_multiplier == 4:
gain_reg = self._GAIN_4X
elif gain_multiplier == 16:
gain_reg = self._GAIN_16X
elif gain_multiplier == 64:
gain_reg = self._GAIN_64X
else:
gain_reg = self._GAIN_1X
# Convert prescaler divider into the corresponding byte values expected by the sensor.
if prescaler_divider == 1:
prescaler_reg = self._PRESCALER_1
elif prescaler_divider == 2:
prescaler_reg = self._PRESCALER_2
elif prescaler_divider == 4:
prescaler_reg = self._PRESCALER_4
elif prescaler_divider == 8:
prescaler_reg = self._PRESCALER_8
elif prescaler_divider == 16:
prescaler_reg = self._PRESCALER_16
elif prescaler_divider == 32:
prescaler_reg = self._PRESCALER_32
elif prescaler_divider == 64:
prescaler_reg = self._PRESCALER_64
else:
prescaler_reg = self._PRESCALER_1
self.bus.write_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_GAIN, [gain_reg | prescaler_reg])
time.sleep(self._SLEEP_VALUE)
def start_integration(self):
"""Start the integration.
"""
self.bus.write_i2c_block_data(
self._I2C_SENSOR_ADDRESS,
self._REGISTER_CONTROL,
[self._CONTROL_ADC_ENABLE | self._CONTROL_ADC_POWER_ON])
def stop_integration(self):
"""Stop the integration.
"""
self.bus.write_i2c_block_data(
self._I2C_SENSOR_ADDRESS,
self._REGISTER_CONTROL,
[self._CONTROL_ADC_DISABLE | self._CONTROL_ADC_POWER_ON])
def is_integration_complete(self):
""" Checks if an integration has been successfully completed and color data is ready to be read.
:return: True if integration is completed.
"""
integration_status = self.bus.read_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_CONTROL, 1)
return integration_status[0] & self._CONTROL_ADC_IS_VALID == self._CONTROL_ADC_IS_VALID
def read_rgbc_word(self):
""" Reads the measured color, split over 4 channels: red, green, blue, clear.
Each value is provided as a word.
:return: a (r,g,b,c) tuple of the 4 word values measured by the red/green/blue/clear channels
"""
# Integration result registers are 8 consecutive bytes starting by lower value of green channel.
# Reading them in a single pass.
raw_color = self.bus.read_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_DATA_GREEN_LOW, 8)
return (raw_color[2] + raw_color[3] * 256,
raw_color[0] + raw_color[1] * 256,
raw_color[4] + raw_color[5] * 256,
raw_color[6] + raw_color[7] * 256)
def read_rgbc(self):
""" Reads the measured color, split over 4 channels: red, green, blue, clear (unfiltered).
Each value is provided as a byte.
:return: a (r,g,b,c) tuple of the 4 byte values measured by the red/green/blue/clear channels
"""
# Integration result registers are 8 consecutive bytes starting by lower value of green channel.
# Reading them in a single pass.
raw_color = self.bus.read_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_DATA_GREEN_LOW, 8)
# Discard lower byte of each channel
return (raw_color[3],
raw_color[1],
raw_color[5],
raw_color[7])
def read_xy(self):
""" Reads the measured color and converts it as CIE x,y coordinates.
See http://www.techmind.org/colour/ and https://en.wikipedia.org/wiki/CIE_1931_color_space for more information.
:return: a (x, y) tuple
"""
rgbc = self.read_rgbc_word()
x_bar = -0.14282 * rgbc[0] + 1.54924 * rgbc[1] + -0.95641 * rgbc[2]
y_bar = -0.32466 * rgbc[0] + 1.57837 * rgbc[1] + -0.73191 * rgbc[2]
z_bar = -0.68202 * rgbc[0] + 0.77073 * rgbc[1] + 0.563320 * rgbc[2]
x = x_bar / (x_bar + y_bar + z_bar)
y = y_bar / (x_bar + y_bar + z_bar)
return [x, y]
def read_color_name(self):
""" Reads the measured color and maps it to the nearest color present in COLOR_TABLE.
Warning: current implementation does not work well with white / grey / black or dark colors.
:return: The color name used as a key in COLOR_TABLE.
"""
xy = self.read_xy()
closest_color = None
closest_distance = 1
for current_color in self.COLOR_TABLE:
current_coordinates = self.COLOR_TABLE[current_color]
current_dist = math.sqrt(
(current_coordinates["y"] - xy[1])**2 + (current_coordinates["x"] - xy[0])**2)
if current_dist < closest_distance:
closest_color = current_color
closest_distance = current_dist
return closest_color | apache-2.0 | 6,583,448,653,865,636,000 | 44.648256 | 120 | 0.626735 | false |
gbolet/BlenderCacheManager | clearCache.py | 1 | 4262 | bl_info = {
"name": "BVH Cache Manager",
"category": "Render",
"description":"Easily delete cached BVH data!",
"location":"Properties Editor > Render > BVH Cache Manager",
"author":"Gregory Bolet",
"version":"001",
"warning":"Alpha Version"
}
import bpy
import os
import shutil
cacheDirectory = ""
class InterfacePanel(bpy.types.Panel): #[ref: Panel(bpy_struct)]
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context= "render"
bl_label = "BVH Cache Manager"
#this will create some UI elements
print("***Staring Interface Panel...***")
bpy.types.Scene.manualDirectory = bpy.props.StringProperty(name = "Cache Folder", default = "You can leave me blank!",
description = "Manually select cache folder directory",
subtype = 'DIR_PATH'
)
print("***Interface Ready!***")
#draw function gets called A LOT,
#object edits cannot be performed here, only UI updates
def draw(self, context):
layout = self.layout
col = layout.column(align=True) #adds a column to the UI
col.label("Manual Entry:")
col.prop(context.scene, 'manualDirectory', expand=False)
print("Manual Directory IS:"+ context.scene.manualDirectory)
col.label("")
col.operator("blender.execute",text="Clear Cache")
return None
class ClearCacheButton(bpy.types.Operator):
bl_idname = "blender.execute"
bl_label = "Clear BVH Cache"
bl_options = {"UNDO"}
def __init__(self):
global cacheDirectory
from sys import platform as _platform
manualDir = bpy.context.scene.manualDirectory
if (os.path.isdir(manualDir) == False and manualDir != ""):
print("Invalid manual entry directory. Using default cache folder...")
elif (os.path.isdir(manualDir) == False and manualDir == ""):
print("Looking for default cache folder...")
if(manualDir != "" and os.path.isdir(manualDir)):
cacheDirectory = manualDir[:-1] #removes extra slash
elif _platform == "linux" or _platform == "linux2":
#This will always work on Linux
#$HOME/.config/blender/2.76/
cacheDirectory += "$HOME/.config/blender/"
cacheDirectory += '{:.4}'.format(bpy.app.version_string)
cacheDirectory += "/cache"
elif _platform == "darwin":
#This will always work on Mac OSX
#/Users/$USER/Library/Application Support/Blender/2.76/
cacheDirectory += "~/Library/Application Support/Blender/"
cacheDirectory += '{:.4}'.format(bpy.app.version_string)
cacheDirectory += "/cache"
elif _platform == "win32":
#this always works on Windows machines
#C:\Documents and Settings\$USERNAME\AppData\Roaming\Blender Foundation\Blender\2.76\
cacheDirectory += os.getenv('APPDATA')
cacheDirectory += "\Blender Foundation\Blender\\"
cacheDirectory += '{:.4}'.format(bpy.app.version_string)
cacheDirectory += "\cache"
print("User Cache Directory: "+cacheDirectory)
return None;
def clearCache(self):
global cacheDirectory
if(os.path.isdir(cacheDirectory)):
shutil.rmtree(cacheDirectory)
if(os.path.isdir(cacheDirectory) == False):
os.makedirs(cacheDirectory)
print("\nNo cache directory exists, creating one...")
print("New cache folder directory: "+cacheDirectory+"\n")
return None;
def execute(self, context):
global cacheDirectory
print("\nStarting process...")
self.clearCache()
cacheDirectory = ""
print("FINISHED! \n\n\n")
return {"FINISHED"}
#end invoke
def register():
bpy.utils.register_class(InterfacePanel)
bpy.utils.register_class(ClearCacheButton)
def unregister():
bpy.utils.unregister_class(ClearCacheButton)
bpy.utils.unregister_class(InterfacePanel)
if __name__ == "__main__":
register()
| mit | -270,100,889,357,932,200 | 29.442857 | 122 | 0.591272 | false |
simudream/GeoIP2-python | setup.py | 1 | 1626 | #!/usr/bin/env python
import codecs
import os
import sys
import geoip2
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = ['geoip2']
requirements = [i.strip() for i in open("requirements.txt").readlines()]
setup(
name='geoip2',
version=geoip2.__version__,
description='MaxMind GeoIP2 API',
long_description=codecs.open('README.rst', 'r', 'utf-8').read(),
author='Gregory Oschwald',
author_email='[email protected]',
url='http://www.maxmind.com/',
packages=['geoip2'],
package_data={'': ['LICENSE']},
package_dir={'geoip2': 'geoip2'},
include_package_data=True,
install_requires=requirements,
extras_require={
':python_version=="2.6" or python_version=="2.7"': ['ipaddr']},
tests_require=['requests_mock'],
test_suite="tests",
license=geoip2.__license__,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet',
),
)
| apache-2.0 | -3,090,010,712,760,477,700 | 28.563636 | 72 | 0.619311 | false |
itpubs/reflecting | apps/users/models.py | 1 | 1816 | from django.contrib.auth.models import AbstractUser
from django.db import models
from utils.default_model import random_nick_name
from blog.models import Article
__all__ = [
'UserProfile',
'EmailVerifyCode',
'Message',
'Comment',
'Reply'
]
# Create your models here.
class UserProfile(AbstractUser):
gender_choices = (
('male', '男'),
('female', '女'),
('unknown', '未知')
)
nick_name = models.CharField(max_length=100, default=random_nick_name)
gender = models.CharField(choices=gender_choices, default='unknown', max_length=20)
image = models.ImageField(upload_to='avatar/%Y/%m', max_length=100, default='avatar/avatar.png')
def get_message_count(self):
return Message.objects.filter(status=False).count()
def get_comment_count(self):
return Comment.objects.filter(status=False).count()
class EmailVerifyCode(models.Model):
code = models.CharField(max_length=20, unique=True)
email = models.EmailField(max_length=50)
send_time = models.DateTimeField(auto_now_add=True)
class Message(models.Model):
add_time = models.DateTimeField(auto_now_add=True)
body = models.CharField(max_length=200)
status = models.BooleanField(default=False)
class Comment(models.Model):
user = models.ForeignKey(UserProfile)
article = models.ForeignKey(Article, related_name='article_comment')
body = models.TextField()
add_time = models.DateTimeField(auto_now_add=True)
status = models.BooleanField(default=False)
def get_reply(self):
return Reply.objects.filter(comment=self.pk)
class Reply(models.Model):
user = models.ForeignKey(UserProfile)
comment = models.ForeignKey(Comment)
body = models.TextField()
add_time = models.DateTimeField(auto_now_add=True)
| mit | -6,211,186,313,144,723,000 | 28.639344 | 100 | 0.69635 | false |
dhocker/athomepowerlineserver | commands/delete_device_program.py | 1 | 1356 | #
# Delete a device program
# Copyright © 2020 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
from commands.ServerCommand import ServerCommand
from database.program_assignments import ProgramAssignments
class DeleteDeviceProgram(ServerCommand):
"""
Command handler for deleting a device timer program
"""
def Execute(self, request):
args = request["args"]
device_id = int(args["device-id"])
program_id = int(args["program-id"])
# Generate a successful response
r = self.CreateResponse(request["request"])
# Remove program from database
pa = ProgramAssignments()
result = pa.delete(device_id, program_id)
if result:
r['result-code'] = DeleteDeviceProgram.SUCCESS
r['device-id'] = args["device-id"]
r['program-id'] = args["program-id"]
r['message'] = DeleteDeviceProgram.MSG_SUCCESS
else:
r['result-code'] = pa.last_error_code
r['device-id'] = args["device-id"]
r['program-id'] = args["program-id"]
r['message'] = pa.last_error
return r
| gpl-3.0 | -1,568,335,787,459,006,700 | 30.511628 | 70 | 0.631734 | false |
pkleimert/hrpt | urls.py | 1 | 3670 | from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import redirect_to
from haystack.views import SearchView, search_view_factory
from haystack.forms import SearchForm
from apps.ew_contact_form.forms import ContactForm
from views import LatestEntriesFeed
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
('^admin/cms/page/18/edit-plugin/[0-9]+/.*escapeHtml.*icon_src.*/$', 'django.views.defaults.page_not_found'),
(r'^admin/surveys-editor/', include('apps.pollster.urls')),
(r'^admin/', include(admin.site.urls)),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)/tile/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)$', 'apps.pollster.views.map_tile', name='pollster_map_tile'),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)/click/(?P<lat>[\d.-]+)/(?P<lng>[\d.-]+)$', 'apps.pollster.views.map_click', name='pollster_map_click'),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)\.json$', 'apps.pollster.views.chart_data', name='pollster_chart_data'),
(r'^surveys/(?P<shortname>.+)/$', 'apps.pollster.views.survey_run'),
(r'^survey/', include('apps.survey.urls')),
(r'^reminder/', include('apps.reminder.urls')),
(r'^influenzanet/', 'django.views.generic.simple.direct_to_template', {'template': 'influenzanet.html'}),
(r'^googlec96088c11ef7e5c4.html$', 'django.views.generic.simple.direct_to_template', {'template': 'googlec96088c11ef7e5c4.html'}),
(r'nu.html$', 'django.views.generic.simple.direct_to_template', {'template': 'nu.html'}),
(r'^mobile/login/$', 'views.mobile_login'),
(r'^mobile/surveys/(?P<shortname>.+)/$', 'apps.pollster.views.survey_run', {'clean_template': True}),
(r'^mobile/map/(?P<survey_shortname>.+)/(?P<chart_shortname>.+)/$', 'apps.pollster.views.survey_map'),
(r'^xss/$', LatestEntriesFeed()),
#url(r'^captcha/', include('captcha.urls')),
#(r'^tellafriend/', include('tellafriend.urls')),
url(r'^search/$', search_view_factory(
view_class=SearchView,
form_class=SearchForm
), name='haystack_search'),
(r'^test-search/$', 'views.test_search'),
(r'^accounts/', include('apps.accounts.urls')),
url(r'^login/$', redirect_to, {'url': settings.LOGIN_URL},
name='loginurl-index'),
(r'^login/', include('loginurl.urls')),
(r'^count/', include('apps.count.urls')),
(r'^contest/', include('apps.contest.urls')),
url(r'^contact/$', 'contact_form.views.contact_form', {'form_class': ContactForm}, name='contact_form'),
url(r'^contact/sent/$', 'django.views.generic.simple.direct_to_template', {'template': 'contact_form/contact_form_sent.html'}, name='contact_form_sent'),
(r'^colors.css$', 'apps.partnersites.views.colors_css'),
url(r'^register/$',
'registration.views.register',
{ 'backend': 'registration.backends.default.DefaultBackend',
'template_name': 'registration/registration_explanation.html' },
name='registration_register_explanation'),
(r'^forum/', include('pybb.urls', namespace='pybb')),
)
if settings.DEBUG:
urlpatterns = patterns('',
(r'^404/$', 'django.views.defaults.page_not_found'),
(r'^500/$', 'views.server_error'),
(r'^' + settings.MEDIA_URL.lstrip('/'), include('appmedia.urls'), {'show_indexes': True}),
) + urlpatterns
if settings.MOBILE_INTERFACE_ACTIVE:
urlpatterns += patterns('', (r'^ema/', include('apps.survey.api.urls')))
# Catchall
urlpatterns += patterns('', url(r'^', include('cms.urls')))
handler500 = 'views.server_error'
| agpl-3.0 | -3,252,625,988,242,738,000 | 46.051282 | 178 | 0.648774 | false |
vsoch/docfish | scripts/import/upload_storage.py | 1 | 5156 | #!/usr/bin/env python
# This is an example script to upload data (images, text, metadata) to
# google cloud storage and datastore (for general data)
# Preparation of Pubmed Open Access Data
ftp_base = "ftp://ftp.ncbi.nlm.nih.gov/pub/pmc"
file_list = "ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_file_list.txt"
from glob import glob
import requests
import signal
import xmltodict
import imghdr
import tempfile
import shutil
import tarfile
import numpy
import urllib
import pandas
import os
import re
import pwd
######################################################################
# Pubmed Functions
######################################################################
def get_userhome():
'''get userhome gets the user's home based on the uid,
and NOT the environment variable which is not consistent'''
return pwd.getpwuid(os.getuid())[5]
def get_pubmed(download_base=None):
'''retrieve pubmed database, either from locally saved file,
or if not yet generated, obtain from FTP server
'''
if download_base is None:
download_base = get_userhome()
output_file = '%s/pmc.tsv' %(download_base)
if not os.path.exists(output_file):
download_folder = tempfile.mkdtemp()
pmc_file = '%s/pmc.txt' %download_folder
urllib.request.urlretrieve(file_list, pmc_file)
pmc = pandas.read_csv(pmc_file,sep="\t",skiprows=1,header=None)
pmc.columns = ["TARGZ_FILE","JOURNAL","PMCID","PMID","LICENSE"]
pmc.to_csv(output_file,sep="\t",index=None)
return pmc
return pandas.read_csv(output_file,sep="\t")
def read_file(file_path):
with open (file_path, "r") as myfile:
return myfile.read().replace('\n', '')
def read_xml(xml_file):
with open(xml_file) as fd:
return xmltodict.parse(fd.read())
def format_name(name):
'''format name will ensure that all collection names have
periods removed, lowercase, and spaces replaced with -
I'm not sure if this is best, but it's safer than allowing anything
'''
return name.replace('.','').lower().replace(" ",'-')
def get_metadata(row):
'''get_uid will return the metadata for a row, with the uid corresponding
first to the PMID, and the PMC id if that is not defined
'''
pmid = row[1].PMID
if not isinstance(pmid,str):
if numpy.isnan(pmid):
pmid = row[1].PMCID
download_url = "%s/%s" %(ftp_base,row[1].TARGZ_FILE)
metadata = {"pmcid":row[1].PMCID,
"type":"article",
"uid":pmid,
"publication_date": publication_date,
"download_url":download_url,
"license":row[1].LICENSE}
if not isinstance(row[1].PMID,str):
if not numpy.isnan(row[1].PMID):
metadata['pmid'] = row[1].PMID
return metadata
def create_article(metadata):
tmpdir = tempfile.mkdtemp()
pmc_file = '%s/article.tar.gz' %(tmpdir)
print('Downloading: %s' %(metadata['uid']))
urllib.request.urlretrieve(metadata['download_url'], pmc_file)
tar = tarfile.open(pmc_file, "r:gz")
tar.extractall(tmpdir)
files = glob('%s/%s/*' %(tmpdir,metadata['pmcid']))
images = [x for x in files if imghdr.what(x) is not None]
pdf_files = [x for x in files if x.lower().endswith('pdf')]
xml_file = [x for x in files if x.lower().endswith('xml')]
images = images + pdf_files
general_client.upload_dataset(images=images,
texts=xml_file,
collection=collection,
uid=metadata['uid'],
metadata=metadata)
shutil.rmtree(tmpdir)
######################################################################
# Signals
######################################################################
def signal_handler(signum, frame):
raise Exception("Timed out!")
# Only allow each paper a 30 seconds to download
signal.signal(signal.SIGALRM, signal_handler)
######################################################################
# Preparation of Pubmed Open Access Data
######################################################################
import os
import tempfile
import tarfile
import imghdr
import urllib
from glob import glob
# Obtain 1.5 million pmc articles
pmc = get_pubmed()
# Start google storage client for pmc-stanford
from som.api.google.storage.general import Client
general_client = Client(bucket_name='pmc-stanford')
timeouts = []
current = 625
for row in pmc.iterrows():
if row[0] >= current:
try:
signal.alarm(30)
journal_name = row[1].JOURNAL
date_match = re.search("\d{4}",journal_name)
publication_date = journal_name[date_match.start():]
journal_name = format_name(journal_name[:date_match.start()].strip())
collection = general_client.get_collection(uid=journal_name)
metadata = get_metadata(row)
article = create_article(metadata)
except:
timeouts.append(row[0])
general_client = Client(bucket_name='pmc-stanford')
current+=1
| mit | -8,730,548,330,262,620,000 | 31.427673 | 81 | 0.579519 | false |
aleroddepaz/python-samples | justanotherchat/chat.py | 1 | 1885 | import os
import json
import random
import logging
import urlparse
import webapp2
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.ext.webapp import template
class Client(db.Model):
username = db.StringProperty(required=True)
token = db.StringProperty(required=False)
class MainPage(webapp2.RequestHandler):
def get(self):
default_username = 'john.doe' + str(random.randint(0, 1000))
username = self.request.get('username', default_username)
client = Client(username = username)
db.put(client)
client.token = channel.create_channel(str(client.key().id()))
db.put(client)
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, {
'token': client.token,
'username': username
}))
def post(self):
qs = urlparse.parse_qs(self.request.body)
data = json.dumps({
'message': qs['message'][0],
'username': qs['username'][0]
})
for client in Client.all():
client_id = str(client.key().id())
logging.info('Sending data to {}...'.format(client_id))
channel.send_message(client_id, data)
class ConnectedHandler(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('from')
logging.info("{} joined the party".format(client_id))
class DisconnectedHandler(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('from')
logging.info("Goodbye {}!".format(client_id))
db.delete(Client.get_by_id(int(client_id)))
application = webapp2.WSGIApplication([
('/', MainPage),
('/_ah/channel/connected/', ConnectedHandler),
('/_ah/channel/disconnected/', DisconnectedHandler)
], debug=True)
| mit | -4,153,536,256,127,778,000 | 28.920635 | 69 | 0.632361 | false |
obulpathi/poppy | tests/api/admin/test_get_service_by_domain.py | 1 | 18720 | # coding= utf-8
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
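"""API admin tests: look up a service through its domain names."""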
import ddt
import uuid
from tests.api import base
from tests.api.utils.schema import services
@ddt.ddt
class TestGetServiceByDomain(base.TestBase):
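    """Tests for the admin lookup of a service by one of its HTTP domains.

    setUp creates a service with three HTTP domains; the tests then query
    the admin get-service-by-domain endpoint with existing, non-existing,
    malformed and over-long domain names.
    """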
def setUp(self):
super(TestGetServiceByDomain, self).setUp()
if self.test_config.run_operator_tests is False:
self.skipTest(
'Test Operator Functions is disabled in configuration')
self.service_name = self.generate_random_string(prefix='api-test')
self.flavor_id = self.test_flavor
domain1 = self.generate_random_string(
prefix='www.api-test-domain') + '.com'
domain2 = self.generate_random_string(
prefix='www.api-test-domain') + '.com'
domain3 = self.generate_random_string(
prefix='www.api-test-domain') + '.com'
self.domain_list = [
{"domain": domain1, "protocol": "http"},
{"domain": domain2, "protocol": "http"},
{"domain": domain3, "protocol": "http"}
]
origin = self.generate_random_string(
prefix='api-test-origin') + u'.com'
self.origin_list = [
{
u"origin": origin,
u"port": 80,
u"ssl": False,
u"rules": [{
u"name": u"default",
u"request_url": u"/*"
}]
}
]
self.caching_list = [
{
u"name": u"default",
u"ttl": 3600,
u"rules": [{
u"name": "default",
u"request_url": "/*"
}]
},
{
u"name": u"home",
u"ttl": 1200,
u"rules": [{
u"name": u"index",
u"request_url": u"/index.htm"
}]
}
]
self.restrictions_list = [
{
u"name": u"website only",
u"access": u"whitelist",
u"rules": [
{
u"name": domain1,
u"referrer": domain1,
u"request_url": "/*"
}
]
}
]
resp = self.setup_service(
service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
restrictions_list=self.restrictions_list,
flavor_id=self.flavor_id)
self.assertEqual(resp.status_code, 202)
self.assertEqual(resp.text, '')
self.service_url = resp.headers['location']
self.client.wait_for_service_status(
location=self.service_url,
status='deployed',
abort_on_status='failed',
retry_interval=self.test_config.status_check_retry_interval,
retry_timeout=self.test_config.status_check_retry_timeout)
def test_get_service_by_domain(self):
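        """Looking up the first configured domain returns the full service."""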
self.skipTest('See https://bugs.launchpad.net/poppy/+bug/1486103')
resp = self.operator_client.admin_get_service_by_domain_name(
self.domain_list[0]['domain'])
self.assertEqual(resp.status_code, 200)
body = resp.json()
self.assertSchema(body, services.get_service)
for item in self.domain_list:
if 'protocol' not in item:
item['protocol'] = 'http'
self.assertEqual(body['domains'], self.domain_list)
for item in self.origin_list:
if 'rules' not in item:
item[u'rules'] = []
if 'hostheadertype' not in item:
item[u'hostheadertype'] = 'domain'
elif item['hostheadertype'] == 'origin':
item[u'hostheadervalue'] = item['origin']
self.assertEqual(body['origins'], self.origin_list)
self.assertEqual(body['caching'], self.caching_list)
self.assertEqual(body['restrictions'], self.restrictions_list)
self.assertEqual(body['flavor_id'], self.flavor_id)
def test_get_service_by_multiple_domains(self):
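        """Each of the three configured domains resolves to the service."""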
self.skipTest('See https://bugs.launchpad.net/poppy/+bug/1486103')
api_resp = self.operator_client.admin_get_service_by_domain_name(
self.domain_list[0]['domain'])
self.assertEqual(api_resp.status_code, 200)
api_resp1 = self.operator_client.admin_get_service_by_domain_name(
self.domain_list[1]['domain'])
self.assertEqual(api_resp1.status_code, 200)
api_resp2 = self.operator_client.admin_get_service_by_domain_name(
self.domain_list[2]['domain'])
self.assertEqual(api_resp2.status_code, 200)
def test_negative_get_by_non_existing_domain(self):
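        """A domain that was never registered returns a 404."""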
if self.test_config.run_operator_tests is False:
self.skipTest(
'Test Operator Functions is disabled in configuration')
domain_name = self.domain_list[0]['domain'] + str(uuid.uuid1()) + \
".com"
resp = self.operator_client.admin_get_service_by_domain_name(
domain_name)
self.assertEqual(resp.status_code, 404)
@ddt.data("http://www.non-existing-domain",
"https://www.non-existing-domain",
"http://www.קאַץ")
def test_get_service_by_non_existing_bad_domain(self, domain):
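        """Malformed or non-ASCII domains that do not exist return a 404."""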
domain_name = domain + str(uuid.uuid1()) + ".com"
resp = self.operator_client.admin_get_service_by_domain_name(
domain_name)
self.assertEqual(resp.status_code, 404)
def test_get_service_negative_very_long_domain(self):
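        """A domain name longer than the allowed maximum is rejected with a 400."""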
domain = "www.too_long_name_too_long_name_too_long_name_too_long_" \
"name_too_long_name_too_long_name_too_long_name_too_long_" \
"name_too_long_name_too_long_name_too_long_name_too_long_" \
"name_too_long_name_too_long_name_too_long_name_too_long_" \
"name_too_long_name_too_long.com"
resp = self.operator_client.admin_get_service_by_domain_name(domain)
self.assertEqual(resp.status_code, 400)
def tearDown(self):
self.client.delete_service(location=self.service_url)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestGetServiceByDomain, self).tearDown()
@ddt.ddt
class TestGetServiceBySharedDomain(base.TestBase):
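    """Admin domain lookup for a service created with a shared SSL certificate."""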
def setUp(self):
super(TestGetServiceBySharedDomain, self).setUp()
if self.test_config.run_operator_tests is False:
self.skipTest(
'Test Operator Functions is disabled in configuration')
self.service_name = self.generate_random_string(prefix='API-Test-')
self.flavor_id = self.test_flavor
domain = self.generate_random_string(
prefix='api-test-domain')
self.domain_list = [
{"domain": domain, "protocol": "https", "certificate": "shared"}
]
origin = self.generate_random_string(
prefix='api-test-origin') + u'.com'
self.origin_list = [
{
u"origin": origin,
u"port": 443,
u"ssl": True,
u"rules": [{
u"name": u"default",
u"request_url": u"/*"
}]
}
]
self.caching_list = [
{
u"name": u"default",
u"ttl": 3600,
u"rules": [{
u"name": "default",
u"request_url": "/*"
}]
},
{
u"name": u"home",
u"ttl": 1200,
u"rules": [{
u"name": u"index",
u"request_url": u"/index.htm"
}]
}
]
self.restrictions_list = [
{
u"name": u"website only",
u"rules": [
{
u"name": domain,
u"referrer": "domain.com",
u"request_url": "/*"
}
],
u"access": "whitelist"
}
]
resp = self.setup_service(
service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
restrictions_list=self.restrictions_list,
flavor_id=self.flavor_id)
self.assertEqual(resp.status_code, 202)
self.assertEqual(resp.text, '')
self.service_url = resp.headers['location']
self.client.wait_for_service_status(
location=self.service_url,
status='deployed',
abort_on_status='failed',
retry_interval=self.test_config.status_check_retry_interval,
retry_timeout=self.test_config.status_check_retry_timeout)
def test_get_service_by_domain(self):
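        """The provider-assigned shared domain resolves to the service."""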
self.skipTest('See https://bugs.launchpad.net/poppy/+bug/1486103')
get_resp = self.client.get_service(self.service_url)
resp_body = get_resp.json()
domain = resp_body['domains'][0]['domain']
resp = self.operator_client.admin_get_service_by_domain_name(domain)
self.assertEqual(resp.status_code, 200)
body = resp.json()
self.assertSchema(body, services.get_service)
for item in self.origin_list:
if 'rules' not in item:
item[u'rules'] = []
if 'hostheadertype' not in item:
item[u'hostheadertype'] = 'domain'
elif item['hostheadertype'] == 'origin':
item[u'hostheadervalue'] = item['origin']
self.assertEqual(body['origins'], self.origin_list)
self.assertEqual(body['caching'], self.caching_list)
self.assertEqual(body['restrictions'], self.restrictions_list)
self.assertEqual(body['flavor_id'], self.flavor_id)
def tearDown(self):
self.client.delete_service(location=self.service_url)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestGetServiceBySharedDomain, self).tearDown()
@ddt.ddt
class TestGetServiceBySANCertDomain(base.TestBase):
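    """Admin domain lookup for a service created with a SAN certificate."""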
def setUp(self):
super(TestGetServiceBySANCertDomain, self).setUp()
if self.test_config.run_operator_tests is False:
self.skipTest(
'Test Operator Functions is disabled in configuration')
self.service_name = self.generate_random_string(prefix='API-Test-')
self.flavor_id = self.test_flavor
domain = self.generate_random_string(
prefix='www.api-test-domain') + '.com'
self.domain_list = [
{"domain": domain, "protocol": "https", "certificate": "san"}
]
origin = self.generate_random_string(
prefix='api-test-origin') + u'.com'
self.origin_list = [
{
u"origin": origin,
u"port": 443,
u"ssl": True,
u"rules": [{
u"name": u"default",
u"request_url": u"/*"
}]
}
]
self.caching_list = [
{
u"name": u"default",
u"ttl": 3600,
u"rules": [{
u"name": "default",
u"request_url": "/*"
}]
},
{
u"name": u"home",
u"ttl": 1200,
u"rules": [{
u"name": u"index",
u"request_url": u"/index.htm"
}]
}
]
self.restrictions_list = [
{
u"name": u"website only",
u"rules": [
{
u"name": domain,
u"referrer": domain,
u"request_url": "/*"
}
],
u"access": "whitelist"
}
]
resp = self.setup_service(
service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
restrictions_list=self.restrictions_list,
flavor_id=self.flavor_id)
self.service_url = resp.headers["location"]
self.client.wait_for_service_status(
location=self.service_url,
status='deployed',
abort_on_status='failed',
retry_interval=self.test_config.status_check_retry_interval,
retry_timeout=self.test_config.status_check_retry_timeout)
def test_get_service_by_domain(self):
self.skipTest('See https://bugs.launchpad.net/poppy/+bug/1486103')
get_resp = self.client.get_service(self.service_url)
resp_body = get_resp.json()
domain = resp_body['domains'][0]['domain']
resp = self.operator_client.admin_get_service_by_domain_name(domain)
self.assertEqual(resp.status_code, 200)
body = resp.json()
self.assertSchema(body, services.get_service)
for item in self.origin_list:
if 'rules' not in item:
item[u'rules'] = []
if 'hostheadertype' not in item:
item[u'hostheadertype'] = 'domain'
elif item['hostheadertype'] == 'origin':
item[u'hostheadervalue'] = item['origin']
self.assertEqual(body['origins'], self.origin_list)
self.assertEqual(body['caching'], self.caching_list)
self.assertEqual(body['restrictions'], self.restrictions_list)
self.assertEqual(body['flavor_id'], self.flavor_id)
def tearDown(self):
self.client.delete_service(location=self.service_url)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestGetServiceBySANCertDomain, self).tearDown()
@ddt.ddt
class TestGetServiceByCustomCertDomain(base.TestBase):
def setUp(self):
super(TestGetServiceByCustomCertDomain, self).setUp()
if self.test_config.run_operator_tests is False:
self.skipTest(
'Test Operator Functions is disabled in configuration')
self.service_name = self.generate_random_string(prefix='API-Test-')
self.flavor_id = self.test_flavor
domain = self.generate_random_string(
prefix='www.api-test-domain') + '.com'
self.domain_list = [
{"domain": domain, "protocol": "https", "certificate": "custom"}
]
origin = self.generate_random_string(
prefix='api-test-origin') + u'.com'
self.origin_list = [
{
u"origin": origin,
u"port": 443,
u"ssl": True,
u"rules": [{
u"name": u"default",
u"request_url": u"/*"
}]
}
]
self.caching_list = [
{
u"name": u"default",
u"ttl": 3600,
u"rules": [{
u"name": "default",
u"request_url": "/*"
}]
},
{
u"name": u"home",
u"ttl": 1200,
u"rules": [{
u"name": u"index",
u"request_url": u"/index.htm"
}]
}
]
self.restrictions_list = [
{
u"name": u"website only",
u"rules": [
{
u"name": domain,
u"referrer": domain,
u"request_url": "/*"
}
],
u"access": "whitelist"
}
]
resp = self.setup_service(
service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
restrictions_list=self.restrictions_list,
flavor_id=self.flavor_id)
self.assertEqual(resp.status_code, 202)
self.assertEqual(resp.text, '')
self.service_url = resp.headers['location']
self.client.wait_for_service_status(
location=self.service_url,
status='deployed',
abort_on_status='failed',
retry_interval=self.test_config.status_check_retry_interval,
retry_timeout=self.test_config.status_check_retry_timeout)
def test_get_service_by_domain(self):
self.skipTest('See https://bugs.launchpad.net/poppy/+bug/1486103')
get_resp = self.client.get_service(self.service_url)
resp_body = get_resp.json()
domain = resp_body['domains'][0]['domain']
resp = self.operator_client.admin_get_service_by_domain_name(domain)
self.assertEqual(resp.status_code, 200)
body = resp.json()
self.assertSchema(body, services.get_service)
for item in self.origin_list:
if 'rules' not in item:
item[u'rules'] = []
if 'hostheadertype' not in item:
item[u'hostheadertype'] = 'domain'
elif item['hostheadertype'] == 'origin':
item[u'hostheadervalue'] = item['origin']
self.assertEqual(body['origins'], self.origin_list)
self.assertEqual(body['caching'], self.caching_list)
self.assertEqual(body['restrictions'], self.restrictions_list)
self.assertEqual(body['flavor_id'], self.flavor_id)
def tearDown(self):
self.client.delete_service(location=self.service_url)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestGetServiceByCustomCertDomain, self).tearDown()
| apache-2.0 | 5,607,592,291,727,860,000 | 33.788104 | 77 | 0.527944 | false |
deapplegate/wtgpipeline | skewsurface_plots.py | 1 | 4060 | #######################
# Plot results of skew surface tests
#######################
import os, re, glob
import numpy as np
import pylab
import scipy.spatial as spatial
import ldac
#######################
def loadCats(cluster, lensfilter, image, filter):
clusterdir = '/u/ki/dapple/subaru/%s/' % cluster
photdir = '%s/PHOTOMETRY_%s_aper' % (clusterdir, lensfilter)
lensingdir = '%s/LENSING_%s_%s_aper/%s' % (clusterdir, lensfilter, lensfilter, image)
stars = ldac.openObjectFile('%s/coadd_stars.cat' % lensingdir)
catfiles = glob.glob('%s/%s/unstacked/%s.%s.*.skew' % (photdir, filter, cluster, filter))
cats = {}
for catfile in catfiles:
base = os.path.basename(catfile)
config = base.split('.')[2]
cat = ldac.openObjectFile(catfile)
tree = spatial.KDTree(np.column_stack([cat['Xpos'], cat['Ypos']]))
dist, index = tree.query(np.column_stack([stars['Xpos'], stars['Ypos']]), distance_upper_bound = 3.)
starorder = index[np.isfinite(dist)]
cats[config] = cat.filter(starorder)
return cats
#################
def matchCat(refcat, othercat):
tree = spatial.KDTree(np.column_stack([othercat['Xpos'], othercat['Ypos']]))
dist, index = tree.query(np.column_stack([refcat['Xpos'], refcat['Ypos']]), distance_upper_bound = 3.)
order = index[np.isfinite(dist)]
return othercat.filter(order)
def loadExpCats(cluster, lensfilter, image, filter, config='SUBARU-10_2'):
clusterdir = '/u/ki/dapple/subaru/%s/' % cluster
photdir = '%s/PHOTOMETRY_%s_aper' % (clusterdir, lensfilter)
lensingdir = '%s/LENSING_%s_%s_aper/%s' % (clusterdir, lensfilter, lensfilter, image)
unstackeddir = '%s/%s/unstacked' % (photdir, filter)
stars = ldac.openObjectFile('%s/coadd_stars.cat' % lensingdir)
stats = ldac.openObjectFile('%s/%s/SCIENCE/cat/chips.cat8' % (clusterdir, filter),
'STATS')
exposures = [ldac.openObjectFile('%s/%s.filtered.cat.corrected.cat' % (unstackeddir, exp[:11])) for exp in stats['IMAGENAME']]
mastercat = ldac.openObjectFile('%s/%s.%s.%s.unstacked.cor.cat' % (unstackeddir, cluster, filter, config))
mastercat = matchCat(stars, mastercat)
cats = [matchCat(mastercat, expcat) for expcat in exposures]
return mastercat, cats
##################
def plotSkewSurface(cat, configchip):
xpos = cat['Xpos']
ypos = cat['Ypos']
skew = cat['FLUX_APER-%s' % configchip][:,1]
goodskew = np.isfinite(skew)
fig = pylab.figure()
pylab.scatter(xpos[goodskew], ypos[goodskew], c=skew[goodskew], vmin=-1.5, vmax = 1.5)
pylab.colorbar()
pylab.title('Skew Surface')
fig2 = pylab.figure()
pylab.hist(skew[goodskew], bins=30)
pylab.xlabel('Skew')
fig3 = pylab.figure()
negskew = np.logical_and(skew < -1, goodskew)
pylab.scatter(xpos[negskew], ypos[negskew], c='b')
posskew = np.logical_and(skew > 1, goodskew)
pylab.scatter(xpos[posskew], ypos[posskew], c='r')
return fig, fig2, fig3
###################
def plotSigSurface(cat, configchip):
xpos = cat['Xpos']
ypos = cat['Ypos']
err = cat['FLUXERR_APER-%s' % configchip][:,1]
gooderr = np.logical_and(err > 0, np.logical_and(err < 90, np.isfinite(err)))
fig = pylab.figure()
pylab.scatter(xpos[gooderr], ypos[gooderr], c=err[gooderr])
pylab.colorbar()
pylab.title('Sig Ratio Surface')
fig2 = pylab.figure()
pylab.hist(err[gooderr], bins=50)
pylab.xlabel('Sig Ratio')
return fig, fig2
#########################
def plotDeltas(mastercat, expcats, config='SUBARU-10_2-1'):
figs = []
for cat in expcats:
fig = pylab.figure()
delta = cat['MAG_APER'][:,1] - mastercat['MAG_APER-%s' % config][:,1]
pylab.scatter(cat['Xpos'], cat['Ypos'], c=delta - np.median(delta), vmin=-0.05, vmax=0.05)
pylab.colorbar()
pylab.title(cat.sourcefile)
figs.append(fig)
return figs
#########################
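# Illustrative usage sketch (added; not part of the original script). The cluster,
# filter and image names below are hypothetical, and the config/chip suffix passed
# to plotSkewSurface is assumed to follow the 'SUBARU-10_2-1' pattern used in the
# catalogue column names elsewhere in this module.
def _example_usage():
    cats = loadCats('MACS1234+56', 'W-C-RC', 'coadd', 'W-J-V')
    for config, cat in cats.items():
        skew_fig, hist_fig, outlier_fig = plotSkewSurface(cat, config + '-1')
        skew_fig.savefig('skew_surface_%s.png' % config)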
| mit | -6,138,162,053,475,650,000 | 26.432432 | 130 | 0.600246 | false |
beeftornado/plex-custom-media-scanner | Scanners/Movies/Tivo Movie Scanner.py | 1 | 5478 | #!/usr/bin/env python
# Copyright (C) 2013 Casey Duquette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re, os, os.path
import sys
# I needed some plex libraries, you may need to adjust your plex install location accordingly
sys.path.append("/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Scanners/Movies")
sys.path.append("/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-ins/Scanners.bundle/Contents/Resources/Common/")
import Media, VideoFiles, Stack, Utils
from mp4file import mp4file, atomsearch
__author__ = "Casey Duquette"
__copyright__ = "Copyright 2013"
__credits__ = ["Casey Duquette"]
__license__ = "GPLv2"
__version__ = "1.0"
__maintainer__ = "Casey Duquette"
__email__ = ""
episode_regexps = [
'(Ep[^0-9a-z](?P<season>[0-9]{1,2})(?P<ep>[0-9]{2})[_]?)?(?P<title>[\w\s,.\-:;\'\"]+?)\s\(Rec.*$', # Ep#112_Bad Wolf (Rec 08_19_2012).mp4, Blink (Rec 09_13_2012).mp4
'(?P<show>.*?)[sS](?P<season>[0-9]+)[\._ ]*[eE](?P<ep>[0-9]+)([- ]?[Ee+](?P<secondEp>[0-9]+))?', # S03E04-E05
'(?P<show>.*?)[sS](?P<season>[0-9]{2})[\._\- ]+(?P<ep>[0-9]+)', # S03-03
'(?P<show>.*?)([^0-9]|^)(?P<season>[0-9]{1,2})[Xx](?P<ep>[0-9]+)(-[0-9]+[Xx](?P<secondEp>[0-9]+))?', # 3x03
'(.*?)[^0-9a-z](?P<season>[0-9]{1,2})(?P<ep>[0-9]{2})([\.\-][0-9]+(?P<secondEp>[0-9]{2})([ \-_\.]|$)[\.\-]?)?([^0-9a-z%]|$)' # .602.
]
date_regexps = [
'(?P<year>[0-9]{4})[^0-9a-zA-Z]+(?P<month>[0-9]{2})[^0-9a-zA-Z]+(?P<day>[0-9]{2})([^0-9]|$)', # 2009-02-10
'(?P<month>[0-9]{2})[^0-9a-zA-Z]+(?P<day>[0-9]{2})[^0-9a-zA-Z(]+(?P<year>[0-9]{4})([^0-9a-zA-Z]|$)', # 02-10-2009
]
standalone_episode_regexs = [
'(.*?)( \(([0-9]+)\))? - ([0-9]+)+x([0-9]+)(-[0-9]+[Xx]([0-9]+))?( - (.*))?', # Newzbin style, no _UNPACK_
'(.*?)( \(([0-9]+)\))?[Ss]([0-9]+)+[Ee]([0-9]+)(-[0-9]+[Xx]([0-9]+))?( - (.*))?' # standard s00e00
]
season_regex = '.*?(?P<season>[0-9]+)$' # folder for a season
just_episode_regexs = [
'(?P<ep>[0-9]{1,3})[\. -_]of[\. -_]+[0-9]{1,3}', # 01 of 08
'^(?P<ep>[0-9]{1,3})[^0-9]', # 01 - Foo
'e[a-z]*[ \.\-_]*(?P<ep>[0-9]{2,3})([^0-9c-uw-z%]|$)', # Blah Blah ep234
'.*?[ \.\-_](?P<ep>[0-9]{2,3})[^0-9c-uw-z%]+', # Flah - 04 - Blah
'.*?[ \.\-_](?P<ep>[0-9]{2,3})$', # Flah - 04
'.*?[^0-9x](?P<ep>[0-9]{2,3})$' # Flah707
]
ends_with_number = '.*([0-9]{1,2})$'
ends_with_episode = ['[ ]*[0-9]{1,2}x[0-9]{1,3}$', '[ ]*S[0-9]+E[0-9]+$']
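# Illustrative check of the patterns above (added; not part of the original scanner).
# The sample filename is hypothetical and mirrors the pytivo-style name mentioned in
# the episode_regexps comment; nothing calls this helper.
def _demo_episode_match():
    import re
    sample = 'Ep#112_Bad Wolf (Rec 08_19_2012).mp4'
    match = re.search(episode_regexps[0], sample)
    if match:
        print 'season %s, episode %s, title %r' % (
            match.group('season'), match.group('ep'), match.group('title'))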
# Look for episodes.
def Scan(path, files, mediaList, subdirs):
# Scan for video files.
VideoFiles.Scan(path, files, mediaList, subdirs)
# Take top two as show/season, but require at least the top one.
paths = Utils.SplitPath(path)
if len(paths) > 0 and len(paths[0]) > 0:
done = False
if done == False:
(title, year) = VideoFiles.CleanName(paths[0])
for i in files:
done = False
is_movie = False
file = os.path.basename(i)
(file, ext) = os.path.splitext(file)
# See if there's a pytivo metadata file to peek at
meta = dict()
metadata_filename = '{0}.txt'.format(i.replace('_LQ', ''))
if os.path.isfile(metadata_filename):
with open(metadata_filename, 'r') as f:
for line in f:
line = line.strip()
if line and len(line):
line_a = line.split(' : ')
if len(line_a) > 1:
key, value = line.split(' : ')
meta[key] = value
#print "pytivo metadata, ", meta
# Skip tv shows based on pytivo metadata file and backup to filename if not present
if 'isEpisode' in meta:
if meta['isEpisode'] == 'false':
is_movie = True
elif file.strip().startswith('(Rec'):
is_movie = True
# If we still think it is not a movie then skip it
if is_movie == False:
print "File {0} is determined to be a tv show by pytivo metadata file, skipping".format(file)
continue
if 'title' in meta:
title = meta['title']
if 'movieYear' in meta:
year = meta['movieYear']
# Create the movie
movie = Media.Movie(title, year)
movie.source = VideoFiles.RetrieveSource(i)
movie.parts.append(i)
mediaList.append(movie)
# Stack the results.
Stack.Scan(path, files, mediaList, subdirs)
def find_data(atom, name):
child = atomsearch.find_path(atom, name)
data_atom = child.find('data')
if data_atom and 'data' in data_atom.attrs:
return data_atom.attrs['data']
if __name__ == '__main__':
print "Hello, world!"
path = sys.argv[1]
files = [os.path.join(path, file) for file in os.listdir(path)]
media = []
Scan(path[1:], files, media, [])
print "Media:", media
| gpl-2.0 | -1,551,859,463,111,051,500 | 37.577465 | 180 | 0.529755 | false |
gorbyo/admin_couchdb | admin_couchdb/couch_set_repl.py | 1 | 2231 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# couch_set_repl.py
#
# Copyright 2013 Oleh Horbachov <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
The script help you to enable push replication between 2 servers
Usage:
python couch_set_repl.py --source/-s source_host:port --target/-t target_host:port
Example:
python couch_set_repl.py -s couch-src.example.com:5984 -t couch-trg.example.com:5984
"""
import couchquery
import couchdb
import argparse
from argparse import RawTextHelpFormatter
def arguments():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description="This script create push replication for all databases")
parser.add_argument('-t', '--target', help='Target COUCHDB Server')
parser.add_argument('-s', '--source', help='Source COUCHDB Server')
return parser
def main(dbsource, dbtarget):
couchdbserver = couchdb.Server('http://'+dbsource+'/')
dbrep = couchquery.Database('http://' + dbsource + '/' + '_replicator')
for id in couchdbserver:
if id != '_replicator' and id != '_users':
dbrep.create({'_id': id+'_to_'+dbtarget, 'source': id, 'target': 'http://'+dbtarget+'/'+id,
'create_target': True, 'continuous': True})
return 0
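# For reference (added; not part of the original script): each document main()
# writes into the _replicator database looks like the sketch below, which is what
# makes CouchDB keep a continuous push replication running for that database.
# Host names are the example values from the module docstring.
EXAMPLE_REPLICATION_DOC = {
    '_id': 'mydb_to_couch-trg.example.com:5984',
    'source': 'mydb',
    'target': 'http://couch-trg.example.com:5984/mydb',
    'create_target': True,
    'continuous': True,
}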
if __name__ == '__main__':
try:
dbsource = arguments().parse_args().source
dbtarget = arguments().parse_args().target
main(dbsource, dbtarget)
except:
arguments().print_help()
| gpl-3.0 | -5,853,721,085,586,909,000 | 32.298507 | 105 | 0.670551 | false |
Azure/azure-sdk-for-python | sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/operations/_incident_comments_operations.py | 1 | 15014 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IncidentCommentsOperations(object):
"""IncidentCommentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.securityinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_incident(
self,
resource_group_name, # type: str
workspace_name, # type: str
incident_id, # type: str
filter=None, # type: Optional[str]
orderby=None, # type: Optional[str]
top=None, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.IncidentCommentList"]
"""Gets all incident comments.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param filter: Filters the results, based on a Boolean condition. Optional.
:type filter: str
:param orderby: Sorts the results. Optional.
:type orderby: str
:param top: Returns only the first n results. Optional.
:type top: int
:param skip_token: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls. Optional.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IncidentCommentList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.securityinsight.models.IncidentCommentList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentCommentList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_incident.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IncidentCommentList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_incident.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments'} # type: ignore
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
incident_id, # type: str
incident_comment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.IncidentComment"
"""Gets an incident comment.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param incident_comment_id: Incident comment ID.
:type incident_comment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IncidentComment, or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.IncidentComment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentComment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
'incidentCommentId': self._serialize.url("incident_comment_id", incident_comment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IncidentComment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}'} # type: ignore
def create_comment(
self,
resource_group_name, # type: str
workspace_name, # type: str
incident_id, # type: str
incident_comment_id, # type: str
message=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.IncidentComment"
"""Creates the incident comment.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param incident_id: Incident ID.
:type incident_id: str
:param incident_comment_id: Incident comment ID.
:type incident_comment_id: str
:param message: The comment message.
:type message: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IncidentComment, or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.IncidentComment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentComment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_incident_comment = models.IncidentComment(message=message)
api_version = "2020-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_comment.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'incidentId': self._serialize.url("incident_id", incident_id, 'str'),
'incidentCommentId': self._serialize.url("incident_comment_id", incident_comment_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_incident_comment, 'IncidentComment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IncidentComment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_comment.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}'} # type: ignore
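# Illustrative call sequence (added; not part of the generated SDK file). It assumes
# the operations group is exposed as `incident_comments` on a SecurityInsights
# management client; resource names and the incident id are placeholders.
def _example_list_comments(client):
    comments = client.incident_comments.list_by_incident(
        resource_group_name='my-resource-group',
        workspace_name='my-workspace',
        incident_id='00000000-0000-0000-0000-000000000000',
        top=10,
    )
    for comment in comments:
        print(comment.message)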
| mit | 5,687,567,949,912,011,000 | 50.068027 | 279 | 0.638338 | false |
kjedruczyk/phabricator-tools | py/abd/abdi_processrepoarglist.py | 1 | 16178 | """Process a list of repository arguments."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_processrepoarglist
#
# Public Functions:
# do
# determine_max_workers_default
# fetch_if_needed
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import logging
import multiprocessing
import os
import time
import phlcon_reviewstatecache
import phlgitx_refcache
import phlmp_cyclingpool
import phlsys_conduit
import phlsys_fs
import phlsys_git
import phlsys_strtotime
import phlsys_subprocess
import phlsys_timer
import phlurl_watcher
import abdmail_mailer
import abdt_classicnaming
import abdt_compositenaming
import abdt_conduit
import abdt_differresultcache
import abdt_errident
import abdt_exhandlers
import abdt_fs
import abdt_git
import abdt_logging
import abdt_rbranchnaming
import abdt_tryloop
import abdi_processexitcodes
import abdi_processrepo
import abdi_repoargs
_LOGGER = logging.getLogger(__name__)
def do(
repo_configs,
sys_admin_emails,
sleep_secs,
is_no_loop,
external_report_command,
mail_sender,
max_workers,
overrun_secs):
conduit_manager = _ConduitManager()
fs_accessor = abdt_fs.make_default_accessor()
url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
fs_accessor.layout.urlwatcher_cache_path)
# decide max workers based on number of CPUs if no value is specified
if max_workers == 0:
max_workers = determine_max_workers_default()
repo_list = []
for name, config in repo_configs:
repo_list.append(
_ArcydManagedRepository(
name,
config,
conduit_manager,
url_watcher_wrapper,
sys_admin_emails,
mail_sender))
# if we always overrun half our workers then the loop is sustainable, if we
# overrun more than that then we'll be lagging too far behind. In the event
# that we only have one worker then we can't overrun any.
max_overrun_workers = max_workers // 2
pool = phlmp_cyclingpool.CyclingPool(
repo_list, max_workers, max_overrun_workers)
cycle_timer = phlsys_timer.Timer()
cycle_timer.start()
exit_code = None
while exit_code is None:
# This timer needs to be separate from the cycle timer. The cycle timer
# must be reset every time it is reported. The sleep timer makes sure
# that each run of the loop takes a minimum amount of time.
sleep_timer = phlsys_timer.Timer()
sleep_timer.start()
# refresh git snoops
with abdt_logging.remote_io_read_event_context(
'refresh-git-snoop', ''):
abdt_tryloop.critical_tryloop(
url_watcher_wrapper.watcher.refresh,
abdt_errident.GIT_SNOOP,
'')
with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
conduit_manager.refresh_conduits()
with abdt_logging.misc_operation_event_context(
'process-repos',
'{} workers, {} repos'.format(max_workers, len(repo_list))):
if max_workers > 1:
for i, res in pool.cycle_results(overrun_secs=overrun_secs):
repo = repo_list[i]
repo.merge_from_worker(res)
else:
for r in repo_list:
r()
# important to do this before stopping arcyd and as soon as possible
# after doing fetches
url_watcher_wrapper.save()
# report cycle stats
report = {
"cycle_time_secs": cycle_timer.restart(),
"overrun_jobs": pool.num_active_jobs,
}
_LOGGER.debug("cycle-stats: {}".format(report))
if external_report_command:
report_json = json.dumps(report)
full_path = os.path.abspath(external_report_command)
with abdt_logging.misc_operation_event_context(
'external-report-command', external_report_command):
try:
phlsys_subprocess.run(full_path, stdin=report_json)
except phlsys_subprocess.CalledProcessError as e:
_LOGGER.error(
"External command: {} failed with exception: "
"{}.".format(
external_report_command, type(e).__name__))
_LOGGER.error("VERBOSE MESSAGE: CycleReportJson:{}".format(
e))
if is_no_loop:
exit_code = abdi_processexitcodes.ExitCodes.ec_exit
elif os.path.isfile(fs_accessor.layout.killfile):
exit_code = abdi_processexitcodes.ExitCodes.ec_exit
if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
_LOGGER.info("Reason for stopping arcyd: {}".format(
phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
os.remove(fs_accessor.layout.killfile)
elif os.path.isfile(fs_accessor.layout.reloadfile):
exit_code = abdi_processexitcodes.ExitCodes.ec_reload
os.remove(fs_accessor.layout.reloadfile)
# sleep to pad out the cycle
secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
if secs_to_sleep > 0 and exit_code is None:
with abdt_logging.misc_operation_event_context(
'sleep', secs_to_sleep):
time.sleep(secs_to_sleep)
# finish any jobs that overran
for i, res in pool.finish_results():
repo = repo_list[i]
repo.merge_from_worker(res)
# important to do this before stopping arcyd and as soon as
# possible after doing fetches
url_watcher_wrapper.save()
return exit_code
def determine_max_workers_default():
max_workers = 1
try:
# use the same default as multiprocessing.Pool
max_workers = multiprocessing.cpu_count()
_LOGGER.debug(
"max_workers unspecified, defaulted to cpu_count: {}".format(
max_workers))
except NotImplementedError:
_LOGGER.warning(
"multiprocessing.cpu_count() not supported, disabling "
"multiprocessing. Specify max workers explicitly to enable.")
return max_workers
class _RecordingWatcherWrapper(object):
def __init__(self, watcher):
self._watcher = watcher
self._tested_urls = set()
def peek_has_url_recently_changed(self, url):
return self._watcher.peek_has_url_recently_changed(url)
def has_url_recently_changed(self, url):
self._tested_urls.add(url)
return self._watcher.has_url_recently_changed(url)
def get_data_for_merging(self):
data = self._watcher.get_data_for_merging()
tested = self._tested_urls
new_data = {k: v for k, v in data.iteritems() if k in tested}
return new_data
class _RepoActiveRetryState(object):
"""Determine when a repo is active and when to retry it."""
def __init__(self, retry_timestr_list):
self._is_active = True
self._reactivate_time = None
self._retry_delays = [
phlsys_strtotime.duration_string_to_time_delta(s)
for s in retry_timestr_list
]
def calc_active(self):
if not self._is_active and self._reactivate_time is not None:
if datetime.datetime.utcnow() >= self._reactivate_time:
self._is_active = True
self._reactivate_time = None
return self._is_active
@property
def is_active(self):
return self._is_active
def disable(self):
self._is_active = False
retry_delay = None
if self._retry_delays:
retry_delay = self._retry_delays.pop(0)
self._reactivate_time = datetime.datetime.utcnow() + retry_delay
else:
self._reactivate_time = None
return retry_delay
@property
def reactivate_time(self):
return self._reactivate_time
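# Illustrative walk-through of the back-off above (added; not part of the original
# module). Each failure consumes the next delay in the list -- 10 seconds, then
# 10 minutes, then 1 hour -- and calc_active() re-enables the repo once that delay
# has elapsed; when the list is exhausted the repo stays disabled.
def _example_retry_state():
    state = _RepoActiveRetryState(["10 seconds", "10 minutes", "1 hours"])
    print(state.calc_active())   # True: starts active
    print(state.disable())       # first delay (10 seconds), repo now inactive
    print(state.is_active)       # False until reactivate_time has passed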
class _ArcydManagedRepository(object):
def __init__(
self,
repo_name,
repo_args,
conduit_manager,
url_watcher_wrapper,
sys_admin_emails,
mail_sender):
self._active_state = _RepoActiveRetryState(
retry_timestr_list=["10 seconds", "10 minutes", "1 hours"])
sys_repo = phlsys_git.Repo(repo_args.repo_path)
self._refcache_repo = phlgitx_refcache.Repo(sys_repo)
self._differ_cache = abdt_differresultcache.Cache(self._refcache_repo)
self._abd_repo = abdt_git.Repo(
self._refcache_repo,
self._differ_cache,
"origin",
repo_args.repo_desc)
self._name = repo_name
self._args = repo_args
self._conduit_manager = conduit_manager
conduit_cache = conduit_manager.get_conduit_and_cache_for_args(
repo_args)
self._arcyd_conduit, self._review_cache = conduit_cache
self._mail_sender = mail_sender
self._url_watcher_wrapper = url_watcher_wrapper
self._mail_sender = mail_sender
self._on_exception = abdt_exhandlers.make_exception_delay_handler(
sys_admin_emails, repo_name)
def __call__(self):
watcher = _RecordingWatcherWrapper(
self._url_watcher_wrapper.watcher)
old_active_reviews = set(self._review_cache.active_reviews)
was_active = self._active_state.is_active
if self._active_state.calc_active():
if not was_active:
_LOGGER.info(
'repo-event: {} re-enabled'.format(self._name))
try:
_process_repo(
self._abd_repo,
self._name,
self._args,
self._arcyd_conduit,
watcher,
self._mail_sender)
except Exception:
retry_delay = self._active_state.disable()
_LOGGER.info(
'repo-event: {} disabled, retry in {}'.format(
self._name, retry_delay))
self._on_exception(retry_delay)
else:
_LOGGER.debug(
'repo-status: {} is inactive until {}'.format(
self._name, self._active_state.reactivate_time))
return (
self._review_cache.active_reviews - old_active_reviews,
self._active_state,
watcher.get_data_for_merging(),
self._refcache_repo.peek_hash_ref_pairs(),
self._differ_cache.get_cache()
)
def merge_from_worker(self, results):
(
active_reviews,
active_state,
watcher_data,
hash_ref_pairs,
differ_cache
) = results
self._review_cache.merge_additional_active_reviews(active_reviews)
self._active_state = active_state
self._refcache_repo.set_hash_ref_pairs(hash_ref_pairs)
self._differ_cache.set_cache(differ_cache)
# merge in the consumed urls from the worker
self._url_watcher_wrapper.watcher.merge_data_consume_only(watcher_data)
class _ConduitManager(object):
def __init__(self):
super(_ConduitManager, self).__init__()
self._conduits_caches = {}
def get_conduit_and_cache_for_args(self, args):
key = (
args.instance_uri,
args.arcyd_user,
args.arcyd_cert,
args.https_proxy
)
if key not in self._conduits_caches:
# create an array so that the 'connect' closure binds to the
# 'conduit' variable as we'd expect, otherwise it'll just
# modify a local variable and this 'conduit' will remain 'None'
# XXX: we can _process_repo better in python 3.x (nonlocal?)
conduit = [None]
def connect():
# XXX: we'll rebind in python 3.x, instead
# nonlocal conduit
conduit[0] = phlsys_conduit.MultiConduit(
args.instance_uri,
args.arcyd_user,
args.arcyd_cert,
https_proxy=args.https_proxy)
abdt_tryloop.tryloop(
connect, abdt_errident.CONDUIT_CONNECT, args.instance_uri)
multi_conduit = conduit[0]
cache = phlcon_reviewstatecache.make_from_conduit(multi_conduit)
arcyd_conduit = abdt_conduit.Conduit(multi_conduit, cache)
self._conduits_caches[key] = (arcyd_conduit, cache)
else:
arcyd_conduit, cache = self._conduits_caches[key]
return arcyd_conduit, cache
def refresh_conduits(self):
for conduit, cache in self._conduits_caches.itervalues():
abdt_tryloop.critical_tryloop(
cache.refresh_active_reviews,
abdt_errident.CONDUIT_REFRESH,
conduit.describe())
def fetch_if_needed(url_watcher, snoop_url, repo, repo_desc):
did_fetch = False
# fetch only if we need to
if not snoop_url or url_watcher.peek_has_url_recently_changed(snoop_url):
abdt_tryloop.tryloop(
repo.checkout_master_fetch_prune,
abdt_errident.FETCH_PRUNE,
repo_desc)
did_fetch = True
if did_fetch and snoop_url:
# consume the 'newness' of this repo, since fetching succeeded
url_watcher.has_url_recently_changed(snoop_url)
return did_fetch
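# Illustrative gating sketch (added; not part of the original module): with a snoop
# URL configured, the fetch above is skipped until the watcher reports a change,
# which is what keeps idle repositories cheap to poll. The URL and repo description
# are placeholders; `url_watcher` and `repo` are whatever objects the caller holds.
def _example_fetch_gate(url_watcher, repo):
    did_fetch = fetch_if_needed(
        url_watcher,
        'https://phab.example.com/feed/query',
        repo,
        'example-repo')
    print('fetched' if did_fetch else 'fetch skipped')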
def _process_repo(
repo,
unused_repo_name,
args,
arcyd_conduit,
url_watcher,
mail_sender):
fetch_if_needed(
url_watcher,
abdi_repoargs.get_repo_snoop_url(args),
repo,
args.repo_desc)
admin_emails = set(_flatten_list(args.admin_emails))
# TODO: this should be a URI for users not conduit
mailer = abdmail_mailer.Mailer(
mail_sender,
admin_emails,
args.repo_desc,
args.instance_uri)
branch_url_callable = None
if args.branch_url_format:
def make_branch_url(branch_name):
return args.branch_url_format.format(
branch=branch_name,
repo_url=args.repo_url)
branch_url_callable = make_branch_url
branch_naming = abdt_compositenaming.Naming(
abdt_classicnaming.Naming(),
abdt_rbranchnaming.Naming())
branches = abdt_git.get_managed_branches(
repo,
args.repo_desc,
branch_naming,
branch_url_callable)
abdi_processrepo.process_branches(branches, arcyd_conduit, mailer)
def _flatten_list(hierarchy):
for x in hierarchy:
# recurse into hierarchy if it's a list
if hasattr(x, '__iter__') and not isinstance(x, str):
for y in _flatten_list(x):
yield y
else:
yield x
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 | -3,659,248,281,344,877,600 | 31.949084 | 79 | 0.580232 | false |
LarryHillyer/PoolHost | PoolHost/pool/urls.py | 1 | 2770 | from django.conf.urls import url
from . import views
app_name = 'pool'
urlpatterns = [
url(r'^transfer/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/(?P<modelstate>.*)/$', views.transfer.as_view(), name = 'transfer'),
url(r'^transfer/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/$', views.transfer.as_view(), name = 'transfer'),
url(r'^edit/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/(?P<modelstate>.*)/$', views.edit.as_view(), name = 'edit'),
url(r'^edit/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/$', views.edit.as_view(), name = 'edit'),
url(r'^delete/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/(?P<modelstate>.*)/$', views.delete.as_view(), name = 'delete'),
url(r'^delete/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/$', views.delete.as_view(), name = 'delete'),
url(r'^details/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/(?P<modelstate>.*)/$', views.details.as_view(), name = 'details'),
url(r'^details/(?P<pool_id>[0-9]+)/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/$', views.details.as_view(), name = 'details'),
url(r'^create/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/(?P<modelstate>.*)/$', views.create.as_view(), name = 'create'),
url(r'^create/(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/$', views.create.as_view(), name = 'create'),
url(r'^(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/(?P<modelstate>.*)/$', views.index.as_view(), name = 'index'),
url(r'^(?P<poolowner_id>[0-9]+)/(?P<poolgroup_id>[0-9]+)/(?P<groupowner_id>[0-9]+)/(?P<filter>[0-9]+)/$', views.index.as_view(), name = 'index'),
url(r'^poolgroups_by_groupowner/$', views.poolgroups_by_groupowner_id.as_view(), name = 'poolgroups_by_groupowner_id'),
url(r'^poolowners_by_poolgroup/$', views.poolowners_by_poolgroup_id.as_view(), name = 'poolowners_by_poolgroup_id'),
url(r'^edit/poolgroups_by_groupowner/$', views.poolgroups_by_groupowner_id.as_view(), name = 'poolgroups_by_groupowner_id'),
url(r'^edit/poolowners_by_poolgroup/$', views.poolowners_by_poolgroup_id.as_view(), name = 'poolowners_by_poolgroup_id'),
]
| gpl-3.0 | 1,132,929,921,290,102,000 | 82.969697 | 203 | 0.584116 | false |
dmsovetov/pygling | Pygling/Target/Executable.py | 1 | 1531 | #
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from Target import Target
# class Executable
class Executable( Target ):
# ctor
def __init__( self, name, sources = None, paths = None, defines = None, link = None ):
Target.__init__( self, name, sources = sources, paths = paths, defines = defines, link = link, linkTo = Target.Executable )
# shouldLinkLibraries
@property
def shouldLinkLibraries( self ):
		return True
| mit | 8,647,078,688,976,270,000 | 41.555556 | 125 | 0.755715 | false |
malja/cvut-python | cviceni08/01_trideni_karet.py | 1 | 1202 | import copy
cards = [[0, 'Q'], [2, '6'], [1, 'K'],
[1, '8'], [2, '10'], [2, '4'],
[3, '4'], [0, '4'], [1, '3'],
[2, '5'], [0, 'K'], [3, 'A'],
[1, 'J'], [0, '3'], [0, '9']]
def cardTypeAsInt( card ):
if card[1].isdigit():
return int(card[1])
if card[1] == "J":
return 11
elif card[1] == "Q":
return 12
elif card[1] == "K":
return 13
else:
return 14
def compareCards( card1, card2 ):
print("porovnávám karty:", card1, card2)
if (card1[0] == card2[0]):
print("rovny")
if ( cardTypeAsInt( card1 ) < cardTypeAsInt( card2 ) ):
print("barva1")
return True
else:
print("barva2")
return False
else:
print("else")
return card1[0] < card2[0]
def bubbleSort( array, swap_fn ):
    sorted = copy.deepcopy(array)
    # Walk each element back while it belongs before its left neighbour.
    # (The original compared sorted[0] with sorted[-1], wrapping around the list,
    # and never moved an element more than one position.)
    for i in range( 1, len( sorted ) ):
        j = i
        while j > 0 and swap_fn( sorted[j], sorted[j-1] ):
            sorted[j-1], sorted[j] = sorted[j], sorted[j-1]
            j -= 1
return sorted
print( cards )
print( bubbleSort( cards, compareCards) )
| mit | -2,594,362,512,075,148,000 | 20.818182 | 63 | 0.446667 | false |
tswicegood/Dolt | dolt/__init__.py | 1 | 8781 | import httplib2
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
from decorator import decorator
except ImportError:
# No decorator package available. Create a no-op "decorator".
def decorator(f):
def decorate(_func):
def inner(*args, **kwargs):
return f(_func, *args, **kwargs)
return inner
return decorate
@decorator
def _makes_clone(_func, *args, **kw):
"""
A decorator that returns a clone of the current object so that
we can re-use the object for similar requests.
"""
self = args[0]._clone()
_func(self, *args[1:], **kw)
return self
class Dolt(object):
"""
A dumb little wrapper around RESTful interfaces.
Subclass `Dolt` to create specific APIs.
Example::
class MyApi(Dolt):
_api_url = 'https://api.example.com'
_url_template = '%(domain)s/%(generated_url)s.json'
api = MyApi()
print api.images()
"""
_api_url = ''
"""The base url for this API"""
_url_template = '%(domain)s/%(generated_url)s'
"""
Template used to generate URLs.
- `%(domain)s` is the `_api_url`
- `%(generated_url)s` is where the URL parts go.
"""
_stack_collapser = '/'.join
_params_template = '?%s'
def __init__(self, http=None):
self._supported_methods = ("GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS")
self._attribute_stack = []
self._method = "GET"
self._body = None
self._http = http or httplib2.Http()
self._params = {}
self._headers = {}
def __call__(self, *args, **kwargs):
url = self.get_url(*[str(a) for a in args], **kwargs)
response, data = self._http.request(url, self._method, body=self._body, headers=self._headers)
return self._handle_response(response, data)
def _generate_params(self, params):
return self._params_template % urllib.urlencode(params)
def _handle_response(self, response, data):
"""
Deserializes JSON if the content-type matches, otherwise returns the response
body as is.
"""
# Content-Type headers can include additional parameters(RFC 1521), so
# we split on ; to match against only the type/subtype
if data and response.get('content-type', '').split(';')[0] in (
'application/json',
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json'
):
return json.loads(data)
else:
return data
@_makes_clone
def __getitem__(self, name):
"""
Adds `name` to the URL path.
"""
self._attribute_stack.append(name)
return self
@_makes_clone
def __getattr__(self, name):
"""
Sets the HTTP method for the request or adds `name` to the URL path.
::
>>> dolt.GET._method == 'GET'
True
>>> dolt.foo.bar.get_url()
'/foo/bar'
"""
if name in self._supported_methods:
self._method = name
elif not name.endswith(')'):
self._attribute_stack.append(name)
return self
@_makes_clone
def with_params(self, **params):
"""
Add/overwrite URL query parameters to the request.
"""
self._params.update(params)
return self
@_makes_clone
def with_body(self, body=None, **params):
"""
Add a body to the request.
When `body` is a:
- string, it will be used as is.
- dict or list of (key, value) pairs, it will be form encoded
- None, remove request body
- anything else, a TypeError will be raised
If `body` is a dict or None you can also pass in keyword
arguments to add to the body.
::
>>> dolt.with_body(dict(key='val'), foo='bar')._body
'foo=bar&key=val'
"""
if isinstance(body, (tuple, list)):
body = dict(body)
if params:
# Body must be None or able to be a dict
if isinstance(body, dict):
body.update(params)
elif body is None:
body = params
else:
raise ValueError('Body must be None or a dict if used with params, got: %r' % body)
if isinstance(body, basestring):
self._body = body
elif isinstance(body, dict):
self._body = urllib.urlencode(body)
elif body is None:
self._body = None
else:
raise TypeError('Invalid body type %r' % body)
return self
def with_json(self, data=None, **params):
"""
Add a json body to the request.
:param data: A json string, a dict, or a list of key, value pairs
:param params: A dict of key value pairs to JSON encode
"""
if isinstance(data, (tuple, list)):
data = dict(data)
if params:
# data must be None or able to be a dict
if isinstance(data, dict):
data.update(params)
elif data is None:
data = params
else:
raise ValueError('Data must be None or a dict if used with params, got: %r' % data)
req = self.with_headers({'Content-Type': 'application/json', 'Accept': 'application/json'})
if isinstance(data, basestring):
# Looks like it's already been encoded
return req.with_body(data)
else:
return req.with_body(json.dumps(data))
@_makes_clone
def with_headers(self, headers=None, **params):
"""
Add headers to the request.
:param headers: A dict, or a list of key, value pairs
:param params: A dict of key value pairs
"""
if isinstance(headers, (tuple, list)):
headers = dict(headers)
if params:
if isinstance(headers, dict):
headers.update(params)
elif headers is None:
headers = params
self._headers.update(headers)
return self
def get_url(self, *paths, **params):
"""
Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request
"""
path_stack = self._attribute_stack[:]
if paths:
path_stack.extend(paths)
u = self._stack_collapser(path_stack)
url = self._url_template % {
"domain": self._api_url,
"generated_url" : u,
}
if self._params or params:
internal_params = self._params.copy()
internal_params.update(params)
url += self._generate_params(internal_params)
return url
def _clone(self):
"""
Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat'
"""
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
q._params = self._params.copy()
q._headers = self._headers.copy()
q._attribute_stack = self._attribute_stack[:]
return q
try:
__IPYTHON__
def __dir__(self):
return [
'_supported_methods',
'_attribute_stack',
'_method',
'_body',
'_http',
'_params',
'_headers',
'_api_url',
'_url_template',
'_stack_collapser',
'_params_template',
'__init__',
'__call__',
'_handle_response',
'__getattr__',
'get_url',
'__dir__',
]
_getAttributeNames = trait_names = __dir__
except NameError:
pass
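# Illustrative subclass (added; not part of the original module). The domain and
# endpoint below are made up; the point is the chained-attribute URL building the
# Dolt docstring describes.
class _ExampleApi(Dolt):
    _api_url = 'https://api.example.com'
    _url_template = '%(domain)s/%(generated_url)s.json'

# _ExampleApi().users['42'].get_url()        -> 'https://api.example.com/users/42.json'
# _ExampleApi().users.with_params(page=2)    -> adds '?page=2' to the generated URL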
class Simpleton(Dolt):
"""
A dumber little wrapper around RESTful interfaces.
Example::
api = Simpleton('http://api.example.com')
print api.images()
"""
def __init__(self, base_url, http=None):
super(Simpleton, self).__init__(http=http)
        self._api_url = base_url
| bsd-3-clause | -1,242,726,497,648,566,800 | 27.237942 | 102 | 0.512015 | false |
nitheesh/AutoMoveMouse | appind.py | 1 | 6321 | #!/usr/bin/env python
import os
import os.path
import pygtk
pygtk.require('2.0')
import gtk
import time
import subprocess
import threading
import atexit
import commands
import appindicator
from Xlib import display
MaxIdle = 10
lockFile = "/tmp/automouse.lck"
appFile = "/tmp/appfile.lck"
# Touch the signal file on script startup
open(appFile, 'a').close()
class AppIndicatorMouse:
def __init__(self):
self.ind = appindicator.Indicator ("AutoMouseMove-Indicator", "indicator-messages", appindicator.CATEGORY_APPLICATION_STATUS)
self.ind.set_status (appindicator.STATUS_ACTIVE)
self.ind.set_attention_icon ("indicator-messages-new")
self.ind.set_icon("distributor-logo")
self.start = True
self.timer = None
self.timer_text = ""
# create a menu
self.menu = gtk.Menu()
_radio = gtk.RadioMenuItem(None, "Demo")
_radio1 = gtk.RadioMenuItem(None, "Demo")
radio = gtk.RadioMenuItem(_radio, "Start")
radio.connect("activate", self.start_btn_pressed)
radio.show()
self.menu.append(radio)
radio1 = gtk.RadioMenuItem(_radio, "Stop")
radio1.connect("activate", self.stop_btn_pressed)
radio1.show()
self.menu.append(radio1)
        self.dis_web = gtk.CheckMenuItem("Disable webcam")
self.dis_web.connect("toggled", self.disable_webcam)
self.dis_web.show()
self.menu.append(self.dis_web)
button = gtk.MenuItem(label="Timer")
button.connect("activate", self.TimerpopUp)
button.show()
self.menu.append(button)
image = gtk.ImageMenuItem(gtk.STOCK_QUIT)
image.connect("activate", self.quit)
image.show()
self.menu.append(image)
self.menu.show()
self.ind.set_menu(self.menu)
self.thread = threading.Thread(target=self.StartbashScript)
self.thread.daemon = True
self.thread.start()
# self.thread.join()
def quit(self, widget, data=None):
# print self.thread
try:
self._bash.kill()
except:
pass
gtk.main_quit()
def start_btn_pressed(self, widget):
print "Start button clicked."
try:
os.remove(appFile)
except:
print "Unable to remove appFile"
def stop_btn_pressed(self, widget):
print "Stop clicked."
open(appFile, 'a').close()
# self.ind.set_label("Stopped")
def StartbashScript(self):
self._bash = None
self.thread1 = None
prev_pos = None
count = 0
# self.timer = 30
while True:
if self.timer is not None:
count = count + 1
if int(count) >= int(self.timer) and not os.path.isfile(lockFile):
try:
print "Timer reached"
count = 0
self.timer = None
open(appFile, 'a').close()
except:
print "Timer encountered an error!!"
pass
else:
count = 0
if os.path.isfile(appFile):
print "App is on stop mode!!"
time.sleep(1)
continue
else:
if not os.path.isfile(lockFile):
self._bash = None
prev_pos = None
idle = commands.getstatusoutput('expr $(xprintidle) / 1000')[1]
if (int(idle) > MaxIdle):
if self._bash is None:
print "system goes idle..!"
self.thread1 = threading.Thread(target=self.AutoMouseMove)
self.thread1.daemon = True
self.thread1.start()
self.thread1.join()
else:
print str(idle) + str(" : system active")
if self._bash is not None:
# print("The mouse position on the screen is {0}".format(self.mousepos()))
cur_pos = self.mousepos()
print "Current postion" + str(cur_pos)
if prev_pos is not None and cur_pos != prev_pos:
subprocess.Popen("exec " + "xte 'keyup Control_L' && xte 'keyup Alt_L'", shell=True, stdout=subprocess.PIPE)
print "System activated by user input"
self._bash.terminate()
self._bash = None
print "Lock file removed!"
os.remove(lockFile)
prev_pos = cur_pos
FirstRun = False
time.sleep(1)
def mousepos(self):
"""mousepos() --> (x, y) get the mouse coordinates on the screen (linux, Xlib)."""
data = display.Display().screen().root.query_pointer()._data
return data["root_x"]
def AutoMouseMove(self):
open(lockFile, 'a').close()
self._bash = subprocess.Popen("exec " + "./start-mouse.sh", shell=True, stdout=subprocess.PIPE)
print self._bash.pid
def TimerpopUp(self,btn):
#base this on a message dialog
dialog = gtk.MessageDialog(
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK,
None)
dialog.set_markup('Please set the <b>timer</b>:')
#create the text input field
entry = gtk.Entry()
entry.set_text(self.timer_text)
#allow the user to press enter to do ok
entry.connect("activate", self.responseToDialog, dialog, gtk.RESPONSE_OK)
entry.connect('changed', self.on_changed)
#create a horizontal box to pack the entry and a label
hbox = gtk.HBox()
hbox.pack_start(gtk.Label("Timer (min):"), False, 5, 5)
hbox.pack_end(entry)
#some secondary text
# dialog.format_secondary_markup("This will be used for <i>identification</i> purposes")
#add it and show it
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
#go go go
dialog.run()
text = entry.get_text()
dialog.destroy()
if text == '':
self.timer_text = ""
self.timer = None
else:
self.timer_text = text
self.timer = int(text) * 60
print self.timer_text
print "Automation will be active for next " + str(self.timer_text) + str(" mins")
def on_changed(self, entry):
text = entry.get_text().strip()
entry.set_text(''.join([i for i in text if i in '123456789']))
    def responseToDialog(self, entry, dialog, response):
dialog.response(response)
def disable_webcam(self, widget, data=None):
if widget.get_active():
os.system("echo 'passwd' | sudo -S modprobe -r uvcvideo")
else:
os.system("echo 'passwd' | sudo -S modprobe uvcvideo")
if __name__ == "__main__":
gtk.gdk.threads_init()
# test = Test1()
indicator = AppIndicatorMouse()
gtk.main() | gpl-2.0 | 8,106,481,789,371,758,000 | 28.820755 | 129 | 0.615567 | false |
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/sso/sp_metadata.py | 1 | 7015 | from mitxmako.shortcuts import render_to_response
import xmltodict
from django.http import HttpResponse
import json
from django.conf import settings
from collections import defaultdict
import os
from OpenSSL import crypto
import re
from path import path
from permissions.decorators import user_has_perms
BASEDIR = settings.PROJECT_HOME + "/sso/sp"
PEPPER_ENTITY_ID = "www.pepperpd.com"
@user_has_perms('sso', 'administer')
def edit(request):
return render_to_response('sso/manage/sp_metadata.html')
@user_has_perms('sso', 'administer')
def save(request):
data = json.loads(request.POST.get('data'))
entities = []
for d in data:
sso_name = d.get('sso_name', '')
sso_type = d.get('sso_type')
path = BASEDIR + "/" + sso_name
if not os.path.isdir(path):
os.makedirs(path)
typed = d.get('typed')
sso_type = d.get('sso_type')
if typed.get('saml_metadata'):
mdfile = open(path + "/FederationMetadata.xml", "w")
mdfile.write(typed.get('saml_metadata'))
del typed['saml_metadata']
typed_setting = []
for k, v in typed.items():
typed_setting.append('''
<setting name="%s">%s</setting>''' % (k, v))
attributes = []
for a in d.get('attributes'):
attributes.append('''
<attribute name="%s" map="%s"></attribute>''' % (a['name'], a['map']))
entities.append('''
<entity type="%s" name="%s">%s%s
</entity>''' % (d.get('sso_type', ''),
sso_name,
''.join(typed_setting),
''.join(attributes)
))
content = '''<?xml version="1.0"?>
<entities xmlns:ds="http://www.w3.org/2000/09/xmldsig#">%s
</entities>''' % ''.join(entities)
xmlfile = open(BASEDIR + "/metadata.xml", "w")
xmlfile.write(content)
xmlfile.close()
# post process
for d in data:
sso_name = d.get('sso_name', '')
sso_type = d.get('sso_type')
if sso_type == 'SAML':
create_saml_config_files(sso_name)
return HttpResponse("{}", content_type="application/json")
@user_has_perms('sso', 'administer')
def all_json(request):
xmlfile = open(BASEDIR + "/metadata.xml", "r")
parsed_data = xmltodict.parse(xmlfile.read(),
dict_constructor=lambda *args, **kwargs: defaultdict(list, *args, **kwargs))
entity_list = []
if 'entity' in parsed_data['entities'][0]:
for entity in parsed_data['entities'][0]['entity']:
entity_list.append(parse_one_sp(entity))
return HttpResponse(json.dumps(entity_list), content_type="application/json")
def sp_by_name(name):
xmlfile = open(BASEDIR + "/metadata.xml", "r")
parsed_data = xmltodict.parse(xmlfile.read(),
dict_constructor=lambda *args, **kwargs: defaultdict(list, *args, **kwargs))
if 'entity' in parsed_data['entities'][0]:
for entity in parsed_data['entities'][0]['entity']:
if entity['@name'] == name:
return parse_one_sp(entity)
def parse_one_sp(entity):
attribute_list = []
if 'attribute' in entity:
for attribute in entity['attribute']:
attr = {
# 'type': attribute['@type'],
'name': attribute['@name'],
'map': attribute['@map']
}
attribute_list.append(attr)
typed_setting = {}
if 'setting' in entity:
for attribute in entity['setting']:
typed_setting[attribute['@name']] = attribute['#text']
# path = BASEDIR + "/" + entity['@name'] + "/FederationMetadata.xml"
# if os.path.isfile(path):
# mdfile = open(path, "r")
# typed_setting['saml_metadata'] = mdfile.read()
return {
'sso_type': entity['@type'],
'sso_name': entity['@name'],
'attributes': attribute_list,
'typed': typed_setting
}
def create_self_signed_cert(CN, C="US", ST="unknown", L="unknown", O="unknown", OU="unknown", serial_number=1, notBefore=0, notAfter=365*24*60*60):
"""
"""
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = C
cert.get_subject().ST = ST
cert.get_subject().L = L
cert.get_subject().O = O
cert.get_subject().OU = OU
cert.get_subject().CN = CN # most important part
    cert.set_serial_number(serial_number)
    cert.gmtime_adj_notBefore(notBefore)
    cert.gmtime_adj_notAfter(notAfter)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
return cert, key
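# Illustrative call only (the entity name below is a made-up example, not taken
# from this project): both return values come back PEM-encoded, ready to be
# written to cert.pem and key.pem as create_saml_config_files() does further down.
#
#   cert_pem, key_pem = create_self_signed_cert("sp.example.com", O="Example Org")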
def create_saml_config_files(name):
ms = sp_by_name(name)
cert_file = BASEDIR + '/' + name + "/cert.pem"
key_file = BASEDIR + '/' + name + "/key.pem"
if not os.path.isfile(cert_file) or not os.path.isfile(key_file):
cert, key = create_self_signed_cert(name)
open(cert_file, "wt").write(cert)
open(key_file, "wt").write(key)
cert = open(cert_file, "r").read()
key = open(key_file, "r").read()
cert = re.sub('-----.*?-----\n?', '', cert)
key = re.sub('-----.*?-----\n?', '', key)
auth = "http://docs.oasis-open.org/wsfed/authorization/200706"
temp_dir = path(__file__).abspath().dirname()
template = open(temp_dir + "/metadata_templates/sp.xml", "r").read()
attr_tags = ""
for attr in ms.get('attributes'):
mapped_name = attr['map'] if 'map' in attr else attr['name']
attr_tags += '''
<ns0:RequestedAttribute isRequired="true" NameFormat="urn:mace:dir:attribute-def:%s"
Name="%s" FriendlyName="%s"/>''' % (mapped_name, mapped_name, mapped_name)
content = template.format(cert=cert,
entityID=name,
auth=auth,
attr_tags=attr_tags,
slo_post_url="",
slo_redirect_url="",
acs_url=ms.get('typed').get('sso_acs_url'))
f = BASEDIR + '/' + name + "/sp.xml"
open(f, "wt").write(content)
template = open(temp_dir + "/metadata_templates/idp.xml", "r").read()
content = template.format(cert=cert, entityID=PEPPER_ENTITY_ID, auth=auth)
f = BASEDIR + '/' + name + "/idp.xml"
open(f, "wt").write(content)
def download_saml_federation_metadata(request):
name = request.GET.get("name")
ms = sp_by_name(name)
if not ms:
return HttpResponse("SP with name '%s' does not exist." % name)
f = BASEDIR + '/' + name + "/idp.xml"
response = HttpResponse(content_type='application/x-download')
response['Content-Disposition'] = ('attachment; filename=idp.xml')
response.write(open(f, "r").read())
return response
| agpl-3.0 | 8,215,661,476,016,799,000 | 30.886364 | 147 | 0.566358 | false |
memsharded/conan | .ci/jenkins/pr_tags.py | 1 | 2199 | import argparse
import json
import os
from github import Github
def _get_value(body, tag):
pos = body.lower().find(tag.lower())
if pos != -1:
cl = body[pos + len(tag):].splitlines()[0]
return cl.strip()
return None
def get_tag_from_pr(pr_number, tag):
"""Given a PR number and a tag to search, it returns the line written in the body"""
gh_token = os.getenv("GH_TOKEN")
g = Github(gh_token)
repo = g.get_repo("conan-io/conan")
pr = repo.get_pull(pr_number)
body = pr.body
value = _get_value(body, tag)
return value
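# Hypothetical illustration (PR number and body text are assumptions): for a pull
# request whose description contains the line "@TAGS: svn, slow", the call
# get_tag_from_pr(1234, "@TAGS:") returns "svn, slow"; the __main__ block below
# then splits that string into individual tags with clean_list().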
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Launch tests in a venv')
parser.add_argument('output_file', help='e.g.: file.json')
parser.add_argument('branch_name', help='e.g.: PR-23')
args = parser.parse_args()
TAG_PYVERS = "@PYVERS:"
TAG_TAGS = "@TAGS:"
TAG_REVISIONS = "@REVISIONS:"
out_file = args.output_file
branch = args.branch_name
if not branch.startswith("PR-"):
print("The branch is not a PR")
exit(-1)
pr_number = int(branch.split("PR-")[1])
def clean_list(the_list):
if not the_list:
return []
return [a.strip() for a in the_list.split(",")]
# Read tags to include
tags = clean_list(get_tag_from_pr(pr_number, TAG_TAGS))
# Read pythons to include
tmp = clean_list(get_tag_from_pr(pr_number, TAG_PYVERS))
pyvers = {"Windows": [], "Linux": [], "Macos": []}
for t in tmp:
if "@" in t:
the_os, pyver = t.split("@")
if the_os not in ["Macos", "Linux", "Windows"]:
print("Invalid os: %s" % the_os)
exit(-1)
pyvers[the_os].append(pyver)
else:
pyvers["Macos"].append(t)
pyvers["Linux"].append(t)
pyvers["Windows"].append(t)
    # Read revisions?
tmp = get_tag_from_pr(pr_number, TAG_REVISIONS)
revisions = tmp.strip().lower() in ["1", "true"] if tmp else False
with open(out_file, "w") as f:
the_json = {"tags": tags, "pyvers": pyvers, "revisions": revisions}
f.write(json.dumps(the_json))
| mit | -1,919,777,983,848,685,300 | 27.192308 | 88 | 0.56935 | false |
yandy/sea | setup.py | 1 | 2143 | import os
import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_root, 'sea/__init__.py')) as f:
version = str(ast.literal_eval(_version_re.search(
f.read()).group(1)))
with open(os.path.join(_root, 'requirements.txt')) as f:
requirements = f.readlines()
with open(os.path.join(_root, 'README.md')) as f:
readme = f.read()
def find_package_data(package):
"""
    Return the paths of all files found under the given package directory,
    relative to that directory.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return filepaths
setup(
name='sea',
version=version,
description='shanbay rpc framework',
long_description=readme,
url='https://github.com/shanbay/sea',
author='Michael Ding',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=['rpc', 'grpc'],
packages=find_packages(exclude=['tests']),
package_data={'sea': find_package_data('sea')},
python_requires='>=3',
install_requires=requirements,
entry_points={
'console_scripts': [
'sea=sea.cli:main'
],
'sea.jobs': [
'celery=sea.contrib.extensions.celery.cmd:main',
]
}
)
| mit | -2,994,986,565,607,564,000 | 28.763889 | 70 | 0.597294 | false |
moden-py/pywinauto | examples/get_winrar_info.py | 1 | 4298 | """Automate WinRAR evaluation copy
We hit a few dialogs and save XML dump and
screenshot from each dialog.
Specify a language at the command line:
0 French
1 German
2 Czech
More than likely you will need to modify the apppath
entry in the 't' dictionary to where you have
extracted the WinRAR executables.
"""
__revision__ = "$Revision$"
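# Example invocation (install paths are assumptions and must match the apppath
# entries in the 't' dictionary below): passing one or more language indices on
# the command line runs the capture sequence once per language build, e.g.
#
#   python get_winrar_info.py 0 1 2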
import sys
from pywinauto.application import Application
import time
folders = ['wrar351cz', 'wrar351d', 'wrar351fr']
# translations for each language
t = {
'apppath' : (
'c:\.temp\wrar351fr\winrar.exe',
'c:\.temp\wrar351d\winrar.exe',
'c:\.temp\wrar351cz\winrar.exe'
),
# Buy Licence Dialog
'Buy Licence' : (
"Acheter une licence pur winRAR",
"Bittekaufensieeine",
"Zakuptesiprosmlicenci WinRARu"),
'Close' : (
"Fermer",
"Schleissen",
"Zavrit"),
# Options->Configure menu items
"Options->Configure" : (
"Options->Configuration",
"Optionen->Einstellungen",
"Moznosti->Nastaveni"),
# Configure/Options dialog box
'Configure' : (
"Configuration",
"Einstellungen",
"Nastaveni"),
# personalise toolbar buttons button
'Buttons' : (
"Boutons",
"Schaltflachen",
"Vybrattlacitka"),
# Personalize toolbars dialog
'PeronnaliseToolbars' : (
"Peronnalisation de la barre doutils",
"Werkzeugleisteanpassen",
"Vybertlaciteknastrojovelisty"),
# create profile button
'CreateDefaultProfile' : (
u"Creerleprofilpard�fault",
"Standardfestlegen",
"Vytvoritimplicitni"),
# create profile dialog box title
'ConfigureDefaultOptions' : (
"Configurer les options de compre...",
"Standardkomprimierungs",
"Zmenaimplicitnichnast..."),
# context menu's button
"ContextMenus" : (
"Menus contextuels",
"Optionenimkontextmenu",
"Polozkykontextovehamenu"),
# context menu's dialog
"contextMenuDlg" : (
"Rubriques des menus contextuels",
"OptionenindenKontextmenus",
"Polozkykontextovehamenu"),
# file->exit menu option
"File->Exit" : (
"Fichier->Quitter",
"Datei->Beenden",
"Soubor->Konec"),
}
def get_winrar_dlgs(rar_dlg, app, lang):
rar_dlg.menu_select(t["Options->Configure"][lang])
optionsdlg = app[t['Configure'][lang]]
optionsdlg.write_to_xml("Options_%d.xml" % lang)
optionsdlg.capture_as_image().save("Options_%d.png" % lang)
optionsdlg[t['Buttons'][lang]].click()
contextMenuDlg = app[t['PeronnaliseToolbars'][lang]]
contextMenuDlg.write_to_xml("PersonaliseToolbars_%d.xml" % lang)
contextMenuDlg.capture_as_image().save("PersonaliseToolbars_%d.png" % lang)
contextMenuDlg.OK.click()
optionsdlg.TabCtrl.select(1)
optionsdlg[t['CreateDefaultProfile'][lang]].click()
defaultOptionsDlg = app[t['ConfigureDefaultOptions'][lang]]
defaultOptionsDlg.write_to_xml("DefaultOptions_%d.xml" % lang)
defaultOptionsDlg.capture_as_image().save("DefaultOptions_%d.png" % lang)
defaultOptionsDlg.OK.click()
optionsdlg.TabCtrl.select(6)
optionsdlg[t['ContextMenus'][lang]].click()
anotherMenuDlg = app[t['contextMenuDlg'][lang]]
anotherMenuDlg.write_to_xml("2ndMenuDlg_%d.xml" % lang)
anotherMenuDlg.capture_as_image().save("2ndMenuDlg_%d.png" % lang)
anotherMenuDlg.OK.click()
optionsdlg.OK.click()
# get the languages as an integer
langs = [int(arg) for arg in sys.argv[1:]]
for lang in langs:
# start the application
app = Application().start(t['apppath'][lang])
# we have to wait for the Licence Dialog to open
time.sleep(2)
# close the Buy licence dialog box
licence_dlg = app[t['Buy Licence'][lang]]
licence_dlg[t['Close'][lang]].click()
# find the WinRar main dialog
rar_dlg = app.window_(title_re = ".* - WinRAR.*")
# dump and capture some dialogs
get_winrar_dlgs(rar_dlg, app, lang)
# exit WinRar
time.sleep(.5)
rar_dlg.menu_select(t['File->Exit'][lang])
| lgpl-2.1 | -5,219,329,673,420,885,000 | 25.538462 | 79 | 0.617784 | false |
dragoon/edX-AI-course | search/util.py | 1 | 29256 | # util.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import sys
import inspect
import heapq, random
import cStringIO
class FixedRandom:
def __init__(self):
fixedState = (3, (2147483648L, 507801126L, 683453281L, 310439348L, 2597246090L, \
2209084787L, 2267831527L, 979920060L, 3098657677L, 37650879L, 807947081L,
3974896263L, \
881243242L, 3100634921L, 1334775171L, 3965168385L, 746264660L,
4074750168L, 500078808L, \
776561771L, 702988163L, 1636311725L, 2559226045L, 157578202L, 2498342920L,
2794591496L, \
4130598723L, 496985844L, 2944563015L, 3731321600L, 3514814613L,
3362575829L, 3038768745L, \
2206497038L, 1108748846L, 1317460727L, 3134077628L, 988312410L,
1674063516L, 746456451L, \
3958482413L, 1857117812L, 708750586L, 1583423339L, 3466495450L,
1536929345L, 1137240525L, \
3875025632L, 2466137587L, 1235845595L, 4214575620L, 3792516855L,
657994358L, 1241843248L, \
1695651859L, 3678946666L, 1929922113L, 2351044952L, 2317810202L,
2039319015L, 460787996L, \
3654096216L, 4068721415L, 1814163703L, 2904112444L, 1386111013L,
574629867L, 2654529343L, \
3833135042L, 2725328455L, 552431551L, 4006991378L, 1331562057L,
3710134542L, 303171486L, \
1203231078L, 2670768975L, 54570816L, 2679609001L, 578983064L, 1271454725L,
3230871056L, \
2496832891L, 2944938195L, 1608828728L, 367886575L, 2544708204L,
103775539L, 1912402393L, \
1098482180L, 2738577070L, 3091646463L, 1505274463L, 2079416566L,
659100352L, 839995305L, \
1696257633L, 274389836L, 3973303017L, 671127655L, 1061109122L, 517486945L,
1379749962L, \
3421383928L, 3116950429L, 2165882425L, 2346928266L, 2892678711L,
2936066049L, 1316407868L, \
2873411858L, 4279682888L, 2744351923L, 3290373816L, 1014377279L,
955200944L, 4220990860L, \
2386098930L, 1772997650L, 3757346974L, 1621616438L, 2877097197L,
442116595L, 2010480266L, \
2867861469L, 2955352695L, 605335967L, 2222936009L, 2067554933L,
4129906358L, 1519608541L, \
1195006590L, 1942991038L, 2736562236L, 279162408L, 1415982909L,
4099901426L, 1732201505L, \
2934657937L, 860563237L, 2479235483L, 3081651097L, 2244720867L,
3112631622L, 1636991639L, \
3860393305L, 2312061927L, 48780114L, 1149090394L, 2643246550L,
1764050647L, 3836789087L, \
3474859076L, 4237194338L, 1735191073L, 2150369208L, 92164394L, 756974036L,
2314453957L, \
323969533L, 4267621035L, 283649842L, 810004843L, 727855536L, 1757827251L,
3334960421L, \
3261035106L, 38417393L, 2660980472L, 1256633965L, 2184045390L, 811213141L,
2857482069L, \
2237770878L, 3891003138L, 2787806886L, 2435192790L, 2249324662L,
3507764896L, 995388363L, \
856944153L, 619213904L, 3233967826L, 3703465555L, 3286531781L,
3863193356L, 2992340714L, \
413696855L, 3865185632L, 1704163171L, 3043634452L, 2225424707L,
2199018022L, 3506117517L, \
3311559776L, 3374443561L, 1207829628L, 668793165L, 1822020716L,
2082656160L, 1160606415L, \
3034757648L, 741703672L, 3094328738L, 459332691L, 2702383376L,
1610239915L, 4162939394L, \
557861574L, 3805706338L, 3832520705L, 1248934879L, 3250424034L,
892335058L, 74323433L, \
3209751608L, 3213220797L, 3444035873L, 3743886725L, 1783837251L,
610968664L, 580745246L, \
4041979504L, 201684874L, 2673219253L, 1377283008L, 3497299167L,
2344209394L, 2304982920L, \
3081403782L, 2599256854L, 3184475235L, 3373055826L, 695186388L,
2423332338L, 222864327L, \
1258227992L, 3627871647L, 3487724980L, 4027953808L, 3053320360L,
533627073L, 3026232514L, \
2340271949L, 867277230L, 868513116L, 2158535651L, 2487822909L,
3428235761L, 3067196046L, \
3435119657L, 1908441839L, 788668797L, 3367703138L, 3317763187L,
908264443L, 2252100381L, \
764223334L, 4127108988L, 384641349L, 3377374722L, 1263833251L,
1958694944L, 3847832657L, \
1253909612L, 1096494446L, 555725445L, 2277045895L, 3340096504L,
1383318686L, 4234428127L, \
1072582179L, 94169494L, 1064509968L, 2681151917L, 2681864920L, 734708852L,
1338914021L, \
1270409500L, 1789469116L, 4191988204L, 1716329784L, 2213764829L,
3712538840L, 919910444L, \
1318414447L, 3383806712L, 3054941722L, 3378649942L, 1205735655L,
1268136494L, 2214009444L, \
2532395133L, 3232230447L, 230294038L, 342599089L, 772808141L, 4096882234L,
3146662953L, \
2784264306L, 1860954704L, 2675279609L, 2984212876L, 2466966981L,
2627986059L, 2985545332L, \
2578042598L, 1458940786L, 2944243755L, 3959506256L, 1509151382L,
325761900L, 942251521L, \
4184289782L, 2756231555L, 3297811774L, 1169708099L, 3280524138L,
3805245319L, 3227360276L, \
3199632491L, 2235795585L, 2865407118L, 36763651L, 2441503575L,
3314890374L, 1755526087L, \
17915536L, 1196948233L, 949343045L, 3815841867L, 489007833L, 2654997597L,
2834744136L, \
417688687L, 2843220846L, 85621843L, 747339336L, 2043645709L, 3520444394L,
1825470818L, \
647778910L, 275904777L, 1249389189L, 3640887431L, 4200779599L, 323384601L,
3446088641L, \
4049835786L, 1718989062L, 3563787136L, 44099190L, 3281263107L, 22910812L,
1826109246L, \
745118154L, 3392171319L, 1571490704L, 354891067L, 815955642L, 1453450421L,
940015623L, \
796817754L, 1260148619L, 3898237757L, 176670141L, 1870249326L,
3317738680L, 448918002L, \
4059166594L, 2003827551L, 987091377L, 224855998L, 3520570137L, 789522610L,
2604445123L, \
454472869L, 475688926L, 2990723466L, 523362238L, 3897608102L, 806637149L,
2642229586L, \
2928614432L, 1564415411L, 1691381054L, 3816907227L, 4082581003L,
1895544448L, 3728217394L, \
3214813157L, 4054301607L, 1882632454L, 2873728645L, 3694943071L,
1297991732L, 2101682438L, \
3952579552L, 678650400L, 1391722293L, 478833748L, 2976468591L, 158586606L,
2576499787L, \
662690848L, 3799889765L, 3328894692L, 2474578497L, 2383901391L,
1718193504L, 3003184595L, \
3630561213L, 1929441113L, 3848238627L, 1594310094L, 3040359840L,
3051803867L, 2462788790L, \
954409915L, 802581771L, 681703307L, 545982392L, 2738993819L, 8025358L,
2827719383L, \
770471093L, 3484895980L, 3111306320L, 3900000891L, 2116916652L,
397746721L, 2087689510L, \
721433935L, 1396088885L, 2751612384L, 1998988613L, 2135074843L,
2521131298L, 707009172L, \
2398321482L, 688041159L, 2264560137L, 482388305L, 207864885L, 3735036991L,
3490348331L, \
1963642811L, 3260224305L, 3493564223L, 1939428454L, 1128799656L,
1366012432L, 2858822447L, \
1428147157L, 2261125391L, 1611208390L, 1134826333L, 2374102525L,
3833625209L, 2266397263L, \
3189115077L, 770080230L, 2674657172L, 4280146640L, 3604531615L,
4235071805L, 3436987249L, \
509704467L, 2582695198L, 4256268040L, 3391197562L, 1460642842L,
1617931012L, 457825497L, \
1031452907L, 1330422862L, 4125947620L, 2280712485L, 431892090L,
2387410588L, 2061126784L, \
896457479L, 3480499461L, 2488196663L, 4021103792L, 1877063114L,
2744470201L, 1046140599L, \
2129952955L, 3583049218L, 4217723693L, 2720341743L, 820661843L,
1079873609L, 3360954200L, \
3652304997L, 3335838575L, 2178810636L, 1908053374L, 4026721976L,
1793145418L, 476541615L, \
973420250L, 515553040L, 919292001L, 2601786155L, 1685119450L, 3030170809L,
1590676150L, \
1665099167L, 651151584L, 2077190587L, 957892642L, 646336572L, 2743719258L,
866169074L, \
851118829L, 4225766285L, 963748226L, 799549420L, 1955032629L, 799460000L,
2425744063L, \
2441291571L, 1928963772L, 528930629L, 2591962884L, 3495142819L,
1896021824L, 901320159L, \
3181820243L, 843061941L, 3338628510L, 3782438992L, 9515330L, 1705797226L,
953535929L, \
764833876L, 3202464965L, 2970244591L, 519154982L, 3390617541L, 566616744L,
3438031503L, \
1853838297L, 170608755L, 1393728434L, 676900116L, 3184965776L,
1843100290L, 78995357L, \
2227939888L, 3460264600L, 1745705055L, 1474086965L, 572796246L,
4081303004L, 882828851L, \
1295445825L, 137639900L, 3304579600L, 2722437017L, 4093422709L,
273203373L, 2666507854L, \
3998836510L, 493829981L, 1623949669L, 3482036755L, 3390023939L,
833233937L, 1639668730L, \
1499455075L, 249728260L, 1210694006L, 3836497489L, 1551488720L,
3253074267L, 3388238003L, \
2372035079L, 3945715164L, 2029501215L, 3362012634L, 2007375355L,
4074709820L, 631485888L, \
3135015769L, 4273087084L, 3648076204L, 2739943601L, 1374020358L,
1760722448L, 3773939706L, \
1313027823L, 1895251226L, 4224465911L, 421382535L, 1141067370L,
3660034846L, 3393185650L, \
1850995280L, 1451917312L, 3841455409L, 3926840308L, 1397397252L,
2572864479L, 2500171350L, \
3119920613L, 531400869L, 1626487579L, 1099320497L, 407414753L,
2438623324L, 99073255L, \
3175491512L, 656431560L, 1153671785L, 236307875L, 2824738046L,
2320621382L, 892174056L, \
230984053L, 719791226L, 2718891946L, 624L), None)
self.random = random.Random()
self.random.setstate(fixedState)
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
"A container with a last-in-first-out (LIFO) queuing policy."
def __init__(self):
self.list = []
def push(self, item):
"Push 'item' onto the stack"
self.list.append(item)
def pop(self):
"Pop the most recently pushed item from the stack"
return self.list.pop()
def isEmpty(self):
"Returns true if the stack is empty"
return len(self.list) == 0
class Queue:
"A container with a first-in-first-out (FIFO) queuing policy."
def __init__(self):
self.list = []
def push(self, item):
"Enqueue the 'item' into the queue"
self.list.insert(0, item)
def pop(self):
"""
Dequeue the earliest enqueued item still in the queue. This
operation removes the item from the queue.
"""
return self.list.pop()
def isEmpty(self):
"Returns true if the queue is empty"
return len(self.list) == 0
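# Quick illustrative sketch (not part of the original assignment code): the two
# containers differ only in pop order -- Stack is LIFO, Queue is FIFO.
#
#   s = Stack();  s.push(1); s.push(2); s.pop()   # -> 2 (last in, first out)
#   q = Queue();  q.push(1); q.push(2); q.pop()   # -> 1 (first in, first out)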
class PriorityQueue:
"""
Implements a priority queue data structure. Each inserted item
has a priority associated with it and the client is usually interested
in quick retrieval of the lowest-priority item in the queue. This
data structure allows O(1) access to the lowest-priority item.
Note that this PriorityQueue does not allow you to change the priority
of an item. However, you may insert the same item multiple times with
different priorities.
"""
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
# FIXME: restored old behaviour to check against old results better
# FIXED: restored to stable behaviour
entry = (priority, self.count, item)
# entry = (priority, item)
heapq.heappush(self.heap, entry)
self.count += 1
def pop(self):
(_, _, item) = heapq.heappop(self.heap)
# (_, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
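# Hedged usage sketch (example items and priorities are assumptions): pop()
# always returns the item whose priority number is lowest; the running count
# stored in each heap entry keeps insertion order stable among equal priorities.
#
#   pq = PriorityQueue()
#   pq.push('expandB', 2)
#   pq.push('expandA', 1)
#   pq.push('expandC', 2)
#   pq.pop()   # -> 'expandA' (priority 1)
#   pq.pop()   # -> 'expandB' (priority 2, pushed before 'expandC')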
class PriorityQueueWithFunction(PriorityQueue):
"""
Implements a priority queue with the same push/pop signature of the
Queue and the Stack classes. This is designed for drop-in replacement for
those two classes. The caller has to provide a priority function, which
extracts each item's priority.
"""
def __init__(self, priorityFunction):
"priorityFunction (item) -> priority"
self.priorityFunction = priorityFunction # store the priority function
PriorityQueue.__init__(self) # super-class initializer
def push(self, item):
"Adds an item to the queue with priority from the priority function"
PriorityQueue.push(self, item, self.priorityFunction(item))
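# Minimal sketch under assumed values: the supplied function is evaluated on each
# pushed item to obtain its priority, so the shorter string comes out first here.
#
#   pq = PriorityQueueWithFunction(len)
#   pq.push('pacman')
#   pq.push('go')
#   pq.pop()   # -> 'go' (priority len('go') == 2)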
def manhattanDistance(xy1, xy2):
"Returns the Manhattan distance between points xy1 and xy2"
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
"""
Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
"""
A counter keeps track of counts for a set of keys.
The counter class is an extension of the standard python
dictionary type. It is specialized to have number values
(integers or floats), and includes a handful of additional
functions to ease the task of counting data. In particular,
all keys are defaulted to have value 0. Using a dictionary:
a = {}
print a['test']
would give an error, while the Counter class analogue:
>>> a = Counter()
>>> print a['test']
0
returns the default 0 value. Note that to reference a key
that you know is contained in the counter,
you can still use the dictionary syntax:
>>> a = Counter()
>>> a['test'] = 2
>>> print a['test']
2
This is very useful for counting things without initializing their counts,
see for example:
>>> a['blah'] += 1
>>> print a['blah']
1
The counter also includes additional functionality useful in implementing
the classifiers for this assignment. Two counters can be added,
subtracted or multiplied together. See below for details. They can
also be normalized and their total count and arg max can be extracted.
"""
def __getitem__(self, idx):
self.setdefault(idx, 0)
return dict.__getitem__(self, idx)
def incrementAll(self, keys, count):
"""
Increments all elements of keys by the same count.
>>> a = Counter()
>>> a.incrementAll(['one','two', 'three'], 1)
>>> a['one']
1
>>> a['two']
1
"""
for key in keys:
self[key] += count
def argMax(self):
"""
Returns the key with the highest value.
"""
if len(self.keys()) == 0: return None
all = self.items()
values = [x[1] for x in all]
maxIndex = values.index(max(values))
return all[maxIndex][0]
def sortedKeys(self):
"""
Returns a list of keys sorted by their values. Keys
with the highest values will appear first.
>>> a = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> a['third'] = 1
>>> a.sortedKeys()
['second', 'third', 'first']
"""
sortedItems = self.items()
compare = lambda x, y: sign(y[1] - x[1])
sortedItems.sort(cmp=compare)
return [x[0] for x in sortedItems]
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
def normalize(self):
"""
Edits the counter such that the total count of all
keys sums to 1. The ratio of counts for all keys
will remain the same. Note that normalizing an empty
Counter will result in an error.
"""
total = float(self.totalCount())
if total == 0: return
for key in self.keys():
self[key] = self[key] / total
def divideAll(self, divisor):
"""
Divides all counts by divisor
"""
divisor = float(divisor)
for key in self:
self[key] /= divisor
def copy(self):
"""
Returns a copy of the counter
"""
return Counter(dict.copy(self))
def __mul__(self, y):
"""
Multiplying two counters gives the dot product of their vectors where
each unique label is a vector element.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['second'] = 5
>>> a['third'] = 1.5
>>> a['fourth'] = 2.5
>>> a * b
14
"""
sum = 0
x = self
if len(x) > len(y):
x, y = y, x
for key in x:
if key not in y:
continue
sum += x[key] * y[key]
return sum
def __radd__(self, y):
"""
Adding another counter to a counter increments the current counter
by the values stored in the second counter.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> a += b
>>> a['first']
1
"""
for key, value in y.items():
self[key] += value
def __add__(self, y):
"""
Adding two counters gives a counter with the union of all keys and
counts of the second added to counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a + b)['first']
1
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] + y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = y[key]
return addend
def __sub__(self, y):
"""
Subtracting a counter from another gives a counter with the union of all keys and
counts of the second subtracted from counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a - b)['first']
-5
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] - y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = -1 * y[key]
return addend
def raiseNotDefined():
fileName = inspect.stack()[1][1]
line = inspect.stack()[1][2]
method = inspect.stack()[1][3]
print "*** Method not implemented: %s at line %s of %s" % (method, line, fileName)
sys.exit(1)
def normalize(vectorOrCounter):
"""
normalize a vector or counter by dividing each value by the sum of all values
"""
normalizedCounter = Counter()
if type(vectorOrCounter) == type(normalizedCounter):
counter = vectorOrCounter
total = float(counter.totalCount())
if total == 0: return counter
for key in counter.keys():
value = counter[key]
normalizedCounter[key] = value / total
return normalizedCounter
else:
vector = vectorOrCounter
s = float(sum(vector))
if s == 0: return vector
return [el / s for el in vector]
def nSample(distribution, values, n):
if sum(distribution) != 1:
distribution = normalize(distribution)
rand = [random.random() for i in range(n)]
rand.sort()
samples = []
samplePos, distPos, cdf = 0, 0, distribution[0]
while samplePos < n:
if rand[samplePos] < cdf:
samplePos += 1
samples.append(values[distPos])
else:
distPos += 1
cdf += distribution[distPos]
return samples
def sample(distribution, values=None):
if type(distribution) == Counter:
items = sorted(distribution.items())
distribution = [i[1] for i in items]
values = [i[0] for i in items]
if sum(distribution) != 1:
distribution = normalize(distribution)
choice = random.random()
i, total = 0, distribution[0]
while choice > total:
i += 1
total += distribution[i]
return values[i]
def sampleFromCounter(ctr):
items = sorted(ctr.items())
return sample([v for k, v in items], [k for k, v in items])
def getProbability(value, distribution, values):
"""
Gives the probability of a value under a discrete distribution
defined by (distributions, values).
"""
total = 0.0
for prob, val in zip(distribution, values):
if val == value:
total += prob
return total
def flipCoin(p):
r = random.random()
return r < p
def chooseFromDistribution(distribution):
"Takes either a counter or a list of (prob, key) pairs and samples"
if type(distribution) == dict or type(distribution) == Counter:
return sample(distribution)
r = random.random()
base = 0.0
for prob, element in distribution:
base += prob
if r <= base: return element
def nearestPoint(pos):
"""
Finds the nearest grid point to a position (discretizes).
"""
( current_row, current_col ) = pos
grid_row = int(current_row + 0.5)
grid_col = int(current_col + 0.5)
return ( grid_row, grid_col )
def sign(x):
"""
Returns 1 or -1 depending on the sign of x
"""
if ( x >= 0 ):
return 1
else:
return -1
def arrayInvert(array):
"""
Inverts a matrix stored as a list of lists.
"""
result = [[] for i in array]
for outer in array:
for inner in range(len(outer)):
result[inner].append(outer[inner])
return result
def matrixAsList(matrix, value=True):
"""
Turns a matrix into a list of coordinates matching the specified value
"""
rows, cols = len(matrix), len(matrix[0])
cells = []
for row in range(rows):
for col in range(cols):
if matrix[row][col] == value:
cells.append(( row, col ))
return cells
def lookup(name, namespace):
"""
Get a method or class from any imported module from its name.
Usage: lookup(functionName, globals())
"""
dots = name.count('.')
if dots > 0:
moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
module = __import__(moduleName)
return getattr(module, objName)
else:
modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
options = [getattr(module, name) for module in modules if name in dir(module)]
options += [obj[1] for obj in namespace.items() if obj[0] == name]
if len(options) == 1: return options[0]
        if len(options) > 1: raise Exception, 'Name conflict for %s' % name
raise Exception, '%s not found as a method or class' % name
def pause():
"""
Pauses the output stream awaiting user feedback.
"""
print "<Press enter/return to continue>"
raw_input()
# code to handle timeouts
#
# FIXME
# NOTE: TimeoutFuncton is NOT reentrant. Later timeouts will silently
# disable earlier timeouts. Could be solved by maintaining a global list
# of active time outs. Currently, questions which have test cases calling
# this have all student code so wrapped.
#
import signal
import time
class TimeoutFunctionException(Exception):
"""Exception to raise on a timeout"""
pass
class TimeoutFunction:
def __init__(self, function, timeout):
self.timeout = timeout
self.function = function
def handle_timeout(self, signum, frame):
raise TimeoutFunctionException()
def __call__(self, *args, **keyArgs):
# If we have SIGALRM signal, use it to cause an exception if and
# when this function runs too long. Otherwise check the time taken
# after the method has returned, and throw an exception then.
if hasattr(signal, 'SIGALRM'):
old = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.timeout)
try:
result = self.function(*args, **keyArgs)
finally:
signal.signal(signal.SIGALRM, old)
signal.alarm(0)
else:
startTime = time.time()
result = self.function(*args, **keyArgs)
timeElapsed = time.time() - startTime
if timeElapsed >= self.timeout:
self.handle_timeout(None, None)
return result
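# Illustrative sketch (the wrapped function is a stand-in, not project code): the
# wrapper raises TimeoutFunctionException when the call exceeds its budget, via
# SIGALRM where available and via a post-hoc elapsed-time check otherwise.
#
#   def slow_step():
#       time.sleep(5)
#   guarded = TimeoutFunction(slow_step, 1)
#   try:
#       guarded()
#   except TimeoutFunctionException:
#       pass  # ran longer than 1 second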
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
def write(self, string):
pass
def mutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if _MUTED:
return
_MUTED = True
_ORIGINAL_STDOUT = sys.stdout
# _ORIGINAL_STDERR = sys.stderr
sys.stdout = WritableNull()
#sys.stderr = WritableNull()
def unmutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if not _MUTED:
return
_MUTED = False
sys.stdout = _ORIGINAL_STDOUT
# sys.stderr = _ORIGINAL_STDERR
| mit | -1,549,650,670,089,234,400 | 36.945525 | 100 | 0.556262 | false |
MrHamdulay/myjvm | defaultclassloader.py | 1 | 1741 | from __future__ import absolute_import
import os.path
try:
# let's not use rzipfile for now (it's really really slow in python)
raise Exception
from rpython.rlib.rzipfile import RZipFile
ZipFile = RZipFile
except:
RZipFile = None
from zipfile import ZipFile
from classreader import ClassReader
from excep import ClassNotFoundException
class DefaultClassLoader:
def __init__(self, classpath):
self.classpath = classpath
self.lazy_classes = {}
def load_jar(self, jarfilename):
jar = ZipFile(jarfilename)
for zipinfo in jar.filelist:
classname = zipinfo.filename
if not classname.endswith('.class'):
continue
self.lazy_classes[classname.split('.class')[0]] = jar
def load_class_from_jar(self, classname):
if RZipFile:
return self.lazy_classes[classname].read(classname+'.class')
else:
return self.lazy_classes[classname].open(classname+'.class').read()
def load(self, classname):
class_file = None
if classname in self.lazy_classes:
class_file = self.load_class_from_jar(classname)
else:
parts = classname.split('/')
class_file = None
for classpath in self.classpath:
class_filename = '%s/%s.class' % (classpath, classname)
if os.path.isfile(class_filename):
class_file = open(class_filename).read()
break
else:
raise ClassNotFoundException('class file not found: %s' % classname)
assert class_file is not None
klass = ClassReader(classname, class_file).klass
return klass
| mit | 1,773,738,263,681,068,000 | 30.089286 | 84 | 0.608271 | false |
Murali-group/GraphSpace | graphspace/authorization.py | 1 | 3499 | import applications.users as users
import applications.graphs as graphs
from graphspace.exceptions import UserNotAuthorized
from graphspace.utils import get_request_user
class UserRole:
ADMIN = 3
LOGGED_IN = 2
LOGGED_OFF = 1 # When user is not logged in to GraphSpace.
def user_role(request):
"""
Returns the user role for the user making the request.
Parameters
----------
request: HTTP request
Returns
-------
Returns UserRole
"""
user_email = get_request_user(request)
user = users.controllers.get_user(request, user_email) if user_email is not None else None
if user is None:
return UserRole.LOGGED_OFF
elif user.is_admin:
return UserRole.ADMIN
else:
return UserRole.LOGGED_IN
def validate(request, permission, graph_id=None, group_id=None, layout_id=None):
"""
Validates if the user has the given permissions based on information like graph id, group id or layout id.
Returns
-------
Nothing
Raises
-------
UserNotAuthorized - if user doesnt have the given permission.
"""
# TODO: Each application module should implement a validate method.
# Then this validate method can plug into the implemented validate method to expose overall validation functionality for the project.
if graph_id is not None:
if permission == 'GRAPH_READ' and not graphs.controllers.is_user_authorized_to_view_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if permission == 'GRAPH_UPDATE' and not graphs.controllers.is_user_authorized_to_update_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if permission == 'GRAPH_DELETE' and not graphs.controllers.is_user_authorized_to_delete_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if permission == 'GRAPH_SHARE' and not graphs.controllers.is_user_authorized_to_share_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if group_id is not None:
if permission == 'GROUP_READ' and not users.controllers.is_user_authorized_to_view_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if permission == 'GROUP_UPDATE' and not users.controllers.is_user_authorized_to_update_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if permission == 'GROUP_DELETE' and not users.controllers.is_user_authorized_to_delete_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if permission == 'GROUP_SHARE' and not users.controllers.is_user_authorized_to_share_with_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if layout_id is not None:
if permission == 'LAYOUT_READ' and not graphs.controllers.is_user_authorized_to_view_layout(request, username=get_request_user(request), layout_id = layout_id):
raise UserNotAuthorized(request)
if permission == 'LAYOUT_UPDATE' and not graphs.controllers.is_user_authorized_to_update_layout(request, username=get_request_user(request), layout_id = layout_id):
raise UserNotAuthorized(request)
if permission == 'LAYOUT_DELETE' and not graphs.controllers.is_user_authorized_to_delete_layout(request, username=get_request_user(request), layout_id = layout_id):
raise UserNotAuthorized(request)
return | gpl-2.0 | 5,456,867,437,812,151,000 | 44.454545 | 166 | 0.76136 | false |
moagstar/python-uncompyle6 | uncompyle6/parsers/astnode.py | 1 | 1501 | import sys
from uncompyle6 import PYTHON3
from uncompyle6.scanners.tok import NoneToken
from spark_parser.ast import AST as spark_AST
if PYTHON3:
intern = sys.intern
class AST(spark_AST):
def isNone(self):
"""An AST None token. We can't use regular list comparisons
because AST token offsets might be different"""
return len(self.data) == 1 and NoneToken == self.data[0]
def __repr__(self):
return self.__repr1__('', None)
def __repr1__(self, indent, sibNum=None):
rv = str(self.type)
if sibNum is not None:
rv = "%2d. %s" % (sibNum, rv)
enumerate_children = False
if len(self) > 1:
rv += " (%d)" % (len(self))
enumerate_children = True
rv = indent + rv
indent += ' '
i = 0
for node in self:
if hasattr(node, '__repr1__'):
if enumerate_children:
child = node.__repr1__(indent, i)
else:
child = node.__repr1__(indent, None)
else:
inst = node.format(line_prefix='L.')
if inst.startswith("\n"):
# Nuke leading \n
inst = inst[1:]
if enumerate_children:
child = indent + "%2d. %s" % (i, inst)
else:
child = indent + inst
pass
rv += "\n" + child
i += 1
return rv
| mit | -742,535,194,500,020,400 | 30.93617 | 67 | 0.476349 | false |
miqui/python-hpOneView | examples/scripts/get-network-set.py | 1 | 3747 | #!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import re
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with givin credentials
try:
con.login(credential)
except:
print('Login failed')
def getnetset(net):
sets = net.get_networksets()
pprint(sets)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display Network Sets
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
net = hpov.networking(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
getnetset(net)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| mit | 5,146,794,782,429,188,000 | 31.582609 | 79 | 0.655991 | false |
Renmusxd/RSwarm | bot.py | 1 | 8829 | import numpy
from collections import OrderedDict
class Bot:
ID_NUM = 0
# Populated by GET_NINPUTS
VISION = None
INPUTS = None
NINPUTS = None
# Populated by GET_NACTIONS
ACTIONS = None
NACTIONS = None
VIEW_DIST = 100.0
FOV = 60 # Angular distance from center
VISION_BINS = 5
MATE_TIMER = 200
MAX_ENERGY = 1000
MOVE_SPEED = 1.0
SPRINT_SPEED = 3.0
TURN_SPEED = 5.0
# Radius for actions like attacking and mating
ACTION_RADIUS = 10
EAT_AMOUNT = 20
# Rewards
DEATH_REWARD = -100.
ATTACK_PRED_PRED_REWARD = 20.
ATTACK_PRED_PREY_REWARD = 50.
ATTACK_PREY_PRED_REWARD = 5.
ATTACK_PREY_PREY_REWARD = -20.
ATTACKED_REWARD = -50.
ATTACK_FAILED_REWARD = -0.0
EAT_REWARD = 100. # Scaled by hunger: R (E - e) / E
MATE_REWARD = 100.
FAILED_MATE_REWARD = -1.0
def __init__(self, x, y, d, world, color, can_graze, energy=MAX_ENERGY):
"""
Construct a bot
:param x: x position
:param y: y position
:param d: direction (0-360)[OPENGL]
:param world: world to ask for information
"""
self.x, self.y, self.d = x, y, d
self.world = world
self.id = Bot.ID_NUM
Bot.ID_NUM += 1
self.can_graze = can_graze
self.energy = energy
self.r, self.g, self.b = color
self.dead = False
# Indicate that this Bot is attempting to mate
self.mating = False
self.attacking = False
self.attacked = False
self.mate_timer = 0
self.mem = None
def senses(self):
# Evaluate vision
vision = Bot.VISION.eval(self)
# Evaluate introspection
body = numpy.array([v(self) for v in Bot.INPUTS.values()])
state = numpy.concatenate((body, vision))
return state
def memory(self):
return self.mem
def set_memory(self, memory):
self.mem = memory
def act(self, action):
reward_acc = 0
still, left, lmov, forward, \
rmov, right, sprint, eat, \
mate, atck = (action == i for i in range(Bot.GET_NACTIONS()))
if eat:
if self.can_graze:
toeat = min(Bot.EAT_AMOUNT, Bot.MAX_ENERGY - self.energy)
eaten = self.world.eat(self.x, self.y, toeat)
self.energy += eaten
# reward_acc += eaten/Bot.EAT_AMOUNT * (Bot.MAX_ENERGY - self.energy)/Bot.MAX_ENERGY * Bot.EAT_REWARD
reward_acc += eaten * Bot.EAT_REWARD * (Bot.MAX_ENERGY - self.energy)/(Bot.EAT_AMOUNT * Bot.MAX_ENERGY)
elif mate:
# Check if meets mating criteria
# Reward will be added later if mate is successful
if self.mate_timer == Bot.MATE_TIMER and self.energy > Bot.MAX_ENERGY/2:
self.mating = True
elif atck:
self.attacking = True
elif sprint:
self.x += Bot.SPRINT_SPEED * numpy.cos(numpy.deg2rad(self.d))
self.y += Bot.SPRINT_SPEED * numpy.sin(numpy.deg2rad(self.d))
self.energy -= (Bot.SPRINT_SPEED - 1)
elif not still:
if left or lmov:
self.d -= Bot.TURN_SPEED
elif right or rmov:
self.d += Bot.TURN_SPEED
if lmov or forward or rmov:
self.x += Bot.MOVE_SPEED * numpy.cos(numpy.deg2rad(self.d))
self.y += Bot.MOVE_SPEED * numpy.sin(numpy.deg2rad(self.d))
self.energy -= 1
self.mate_timer += 1
self.mate_timer = min(self.mate_timer, Bot.MATE_TIMER)
# Punish death
if self.energy <= 0 or self.world.out_of_bounds(self.x,self.y) or self.attacked:
reward_acc += self.DEATH_REWARD
self.dead = True
return reward_acc
def color(self):
return self.r, self.g, self.b
def mate_succeed(self, other_bot):
self.mating = False
self.mate_timer = 0
self.energy -= Bot.MAX_ENERGY/2
return Bot.MATE_REWARD
def mate_failed(self):
self.mating = False
return Bot.FAILED_MATE_REWARD
def attack_succeed(self, other):
"""
Callback for successful attacks
:param other:
:return: Reward
"""
self.attacking = False
other.attacked = True
if self.can_graze:
return Bot.ATTACK_PREY_PREY_REWARD if other.can_graze else Bot.ATTACK_PREY_PRED_REWARD
else:
#self.energy += Bot.MAX_ENERGY + other.energy
self.energy = Bot.MAX_ENERGY
return Bot.ATTACK_PRED_PREY_REWARD if other.can_graze else Bot.ATTACK_PRED_PRED_REWARD
def attack_failed(self):
self.attacking = False
return Bot.ATTACK_FAILED_REWARD
def was_attacked(self, other):
self.attacked = True
return Bot.ATTACKED_REWARD
@staticmethod
def split_senses(senses):
"""
Splits senses into introspection senses and vision
:param senses: raw input
:return: inputs, vision, distance
"""
ins = senses[:len(Bot.INPUTS)]
vis, dist = Bot.VISION.split_vision(senses[len(Bot.INPUTS):])
return ins, vis, dist
@staticmethod
def label_inputs(inputs):
return {k:v for k,v in zip(Bot.INPUTS.keys(),inputs)}
@staticmethod
def label_actions(actions):
return {k:v for k,v in zip(Bot.ACTIONS,actions)}
@staticmethod
def action_label(action):
if 0 <= action < len(Bot.ACTIONS):
return Bot.ACTIONS[action]
else:
return None
@staticmethod
def make_actions_from_label(label):
actindx = Bot.ACTIONS.index(label)
return max(actindx,0) # No -1 values
@staticmethod
def make_brain(braincons, name):
"""
Make a brain suitable for this bot
:param name: brain name
:param braincons: brain constructor function
:return: instance of brain to use
"""
brain = braincons(name, Bot.GET_NINPUTS(), Bot.GET_NACTIONS())
return brain
@staticmethod
def GET_NINPUTS():
if Bot.INPUTS is None:
Bot.INPUTS = OrderedDict()
# Basic senses
Bot.INPUTS['energy'] = lambda b: min(b.energy / Bot.MAX_ENERGY, 1.0)
Bot.INPUTS['mate'] = lambda b: b.mate_timer / Bot.MATE_TIMER
Bot.INPUTS['tile'] = lambda b: b.world.get_tile_perc(b.x,b.y)
# Vision
Bot.VISION = BotVision("gray")
#Bot.VISION = BotVision("rgb")
Bot.NINPUTS = len(Bot.INPUTS) + len(Bot.VISION)
return Bot.NINPUTS
@staticmethod
def GET_NACTIONS():
if Bot.ACTIONS is None:
Bot.ACTIONS = ["still", "left", "lmov", "forward", "rmov",
"right", "sprint", "eat", "mate", "atck"]
Bot.NACTIONS = len(Bot.ACTIONS)
return Bot.NACTIONS
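# Small illustrative mapping for Bot's action helpers (indices follow the ACTIONS
# list above, which is only populated once GET_NACTIONS() has been called):
#
#   Bot.GET_NACTIONS()
#   Bot.action_label(3)                  # -> "forward"
#   Bot.make_actions_from_label("eat")   # -> 7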
class BotVision:
GRAY_SIZE = 2
RGB_SIZE = 4
    def __init__(self, color='gray', world=None):
        """
        Construct vision mechanic
        :param color: color format to use (gray or rgb)
        :param world: optional reference to the world (not used by eval, which
            reads bot.world directly)
        """
        self.color = color
        self.world = world
if self.color == 'gray':
self.size = Bot.VISION_BINS * BotVision.GRAY_SIZE
self.shape = (Bot.VISION_BINS, BotVision.GRAY_SIZE)
elif self.color == 'rgb':
self.size = Bot.VISION_BINS * BotVision.RGB_SIZE
self.shape = (Bot.VISION_BINS, BotVision.RGB_SIZE)
def eval(self, bot):
# Gets back 3 colors + 1 distance
vision = bot.world.get_vision(bot.x, bot.y, bot.d, Bot.FOV, Bot.VIEW_DIST, Bot.VISION_BINS)
if self.color == "gray":
# Convert to [-1, 1] scale
vscale = (-vision[:, 0] + vision[:, 2])
distances = vision[:, 3]
new_vision = numpy.ndarray(shape=self.shape)
new_vision[:,0] = vscale
new_vision[:,1] = distances
return new_vision.flatten()
else:
return vision.flatten()
def split_vision(self, vision):
"""
Split into vision and distance components
:param vision: raw vision input (as is output from eval)
:return: vision, distance
"""
vis = vision.reshape(self.shape)
return vis[:,:-1], vis[:,-1]
def apply_filter(self, colors):
return BotVision.filter(colors, self.color)
@staticmethod
def filter(colors,colorfilter):
if colorfilter == "gray":
return -colors[:,0] + colors[:,2]
elif colorfilter == "rgb":
return colors
def __len__(self):
return self.size
| mit | 7,979,461,654,846,873,000 | 29.236301 | 119 | 0.562691 | false |
mvaled/sentry | src/sentry/api/endpoints/organization_eventid.py | 1 | 2224 | from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry import eventstore
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Project
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario("ResolveEventId")
def resolve_event_id_scenario(runner):
runner.request(
method="GET",
path="/organizations/%s/eventids/%s/" % (runner.org.slug, runner.default_event.event_id),
)
class EventIdLookupEndpoint(OrganizationEndpoint):
doc_section = DocSection.ORGANIZATIONS
@attach_scenarios([resolve_event_id_scenario])
def get(self, request, organization, event_id):
"""
Resolve a Event ID
``````````````````
This resolves a event ID to the project slug and internal issue ID and internal event ID.
:pparam string organization_slug: the slug of the organization the
event ID should be looked up in.
:param string event_id: the event ID to look up.
:auth: required
"""
# Largely copied from ProjectGroupIndexEndpoint
if len(event_id) != 32:
return Response({"detail": "Event ID must be 32 characters."}, status=400)
project_slugs_by_id = dict(
Project.objects.filter(organization=organization).values_list("id", "slug")
)
try:
event = eventstore.get_events(
filter_keys={"project_id": project_slugs_by_id.keys(), "event_id": event_id},
limit=1,
)[0]
except IndexError:
raise ResourceDoesNotExist()
else:
return Response(
{
"organizationSlug": organization.slug,
"projectSlug": project_slugs_by_id[event.project_id],
"groupId": six.text_type(event.group_id),
"eventId": six.text_type(event.id),
"event": serialize(event, request.user),
}
)
| bsd-3-clause | -6,328,588,570,410,256,000 | 33.75 | 97 | 0.61241 | false |
unomena/tunobase | tunobase/corporate/company_info/contact/models.py | 1 | 1349 | """
CONTACT APP
This module sets up the database structure for the contact app.
Classes:
ContactMessage
Functions:
n/a
Created on 23 Oct 2013
@author: michael
"""
from django.db import models
from django.contrib.sites.models import Site
from django.conf import settings
from tunobase.corporate.company_info.contact import signals
class ContactMessage(models.Model):
"""Contact message sent from the Site."""
user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
name = models.CharField(max_length=255)
email = models.EmailField()
mobile_number = models.CharField(max_length=16, blank=True, null=True)
message = models.TextField()
site = models.ForeignKey(Site, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
"""Return a unicode object."""
return u'%s' % self.name
def send(self):
"""Fire off signal to be received by handlers."""
signals.contact_message_saved.send(
sender=self.__class__,
contact_message_id=self.id
)
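    # Illustrative sketch (not part of the original app): a receiver for the
    # signal fired in send() could be connected elsewhere, e.g.
    #
    #   from tunobase.corporate.company_info.contact import signals
    #
    #   def handle_contact_message(sender, contact_message_id, **kwargs):
    #       ...  # e.g. notify site admins about the new message
    #
    #   signals.contact_message_saved.connect(handle_contact_message)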
def save(self, *args, **kwargs):
""" Save contact form."""
if self.site is None:
self.site = Site.objects.get_current()
super(ContactMessage, self).save(*args, **kwargs)
self.send()
| bsd-3-clause | 9,177,308,814,407,269,000 | 23.981481 | 77 | 0.656783 | false |
sebastienhupin/qxrad | qooxdoo/tool/pylib/ecmascript/frontend/tree.py | 1 | 24785 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Fabian Jakobs (fjakobs)
#
################################################################################
import sys, os, copy, re
from misc import util
##
#<h2>Module Description</h2>
#<pre>
# NAME
# tree.py -- providing a tree data structure
#
# SYNTAX
# tree.py --help
#
# or
#
# import tree
# result = tree.Node()
#
# creates a new tree node
#
# DESCRIPTION
# The main provision by this module is the Node class. This lets you create
# arbitrary trees made out of linked nodes (parent - child relation).
#
#</pre>
##
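##
# EXAMPLE (illustrative sketch, not part of the original module): building a
# tiny tree and serializing it; the import path is assumed from this file's
# location, and the methods used are defined on the Node class below.
#
#   from ecmascript.frontend import tree
#
#   root  = tree.Node("block")
#   ident = tree.Node("identifier").set("name", "foo")   # set() returns the node
#   root.addChild(ident)
#   print(root.toXml())    # XML serialization of the two-node tree
##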
##
# Some nice short description of Foo
#
# @param a Number of foos to bar
class NodeAccessException (Exception):
def __init__ (self, msg, node):
Exception.__init__(self, msg)
self.node = node
NODE_VARIABLE_TYPES = ("dotaccessor", "identifier")
NODE_STATEMENT_CONTAINERS = ("statements", "block")
class Node(object):
def __init__ (self, ntype):
self.type = ntype
self.parent = None
self.children = []
self.attributes = {}
self.dep = None # a potential DependencyItem()
def __str__(self):
return nodeToXmlStringNR(self)
def hasAttributes(self):
#return hasattr(self, "attributes")
# ApiLoader._isNodeIdentical() needs this len() check
# TODO: remove commented calls to hasAttributes() and hasattr(self,attributes)
return len(self.attributes)
def set(self, key, value):
"""Sets an attribute"""
if not isinstance(value, (basestring, int, long, float, complex, bool)):
raise NodeAccessException("'value' is no string or number: " + str(value), self)
#if not self.hasAttributes():
if False:
self.attributes = {}
self.attributes[key] = value
return self
def get(self, key, default = None):
value = None
#if hasattr(self, "attributes") and key in self.attributes:
if key in self.attributes:
value = self.attributes[key]
if value != None:
return value
elif default != None:
return default
else:
raise NodeAccessException("Node " + self.type + " has no attribute " + key, self)
def remove(self, key):
if not key in self.attributes:
return
del self.attributes[key]
if len(self.attributes) == 0:
del self.attributes
##
# Make a default copy of self (this includes instanceof)
def clone(self):
clone_ = copy.copy(self)
# keep .attributes non-shared
if True:
clone_.attributes = copy.copy(self.attributes)
return clone_
##
# Copy the properties of self into other
# (this might not be entirely in sync with treegenerator.symbol())
def patch(self, other):
for attr, val in vars(self).items():
if attr in (
"type", "id", # preserve other's classification
"children", # don't adopt existing children (what would their .parent be?!)
"parent", # avoid tree relations
):
continue
setattr(other, attr, val)
# keep .attributes non-shared
if hasattr(self, "attributes"):
other.attributes = copy.copy(self.attributes)
def hasParent(self):
return self.parent
##
# checks whether the node hierarchy leading to node ends with contextPath,
# ie. if node.parent.type == contextPath[-1], node.parent.parent.type ==
# contextPath[-2] asf. Example: varNode.hasParentContext("call/operand")
# checks whether varNode.parent is "operand" and varNode.parent.parent is
# "call" type, ie. it's a function being called; the wildcard '*' is allowed
# to indicate any type on a particular level, like "value/*/operand"
def hasParentContext(self, contextPath):
path_elems = contextPath.split('/')
currNode = self
for path_elem in reversed(path_elems):
if currNode.parent:
if ( path_elem == '*' or currNode.parent.type == path_elem ):
currNode = currNode.parent
else:
return False
else:
return False # no parent, no match
return True
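    ##
    # EXAMPLE (illustrative sketch): for an identifier node whose parent is an
    # "operand" node and whose grandparent is a "call" node,
    #   identNode.hasParentContext("call/operand")     # -> True
    # and '*' may stand in for any type on one level, e.g. "value/*/operand".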
##
# return the chain of parent (types) of this node
def getParentChain(self):
chain = []
currNode = self
while currNode.parent:
chain.append(currNode.parent.type)
currNode = currNode.parent
return reversed (chain)
##
# return the root of the current tree
def getRoot(self):
rnode = self
while rnode.parent:
rnode = rnode.parent
return rnode
def hasChildren(self, ignoreComments = False):
if not ignoreComments:
return self.children
else:
return [c for c in self.children if c.type not in ("comment", "commentsBefore", "commentsAfter")]
getChildren = hasChildren
def addChild(self, childNode, index = None):
if childNode:
if childNode.parent and childNode in childNode.parent.children:
childNode.parent.removeChild(childNode)
if index != None:
self.children.insert(index, childNode)
else:
self.children.append(childNode)
childNode.parent = self
return self
def removeChild(self, childNode):
if self.children:
self.children.remove(childNode)
#childNode.parent = None
def removeAllChildren(self):
for child in self.children[:]:
self.children.remove(child)
def replaceChild(self, oldChild, newChild):
if oldChild in self.children and oldChild is not newChild:
if newChild.parent and newChild in newChild.parent.children:
newChild.parent.removeChild(newChild)
self.children.insert(self.children.index(oldChild), newChild)
newChild.parent = self
self.children.remove(oldChild)
##
# Get child by type or position
#
def getChild(self, spec, mandatory = True):
if self.children:
for pos,child in enumerate(self.children):
if pos==spec or child.type==spec:
return child
if mandatory:
raise NodeAccessException("Node '%s' has no child with type or position '%s'"
% (self.type, str(spec)), self)
def hasChildRecursive(self, ntype):
if isinstance(ntype, basestring):
if self.type == ntype:
return True
elif isinstance(ntype, util.FinSequenceTypes):
if self.type in ntype:
return True
if self.children:
for child in self.children:
if child.hasChildRecursive(ntype):
return True
return False
##
# Whether <node> is self, or a descendant in the tree rooted by self.
def contains(self, node):
if self is node:
return node
else:
for child in self.children:
if child.contains(node):
return node
return None
##
# TODO: Rename this to hasChildByType
def hasChild(self, ntype):
if self.children:
for child in self.children:
if isinstance(ntype, basestring):
if child.type == ntype:
return True
elif isinstance(ntype, list):
if child.type in ntype:
return True
return False
def getChildrenLength(self, ignoreComments=False):
if self.children:
if ignoreComments:
counter = 0
for child in self.children:
if not child.type in ["comment", "commentsBefore", "commentsAfter"]:
counter += 1
return counter
else:
return len(self.children)
return 0
def makeComplex(self):
makeComplex = self.get("makeComplex", '')
if makeComplex != '':
return makeComplex
else:
makeComplex = False
if self.type == "comment":
makeComplex = True
elif self.type == "block":
if self.children:
counter = 0
for child in self.children:
if child.type != "commentsAfter":
counter += 1
if counter > 1:
makeComplex = True
elif self.type == "loop":
if self.get("loopType") == "IF" and self.parent and self.parent.type == "elseStatement":
pass
else:
makeComplex = True
elif self.type == "function":
makeComplex = self.getChild("body").hasChild("block") and self.getChild("body").getChild("block").getChildrenLength() > 0
elif self.type in ["loop", "switch"]:
makeComplex = True
elif self.hasChild("commentsBefore"):
makeComplex = True
# Final test: Ask the children (slower)
if not makeComplex and not self.type in ["comment", "commentsBefore", "commentsAfter"]:
makeComplex = self.isComplex()
self.set("makeComplex", makeComplex)
# print "makeComplex: %s = %s" % (self.type, makeComplex)
return makeComplex
def isComplex(self):
isComplex = self.get("isComplex", ())
if isComplex != ():
return isComplex
else:
isComplex = False
if not self.children:
isComplex = False
elif self.type == "block":
counter = 0
if self.children:
for child in self.children:
if child.type != "commentsAfter":
counter += 1
if child.hasChild("commentsBefore"):
counter += 1
if counter > 1:
break
if counter > 1:
isComplex = True
else:
if self.getChildrenLength() == 0:
isComplex = False
# in else, try to find the mode of the previous if first
elif self.parent and self.parent.type == "elseStatement":
isComplex = self.parent.parent.getChild("statement").hasComplexBlock()
# in if, try to find the mode of the parent if (if existent)
elif self.parent and self.parent.type == "statement" and self.parent.parent.type == "loop" and self.parent.parent.get("loopType") == "IF":
if self.parent.parent.parent and self.parent.parent.parent.parent:
if self.parent.parent.parent.parent.type == "loop":
isComplex = self.parent.parent.parent.parent.getChild("statement").hasComplexBlock()
# in catch/finally, try to find the mode of the try statement
elif self.parent and self.parent.parent and self.parent.parent.type in ["catch", "finally"]:
isComplex = self.parent.parent.parent.getChild("statement").hasComplexBlock()
elif self.type == "elseStatement":
if self.hasComplexBlock():
isComplex = True
elif self.hasChild("loop") and self.getChild("loop").getChild("statement").hasComplexBlock():
isComplex = True
elif self.type == "array" :
if self.getChildrenLength(True) > 5:
isComplex = True
elif self.type == "map" :
ml = self.getChildrenLength(True)
if ml > 1:
isComplex = True
# Final test: Ask the children (slower)
if not (self.type == "elseStatement" and self.hasChild("loop")):
if not isComplex and self.hasComplexChildren():
isComplex = True
# print self.type + " :: %s" % isComplex
self.set("isComplex", isComplex)
# print "isComplex: %s = %s" % (self.type, isComplex)
return isComplex
def hasComplexChildren(self):
if self.children:
for child in self.children:
if child.makeComplex():
return True
return False
def hasComplexBlock(self):
if self.hasChild("block"):
return self.getChild("block").isComplex()
return False
def hasBlockChildren(self):
if self.hasChild("block"):
return self.getChild("block").hasChildren()
return False
def getChildPosition(self, searchedChild, ignoreComments = False):
if self.children and searchedChild in self.children:
if ignoreComments:
counter = 0
for child in self.children:
if child == searchedChild:
return counter
if not child.type in ["comment", "commentsBefore", "commentsAfter"]:
counter += 1
else:
return self.children.index(searchedChild)
return -1
def getChildByPosition(self, pos, mandatory = True, ignoreComments = False):
if self.children:
i = 0
for child in self.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
if i == pos:
return child
i += 1
if mandatory:
            raise NodeAccessException("Node " + self.type + " has no child at position %s" % pos, self)
##
# List-valued!
def getChildsByTypes(self, type_list):
return [c for c in self.children if c.type in type_list]
def getChildByAttribute(self, key, value, mandatory = True):
if self.children:
for child in self.children:
if child.get(key,mandatory) == value:
return child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child with attribute " + key + " = " + value, self)
def getChildByTypeAndAttribute(self, ntype, key, value, mandatory = True, recursive = False):
if self.children:
for child in self.children:
if child.type == ntype and child.get(key,mandatory) == value:
return child
elif recursive:
found = child.getChildByTypeAndAttribute(ntype, key, value, False, True)
if found:
return found
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child with type " + ntype + " and attribute " + key + " = " + value, self)
def getFirstChild(self, mandatory = True, ignoreComments = False):
if self.children:
for child in self.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
return child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no children", self)
def getLastChild(self, mandatory = True, ignoreComments = False):
if self.children:
if not ignoreComments:
return self.children[-1]
else:
pos = len(self.children) - 1
while pos >= 0:
child = self.children[pos]
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
pos -= 1
continue
return child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no children", self)
def getPreviousSibling(self, mandatory = True, ignoreComments = False):
if self.parent:
prev = None
for child in self.parent.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
if child == self:
if prev != None:
return prev
else:
break
prev = child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no previous sibling", self)
def getFollowingSibling(self, mandatory = True, ignoreComments = False):
if self.parent:
prev = None
for child in self.parent.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
if prev != None:
return child
if child == self:
prev = child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no following sibling", self)
def isFirstChild(self, ignoreComments = False):
if not self.parent:
return False
return self.parent.getFirstChild(False, ignoreComments) == self
def isLastChild(self, ignoreComments = False):
if not self.parent:
return False
return self.parent.getLastChild(False, ignoreComments) == self
#def isVar(self):
# return self.type in NODE_VARIABLE_TYPES
def isStatement(self):
return self.parent and self.parent.type in NODE_STATEMENT_CONTAINERS
def addListChild(self, listName, childNode):
listNode = self.getChild(listName, False)
if not listNode:
listNode = Node(listName)
self.addChild(listNode)
listNode.addChild(childNode)
def getListChildByAttribute(self, listName, key, value, mandatory = True):
listNode = self.getChild(listName, False)
if listNode:
return listNode.getChildByAttribute(key, value, mandatory)
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child " + listName, self)
def getFirstListChild(self, listName, mandatory = True):
listNode = self.getChild(listName, False)
if listNode:
return listNode.getFirstChild(mandatory)
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child " + listName, self)
def getAllChildrenOfType(self, ntype):
return self._getAllChildrenOfType(ntype, [])
def _getAllChildrenOfType(self, ntype, found=[]):
if self.children:
for child in self.children:
if child.type == ntype:
found.append(child)
child._getAllChildrenOfType(ntype, found)
return found
def toXml(self, prefix = "", childPrefix = " ", newLine="\n", encoding="utf-8"):
return nodeToXmlString(self, prefix, childPrefix, newLine, encoding)
def toJson(self, prefix = "", childPrefix = " ", newLine="\n"):
return nodeToJsonString(self, prefix, childPrefix, newLine)
def toJavascript(self):
from ecmascript.backend import formatter
optns = formatter.defaultOptions()
result = formatter.formatNode(self, optns, [])
return u''.join(result)
def nodeIter(self):
"A generator/iterator method, to traverse a tree and 'yield' each node"
yield self
if self.children:
for child in self.children:
for node in child.nodeIter():
yield node
def nodeTreeMap(self, fn):
"""As an alternative, a pure recursion walk that applies a function fn to each node.
This allows to control the recursion through fn's return value.
Signature of fn: fn(node,isLeaf)."""
if not self.children:
rc = fn(self,True)
return
else:
rc = fn(self,False)
if rc == 0: # != 0 means prune this subtree
for child in self.children:
child.nodeTreeMap(fn)
return
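##
# EXAMPLE (illustrative sketch, not part of the original module): pruning a
# subtree during nodeTreeMap() traversal by returning a non-zero value from fn,
# as described in the docstring above.
#
#   def visit(node, isLeaf):
#       if node.type == "comment":
#           return 1            # non-zero return prunes this subtree
#       print(node.type)
#       return 0
#
#   root.nodeTreeMap(visit)
#
#   # nodeIter() instead yields every node, with no pruning:
#   types = [n.type for n in root.nodeIter()]
##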
def nodeToXmlStringNR(node, prefix="", encoding="utf-8"):
hasText = False
asString = prefix + "<" + node.type
#if node.hasAttributes():
if True:
for key in node.attributes:
asString += " " + key + "=\"" + escapeXmlChars(node.attributes[key], True, encoding) + "\""
asString += "/>"
return asString
def nodeToXmlString(node, prefix = "", childPrefix = " ", newLine="\n", encoding="utf-8"):
asString = u''
hasText = False
# comments
for attr in ('comments', 'commentsAfter'):
if hasattr(node, attr) and getattr(node, attr):
cmtStrings = []
for comment in getattr(node, attr):
cmtStrings.append(nodeToXmlString(comment, prefix, childPrefix, newLine, encoding))
asString += u''.join(cmtStrings)
# own str repr
asString += prefix + "<" + node.type
#if node.hasAttributes():
if True:
for key in node.attributes:
if key == "text":
hasText = True
else:
asString += " " + key + "=\"" + escapeXmlChars(node.attributes[key], True, encoding) + "\""
if not node.hasChildren() and not hasText:
asString += "/>" + newLine
else:
asString += ">"
if hasText:
asString += newLine + prefix + childPrefix
asString += "<text>" + escapeXmlChars(node.attributes["text"], False, encoding) + "</text>" + newLine
if node.hasChildren():
asString += newLine
for child in node.children:
asString += nodeToXmlString(child, prefix + childPrefix, childPrefix, newLine, encoding)
asString += prefix + "</" + node.type + ">" + newLine
return asString
def nodeToJsonString(node, prefix = "", childPrefix = " ", newLine="\n"):
asString = prefix + '{"type":"' + escapeJsonChars(node.type) + '"'
#if node.hasAttributes():
if True:
asString += ',"attributes":{'
firstAttribute = True
for key in node.attributes:
if not firstAttribute:
asString += ','
asString += '"' + key + '":"' + escapeJsonChars(node.attributes[key]) + '"'
firstAttribute = False
asString += '}'
if node.hasChildren():
asString += ',"children":[' + newLine
prefix = prefix + childPrefix
for child in node.children:
asString += nodeToJsonString(child, prefix, childPrefix, newLine) + ',' + newLine
# NOTE We remove the ',\n' of the last child
if newLine == "":
asString = asString[:-1] + prefix + ']'
else:
asString = asString[:-2] + newLine + prefix + ']'
asString += '}'
return asString
def getNodeData(node):
data = {
"type" : node.type
}
#if node.hasAttributes():
if True:
data["attributes"] = {}
for key in node.attributes:
data["attributes"][key] = node.attributes[key]
if node.hasChildren():
data["children"] = []
for child in node.children:
data["children"].append(getNodeData(child))
return data
def escapeXmlChars(text, inAttribute, encoding="utf-8"):
if isinstance(text, basestring):
# http://www.w3.org/TR/xml/#dt-escape
text = text.replace("\"", """).replace("'", "'").replace("&", "&").replace("<", "<").replace(">", ">")
elif isinstance(text, bool):
text = str(text).lower()
else:
text = str(text)
return text
def escapeJsonChars(text):
if isinstance(text, basestring):
# http://tools.ietf.org/html/rfc4627#section-2.5
text = text.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t').replace('\b', '\\b').replace('\f', '\\f').replace('/', '\\/')
elif isinstance(text, bool):
text = str(text).lower()
else:
text = str(text)
return text
| lgpl-3.0 | -3,710,500,687,276,511,700 | 30.775641 | 186 | 0.553319 | false |
emc-openstack/storops | storops_test/unity/jh_mock.py | 1 | 1253 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from storops import exception as ex
from storops.unity.resource.job import UnityJob
class MockJobHelper(object):
def __init__(self, cli, interval=5):
self._cli = cli
self.started = True
def wait_job(self, job, async_timeout, async_interval):
if job.id == 'N-3078':
return UnityJob(_id=job.id, cli=self._cli)
if job.id == 'N-3079':
ret_job = UnityJob(_id=job.id, cli=self._cli)
raise ex.JobStateError(ret_job)
if job.id == 'N-3080':
raise ex.JobTimeoutException()
| apache-2.0 | -8,038,427,901,715,396,000 | 35.852941 | 78 | 0.667997 | false |
mdehus/goose-IEC61850-scapy | goose.py | 1 | 5622 | import struct
import binascii
from scapy.all import *
import BER
class ASNType(object):
tag = ''
def __init__(self, data='', length=0):
pass
    def unpack(self, data):
        raise NotImplementedError()
    def pack(self, data):
        raise NotImplementedError()
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self.data)
class Integer(ASNType):
def __init__(self, data='', length=0):
self.data = BER.unpack_varint(data, length)
def pack(self):
if isinstance(self.data, int):
if self.data <= 255:
return struct.pack('!B', self.data)
elif self.data <= 65535:
return struct.pack('!h', self.data)
else:
return struct.pack('!i', self.data)
if isinstance(self.data, long):
return struct.pack('!l', self.data)
class VisibleString(ASNType):
def __init__(self, data='', length=0):
self.data = data
def __repr__(self):
return "'" + self.data + "'"
def pack(self):
return self.data
class Boolean(ASNType):
ID = 3
def __init__(self, data='', length=0):
self.data = struct.unpack('!b', data)[0]
def __repr__(self):
if self.data:
return "True"
else:
return "False"
def pack(self):
return struct.pack('!b', self.data)
class UTCTime(ASNType):
def __init__(self, data='', length=0):
self.data = struct.unpack('!d', data)[0]
def pack(self):
return struct.pack('!d', self.data)
class UnsignedInteger(ASNType):
    def __init__(self, data='', length=0):
        # NOTE: the original struct.unpack() call here was incomplete; decoding
        # as a variable-length integer (mirroring Integer above) is an assumption.
        self.data = BER.unpack_varint(data, length)
class Float(ASNType):
def __init__(self, data='', length=0):
self.data = struct.unpack('!f', data)[0]
    def pack(self):
        return struct.pack('!f', self.data)
class Real(Float):
pass
class OctetString(ASNType):
def __init__(self, data='', length=0):
self.data = struct.unpack('!d', data)[0]
class BitString(ASNType):
ID = 4
def __init__(self, data='', length=0):
c = {'0': '0000', '1': '0001', '2': '0010',
'3':'0011', '4':'0100', '5':'0101',
'6':'0110', '7':'0111', '8':'1000',
'9':'1001', 'a':'1010', 'b':'1011',
'c':'1100', 'd':'1101', 'e':'1110',
'f':'1111'}
self.padding = struct.unpack('!h', '\x00'+data[:1])[0]
h = binascii.b2a_hex(data[1:])
self.data = ''
for i in h:
self.data += c[i]
def pack(self):
packed_padding = struct.pack('!B', self.padding)
packed_data = struct.pack('!h', int(self.data, 2))
return packed_padding + packed_data
class ObjectID(ASNType):
pass
class BCD(ASNType):
pass
class BooleanArray(ASNType):
pass
class UTF8String(ASNType):
pass
class Data(object):
tag = ''
tagmap = {(128,0,3):('boolean', Boolean),
(128,0,4):('bitstring', BitString),
(128,0,5):('integer', Integer),
(129,0,6):('unsigned', UnsignedInteger),
(128,0,7):('float', Float),
(128,0,8):('real', Real),
(128,0,9):('octetstring', OctetString),
(129,0,10):('visiblestring', VisibleString),
(128,0,12):('binarytime', UTCTime),
(128,0,13):('bcd', BCD),
(129,0,14):('booleanarray', BooleanArray),
(128,0,15):('objID', ObjectID),
(128,0,16):('mMSString', UTF8String),
(128,0,17):('utcstring', UTCTime)}
def __init__(self, data=None, length=0):
self.tagmap[(128,32,1)] = ('array', Data)
self.tagmap[(128,32,2)] = ('structure', Data)
self.data = BER.decoder(data, self.tagmap, decode_as_list=True)
def __getitem__(self, index):
return self.data[index]
def __repr__(self):
return repr(self.data)
def pack(self):
""" This is a hack, and should probably be integrated in to
the BER encoder at some point.
"""
packed_data = ''
for i in self.data:
tag = i.tag[0] + i.tag[1] + i.tag[2]
tag = struct.pack('!B', tag)
package = i.pack()
if len(package) < 128:
length = struct.pack('!B', len(package))
else: # HACK.. this will only support lengths up to 254.
length = struct.pack('!BB', 129, len(package))
packed_data += tag + length + package
return packed_data
class GOOSEPDU(object):
ID = 97
tagmap = {(128,0,0):('gocbRef', VisibleString),
(128,0,1):('timeAllowedToLive', Integer),
(128,0,2):('datSet', VisibleString),
(128,0,3):('goID', VisibleString),
(128,0,4):('t', UTCTime),
(128,0,5):('stNum', Integer),
(128,0,6):('sqNum', Integer),
(128,0,7):('test',Boolean),
(128,0,8):('confRev', Integer),
(128,0,9):('ndsCom', Boolean),
(128,0,10):('numDataSetEntries', Integer),
(128,32,11):('allData', Data)}
def __init__(self, data=None, length=0):
self.__dict__ = BER.decoder(data, self.tagmap)
def pack(self):
return BER.encoder(self.__dict__, self.tagmap)
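# Illustrative sketch (not part of the original file): decoding and re-encoding
# a GOOSE PDU; "raw_pdu_bytes" is a placeholder for the BER-encoded APDU payload.
#
#   pdu = GOOSEPDU(raw_pdu_bytes)
#   print(pdu.gocbRef, pdu.stNum, pdu.sqNum)   # attributes come from the tagmap
#   rebuilt = pdu.pack()                       # re-encode via BER.encoder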
class GOOSE(Packet):
name = "GOOSE"
fields_desc = [ ShortField("APPID", 3),
ShortField("Length", None),
ShortField("Reserved1", 0),
ShortField("Reserved2", 0),
]
| gpl-2.0 | -9,140,397,460,529,846,000 | 28.129534 | 71 | 0.505692 | false |