repo_name (stringlengths 5–92) | path (stringlengths 4–232) | copies (stringclasses, 19 values) | size (stringlengths 4–7) | content (stringlengths 721–1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
nik-hil/scripts | palindrome.py | 1 | 1731 | '''This program finds the palindrome in a string by taking each character as a
center of the palindrome. From each center it probes in both directions to see
how far the palindrome extends.
A palindrome might also exist in the space between two characters, e.g., "bb"
'''


def palindrome(string):
    '''test cases
    >>> palindrome("")
    >>> palindrome("a")
    'a'
    >>> palindrome("ab")
    'a'
    >>> palindrome("bb")
    'bb'
    >>> palindrome("abcba")
    'abcba'
    >>> palindrome("efabcbad")
    'abcba'
    '''
    if not string:
        return None
    max_range = ()
    max_length = 0
    for i in range(len(string)):
        current_range = find_palindrome(i, string)
        new_length = current_range[1] - current_range[0] + 1
        if max_length < new_length:
            max_length = new_length
            max_range = current_range
    return string[max_range[0]:max_range[1] + 1]


def find_palindrome(i, string):
    len_str = len(string)
    len_first = 0
    len_second = 0
    low, high = find_palindrome_range(i, i, string, len_str)
    if low == i and high == i:
        len_first = (low, high)
    else:
        len_first = (low + 1, high - 1)
    low, high = find_palindrome_range(i, i+1, string, len_str)
    if low == i and high == i + 1:
        len_second = (low, high - 1)
    else:
        len_second = (low + 1, high - 1)
    if len_first[1] - len_first[0] > len_second[1] - len_second[0]:
        return len_first
    else:
        return len_second


def find_palindrome_range(low, high, string, len_str):
    while (low > -1 and high < len_str and string[low] == string[high]):
        low -= 1
        high += 1
    return low, high


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| mit | -8,828,558,457,101,507,000 | 26.919355 | 79 | 0.569613 | false |
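The docstring above describes the expand-around-center approach. The snippet below is a small self-contained sketch of that idea, written for illustration only (it does not reuse the file above); it checks both a single-character center and the gap between two characters:

```python
def expand(s, low, high):
    # Grow outward while the characters at both ends match.
    while low >= 0 and high < len(s) and s[low] == s[high]:
        low -= 1
        high += 1
    # The loop overshoots by one step on each side.
    return s[low + 1:high]

s = "efabcbad"
best = ""
for center in range(len(s)):
    for candidate in (expand(s, center, center),       # odd length, single-character center
                      expand(s, center, center + 1)):  # even length, center between characters
        if len(candidate) > len(best):
            best = candidate
print(best)  # abcba
```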
Bairdo/faucet | faucet/check_faucet_config.py | 1 | 1839 | #!/usr/bin/env python

"""Standalone script to check FAUCET configuration, return 0 if provided config OK."""

# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import sys

try:
    import valve
    from config_parser import dp_parser
except ImportError:
    from faucet import valve
    from faucet.config_parser import dp_parser


def check_config(conf_files):
    logname = '/dev/null'
    logger = logging.getLogger('%s.config' % logname)
    logger_handler = logging.StreamHandler(stream=sys.stderr)
    logger.addHandler(logger_handler)
    logger.propagate = 0
    logger.setLevel(logging.DEBUG)
    for conf_file in conf_files:
        parse_result = dp_parser(conf_file, logname)
        if parse_result is None:
            return False
        else:
            _, dps = parse_result
            for dp in dps:
                valve_dp = valve.valve_factory(dp)
                if valve_dp is None:
                    return False
                print((dp.to_conf()))
    return True


def main():
    if check_config(sys.argv[1:]):
        sys.exit(0)
    else:
        sys.exit(-1)


if __name__ == '__main__':
    main()
| apache-2.0 | 3,812,037,225,925,823,000 | 29.65 | 86 | 0.668842 | false |
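The script above is intended to be run from the command line with one or more FAUCET config files and reports validity through its exit code. A minimal sketch of driving it from Python is shown below; the script path and the config path are placeholders for illustration, not paths taken from this repository:

```python
import subprocess
import sys

# Hypothetical paths, adjust to where check_faucet_config.py and your FAUCET YAML live.
result = subprocess.run(
    [sys.executable, "check_faucet_config.py", "/etc/faucet/faucet.yaml"],
    capture_output=True, text=True)
print("config OK" if result.returncode == 0 else "config BROKEN")
```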
milapour/palm | palm/blink_state_enumerator.py | 1 | 8844 | from types import IntType

import numpy

from palm.util import multichoose
from palm.state_collection import StateCollectionFactory


class SingleDarkState(object):
    """
    A macrostate for a BlinkModel with one dark microstate.
    The available microstates are `I`, `A`, `D`, and `B`.

    Attributes
    ----------
    initial_state_flag : bool
        This flag is used by BlinkModel when creating an initial
        probability vector. Expected to be true only for the
        macrostate in which `I` is the only microstate with nonzero
        population.

    Parameters
    ----------
    id_str : string
        A label that is used to identify this macrostate.
    I,A,D,B : int
        The populations of the respective microstates.
    observation_class : string
        The aggregated class to which this macrostate belongs.
    """
    def __init__(self, id_str, I, A, D, B, observation_class):
        self.id = id_str
        self.I = I
        self.A = A
        self.D = D
        self.B = B
        self.observation_class = observation_class
        self.initial_state_flag = False

    def __str__(self):
        return "%s %s" % (self.id, self.observation_class)

    def as_array(self):
        return numpy.array([self.I, self.A, self.D, self.B])

    def get_id(self):
        return self.id

    def get_class(self):
        return self.observation_class

    def is_initial_state(self):
        return self.initial_state_flag

    def set_initial_state_flag(self):
        self.initial_state_flag = True

    def as_dict(self):
        return {'observation_class':self.get_class(),
                'I':self.I, 'A':self.A, 'D':self.D, 'B':self.B}


class DoubleDarkState(object):
    """
    A macrostate for a BlinkModel with two dark microstates.
    The available microstates are `I`, `A`, `D1`, `D2`, and `B`.

    Attributes
    ----------
    initial_state_flag : bool
        This flag is used by BlinkModel when creating an initial
        probability vector. Expected to be true only for the
        macrostate in which `I` is the only microstate with nonzero
        population.

    Parameters
    ----------
    id_str : string
        A label that is used to identify this macrostate.
    I,A,D1,D2,B : int
        The populations of the respective microstates.
    observation_class : string
        The aggregated class to which this macrostate belongs.
    """
    def __init__(self, id_str, I, A, D1, D2, B, observation_class):
        self.id = id_str
        self.I = I
        self.A = A
        self.D1 = D1
        self.D2 = D2
        self.B = B
        self.observation_class = observation_class
        self.initial_state_flag = False

    def __str__(self):
        return "%s %s" % (self.id, self.observation_class)

    def as_array(self):
        return numpy.array([self.I, self.A, self.D1, self.D2, self.B])

    def get_id(self):
        return self.id

    def get_class(self):
        return self.observation_class

    def is_initial_state(self):
        return self.initial_state_flag

    def set_initial_state_flag(self):
        self.initial_state_flag = True

    def as_dict(self):
        return {'observation_class':self.get_class(),
                'I':self.I, 'A':self.A, 'D1':self.D1, 'D2':self.D2, 'B':self.B}


class SingleDarkStateEnumeratorFactory(object):
    """
    Creates a state enumerator for a BlinkModel with one dark state.

    Attributes
    ----------
    num_microstates : int

    Parameters
    ----------
    N : int
        The total number of fluorophores.
    state_factory : class
        Factory class for State objects.
    max_A : int
        Number of fluorophores that can be simultaneously active.
    """
    def __init__(self, N, state_factory=SingleDarkState, max_A=5):
        assert type(N) is IntType
        self.N = N
        self.state_factory = state_factory
        self.max_A = max_A
        self.num_microstates = len(['I', 'A', 'D', 'B'])

    def create_state_enumerator(self):
        """
        Creates a method that builds a StateCollection, made up of
        all possible macrostates in the model, subject to the
        constraint that no states with `A` > `max_A` are allowed.

        Returns
        -------
        enumerate_states : callable f()
            A method that builds a StateCollection.
        """
        def enumerate_states():
            """
            Builds a StateCollection for a model with one dark state.
            No states with `A` > `max_A` are allowed.

            Returns
            -------
            state_collection : StateCollection
                The allowed macrostates for the model.
            initial_state_id, final_state_id : string
                The identifier strings for the states where a time trace
                is expected to start and finish, respectively.
            """
            sc_factory = StateCollectionFactory()
            for this_count_list in multichoose(self.num_microstates, self.N):
                I = this_count_list[0]
                A = this_count_list[1]
                D = this_count_list[2]
                B = this_count_list[3]
                if A > self.max_A:
                    continue
                else:
                    if A > 0:
                        obs_class = 'bright'
                    else:
                        obs_class = 'dark'
                    id_str = "%d_%d_%d_%d" % (I, A, D, B)
                    this_state = self.state_factory(id_str, I, A, D, B,
                                                    obs_class)
                    if I == self.N:
                        initial_state_id = this_state.get_id()
                    elif B == self.N:
                        final_state_id = this_state.get_id()
                    else:
                        pass
                    sc_factory.add_state(this_state)
            state_collection = sc_factory.make_state_collection()
            return state_collection, initial_state_id, final_state_id
        return enumerate_states


class DoubleDarkStateEnumeratorFactory(object):
    """
    Creates a state enumerator for a BlinkModel with two dark states.

    Attributes
    ----------
    num_microstates : int

    Parameters
    ----------
    N : int
        The total number of fluorophores.
    state_factory : class
        Factory class for State objects.
    max_A : int
        Number of fluorophores that can be simultaneously active.
    """
    def __init__(self, N, state_factory=DoubleDarkState, max_A=5):
        assert type(N) is IntType
        self.N = N
        self.state_factory = state_factory
        self.max_A = max_A
        self.num_microstates = len(['I', 'A', 'D1', 'D2', 'B'])

    def create_state_enumerator(self):
        """
        Creates a method that builds a StateCollection, made up of
        all possible macrostates in the model, subject to the
        constraint that no states with `A` > `max_A` are allowed.

        Returns
        -------
        enumerate_states : callable f()
            A method that builds a StateCollection.
        """
        def enumerate_states():
            """
            Builds a StateCollection for a model with two dark states.
            No states with `A` > `max_A` are allowed.

            Returns
            -------
            state_collection : StateCollection
                The allowed macrostates for the model.
            initial_state_id, final_state_id : string
                The identifier strings for the states where a time trace
                is expected to start and finish, respectively.
            """
            sc_factory = StateCollectionFactory()
            for this_count_list in multichoose(self.num_microstates, self.N):
                I = this_count_list[0]
                A = this_count_list[1]
                D1 = this_count_list[2]
                D2 = this_count_list[3]
                B = this_count_list[4]
                if A > self.max_A:
                    continue
                else:
                    if A > 0:
                        obs_class = 'bright'
                    else:
                        obs_class = 'dark'
                    id_str = "%d_%d_%d_%d_%d" % (I, A, D1, D2, B)
                    this_state = self.state_factory(id_str, I, A, D1, D2, B,
                                                    obs_class)
                    if I == self.N:
                        initial_state_id = this_state.get_id()
                    elif B == self.N:
                        final_state_id = this_state.get_id()
                    else:
                        pass
                    sc_factory.add_state(this_state)
            state_collection = sc_factory.make_state_collection()
            return state_collection, initial_state_id, final_state_id
        return enumerate_states
| bsd-2-clause | 1,250,265,342,732,406,800 | 33.956522 | 79 | 0.537766 | false |
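The enumerators above rely on `palm.util.multichoose` to distribute N fluorophores over the available microstates. The following self-contained sketch reproduces that kind of enumeration with the standard library, purely to illustrate the combinatorics; `multichoose_counts` is a stand-in written here, not code taken from palm:

```python
from itertools import combinations_with_replacement

def multichoose_counts(num_bins, total):
    # Yield every way of splitting `total` identical items into `num_bins` ordered bins
    # (the classic stars-and-bars enumeration).
    for dividers in combinations_with_replacement(range(total + 1), num_bins - 1):
        bounds = (0,) + dividers + (total,)
        yield [bounds[i + 1] - bounds[i] for i in range(num_bins)]

# Distribute N=2 fluorophores over the four microstates I, A, D, B.
for I, A, D, B in multichoose_counts(4, 2):
    print(I, A, D, B)
```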
walles/px | px/px_load.py | 1 | 3820 | """
Functions for visualizing system load over time in a Unicode graph.

The one you probably want to call is get_load_string().
"""

import os

from . import px_cpuinfo
from . import px_terminal

import sys
if sys.version_info.major >= 3:
    # For mypy PEP-484 static typing validation
    from six import text_type  # NOQA
    from typing import Tuple  # NOQA


physical, logical = px_cpuinfo.get_core_count()
physical_string = px_terminal.bold(str(physical) + " cores")
cores_string = "[{} | {} virtual]".format(physical_string, logical)


def average_to_level(average, peak):
    level = 3 * (average / peak)
    return int(round(level))


def averages_to_levels(avg0, avg1, avg2):
    """
    Converts three load averages into three levels.
    A level is a 0-3 integer value.

    This function returns the three levels, plus the peak value the levels are
    based on.
    """
    peak = max(avg0, avg1, avg2)
    if peak < 1.0:
        peak = 1.0

    l0 = average_to_level(avg0, peak)
    l1 = average_to_level(avg1, peak)
    l2 = average_to_level(avg2, peak)
    return (l0, l1, l2, peak)


def levels_to_graph(levels):
    """
    Convert an array of levels into a unicode string graph.

    Each level in the levels array is an integer 0-3. Those levels will be
    represented in the graph by 1-4 dots each.

    The returned string will contain two levels per rune.
    """
    if len(levels) % 2 == 1:
        # Left pad uneven-length arrays with an empty column
        levels = [-1] + levels

    # From: http://stackoverflow.com/a/19177754/473672
    unicodify = chr
    try:
        # Python 2
        unicodify = unichr  # type: ignore
    except NameError:
        # Python 3
        pass

    # https://en.wikipedia.org/wiki/Braille_Patterns#Identifying.2C_naming_and_ordering
    LEFT_BAR = [0x00, 0x40, 0x44, 0x46, 0x47]
    RIGHT_BAR = [0x00, 0x80, 0xA0, 0xB0, 0xB8]

    graph = ""
    for index in range(0, len(levels) - 1, 2):
        left_level = levels[index] + 1
        right_level = levels[index + 1] + 1
        code = 0x2800 + LEFT_BAR[left_level] + RIGHT_BAR[right_level]
        graph += unicodify(code)

    return graph


def get_load_values():
    # type: () -> Tuple[float, float, float]
    """
    Returns three system load numbers:
    * The first is the average system load over the last 0m-1m
    * The second is the average system load over the last 1m-5m
    * The third is the average system load over the last 5m-15m
    """
    avg1, avg5, avg15 = os.getloadavg()

    avg0to1 = avg1
    avg1to5 = (5 * avg5 - avg1) / 4.0
    avg5to15 = (15 * avg15 - 5 * avg5) / 10.0

    return (avg0to1, avg1to5, avg5to15)


def get_load_string(load_values=None):
    # type: (Tuple[float, float, float]) -> text_type
    """
    Example return string, underlines indicate bold:
    "1.5 [4 cores | 8 virtual] [15m load history: GRAPH]"
     ^^^  ^^^^^^^                                 ^^^^^

    Load number is color coded:
    * <= physical core count: Green
    * <= virtual core count: Yellow
    * Larger: Red
    """
    if load_values is None:
        load_values = get_load_values()

    avg0to1, avg1to5, avg5to15 = load_values

    load_string = u"{:.1f}".format(avg0to1)
    if avg0to1 <= physical:
        load_string = px_terminal.green(load_string)
    elif avg0to1 <= logical:
        load_string = px_terminal.yellow(load_string)
    else:
        load_string = px_terminal.red(load_string)

    recent, between, old, peak = averages_to_levels(avg0to1, avg1to5, avg5to15)
    graph = levels_to_graph([old] * 10 + [between] * 4 + [recent])

    # Increase intensity for more recent times
    graph = px_terminal.faint(graph[0:3]) + graph[3:6] + px_terminal.bold(graph[6:])

    return u"{} {} [15m load history: {}]".format(load_string, cores_string, graph)
| mit | 1,131,774,955,498,635,500 | 27.939394 | 87 | 0.624607 | false |
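`levels_to_graph` above packs two 0-3 levels into one Braille rune by adding column patterns onto the U+2800 base code point. A small standalone check of that encoding, reusing the same LEFT_BAR/RIGHT_BAR tables for illustration, is:

```python
LEFT_BAR = [0x00, 0x40, 0x44, 0x46, 0x47]
RIGHT_BAR = [0x00, 0x80, 0xA0, 0xB0, 0xB8]

def pair_to_rune(left_level, right_level):
    # Levels are -1..3 in the original code (-1 marks an empty padding column),
    # so shift by one before indexing the bar tables.
    return chr(0x2800 + LEFT_BAR[left_level + 1] + RIGHT_BAR[right_level + 1])

print(pair_to_rune(3, 1))   # tall left column, short right column
print(pair_to_rune(-1, 0))  # empty left column next to a flat right column
```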
stormi/tsunami | src/primaires/affection/defaut/__init__.py | 1 | 1663 | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant les affections par défaut."""
from . import personnage
from . import salle
| bsd-3-clause | 1,727,723,433,865,617,000 | 47.882353 | 79 | 0.777978 | false |
ajylee/gpaw-rtxs | gpaw/response/bse.py | 1 | 25096 | from time import time, ctime
import numpy as np
import pickle
from math import pi
from ase.units import Hartree
from ase.io import write
from gpaw.io.tar import Writer, Reader
from gpaw.mpi import world, size, rank, serial_comm
from gpaw.utilities.blas import gemmdot, gemm, gemv
from gpaw.utilities import devnull
from gpaw.utilities.memory import maxrss
from gpaw.response.base import BASECHI
from gpaw.response.parallel import parallel_partition
from gpaw.response.df import DF
class BSE(BASECHI):
"""This class defines Belth-Selpether equations."""
def __init__(self,
calc=None,
nbands=None,
nc=None,
nv=None,
w=None,
q=None,
eshift=None,
ecut=10.,
eta=0.2,
rpad=np.array([1,1,1]),
vcut=None,
ftol=1e-5,
txt=None,
optical_limit=False,
positive_w=False, # True : use Tamm-Dancoff Approx
use_W=True, # True: include screened interaction kernel
qsymm=True):
BASECHI.__init__(self, calc=calc, nbands=nbands, w=w, q=q,
eshift=eshift, ecut=ecut, eta=eta, rpad=rpad,
ftol=ftol, txt=txt, optical_limit=optical_limit)
self.epsilon_w = None
self.positive_w = positive_w
self.vcut = vcut
self.nc = nc # conduction band index
self.nv = nv # valence band index
self.use_W = use_W
self.qsymm = qsymm
def initialize(self):
self.printtxt('')
self.printtxt('-----------------------------------------------')
self.printtxt('Bethe Salpeter Equation calculation started at:')
self.printtxt(ctime())
BASECHI.initialize(self)
calc = self.calc
self.kd = kd = calc.wfs.kd
# frequency points init
self.dw = self.w_w[1] - self.w_w[0]
assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all() # make sure its linear w grid
assert self.w_w.max() == self.w_w[-1]
self.dw /= Hartree
self.w_w /= Hartree
self.wmax = self.w_w[-1]
self.Nw = int(self.wmax / self.dw) + 1
# band init
if self.nc is None and self.positive_w is True: # applied only to semiconductor
nv = self.nvalence / 2 - 1
self.nv = np.array([nv, nv+1]) # conduction band start / end
self.nc = np.array([nv+1, nv+2]) # valence band start / end
self.printtxt('Number of electrons: %d' %(self.nvalence))
self.printtxt('Valence band included : (band %d to band %d)' %(self.nv[0],self.nv[1]-1))
self.printtxt('Conduction band included : (band %d to band %d)' %(self.nc[0],self.nc[1]-1))
elif self.nc == 'all' or self.positive_w is False: # applied to metals
self.nv = np.array([0, self.nbands])
self.nc = np.array([0, self.nbands])
self.printtxt('All the bands are included')
else:
self.printtxt('User defined bands for BSE.')
self.printtxt('Valence band included: (band %d to band %d)' %(self.nv[0],self.nv[1]-1))
self.printtxt('Conduction band included: (band %d to band %d)' %(self.nc[0],self.nc[1]-1))
# find the pair index and initialized pair energy (e_i - e_j) and occupation(f_i-f_j)
self.e_S = {}
focc_s = {}
self.Sindex_S3 = {}
iS = 0
kq_k = self.kq_k
for k1 in range(self.nkpt):
ibzkpt1 = kd.bz2ibz_k[k1]
ibzkpt2 = kd.bz2ibz_k[kq_k[k1]]
for n1 in range(self.nv[0], self.nv[1]):
for m1 in range(self.nc[0], self.nc[1]):
focc = self.f_kn[ibzkpt1,n1] - self.f_kn[ibzkpt2,m1]
if not self.positive_w: # Dont use Tamm-Dancoff Approx.
check_ftol = np.abs(focc) > self.ftol
else:
check_ftol = focc > self.ftol
if check_ftol:
self.e_S[iS] =self.e_kn[ibzkpt2,m1] - self.e_kn[ibzkpt1,n1]
focc_s[iS] = focc
self.Sindex_S3[iS] = (k1, n1, m1)
iS += 1
self.nS = iS
self.focc_S = np.zeros(self.nS)
for iS in range(self.nS):
self.focc_S[iS] = focc_s[iS]
if self.use_W:
# q points init
self.bzq_qc = kd.get_bz_q_points()
if not self.qsymm:
self.ibzq_qc = self.bzq_qc
else:
# if use q symmetry, kpoint and qpoint grid should be the same
(self.ibzq_qc, self.ibzq_q, self.iop_q,
self.timerev_q, self.diff_qc) = kd.get_ibz_q_points(self.bzq_qc,
calc.wfs.symmetry.op_scc)
if np.abs(self.bzq_qc - self.bzk_kc).sum() < 1e-8:
assert np.abs(self.ibzq_qc - kd.ibzk_kc).sum() < 1e-8
self.nibzq = len(self.ibzq_qc)
# parallel init
self.Scomm = world
# kcomm and wScomm is only to be used when wavefunctions r parallelly distributed.
self.kcomm = world
self.wScomm = serial_comm
self.nS, self.nS_local, self.nS_start, self.nS_end = parallel_partition(
self.nS, world.rank, world.size, reshape=False)
self.print_bse()
if calc.input_parameters['mode'] == 'lcao':
calc.initialize_positions()
# Coulomb kernel init
self.kc_G = np.zeros(self.npw)
for iG in range(self.npw):
index = self.Gindex_G[iG]
qG = np.dot(self.q_c + self.Gvec_Gc[iG], self.bcell_cv)
self.kc_G[iG] = 1. / np.inner(qG, qG)
if self.optical_limit:
self.kc_G[0] = 0.
self.printtxt('')
return
def calculate(self):
calc = self.calc
f_kn = self.f_kn
e_kn = self.e_kn
ibzk_kc = self.ibzk_kc
bzk_kc = self.bzk_kc
kq_k = self.kq_k
focc_S = self.focc_S
e_S = self.e_S
op_scc = calc.wfs.symmetry.op_scc
if self.use_W:
bzq_qc=self.bzq_qc
ibzq_qc = self.ibzq_qc
if type(self.use_W) is str:
# read
data = pickle.load(open(self.use_W))
W_qGG = data['W_qGG']
self.dfinvG0_G = data['dfinvG0_G']
self.printtxt('Finished reading screening interaction kernel')
elif type(self.use_W) is bool:
# calculate from scratch
self.printtxt('Calculating screening interaction kernel.')
W_qGG = self.full_static_screened_interaction()
else:
raise ValueError('use_W can only be string or bool ')
# calculate phi_qaGp
import os.path
if not os.path.isfile('phi_qaGp'):
self.printtxt('Calculating phi_qaGp')
self.get_phi_qaGp()
world.barrier()
self.reader = Reader('phi_qaGp')
self.printtxt('Finished reading phi_aGp !')
self.printtxt('Memory used %f M' %(maxrss() / 1024.**2))
else:
self.phi_aGp = self.get_phi_aGp()
# calculate kernel
K_SS = np.zeros((self.nS, self.nS), dtype=complex)
W_SS = np.zeros_like(K_SS)
self.rhoG0_S = np.zeros((self.nS), dtype=complex)
t0 = time()
self.printtxt('Calculating BSE matrix elements.')
noGmap = 0
for iS in range(self.nS_start, self.nS_end):
k1, n1, m1 = self.Sindex_S3[iS]
rho1_G = self.density_matrix(n1,m1,k1)
self.rhoG0_S[iS] = rho1_G[0]
for jS in range(self.nS):
k2, n2, m2 = self.Sindex_S3[jS]
rho2_G = self.density_matrix(n2,m2,k2)
K_SS[iS, jS] = np.sum(rho1_G.conj() * rho2_G * self.kc_G)
if self.use_W:
rho3_G = self.density_matrix(n1,n2,k1,k2)
rho4_G = self.density_matrix(m1,m2,self.kq_k[k1],self.kq_k[k2])
q_c = bzk_kc[k2] - bzk_kc[k1]
q_c[np.where(q_c > 0.501)] -= 1.
q_c[np.where(q_c < -0.499)] += 1.
if not self.qsymm:
ibzq = self.kd.where_is_q(q_c, self.bzq_qc)
W_GG = W_qGG[ibzq].copy()
else:
iq = self.kd.where_is_q(q_c, self.bzq_qc)
ibzq = self.ibzq_q[iq]
iop = self.iop_q[iq]
timerev = self.timerev_q[iq]
diff_c = self.diff_qc[iq]
invop = np.linalg.inv(op_scc[iop])
W_GG_tmp = W_qGG[ibzq]
Gindex = np.zeros(self.npw,dtype=int)
for iG in range(self.npw):
G_c = self.Gvec_Gc[iG]
if timerev:
RotG_c = -np.int8(np.dot(invop, G_c+diff_c).round())
else:
RotG_c = np.int8(np.dot(invop, G_c+diff_c).round())
tmp_G = np.abs(self.Gvec_Gc - RotG_c).sum(axis=1)
try:
Gindex[iG] = np.where(tmp_G < 1e-5)[0][0]
except:
noGmap += 1
Gindex[iG] = -1
W_GG = np.zeros_like(W_GG_tmp)
for iG in range(self.npw):
for jG in range(self.npw):
if Gindex[iG] == -1 or Gindex[jG] == -1:
W_GG[iG, jG] = 0
else:
W_GG[iG, jG] = W_GG_tmp[Gindex[iG], Gindex[jG]]
if k1 == k2:
if (n1==n2) or (m1==m2):
tmp_G = np.zeros(self.npw, dtype=complex)
q = np.array([0.0001,0,0])
for jG in range(1, self.npw):
qG = np.dot(q+self.Gvec_Gc[jG], self.bcell_cv)
tmp_G[jG] = self.dfinvG0_G[jG] / np.sqrt(np.inner(qG,qG))
const = 1./pi*self.vol*(6*pi**2/self.vol/self.nkpt)**(2./3.)
tmp_G *= const
W_GG[:,0] = tmp_G
W_GG[0,:] = tmp_G.conj()
W_GG[0,0] = 2./pi*(6*pi**2/self.vol/self.nkpt)**(1./3.) \
* self.dfinvG0_G[0] *self.vol
tmp_GG = np.outer(rho3_G.conj(), rho4_G) * W_GG
W_SS[iS, jS] = np.sum(tmp_GG)
# self.printtxt('%d %d %s %s' %(iS, jS, K_SS[iS,jS], W_SS[iS,jS]))
self.timing(iS, t0, self.nS_local, 'pair orbital')
K_SS *= 4 * pi / self.vol
if self.use_W:
K_SS -= 0.5 * W_SS / self.vol
world.sum(K_SS)
world.sum(self.rhoG0_S)
self.printtxt('The number of G index outside the Gvec_Gc: %d'%(noGmap))
# get and solve hamiltonian
H_SS = np.zeros_like(K_SS)
for iS in range(self.nS):
H_SS[iS,iS] = e_S[iS]
for jS in range(self.nS):
H_SS[iS,jS] += focc_S[iS] * K_SS[iS,jS]
if self.positive_w is True: # matrix should be Hermitian
for iS in range(self.nS):
for jS in range(self.nS):
if np.abs(H_SS[iS,jS]- H_SS[jS,iS].conj()) > 1e-4:
print iS, jS, H_SS[iS,jS]- H_SS[jS,iS].conj()
# assert np.abs(H_SS[iS,jS]- H_SS[jS,iS].conj()) < 1e-4
# make the matrix hermitian
if self.use_W:
H_SS = (np.real(H_SS) + np.real(H_SS.T)) / 2. + 1j * (np.imag(H_SS) - np.imag(H_SS.T)) /2.
# if not self.positive_w:
self.w_S, self.v_SS = np.linalg.eig(H_SS)
# else:
# from gpaw.utilities.lapack import diagonalize
# self.w_S = np.zeros(self.nS, dtype=complex)
# diagonalize(H_SS, self.w_S)
# self.v_SS = H_SS.T.copy() # eigenvectors in the rows
data = {
'w_S': self.w_S,
'v_SS':self.v_SS,
'rhoG0_S':self.rhoG0_S
}
if rank == 0:
pickle.dump(data, open('H_SS.pckl', 'w'), -1)
return
def full_static_screened_interaction(self):
"""Calcuate W_GG(q)"""
W_qGG = np.zeros((self.nibzq, self.npw, self.npw),dtype=complex)
t0 = time()
for iq in range(self.nibzq):#self.q_start, self.q_end):
W_qGG[iq] = self.screened_interaction_kernel(iq, static=True)
self.timing(iq, t0, self.nibzq, 'iq')
data = {'W_qGG': W_qGG,
'dfinvG0_G': self.dfinvG0_G}
if rank == 0:
pickle.dump(data, open('W_qGG.pckl', 'w'), -1)
return W_qGG
def print_bse(self):
printtxt = self.printtxt
if self.use_W:
printtxt('Number of q points : %d' %(self.nibzq))
printtxt('Number of frequency points : %d' %(self.Nw) )
printtxt('Number of pair orbitals : %d' %(self.nS) )
printtxt('Parallelization scheme:')
printtxt(' Total cpus : %d' %(world.size))
printtxt(' pair orb parsize : %d' %(self.Scomm.size))
return
def get_phi_qaGp(self):
N1_max = 0
N2_max = 0
natoms = len(self.calc.wfs.setups)
for id in range(natoms):
N1 = self.npw
N2 = self.calc.wfs.setups[id].ni**2
if N1 > N1_max:
N1_max = N1
if N2 > N2_max:
N2_max = N2
nbzq = self.nkpt
nbzq, nq_local, q_start, q_end = parallel_partition(
nbzq, world.rank, world.size, reshape=False)
phimax_qaGp = np.zeros((nq_local, natoms, N1_max, N2_max), dtype=complex)
for iq in range(nq_local):
self.printtxt('%d' %(iq))
q_c = self.bzq_qc[iq + q_start]
tmp_aGp = self.get_phi_aGp(q_c)
for id in range(natoms):
N1, N2 = tmp_aGp[id].shape
phimax_qaGp[iq, id, :N1, :N2] = tmp_aGp[id]
world.barrier()
# write to disk
filename = 'phi_qaGp'
if world.rank == 0:
w = Writer(filename)
w.dimension('nbzq', nbzq)
w.dimension('natoms', natoms)
w.dimension('nG', N1_max)
w.dimension('nii', N2_max)
w.add('phi_qaGp', ('nbzq', 'natoms', 'nG', 'nii',), dtype=complex)
for q in range(nbzq):
residual = nbzq % size
N_local = nbzq // size
if q < residual * (N_local + 1):
qrank = q // (N_local + 1)
else:
qrank = (q - residual * (N_local + 1)) // N_local + residual
if qrank == 0:
if world.rank == 0:
phi_aGp = phimax_qaGp[q - q_start]
else:
if world.rank == qrank:
phi_aGp = phimax_qaGp[q - q_start]
world.send(phi_aGp, 0, q)
elif world.rank == 0:
world.receive(phi_aGp, qrank, q)
if world.rank == 0:
w.fill(phi_aGp)
world.barrier()
if world.rank == 0:
w.close()
return
def load_phi_aGp(self, reader, iq):
phimax_aGp = np.array(reader.get('phi_qaGp', iq), complex)
phi_aGp = {}
natoms = len(phimax_aGp)
for a in range(natoms):
N1 = self.npw
N2 = self.calc.wfs.setups[a].ni**2
phi_aGp[a] = phimax_aGp[a, :N1, :N2]
return phi_aGp
def get_dielectric_function(self, filename='df.dat', readfile=None, overlap=True):
if self.epsilon_w is None:
self.initialize()
if readfile is None:
self.calculate()
self.printtxt('Calculating dielectric function.')
else:
data = pickle.load(open(readfile))
self.w_S = data['w_S']
self.v_SS = data['v_SS']
self.rhoG0_S = data['rhoG0_S']
self.printtxt('Finished reading H_SS.pckl')
w_S = self.w_S
v_SS = self.v_SS # v_SS[:,lamda]
rhoG0_S = self.rhoG0_S
focc_S = self.focc_S
# get overlap matrix
if not self.positive_w:
tmp = np.dot(v_SS.conj().T, v_SS )
overlap_SS = np.linalg.inv(tmp)
# get chi
epsilon_w = np.zeros(self.Nw, dtype=complex)
t0 = time()
A_S = np.dot(rhoG0_S, v_SS)
B_S = np.dot(rhoG0_S*focc_S, v_SS)
if not self.positive_w:
C_S = np.dot(B_S.conj(), overlap_SS.T) * A_S
else:
C_S = B_S.conj() * A_S
for iw in range(self.Nw):
tmp_S = 1. / (iw*self.dw - w_S + 1j*self.eta)
epsilon_w[iw] += np.dot(tmp_S, C_S)
epsilon_w *= - 4 * pi / np.inner(self.qq_v, self.qq_v) / self.vol
epsilon_w += 1
self.epsilon_w = epsilon_w
if rank == 0:
f = open(filename,'w')
for iw in range(self.Nw):
energy = iw * self.dw * Hartree
print >> f, energy, np.real(epsilon_w[iw]), np.imag(epsilon_w[iw])
f.close()
# Wait for I/O to finish
world.barrier()
"""Check f-sum rule."""
N1 = 0
for iw in range(self.Nw):
w = iw * self.dw
N1 += np.imag(epsilon_w[iw]) * w
N1 *= self.dw * self.vol / (2 * pi**2)
self.printtxt('')
self.printtxt('Sum rule:')
nv = self.nvalence
self.printtxt('N1 = %f, %f %% error' %(N1, (N1 - nv) / nv * 100) )
return epsilon_w
def get_e_h_density(self, lamda=None, filename=None):
if filename is not None:
self.load(filename)
self.initialize()
gd = self.gd
w_S = self.w_S
v_SS = self.v_SS
A_S = v_SS[:, lamda]
kq_k = self.kq_k
kd = self.kd
# Electron density
nte_R = gd.zeros()
for iS in range(self.nS_start, self.nS_end):
print 'electron density:', iS
k1, n1, m1 = self.Sindex_S3[iS]
ibzkpt1 = kd.bz2ibz_k[k1]
psitold_g = self.get_wavefunction(ibzkpt1, n1)
psit1_g = kd.transform_wave_function(psitold_g, k1)
for jS in range(self.nS):
k2, n2, m2 = self.Sindex_S3[jS]
if m1 == m2 and k1 == k2:
psitold_g = self.get_wavefunction(ibzkpt1, n2)
psit2_g = kd.transform_wave_function(psitold_g, k1)
nte_R += A_S[iS] * A_S[jS].conj() * psit1_g.conj() * psit2_g
# Hole density
nth_R = gd.zeros()
for iS in range(self.nS_start, self.nS_end):
print 'hole density:', iS
k1, n1, m1 = self.Sindex_S3[iS]
ibzkpt1 = kd.bz2ibz_k[kq_k[k1]]
psitold_g = self.get_wavefunction(ibzkpt1, m1)
psit1_g = kd.transform_wave_function(psitold_g, kq_k[k1])
for jS in range(self.nS):
k2, n2, m2 = self.Sindex_S3[jS]
if n1 == n2 and k1 == k2:
psitold_g = self.get_wavefunction(ibzkpt1, m2)
psit2_g = kd.transform_wave_function(psitold_g, kq_k[k1])
nth_R += A_S[iS] * A_S[jS].conj() * psit1_g * psit2_g.conj()
self.Scomm.sum(nte_R)
self.Scomm.sum(nth_R)
if rank == 0:
write('rho_e.cube',self.calc.atoms, format='cube', data=nte_R)
write('rho_h.cube',self.calc.atoms, format='cube', data=nth_R)
world.barrier()
return
def get_excitation_wavefunction(self, lamda=None,filename=None, re_c=None, rh_c=None):
""" garbage at the moment. come back later"""
if filename is not None:
self.load(filename)
self.initialize()
gd = self.gd
w_S = self.w_S
v_SS = self.v_SS
A_S = v_SS[:, lamda]
kq_k = self.kq_k
kd = self.kd
nx, ny, nz = self.nG[0], self.nG[1], self.nG[2]
nR = 9
nR2 = (nR - 1 ) // 2
if re_c is not None:
psith_R = gd.zeros(dtype=complex)
psith2_R = np.zeros((nR*nx, nR*ny, nz), dtype=complex)
elif rh_c is not None:
psite_R = gd.zeros(dtype=complex)
psite2_R = np.zeros((nR*nx, ny, nR*nz), dtype=complex)
else:
self.printtxt('No wavefunction output !')
return
for iS in range(self.nS_start, self.nS_end):
k, n, m = self.Sindex_S3[iS]
ibzkpt1 = kd.bz2ibz_k[k]
ibzkpt2 = kd.bz2ibz_k[kq_k[k]]
print 'hole wavefunction', iS, (k,n,m),A_S[iS]
psitold_g = self.get_wavefunction(ibzkpt1, n)
psit1_g = kd.transform_wave_function(psitold_g, k)
psitold_g = self.get_wavefunction(ibzkpt2, m)
psit2_g = kd.transform_wave_function(psitold_g, kq_k[k])
if re_c is not None:
# given electron position, plot hole wavefunction
tmp = A_S[iS] * psit1_g[re_c].conj() * psit2_g
psith_R += tmp
k_c = self.bzk_kc[k] + self.q_c
for i in range(nR):
for j in range(nR):
R_c = np.array([i-nR2, j-nR2, 0])
psith2_R[i*nx:(i+1)*nx, j*ny:(j+1)*ny, 0:nz] += \
tmp * np.exp(1j*2*pi*np.dot(k_c,R_c))
elif rh_c is not None:
# given hole position, plot electron wavefunction
tmp = A_S[iS] * psit1_g.conj() * psit2_g[rh_c] * self.expqr_g
psite_R += tmp
k_c = self.bzk_kc[k]
k_v = np.dot(k_c, self.bcell_cv)
for i in range(nR):
for j in range(nR):
R_c = np.array([i-nR2, 0, j-nR2])
R_v = np.dot(R_c, self.acell_cv)
assert np.abs(np.dot(k_v, R_v) - np.dot(k_c, R_c) * 2*pi).sum() < 1e-5
psite2_R[i*nx:(i+1)*nx, 0:ny, j*nz:(j+1)*nz] += \
tmp * np.exp(-1j*np.dot(k_v,R_v))
else:
pass
if re_c is not None:
self.Scomm.sum(psith_R)
self.Scomm.sum(psith2_R)
if rank == 0:
write('psit_h.cube',self.calc.atoms, format='cube', data=psith_R)
atoms = self.calc.atoms
shift = atoms.cell[0:2].copy()
positions = atoms.positions
atoms.cell[0:2] *= nR2
atoms.positions += shift * (nR2 - 1)
write('psit_bigcell_h.cube',atoms, format='cube', data=psith2_R)
elif rh_c is not None:
self.Scomm.sum(psite_R)
self.Scomm.sum(psite2_R)
if rank == 0:
write('psit_e.cube',self.calc.atoms, format='cube', data=psite_R)
atoms = self.calc.atoms
# shift = atoms.cell[0:2].copy()
positions = atoms.positions
atoms.cell[0:2] *= nR2
# atoms.positions += shift * (nR2 - 1)
write('psit_bigcell_e.cube',atoms, format='cube', data=psite2_R)
else:
pass
world.barrier()
return
def load(self, filename):
data = pickle.load(open(filename))
self.w_S = data['w_S']
self.v_SS = data['v_SS']
self.printtxt('Read succesfully !')
def save(self, filename):
"""Dump essential data"""
data = {'w_S' : self.w_S,
'v_SS' : self.v_SS}
if rank == 0:
pickle.dump(data, open(filename, 'w'), -1)
world.barrier()
| gpl-3.0 | -5,935,395,398,127,700,000 | 35.318379 | 114 | 0.456447 | false |
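Typical use of the BSE class above would start from a converged GPAW ground state and then build the dielectric function. The sketch below is illustrative only and assumes a working installation of the GPAW version this fork tracks; the restart file name and all parameter values are placeholders, not recommendations:

```python
import numpy as np
from gpaw import GPAW
from gpaw.response.bse import BSE  # module path as in this repository snapshot

calc = GPAW('groundstate.gpw')  # hypothetical ground-state restart file
bse = BSE(calc=calc,
          nbands=20,
          w=np.linspace(0.0, 10.0, 501),      # frequency grid in eV
          q=np.array([0.0001, 0.0, 0.0]),     # small q for the optical limit
          ecut=50.0,
          eta=0.2,
          optical_limit=True)
epsilon_w = bse.get_dielectric_function(filename='df.dat')
```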
chrisspen/burlap | burlap/tests/test_common.py | 1 | 12013 | """
Tests for the common module.
Run like:
tox -c tox-full.ini -e py27-ubuntu_16_04_64 -- -s burlap/tests/test_common.py::CommonTests::test_satchel_ordering
"""
from __future__ import print_function
import os
import sys
import tempfile
import unittest
import getpass
from pprint import pprint
import six
from burlap import load_yaml_settings
from burlap.common import CMD_VAR_REGEX, CMD_ESCAPED_VAR_REGEX, shellquote, all_satchels, Satchel, env, get_satchel, clear_state, save_env, env
from burlap.decorators import task
from burlap.tests.base import TestCase
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
class _TestSatchel(Satchel):
name = 'test'
def configure(self):
pass
class CommonTests(TestCase):
def get_test_satchel(self):
test = _TestSatchel()
test.genv.hosts = ['localhost']
test.genv.host_string = test.genv.hosts[0]
return test
def setUp(self):
super(CommonTests, self).setUp()
# Importing ourself register us in sys.modules, which burlap uses to track satchels.
# This is necessary to instantiate this satchel when running this testcase separately.
import test_common # pylint: disable=import-self
env.hosts = ['localhost']
env.host_string = env.hosts[0]
env.user = getpass.getuser()
env.always_use_pty = False
def test_shellquote(self):
s = """# /etc/cron.d/anacron: crontab entries for the anacron package
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# minute hour day month weekday (0-6, 0 = Sunday) user command
*/5 * * * * root {command}
"""
s = shellquote(s)
def test_format_regex(self):
test = self.get_test_satchel()
assert CMD_VAR_REGEX.findall('{cmd} {host}') == ['cmd', 'host']
assert CMD_VAR_REGEX.findall("{cmd} {host} | {awk_cmd} '{{ print $1 }}'") == ['cmd', 'host', 'awk_cmd']
assert CMD_ESCAPED_VAR_REGEX.findall("{cmd}} {{ print hello }}") == ['{{ print hello }}']
r = test.local_renderer
r.env.host = 'myhost'
r.local("getent {host} | awk '{{ print $1 }}'", dryrun=1)
s = "rsync --recursive --verbose --perms --times --links --compress --copy-links {exclude_str} ' \
'--delete --delete-before --force {rsync_auth} {rsync_source_dir} {rsync_target_host}{rsync_target_dir}"
assert CMD_VAR_REGEX.findall(s) == ['exclude_str', 'rsync_auth', 'rsync_source_dir', 'rsync_target_host', 'rsync_target_dir']
def test_settings_include(self):
try:
os.makedirs('/tmp/burlap_test/roles/all')
except OSError:
pass
try:
os.makedirs('/tmp/burlap_test/roles/prod')
except OSError:
pass
open('/tmp/burlap_test/roles/all/settings.yaml', 'w').write("""
only_all_param: "just in all"
overridden_by_prod: 123
overridden_by_local: slkdjflsk
""")
open('/tmp/burlap_test/roles/prod/settings.yaml', 'w').write("""inherits: all
overridden_by_prod: 'prod'
only_prod_param: 7891
overriden_by_include: 7892
overridden_by_local: oiuweoiruwo
#includes: [settings_include1.yaml]
includes: [settings_include2.yaml]
""")
open('/tmp/burlap_test/roles/prod/settings_include2.yaml', 'w').write("""
overriden_by_include: xyz
overridden_by_local: ovmxlkfsweirwio
""")
open('/tmp/burlap_test/roles/prod/settings_local.yaml', 'w').write("""
overridden_by_local: 'hello world'
includes: [settings_include3.yaml]
""")
open('/tmp/burlap_test/roles/prod/settings_include3.yaml', 'w').write("""
set_by_include3: 'some special setting'
""")
os.chdir('/tmp/burlap_test')
config = load_yaml_settings(name='prod')
assert config['includes'] == ['settings_include2.yaml', 'settings_include3.yaml']
assert config['only_all_param'] == 'just in all'
assert config['overridden_by_prod'] == 'prod'
assert config['only_prod_param'] == 7891
assert config['overriden_by_include'] == 'xyz'
assert config['overridden_by_local'] == 'hello world'
assert config['set_by_include3'] == 'some special setting'
def test_renderer(self):
test = self.get_test_satchel()
# Confirm renderer is cached.
r1 = test.local_renderer
r2 = test.local_renderer
assert r1 is r2
# Confirm clear method.
test.clear_local_renderer()
r3 = test.local_renderer
assert r1 is not r3
r = r3
env.clear()
assert r.genv is env
# Confirm local env var gets renderered.
r.env.var1 = 'a'
assert r.format('{var1}') == 'a'
# Confirm global env var in local namespace gets rendered.
env.test_var2 = 'b'
assert r.format('{var2}') == 'b'
# Confirm global env var in global namespace gets rendered.
env.test_var2 = 'b2'
assert r.format('{test_var2}') == 'b2'
# Confirm global env var overridden in local namespace get rendered.
env.apache_var3 = '0'
r.env.apache_var3 = 'c'
assert r.format('{apache_var3}') == 'c'
# Confirm recursive template variables get rendered.
r.env.some_template = '{target_value}'
r.env.target_value = 'd'
assert r.format('{some_template}') == 'd'
class ApacheSatchel(Satchel):
name = 'apache'
def configure(self):
pass
apache = ApacheSatchel()
r = apache.local_renderer
r.env.application_name = 'someappname'
r.env.site = 'sitename'
r.env.wsgi_path = '/usr/local/{apache_application_name}/src/wsgi/{apache_site}.wsgi'
assert r.format(r.env.wsgi_path) == '/usr/local/someappname/src/wsgi/sitename.wsgi'
def test_iter_sites(self):
test = self.get_test_satchel()
env0 = save_env()
env.sites = {
'site1': {'apache_ssl': False, 'site': 'mysite'},
'site2': {'apache_ssl': True},
}
lst = list(test.iter_sites())
print('lst:', lst)
assert len(lst) == 2
lst = list(test.iter_sites(site='site2'))
print('lst:', lst)
assert len(lst) == 1
# Confirm all non-default keys were removed.
assert set(_ for _ in env0 if not _.startswith('_')) == set(_ for _ in env if not _.startswith('_'))
site_iter = test.iter_sites()
six.next(site_iter)
print('env.SITE:', env.SITE)
assert env.SITE == 'site1'
six.next(site_iter)
print('env.SITE:', env.SITE)
assert env.SITE == 'site2'
def test_append(self):
test = self.get_test_satchel()
test.genv.host_string = 'localhost'
_, fn = tempfile.mkstemp()
text = '[{rabbit, [{loopback_users, []}]}].'
test.append(filename=fn, text=text)
content = open(fn).read()
print('content0:', content)
assert content.count(text) == 1
# Confirm duplicate lines are appended.
test.append(filename=fn, text=text)
content = open(fn).read()
print('content1:', content)
assert content.count(text) == 1
def test_set_verbose(self):
from burlap.common import set_verbose, get_verbose
set_verbose(True)
assert get_verbose()
set_verbose(False)
assert not get_verbose()
set_verbose(1)
assert get_verbose()
set_verbose(0)
assert not get_verbose()
def test_satchel_ordering(self):
from burlap.deploy import deploy as deploy_satchel
# Purge any pre-existing satchels from global registeries so we only get results for our custom satchels.
clear_state()
# These test satchels should be dependent in the order c<-a<-b
class ASatchel(Satchel):
name = 'a'
def set_defaults(self):
self.env.param = 123
@task(precursors=['c'])
def configure(self):
pass
class BSatchel(Satchel):
name = 'b'
def set_defaults(self):
self.env.param = 123
@task(precursors=['a', 'c'])
def configure(self):
pass
class CSatchel(Satchel):
name = 'c'
def set_defaults(self):
self.env.param = 123
@task
def configure(self):
pass
a_satchel = ASatchel()
b_satchel = BSatchel()
c_satchel = CSatchel()
try:
assert set(all_satchels) == set(['A', 'B', 'C'])
deploy_satchel.genv.services = ['a', 'b', 'c']
#assert init_plan_data_dir() == '.burlap/plans'
env.ROLE = 'local'
#components, plan_funcs = deploy.preview(components=['A', 'B', 'C'])
deploy_satchel.verbose = True
components, plan_funcs = deploy_satchel.get_component_funcs(components=['A', 'B', 'C'])
expected_components = ['C', 'A', 'B']
print()
print('components:', components)
print('expected_components:', expected_components)
print('plan_funcs:', plan_funcs)
task_names = [_0 for _0, _1 in plan_funcs]
assert components == expected_components
assert task_names == ['c.configure', 'a.configure', 'b.configure']
finally:
a_satchel.unregister()
del a_satchel
with self.assertRaises(KeyError):
get_satchel('a')
# import gc
# refs = gc.get_referrers(a_satchel)
# print('refs:', refs)
b_satchel.unregister()
c_satchel.unregister()
def test_state_clearing(self):
from burlap.common import get_state, clear_state, set_state, all_satchels
actual = sorted(all_satchels.keys())
print('actual satchels:\n', actual)
expected = [
'APACHE', 'AVAHI', 'BLUETOOTH', 'BUILDBOT', 'CELERY', 'CLOUDFRONT', 'CRON',
'DEBUG', 'DEPLOY', 'DEPLOYMENTNOTIFIER', 'DJ', 'DNS', 'EC2MONITOR', 'ELASTICSEARCH', 'FILE',
'GIT', 'GITCHECKER', 'GITTRACKER', 'GPSD', 'GROUP', 'HOST', 'HOSTNAME', 'HOSTSFILE',
'INADYN', 'IP', 'JIRAHELPER', 'JSHINT', 'LOCALES', 'LOGINNOTIFIER', 'MANIFEST', 'MONGODB', 'MOTION', 'MYSQL', 'MYSQLCLIENT', 'NM',
'NTPCLIENT', 'PACKAGER', 'PHANTOMJS', 'PIP', 'POSTFIX', 'POSTGRESQL', 'POSTGRESQLCLIENT', 'PROJECT',
'RABBITMQ', 'RDS', 'RPI', 'RSYNC', 'S3', 'SELENIUM', 'SERVICE', 'SNORT', 'SOFTWARERAID',
'SSHNICE', 'SSL', 'SUPERVISOR', 'TARBALL', 'TIMEZONE', 'UBUNTUMULTIVERSE',
'UNATTENDEDUPGRADES', 'USER', 'VAGRANT', 'VIRTUALBOX',
]
print('expected satchels:\n', expected)
assert actual == expected
burlap_state = get_state()
print('burlap_state:')
pprint(burlap_state, indent=4)
clear_state()
print('all_satchels.b:', sorted(all_satchels.keys()))
assert not all_satchels
set_state(burlap_state)
print('all_satchels.c:', sorted(all_satchels.keys()))
assert len(all_satchels) == len(expected)
def test_runs_once_clear(self):
from fabric.api import runs_once
from burlap.debug import debug
from burlap.common import runs_once_methods
a = ['abc']
@runs_once
def test_func():
return a[0]
print('a')
assert test_func() == 'abc'
a[0] = 'xyz'
del test_func.return_value
print('b')
assert test_func() == 'xyz'
print('c')
a[0] = 'hhh'
assert test_func() == 'xyz'
print('debug.shell:', debug.shell)
#assert hasattr(debug.shell, 'wrapped')
print('runs_once_methods:', runs_once_methods)
if __name__ == '__main__':
unittest.main()
| mit | 8,692,178,063,376,610,000 | 31.034667 | 143 | 0.573629 | false |
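`test_runs_once_clear` above exercises Fabric's `runs_once` decorator, which caches the wrapped function's result in a `return_value` attribute until that attribute is deleted. The snippet below is a minimal self-contained decorator with the same observable behaviour, written only to illustrate what the test relies on, not Fabric's actual implementation:

```python
import functools

def runs_once(func):
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        # Cache the first result on the wrapper; deleting the attribute forces a re-run.
        if not hasattr(decorated, 'return_value'):
            decorated.return_value = func(*args, **kwargs)
        return decorated.return_value
    return decorated

a = ['abc']

@runs_once
def test_func():
    return a[0]

assert test_func() == 'abc'
a[0] = 'xyz'
assert test_func() == 'abc'   # still the cached value
del test_func.return_value    # clear the cache, as the test above does
assert test_func() == 'xyz'
```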
edx-solutions/xblock-group-project | group_project/group_project.py | 1 | 40500 | # -*- coding: utf-8 -*-
#
# Imports ###########################################################
import json
import logging
import textwrap
from datetime import datetime, timedelta
from io import StringIO
from xml.etree import ElementTree as ET
import pytz
import webob
from django.conf import settings
from django.utils import html
from django.utils.translation import ugettext as _
from lxml import etree
from pkg_resources import resource_filename
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Dict, Float, Integer, Scope, String
from xblock.fragment import Fragment
from .api_error import ApiError
from .group_activity import GroupActivity
from .project_api import ProjectAPI
from .upload_file import UploadFile
from .utils import AttrDict, load_resource, render_template
ALLOWED_OUTSIDER_ROLES = getattr(settings, "ALLOWED_OUTSIDER_ROLES", None)
if ALLOWED_OUTSIDER_ROLES is None:
ALLOWED_OUTSIDER_ROLES = ["assistant"]
try:
from edx_notifications.data import NotificationMessage
except:
# Notifications is an optional runtime configuration, so it may not be available for import
pass
# Globals ###########################################################
log = logging.getLogger(__name__)
# Classes ###########################################################
def make_key(*args):
return ":".join([str(a) for a in args])
class OutsiderDisallowedError(Exception):
def __init__(self, detail):
self.value = detail
super(OutsiderDisallowedError, self).__init__()
def __str__(self):
return "Outsider Denied Access: {}".format(self.value)
def __unicode__(self):
return "Outsider Denied Access: {}".format(self.value)
@XBlock.wants('notifications')
@XBlock.wants('courseware_parent_info')
class GroupProjectBlock(XBlock):
"""
XBlock providing a group activity project for a group of students to collaborate upon.
"""
completion_mode = XBlockCompletionMode.EXCLUDED
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
default="Group Project"
)
weight = Float(
display_name="Weight",
help="This is the maximum score that the user receives when he/she successfully completes the problem",
scope=Scope.settings,
default=100.0
)
group_reviews_required_count = Integer(
display_name="Reviews Required Minimum",
help="The minimum number of group-reviews that should be applied to a set of submissions (set to 0 to be 'TA Graded')",
scope=Scope.settings,
default=3
)
user_review_count = Integer(
display_name="User Reviews Required Minimum",
help="The minimum number of other-group reviews that an individual user should perform",
scope=Scope.settings,
default=1
)
item_state = Dict(
help="JSON payload for assessment values",
scope=Scope.user_state
)
with open(resource_filename(__name__, 'res/default.xml'), "r") as default_xml_file:
default_xml = default_xml_file.read()
data = String(
display_name="",
help="XML contents to display for this module",
scope=Scope.content,
default=textwrap.dedent(default_xml)
)
has_score = True
_project_api = None
def _confirm_outsider_allowed(self):
granted_roles = [r["role"] for r in self.project_api.get_user_roles_for_course(self.user_id, self.course_id)]
for allowed_role in ALLOWED_OUTSIDER_ROLES:
if allowed_role in granted_roles:
return True
raise OutsiderDisallowedError("User does not have an allowed role")
_known_real_user_ids = {}
def real_user_id(self, anonymous_student_id):
if anonymous_student_id not in self._known_real_user_ids:
self._known_real_user_ids[anonymous_student_id] = self.xmodule_runtime.get_real_user(anonymous_student_id).id
return self._known_real_user_ids[anonymous_student_id]
@property
def milestone_dates(self):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
return group_activity.milestone_dates
@property
def project_api(self):
if self._project_api is None:
api_server = "http://127.0.0.1:18000"
if hasattr(settings, 'API_LOOPBACK_ADDRESS'):
api_server = settings.API_LOOPBACK_ADDRESS
self._project_api = ProjectAPI(api_server)
return self._project_api
@property
def user_id(self):
try:
return self.real_user_id(self.xmodule_runtime.anonymous_student_id)
except:
return None
_workgroup = None
@property
def workgroup(self):
if self._workgroup is None:
try:
user_prefs = self.project_api.get_user_preferences(self.user_id)
if "TA_REVIEW_WORKGROUP" in user_prefs:
self._confirm_outsider_allowed()
self._workgroup = self.project_api.get_workgroup_by_id(user_prefs["TA_REVIEW_WORKGROUP"])
else:
self._workgroup = self.project_api.get_user_workgroup_for_course(
self.user_id,
self.course_id
)
except OutsiderDisallowedError:
raise
except:
self._workgroup = {
"id": "0",
"users": [],
}
return self._workgroup
@property
def is_group_member(self):
return self.user_id in [u["id"] for u in self.workgroup["users"]]
@property
def is_admin_grader(self):
return not self.is_group_member
@property
def content_id(self):
try:
return str(self.scope_ids.usage_id)
except:
return self.id
@property
def course_id(self):
try:
return str(self.xmodule_runtime.course_id)
except:
return self.xmodule_runtime.course_id
def student_view(self, context):
"""
Player view, displayed to the student
"""
try:
workgroup = self.workgroup
except OutsiderDisallowedError as ode:
error_fragment = Fragment()
error_fragment.add_content(render_template('/templates/html/loading_error.html', {'error_message': str(ode)}))
error_fragment.add_javascript(load_resource('public/js/group_project_error.js'))
error_fragment.initialize_js('GroupProjectError')
return error_fragment
user_id = self.user_id
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
try:
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(workgroup["id"])
)
except:
pass
if self.is_group_member:
try:
team_members = [self.project_api.get_user_details(tm["id"]) for tm in workgroup["users"] if user_id != int(tm["id"])]
except:
team_members = []
try:
assess_groups = self.project_api.get_workgroups_to_review(user_id, self.course_id, self.content_id)
except:
assess_groups = []
else:
team_members = []
assess_groups = [workgroup]
context = {
"group_activity": group_activity,
"team_members": json.dumps(team_members),
"assess_groups": json.dumps(assess_groups),
"ta_graded": (self.group_reviews_required_count < 1),
}
fragment = Fragment()
fragment.add_content(
render_template('/templates/html/group_project.html', context))
fragment.add_css(load_resource('public/css/group_project.css'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/jquery.ui.widget.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/jquery.fileupload.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/vendor/jquery.iframe-transport.js'))
fragment.add_javascript(load_resource('public/js/group_project.js'))
fragment.initialize_js('GroupProjectBlock')
return fragment
def studio_view(self, context):
"""
Editing view in Studio
"""
fragment = Fragment()
fragment.add_content(render_template('/templates/html/group_project_edit.html', {
'self': self,
}))
fragment.add_css(load_resource('public/css/group_project_edit.css'))
fragment.add_javascript(
load_resource('public/js/group_project_edit.js'))
fragment.initialize_js('GroupProjectEditBlock')
return fragment
def assign_grade_to_group(self, group_id, grade_value):
self.project_api.set_group_grade(
group_id,
self.course_id,
self.content_id,
grade_value,
self.weight
)
# Emit analytics event...
self.runtime.publish(
self,
"group_activity.final_grade",
{
"grade_value": grade_value,
"group_id": group_id,
"content_id": self.content_id,
}
)
notifications_service = self.runtime.service(self, 'notifications')
if notifications_service:
self.fire_grades_posted_notification(group_id, notifications_service)
def calculate_grade(self, group_id):
def mean(value_array):
numeric_values = [float(v) for v in value_array]
return float(sum(numeric_values)/len(numeric_values))
review_item_data = self.project_api.get_workgroup_review_items_for_group(group_id, self.content_id)
review_item_map = {make_key(r['question'], self.real_user_id(r['reviewer'])) : r['answer'] for r in review_item_data}
all_reviewer_ids = set([self.real_user_id(r['reviewer']) for r in review_item_data])
group_reviewer_ids = [u["id"] for u in self.project_api.get_workgroup_reviewers(group_id, self.content_id)]
admin_reviewer_ids = [ar_id for ar_id in all_reviewer_ids if ar_id not in group_reviewer_ids]
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
def get_user_grade_value_list(user_id):
user_grades = []
for q_id in group_activity.grade_questions:
user_value = review_item_map.get(make_key(q_id, user_id), None)
if user_value is None:
# if any are incomplete, we consider the whole set to be unusable
return None
else:
user_grades.append(user_value)
return user_grades
admin_provided_grades = None
if len(admin_reviewer_ids) > 0:
admin_provided_grades = []
# Only include complete admin gradesets
admin_reviewer_grades = [
arg
for arg in [get_user_grade_value_list(admin_id) for admin_id in admin_reviewer_ids]
if arg
]
admin_grader_count = len(admin_reviewer_grades)
if admin_grader_count > 1:
for idx in range(len(group_activity.grade_questions)):
admin_provided_grades.append(mean([adm[idx] for adm in admin_reviewer_grades]))
elif admin_grader_count > 0:
admin_provided_grades = admin_reviewer_grades[0]
user_grades = {}
if len(group_reviewer_ids) > 0:
for r_id in group_reviewer_ids:
this_reviewers_grades = get_user_grade_value_list(r_id)
if this_reviewers_grades is None:
if admin_provided_grades:
this_reviewers_grades = admin_provided_grades
else:
return None
user_grades[r_id] = this_reviewers_grades
elif admin_provided_grades:
group_reviewer_ids = [self.user_id]
user_grades[self.user_id] = admin_provided_grades
else:
return None
# Okay, if we've got here we have a complete set of marks to calculate the grade
reviewer_grades = [mean(user_grades[r_id]) for r_id in group_reviewer_ids if len(user_grades[r_id]) > 0]
group_grade = round(mean(reviewer_grades)) if len(reviewer_grades) > 0 else None
return group_grade
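# Worked example of the arithmetic above (illustrative numbers only): if two group
# reviewers answer the three grade questions with [80, 90, 100] and [70, 80, 90],
# their per-reviewer means are 90.0 and 80.0, and the group grade is
# round(mean([90.0, 80.0])) = 85.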
def mark_complete_stage(self, user_id, stage):
try:
self.project_api.mark_as_complete(
self.course_id,
self.content_id,
user_id,
stage
)
except ApiError as e:
# 409 indicates that the completion record already existed
# That's ok in this case
if e.code != 409:
raise
def update_upload_complete(self):
for u in self.workgroup["users"]:
self.mark_complete_stage(u["id"], "upload")
def graded_and_complete(self, group_id):
workgroup = self.project_api.get_workgroup_by_id(group_id)
for u in workgroup["users"]:
self.mark_complete_stage(u["id"], None)
def evaluations_complete(self):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
peer_review_components = [c for c in group_activity.activity_components if c.peer_reviews]
peer_review_questions = []
for prc in peer_review_components:
for sec in prc.peer_review_sections:
peer_review_questions.extend([q.id for q in sec.questions if q.required])
group_peer_items = self.project_api.get_peer_review_items_for_group(self.workgroup['id'], self.content_id)
my_feedback = {make_key(pri["user"], pri["question"]): pri["answer"] for pri in group_peer_items if pri['reviewer'] == self.xmodule_runtime.anonymous_student_id}
my_peers = [u for u in self.workgroup["users"] if u["id"] != self.user_id]
for peer in my_peers:
for q_id in peer_review_questions:
k = make_key(peer["id"], q_id)
if not k in my_feedback:
return False
if my_feedback[k] is None:
return False
if my_feedback[k] == '':
return False
return True
def grading_complete(self):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
group_review_components = [c for c in group_activity.activity_components if c.other_group_reviews]
group_review_questions = []
for prc in group_review_components:
for sec in prc.other_group_sections:
group_review_questions.extend([q.id for q in sec.questions if q.required])
group_review_items = []
assess_groups = self.project_api.get_workgroups_to_review(self.user_id, self.course_id, self.content_id)
for assess_group in assess_groups:
group_review_items.extend(self.project_api.get_workgroup_review_items_for_group(assess_group["id"], self.content_id))
my_feedback = {make_key(pri["workgroup"], pri["question"]): pri["answer"] for pri in group_review_items if pri['reviewer'] == self.xmodule_runtime.anonymous_student_id}
for assess_group in assess_groups:
for q_id in group_review_questions:
k = make_key(assess_group["id"], q_id)
if not k in my_feedback:
return False
if my_feedback[k] is None:
return False
if my_feedback[k] == '':
return False
return True
@XBlock.json_handler
def studio_submit(self, submissions, suffix=''):
self.display_name = submissions['display_name']
xml_content = submissions['data']
max_score = submissions['max_score']
group_reviews_required_count = submissions['group_reviews_required_count']
user_review_count = submissions['user_review_count']
if not max_score:
# empty = default
max_score = 100
else:
try:
# not an integer, then default
max_score = int(max_score)
except:
max_score = 100
self.weight = max_score
try:
group_reviews_required_count = int(group_reviews_required_count)
except:
group_reviews_required_count = 3
self.group_reviews_required_count = group_reviews_required_count
try:
user_review_count = int(user_review_count)
except:
user_review_count = 1
self.user_review_count = user_review_count
try:
etree.parse(StringIO(xml_content))
self.data = xml_content
except etree.XMLSyntaxError as e:
return {
'result': 'error',
'message': e.message
}
return {
'result': 'success',
}
@XBlock.json_handler
def submit_peer_feedback(self, submissions, suffix=''):
try:
peer_id = submissions["peer_id"]
del submissions["peer_id"]
# Then something like this needs to happen
self.project_api.submit_peer_review_items(
self.xmodule_runtime.anonymous_student_id,
peer_id,
self.workgroup['id'],
self.content_id,
submissions,
)
if self.evaluations_complete():
self.mark_complete_stage(self.user_id, "evaluation")
except Exception as e:
return {
'result': 'error',
'msg': e.message,
}
return {
'result': 'success',
'msg': _('Thanks for your feedback'),
}
@XBlock.json_handler
def submit_other_group_feedback(self, submissions, suffix=''):
try:
group_id = submissions["group_id"]
del submissions["group_id"]
self.project_api.submit_workgroup_review_items(
self.xmodule_runtime.anonymous_student_id,
group_id,
self.content_id,
submissions
)
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
for q_id in group_activity.grade_questions:
if q_id in submissions:
# Emit analytics event...
self.runtime.publish(
self,
"group_activity.received_grade_question_score",
{
"question": q_id,
"answer": submissions[q_id],
"reviewer_id": self.xmodule_runtime.anonymous_student_id,
"is_admin_grader": self.is_admin_grader,
"group_id": group_id,
"content_id": self.content_id,
}
)
grade_value = self.calculate_grade(group_id)
if grade_value:
self.assign_grade_to_group(group_id, grade_value)
self.graded_and_complete(group_id)
if self.is_group_member and self.grading_complete():
self.mark_complete_stage(self.user_id, "grade")
except Exception as e:
return {
'result': 'error',
'msg': e.message,
}
return {
'result': 'success',
'msg': _('Thanks for your feedback'),
}
@XBlock.handler
def load_peer_feedback(self, request, suffix=''):
peer_id = request.GET["peer_id"]
feedback = self.project_api.get_peer_review_items(
self.xmodule_runtime.anonymous_student_id,
peer_id,
self.workgroup['id'],
self.content_id,
)
# pivot the data to show question -> answer
results = {pi['question']: pi['answer'] for pi in feedback}
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def load_other_group_feedback(self, request, suffix=''):
group_id = request.GET["group_id"]
feedback = self.project_api.get_workgroup_review_items(
self.xmodule_runtime.anonymous_student_id,
group_id,
self.content_id
)
# pivot the data to show question -> answer
results = {ri['question']: ri['answer'] for ri in feedback}
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def load_my_peer_feedback(self, request, suffix=''):
user_id = self.user_id
feedback = self.project_api.get_user_peer_review_items(
user_id,
self.workgroup['id'],
self.content_id,
)
results = {}
for item in feedback:
if item['question'] in results:
results[item['question']].append(html.escape(item['answer']))
else:
results[item['question']] = [html.escape(item['answer'])]
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def load_my_group_feedback(self, request, suffix=''):
workgroup_id = self.workgroup['id']
feedback = self.project_api.get_workgroup_review_items_for_group(
workgroup_id,
self.content_id,
)
results = {}
for item in feedback:
if item['question'] in results:
results[item['question']].append(html.escape(item['answer']))
else:
results[item['question']] = [html.escape(item['answer'])]
final_grade = self.calculate_grade(workgroup_id)
if final_grade:
results["final_grade"] = [final_grade]
return webob.response.Response(body=json.dumps(results))
@XBlock.handler
def upload_submission(self, request, suffix=''):
response_data = {"message": _("File(s) successfully submitted")}
failure_code = 0
try:
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
context = {
"user_id": self.user_id,
"group_id": self.workgroup['id'],
"project_api": self.project_api,
"course_id": self.course_id
}
upload_files = [UploadFile(request.params[s.id].file, s.id, context)
for s in group_activity.submissions if s.id in request.params]
# Save the files first
for uf in upload_files:
try:
uf.save_file()
except Exception as save_file_error:
original_message = save_file_error.message if hasattr(save_file_error, "message") else ""
save_file_error.message = _("Error storing file {} - {}").format(uf.file.name, original_message)
raise
# They all got saved... note the submissions
at_least_one_success = False
for uf in upload_files:
try:
uf.submit()
# Emit analytics event...
self.runtime.publish(
self,
"group_activity.received_submission",
{
"submission_id": uf.submission_id,
"filename": uf.file.name,
"content_id": self.content_id,
"group_id": self.workgroup['id'],
"user_id": self.user_id,
}
)
at_least_one_success = True
except Exception as save_record_error:
original_message = save_record_error.message if hasattr(save_record_error, "message") else ""
save_record_error.message = _("Error recording file information {} - {}").format(uf.file.name, original_message)
raise
if at_least_one_success:
# See if the xBlock Notification Service is available, and - if so -
# dispatch a notification to the entire workgroup that a file has been uploaded
# Note that the NotificationService can be disabled, so it might not be available
# in the list of services
notifications_service = self.runtime.service(self, 'notifications')
if notifications_service:
self.fire_file_upload_notification(notifications_service)
response_data.update({uf.submission_id : uf.file_url for uf in upload_files})
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(self.workgroup['id'])
)
if group_activity.has_all_submissions:
self.update_upload_complete()
except Exception as e:
log.exception(e)
failure_code = 500
if isinstance(e, ApiError):
failure_code = e.code
if not hasattr(e, "message"):
e.message = _("Error uploading at least one file")
response_data.update({"message": e.message})
response = webob.response.Response(body=json.dumps(response_data))
if failure_code:
response.status_code = failure_code
return response
@XBlock.handler
def other_submission_links(self, request, suffix=''):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
group_id = request.GET["group_id"]
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(group_id)
)
html_output = render_template('/templates/html/review_submissions.html', {"group_activity": group_activity})
return webob.response.Response(body=json.dumps({"html":html_output}))
@XBlock.handler
def refresh_submission_links(self, request, suffix=''):
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
group_activity.update_submission_data(
self.project_api.get_latest_workgroup_submissions_by_id(self.workgroup['id'])
)
html_output = render_template('/templates/html/submission_links.html', {"group_activity": group_activity})
return webob.response.Response(body=json.dumps({"html":html_output}))
def get_courseware_info(self, courseware_parent_info_service):
activity_name = self.display_name
activity_location = None
stage_name = self.display_name
        stage_location = None
        project_name = self.display_name
        project_location = None
try:
if courseware_parent_info_service:
# First get Unit (first parent)
stage_info = courseware_parent_info_service.get_parent_info(
self.location
)
stage_location = stage_info['location']
stage_name = stage_info['display_name']
# Then get Sequence (second parent)
activity_courseware_info = courseware_parent_info_service.get_parent_info(
stage_location
)
activity_name = activity_courseware_info['display_name']
activity_location = activity_courseware_info['location']
project_courseware_info = courseware_parent_info_service.get_parent_info(
activity_location
)
project_name = project_courseware_info['display_name']
project_location = project_courseware_info['location']
except Exception as ex:
# Can't look this up then log and just use the default
# which is our display_name
log.exception(ex)
return {
'stage_name': stage_name,
'stage_location': stage_location,
'activity_name': activity_name,
'activity_location': activity_location,
'project_name': project_name,
'project_location': project_location,
}
def fire_file_upload_notification(self, notifications_service):
try:
# this NotificationType is registered in the list of default Open edX Notifications
msg_type = notifications_service.get_notification_type('open-edx.xblock.group-project.file-uploaded')
workgroup_user_ids = []
uploader_username = ''
for user in self.workgroup['users']:
# don't send to ourselves
if user['id'] != self.user_id:
workgroup_user_ids.append(user['id'])
else:
uploader_username = user['username']
# get the activity name which is simply our hosting
# Sequence's Display Name, so call out to a new xBlock
# runtime Service
courseware_info = self.get_courseware_info(self.runtime.service(self, 'courseware_parent_info'))
activity_name = courseware_info['activity_name']
activity_location = courseware_info['activity_location']
msg = NotificationMessage(
msg_type=msg_type,
namespace=str(self.course_id),
payload={
'_schema_version': 1,
'action_username': uploader_username,
'activity_name': activity_name,
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
msg.add_click_link_params({
'course_id': str(self.course_id),
'activity_location': str(activity_location) if activity_location else '',
})
# NOTE: We're not using Celery here since we are expectating that we
# will have only a very small handful of workgroup users
notifications_service.bulk_publish_notification_to_users(
workgroup_user_ids,
msg
)
except Exception as ex:
# While we *should* send notification, if there is some
# error here, we don't want to blow the whole thing up.
# So log it and continue....
log.exception(ex)
def fire_grades_posted_notification(self, group_id, notifications_service):
try:
# this NotificationType is registered in the list of default Open edX Notifications
msg_type = notifications_service.get_notification_type('open-edx.xblock.group-project.grades-posted')
# get the activity name which is simply our hosting
# Sequence's Display Name, so call out to a new xBlock
# runtime Service
courseware_info = self.get_courseware_info(self.runtime.service(self, 'courseware_parent_info'))
activity_name = courseware_info['activity_name']
activity_location = courseware_info['activity_location']
msg = NotificationMessage(
msg_type=msg_type,
namespace=str(self.course_id),
payload={
'_schema_version': 1,
'activity_name': activity_name,
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
msg.add_click_link_params({
'course_id': str(self.course_id),
'activity_location': str(activity_location) if activity_location else '',
})
# Bulk publish to the 'group_project_workgroup' user scope
notifications_service.bulk_publish_notification_to_scope(
'group_project_workgroup',
{
# I think self.workgroup['id'] is a string version of an integer
'workgroup_id': group_id,
},
msg
)
except Exception as ex:
# While we *should* send notification, if there is some
# error here, we don't want to blow the whole thing up.
# So log it and continue....
log.exception(ex)
def _get_component_timer_name(self, component, timer_name_suffix):
return '{location}-{component}-{timer_name_suffix}'.format(
location=self.location,
component=component.id,
timer_name_suffix=timer_name_suffix
)
def _set_activity_timed_notification(self, course_id, activity, msg_type, component, milestone_date, send_at_date, services, timer_name_suffix):
component_name = component.name
notifications_service = services.get('notifications')
courseware_parent_info = services.get('courseware_parent_info')
courseware_info = self.get_courseware_info(courseware_parent_info)
activity_name = courseware_info['activity_name']
activity_location = courseware_info['activity_location']
project_location = courseware_info['project_location']
milestone_date_tz = milestone_date.replace(tzinfo=pytz.UTC)
send_at_date_tz = send_at_date.replace(tzinfo=pytz.UTC)
msg = NotificationMessage(
msg_type=notifications_service.get_notification_type(msg_type),
namespace=str(course_id),
payload={
'_schema_version': 1,
'activity_name': activity_name,
'stage': component_name,
'due_date': milestone_date_tz.strftime('%-m/%-d/%-y'),
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
msg.add_click_link_params({
'course_id': str(course_id),
'activity_location': str(activity_location),
})
notifications_service.publish_timed_notification(
msg=msg,
send_at=send_at_date_tz,
# send to all students participating in this project
scope_name='group_project_participants',
scope_context={
'course_id': str(course_id),
'content_id': str(project_location),
},
timer_name=self._get_component_timer_name(component, timer_name_suffix),
ignore_if_past_due=True # don't send if we're already late!
)
def on_studio_published(self, course_id, services):
"""
A hook into when this xblock is published in Studio. When we are published we should
register a Notification to be send on key dates
"""
try:
log.info('GroupProjectBlock.on_published() on location = {}'.format(self.location))
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
# see if we are running in an environment which has Notifications enabled
notifications_service = services.get('notifications')
if notifications_service:
# set (or update) Notification timed message based on
# the current key dates
for component in group_activity.activity_components:
# if the component has a opening date, then send a msg then
if component.open_date:
self._set_activity_timed_notification(
course_id,
group_activity,
'open-edx.xblock.group-project.stage-open',
component,
datetime.combine(component.open_date, datetime.min.time()),
datetime.combine(component.open_date, datetime.min.time()),
services,
'open'
)
# if the component has a close date, then send a msg then
if component.close_date:
self._set_activity_timed_notification(
course_id,
group_activity,
'open-edx.xblock.group-project.stage-due',
component,
datetime.combine(component.close_date, datetime.min.time()),
datetime.combine(component.close_date, datetime.min.time()),
services,
'due'
)
# and also send a notice 3 days earlier
self._set_activity_timed_notification(
course_id,
group_activity,
'open-edx.xblock.group-project.stage-due',
component,
datetime.combine(component.close_date, datetime.min.time()),
datetime.combine(component.close_date, datetime.min.time()) - timedelta(days=3),
services,
'coming-due'
)
except Exception as ex:
log.exception(ex)
def on_before_studio_delete(self, course_id, services):
"""
A hook into when this xblock is deleted in Studio, for xblocks to do any lifecycle
management
"""
log.info('GroupProjectBlock.on_before_delete() on location = {}'.format(self.location))
try:
group_activity = GroupActivity.import_xml_string(self.data, self.is_admin_grader)
# see if we are running in an environment which has Notifications enabled
notifications_service = services.get('notifications')
if notifications_service:
# If we are being delete, then we should remove any NotificationTimers that
# may have been registered before
for component in group_activity.activity_components:
notifications_service.cancel_timed_notification(
self._get_component_timer_name(component, 'open')
)
notifications_service.cancel_timed_notification(
self._get_component_timer_name(component, 'due')
)
notifications_service.cancel_timed_notification(
self._get_component_timer_name(component, 'coming-due')
)
except Exception as ex:
log.exception(ex)
| agpl-3.0 | -3,804,872,931,286,464 | 37.793103 | 176 | 0.565062 | false |
lbouma/Cyclopath | scripts/dev/spam.py | 1 | 17502 | #!/usr/bin/python
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# Send an e-mail message to all the e-mail addresses in a file, waiting a
# specified number of seconds between each message.
# Usage:
#
# $ ./spam.py --help
#
# Also:
#
# $ ./spam.py |& tee 2012.09.12.spam.txt
#
'''
# DEVS: If you use --bcc-size (i.e., use Bcc:), the To: address
# is [email protected]. So the group gets an email while
# you test. Also, it makes the message more impersonal, so
# don't use it.
# --mail-from "[email protected]" \
# --bcc-size 3
# NOTE: Use single quotes to protect the bang (exclamation mark). If you don't
# escape it, i.e., \!, bash complains, but if you do, the email includes the
# backslash. This usage shows how to use both double and single quotes.
./spam.py \
--subject "Cyclopath says sorry and invites you to log in again"'!' \
--recipients "/ccp/bin/ccpdev/schema/runic/2012.09.12/recipient_file" \
--plain "/ccp/bin/ccpdev/schema/runic/2012.09.12/content_plain" \
--html "/ccp/bin/ccpdev/schema/runic/2012.09.12/content_html" \
--delay-time 0.3 \
--delay-shake
'''
script_name = ('Spam! Lovely Spam!')
script_version = '1.2'
__version__ = script_version
__author__ = 'Cyclopath <[email protected]>'
__date__ = '2012-11-12'
# ***
# SYNC_ME: Search: Scripts: Load pyserver.
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../util'
% (os.path.abspath(os.curdir),)))
import pyserver_glue
import conf
import g
import logging
from util_ import logging2
from util_.console import Console
log_level = logging.DEBUG
#log_level = logging2.VERBOSE2
#log_level = logging2.VERBOSE4
#log_level = logging2.VERBOSE
conf.init_logging(True, True, Console.getTerminalSize()[0]-1, log_level)
log = g.log.getLogger('spam')
# ***
import random
import StringIO
import subprocess
from subprocess import Popen, PIPE, STDOUT
import time
from grax.user import User
from util_ import db_glue
from util_ import misc
from util_.emailer import Emailer
from util_.log_progger import Debug_Progress_Logger
from util_.script_args import Ccp_Script_Args
from util_.script_base import Ccp_Script_Base
# *** Debug switches
# *** Cli arg. parser
class ArgParser_Script(Ccp_Script_Args):
#
def __init__(self):
Ccp_Script_Args.__init__(self, script_name, script_version)
#
def prepare(self):
Ccp_Script_Args.prepare(self, exclude_normal=True)
self.add_argument('--subject', dest='email_subject',
action='store', default='', type=str, required=True,
help='the subject of the email')
#
self.add_argument('--recipients', dest='recipient_file',
action='store', default=None, type=str, required=False,
help='file of user IDs (or maybe usernames or email addresses')
#
self.add_argument('--plain', dest='content_plain',
action='store', default=None, type=str, required=False,
help='file containing the email plaintext')
#
self.add_argument('--html', dest='content_html',
action='store', default=None, type=str, required=False,
help='file containing the email html')
# Or, instead of the last three:
self.add_argument('--bug-number', dest='bug_number',
action='store', default=None, type=int, required=False,
help='the bug number, if you want to use standard paths and names')
#
self.add_argument('--file-number', dest='file_number',
action='store', default=None, type=int, required=False,
help='the file number of the emails_for_spam-created file')
#
self.add_argument('--test', dest='send_test_emails',
action='store_true', default=False,
help='if set, send emails to the users listed in the test file')
#
self.add_argument('--test-file', dest='test_emails_file',
action='store', type=str, required=False,
default='/ccp/bin/ccpdev/schema/runic/spam_test_uids',
help='the location of the test email user IDs')
# To test the code without sending even test emails, try this.
self.add_argument('--do-not-email', dest='do_not_email',
action='store_true', default=False, required=False,
help='do not even send test emails, just test the code.')
# Generally when you test you'll want to ignore dont_study, etc.
self.add_argument('--ignore-flags', dest='ignore_flags',
action='store_true', default=False, required=False,
help='always email, even if dont_study.')
#
# MAGIC_NUMBER: 0.3 seconds seems... reasonable. We've not tested
# anything smaller (circa 2012.11.12).
self.add_argument('--delay-time', dest='delay_time',
action='store', default=0.3, type=float,
help='if nonzero, wait time between sendmail calls')
#
self.add_argument('--dont-shake', dest='dont_shake',
action='store_true', default=True,
help='if set, use a variable delay time each time')
#
self.add_argument('--mail-from', dest='mail_from',
action='store', default=conf.mail_from_addr, type=str,
help='used to override CONFIG.mail_from_addr')
#
self.add_argument('--bcc-size', dest='bcc_size',
action='store', default=0, type=int,
help='number of recipients to Bcc at once (per sendmail call)')
# FIXME: The plain and html files should allow interpolation,
# i.e., username, email, etc.
#
def verify_handler(self):
ok = Ccp_Script_Args.verify_handler(self)
#
if ((self.cli_opts.recipient_file
or self.cli_opts.content_plain
or self.cli_opts.content_html)
and self.cli_opts.bug_number):
log.error('%s%s'
% ('Please specify either recipient_file, content_plain, and ',
'content_html, or --bug-number, not both',))
ok = False
elif self.cli_opts.bug_number:
self.cli_opts.recipient_file = (
'/ccp/bin/ccpdev/schema/runic/bug_%s/recipient_file'
% (self.cli_opts.bug_number,))
if self.cli_opts.file_number:
self.cli_opts.recipient_file += (
'.%d' % (self.cli_opts.file_number,))
self.cli_opts.content_plain = (
'/ccp/bin/ccpdev/schema/runic/bug_%s/content_plain'
% (self.cli_opts.bug_number,))
self.cli_opts.content_html = (
'/ccp/bin/ccpdev/schema/runic/bug_%s/content_html'
% (self.cli_opts.bug_number,))
elif (not (self.cli_opts.recipient_file
and self.cli_opts.content_plain
and self.cli_opts.content_html)):
log.error('%s%s'
% ('Please specify either --recipient-file, --plain, ',
'and --html, or --bug-number.',))
#
file_paths = [
self.cli_opts.recipient_file,
self.cli_opts.content_plain,
self.cli_opts.content_html,
]
if self.cli_opts.send_test_emails:
file_paths.append(self.cli_opts.test_emails_file)
for fpath in file_paths:
if not os.path.exists(fpath):
log.error('File does not exist: "%s"' % (fpath,))
ok = False
#
if self.cli_opts.send_test_emails:
self.cli_opts.recipient_file = self.cli_opts.test_emails_file
#
return ok
#
def parse(self):
Ccp_Script_Args.parse(self)
# *** Ccp_Send_Emails
class Ccp_Send_Emails(Ccp_Script_Base):
__slots__ = (
'headers',
)
# *** Constructor
def __init__(self):
Ccp_Script_Base.__init__(self, ArgParser_Script)
self.headers = ''
# ***
# This script's main() is very simple: it makes one of these objects and
# calls go(). Our base class reads the user's command line arguments and
# creates a query_builder object for us at self.qb before thunking to
# go_main().
#
def go_main(self):
# Get the content templates.
content_plain_f = open(self.cli_opts.content_plain)
content_plain = content_plain_f.read()
content_plain_f.close()
content_html_f = open(self.cli_opts.content_html)
content_html = content_html_f.read()
content_html_f.close()
# Assemble the recipients.
# The file should be of the form
#
# username\temail_address
#
# PERFORMANCE: Cyclopath circa 2012 doesn't have that many users (~5,000)
# so we can load all the emails into memory. If we end up with lots more
# users, this operation might take a sizeable bite of memory.
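      # An illustrative recipient file (names and addresses made up); a line
      # holding a single integer is treated as a user ID instead:
      #
      #   alice\talice@example.com
      #   bob\tbob@example.com
      #   # lines starting with '#' and blank lines are skipped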
recipients = []
user_ids = []
recipients_f = open(self.cli_opts.recipient_file)
try:
deprecation_warned = False
for line in recipients_f:
line = line.strip()
# NOTE: Skip comment lines.
if line and (not line.startswith('#')):
try:
fake_uid = 0
username, email = line.split('\t')
# NOTE: unsubscribe_proof is unknown since we don't
# select from db, which is why this path is deprecated.
unsubscribe_proof = ''
recipients.append(
(fake_uid, username, email, unsubscribe_proof,))
if not deprecation_warned:
log.warning('Using username/email file is deprecated.')
deprecation_warned = True
except ValueError:
user_id = int(line)
user_ids.append(user_id)
except ValueError:
log.error('The format of the recipient file is unexpected / line: %s'
% (line,))
raise
finally:
recipients_f.close()
if recipients and user_ids:
log.error(
'Please specify only "username, email" or "user IDs" but not both')
sys.exit(0)
db = db_glue.new()
if user_ids:
extra_where = ("id IN (%s)" % (",".join([str(x) for x in user_ids]),))
(valid_ids, invalid_ids, not_okay, user_infos, info_lookup) = (
User.spam_get_user_info(
db, extra_where, sort_mode='id ASC', make_lookup=True,
ignore_flags=self.cli_opts.ignore_flags))
if invalid_ids or not_okay:
log.error('%s%s'
% ('Please recheck the user ID list: ',
'%d okay / %d invalid / %d not_okay'
% (len(valid_ids), len(invalid_ids), len(not_okay),)))
log.error('not_okay: %s' % (not_okay,))
sys.exit(0)
g.assurt(len(set(valid_ids)) == len(set(user_infos)))
g.assurt(len(set(valid_ids)) == len(set(user_ids)))
# Resort according to the input.
for uid in user_ids:
# NOTE: info_tuple is formatted: (user_id, username, email,)
recipients.append(info_lookup[uid])
all_okay = True
for info_tuple in recipients:
if not User.email_valid(info_tuple[2]):
log.error('Invalid email for user %s: %s'
% (info_tuple[1], info_tuple[2],))
all_okay = False
if not all_okay:
sys.exit(0)
log.debug('Found %d recipients.' % (len(recipients),))
if not recipients:
log.info('No one to email. Bye!')
sys.exit(0)
# Always send a copy to us, too.
g.assurt(conf.internal_email_addr)
unsubscribe_proof = ''
recipients.append(
(0, 'Cyclopath Team', conf.internal_email_addr, unsubscribe_proof,))
# Combine recipients if bcc'ing.
if self.cli_opts.bcc_size:
addr_lists = []
addrs_processed = 0
while addrs_processed < len(recipients):
last_index = addrs_processed + self.cli_opts.bcc_size
bcc_list = recipients[addrs_processed:last_index]
g.assurt(bcc_list)
addrs_processed += self.cli_opts.bcc_size
addr_lists.append(bcc_list)
recipients = addr_lists
# 2012.11.12: Using bcc is not cool. Don't do it.
log.error('BCC is too impersonal. Please consider not using it.')
g.assurt(False)
# Process the recipients one or many at a time.
prompted_once = False
prog_log = Debug_Progress_Logger(loop_max=len(recipients))
# MAYBE: Don't log for every email?
#prog_log.log_freq = prog_log.loop_max / 100.0
for recipient_or_list in recipients:
email_unames = []
# Make the To and Bcc headers.
if self.cli_opts.bcc_size:
g.assurt(False) # DEVs: Reconsider using BCC.
# Otherwise you cannot personalize messages, i.e.,
# with usernames of private UUID links.
# Use a generic user name, since there are multiple recipients.
msg_username = 'Cyclopath User'
# Send the email to ourselves...
recipient_email = self.cli_opts.mail_from
recipient_addr = ('"Cyclopath.org" <%s>'
% (self.cli_opts.mail_from,))
# ...and Bcc everyone else.
email_addrs = []
for recipient in recipient_or_list:
# C.f. emailer.check_email, but using Bcc is deprecated, so
# don't worry about it.
msg_username = recipient[1]
recipient_email = recipient[2]
really_send = False
if ((len(conf.mail_ok_addrs) == 1)
and ('ALL_OKAY' in conf.mail_ok_addrs)):
log.debug('go_main: conf says ALL_OKAY: %s'
% (recipient_addr,))
really_send = True
elif recipient_email in conf.mail_ok_addrs:
log.debug('go_main: email in mail_ok_addrs: %s'
% (recipient_addr,))
really_send = True
elif not conf.mail_ok_addrs:
log.error('go_main: mail_ok_addrs is not set: %s'
% (recipient_addr,))
else:
# This is a dev. machine and we don't want to email users.
log.debug('go_main: skipping non-dev email: %s'
% (recipient_addr,))
if really_send:
log.debug('Emailing user at: %s' % (recipient_addr,))
email_addr = ('"%s" <%s>' % (msg_username, recipient_email,))
email_addrs.append(email_addr)
email_unames.append(msg_username)
addrs_str = ','.join(email_addrs)
addr_bcc = 'Bcc: %s\n' % (addrs_str,)
unsubscribe_proof = ''
unsubscribe_link = ''
else:
# This is just a normal, send-directly-to-one-user email.
msg_username = recipient_or_list[1]
recipient_email = recipient_or_list[2]
recipient_addr = ('"%s" <%s>' % (msg_username, recipient_email,))
email_unames.append(recipient_email)
addr_bcc = ''
unsubscribe_proof = recipient_or_list[3]
unsubscribe_link = Emailer.make_unsubscribe_link(
'user_unsubscribe', recipient_email, unsubscribe_proof)
# To test the unsubscribe feature, try a link like this:
# http://ccpv3/gwis?request=user_unsubscribe&[email protected]&proof=asdasdasd
db.close()
the_msg = Emailer.compose_email(
self.cli_opts.mail_from,
msg_username,
recipient_addr,
unsubscribe_proof,
unsubscribe_link,
self.cli_opts.email_subject,
content_plain,
content_html,
addr_bcc)
if not prompted_once:
do_send = self.ask_permission(the_msg)
if not do_send:
log.warning('Canceled by user. Bye!')
sys.exit(0)
prompted_once = True
# NOTE: Emailer.send_email will check conf.mail_ok_addrs.
# ALSO: This is the only place/caller/script that uses do_not_email.
# It's really just for testing, and this is the last stop.
if not self.cli_opts.do_not_email:
Emailer.send_email(
email_unames,
the_msg,
prog_log,
self.cli_opts.delay_time,
self.cli_opts.dont_shake)
# end: for recipient_or_list in recipients.
prog_log.loops_fin()
#
def ask_permission(self, the_msg):
print ('Please confirm the settings:\n')
print (' %s sec. delay' % (self.cli_opts.delay_time,))
print (' %s recipient%s per email'
% ('1' if self.cli_opts.bcc_size else '0',
's' if self.cli_opts.bcc_size else '',))
print (' recipient_file: %s' % (self.cli_opts.recipient_file,))
print (' content_plain: %s' % (self.cli_opts.content_plain,))
print (' content_html: %s' % (self.cli_opts.content_html,))
print ('\nHere is the first message we will sendmail:\n')
print (the_msg)
print ('')
msg = 'Proceed and send emails?'
yes = self.ask_yes_no(msg)
return yes
# ***
# ***
if (__name__ == '__main__'):
spam = Ccp_Send_Emails()
spam.go()
| apache-2.0 | -95,545,866,461,320,300 | 34.645621 | 87 | 0.570621 | false |
themattrix/python-simian | setup.py | 1 | 1046 | from setuptools import setup
setup(
name='simian',
version='2.0.0',
packages=('simian',),
url='https://github.com/themattrix/python-simian',
license='MIT',
author='Matthew Tardiff',
author_email='[email protected]',
install_requires=('mock', 'contextlib2'),
tests_require=('nose', 'flake8'),
description=(
'A decorator for easily mocking out multiple dependencies by '
'monkey-patching.'),
classifiers=(
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'))
| mit | 159,269,272,641,297,660 | 37.740741 | 70 | 0.605163 | false |
animalize/tz2txt | tz2txt/datamachine.py | 1 | 18988 | # coding=utf-8
## web  ->  internal state 1 (Reply)
## tz = web_to_internal(url, pg_count)
##
## internal state 1  ->  BP (arranged) text
## internal_to_bp(tz)
##
## ----------------------------
##
## BP (arranged) text  ->  internal state 2 (BPReply)
## lst = bp_to_internal2(infile)
##
## process internal state 2
## lst = process_internal2(lst)
##
## internal state 2  ->  BP (arranged) text
## internal2_to_bp(lst)
##
## ----------------------------
##
## BP (arranged) text  ->  final text
## bp_to_final(infile, keep_discard=True, label=0)
##
## ----------------------------
##
## statistics
## statistic(all_list)
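##
## A rough usage sketch of the pipeline above (illustrative only -- the URL,
## the page count and the file handling are made up; the real entry points
## live in the GUI/CLI layers of tz2txt):
##
##   tz = web_to_internal('http://bbs.tianya.cn/post-free-123456-1.shtml', -1)
##   output, title = internal_to_bp(tz)      # BP text, to be edited by hand
##   lst = bp_to_internal2(open('edited.txt', encoding='utf-8'))
##   lst = process_internal2(lst)
##   output = internal2_to_bp(lst)
##   final, discard, info, cn_count = bp_to_final(open('edited.txt', encoding='utf-8'))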
from io import StringIO
from datetime import datetime
import itertools
try:
import winsound
except ImportError:
winsound = None
import color
from red import red
from fetcher import *
from tzdatastruct import *
from BaseProcessor import *
from AbPageParser import *
def save_print(print_str):
try:
print(print_str)
except UnicodeEncodeError:
for char in print_str:
try:
print(char, end='')
except:
print('?', end='')
print()
# Print the header info of the BP (arranged) text
def print_bp_head(all_list):
for one in all_list:
if isinstance(one, str):
if one.startswith('<tiezi>'):
print_str = one[len('<tiezi>'):]
save_print(print_str)
elif isinstance(one, BPReply):
break
print()
# Statistics
def statistic(all_list):
processor = BaseProcessor()
rlist = [one for one in all_list if isinstance(one, BPReply)]
processor.set_rlist(rlist)
processor.statistic()
def process_internal2(all_list):
    '''Process intermediate form 2'''
def get_processor(all_list):
        '''Get the processor'''
processor = None
if all_list:
p = red.re_dict(r'<processor:\s*(.*?)\s*>')
m = p.search(all_list[0])
if m:
local_processor = m.group(1)
processor = BaseProcessor.get_processor(local_processor)
return processor
    # Find the processor
processor = get_processor(all_list)
if not processor:
print('编排文本的首行没有指定自动处理器,不做处理\n例如:<processor: sample>')
return all_list
rlist = [one for one in all_list if isinstance(one, BPReply)]
print('共有{0}条回复,选择了{1}条回复。\n'.format(
len(rlist),
sum(1 for i in rlist if i.select)
)
)
processor.set_rlist(rlist)
processor.process()
print('共有{0}条回复,选择了{1}条回复。'.format(
len(rlist),
sum(1 for i in rlist if i.select and i.suggest)
)
)
return all_list
def reply_to_bp(reply, select):
    '''Reply -> BP text; duck-typed (works for Reply and BPReply alike)'''
mark = '█' if select else ''
t = ('<time>◇◆◇◆◇◆◇◆◇◆◇ <',
reply.time.strftime('%Y-%m-%d %H:%M:%S %w'),
'> ◇◆◇◆◇◆◇◆◇◆◇\n',
reply.text,
'\n<mark>══════保留标记:', mark
)
return ''.join(t)
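# A single reply rendered by reply_to_bp() looks roughly like this
# (timestamp and body are illustrative):
#
#   <time>◇◆◇◆◇◆◇◆◇◆◇ <2012-09-12 08:30:00 3> ◇◆◇◆◇◆◇◆◇◆◇
#   reply text...
#   <mark>══════保留标记:█
#
# bp_to_internal2() below parses exactly this shape back into BPReply objects.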
def internal2_to_bp(all_list):
    '''Intermediate form 2 to BP (arranged) text'''
def to_bp(obj):
if isinstance(obj, str):
return obj
elif isinstance(obj, BPReply):
s = obj.select and obj.suggest
return reply_to_bp(obj, s)
if not all_list:
print('无法处理,请检查输入文件是否为编排文本')
return None
write_list = (to_bp(one) for one in all_list)
output = StringIO('\n'.join(write_list))
return output
def bp_to_internal2(infile):
    '''BP (arranged) text to intermediate form 2'''
all_list = list()
pattern = red.re_dict(r'<(\d{4}-\d\d-\d\d\s+\d\d:\d\d:\d\d)')
dt = lambda s:datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
temp = list()
temp_date = None
in_reply = False
for line in infile.readlines():
line = line.rstrip('\n')
if line.startswith('<time>'):
if in_reply == True:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<mark>行')
break
m = pattern.search(line)
if not m:
print('无法解析日期')
break
temp_date = dt(m.group(1))
in_reply = True
elif line.startswith('<mark>'):
if in_reply == False:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<time>行')
break
if line.endswith('█'):
select = True
else:
select = False
            # Append the reply
rpl = BPReply(temp_date, '\n'.join(temp), select)
all_list.append(rpl)
temp.clear()
in_reply = False
elif in_reply:
temp.append(line)
elif not in_reply:
all_list.append(line)
infile.close()
if in_reply == True:
print('格式错误:最后一个回复文本的前后包括标志不配对。')
return all_list
def count_chinese(string):
    '''Count CJK ideographs (Chinese characters), excluding CJK punctuation'''
count = 0
for c in string:
c = ord(c)
        # CJK Unified Ideographs: 20,950
        # CJK Unified Ideographs Extension A: 6,582
        # CJK Compatibility Ideographs: 472
        # CJK Unified Ideographs Extensions B-E: 52,844
if 0x4E00 <= c <= 0x9FFF or \
0x3400 <= c <= 0x4DBF or \
0xF900 <= c <= 0xFAFF or \
0x20000 <= c <= 0x2EBEF:
count += 1
return count
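# For example, count_chinese('你好, world!') returns 2: only the two CJK
# ideographs fall in the ranges above; ASCII letters and punctuation do not.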
def bp_to_final(infile, keep_discard=True, label=0):
    '''Compile: BP (arranged) text to final text, plus discarded text'''
class placeholder:
def __init__(self, posi=0, pagenum=0, show=False):
self.posi = posi
self.pagenum = pagenum
self.show = show
def is_not_empty(lst):
for i in lst:
yield i.strip() != ''
info_list = list()
holder_list = [placeholder()]
text_list = list()
abandon_list = list()
pickcount, allcount = 0, 0
    # Used to replace [img]http://img3.laibafile.cn/p/m/1234567.jpg[/img]
    # with 【图片:1234567.jpg】
picr = (r'\[img\s*(\d+|)\].*?\[/img\]')
pattern = red.re_dict(picr)
    # Extract the page number
re_pagenum = red.re_dict(r'^<page>页号:\s*(\d+)\s*$')
    # Extract the time
p_time = (r'^<time>[^<]*<\d\d(\d\d-\d{1,2}-\d{1,2})\s+'
r'(\d{1,2}:\d{1,2})')
re_time = red.re_dict(p_time)
    # Read the BP (arranged) text
in_reply = False
temp = list()
current_page = 0
current_time = ''
for line in infile.readlines():
if line.startswith('<time>'):
if in_reply == True:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<mark>行')
break
in_reply = True
# current_time
if label == 2:
m = re_time.search(line)
if m:
current_time = m.group(1) + ' ' + m.group(2)
else:
current_time = ''
elif line.startswith('<mark>'):
if in_reply == False:
print('格式错误:回复文本的前后包括标志不配对。\n',
'丢失<time>行')
break
if line.endswith('█\n') or line.endswith('█'):
pickcount += 1
if label == 0:
pass
elif label == 1:
holder_list[-1].show = True
elif label == 2:
floor_label = ('№.%d ☆☆☆'
' 发表于%s P.%d '
'☆☆☆\n'
'-------------------------'
'-------------------------'
'\n')
floor_label = floor_label % \
(pickcount, current_time, current_page)
text_list.append(floor_label)
text_list.extend(temp)
text_list.append('\n')
elif any(is_not_empty(temp)):
abandon_list.extend(temp)
abandon_list.append('∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\n\n')
temp.clear()
allcount += 1
in_reply = False
elif in_reply:
line = pattern.sub(r'【一张图片\1】', line)
temp.append(line)
        # Because of the previous elif, in_reply is guaranteed to be False here
elif not text_list and not abandon_list and \
line.startswith('<tiezi>'):
info_list.append(line[len('<tiezi>'):])
elif label != 0:
m = re_pagenum.search(line)
if m:
current_page = int(m.group(1))
if label == 1:
text_list.append('')
holder = placeholder(len(text_list)-1,
current_page
)
holder_list.append(holder)
infile.close()
if in_reply == True:
print('格式错误:最后一个回复文本的前后包括标志不配对。')
    # Page-number auxiliary format
if label == 1:
for holder in holder_list[1:]:
if holder.show:
page_label = ('☆☆☆☆☆'
' 进入第%d页 '
'☆☆☆☆☆\n'
'----------------'
'----------------'
'\n\n') % holder.pagenum
text_list[holder.posi] = page_label
color_p1 = color.fore_color(allcount, color.Fore.YELLOW)
color_p2 = color.fore_color(pickcount, color.Fore.YELLOW)
print('共有{0}条回复,选择了其中{1}条回复'.format(color_p1, color_p2))
    # Contents of the output ============
    # Join the pieces
if info_list:
s_iter = itertools.chain(info_list, '\n', text_list)
else:
s_iter = iter(text_list)
s = ''.join(s_iter)
    # Runs of consecutive image placeholders
s = red.sub(r'(?:【一张图片(\d+|)】\s+){3,}',
r'【多张图片\1】\n\n',
s)
s = red.sub(r'(?:【一张图片(\d+|)】\s+){2}',
r'【两张图片\1】\n\n',
s)
    # Output StringIO
output = StringIO(s)
    # Chinese character count
chinese_ct = count_chinese(s)
    # Discarded text
if keep_discard and abandon_list:
s_iter = itertools.chain(info_list, '\n', abandon_list)
s = ''.join(s_iter)
discard = StringIO(s)
else:
discard = None
return output, discard, info_list, chinese_ct
def internal_to_bp(tz):
'''
    Internal form to BP (arranged) text.
    Returns (output file object, title).
'''
def page_to_g(page):
        '''One page; returns: text, picked reply count, total reply count'''
rpls = [reply_to_bp(r, True) for r in page.replys]
pickcount = len(rpls)
allcount = len(page.replys)
if not pickcount:
return '', 0, allcount
else:
            # Header info
head = ('<page>页号: ', str(page.page_num), '\n',
'<page>网址: ', page.url, '\n',
'<page>有后页: ', str(page.finished), '\n',
'<page>总回复数: ', str(allcount),
' 摘取回复数: ', str(pickcount)
)
head = ''.join(head)
            # Header info and body text
s_iter = itertools.chain((head,), rpls, ('',))
s = '\n\n'.join(s_iter)
return s, pickcount, allcount
def tiezi_to_g(tiezi):
pgs = [page_to_g(p) for p in tiezi.pages]
text = (x for x,y,z in pgs if y > 0)
pickcount = sum(y for x,y,z in pgs)
allcount = sum(z for x,y,z in pgs)
color_p1 = color.fore_color(allcount, color.Fore.YELLOW)
color_p2 = color.fore_color(pickcount, color.Fore.YELLOW)
print('总回复数: {0} 摘取回复数: {1}'.format(color_p1, color_p2))
if not pickcount:
return None
else:
            # Header info
firstpg = tiezi.pages[0]
lastpg = tiezi.pages[-1]
processor_name = '<processor: ' + tiezi.local_processor + '>\n' \
if tiezi.local_processor \
else ''
fmark = '(未下载到末页)' if lastpg.finished else '(已下载到末页)'
post_time = '<tiezi>发帖时间:' + \
firstpg.replys[0].time.strftime('%Y-%m-%d %H:%M') + \
'\n' \
if firstpg.page_num == 1 and firstpg.replys \
else ''
head = (processor_name,
'<tiezi>标题:', tiezi.title, '\n',
'<tiezi>楼主:', tiezi.louzhu, '\n',
post_time,
'<tiezi>下载时间:',datetime.now().strftime('%Y-%m-%d %H:%M'),'\n',
'<tiezi>起始网址:', tiezi.begin_url, '\n',
'起始页号', str(firstpg.page_num),
',末尾页号', str(lastpg.page_num), ' ', fmark, '\n',
'总回复数: ', str(allcount),
' 摘取回复数: ', str(pickcount), '\n\n'
)
s_iter = itertools.chain(head, text)
s = ''.join(s_iter)
return s
#----------------------------------
    # internal_to_bp(tz) starts here
#----------------------------------
if not tz or not tz.pages:
print('一页也没有,不输出编排文件')
return None, ''
text = tiezi_to_g(tz)
if text == None:
print('\n没有摘取到回复,不输出文件')
return None, ''
# StringIO object
output = StringIO(text)
return output, tz.title
def web_to_internal(url, pg_count):
    '''Forum thread to internal form'''
    # Downloader
f = Fetcher()
    # Page parser
parser = AbPageParser.get_parser(url)
if not parser:
return None
tz = Tiezi()
dl_count = 0
while True:
        # Have we downloaded the requested number of pages?
if pg_count >= 0 and dl_count >= pg_count:
print('下载完指定页数{0},停止下载\n'.format(pg_count))
break
        # Download the page data
url = parser.pre_process_url(url)
data = f.fetch_url(url)
if not data:
print('无法读取页面:{0}'.format(url))
break
        # Prepare the parser
if dl_count == 0:
            # Check the parser
parser.set_page(url, data)
if not parser.check_parse_methods():
print(' 可能是网页改版,导致无法提取数据。')
print(' 请使用“检测新版本”功能检测是否有新程序可用。')
print()
return None
            # First downloaded page
tz.begin_url = url
else:
            # Feed the data to the parser
parser.set_page(url, data)
        # Fill in the tiezi (thread) info
if not tz.louzhu:
pub_date = None
tz.title = parser.wrap_get_title()
tz.louzhu = parser.wrap_get_louzhu()
            # On page 1, the first reply gives the thread starter and the post date
if parser.wrap_get_page_num() == 1:
rplys = parser.wrap_get_replys()
if rplys:
if not tz.louzhu:
tz.louzhu = rplys[0].author
pub_date = rplys[0].time.strftime('%Y-%m-%d %H:%M')
            # Ask for the thread starter's ID manually
if not tz.louzhu:
tz.louzhu = input('无法提取楼主ID,请手工输入楼主ID:').strip()
if not tz.louzhu:
print('无法得到楼主ID')
break
            # Print the thread info
print_str = '标题:%s\n楼主:%s\n' % (tz.title, tz.louzhu)
if pub_date != None:
print_str += '发帖时间:%s\n' % pub_date
save_print(print_str)
            # Get the local processor name
tz.local_processor = parser.get_local_processor()
next_url = parser.wrap_get_next_pg_url()
pg_num = parser.wrap_get_page_num()
replys = parser.wrap_get_replys()
        # Total replies on this page
page_reply_count = len(replys)
        # Keep only the thread starter's replies
replys = [r for r in replys if r.author == tz.louzhu]
print('已下载第%d页, 有%d/%d条回复' %
(pg_num, len(replys), page_reply_count)
)
        # Add the page
pg = Page(url,
pg_num,
bool(next_url),
replys
)
tz.add_page(pg)
dl_count += 1
        # Last page of the thread?
if not next_url:
print('\n下载完帖子的最后一页(第{0}页),停止'.format(pg.page_num))
break
url = next_url
count = sum(len(p.replys) for p in tz.pages)
color_p1 = color.fore_color(len(tz.pages), color.Fore.YELLOW)
info = '共载入{pg_count}页,共有回复{rpl_count}条'.format(
pg_count=color_p1,
rpl_count=count
)
print(info)
    # Beep to signal completion
if winsound != None:
try:
winsound.Beep(400, 320) # (frequency, duration)
except:
pass
    # Escape tags that would collide with BP-text markup
def escape_bp_tag(text):
        # Escape BP tags
text = red.sub(r'^(<(?:time|mark)>)',
r'#\1',
text,
flags=red.MULTILINE)
        # 【引用开始】/【引用结束】 (quote start/end) and 【补充回复】 (supplementary reply)
text = red.sub(r'【(引用(?:开始|结束)|补充回复)】',
r'[\1]',
text)
        # Processing notes appended by the auto-processor
if text.endswith('【与上一条回复重复】') \
or text.endswith('【无法处理的回复】'):
text = text + '#'
return text
for p in tz.pages:
for r in p.replys:
r.text = escape_bp_tag(r.text)
return tz
| bsd-3-clause | 8,837,727,428,784,550,000 | 26.571429 | 82 | 0.433997 | false |
partofthething/home-assistant | homeassistant/components/netatmo/data_handler.py | 1 | 6057 | """The Netatmo data handler."""
from collections import deque
from datetime import timedelta
from functools import partial
from itertools import islice
import logging
from time import time
from typing import Deque, Dict, List
import pyatmo
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_time_interval
from .const import AUTH, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
CAMERA_DATA_CLASS_NAME = "CameraData"
WEATHERSTATION_DATA_CLASS_NAME = "WeatherStationData"
HOMECOACH_DATA_CLASS_NAME = "HomeCoachData"
HOMEDATA_DATA_CLASS_NAME = "HomeData"
HOMESTATUS_DATA_CLASS_NAME = "HomeStatus"
PUBLICDATA_DATA_CLASS_NAME = "PublicData"
NEXT_SCAN = "next_scan"
DATA_CLASSES = {
WEATHERSTATION_DATA_CLASS_NAME: pyatmo.WeatherStationData,
HOMECOACH_DATA_CLASS_NAME: pyatmo.HomeCoachData,
CAMERA_DATA_CLASS_NAME: pyatmo.CameraData,
HOMEDATA_DATA_CLASS_NAME: pyatmo.HomeData,
HOMESTATUS_DATA_CLASS_NAME: pyatmo.HomeStatus,
PUBLICDATA_DATA_CLASS_NAME: pyatmo.PublicData,
}
BATCH_SIZE = 3
DEFAULT_INTERVALS = {
HOMEDATA_DATA_CLASS_NAME: 900,
HOMESTATUS_DATA_CLASS_NAME: 300,
CAMERA_DATA_CLASS_NAME: 900,
WEATHERSTATION_DATA_CLASS_NAME: 600,
HOMECOACH_DATA_CLASS_NAME: 300,
PUBLICDATA_DATA_CLASS_NAME: 600,
}
SCAN_INTERVAL = 60
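# With the defaults above, each 60-second update cycle issues at most
# BATCH_SIZE (3) API requests, and a registered data class is refreshed no
# more often than its DEFAULT_INTERVALS entry allows.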
class NetatmoDataHandler:
"""Manages the Netatmo data handling."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize self."""
self.hass = hass
self._auth = hass.data[DOMAIN][entry.entry_id][AUTH]
self.listeners: List[CALLBACK_TYPE] = []
self._data_classes: Dict = {}
self.data = {}
self._queue: Deque = deque()
self._webhook: bool = False
async def async_setup(self):
"""Set up the Netatmo data handler."""
async_track_time_interval(
self.hass, self.async_update, timedelta(seconds=SCAN_INTERVAL)
)
self.listeners.append(
async_dispatcher_connect(
self.hass,
f"signal-{DOMAIN}-webhook-None",
self.handle_event,
)
)
async def async_update(self, event_time):
"""
Update device.
We do up to BATCH_SIZE calls in one update in order
to minimize the calls on the api service.
"""
for data_class in islice(self._queue, 0, BATCH_SIZE):
if data_class[NEXT_SCAN] > time():
continue
self._data_classes[data_class["name"]][NEXT_SCAN] = (
time() + data_class["interval"]
)
await self.async_fetch_data(
data_class["class"], data_class["name"], **data_class["kwargs"]
)
self._queue.rotate(BATCH_SIZE)
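        # Rotating the deque by BATCH_SIZE gives a simple round-robin: the
        # next cycle looks at the following BATCH_SIZE registered data
        # classes instead of always hitting the same ones.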
async def async_cleanup(self):
"""Clean up the Netatmo data handler."""
for listener in self.listeners:
listener()
async def handle_event(self, event):
"""Handle webhook events."""
if event["data"]["push_type"] == "webhook_activation":
_LOGGER.info("%s webhook successfully registered", MANUFACTURER)
self._webhook = True
elif event["data"]["push_type"] == "webhook_deactivation":
_LOGGER.info("%s webhook unregistered", MANUFACTURER)
self._webhook = False
elif event["data"]["push_type"] == "NACamera-connection":
_LOGGER.debug("%s camera reconnected", MANUFACTURER)
self._data_classes[CAMERA_DATA_CLASS_NAME][NEXT_SCAN] = time()
async def async_fetch_data(self, data_class, data_class_entry, **kwargs):
"""Fetch data and notify."""
try:
self.data[data_class_entry] = await self.hass.async_add_executor_job(
partial(data_class, **kwargs),
self._auth,
)
for update_callback in self._data_classes[data_class_entry][
"subscriptions"
]:
if update_callback:
update_callback()
except pyatmo.NoDevice as err:
_LOGGER.debug(err)
self.data[data_class_entry] = None
except pyatmo.ApiError as err:
_LOGGER.debug(err)
async def register_data_class(
self, data_class_name, data_class_entry, update_callback, **kwargs
):
"""Register data class."""
if data_class_entry in self._data_classes:
self._data_classes[data_class_entry]["subscriptions"].append(
update_callback
)
return
self._data_classes[data_class_entry] = {
"class": DATA_CLASSES[data_class_name],
"name": data_class_entry,
"interval": DEFAULT_INTERVALS[data_class_name],
NEXT_SCAN: time() + DEFAULT_INTERVALS[data_class_name],
"kwargs": kwargs,
"subscriptions": [update_callback],
}
await self.async_fetch_data(
DATA_CLASSES[data_class_name], data_class_entry, **kwargs
)
self._queue.append(self._data_classes[data_class_entry])
_LOGGER.debug("Data class %s added", data_class_entry)
async def unregister_data_class(self, data_class_entry, update_callback):
"""Unregister data class."""
if update_callback not in self._data_classes[data_class_entry]["subscriptions"]:
return
self._data_classes[data_class_entry]["subscriptions"].remove(update_callback)
if not self._data_classes[data_class_entry].get("subscriptions"):
self._queue.remove(self._data_classes[data_class_entry])
self._data_classes.pop(data_class_entry)
_LOGGER.debug("Data class %s removed", data_class_entry)
@property
def webhook(self) -> bool:
"""Return the webhook state."""
return self._webhook
| mit | -5,802,619,240,173,583,000 | 32.65 | 88 | 0.616312 | false |
hrpt-se/hrpt | urls.py | 1 | 2104 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import RedirectView, TemplateView
from django.views.static import serve
from contact_form.views import ContactFormView
from apps.partnersites.views import colors_css
from apps.pollster.views import map_tile, map_click, chart_data
from apps.hrptinfo.forms import CaptchaContactForm
admin.autodiscover()
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^admin/manual-newsletters/', include('apps.reminder.nladminurls')),
url(r'^admin/surveys-editor/', include('apps.pollster.urls')),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)/tile/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)$',
map_tile, name='pollster_map_tile'),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)/click/(?P<lat>[\d.-]+)/(?P<lng>[\d.-]+)$',
map_click, name='pollster_map_click'),
url(r'^surveys/(?P<survey_shortname>.+)/charts/(?P<chart_shortname>.+)\.json$', chart_data,
name='pollster_chart_data'),
url(r'^survey/', include('apps.survey.urls')),
url(r'^reminder/', include('apps.reminder.urls')),
url(r'^registrera/$', RedirectView.as_view(url='/accounts/register')),
url(r'^accounts/', include('apps.accounts.urls')),
url(r'^login/', include('loginurl.urls')),
url(r'^count/', include('apps.count.urls')),
url(r'^contact/$', ContactFormView.as_view(form_class=CaptchaContactForm), name='contact_form'),
url(r'^contact/sent/$', TemplateView.as_view(template_name='contact_form/contact_form_sent.html'),
name='contact_form_sent'),
url(r'^colors.css$', colors_css)
]
# Catchall
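# (kept last so the explicit URL patterns above take precedence over CMS pages)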
urlpatterns += i18n_patterns(
url(r'^', include('cms.urls'))
)
if settings.DEBUG:
urlpatterns = [
url(r'^upload/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT})
] + staticfiles_urlpatterns() + urlpatterns
| agpl-3.0 | 1,752,537,007,993,838,300 | 43.765957 | 117 | 0.673954 | false |
etingof/pysnmp | examples/hlapi/v3arch/asyncore/sync/agent/ntforg/custom-v1-trap.py | 1 | 1455 | """
Custom SNMPv1 TRAP
++++++++++++++++++
Send SNMPv1 TRAP through unified SNMPv3 message processing framework.
Original v1 TRAP fields are mapped into dedicated variable-bindings,
(see `RFC2576 <https://www.ietf.org/rfc/rfc2576.txt>`_) for details.
* SNMPv1
* with community name 'public'
* over IPv4/UDP
* send TRAP notification
* with Generic Trap #6 (enterpriseSpecific) and Specific Trap 432
* overriding Uptime value with 12345
* overriding Agent Address with '127.0.0.1'
* overriding Enterprise OID with 1.3.6.1.4.1.20408.4.1.1.2
* include managed object information '1.3.6.1.2.1.1.1.0' = 'my system'
Functionally similar to:
| $ snmptrap -v1 -c public demo.snmplabs.com 1.3.6.1.4.1.20408.4.1.1.2 127.0.0.1 6 432 12345 1.3.6.1.2.1.1.1.0 s "my system"
"""#
from pysnmp.hlapi import *
iterator = sendNotification(
SnmpEngine(),
CommunityData('public', mpModel=0),
UdpTransportTarget(('demo.snmplabs.com', 162)),
ContextData(),
'trap',
NotificationType(
ObjectIdentity('1.3.6.1.4.1.20408.4.1.1.2.0.432')
).addVarBinds(
('1.3.6.1.2.1.1.3.0', 12345),
('1.3.6.1.6.3.18.1.3.0', '127.0.0.1'),
('1.3.6.1.6.3.1.1.4.3.0', '1.3.6.1.4.1.20408.4.1.1.2'),
('1.3.6.1.2.1.1.1.0', OctetString('my system'))
).loadMibs(
'SNMPv2-MIB', 'SNMP-COMMUNITY-MIB'
)
)
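# A TRAP is unconfirmed, so the single next() call below only surfaces
# local/transport errors; there is no response PDU to inspect.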
errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
if errorIndication:
print(errorIndication)
| bsd-2-clause | 5,618,638,470,195,247,000 | 29.3125 | 124 | 0.653608 | false |
lantianlz/zx | scripts/crontab/worker_delay_monitor.py | 1 | 1433 | # -*- coding: utf-8 -*-
"""
@note: Monitor the backlog of worker queues and send a warning email when a queue exceeds its configured limit
"""
import sys
import os
# Add the parent directories to sys.path so sibling modules can be imported
SITE_ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([os.path.abspath(os.path.join(SITE_ROOT, '../')),
os.path.abspath(os.path.join(SITE_ROOT, '../../')),
])
os.environ['DJANGO_SETTINGS_MODULE'] = 'www.settings'
from common import cache
from common.utils import send_email
from django.conf import settings
WORKER_CONFIG = [
{
'name': 'email_worker',
'limit': 200,
},
]
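# Each entry names a worker queue and the backlog length that should trigger
# a warning email. The queue length is read with llen(), so the queue is
# presumably a Redis-style list consumed by the matching worker.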
def get_delay_count(key):
cache_obj = cache.Cache(cache.CACHE_WORKER)
return cache_obj.llen(key)
def main():
warn_list = []
for item in WORKER_CONFIG:
count = get_delay_count(item['name'])
print u'---%s----%s----' % (item['name'], count)
if count > item.get('limit'):
item['count'] = count
warn_list.append(item)
if warn_list:
title = u'%s主机 worker积压警告' % (settings.SERVER_NAME, )
content = u''
for item in warn_list:
content += u'%(name)s:积压任务数%(count)s, 警戒值为%(limit)s\n' % item
send_email(emails=settings.NOTIFICATION_EMAIL, title=title, content=content, type="text")
print 'ok'
if __name__ == '__main__':
main()
| gpl-2.0 | 7,332,729,005,844,852,000 | 22.490909 | 97 | 0.551596 | false |
decebel/dataAtom_alpha | bin/plug/py/external/pattern/text/de/parser/__init__.py | 1 | 8935 | #### PATTERN | DE | RULE-BASED SHALLOW PARSER ######################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Gerold Schneider, Martin Volk and University of Antwerp, Belgium
# Authors: Gerold Schneider & Martin Volk (German language model), Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import os
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
# The tokenizer, chunker and relation finder are inherited from pattern.en.parser.
# The tagger is based on Schneider & Volk's German language model:
# Schneider, G., Volk, M. (1998). Adding Manual Constraints and Lexical Look-up to a Brill-Tagger for German.
# In: Proceedings of the ESSLLI workshop on recent advances in corpus annotation. Saarbrucken, Germany.
# http://www.zora.uzh.ch/28579/
# Accuracy is reported around 96%, but Pattern scores may vary from Schneider & Volk's original
# due to STTS => Penn Treebank mapping etc.
import sys; sys.path.insert(0, os.path.join(MODULE, "..", ".."))
from en.parser import Lexicon
from en.parser import PUNCTUATION, tokenize as _en_tokenize, parse as _en_parse, TaggedString
from en.parser import commandline
#### TOKENIZER #####################################################################################
ABBREVIATIONS = [
"Abs.", "Abt.", "Ass.", "Br.", "Ch.", "Chr.", "Cie.", "Co.", "Dept.", "Diff.",
"Dr.", "Eidg.", "Exp.", "Fam.", "Fr.", "Hrsg.", "Inc.", "Inv.", "Jh.", "Jt.", "Kt.",
"Mio.", "Mrd.", "Mt.", "Mte.", "Nr.", "Nrn.", "Ord.", "Ph.", "Phil.", "Pkt.",
"Prof.", "Pt.", " S.", "St.", "Stv.", "Tit.", "VII.", "al.", "begr.","bzw.",
"chem.", "dent.", "dipl.", "e.g.", "ehem.", "etc.", "excl.", "exkl.", "hum.",
"i.e.", "incl.", "ing.", "inkl.", "int.", "iur.", "lic.", "med.", "no.", "oec.",
"phil.", "phys.", "pp.", "psych.", "publ.", "rer.", "sc.", "soz.", "spez.", "stud.",
"theol.", "usw.", "vet.", "vgl.", "vol.", "wiss.",
"d.h.", "h.c.", u"o.ä.", "u.a.", "z.B.", "z.T.", "z.Zt."
]
def tokenize(s, punctuation=PUNCTUATION, abbreviations=ABBREVIATIONS, replace={}):
return _en_tokenize(s, punctuation, abbreviations, replace)
_tokenize = tokenize
#### LEMMATIZER ####################################################################################
# Word lemmas using singularization and verb conjugation from the inflect module.
try:
from ..inflect import singularize, conjugate, predicative
except:
try:
sys.path.append(os.path.join(MODULE, ".."))
from inflect import singularize, conjugate, predicative
except:
try:
from pattern.de.inflect import singularize, conjugate, predicative
except:
singularize = lambda w: w
conjugate = lambda w, t: w
predicative = lambda w: w
def lemma(word, pos="NN"):
if pos == "NNS":
return singularize(word)
if pos.startswith(("VB","MD")):
return conjugate(word, "infinitive") or word
if pos.startswith(("DT", "JJ")):
return predicative(word)
return word
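# Illustrative: lemma("Tische", "NNS") should yield the singular ("Tisch") via
# singularize(), provided the pattern.de inflect module imported above is
# available; otherwise the word is returned unchanged.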
def find_lemmata(tagged):
for token in tagged:
token.append(lemma(token[0].lower(), pos=len(token) > 1 and token[1] or None))
return tagged
#### PARSER ########################################################################################
# pattern.en.find_tags() has an optional "lexicon" parameter.
# We'll pass the German lexicon to it instead of the default English lexicon:
lexicon = LEXICON = Lexicon()
lexicon.path = os.path.join(MODULE, "brill-lexicon.txt")
lexicon.lexical_rules.path = os.path.join(MODULE, "brill-lexical.txt")
lexicon.contextual_rules.path = os.path.join(MODULE, "brill-contextual.txt")
lexicon.named_entities.tag = "NE"
# Stuttgart/Tubinger Tagset (STTS):
# https://files.ifi.uzh.ch/cl/tagger/UIS-STTS-Diffs.html
PENN = PENNTREEBANK = TREEBANK = "penntreebank"
STTS = "stts"
stts = {
"ADJA": "JJ", # das große Haus
"ADJD": "JJ", # er ist schnell
"ADV": "RB", # schon
"APPR": "IN", # in der Stadt
"APPRART": "IN", # im Haus
"APPO": "IN", # der Sache wegen
"APZR": "IN", # von jetzt an
"ART": "DT", # der, die, eine
"ARTDEF": "DT", # der, die
"ARTIND": "DT", # eine
"CARD": "CD", # zwei
"CARDNUM": "CD", # 3
"KOUI": "IN", # [um] zu leben
"KOUS": "IN", # weil, damit, ob
"KON": "CC", # und, oder, aber
"KOKOM": "IN", # als, wie
"KONS": "IN", # usw.
"NN": "NN", # Tisch, Herr
"NNS": "NNS", # Tischen, Herren
"NE": "NNP", # Hans, Hamburg
"PDS": "DT", # dieser, jener
"PDAT": "DT", # jener Mensch
"PIS": "DT", # keiner, viele, niemand
"PIAT": "DT", # kein Mensch
"PIDAT": "DT", # die beiden Brüder
"PPER": "PRP", # ich, er, ihm, mich, dir
"PPOS": "PRP$", # meins, deiner
"PPOSAT": "PRP$", # mein Buch, deine Mutter
"PRELS": "WDT", # der Hund, [der] bellt
"PRELAT": "WDT", # der Mann, [dessen] Hund bellt
"PRF": "PRP", # erinnere [dich]
"PWS": "WP", # wer
"PWAT": "WP", # wessen, welche
"PWAV": "WRB", # warum, wo, wann
"PAV": "RB", # dafur, dabei, deswegen, trotzdem
"PTKZU": "TO", # zu gehen, zu sein
"PTKNEG": "RB", # nicht
"PTKVZ": "RP", # pass [auf]!
"PTKANT": "UH", # ja, nein, danke, bitte
"PTKA": "RB", # am schönsten, zu schnell
"VVFIN": "VB", # du [gehst], wir [kommen] an
"VAFIN": "VB", # du [bist], wir [werden]
"VVINF": "VB", # gehen, ankommen
"VAINF": "VB", # werden, sein
"VVIZU": "VB", # anzukommen
"VVIMP": "VB", # [komm]!
"VAIMP": "VB", # [sei] ruhig!
"VVPP": "VBN", # gegangen, angekommen
"VAPP": "VBN", # gewesen
"VMFIN": "MD", # dürfen
"VMINF": "MD", # wollen
"VMPP": "MD", # gekonnt
"SGML": "SYM", #
"FM": "FW", #
"ITJ": "UH", # ach, tja
"XY": "NN", #
"XX": "NN", #
"LINUM": "LS", # 1.
"C": ",", # ,
"Co": ":", # :
"Ex": ".", # !
"Pc": ")", # )
"Po": "(", # (
"Q": ".", # ?
"QMc": "\"", # "
"QMo": "\"", # "
"S": ".", # .
"Se": ":", # ;
}
def stts2penntreebank(tag):
""" Converts an STTS tag to Penn Treebank II tag.
For example: ohne APPR => ohne/IN
"""
return stts.get(tag, tag)
def parse(s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs):
""" Takes a string (sentences) and returns a tagged Unicode string.
Sentences in the output are separated by newlines.
"""
if tokenize:
s = _tokenize(s)
if isinstance(s, (list, tuple)):
s = [isinstance(s, basestring) and s.split(" ") or s for s in s]
if isinstance(s, basestring):
s = [s.split(" ") for s in s.split("\n")]
# Reuse the English parser:
kwargs.update({
"lemmata": False,
"light": False,
"lexicon": LEXICON,
"language": "de",
"default": "NN",
"map": kwargs.get("tagset", "") != STTS and stts2penntreebank or None,
})
# The German lexicon uses "ss" instead of "ß".
# Instead of simply replacing it, we keep a hash map of the normalized words.
# After parsing we restore the "ß" so the output stays identical to the input.
m = dict((token.replace(u"ß", "ss"), token) for sentence in s for token in sentence)
s = [[token.replace(u"ß", "ss") for token in sentence] for sentence in s]
s = _en_parse(s, False, tags, chunks, relations, **kwargs)
p = [[[m[token[0]]] + token[1:] for token in sentence] for sentence in s.split()]
p = "\n".join([" ".join(["/".join(token) for token in sentence]) for sentence in p])
s = TaggedString(p, tags=s.tags, language="de")
# Use pattern.de.inflect for lemmatization:
if lemmata:
p = [find_lemmata(sentence) for sentence in s.split()]
p = "\n".join([" ".join(["/".join(token) for token in sentence]) for sentence in p])
s = TaggedString(p, tags=s.tags+["lemma"], language="de")
return s
def tag(s, tokenize=True, encoding="utf-8"):
""" Returns a list of (token, tag)-tuples from the given string.
"""
tags = []
for sentence in parse(s, tokenize, True, False, False, False, encoding).split():
for token in sentence:
tags.append((token[0], token[1]))
return tags
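# Illustrative usage sketch (not part of the original module); the exact
# tags depend on the bundled Brill lexicon files, so the output below is
# indicative only:
#
#   >>> tag(u"Die Katze schläft.")
#   [(u'Die', u'DT'), (u'Katze', u'NN'), (u'schläft', u'VB'), (u'.', u'.')]
#
# parse() returns a TaggedString in which each token is joined with its
# tags by slashes, one sentence per line.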
#### COMMAND LINE ##################################################################################
# From the folder that contains the "pattern" folder:
# python -m pattern.de.parser xml -s "Ein Unglück kommt selten allein." -OTCLI
if __name__ == "__main__":
commandline(parse) | apache-2.0 | 4,828,502,076,624,707,000 | 39.572727 | 112 | 0.531541 | false |
guoci/python3-xlib-trunk | Xlib/protocol/rq.py | 1 | 46900 | # Xlib.protocol.rq -- structure primitives for request, events and errors
#
# Copyright (C) 2000-2002 Peter Liljenberg <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Standard modules
import sys
import traceback
import struct
from array import array
import types
# Xlib modules
from Xlib import X
from Xlib.support import lock
_PY3 = sys.version[0] >= '3'
# in Python 3, bytes are an actual array; in python 2, bytes are still
# string-like, so in order to get an array element we need to call ord()
if _PY3:
def _bytes_item(x):
return x
else:
def _bytes_item(x):
return ord(x)
class BadDataError(Exception): pass
# These are struct codes, we know their byte sizes
signed_codes = { 1: 'b', 2: 'h', 4: 'l' }
unsigned_codes = { 1: 'B', 2: 'H', 4: 'L' }
# Unfortunately, we don't know the array sizes of B, H and L, since
# these use the underlying architecture's size for a char, short and
# long. Therefore we probe for their sizes, and additionally create
# a mapping that translates from struct codes to array codes.
#
# Bleah.
array_unsigned_codes = { }
struct_to_array_codes = { }
for c in 'bhil':
size = array(c).itemsize
array_unsigned_codes[size] = c.upper()
try:
struct_to_array_codes[signed_codes[size]] = c
struct_to_array_codes[unsigned_codes[size]] = c.upper()
except KeyError:
pass
# print array_unsigned_codes, struct_to_array_codes
class Field:
"""Field objects represent the data fields of a Struct.
Field objects must have the following attributes:
name -- the field name, or None
structcode -- the struct codes representing this field
structvalues -- the number of values encodes by structcode
Additionally, these attributes should either be None or real methods:
check_value -- check a value before it is converted to binary
parse_value -- parse a value after it has been converted from binary
If one of these attributes is None, no check or additional
parsing will be done on values when converting to or from binary
form. Otherwise, the methods should have the following behaviour:
newval = check_value(val)
Check that VAL is legal when converting to binary form. The
value can also be converted to another Python value. In any
case, return the possibly new value. NEWVAL should be a
single Python value if structvalues is 1, a tuple of
structvalues elements otherwise.
newval = parse_value(val, display)
VAL is an unpacked Python value, which now can be further
refined. DISPLAY is the current Display object. Return the
new value. VAL will be a single value if structvalues is 1,
a tuple of structvalues elements otherwise.
If `structcode' is None the Field must have the method
f.parse_binary_value() instead. See its documentation string for
details.
"""
name = None
default = None
structcode = None
structvalues = 0
check_value = None
parse_value = None
keyword_args = 0
def __init__(self):
pass
def parse_binary_value(self, data, display, length, format):
"""value, remaindata = f.parse_binary_value(data, display, length, format)
Decode a value for this field from the binary string DATA.
If there are a LengthField and/or a FormatField connected to this
field, their values will be LENGTH and FORMAT, respectively. If
there are no such fields the parameters will be None.
DISPLAY is the display involved, which is really only used by
the Resource fields.
The decoded value is returned as VALUE, and the remaining part
of DATA should be returned as REMAINDATA.
"""
raise RuntimeError('Neither structcode nor parse_binary_value provided for %s'
% self)
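# Illustrative sketch (not part of the original module): a fixed-size custom
# field only needs a structcode and structvalues, e.g. a hypothetical
# 64-bit card:
#
#   class Card64(ValueField):
#       structcode = 'Q'
#       structvalues = 1
#
# Variable-size fields leave structcode as None and implement
# parse_binary_value() (and usually pack_value()) instead, as String8 and
# List do below.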
class Pad(Field):
def __init__(self, size):
self.size = size
self.value = b'\0' * size
self.structcode = '%dx' % size
self.structvalues = 0
class ConstantField(Field):
def __init__(self, value):
self.value = value
class Opcode(ConstantField):
structcode = 'B'
structvalues = 1
class ReplyCode(ConstantField):
structcode = 'B'
structvalues = 1
def __init__(self):
self.value = 1
class LengthField(Field):
"""A LengthField stores the length of some other Field whose size
may vary, e.g. List and String8.
Its name should be the same as the name of the field whose size
it stores. The other_fields attribute can be used to specify the
names of other fields whose sizes are stored by this field, so
a single length field can set the length of multiple fields.
The lf.get_binary_value() method of LengthFields is not used, instead
a lf.get_binary_length() should be provided.
Unless LengthField.get_binary_length() is overridden in child classes,
there should also be a lf.calc_length().
"""
structcode = 'L'
structvalues = 1
other_fields = None
def calc_length(self, length):
"""newlen = lf.calc_length(length)
Return a new length NEWLEN based on the provided LENGTH.
"""
return length
class TotalLengthField(LengthField):
pass
class RequestLength(TotalLengthField):
structcode = 'H'
structvalues = 1
def calc_length(self, length):
return length // 4
class ReplyLength(TotalLengthField):
structcode = 'L'
structvalues = 1
def calc_length(self, length):
return (length - 32) // 4
class LengthOf(LengthField):
def __init__(self, name, size):
if isinstance(name, (list, tuple)):
self.name = name[0]
self.other_fields = name[1:]
else:
self.name = name
self.structcode = unsigned_codes[size]
class OddLength(LengthField):
structcode = 'B'
structvalues = 1
def __init__(self, name):
self.name = name
def calc_length(self, length):
return length % 2
def parse_value(self, value, display):
if value == 0:
return 'even'
else:
return 'odd'
class FormatField(Field):
"""A FormatField encodes the format of some other field, in a manner
similar to LengthFields.
The ff.get_binary_value() method is not used, replaced by
ff.get_binary_format().
"""
structvalues = 1
def __init__(self, name, size):
self.name = name
self.structcode = unsigned_codes[size]
Format = FormatField
class ValueField(Field):
def __init__(self, name, default = None):
self.name = name
self.default = default
class Int8(ValueField):
structcode = 'b'
structvalues = 1
class Int16(ValueField):
structcode = 'h'
structvalues = 1
class Int32(ValueField):
structcode = 'l'
structvalues = 1
class Card8(ValueField):
structcode = 'B'
structvalues = 1
class Card16(ValueField):
structcode = 'H'
structvalues = 1
class Card32(ValueField):
structcode = 'L'
structvalues = 1
class Resource(Card32):
cast_function = '__resource__'
class_name = 'resource'
def __init__(self, name, codes = (), default = None):
Card32.__init__(self, name, default)
self.codes = codes
def check_value(self, value):
try:
return getattr(value, self.cast_function)()
except AttributeError:
return value
def parse_value(self, value, display):
# if not display:
# return value
if value in self.codes:
return value
c = display.get_resource_class(self.class_name)
if c:
return c(display, value)
else:
return value
class Window(Resource):
cast_function = '__window__'
class_name = 'window'
class Pixmap(Resource):
cast_function = '__pixmap__'
class_name = 'pixmap'
class Drawable(Resource):
cast_function = '__drawable__'
class_name = 'drawable'
class Fontable(Resource):
cast_function = '__fontable__'
class_name = 'fontable'
class Font(Resource):
cast_function = '__font__'
class_name = 'font'
class GC(Resource):
cast_function = '__gc__'
class_name = 'gc'
class Colormap(Resource):
cast_function = '__colormap__'
class_name = 'colormap'
class Cursor(Resource):
cast_function = '__cursor__'
class_name = 'cursor'
class Bool(ValueField):
structvalues = 1
structcode = 'B'
def check_value(self, value):
return not not value
class Set(ValueField):
structvalues = 1
def __init__(self, name, size, values, default = None):
ValueField.__init__(self, name, default)
self.structcode = unsigned_codes[size]
self.values = values
def check_value(self, val):
if val not in self.values:
raise ValueError('field %s: argument %s not in %s'
% (self.name, val, self.values))
return val
class Gravity(Set):
def __init__(self, name):
Set.__init__(self, name, 1, (X.ForgetGravity, X.StaticGravity,
X.NorthWestGravity, X.NorthGravity,
X.NorthEastGravity, X.WestGravity,
X.CenterGravity, X.EastGravity,
X.SouthWestGravity, X.SouthGravity,
X.SouthEastGravity))
class FixedString(ValueField):
structvalues = 1
def __init__(self, name, size):
ValueField.__init__(self, name)
self.structcode = '%ds' % size
class String8(ValueField):
structcode = None
def __init__(self, name, pad = 1):
ValueField.__init__(self, name)
self.pad = pad
def pack_value(self, val):
slen = len(val)
val = val.encode('UTF-8')
# if _PY3 and type(val) is str:
# val = val.encode('UTF-8')
if self.pad:
return val + b'\0' * ((4 - slen % 4) % 4), slen, None
else:
return val, slen, None
def parse_binary_value(self, data, display, length, format):
if length is None:
try:
return data.decode('UTF-8'), b''
except UnicodeDecodeError:
return data, b''
if self.pad:
slen = length + ((4 - length % 4) % 4)
else:
slen = length
s = data[:length]
try:
s = s.decode('UTF-8')
except UnicodeDecodeError:
pass # return as bytes
return s, data[slen:]
class String16(ValueField):
structcode = None
def __init__(self, name, pad = 1):
ValueField.__init__(self, name)
self.pad = pad
def pack_value(self, val):
# Convert 8-byte string into 16-byte list
if type(val) is str:
val = [ord(c) for c in val]
slen = len(val)
if self.pad:
pad = b'\0\0' * (slen % 2)
else:
pad = b''
return (struct.pack(*('>' + 'H' * slen, ) + tuple(val)) + pad,
slen, None)
def parse_binary_value(self, data, display, length, format):
if length == 'odd':
length = len(data) // 2 - 1
elif length == 'even':
length = len(data) // 2
if self.pad:
slen = length + (length % 2)
else:
slen = length
return (struct.unpack('>' + 'H' * length, data[:length * 2]),
data[slen * 2:])
class List(ValueField):
"""The List, FixedList and Object fields store compound data objects.
The type of data objects must be provided as an object with the
following attributes and methods:
...
"""
structcode = None
def __init__(self, name, type, pad = 1):
ValueField.__init__(self, name)
self.type = type
self.pad = pad
def parse_binary_value(self, data, display, length, format):
if length is None:
ret = []
if self.type.structcode is None:
while data:
val, data = self.type.parse_binary(data, display)
ret.append(val)
else:
scode = '=' + self.type.structcode
slen = struct.calcsize(scode)
pos = 0
while pos + slen <= len(data):
v = struct.unpack(scode, data[pos: pos + slen])
if self.type.structvalues == 1:
v = v[0]
if self.type.parse_value is None:
ret.append(v)
else:
ret.append(self.type.parse_value(v, display))
pos = pos + slen
data = data[pos:]
else:
ret = [None] * int(length)
if self.type.structcode is None:
for i in range(0, length):
ret[i], data = self.type.parse_binary(data, display)
else:
scode = '=' + self.type.structcode
slen = struct.calcsize(scode)
pos = 0
for i in range(0, length):
v = struct.unpack(scode, data[pos: pos + slen])
if self.type.structvalues == 1:
v = v[0]
if self.type.parse_value is None:
ret[i] = v
else:
ret[i] = self.type.parse_value(v, display)
pos = pos + slen
data = data[pos:]
if self.pad:
data = data[len(data) % 4:]
return ret, data
def pack_value(self, val):
# Single-char values, we'll assume that means integer lists.
if self.type.structcode and len(self.type.structcode) == 1:
if self.type.check_value is not None:
val = [self.type.check_value(v) for v in val]
data = array(struct_to_array_codes[self.type.structcode],
val).tobytes()
else:
data = []
for v in val:
data.append(self.type.pack_value(v))
data = b''.join(data)
if self.pad:
dlen = len(data)
data = data + b'\0' * ((4 - dlen % 4) % 4)
return data, len(val), None
class FixedList(List):
def __init__(self, name, size, type, pad = 1):
List.__init__(self, name, type, pad)
self.size = size
def parse_binary_value(self, data, display, length, format):
return List.parse_binary_value(self, data, display, self.size, format)
def pack_value(self, val):
if len(val) != self.size:
raise BadDataError('length mismatch for FixedList %s' % self.name)
return List.pack_value(self, val)
class Object(ValueField):
def __init__(self, name, type, default = None):
ValueField.__init__(self, name, default)
self.type = type
self.structcode = self.type.structcode
self.structvalues = self.type.structvalues
def parse_binary_value(self, data, display, length, format):
return self.type.parse_binary(data, display)
def parse_value(self, val, display):
return self.type.parse_value(val, display)
def pack_value(self, val):
return self.type.pack_value(val)
def check_value(self, val):
if type(val) is tuple:
return val #TODO_PY3, reverted this to r135.
# code below added at r159
vals = []
i = 0
for f in self.type.fields:
if f.name:
if f.check_value is None:
v = val[i]
else:
v = f.check_value(val[i])
if f.structvalues == 1:
vals.append(v)
else:
vals.extend(v)
i = i + 1
return vals
if type(val) is dict:
data = val
elif isinstance(val, DictWrapper):
data = val._data
else:
raise TypeError('Object value must be tuple, dictionary or DictWrapper: %s' % val)
vals = []
for f in self.type.fields:
if f.name:
if f.check_value is None:
v = data[f.name]
else:
v = f.check_value(data[f.name])
if f.structvalues == 1:
vals.append(v)
else:
vals.extend(v)
return vals
class PropertyData(ValueField):
structcode = None
def parse_binary_value(self, data, display, length, format):
if length is None:
length = len(data) // (format // 8)
else:
length = int(length)
if format == 0:
ret = None
elif format == 8:
ret = (8, data[:length])
data = data[length + ((4 - length % 4) % 4):]
elif format == 16:
ret = (16, array(array_unsigned_codes[2], data[:2 * length]))
data = data[2 * (length + length % 2):]
elif format == 32:
ret = (32, array(array_unsigned_codes[4], data[:4 * length]))
data = data[4 * length:]
if ret != None and type(ret[1]) is bytes:
ret = (ret[0], ret[1].decode('UTF-8'))
return ret, data
def pack_value(self, value):
fmt, val = value
if fmt not in (8, 16, 32):
raise BadDataError('Invalid property data format %d' % fmt)
if type(val) is str:
val = val.encode('UTF-8')
if type(val) is bytes:
size = fmt // 8
vlen = len(val)
if vlen % size:
vlen = vlen - vlen % size
data = val[:vlen]
else:
data = val
dlen = vlen // size
else:
if type(val) is tuple:
val = list(val)
size = fmt // 8
data = array(array_unsigned_codes[size], val).tobytes()
dlen = len(val)
dl = len(data)
data = data + b'\0' * ((4 - dl % 4) % 4)
return data, dlen, fmt
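# Illustrative sketch (not part of the original module): property data is
# passed around as a (format, value) pair, where format is 8, 16 or 32:
#
#   PropertyData('value').pack_value((8, b'spam'))      # byte string data
#   PropertyData('value').pack_value((32, [1, 2, 3]))   # 32-bit integers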
class FixedPropertyData(PropertyData):
def __init__(self, name, size):
PropertyData.__init__(self, name)
self.size = size
def parse_binary_value(self, data, display, length, format):
return PropertyData.parse_binary_value(self, data, display,
self.size // (format // 8), format)
def pack_value(self, value):
data, dlen, fmt = PropertyData.pack_value(self, value)
if len(data) != self.size:
raise BadDataError('Wrong data length for FixedPropertyData: %s'
% (value, ))
return data, dlen, fmt
class ValueList(Field):
structcode = None
keyword_args = 1
default = 'usekeywords'
def __init__(self, name, mask, pad, *fields):
self.name = name
self.maskcode = '=%s%dx' % (unsigned_codes[mask], pad)
self.maskcodelen = struct.calcsize(self.maskcode)
self.fields = []
flag = 1
for f in fields:
if f.name:
self.fields.append((f, flag))
flag = flag << 1
def pack_value(self, arg, keys):
mask = 0
data = b''
if arg == self.default:
arg = keys
for field, flag in self.fields:
if field.name in arg:
mask = mask | flag
val = arg[field.name]
if field.check_value is not None:
val = field.check_value(val)
d = struct.pack('=' + field.structcode, val)
data = data + d + b'\0' * (4 - len(d))
return struct.pack(self.maskcode, mask) + data, None, None
def parse_binary_value(self, data, display, length, format):
r = {}
mask = int(struct.unpack(self.maskcode, data[:self.maskcodelen])[0])
data = data[self.maskcodelen:]
for field, flag in self.fields:
if mask & flag:
if field.structcode:
vals = struct.unpack('=' + field.structcode,
data[:struct.calcsize('=' + field.structcode)])
if field.structvalues == 1:
vals = vals[0]
if field.parse_value is not None:
vals = field.parse_value(vals, display)
else:
vals, d = field.parse_binary_value(data[:4], display, None, None)
r[field.name] = vals
data = data[4:]
return DictWrapper(r), data
class KeyboardMapping(ValueField):
structcode = None
def parse_binary_value(self, data, display, length, format):
if length is None:
dlen = len(data)
else:
dlen = 4 * length * format
a = array(array_unsigned_codes[4], data[:dlen])
ret = []
for i in range(0, len(a), format):
ret.append(a[i : i + format])
return ret, data[dlen:]
def pack_value(self, value):
keycodes = 0
for v in value:
keycodes = max(keycodes, len(v))
a = array(array_unsigned_codes[4])
for v in value:
for k in v:
a.append(k)
for i in range(len(v), keycodes):
a.append(X.NoSymbol)
return a.tobytes(), len(value), keycodes
class ModifierMapping(ValueField):
structcode = None
def parse_binary_value(self, data, display, length, format):
a = array(array_unsigned_codes[1], data[:8 * format])
ret = []
for i in range(0, 8):
ret.append(a[i * format : (i + 1) * format])
return ret, data[8 * format:]
def pack_value(self, value):
if len(value) != 8:
raise BadDataError('ModifierMapping list should have eight elements')
keycodes = 0
for v in value:
keycodes = max(keycodes, len(v))
a = array(array_unsigned_codes[1])
for v in value:
for k in v:
a.append(k)
for i in range(len(v), keycodes):
a.append(0)
return a.tobytes(), len(value), keycodes
class EventField(ValueField):
structcode = None
def pack_value(self, value):
if not isinstance(value, Event):
raise BadDataError('%s is not an Event for field %s' % (value, self.name))
return value._binary, None, None
def parse_binary_value(self, data, display, length, format):
assert type(data) is bytes
from . import event
estruct = display.event_classes.get(data[0] & 0x7f, event.AnyEvent)
if type(estruct) == dict:
# this etype refers to a set of sub-events with individual subcodes
estruct = estruct[_bytes_item(data[1])]
return estruct(display = display, binarydata = data[:32]), data[32:]
#
# Objects usable for List and FixedList fields.
# Struct is also usable.
#
class ScalarObj:
def __init__(self, code):
self.structcode = code
self.structvalues = 1
self.parse_value = None
self.check_value = None
Card8Obj = ScalarObj('B')
Card16Obj = ScalarObj('H')
Card32Obj = ScalarObj('L')
class ResourceObj:
structcode = 'L'
structvalues = 1
def __init__(self, class_name):
self.class_name = class_name
self.check_value = None
def parse_value(self, value, display):
# if not display:
# return value
c = display.get_resource_class(self.class_name)
if c:
return c(display, value)
else:
return value
WindowObj = ResourceObj('window')
ColormapObj = ResourceObj('colormap')
class StrClass:
structcode = None
def pack_value(self, val):
if type(val) is not bytes:
val = val.encode('UTF-8')
if _PY3:
val = bytes([len(val)]) + val
else:
val = chr(len(val)) + val
return val
def parse_binary(self, data, display):
assert type(data) is bytes
slen = data[0] + 1
s = data[1:slen]
try:
s = s.decode('UTF-8')
except UnicodeDecodeError:
pass # return as bytes
return s, data[slen:]
Str = StrClass()
class Struct:
"""Struct objects represents a binary data structure. It can
contain both fields with static and dynamic sizes. However, all
static fields must appear before all dynamic fields.
Fields are represented by various subclasses of the abstract base
class Field. The fields of a structure are given as arguments
when instantiating a Struct object.
Struct objects have two public methods:
to_binary() -- build a binary representation of the structure
with the values given as arguments
parse_binary() -- convert a binary (string) representation into
a Python dictionary or object.
These functions will be generated dynamically for each Struct
object to make conversion as fast as possible. They are
generated the first time the methods are called.
"""
def __init__(self, *fields):
self.fields = fields
# Structures for to_binary, parse_value and parse_binary
self.static_codes = '='
self.static_values = 0
self.static_fields = []
self.static_size = None
self.var_fields = []
for f in self.fields:
# Append structcode if there is one and we haven't
# got any varsize fields yet.
if f.structcode is not None:
assert not self.var_fields
self.static_codes = self.static_codes + f.structcode
# Only store fields with values
if f.structvalues > 0:
self.static_fields.append(f)
self.static_values = self.static_values + f.structvalues
# If we have got one varsize field, all the rest must
# also be varsize fields.
else:
self.var_fields.append(f)
self.static_size = struct.calcsize(self.static_codes)
if self.var_fields:
self.structcode = None
self.structvalues = 0
else:
self.structcode = self.static_codes[1:]
self.structvalues = self.static_values
# These functions get called only once, as they will override
# themselves with dynamically created functions in the Struct
# object
def to_binary(self, *varargs, **keys):
"""data = s.to_binary(...)
Convert Python values into the binary representation. The
arguments will be all value fields with names, in the order
given when the Struct object was instantiated. With one
exception: fields with default arguments will be last.
Returns the binary representation as the string DATA.
"""
code = ''
total_length = str(self.static_size)
joins = []
args = []
defargs = []
kwarg = 0
# First pack all varfields so their lengths and formats are
# available when we pack their static LengthFields and
# FormatFields
i = 0
for f in self.var_fields:
if f.keyword_args:
kwarg = 1
kw = ', _keyword_args'
else:
kw = ''
# Call pack_value method for each field, storing
# the return values for later use
code = code + (' _%(name)s, _%(name)s_length, _%(name)s_format'
' = self.var_fields[%(fno)d].pack_value(%(name)s%(kw)s)\n'
% { 'name': f.name,
'fno': i,
'kw': kw })
total_length = total_length + ' + len(_%s)' % f.name
joins.append('_%s' % f.name)
i = i + 1
# Construct argument list for struct.pack call, packing all
# static fields. First argument is the structcode, the
# remaining are values.
pack_args = ['"%s"' % self.static_codes]
i = 0
for f in self.static_fields:
if isinstance(f, LengthField):
# If this is a total length field, insert
# the calculated field value here
if isinstance(f, TotalLengthField):
if self.var_fields:
pack_args.append('self.static_fields[%d].calc_length(%s)'
% (i, total_length))
else:
pack_args.append(str(f.calc_length(self.static_size)))
else:
pack_args.append('self.static_fields[%d].calc_length(_%s_length)'
% (i, f.name))
# Format field, just insert the value we got previously
elif isinstance(f, FormatField):
pack_args.append('_%s_format' % f.name)
# A constant field, insert its value directly
elif isinstance(f, ConstantField):
pack_args.append(str(f.value))
# Value fields
else:
if f.structvalues == 1:
# If there's a value check/convert function, call it
if f.check_value is not None:
pack_args.append('self.static_fields[%d].check_value(%s)'
% (i, f.name))
# Else just use the argument as provided
else:
pack_args.append(f.name)
# Multivalue field. Handled like single valuefield,
# but the values are tuple-unpacked into separate arguments
# which are appended to pack_args
else:
a = []
for j in range(f.structvalues):
a.append('_%s_%d' % (f.name, j))
if f.check_value is not None:
code = code + (' %s = self.static_fields[%d].check_value(%s)\n'
% (', '.join(a), i, f.name))
else:
code = code + ' %s = %s\n' % (', '.join(a), f.name)
pack_args = pack_args + a
# Add field to argument list
if f.name:
if f.default is None:
args.append(f.name)
else:
defargs.append('%s = %s' % (f.name, repr(f.default)))
i = i + 1
# Construct call to struct.pack
pack = 'struct.pack(%s)' % ', '.join(pack_args)
# If there are any varfields, we append the packed strings to build
# the resulting binary value
if self.var_fields:
code = code + ' return %s + %s\n' % (pack, ' + '.join(joins))
# If there's only static fields, return the packed value
else:
code = code + ' return %s\n' % pack
# Add all varsize fields to argument list. We do it here
# to ensure that they appear after the static fields.
for f in self.var_fields:
if f.name:
if f.default is None:
args.append(f.name)
else:
defargs.append('%s = %s' % (f.name, repr(f.default)))
args = args + defargs
if kwarg:
args.append('**_keyword_args')
# Add function header
code = 'def to_binary(self, %s):\n' % ', '.join(args) + code
# self._pack_code = code
# print
# print code
# print
# Finally, compile function by evaluating it. This will store
# the function in the local variable to_binary, thanks to the
# def: line. Convert it into an instance method bound to self,
# and store it in self.
# Unfortunately, this creates a circular reference. However,
# Structs are not really created dynamically so the potential
# memory leak isn't that serious. Besides, Python 2.0 has
# real garbage collection.
exec(code)
self.to_binary = types.MethodType(locals()["to_binary"], self)
# Finally call it manually
return self.to_binary(*varargs, **keys)
def pack_value(self, value):
""" This function allows Struct objects to be used in List and
Object fields. Each item represents the arguments to pass to
to_binary, either a tuple, a dictionary or a DictWrapper.
"""
if type(value) is tuple:
return self.to_binary(*value, **{})
elif type(value) is dict:
return self.to_binary(*(), **value)
elif isinstance(value, DictWrapper):
return self.to_binary(*(), **value._data)
else:
raise BadDataError('%s is not a tuple, dict or DictWrapper' % (value, ))
def parse_value(self, val, display, rawdict = 0):
"""This function is used by List and Object fields to convert
Struct objects with no var_fields into Python values.
"""
code = ('def parse_value(self, val, display, rawdict = 0):\n'
' ret = {}\n')
vno = 0
fno = 0
for f in self.static_fields:
# Fields without names should be ignored, and there should
# not be any length or format fields if this function
# ever gets called. (If there were such fields, there should
# be a matching field in var_fields and then parse_binary
# would have been called instead.)
if not f.name:
pass
elif isinstance(f, LengthField):
pass
elif isinstance(f, FormatField):
pass
# Value fields
else:
# Get the index or range in val representing this field.
if f.structvalues == 1:
vrange = str(vno)
else:
vrange = '%d:%d' % (vno, vno + f.structvalues)
# If this field has a parse_value method, call it, otherwise
# use the unpacked value as is.
if f.parse_value is None:
code = code + ' ret["%s"] = val[%s]\n' % (f.name, vrange)
else:
code = code + (' ret["%s"] = self.static_fields[%d].'
'parse_value(val[%s], display)\n'
% (f.name, fno, vrange))
fno = fno + 1
vno = vno + f.structvalues
code = code + ' if not rawdict: return DictWrapper(ret)\n'
code = code + ' return ret\n'
# print
# print code
# print
# Finally, compile function as for to_binary.
exec(code)
self.parse_value = types.MethodType(locals()["parse_value"], self)
# Call it manually
return self.parse_value(val, display, rawdict)
def parse_binary(self, data, display, rawdict = 0):
"""values, remdata = s.parse_binary(data, display, rawdict = 0)
Convert a binary representation of the structure into Python values.
DATA is a string or a buffer containing the binary data.
DISPLAY should be a Xlib.protocol.display.Display object if
there are any Resource fields or Lists with ResourceObjs.
The Python values are returned as VALUES. If RAWDICT is true,
a Python dictionary is returned, where the keys are field
names and the values are the corresponding Python value. If
RAWDICT is false, a DictWrapper will be returned where all
fields are available as attributes.
REMDATA are the remaining binary data, unused by the Struct object.
"""
code = ('def parse_binary(self, data, display, rawdict = 0):\n'
' ret = {}\n'
' val = struct.unpack("%s", data[:%d])\n'
% (self.static_codes, self.static_size))
lengths = {}
formats = {}
vno = 0
fno = 0
for f in self.static_fields:
# Fields without names should be ignored. This is typically
# pad and constant fields
if not f.name:
pass
# Store index in val for Length and Format fields, to be used
# when treating varfields.
elif isinstance(f, LengthField):
f_names = [f.name]
if f.other_fields:
f_names.extend(f.other_fields)
for f_name in f_names:
if f.parse_value is None:
lengths[f_name] = 'val[%d]' % vno
else:
lengths[f_name] = ('self.static_fields[%d].'
'parse_value(val[%d], display)'
% (fno, vno))
elif isinstance(f, FormatField):
formats[f.name] = 'val[%d]' % vno
# Treat value fields the same way as in parse_value.
else:
if f.structvalues == 1:
vrange = str(vno)
else:
vrange = '%d:%d' % (vno, vno + f.structvalues)
if f.parse_value is None:
code = code + ' ret["%s"] = val[%s]\n' % (f.name, vrange)
else:
code = code + (' ret["%s"] = self.static_fields[%d].'
'parse_value(val[%s], display)\n'
% (f.name, fno, vrange))
fno = fno + 1
vno = vno + f.structvalues
code = code + ' data = data[%d:]\n' % self.static_size
# Call parse_binary_value for each var_field, passing the
# length and format values from the unpacked val.
fno = 0
for f in self.var_fields:
code = code + (' ret["%s"], data = '
'self.var_fields[%d].parse_binary_value'
'(data, display, %s, %s)\n'
% (f.name, fno,
lengths.get(f.name, 'None'),
formats.get(f.name, 'None')))
fno = fno + 1
code = code + ' if not rawdict: ret = DictWrapper(ret)\n'
code = code + ' return ret, data\n'
# print
# print code
# print
# Finally, compile function as for to_binary.
exec(code)
self.parse_binary = types.MethodType(locals()["parse_binary"], self)
# Call it manually
return self.parse_binary(data, display, rawdict)
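# Illustrative usage sketch for the Struct class above (not part of the
# original module); the field names are invented for demonstration:
#
#   point = Struct(Card16('x'), Card16('y'))
#   data = point.to_binary(x = 10, y = 20)
#   values, remain = point.parse_binary(data, None)
#   values.x, values.y    # -> 10, 20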
class TextElements8(ValueField):
string_textitem = Struct( LengthOf('string', 1),
Int8('delta'),
String8('string', pad = 0) )
def pack_value(self, value):
data = b''
args = {}
for v in value:
# Let values be simple strings, meaning a delta of 0
if type(v) is bytes:
v = (0, v)
# A tuple, it should be (delta, string)
# Encode it as one or more textitems
if type(v) in (tuple, dict) or \
isinstance(v, DictWrapper):
if type(v) is tuple:
delta, s = v
else:
delta = v['delta']
s = v['string']
while delta or s:
args['delta'] = delta
args['string'] = s[:254]
data = data + self.string_textitem.to_binary(*(), **args)
delta = 0
s = s[254:]
# Else an integer, i.e. a font change
else:
# Use fontable cast function if instance
try:
v = v.__fontable__()
except AttributeError as e: pass
data = data + struct.pack('>BL', 255, v)
# Pad out to four byte length
dlen = len(data)
return data + b'\0' * ((4 - dlen % 4) % 4), None, None
def parse_binary_value(self, data, display, length, format):
values = []
while 1:
if len(data) < 2:
break
# font change
assert type(data) is bytes
if data[0] == 255:
values.append(struct.unpack('>L', data[1:5])[0])
data = data[5:]
# skip null strings
elif data[0] == 0 and data[1] == 0:
data = data[2:]
# string with delta
else:
v, data = self.string_textitem.parse_binary(data, display)
values.append(v)
return values, b''
class TextElements16(TextElements8):
string_textitem = Struct( LengthOf('string', 1),
Int8('delta'),
String16('string', pad = 0) )
class GetAttrData(object):
def __getattr__(self, attr):
try:
if self._data:
return self._data[attr]
else:
raise AttributeError(attr)
except KeyError:
raise AttributeError(attr)
class DictWrapper(GetAttrData):
def __init__(self, dict):
self.__dict__['_data'] = dict
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, value):
self._data[key] = value
def __delattr__(self, key):
del self._data[key]
def __str__(self):
return str(self._data)
def __repr__(self):
return '%s(%s)' % (self.__class__, repr(self._data))
def __eq__(self, other):
if isinstance(other, DictWrapper):
return self._data == other._data
else:
return self._data == other
def __ne__(self, other):
return not self.__eq__(other)
class Request:
def __init__(self, display, onerror = None, *args, **keys):
self._errorhandler = onerror
self._binary = self._request.to_binary(*args, **keys)
self._serial = None
display.send_request(self, onerror is not None)
def _set_error(self, error):
if self._errorhandler is not None:
return call_error_handler(self._errorhandler, error, self)
else:
return 0
class ReplyRequest(GetAttrData):
def __init__(self, display, defer = 0, *args, **keys):
self._display = display
self._binary = self._request.to_binary(*args, **keys)
self._serial = None
self._data = None
self._error = None
self._response_lock = lock.allocate_lock()
self._display.send_request(self, 1)
if not defer:
self.reply()
def reply(self):
# Send the request and wait for the reply if we haven't
# already got one. This means that reply() can safely
# be called more than once.
self._response_lock.acquire()
while self._data is None and self._error is None:
self._display.send_recv_lock.acquire()
self._response_lock.release()
self._display.send_and_recv(request = self._serial)
self._response_lock.acquire()
self._response_lock.release()
self._display = None
# If error has been set, raise it
if self._error:
raise self._error
def _parse_response(self, data):
self._response_lock.acquire()
self._data, d = self._reply.parse_binary(data, self._display, rawdict = 1)
self._response_lock.release()
def _set_error(self, error):
self._response_lock.acquire()
self._error = error
self._response_lock.release()
return 1
def __repr__(self):
return '<%s serial = %s, data = %s, error = %s>' % (self.__class__, self._serial, self._data, self._error)
class Event(GetAttrData):
def __init__(self, binarydata = None, display = None,
**keys):
if binarydata:
self._binary = binarydata
self._data, data = self._fields.parse_binary(binarydata, display,
rawdict = 1)
# split event type into type and send_event bit
self._data['send_event'] = not not self._data['type'] & 0x80
self._data['type'] = self._data['type'] & 0x7f
else:
if self._code:
keys['type'] = self._code
keys['sequence_number'] = 0
self._binary = self._fields.to_binary(*(), **keys)
keys['send_event'] = 0
self._data = keys
def __repr__(self):
kwlist = []
for kw, val in self._data.items():
if kw == 'send_event':
continue
if kw == 'type' and self._data['send_event']:
val = val | 0x80
kwlist.append('%s = %s' % (kw, repr(val)))
kws = ', '.join(kwlist)
return '%s(%s)' % (self.__class__, kws)
def __eq__(self, other):
if isinstance(other, Event):
return self._data == other._data
else:
# cmp() no longer exists in Python 3; compare like DictWrapper does
return self._data == other
def call_error_handler(handler, error, request):
try:
return handler(error, request)
except:
sys.stderr.write('Exception raised by error handler.\n')
traceback.print_exc()
return 0
| gpl-2.0 | 5,240,419,798,686,419,000 | 28.929802 | 114 | 0.530938 | false |
Eszti/pymachine | src/pymachine/definition_parser.py | 1 | 20690 | import logging
import sys
import re
import string
from collections import defaultdict
try:
import pyparsing
from pyparsing import Literal, Word, Group, Combine, Optional, Forward, alphanums, SkipTo, LineEnd, nums, delimitedList # nopep8
except ImportError:
logging.critical("PyParsing has to be installed on the computer")
sys.exit(-1)
from hunmisc.xstring.encoding import decode_from_proszeky
from constants import deep_cases, avm_pre, deep_pre, enc_pre, id_sep
from pymachine.machine import Machine
from pymachine.control import ConceptControl
class ParserException(Exception):
pass
class DefinitionParser(object):
_str = set([str, unicode])
lb = "["
rb = "]"
lp = "("
rp = ")"
left_defa = '<'
right_defa = '>'
clause_sep = ","
part_sep = ";"
prime = "'"
hyphen = "-"
langspec_pre = "$" # starts langspec deep case
unary_p = re.compile("^[a-z_#\-/0-9]+(/[0-9]+)?$")
binary_p = re.compile("^[A-Z_0-9]+(/[0-9]+)?$")
def __init__(self, plur_dict):
self.plur_dict = plur_dict
self.init_parser()
@classmethod
def _is_binary(cls, s):
return ((type(s) in cls._str and cls.binary_p.match(s)) or
(type(s) is list and s[0] == deep_pre and s[1] == "REL"))
@classmethod
def _is_unary(cls, s):
return ((type(s) in cls._str and cls.unary_p.match(s) is not None) or
(type(s) is list and (
(s[0] == deep_pre) or
(s[0] == cls.langspec_pre) or
(s[0] == enc_pre) or
(s[0] == cls.left_defa)
)))
@classmethod
def _is_deep_case(cls, s):
return s in deep_cases
def init_parser(self):
self.lb_lit = Literal(DefinitionParser.lb)
self.rb_lit = Literal(DefinitionParser.rb)
self.lp_lit = Literal(DefinitionParser.lp)
self.rp_lit = Literal(DefinitionParser.rp)
self.left_defa_lit = Literal(DefinitionParser.left_defa)
self.right_defa_lit = Literal(DefinitionParser.right_defa)
self.clause_sep_lit = Literal(DefinitionParser.clause_sep)
self.part_sep_lit = Literal(DefinitionParser.part_sep)
self.prime_lit = Literal(DefinitionParser.prime)
self.hyphen_lit = Literal(DefinitionParser.hyphen)
self.enc_pre_lit = Literal(enc_pre)
self.deep_pre_lit = Literal(deep_pre)
self.avm_pre_lit = Literal(avm_pre)
self.langspec_pre_lit = Literal(DefinitionParser.langspec_pre)
self.id_sep_lit = Literal(id_sep)
self.disambig_id = self.id_sep_lit + Word(nums)
self.deep_cases = Group(self.deep_pre_lit + Word(string.uppercase))
self.unary = Forward()
self.unary << (Combine(Optional("-") +
Word(string.lowercase + "_" + nums) +
Optional(self.disambig_id))
| self.deep_cases
| Group(self.langspec_pre_lit +
Word(string.uppercase + "_"))
| Group(self.avm_pre_lit +
Word(string.ascii_letters + "_"))
| Group(self.enc_pre_lit + Word(alphanums + "_-"))
| Group(self.left_defa_lit + self.unary +
self.right_defa_lit))
self.binary = (Combine(Word(string.uppercase + "_" + nums) +
Optional(self.disambig_id))
| Group(self.deep_pre_lit + 'REL'))
self.dontcare = SkipTo(LineEnd())
# main expression
self.expression = Forward()
self.binexpr = Forward()
self.unexpr = Forward()
self.argexpr = Forward()
# "enumerable expression"
# D -> E | E, D
self.definition = Group(delimitedList(self.expression,
delim=DefinitionParser.clause_sep))
self.expression << Group(
# E -> UE
(self.unexpr) ^
# E -> BE
(self.binexpr) ^
# E -> U ( E )
(self.unary + self.lp_lit + self.expression + self.rp_lit) ^
# E -> < E >
(self.left_defa_lit + self.expression + self.right_defa_lit)
)
self.binexpr << Group(
# BE -> A B
(self.argexpr + self.binary) ^
# BE -> B A
(self.binary + self.argexpr) ^
# BE -> A B A
(self.argexpr + self.binary + self.argexpr) ^
# BE -> B [ E; E ]
(self.binary + self.lb_lit + self.expression + self.part_sep_lit
+ self.expression + self.rb_lit)
)
self.unexpr << Group(
# UE -> U
(self.unary) ^
# UE -> U [ D ]
(self.unary + self.lb_lit + self.definition + self.rb_lit) ^
# UE -> U ( U )
(self.unary + self.lp_lit + self.unary + self.rp_lit)
)
self.argexpr << Group(
# A -> UE
(self.unexpr) ^
# A -> [ D ]
(self.lb_lit + self.definition + self.rb_lit) ^
# A -> < A >
(self.left_defa_lit + self.argexpr + self.right_defa_lit) ^
# A -> '
(self.prime_lit)
)
self.hu, self.pos, self.en, self.lt, self.pt = (
Word(alphanums + "#-/_.'"),) * 5
self.defid = Word(nums)
self.word = Group(self.hu + self.pos + self.en + self.lt + self.pt)
# S -> W : D | W : D % _
#self.sen = self.definition + LineEnd()
def parse(self, s):
return self.definition.parseString(s, parseAll=True).asList()
def create_machine(self, name, partitions):
# lists are accepted because of ["=", "AGT"]
if type(name) is list:
name = "".join(name)
# HACK until we find a good solution for defaults
name = name.strip('<>')
is_plur = name in self.plur_dict
if is_plur:
name = self.plur_dict[name]
m = Machine(decode_from_proszeky(name),
ConceptControl(), partitions)
if is_plur:
m.append(self.create_machine('more', 1), 0)
return m
def unify(self, machine):
def __collect_machines(m, machines, is_root=False):
# cut the recursion
key = m.printname(), __has_other(m)
if (key in machines and m in machines[key]):
return
if not is_root:
machines[m.printname(), __has_other(m)].append(m)
for partition in m.partitions:
for m_ in partition:
__collect_machines(m_, machines)
def __has_other(m):
for m_ in m.partitions[0]:
if m_.printname() == "other":
return True
return False
def __get_unified(machines, res=None):
# if nothing to unify, don't
if len(machines) == 1:
return machines[0]
# if a return machine is given, don't create a new one
if res is None:
prototype = machines[0]
res = self.create_machine(prototype.printname(),
len(prototype.partitions))
for m in machines:
# if the same machine, don't add anything
if id(m) == id(res):
continue
for p_i, p in enumerate(m.partitions):
for part_m in p:
if part_m.printname() != "other":
res.partitions[p_i].append(part_m)
part_m.del_parent_link(m, p_i)
part_m.add_parent_link(res, p_i)
return res
def __replace(where, for_what, is_other=False, visited=None):
if visited is None:
visited = set()
if id(where) in visited:
return
visited.add(id(where))
pn = for_what.printname()
for p_i, p in enumerate(where.partitions):
# change the partition machines
for part_m_i, part_m in enumerate(p):
if part_m.printname() == pn and __has_other(
part_m) == is_other:
where.partitions[p_i][part_m_i] = for_what
for_what.add_parent_link(where, p_i)
__replace(where.partitions[p_i][part_m_i],
for_what, is_other, visited)
# unification if there is a machine more than once on the same
# partition
where.partitions[p_i] = list(set(p))
machines = defaultdict(list)
__collect_machines(machine, machines, is_root=True)
for k, machines_to_unify in machines.iteritems():
if len(machines_to_unify[0].partitions) > 1:
continue
printname, is_other = k
#if unification affects the root (machine),
#be that the result machine
if printname == machine.printname():
unified = __get_unified(machines_to_unify, machine)
else:
unified = __get_unified(machines_to_unify)
__replace(machine, unified, is_other)
def __parse_expr(self, expr, root, loop_to_defendum=True,
three_parts=False):
"""
Creates machines from a parse node and its children;
there should be one handler for every rule.
"""
logging.debug("Parsing expression: {0}".format(expr))
# name shortening for classmethods
cls = DefinitionParser
is_binary = cls._is_binary
is_unary = cls._is_unary
is_tree = lambda r: type(r) == list
left_part = 0 + int(three_parts)
right_part = 1 + int(three_parts)
most_part = 2 + int(three_parts)
if (len(expr) == 1):
# UE -> U
if (is_unary(expr[0])):
logging.debug("Parsing {0} as a unary.".format(expr[0]))
return [self.create_machine(expr[0], 1)]
# E -> UE | BE, A -> UE
if (is_tree(expr[0])):
logging.debug("Parsing {0} as a tree.".format(expr[0]))
return self.__parse_expr(expr[0], root, loop_to_defendum,
three_parts)
if (len(expr) == 2):
# BE -> A B
if (is_tree(expr[0]) and
is_binary(expr[1])):
m = self.create_machine(expr[1], most_part)
if expr[0] != ["'"]:
m.append_all(
self.__parse_expr(expr[0], root, loop_to_defendum,
three_parts),
left_part)
if loop_to_defendum:
m.append(root, right_part)
return [m]
# BE -> B A
if (is_binary(expr[0]) and
is_tree(expr[1])):
m = self.create_machine(expr[0], most_part)
if expr[1] != ["'"]:
m.append_all(
self.__parse_expr(expr[1], root, loop_to_defendum,
three_parts),
right_part)
if loop_to_defendum:
m.append(root, left_part)
return [m]
# BE -> 'B
if (expr[0] == ["'"] and
is_binary(expr[1])):
m = self.create_machine(expr[1], most_part)
#m.append(parent, 1)
if loop_to_defendum:
m.append(root, right_part)
return [m]
# BE -> B'
if (is_binary(expr[0]) and
expr[1] == ["'"]):
m = self.create_machine(expr[0], most_part)
# m.append(parent, 0)
if loop_to_defendum:
m.append(root, left_part)
return [m]
# U -> =AGT
if expr[0] == deep_pre:
return [self.create_machine(deep_pre + expr[1], 1)]
# U -> $HUN_FROM
if (expr[0] == cls.langspec_pre):
return [self.create_machine(cls.langspec_pre + expr[1], 1)]
# U -> #AVM
if (expr[0] == avm_pre):
return [self.create_machine(avm_pre + expr[1], 1)]
# U -> @External_url
if (expr[0] == enc_pre):
return [self.create_machine(enc_pre + expr[1], 1)]
if (len(expr) == 3):
# UB -> A B A
if (is_tree(expr[0]) and
is_binary(expr[1]) and
is_tree(expr[2])):
m = self.create_machine(expr[1], most_part)
logging.debug(expr[1])
if expr[0] != [DefinitionParser.prime]:
logging.debug(expr[0])
m.append_all(
self.__parse_expr(expr[0], root, loop_to_defendum,
three_parts),
left_part)
if expr[2] != [DefinitionParser.prime]:
m.append_all(
self.__parse_expr(expr[2], root, loop_to_defendum,
three_parts),
right_part)
return [m]
# A -> [ D ]
if (expr[0] == "[" and
is_tree(expr[1]) and
expr[2] == "]"):
logging.debug(
"Parsing expr {0} as an embedded definition".format(expr))
res = list(
self.__parse_definition(expr[1], root, loop_to_defendum,
three_parts))
return res
# E -> < E >, U -> < U >
if expr[0] == '<' and expr[2] == '>':
logging.debug('E -> < E >' + str(expr[1]))
return list(self.__parse_expr(expr[1], root, loop_to_defendum,
three_parts))
if (len(expr) == 4):
# UE -> U ( U )
# E -> U ( BE ) provisional
if (is_unary(expr[0]) and
expr[1] == "(" and
expr[3] == ")"):
logging.debug('X -> U ( Y )')
if is_unary(expr[2]):
m = self.create_machine(expr[2], 1)
else:
m = self.__parse_expr(expr[2], root, loop_to_defendum,
three_parts)[0]
if not three_parts:
logging.warning(
"for 0th partition of binary machines, " +
"set three_parts=True, "+str(expr))
m.append(self.create_machine(expr[0], 1), 0)
return [m]
# UE -> U [ D ]
if (is_unary(expr[0]) and
expr[1] == "[" and
is_tree(expr[2]) and
expr[3] == "]"):
m = self.create_machine(expr[0], 1)
for parsed_expr in self.__parse_definition(expr[2], root,
loop_to_defendum,
three_parts):
m.append(parsed_expr, 0)
return [m]
# E -> U ( BE )
#if (is_unary(expr[0]) and
# expr[1] == "(" and
# is_tree(expr[2]) and
# expr[3] == ")"):
# ms = self.__parse_expr(expr[2], root, loop_to_defendum,
# three_parts)
# # if BE was an expression with an apostrophe, then
# # return of __parse_expr() is None
# if len(ms) != 0:
# ms[0].append(self.create_machine(expr[0], 1), 0)
# # if len(ms) == 3 and ms[0] == '<':
# # ms = ms[1]
# if len(ms) != 1:
# logging.warning("0th partition of binary machines " +
# "is not implemented "+str(ms))
# return ms
logging.warning('machine cannot be built '+str(expr))
if (len(expr) == 6):
# BE -> B [E; E]
if (is_binary(expr[0]) and
expr[1] == "[" and
is_tree(expr[2]) and
expr[3] == ";" and
is_tree(expr[4]) and
expr[5] == "]"):
m = self.create_machine(expr[0], 2)
m.append_all(
self.__parse_expr(expr[2], m, root, loop_to_defendum,
three_parts),
0)
m.append_all(
self.__parse_expr(expr[4], m, root, loop_to_defendum,
three_parts),
1)
return [m]
pe = ParserException(
"Unknown expression in definition: {0} (len={1})".format(
expr,
len(expr)))
logging.debug(str(pe))
logging.debug(expr)
raise pe
def __parse_definition(self, definition, root, loop_to_defendum=True,
three_parts=False):
logging.debug(str(definition))
for d in definition:
yield self.__parse_expr(d, root, loop_to_defendum, three_parts)[0]
def parse_into_machines(self, string, printname_index=0, add_indices=False,
loop_to_defendum=True, three_parts=False):
printname = string.split('\t')[printname_index]
try:
id_, urob, pos, def_, comment = string.split('\t')[4:]
except:
raise Exception(string.split('\t'))
machine = self.create_machine(printname.lower(), 1)
#TODO =AGT -> partition 1, =PAT -> partition 2, =TO -> ?
if add_indices:
machine.printname_ = machine.printname() + id_sep + id_
if def_ != '':
logging.debug(def_)
parsed = self.parse(def_)
logging.debug(parsed)
for parsed_expr in self.__parse_definition(
parsed[0], machine, loop_to_defendum, three_parts):
machine.append(parsed_expr, 0)
self.unify(machine)
return machine
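# Note (added for clarity): parse_into_machines() expects one tab-separated
# 4lang row; with the slicing above the line must have exactly nine columns,
# the 5th-9th being id, urob, pos, definition and comment.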
def read(f, plur_filn, printname_index=0, add_indices=False,
loop_to_defendum=True, three_parts=False):
logging.warning(
"Will now discard all but the first definition of each \
headword!".upper())
d = defaultdict(set)
plur_dict = read_plur(open(plur_filn)) if plur_filn else {}
dp = DefinitionParser(plur_dict)
for line in f:
l = line.strip('\n')
logging.debug("Parsing: {0}".format(l))
try:
m = dp.parse_into_machines(l, printname_index, add_indices,
loop_to_defendum, three_parts)
if m.partitions[0] == []:
logging.debug('dropping empty definition of '+m.printname())
continue
pn = m.printname()
if pn in d:
continue
# logging.warning('duplicate pn: {0}, machines: {1}, {2}'.format(
# pn, d[pn], "{0}:{1}".format(m, m.partitions)))
d[m.printname()].add(m)
logging.debug('\n'+m.to_debug_str())
except pyparsing.ParseException, pe:
print l
logging.error("Error: "+str(pe))
return d
def read_plur(_file):
plur_dict = {}
for line in _file:
plur, sg = line.split()
plur_dict[plur] = sg
return plur_dict
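# The plural file is expected to contain whitespace-separated
# (plural, singular) pairs, one pair per line; a made-up example:
#
# dogs dog
# wolves wolf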
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING,
format="%(asctime)s : %(module)s (%(lineno)s) " +
"- %(levelname)s - %(message)s")
plur_dict = read_plur(open('/home/recski/projects/4lang/4lang.plural'))
dp = DefinitionParser(plur_dict)
pstr = sys.argv[-1]
if sys.argv[1] == "-d":
print Machine.to_debug_str(dp.parse_into_machines(pstr), max_depth=99)
elif sys.argv[1] == "-f":
lexicon = read(file(sys.argv[2]), '../../res/4lang/4lang.plural',
three_parts=True)
else:
print dp.parse(pstr)
| mit | -156,672,265,516,880,830 | 35.298246 | 133 | 0.460222 | false |
COMBINE-lab/matryoshka_work | coredomains-import/python-src/domain_size_all_chroms.py | 1 | 5607 | # get min, max, avg domain sizes across different resolutions
import sys
import matplotlib.pyplot as plt
import matplotlib
import collections
Domain = collections.namedtuple("Domain", ['start', 'end'])
################################################
#
#
#
################################################
def parseBingRenDomains(path):
print "Parsing bing ren"
B = {}
with open(path, 'r') as f:
for line in f:
chromo, start, stop = line.strip().split()
if not (chromo in B): B[chromo] = []
B[chromo].append( Domain( int(start) / step, int(stop) /step ) )
avg = {}
total_sum = 0
total_len = 0
for chromo in sorted(B.keys()):
total_sum += sum ( [d.end - d.start for d in B[chromo] ] )
total_len += len(B[chromo])
a = sum ( [d.end - d.start for d in B[chromo] ] ) * 1.0 / len(B[chromo])
print "Avg B.R. domain length,", chromo, "\t", a, "\t", a * step
avg[chromo] = a
print "Compare", sum(avg.values() ) / len(avg.values() ) * step, total_sum * 1.0 / total_len * step
return B
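# Input is assumed to consist of whitespace-separated "chromosome start stop"
# rows (coordinates in bp, later binned by `step`); a made-up example line:
#
# chr1 520000 1640000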
################################################
#
#
#
################################################
def parseMultiResDomains(paths, step):
domains_chro = {}
print "Parsing our domains"
for f_name in paths:
parts = f_name[:-3].split(".")
chromo = parts[1]
if not (chromo in domains_chro ):
domains_chro[chromo] = {}
a = parts[-1]
#assert(chro == chromo)
if len(a) == 1:
a = int(a) * 0.1
elif len(a)==2:
a = int(a) * 0.01
elif 'alpha' in a:
a = 1.0
domains_chro[chromo][a] = []
with open(f_name, 'r') as f_in:
for line in f_in:
parts = line.strip().split()
if len(parts) == 2:
start = parts[0]
end = parts[1]
else:
start = parts[1]
end = parts[2]
domains_chro[chromo][a].append( Domain(int(start) / step, int(end) / step) )
#sorted_items = sorted([(k,v) for k,v in domains_res.iteritems()], key=lambda x: x[0] )
#print [y[0] for y in sorted_items[:10] ]
#print map(len, [y[1] for y in sorted_items] )
return domains_chro
################################################
#
#
#
################################################
def plotSizes(BR_cnt, BR_avg, Gamma, Avg, Min, Max, Cnt):
plt.subplot(211)
for chromo in Avg.keys():
plt.plot(Gamma, Avg[chromo], 'b-', Gamma, Max[chromo], 'r-', Gamma, Min[chromo], 'g-', alpha=0.3)
plt.plot( [min(Gamma), max(Gamma)], [BR_avg[chromo], BR_avg[chromo] ], 'm-', alpha=0.3)
plt.xlabel('$\gamma$, resolution parameter')
plt.ylabel('avg domain size, in 40Kb')
plt.yscale('log')
# plt.grid(True)
plt.subplot(212)
for chromo in Cnt.keys():
plt.plot(Gamma, Cnt[chromo], 'b-', alpha=0.3)
plt.plot( [min(Gamma), max(Gamma)], [BR_cnt[chromo], BR_cnt[chromo] ], 'm-', alpha=0.3)
plt.ylabel('number of domains')
plt.xlabel('all chromosomes')
plt.yscale('log')
f_name = "domain_sizes_all.pdf"
plt.savefig(f_name)
#plt.show()
print "Saved to", f_name
################################################
#
#
#
################################################
def plotSizesAvgOverChromos(BR_cnt, BR_avg, Gamma, Avg, Min, Max, Cnt):
plt.subplot(211)
font = {'family' : 'normal',
'size' : 20}
matplotlib.rc('font', **font)
avg_avg = []
max_avg = []
min_avg = []
BR_avg_avg = []
cnt_avg = []
num_chromo = len(Avg)
for i in xrange(len(Gamma)):
g = Gamma[i]
avg_avg.append( sum ( [res[i] for chromo, res in Avg.iteritems() ] ) / num_chromo )
max_avg.append( sum ( [res[i] for chromo, res in Max.iteritems() ] ) / num_chromo )
min_avg.append( sum ( [res[i] for chromo, res in Min.iteritems() ] ) / num_chromo )
cnt_avg.append( sum ( [res[i] for chromo, res in Cnt.iteritems() ] ) / num_chromo )
print avg_avg
print max_avg
print min_avg
plt.plot(Gamma, avg_avg, 'b-', Gamma, max_avg, 'r-', Gamma, min_avg, 'g-')
# plt.plot(Gamma, avg_avg, 'b-', alpha=0.7)
BR_avg_avg = sum( [data for chromo, data in BR_avg.iteritems()] ) / len(BR_avg)
plt.plot( [min(Gamma), max(Gamma)], [BR_avg_avg, BR_avg_avg ], 'm-', alpha=0.7)
# plt.xlabel('$\gamma$, resolution parameter')
plt.ylabel('size, in 40Kb')
plt.yscale('log')
# plt.grid(True)
plt.subplot(212)
plt.plot(Gamma, cnt_avg, 'b-')
BR_cnt_avg = sum( [data for chromo, data in BR_cnt.iteritems()] ) / len(BR_cnt)
plt.plot( [min(Gamma), max(Gamma)], [BR_cnt_avg, BR_cnt_avg ], 'm-')
plt.ylabel('domain count')
# plt.xlabel('all chromosomes')
plt.xlabel('$\gamma$, resolution parameter')
plt.yscale('log')
f_name = "domain_sizes_all.pdf"
plt.savefig(f_name)
#plt.show()
print "Saved to", f_name
#
# Main
#
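# Expected invocation (sketch; file names are examples only):
#
#     python domain_size_all_chroms.py BingRen_domains.bed domains.chr*.*.gz
#
# The first argument is the Bing Ren domain file (chromosome, start, stop per
# line); the remaining arguments are the per-chromosome multi-resolution
# domain files parsed by parseMultiResDomains() above.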
binren = sys.argv[1]
dp_domains = sys.argv[2:]
step = 40000
b_domains = parseBingRenDomains(binren)
multi_dom = parseMultiResDomains(dp_domains, step)
br_cnt = {chromo: len(domains) for (chromo, domains) in b_domains.iteritems() }
br_avg_s = {chromo: sum( [ (d.end+1 - d.start) for d in b_domains[chromo]] ) * 1.0 / br_cnt[chromo] for chromo in br_cnt.keys() }
# calculate sizes
# average per chromosme, per resolution
Gamma = []
Avg = {}
Min = {}
Max = {}
Cnt = {}
#sort by gamma
doneOnce = False
for chrom, resolutions in multi_dom.iteritems():
Avg[chrom] = []
Min[chrom] = []
Max[chrom] = []
Cnt[chrom] = []
resolutions = sorted( resolutions.items(), key=lambda x: x[0] )
for g, domains in resolutions:
if not doneOnce:
Gamma.append(g)
lens = [d.end+1 - d.start for d in domains]
Avg[chrom].append( sum(lens) * 1.0 / len (domains) )
Min[chrom].append( min(lens) )
Max[chrom].append( max(lens) )
Cnt[chrom].append( len(domains) )
doneOnce = True
#print Min
# Plot
plotSizesAvgOverChromos(br_cnt, br_avg_s, Gamma, Avg, Min, Max, Cnt)
| gpl-3.0 | 1,464,951,984,376,433,000 | 27.035 | 129 | 0.579098 | false |
kynikos/lib.py.wxclasses | src/wxclasses/timectrls.py | 1 | 15455 | # wxClasses
# Copyright (C) 2013-2014 Dario Giovannetti <[email protected]>
#
# This file is part of wxClasses.
#
# wxClasses is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wxClasses is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wxClasses. If not, see <http://www.gnu.org/licenses/>.
import time as time_
import datetime as datetime_
import wx
from choices import MultipleChoiceCtrl
from misc import NarrowSpinCtrl
class HourCtrl(object):
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.hourctrl = NarrowSpinCtrl(self.panel, min=0, max=23,
style=wx.SP_ARROW_KEYS | wx.SP_WRAP)
box.Add(self.hourctrl, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
slabel = wx.StaticText(self.panel, label=':')
box.Add(slabel, flag=wx.ALIGN_CENTER_VERTICAL)
self.minutectrl = NarrowSpinCtrl(self.panel, min=0, max=59,
style=wx.SP_ARROW_KEYS | wx.SP_WRAP)
box.Add(self.minutectrl, flag=wx.ALIGN_CENTER_VERTICAL)
def set_values(self, hour, minute):
self.hourctrl.SetValue(hour)
self.minutectrl.SetValue(minute)
def get_main_panel(self):
return self.panel
def get_hour(self):
return self.hourctrl.GetValue()
def get_minute(self):
return self.minutectrl.GetValue()
def get_relative_time(self):
hour = self.hourctrl.GetValue()
minute = self.minutectrl.GetValue()
return hour * 3600 + minute * 60
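# Example (sketch): for an HourCtrl instance h, after h.set_values(1, 30),
# h.get_relative_time() returns 1*3600 + 30*60 == 5400 seconds.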
class WeekDayCtrl(object):
choices = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday')
def __init__(self, parent):
self.panel = wx.Panel(parent)
self.dayctrl = wx.Choice(self.panel, choices=self.choices)
def set_day(self, day):
self.dayctrl.SetSelection(self.dayctrl.FindString(day))
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.GetString(self.dayctrl.GetSelection())
def get_relative_unix_time(self):
# Day 1 in Unix time was a Thursday
return {
'Thursday': 0,
'Friday': 86400,
'Saturday': 172800,
'Sunday': 259200,
'Monday': 345600,
'Tuesday': 432000,
'Wednesday': 518400,
}[self.get_day()]
@classmethod
def compute_widget_day(cls, timew):
# Any check that 0 <= number <= 6 should be done outside of here
return cls.choices[timew]
@classmethod
def compute_day_label(cls, day):
return cls.choices.index(day)
class MonthDayCtrl(object):
choices = ('1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th',
'10th', '11th', '12th', '13th', '14th', '15th', '16th', '17th',
'18th', '19th', '20th', '21st', '22nd', '23rd', '24th', '25th',
'26th', '27th', '28th', '29th', '30th', '31st')
def __init__(self, parent):
self.panel = wx.Panel(parent)
self.dayctrl = wx.Choice(self.panel, choices=self.choices)
def set_day(self, day):
self.dayctrl.SetSelection(day - 1)
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.GetSelection() + 1
def get_relative_time(self):
return self.get_day() * 86400 - 86400
@classmethod
def compute_day_label(cls, day):
return cls.choices[day - 1]
class MonthInverseDayCtrl(MonthDayCtrl):
choices = ['last', ] + [d + ' to last' for d in ('2nd', '3rd', '4th',
'5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th',
'13th', '14th', '15th', '16th', '17th', '18th', '19th', '20th',
'21st', '22nd', '23rd', '24th', '25th', '26th', '27th', '28th',
'29th', '30th', '31st')]
def get_day(self):
return self.dayctrl.GetSelection() + 1
@classmethod
def compute_day_label(cls, day):
return cls.choices[day - 1].replace(' ', '-')
class MonthWeekdayNumberCtrl(MonthDayCtrl):
choices = ('1st', '2nd', '3rd', '4th', '5th')
class MonthInverseWeekdayNumberCtrl(MonthInverseDayCtrl):
choices = ['last', ] + [d + ' to last' for d in ('2nd', '3rd', '4th',
'5th')]
class MonthWeekdayCtrl(object):
mwnctrl = MonthWeekdayNumberCtrl
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.numberctrl = self.mwnctrl(self.panel)
box.Add(self.numberctrl.get_main_panel(),
flag=wx.ALIGN_CENTER_VERTICAL)
self.dayctrl = WeekDayCtrl(self.panel)
box.Add(self.dayctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, number, day):
self.numberctrl.set_day(number)
self.dayctrl.set_day(day)
def get_main_panel(self):
return self.panel
def get_weekday_number(self):
return self.numberctrl.get_day()
def get_weekday(self):
return self.dayctrl.get_day()
@classmethod
def compute_weekday_number_label(cls, number):
return cls.mwnctrl.compute_day_label(number)
@staticmethod
def compute_weekday_label(day):
return WeekDayCtrl.compute_day_label(day)
@staticmethod
def compute_widget_weekday(day):
return WeekDayCtrl.compute_widget_day(day)
class MonthInverseWeekdayCtrl(MonthWeekdayCtrl):
mwnctrl = MonthInverseWeekdayNumberCtrl
class DateHourCtrl(object):
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
# DatePickerCtrl doesn't release TAB (Outspline bug #332)
self.datectrl = wx.DatePickerCtrl(self.panel)
box.Add(self.datectrl, flag=wx.ALIGN_CENTER_VERTICAL)
self.hourctrl = HourCtrl(self.panel)
box.Add(self.hourctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, year, month, day, hour, minute):
sdate = wx.DateTime()
sdate.Set(year=year, month=month, day=day)
self.datectrl.SetValue(sdate)
self.hourctrl.set_values(hour, minute)
def get_main_panel(self):
return self.panel
def get_unix_time(self):
date = self.datectrl.GetValue()
hour = self.hourctrl.get_hour()
minute = self.hourctrl.get_minute()
fdate = datetime_.datetime(date.GetYear(), date.GetMonth() + 1,
date.GetDay(), hour, minute)
# Don't do this because it behaves incorrectly if the date is a day
# in which the DST starts or ends
#date = self.datectrl.GetValue().GetTicks()
#return date + hour * 3600 + minute * 60
return int(time_.mktime(fdate.timetuple()))
def get_year(self):
return self.datectrl.GetValue().GetYear()
def get_month(self):
return self.datectrl.GetValue().GetMonth()
def get_day(self):
return self.datectrl.GetValue().GetDay()
def get_hour(self):
return self.hourctrl.get_hour()
def get_minute(self):
return self.hourctrl.get_minute()
@staticmethod
def compute_month_label(month):
# Hardcode the names since only English is supported for the moment
# anyway
return ('January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December')[
month - 1]
class WeekDayHourCtrl(object):
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.dayctrl = WeekDayCtrl(self.panel)
box.Add(self.dayctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL)
self.hourctrl = HourCtrl(self.panel)
box.Add(self.hourctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, day, hour, minute):
self.dayctrl.set_day(day)
self.hourctrl.set_values(hour, minute)
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.get_day()
def get_hour(self):
return self.hourctrl.get_hour()
def get_minute(self):
return self.hourctrl.get_minute()
def get_relative_time(self):
return self.hourctrl.get_relative_time()
def get_relative_unix_week_time(self):
rday = self.dayctrl.get_relative_unix_time()
rhour = self.hourctrl.get_relative_time()
return rday + rhour
@staticmethod
def compute_widget_day(timew):
return WeekDayCtrl.compute_widget_day(timew)
class MonthDayHourCtrl(object):
# Defining mdctrl here lets derive other classes from this one more easily
mdctrl = MonthDayCtrl
def __init__(self, parent):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.dayctrl = self.mdctrl(self.panel)
box.Add(self.dayctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL)
self.hourctrl = HourCtrl(self.panel)
box.Add(self.hourctrl.get_main_panel(), flag=wx.ALIGN_CENTER_VERTICAL |
wx.ALIGN_RIGHT | wx.LEFT, border=12)
def set_values(self, day, hour, minute):
self.dayctrl.set_day(day)
self.hourctrl.set_values(hour, minute)
def get_main_panel(self):
return self.panel
def get_day(self):
return self.dayctrl.get_day()
def get_hour(self):
return self.hourctrl.get_hour()
def get_minute(self):
return self.hourctrl.get_minute()
def get_relative_month_time(self):
rday = self.dayctrl.get_relative_time()
rhour = self.hourctrl.get_relative_time()
return rday + rhour
def get_relative_time(self):
return self.hourctrl.get_relative_time()
@classmethod
def compute_day_label(cls, day):
return cls.mdctrl.compute_day_label(day)
class MonthInverseDayHourCtrl(MonthDayHourCtrl):
mdctrl = MonthInverseDayCtrl
def get_relative_month_time(self):
rday = self.dayctrl.get_relative_time()
rhour = self.hourctrl.get_relative_time()
return rday + 86400 - rhour
class MonthWeekdayHourCtrl(MonthDayHourCtrl):
mdctrl = MonthWeekdayCtrl
def set_values(self, number, weekday, hour, minute):
self.dayctrl.set_values(number, weekday)
self.hourctrl.set_values(hour, minute)
def get_relative_time(self):
return self.hourctrl.get_relative_time()
def get_weekday_number(self):
return self.dayctrl.get_weekday_number()
def get_weekday(self):
return self.dayctrl.get_weekday()
@classmethod
def compute_weekday_number_label(cls, number):
return cls.mdctrl.compute_weekday_number_label(number)
@classmethod
def compute_weekday_label(cls, day):
return cls.mdctrl.compute_weekday_label(day)
@classmethod
def compute_widget_weekday(cls, day):
return cls.mdctrl.compute_widget_weekday(day)
class MonthInverseWeekdayHourCtrl(MonthWeekdayHourCtrl):
mdctrl = MonthInverseWeekdayCtrl
class TimeSpanCtrl(object):
def __init__(self, parent, min_number, max_number):
self.panel = wx.Panel(parent)
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel.SetSizer(box)
self.numberctrl = NarrowSpinCtrl(self.panel, min=min_number,
max=max_number, style=wx.SP_ARROW_KEYS)
box.Add(self.numberctrl, flag=wx.ALIGN_CENTER_VERTICAL)
self.unitctrl = wx.Choice(self.panel,
choices=('minutes', 'hours', 'days', 'weeks'))
box.Add(self.unitctrl, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT |
wx.LEFT, border=12)
def set_values(self, number, unit):
self.numberctrl.SetValue(number)
self.unitctrl.SetSelection(self.unitctrl.FindString(unit))
def get_main_panel(self):
return self.panel
def get_time_span(self):
number = self.numberctrl.GetValue()
unit = self.unitctrl.GetString(self.unitctrl.GetSelection())
return self._compute_relative_time(number, unit)
def get_number(self):
return self.numberctrl.GetValue()
def get_unit(self):
return self.unitctrl.GetString(self.unitctrl.GetSelection())
@staticmethod
def _compute_relative_time(number, unit):
mult = {'minutes': 60,
'hours': 3600,
'days': 86400,
'weeks': 604800}
return number * mult[unit]
@staticmethod
def compute_widget_values(diff):
if diff == 0:
return (0, 'minutes')
else:
adiff = abs(diff)
# Same result as `1 if diff > 0 else -1`
neg = diff // adiff
for (number, unit) in ((604800, 'weeks'),
(86400, 'days'),
(3600, 'hours'),
(60, 'minutes')):
if adiff % number == 0:
return (adiff // number * neg, unit)
else:
return (adiff // 60 * neg, 'minutes')
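# Examples for compute_widget_values (sketch): 7200 -> (2, 'hours') and
# -5400 -> (-90, 'minutes'); the sign is preserved and the largest unit that
# divides the difference exactly is chosen.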
class WeekdaysCtrl(MultipleChoiceCtrl):
# Hardcode the names since only English is supported for the moment anyway
dnames = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def __init__(self, parent):
MultipleChoiceCtrl.__init__(self, parent, self.dnames)
def set_days(self, days):
return self.set_values(days)
def get_days(self):
return self.get_values()
@classmethod
def compute_day_name(cls, day):
return cls.dnames[day - 1]
class MonthsCtrl(MultipleChoiceCtrl):
# Hardcode the names since only English is supported for the moment anyway
mnames = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')
def __init__(self, parent):
MultipleChoiceCtrl.__init__(self, parent, self.mnames)
def set_months(self, months):
return self.set_values(months)
def get_months(self):
return self.get_values()
@classmethod
def compute_month_name(cls, month):
return cls.mnames[month - 1]
| gpl-3.0 | 2,253,997,558,680,086,000 | 30.285425 | 79 | 0.5945 | false |
mamiaokui/tracer | tools/AnalysisTools/user_trace_analysis/EventHandlers.py | 1 | 9243 | import Event
from Global import Global
import copy
# Common utilities
def MarkCurrentWorkingUnitEnded(thread_id):
threads = Global.ActiveThreads()
if thread_id in threads:
del threads[thread_id]
def AddConnector(event, connector_key_func, key):
if key is None:
return
ctors = Global.Connectors()
ctor_key = connector_key_func(event)
ctors[ctor_key] = key
def FindConnectorAndUpdate(event, connector_key_func):
ctors = Global.Connectors()
threads = Global.ActiveThreads()
# find the connector
ctor_key = connector_key_func(event)
if ctor_key in ctors:
# connects previous event to the current event
key = Global.AddNode(event)
Global.AddEdge(ctors[ctor_key], key)
del ctors[ctor_key]
        # decouple the current event from the previous event in the same thread
threads[event.pid] = key
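# Illustration of the connector pattern above (sketch): a producer handler
# stores its node under a key such as binder_key_func(event) == 'binder:<trans>'
# via AddConnector(); when the matching consumer event arrives,
# FindConnectorAndUpdate() looks up the same key and adds an edge from the
# producer's node to the consumer's node, linking work across thread boundaries.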
# Default and Null handlers
def DefaultHandlerInternal(event):
"""Use temporal correlation to link events within a thread."""
threads = Global.ActiveThreads()
tid = event.pid
#key = None
key = Global.AddNode(event)
if tid in threads:
Global.AddEdge(threads[tid], key)
threads[tid] = key
return key
def DefaultHandler(event):
DefaultHandlerInternal(event)
def NullHandler(event):
pass
# Binder event handlers
binder_key_func = lambda event: 'binder:%d' % event.data['trans']
def BinderProduceHandlerInternal(event):
key = DefaultHandlerInternal(event)
# insert a connector
AddConnector(event, binder_key_func, key)
def BinderOneWayProduceHandler(event):
if Global.IsUserInSystemUI():
return
BinderProduceHandlerInternal(event)
def BinderTwoWayProduceHandler(event):
if Global.IsUserInSystemUI():
return
BinderProduceHandlerInternal(event)
MarkCurrentWorkingUnitEnded(event.pid)
def BinderReplyHandler(event):
if Global.IsUserInSystemUI():
return
BinderProduceHandlerInternal(event)
MarkCurrentWorkingUnitEnded(event.pid)
def BinderConsumeHandler(event):
if Global.IsUserInSystemUI():
return
ctors = Global.Connectors()
threads = Global.ActiveThreads()
# find a connector
ctor_key = binder_key_func(event)
if ctor_key in ctors:
key = Global.AddNode(event)
Global.AddEdge(ctors[ctor_key], key)
del ctors[ctor_key]
threads[event.pid] = key
# Native poll event handlers
def PollNativeHandler(event):
if Global.IsUserInSystemUI():
return
MarkCurrentWorkingUnitEnded(event.pid)
def PollDoneHandler(event):
if Global.IsUserInSystemUI():
return
MarkCurrentWorkingUnitEnded(event.pid)
# Async task event handlers
async_task_key_func = lambda event: 'async:%d' % event.data['runnable']
def AsyncTaskSubmitHandler(event):
if Global.IsUserInSystemUI():
return
key = DefaultHandlerInternal(event)
# insert a connector
AddConnector(event, async_task_key_func, key)
def AsyncTaskConsumeHandler(event):
if Global.IsUserInSystemUI():
return
FindConnectorAndUpdate(event, async_task_key_func)
# Message queue event handlers
msg_key_func = lambda event: 'thread:%d,message:%d' % (event.data['queue_id'], event.data['message_id'])
def EnqueueMessageHandler(event):
if Global.IsUserInSystemUI():
return
key = DefaultHandlerInternal(event)
# insert a connector
AddConnector(event, msg_key_func, key)
def DequeueMessageHandler(event):
if Global.IsUserInSystemUI():
return
FindConnectorAndUpdate(event, msg_key_func)
# Fork event handler
def ForkHandler(event):
""" Create a 'FORK_IN_CHILD' event in the new thread."""
threads = Global.ActiveThreads()
# Add a node in the parent thread
key = DefaultHandlerInternal(event)
# Add a node in the child thread
if key is not None:
new_event = copy.deepcopy(event)
new_event.event = 'FORK_IN_CHILD'
new_event.pid = event.data['pid']
new_event.json['event'] = new_event.event
new_event.json['pid'] = new_event.pid
new_key = Global.AddNode(new_event)
threads[new_event.pid] = new_key
Global.AddEdge(key, new_key)
Global.NotifyFork(event.data['pid'], event.pid, event.data['tgid'])
Global.NotifyForkWebView(event.data['pid'], event.pid, event.data['tgid'])
# Core config change event handler
def CoreConfigChangeHandler(event):
Global.NotifyCoreConfigChange(event.data['old'], event.data['new'])
# DVFS config change event handler
def DVFSConfigChangeHandler(event):
Global.NotifyDvfsConfigChange(event.data['old'], event.data['new'])
# UI Related event handler
def UIInputHandler(event):
if Global.IsUserInSystemUI():
return
threads = Global.ActiveThreads()
event.thread_name = Global.ThreadName(event.pid)
key = Global.AddNode(event)
threads[event.pid] = key
ui_invalidate_key_func = lambda event: 'invalidate:%d' % event.pid
def UIInvalidateHandler(event):
if Global.IsUserInSystemUI():
return
key = DefaultHandlerInternal(event)
# insert a connector
AddConnector(event, ui_invalidate_key_func, key)
def UIUpdateHandler(event):
if Global.IsUserInSystemUI():
return
FindConnectorAndUpdate(event, ui_invalidate_key_func)
def UIInvalidateV2Handler(event):
if Global.IsUserInSystemUI():
return
key = DefaultHandlerInternal(event)
# insert a connector
if key is None:
return
ctors = Global.Connectors()
ctor_key = ui_invalidate_key_func(event)
if ctor_key in ctors:
ctors[ctor_key].append(key)
else:
ctors[ctor_key] = [ key, ]
def UIUpdateV2Handler(event):
if Global.IsUserInSystemUI():
return
ctors = Global.Connectors()
threads = Global.ActiveThreads()
event.thread_name = Global.ThreadName(event.pid)
# find the connector
ctor_key = ui_invalidate_key_func(event)
if ctor_key in ctors:
# connects all previous events to the current event
key = Global.AddNode(event)
for n in ctors[ctor_key]:
Global.AddEdge(n, key)
del ctors[ctor_key]
# decouple the previous event with the current event in the same thread
threads[event.pid] = key
# Enter/Exit foreground event handlers
def EnterForegroundHandler(event):
Global.SaveThreadNameMap()
Global.SaveWebViewThreadMap()
if Global.NotifyThreadEnterForeground(event.pid):
Global.ScheduleWriteGraph(synchronous = False)
if Global.ThreadName(event.pid) == 'ndroid.launcher':
Global.NotifyUserInSystemUI(True)
else:
Global.NotifyUserInSystemUI(False)
def ExitForegroundHandler(event):
pass
# Thread name event handler
def ThreadNameHandler(event):
# TODO: temporary hack to guess reboot event
if event.data['pid'] == 7:
Global.ScheduleWriteGraph(synchronous = False)
Global.ResetAll()
Global.AddThreadName(event.data['pid'], event.data['name'])
# Futex related handler specific for WebView applications
def FutexWaitHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.pid):
MarkCurrentWorkingUnitEnded(event.pid)
else:
DefaultHandler(event)
def futex_notify_key_func(event):
pid = event.pid if 'pid' not in event.data else event.data['pid']
return 'futex:%d,thread:%d' % (event.data['lock'], pid)
def FutexWakeHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.pid):
FindConnectorAndUpdate(event, futex_notify_key_func)
else:
DefaultHandler(event)
def FutexNotifyHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.data['pid']):
key = DefaultHandlerInternal(event)
AddConnector(event, futex_notify_key_func, key)
# Waitqueue event related handler for WebView applications
def WaitQueueWaitHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.pid):
MarkCurrentWorkingUnitEnded(event.pid)
else:
DefaultHandler(event)
def wait_queue_notify_key_func(event):
pid = event.pid if 'pid' not in event.data else event.data['pid']
return 'waitqueue:%d,thread:%d' % (event.data['lock'], pid)
def WaitQueueWakeHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.pid):
FindConnectorAndUpdate(event, wait_queue_notify_key_func)
else:
DefaultHandler(event)
def WaitQueueNotifyHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.data['pid']):
key = DefaultHandlerInternal(event)
AddConnector(event, wait_queue_notify_key_func, key)
# insert a FAKED_WAITQUEUE_WAKE event in the notified thread
thread_state = Global.ThreadState(event.data['pid'])
if (key is not None) and (thread_state == 'I' or thread_state == 'U'):
new_event = copy.deepcopy(event)
new_event.event = 'FAKED_WAITQUEUE_WAKE'
new_event.pid = event.data['pid']
del new_event.data['pid']
new_event.json['event'] = new_event.event
new_event.json['pid'] = new_event.pid
new_key = Global.AddNode(new_event)
threads = Global.ActiveThreads()
threads[new_event.pid] = new_key
Global.AddEdge(key, new_key)
# Context switch event handler
def ContextSwitchHandler(event):
if Global.IsUserInSystemUI():
return
if Global.isWebViewThread(event.data['old']) and Global.ThreadState(event.data['old']) != 'R':
MarkCurrentWorkingUnitEnded(event.data['old'])
else:
DefaultHandler(event)
| gpl-2.0 | -264,602,368,059,446,880 | 26.673653 | 104 | 0.726063 | false |
confeitaria/inelegant | inelegant/test/__init__.py | 1 | 1250 | #!/usr/bin/env python
#
# Copyright 2015, 2016 Adam Victor Brandizzi
#
# This file is part of Inelegant.
#
# Inelegant is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inelegant is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Inelegant. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import os.path
import inelegant
from inelegant.finder import TestFinder
readme_path = os.path.join(
os.path.abspath(os.path.dirname(inelegant.__file__)),
os.pardir,
'readme.rst'
)
load_tests = TestFinder(
readme_path,
'inelegant.test.dict',
'inelegant.test.finder',
'inelegant.test.fs',
'inelegant.test.io',
'inelegant.test.module',
'inelegant.test.net',
'inelegant.test.process',
'inelegant.test.toggle'
).load_tests
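# With the load_tests protocol above, running the test package through the
# standard unittest runner picks up readme.rst and the listed modules, e.g.
# (sketch): python -m unittest inelegant.test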
| lgpl-3.0 | -7,457,735,556,302,458,000 | 27.409091 | 77 | 0.7296 | false |
opendatateam/udata | setup.py | 1 | 3296 | #!/usr/bin/env python
import io
import os
import re
from setuptools import setup, find_packages
RE_REQUIREMENT = re.compile(r'^\s*-r\s*(?P<filename>.*)$')
RE_BADGE = re.compile(r'^\[\!\[(?P<text>[^\]]+)\]\[(?P<badge>[^\]]+)\]\]\[(?P<target>[^\]]+)\]$', re.M)
BADGES_TO_KEEP = ['gitter-badge', 'readthedocs-badge']
def md(filename):
'''
Load .md (markdown) file and sanitize it for PyPI.
    Remove GitHub-specific markup that PyPI does not render: all badges
    except those listed in BADGES_TO_KEEP (e.g. travis ci build badges).
'''
content = io.open(filename).read()
for match in RE_BADGE.finditer(content):
if match.group('badge') not in BADGES_TO_KEEP:
content = content.replace(match.group(0), '')
return content
long_description = '\n'.join((
md('README.md'),
md('CHANGELOG.md'),
''
))
def pip(filename):
"""Parse pip reqs file and transform it to setuptools requirements."""
requirements = []
for line in open(os.path.join('requirements', filename)):
line = line.strip()
if not line or '://' in line or line.startswith('#'):
continue
match = RE_REQUIREMENT.match(line)
if match:
requirements.extend(pip(match.group('filename')))
else:
requirements.append(line)
return requirements
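# Sketch of the expected requirements layout (following this repository's
# conventions): requirements/install.pip and requirements/test.pip hold one
# requirement per line; blank lines, comments and URL requirements are skipped,
# and "-r other.pip" lines are expanded recursively by pip() above.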
install_requires = pip('install.pip')
tests_require = pip('test.pip')
setup(
name='udata',
version=__import__('udata').__version__,
description=__import__('udata').__description__,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/opendatateam/udata',
author='Opendata Team',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
python_requires='>=3.7',
install_requires=install_requires,
setup_requires=['setuptools>=38.6.0'],
tests_require=tests_require,
extras_require={
'test': tests_require,
'sentry': ['sentry-sdk[flask] >= 1.1.0'],
},
entry_points={
'console_scripts': [
'udata = udata.commands:cli',
],
'udata.harvesters': [
'dcat = udata.harvest.backends.dcat:DcatBackend',
],
'udata.avatars': [
'internal = udata.features.identicon.backends:internal',
'adorable = udata.features.identicon.backends:adorable',
'robohash = udata.features.identicon.backends:robohash',
],
'pytest11': [
'udata = udata.tests.plugin',
],
},
license='GNU AGPLv3+',
keywords='udata opendata portal data',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: System :: Software Distribution',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules',
('License :: OSI Approved :: GNU Affero General Public License v3'
' or later (AGPLv3+)'),
],
)
| agpl-3.0 | 8,688,118,323,633,385,000 | 29.803738 | 103 | 0.594964 | false |
schutte/submit | lib/submit/deliverers/sendmail.py | 1 | 2776 | # -*- coding: utf-8 -*-
#
# This file is part of submit, a sendmail replacement or supplement for
# multi-user desktop systems.
#
# Copyright © 2008 Michael Schutte <[email protected]>
#
# submit is available under the terms of the MIT/X license. Please see the
# file COPYING for details.
from submit.deliverers import *
from submit.errors import *
from submit.i18n import *
import os
import shlex
import subprocess
__all__ = ["SendmailDeliverer"]
class SendmailDeliverer(AbstractDeliverer):
"""A deliverer submitting messages using a sendmail-compatible program."""
def needs_authentication(self):
"""Sendmail-based delivery methods never ask for authentication."""
return False
def authenticate(self, auth):
"""No authentication needed; do nothing."""
def abort(self):
"""Abort after a failed authentication procedure. Authentication will
never fail; still, do nothing."""
def deliver(self, message, rcpts):
"""Pipe the message through sendmail."""
program = self.config.get_method(str, self.method, "program",
default_sendmail)
if not program:
raise DeliveryFailedError(n_(
"Unable to find sendmail program."))
sendmail = shlex.split(program)
args = self.config.get_method(str, self.method, "arguments",
"-oem -oi")
if args: args = shlex.split(args)
else: args = []
cmd = sendmail + args + ["-f", message.efrom] + rcpts
proc = subprocess.Popen(cmd,
stdin = subprocess.PIPE, stderr = subprocess.PIPE)
proc.stdin.write(message.get_body())
proc.stdin.close()
if proc.wait() != 0:
details = proc.stderr.read().strip()
if details:
raise DeliveryFailedError(n_('"%(program)s" failed: %(details)s.'),
program=program, details=details)
else:
raise DeliveryFailedError(n_('"%(program)s" failed with unknown error.'),
program=program)
Deliverer = SendmailDeliverer
def default_sendmail():
"""Determine the path to the MTA sendmail implementation. Take into
account that `submit` itself might be called `sendmail`; in this case,
`sendmail.notsubmit` is what we are looking for."""
dirs = ("/usr/sbin", "/usr/lib")
files = ("sendmail.notsubmit", "sendmail")
for dir in dirs:
for filename in files:
filename = os.path.realpath(os.path.join(dir, filename))
if os.path.basename(filename) == "submit":
continue # avoid loops
elif os.access(filename, os.X_OK):
return filename
return None
# vim:tw=78:fo-=t:sw=4:sts=4:et:
| mit | 6,753,451,764,058,531,000 | 34.126582 | 89 | 0.615495 | false |
kba/ocropy | ocrolib/common.py | 1 | 37334 | # -*- coding: utf-8 -*-
################################################################
### common functions for data structures, file name manipulation, etc.
################################################################
from __future__ import print_function
import os
import os.path
import re
import sys
import sysconfig
import unicodedata
import warnings
import inspect
import glob
import cPickle
import numpy
from numpy import *
import pylab
from pylab import imshow
from scipy.ndimage import morphology,measurements
import PIL.Image
from default import getlocal
from toplevel import *
import chars
import codecs
import ligatures
import lstm
import morph
import multiprocessing
################################################################
### exceptions
################################################################
class OcropusException(Exception):
trace = 1
def __init__(self,*args,**kw):
Exception.__init__(self,*args,**kw)
class Unimplemented(OcropusException):
    "Exception raised when a feature is unimplemented."
    trace = 1
    def __init__(self,s):
        Exception.__init__(self,inspect.stack()[1][3])
class Internal(OcropusException):
    "Exception raised when an internal error is encountered."
    trace = 1
    def __init__(self,s):
        Exception.__init__(self,inspect.stack()[1][3])
class RecognitionError(OcropusException):
trace = 1
"Some kind of error during recognition."
def __init__(self,explanation,**kw):
self.context = kw
s = [explanation]
s += ["%s=%s"%(k,summary(kw[k])) for k in kw]
message = " ".join(s)
Exception.__init__(self,message)
class Warning(OcropusException):
trace = 0
def __init__(self,*args,**kw):
OcropusException.__init__(self,*args,**kw)
class BadClassLabel(OcropusException):
trace = 0
"Exception for bad class labels in a dataset or input."
def __init__(self,s):
Exception.__init__(self,s)
class BadImage(OcropusException):
trace = 0
def __init__(self,*args,**kw):
OcropusException.__init__(self,*args)
class BadInput(OcropusException):
trace = 0
def __init__(self,*args,**kw):
OcropusException.__init__(self,*args,**kw)
class FileNotFound(OcropusException):
trace = 0
"""Some file-not-found error during OCRopus processing."""
def __init__(self,fname):
self.fname = fname
def __str__(self):
return "file not found %s"%(self.fname,)
pickle_mode = 2
def deprecated(f):
    """Decorator that prints a one-time notice when a deprecated function is called."""
    warned = [False]
    def _wrapper(*args,**kw):
        if not warned[0]:
            print(f, "has been DEPRECATED")
            warned[0] = True
        return f(*args,**kw)
    return _wrapper
################################################################
# text normalization
################################################################
def normalize_text(s):
"""Apply standard Unicode normalizations for OCR.
This eliminates common ambiguities and weird unicode
characters."""
s = unicode(s)
s = unicodedata.normalize('NFC',s)
s = re.sub(ur'\s+(?u)',' ',s)
s = re.sub(ur'\n(?u)','',s)
s = re.sub(ur'^\s+(?u)','',s)
s = re.sub(ur'\s+$(?u)','',s)
for m,r in chars.replacements:
s = re.sub(unicode(m),unicode(r),s)
return s
def project_text(s,kind="exact"):
"""Project text onto a smaller subset of characters
for comparison."""
s = normalize_text(s)
s = re.sub(ur'( *[.] *){4,}',u'....',s) # dot rows
    s = re.sub(ur'[~_]',u'',s) # stray underscores and tildes
if kind=="exact":
return s
if kind=="nospace":
return re.sub(ur'\s','',s)
if kind=="spletdig":
return re.sub(ur'[^A-Za-z0-9 ]','',s)
if kind=="letdig":
return re.sub(ur'[^A-Za-z0-9]','',s)
if kind=="letters":
return re.sub(ur'[^A-Za-z]','',s)
if kind=="digits":
return re.sub(ur'[^0-9]','',s)
if kind=="lnc":
s = s.upper()
return re.sub(ur'[^A-Z]','',s)
raise BadInput("unknown normalization: "+kind)
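# Illustrative use of the projections above (a sketch, not part of the API;
# exact results also depend on the replacement table in chars.replacements):
#
#     project_text(u"Fig. 1:  foo-bar", kind="letdig")   # -> u"Fig1foobar"
#     project_text(u"Fig. 1:  foo-bar", kind="letters")  # -> u"Figfoobar"
#     project_text(u"Fig. 1:  foo-bar", kind="digits")   # -> u"1"
#
# These reduced forms are convenient for error rates that ignore spacing or
# punctuation differences between OCR output and ground truth.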
################################################################
### Text I/O
################################################################
def read_text(fname,nonl=1,normalize=1):
"""Read text. This assumes files are in unicode.
By default, it removes newlines and normalizes the
text for OCR processing with `normalize_text`"""
with codecs.open(fname,"r","utf-8") as stream:
result = stream.read()
if nonl and len(result)>0 and result[-1]=='\n':
result = result[:-1]
if normalize:
result = normalize_text(result)
return result
def write_text(fname,text,nonl=0,normalize=1):
"""Write text. This assumes files are in unicode.
By default, it removes newlines and normalizes the
text for OCR processing with `normalize_text`"""
if normalize:
text = normalize_text(text)
with codecs.open(fname,"w","utf-8") as stream:
stream.write(text)
if not nonl and text[-1]!='\n':
stream.write('\n')
################################################################
### Image I/O
################################################################
def pil2array(im,alpha=0):
if im.mode=="L":
a = numpy.fromstring(im.tobytes(),'B')
a.shape = im.size[1],im.size[0]
return a
if im.mode=="RGB":
a = numpy.fromstring(im.tobytes(),'B')
a.shape = im.size[1],im.size[0],3
return a
if im.mode=="RGBA":
a = numpy.fromstring(im.tobytes(),'B')
a.shape = im.size[1],im.size[0],4
if not alpha: a = a[:,:,:3]
return a
return pil2array(im.convert("L"))
def array2pil(a):
if a.dtype==dtype("B"):
if a.ndim==2:
return PIL.Image.frombytes("L",(a.shape[1],a.shape[0]),a.tostring())
elif a.ndim==3:
return PIL.Image.frombytes("RGB",(a.shape[1],a.shape[0]),a.tostring())
else:
raise OcropusException("bad image rank")
    elif a.dtype==dtype('float32'):
        return PIL.Image.frombytes("F",(a.shape[1],a.shape[0]),a.tostring())
else:
raise OcropusException("unknown image type")
def isbytearray(a):
return a.dtype in [dtype('uint8')]
def isfloatarray(a):
return a.dtype in [dtype('f'),dtype('float32'),dtype('float64')]
def isintarray(a):
return a.dtype in [dtype('B'),dtype('int16'),dtype('int32'),dtype('int64'),dtype('uint16'),dtype('uint32'),dtype('uint64')]
def isintegerarray(a):
return a.dtype in [dtype('int32'),dtype('int64'),dtype('uint32'),dtype('uint64')]
@checks(str,pageno=int,_=GRAYSCALE)
def read_image_gray(fname,pageno=0):
"""Read an image and returns it as a floating point array.
The optional page number allows images from files containing multiple
images to be addressed. Byte and short arrays are rescaled to
the range 0...1 (unsigned) or -1...1 (signed)."""
if type(fname)==tuple: fname,pageno = fname
assert pageno==0
pil = PIL.Image.open(fname)
a = pil2array(pil)
    if a.dtype==dtype('uint8'):
        a = a/255.0
    elif a.dtype==dtype('int8'):
        a = a/127.0
elif a.dtype==dtype('uint16'):
a = a/65536.0
elif a.dtype==dtype('int16'):
a = a/32767.0
elif isfloatarray(a):
pass
else:
raise OcropusException("unknown image type: "+a.dtype)
if a.ndim==3:
a = mean(a,2)
return a
def write_image_gray(fname,image,normalize=0,verbose=0):
"""Write an image to disk. If the image is of floating point
type, its values are clipped to the range [0,1],
multiplied by 255 and converted to unsigned bytes. Otherwise,
the image must be of type unsigned byte."""
if verbose: print("# writing", fname)
if isfloatarray(image):
image = array(255*clip(image,0.0,1.0),'B')
assert image.dtype==dtype('B'),"array has wrong dtype: %s"%image.dtype
im = array2pil(image)
im.save(fname)
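# Typical round trip (sketch; the file names are examples only): line images
# are read as floats in [0,1] and written back as 8-bit PNGs.
#
#     line = read_image_gray("book/0001/010001.bin.png")
#     write_image_gray("inverted.png", 1.0-line)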
@checks(str,_=ABINARY2)
def read_image_binary(fname,dtype='i',pageno=0):
"""Read an image from disk and return it as a binary image
of the given dtype."""
if type(fname)==tuple: fname,pageno = fname
assert pageno==0
pil = PIL.Image.open(fname)
a = pil2array(pil)
if a.ndim==3: a = amax(a,axis=2)
return array(a>0.5*(amin(a)+amax(a)),dtype)
@checks(str,ABINARY2)
def write_image_binary(fname,image,verbose=0):
"""Write a binary image to disk. This verifies first that the given image
is, in fact, binary. The image may be of any type, but must consist of only
two values."""
if verbose: print("# writing", fname)
assert image.ndim==2
image = array(255*(image>midrange(image)),'B')
im = array2pil(image)
im.save(fname)
@checks(AINT3,_=AINT2)
def rgb2int(a):
"""Converts a rank 3 array with RGB values stored in the
last axis into a rank 2 array containing 32 bit RGB values."""
assert a.ndim==3
assert a.dtype==dtype('B')
return array(0xffffff&((0x10000*a[:,:,0])|(0x100*a[:,:,1])|a[:,:,2]),'i')
@checks(AINT2,_=AINT3)
def int2rgb(image):
"""Converts a rank 3 array with RGB values stored in the
last axis into a rank 2 array containing 32 bit RGB values."""
assert image.ndim==2
assert isintarray(image)
a = zeros(list(image.shape)+[3],'B')
a[:,:,0] = (image>>16)
a[:,:,1] = (image>>8)
a[:,:,2] = image
return a
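# The two helpers above are inverses of each other for label values below 2**24
# (sketch; seg stands for any int-valued segmentation image):
#
#     rgb = int2rgb(seg)                 # (h,w,3) uint8, as stored in *.png files
#     assert (rgb2int(rgb)==seg).all()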
@checks(LIGHTSEG,_=DARKSEG)
def make_seg_black(image):
assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
image = image.copy()
image[image==0xffffff] = 0
return image
@checks(DARKSEG,_=LIGHTSEG)
def make_seg_white(image):
assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
image = image.copy()
image[image==0] = 0xffffff
return image
@checks(str,_=LINESEG)
def read_line_segmentation(fname):
"""Reads a line segmentation, that is an RGB image whose values
encode the segmentation of a text line. Returns an int array."""
pil = PIL.Image.open(fname)
a = pil2array(pil)
assert a.dtype==dtype('B')
assert a.ndim==3
image = rgb2int(a)
result = make_seg_black(image)
return result
@checks(str,LINESEG)
def write_line_segmentation(fname,image):
"""Writes a line segmentation, that is an RGB image whose values
encode the segmentation of a text line."""
a = int2rgb(make_seg_white(image))
im = array2pil(a)
im.save(fname)
@checks(str,_=PAGESEG)
def read_page_segmentation(fname):
"""Reads a page segmentation, that is an RGB image whose values
encode the segmentation of a page. Returns an int array."""
pil = PIL.Image.open(fname)
a = pil2array(pil)
assert a.dtype==dtype('B')
assert a.ndim==3
segmentation = rgb2int(a)
segmentation = make_seg_black(segmentation)
return segmentation
@checks(str,PAGESEG)
def write_page_segmentation(fname,image):
"""Writes a page segmentation, that is an RGB image whose values
encode the segmentation of a page."""
assert image.ndim==2
assert image.dtype in [dtype('int32'),dtype('int64')]
a = int2rgb(make_seg_white(image))
im = array2pil(a)
im.save(fname)
def iulib_page_iterator(files):
for fname in files:
image = read_image_gray(fname)
yield image,fname
def norm_max(a):
return a/amax(a)
def pad_by(image,r,dtype=None):
"""Symmetrically pad the image by the given amount.
FIXME: replace by scipy version."""
if dtype is None: dtype = image.dtype
w,h = image.shape
    result = zeros((w+2*r,h+2*r),dtype)
result[r:(w+r),r:(h+r)] = image
return result
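# Example (sketch): a 2x2 image padded by r=1 becomes a 4x4 image with the
# original values centered and zeros in the border:
#
#     pad_by(array([[1,2],[3,4]]),1)
#     # -> [[0,0,0,0],
#     #     [0,1,2,0],
#     #     [0,3,4,0],
#     #     [0,0,0,0]]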
class RegionExtractor:
"""A class facilitating iterating over the parts of a segmentation."""
def __init__(self):
self.cache = {}
def clear(self):
del self.cache
self.cache = {}
def setImage(self,image):
return self.setImageMasked(image)
def setImageMasked(self,image,mask=None,lo=None,hi=None):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This picks a subset of the segmentation to iterate
        over, using a mask and lo and hi values."""
assert image.dtype==dtype('B') or image.dtype==dtype('i'),"image must be type B or i"
if image.ndim==3: image = rgb2int(image)
assert image.ndim==2,"wrong number of dimensions"
self.image = image
labels = image
if lo is not None: labels[labels<lo] = 0
if hi is not None: labels[labels>hi] = 0
if mask is not None: labels = bitwise_and(labels,mask)
labels,correspondence = morph.renumber_labels_ordered(labels,correspondence=1)
self.labels = labels
self.correspondence = correspondence
self.objects = [None]+morph.find_objects(labels)
def setPageColumns(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the columns."""
self.setImageMasked(image,0xff0000,hi=0x800000)
def setPageParagraphs(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the paragraphs (if present
in the segmentation)."""
self.setImageMasked(image,0xffff00,hi=0x800000)
def setPageLines(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the lines."""
self.setImageMasked(image,0xffffff,hi=0x800000)
def id(self,i):
"""Return the RGB pixel value for this segment."""
return self.correspondence[i]
def x0(self,i):
"""Return x0 (column) for the start of the box."""
return self.bbox(i)[1]
def x1(self,i):
"""Return x0 (column) for the end of the box."""
return self.bbox(i)[3]
def y0(self,i):
"""Return y0 (row) for the start of the box."""
h = self.image.shape[0]
return h-self.bbox(i)[2]-1
def y1(self,i):
"""Return y0 (row) for the end of the box."""
h = self.image.shape[0]
return h-self.bbox(i)[0]-1
def bbox(self,i):
"""Return the bounding box in raster coordinates
(row0,col0,row1,col1)."""
r = self.objects[i]
# print("@@@bbox", i, r)
return (r[0].start,r[1].start,r[0].stop,r[1].stop)
def bboxMath(self,i):
"""Return the bounding box in math coordinates
(row0,col0,row1,col1)."""
h = self.image.shape[0]
(y0,x0,y1,x1) = self.bbox(i)
return (h-y1-1,x0,h-y0-1,x1)
def length(self):
"""Return the number of components."""
return len(self.objects)
def mask(self,index,margin=0):
"""Return the mask for component index."""
b = self.objects[index]
# print("@@@mask", index, b)
m = self.labels[b]
m[m!=index] = 0
if margin>0: m = pad_by(m,margin)
return array(m!=0,'B')
def extract(self,image,index,margin=0):
"""Return the subimage for component index."""
h,w = image.shape[:2]
(r0,c0,r1,c1) = self.bbox(index)
# mask = self.mask(index,margin=margin)
return image[max(0,r0-margin):min(h,r1+margin),max(0,c0-margin):min(w,c1+margin),...]
def extractMasked(self,image,index,grow=0,bg=None,margin=0,dtype=None):
"""Return the masked subimage for component index, elsewhere the bg value."""
if bg is None: bg = amax(image)
h,w = image.shape[:2]
mask = self.mask(index,margin=margin)
# FIXME ... not circular
if grow>0: mask = morphology.binary_dilation(mask,iterations=grow)
mh,mw = mask.shape
box = self.bbox(index)
r0,c0,r1,c1 = box
subimage = improc.cut(image,(r0,c0,r0+mh-2*margin,c0+mw-2*margin),margin,bg=bg)
return where(mask,subimage,bg)
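# Typical use of RegionExtractor (sketch; pseg is a page segmentation as
# returned by read_page_segmentation, page the corresponding grayscale image):
#
#     ex = RegionExtractor()
#     ex.setPageLines(pseg)
#     for i in range(1,ex.length()):
#         r0,c0,r1,c1 = ex.bbox(i)
#         line = ex.extract(page,i,margin=3)
#         # ... binarize / recognize the extracted line here ...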
################################################################
### Object reading and writing
### This handles reading and writing zipped files directly,
### and it also contains workarounds for changed module/class names.
################################################################
def save_object(fname,obj,zip=0):
if zip==0 and fname.endswith(".gz"):
zip = 1
if zip>0:
# with gzip.GzipFile(fname,"wb") as stream:
with os.popen("gzip -9 > '%s'"%fname,"wb") as stream:
cPickle.dump(obj,stream,2)
else:
with open(fname,"wb") as stream:
cPickle.dump(obj,stream,2)
def unpickle_find_global(mname,cname):
if mname=="lstm.lstm":
return getattr(lstm,cname)
if not mname in sys.modules.keys():
exec "import "+mname
return getattr(sys.modules[mname],cname)
def load_object(fname,zip=0,nofind=0,verbose=0):
"""Loads an object from disk. By default, this handles zipped files
and searches in the usual places for OCRopus. It also handles some
class names that have changed."""
if not nofind:
fname = ocropus_find_file(fname)
if verbose:
print("# loading object", fname)
if zip==0 and fname.endswith(".gz"):
zip = 1
if zip>0:
# with gzip.GzipFile(fname,"rb") as stream:
with os.popen("gunzip < '%s'"%fname,"rb") as stream:
unpickler = cPickle.Unpickler(stream)
unpickler.find_global = unpickle_find_global
return unpickler.load()
else:
with open(fname,"rb") as stream:
unpickler = cPickle.Unpickler(stream)
unpickler.find_global = unpickle_find_global
return unpickler.load()
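# Example round trip (sketch; the file name is arbitrary): a ".gz" suffix
# selects gzip compression in save_object and is handled transparently by
# load_object, provided the file is in the current directory or one of the
# standard search paths used by ocropus_find_file below.
#
#     save_object("lineest.model.gz",model)
#     model = load_object("lineest.model.gz")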
################################################################
### Simple record object.
################################################################
class Record:
"""A simple record datatype that allows initialization with
keyword arguments, as in Record(x=3,y=9)"""
def __init__(self,**kw):
self.__dict__.update(kw)
def like(self,obj):
self.__dict__.update(obj.__dict__)
return self
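# Example (sketch):
#
#     r = Record(output=u"hello",cost=1.5)
#     r.cost                    # -> 1.5
#     r2 = Record().like(r)     # copies the fields of an existing record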
################################################################
### Histograms
################################################################
def chist(l):
"""Simple counting histogram. Takes a list of items
and returns a list of (count,object) tuples."""
counts = {}
for c in l:
counts[c] = counts.get(c,0)+1
hist = [(v,k) for k,v in counts.items()]
return sorted(hist,reverse=1)
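# Example (sketch):
#
#     chist(["a","b","a","c","a"])   # -> [(3, 'a'), (1, 'c'), (1, 'b')]
#
# Counts sort in decreasing order; ties fall back to the reverse ordering of
# the items themselves.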
################################################################
### multiprocessing
################################################################
def number_of_processors():
"""Estimates the number of processors."""
return multiprocessing.cpu_count()
# return int(os.popen("cat /proc/cpuinfo | grep 'processor.*:' | wc -l").read())
def parallel_map(fun,jobs,parallel=0,chunksize=1):
if parallel<2:
for e in jobs:
result = fun(e)
yield result
else:
try:
pool = multiprocessing.Pool(parallel)
for e in pool.imap_unordered(fun,jobs,chunksize):
yield e
finally:
pool.close()
pool.join()
del pool
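# Example (sketch; process stands for any picklable per-item function): with
# parallel>=2 results are yielded in completion order, not input order.
#
#     for result in parallel_map(process,fnames,parallel=number_of_processors()):
#         ...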
def check_valid_class_label(s):
"""Determines whether the given character is a valid class label.
Control characters and spaces are not permitted."""
if type(s)==unicode:
if re.search(r'[\0-\x20]',s):
raise BadClassLabel(s)
elif type(s)==str:
if re.search(r'[^\x21-\x7e]',s):
raise BadClassLabel(s)
else:
raise BadClassLabel(s)
def summary(x):
"""Summarize a datatype as a string (for display and debugging)."""
if type(x)==numpy.ndarray:
return "<ndarray %s %s>"%(x.shape,x.dtype)
if type(x)==str and len(x)>10:
return '"%s..."'%x
if type(x)==list and len(x)>10:
return '%s...'%x
return str(x)
################################################################
### file name manipulation
################################################################
@checks(str,_=str)
def findfile(name,error=1):
result = ocropus_find_file(name)
return result
@checks(str)
def finddir(name):
"""Find some OCRopus-related resource by looking in a bunch off standard places.
(This needs to be integrated better with setup.py and the build system.)"""
local = getlocal()
path = name
if os.path.exists(path) and os.path.isdir(path): return path
path = local+name
if os.path.exists(path) and os.path.isdir(path): return path
_,tail = os.path.split(name)
path = tail
if os.path.exists(path) and os.path.isdir(path): return path
path = local+tail
if os.path.exists(path) and os.path.isdir(path): return path
raise FileNotFound("file '"+path+"' not found in . or /usr/local/share/ocropus/")
@checks(str)
def allsplitext(path):
"""Split all the pathname extensions, so that "a/b.c.d" -> "a/b", ".c.d" """
match = re.search(r'((.*/)*[^.]*)([^/]*)',path)
if not match:
return path,""
else:
return match.group(1),match.group(3)
@checks(str)
def base(path):
return allsplitext(path)[0]
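# Examples (sketch):
#
#     allsplitext("book/0001/010001.bin.png")   # -> ("book/0001/010001", ".bin.png")
#     base("book/0001/010001.gt.txt")           # -> "book/0001/010001"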
@checks(str,{str,unicode})
def write_text_simple(file,s):
"""Write the given string s to the output file."""
with open(file,"w") as stream:
if type(s)==unicode: s = s.encode("utf-8")
stream.write(s)
@checks([str])
def glob_all(args):
"""Given a list of command line arguments, expand all of them with glob."""
result = []
for arg in args:
if arg[0]=="@":
with open(arg[1:],"r") as stream:
expanded = stream.read().split("\n")
expanded = [s for s in expanded if s!=""]
else:
expanded = sorted(glob.glob(arg))
if len(expanded)<1:
raise FileNotFound("%s: expansion did not yield any files"%arg)
result += expanded
return result
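# Example (sketch): arguments are shell patterns, except that "@listfile" reads
# one path per line from the given text file.
#
#     files = glob_all(["book/????/??????.bin.png"])
#     files = glob_all(["@pagelist.txt"])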
@checks([str])
def expand_args(args):
"""Given a list of command line arguments, if the
length is one, assume it's a book directory and expands it.
Otherwise returns the arguments unchanged."""
if len(args)==1 and os.path.isdir(args[0]):
return sorted(glob.glob(args[0]+"/????/??????.png"))
else:
return args
def ocropus_find_file(fname, gz=True):
"""Search for `fname` in one of the OCRopus data directories, as well as
the current directory). If `gz` is True, search also for gzipped files.
Result of searching $fname is the first existing in:
* $base/$fname
* $base/$fname.gz # if gz
* $base/model/$fname
* $base/model/$fname.gz # if gz
* $base/data/$fname
* $base/data/$fname.gz # if gz
* $base/gui/$fname
* $base/gui/$fname.gz # if gz
    $base can be one of the following base paths:
* `$OCROPUS_DATA` environment variable
* current working directory
* ../../../../share/ocropus from this file's install location
* `/usr/local/share/ocropus`
* `$PREFIX/share/ocropus` ($PREFIX being the Python installation
prefix, usually `/usr`)
"""
possible_prefixes = []
if os.getenv("OCROPUS_DATA"):
possible_prefixes.append(os.getenv("OCROPUS_DATA"))
possible_prefixes.append(os.curdir)
possible_prefixes.append(os.path.normpath(os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
os.pardir, os.pardir, os.pardir, os.pardir, "share", "ocropus")))
possible_prefixes.append("/usr/local/share/ocropus")
possible_prefixes.append(os.path.join(
sysconfig.get_config_var("datarootdir"), "ocropus"))
# Unique entries with preserved order in possible_prefixes
# http://stackoverflow.com/a/15637398/201318
possible_prefixes = [possible_prefixes[i] for i in
sorted(numpy.unique(possible_prefixes, return_index=True)[1])]
for prefix in possible_prefixes:
if not os.path.isdir(prefix):
continue
for basename in [".", "models", "data", "gui"]:
if not os.path.isdir(os.path.join(prefix, basename)):
continue
full = os.path.join(prefix, basename, fname)
if os.path.exists(full):
return full
if gz and os.path.exists(full + ".gz"):
return full + ".gz"
raise FileNotFound(fname)
def fvariant(fname,kind,gt=""):
"""Find the file variant corresponding to the given file name.
    Possible file variants are line (or png/bin), nrm, rseg, cseg, aligned,
    lattice, costs, and txt.
Ground truth files have an extra suffix (usually something like "gt",
as in 010001.gt.txt or 010001.rseg.gt.png). By default, the variant
with the same ground truth suffix is produced. The non-ground-truth
version can be produced with gt="", the ground truth version can
be produced with gt="gt" (or some other desired suffix)."""
if gt!="": gt = "."+gt
base,ext = allsplitext(fname)
# text output
if kind=="txt":
return base+gt+".txt"
assert gt=="","gt suffix may only be supplied for .txt files (%s,%s,%s)"%(fname,kind,gt)
# a text line image
if kind=="line" or kind=="png" or kind=="bin":
return base+".bin.png"
if kind=="nrm":
return base+".nrm.png"
# a recognition lattice
if kind=="lattice":
return base+gt+".lattice"
# raw segmentation
if kind=="rseg":
return base+".rseg.png"
# character segmentation
if kind=="cseg":
return base+".cseg.png"
# text specifically aligned with cseg (this may be different from gt or txt)
if kind=="aligned":
return base+".aligned"
# per character costs
if kind=="costs":
return base+".costs"
raise BadInput("unknown kind: %s"%kind)
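# Examples (sketch):
#
#     fvariant("book/0001/010001.bin.png","txt")         # -> "book/0001/010001.txt"
#     fvariant("book/0001/010001.bin.png","txt","gt")    # -> "book/0001/010001.gt.txt"
#     fvariant("book/0001/010001.gt.txt","line")         # -> "book/0001/010001.bin.png"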
################################################################
### Utility for setting "parameters" on an object: a list of keywords for
### changing instance variables.
################################################################
def set_params(object,kw,warn=1):
"""Given an object and a dictionary of keyword arguments,
set only those object properties that are already instance
variables of the given object. Returns a new dictionary
without the key,value pairs that have been used. If
all keywords have been used, afterwards, len(kw)==0."""
kw = kw.copy()
for k,v in kw.items():
if hasattr(object,k):
setattr(object,k,v)
del kw[k]
return kw
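# Example (sketch; obj is any object that already has a threshold attribute):
#
#     rest = set_params(obj,dict(threshold=0.6,bogus=1))
#     # obj.threshold is now 0.6 and rest == {'bogus': 1}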
################################################################
### warning and logging
################################################################
def caller():
"""Just returns info about the caller in string for (for error messages)."""
frame = sys._getframe(2)
info = inspect.getframeinfo(frame)
result = "%s:%d (%s)"%(info.filename,info.lineno,info.function)
del frame
return result
def die(message,*args):
"""Die with an error message."""
message = message%args
message = caller()+" FATAL "+message+"\n"
sys.stderr.write(message)
sys.exit(1)
def warn(message,*args):
"""Give a warning message."""
message = message%args
message = caller()+" WARNING "+message+"\n"
sys.stderr.write(message)
already_warned = {}
def warn_once(message,*args):
"""Give a warning message, but just once."""
c = caller()
if c in already_warned: return
already_warned[c] = 1
message = message%args
message = c+" WARNING "+message+"\n"
sys.stderr.write(message)
def quick_check_page_components(page_bin,dpi):
"""Quickly check whether the components of page_bin are
reasonable. Returns a value between 0 and 1; <0.5 means that
there is probably something wrong."""
return 1.0
def quick_check_line_components(line_bin,dpi):
"""Quickly check whether the components of line_bin are
reasonable. Returns a value between 0 and 1; <0.5 means that
there is probably something wrong."""
return 1.0
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning,stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
################################################################
### conversion functions
################################################################
def ustrg2unicode(u,lig=ligatures.lig):
"""Convert an iulib ustrg to a Python unicode string; the
C++ version iulib.ustrg2unicode does weird things for special
symbols like -3"""
result = ""
for i in range(u.length()):
value = u.at(i)
if value>=0:
c = lig.chr(value)
if c is not None:
result += c
else:
result += "<%d>"%value
return result
### code for instantiation native components
def pyconstruct(s):
"""Constructs a Python object from a constructor, an expression
of the form x.y.z.name(args). This ensures that x.y.z is imported.
In the future, more forms of syntax may be accepted."""
env = {}
if "(" not in s:
s += "()"
path = s[:s.find("(")]
if "." in path:
module = path[:path.rfind(".")]
print("import", module)
exec "import "+module in env
return eval(s,env)
def mkpython(name):
"""Tries to instantiate a Python class. Gives an error if it looks
like a Python class but can't be instantiated. Returns None if it
doesn't look like a Python class."""
if name is None or len(name)==0:
return None
elif type(name) is not str:
return name()
elif name[0]=="=":
return pyconstruct(name[1:])
elif "(" in name or "." in name:
return pyconstruct(name)
else:
return None
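# Examples (sketch, using only the standard library): the string names a
# constructor whose module prefix is imported before evaluation.
#
#     d1 = pyconstruct("collections.OrderedDict()")
#     d2 = mkpython("=collections.OrderedDict()")   # "=" prefix forces pyconstruct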
################################################################
### loading and saving components
################################################################
# This code has to deal with a lot of special cases for all the
# different formats we have accrued.
def obinfo(ob):
"""A bit of information about the given object. Returns
the str representation of the object, and if it has a shape,
also includes the shape."""
result = str(ob)
if hasattr(ob,"shape"):
result += " "
result += str(ob.shape)
return result
def save_component(file,object,verbose=0,verify=0):
"""Save an object to disk in an appropriate format. If the object
is a wrapper for a native component (=inherits from
CommonComponent and has a comp attribute, or is in package
ocropus), write it using ocropus.save_component in native format.
Otherwise, write it using Python's pickle. We could use pickle
for everything (since the native components pickle), but that
would be slower and more confusing."""
if hasattr(object,"save_component"):
object.save_component(file)
return
if object.__class__.__name__=="CommonComponent" and hasattr(object,"comp"):
# FIXME -- get rid of this eventually
import ocropus
ocropus.save_component(file,object.comp)
return
if type(object).__module__=="ocropus":
import ocropus
ocropus.save_component(file,object)
return
if verbose:
print("[save_component]")
if verbose:
for k,v in object.__dict__.items():
print(":", k, obinfo(v))
with open(file,"wb") as stream:
pickle.dump(object,stream,pickle_mode)
if verify:
if verbose:
print("[trying to read it again]")
with open(file,"rb") as stream:
pickle.load(stream)
def load_component(file):
"""Load a component. This handles various special cases,
including old-style C++ recognizers (soon to be gotten rid of),
python expressions ("=package.ObjectName(arg1,arg2)"),
and simple pickled Python objects (default)."""
if file[0]=="=":
return pyconstruct(file[1:])
elif file[0]=="@":
file = file[1:]
with open(file,"r") as stream:
# FIXME -- get rid of this eventually
start = stream.read(128)
if start.startswith("<object>\nlinerec\n"):
# FIXME -- get rid of this eventually
warnings.warn("loading old-style linerec: %s"%file)
result = RecognizeLine()
import ocropus
result.comp = ocropus.load_IRecognizeLine(file)
return result
if start.startswith("<object>"):
# FIXME -- get rid of this eventually
warnings.warn("loading old-style cmodel: %s"%file)
import ocroold
result = ocroold.Model()
import ocropus
result.comp = ocropus.load_IModel(file)
return result
return load_object(file)
def binarize_range(image,dtype='B',threshold=0.5):
"""Binarize an image by its range."""
threshold = (amax(image)+amin(image))*threshold
scale = 1
if dtype=='B': scale = 255
return array(scale*(image>threshold),dtype=dtype)
def draw_pseg(pseg,axis=None):
if axis is None:
axis = subplot(111)
h = pseg.dim(1)
regions = ocropy.RegionExtractor()
regions.setPageLines(pseg)
for i in range(1,regions.length()):
x0,y0,x1,y1 = (regions.x0(i),regions.y0(i),regions.x1(i),regions.y1(i))
p = patches.Rectangle((x0,h-y1-1),x1-x0,y1-y0,edgecolor="red",fill=0)
axis.add_patch(p)
def draw_aligned(result,axis=None):
raise Unimplemented("FIXME draw_aligned")
if axis is None:
axis = subplot(111)
axis.imshow(NI(result.image),cmap=cm.gray)
cseg = result.cseg
if type(cseg)==numpy.ndarray: cseg = common.lseg2narray(cseg)
ocropy.make_line_segmentation_black(cseg)
ocropy.renumber_labels(cseg,1)
bboxes = ocropy.rectarray()
ocropy.bounding_boxes(bboxes,cseg)
s = re.sub(r'\s+','',result.output)
h = cseg.dim(1)
for i in range(1,bboxes.length()):
r = bboxes.at(i)
x0,y0,x1,y1 = (r.x0,r.y0,r.x1,r.y1)
p = patches.Rectangle((x0,h-y1-1),x1-x0,y1-y0,edgecolor=(0.0,0.0,1.0,0.5),fill=0)
axis.add_patch(p)
if i>0 and i-1<len(s):
axis.text(x0,h-y0-1,s[i-1],color="red",weight="bold",fontsize=14)
draw()
def plotgrid(data,d=10,shape=(30,30)):
"""Plot a list of images on a grid."""
ion()
gray()
clf()
for i in range(min(d*d,len(data))):
subplot(d,d,i+1)
row = data[i]
if shape is not None: row = row.reshape(shape)
imshow(row)
ginput(1,timeout=0.1)
def showrgb(r,g=None,b=None):
if g is None: g = r
if b is None: b = r
imshow(array([r,g,b]).transpose([1,2,0]))
def showgrid(l,cols=None,n=400,titles=None,xlabels=None,ylabels=None,**kw):
if "cmap" not in kw: kw["cmap"] = pylab.cm.gray
if "interpolation" not in kw: kw["interpolation"] = "nearest"
n = minimum(n,len(l))
if cols is None: cols = int(sqrt(n))
rows = (n+cols-1)//cols
for i in range(n):
        pylab.subplot(rows,cols,i+1)
        pylab.xticks([]); pylab.yticks([])
pylab.imshow(l[i],**kw)
if titles is not None: pylab.title(str(titles[i]))
if xlabels is not None: pylab.xlabel(str(xlabels[i]))
if ylabels is not None: pylab.ylabel(str(ylabels[i]))
def gt_explode(s):
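    # split a ground-truth transcription into per-character tokens; groups of
    # up to four characters wrapped as _xxxx_ stay together as a single token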
l = re.split(r'_(.{1,4})_',s)
result = []
for i,e in enumerate(l):
if i%2==0:
result += [c for c in e]
else:
result += [e]
result = [re.sub("\001","_",s) for s in result]
result = [re.sub("\002","\\\\",s) for s in result]
return result
def gt_implode(l):
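    # inverse of gt_explode: re-wrap multi-character tokens as _xxxx_ and
    # encode a literal underscore as ___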
result = []
for c in l:
if c=="_":
result.append("___")
elif len(c)<=1:
result.append(c)
elif len(c)<=4:
result.append("_"+c+"_")
else:
raise BadInput("cannot create ground truth transcription for: %s"%l)
return "".join(result)
@checks(int,sequence=int,frac=int,_=BOOL)
def testset(index,sequence=0,frac=10):
# this doesn't have to be good, just a fast, somewhat random function
return sequence==int(abs(sin(index))*1.23456789e6)%frac
def midrange(image,frac=0.5):
"""Computes the center of the range of image values
(for quick thresholding)."""
return frac*(amin(image)+amax(image))
def remove_noise(line,minsize=8):
"""Remove small pixels from an image."""
if minsize==0: return line
bin = (line>0.5*amax(line))
labels,n = morph.label(bin)
sums = measurements.sum(bin,labels,range(n+1))
sums = sums[labels]
good = minimum(bin,1-(sums>0)*(sums<minsize))
return good
class MovingStats:
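    # keeps a sliding window of the last n values and reports their mean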
def __init__(self,n=100):
self.data = []
self.n = n
self.count = 0
def add(self,x):
self.data += [x]
self.data = self.data[-self.n:]
self.count += 1
def mean(self):
if len(self.data)==0: return nan
return mean(self.data)
| apache-2.0 | -5,726,180,883,866,732,000 | 32.970883 | 127 | 0.583329 | false |
tombusby/Log-Bitbucket-History | logger_html.py | 1 | 2069 | #!/usr/bin/env python
import feedparser, sys, hashlib, os
from lxml import etree
from StringIO import StringIO
from datetime import datetime
def get_log_file_location():
file_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(file_dir, "work_log.html")
def make_table_header_row(table):
row = etree.SubElement(table, "tr")
etree.SubElement(row, "th").text = "Published"
etree.SubElement(row, "th").text = "Processed to Log"
etree.SubElement(row, "th").text = "Description"
def parse_entry_summary(entry):
tree = etree.parse(StringIO(entry["summary"]), parse_entry_summary.parser)
return tree.find(".//body").getchildren()
parse_entry_summary.parser = etree.HTMLParser()
def make_table_row(table, hash, entry):
row = etree.SubElement(table, "tr")
row.attrib["hash"] = hash
etree.SubElement(row, "td").text = entry["published"]
etree.SubElement(row, "td").text = datetime.today().isoformat()
summary = etree.SubElement(row, "td")
for element in parse_entry_summary(entry):
summary.append(element)
def get_existing_log():
try:
return etree.parse(get_log_file_location()).getroot()
except:
table = etree.Element("table")
table.attrib["border"] = "1"
table.attrib["style"] = "border-collapse: collapse;"
make_table_header_row(table)
return table
def get_existing_hashes(tree):
return tree.xpath(".//tr/@hash")
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: {} <user_id> <token>".format(sys.argv[0])
exit()
user_id, token = sys.argv[1:3]
table = get_existing_log()
hashes = get_existing_hashes(table)
feed_items = feedparser.parse("https://bitbucket.org/{}/rss/feed?token={}".format(user_id, token))
entries_for_user = filter(lambda e: user_id in e["title"], feed_items.entries)
for entry in sorted(entries_for_user, key=lambda k: k["published_parsed"]):
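        # de-duplicate feed entries by hashing published date + summary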
hash = hashlib.sha1(entry.published + entry.summary).hexdigest()
if hash not in hashes:
make_table_row(table, hash, entry)
with open(get_log_file_location(), "w+") as f:
f.write(etree.tostring(table, pretty_print=True))
| mit | 1,247,881,574,972,655,900 | 31.84127 | 99 | 0.699372 | false |
betur/btce-api | btceapi/keyhandler.py | 1 | 2463 | # Copyright (c) 2013 Alan McIntyre
import warnings
class KeyData(object):
def __init__(self, secret, nonce):
self.secret = secret
self.nonce = nonce
class KeyHandler(object):
'''KeyHandler handles the tedious task of managing nonces associated
with a BTC-e API key/secret pair.
The getNextNonce method is threadsafe, all others are not.'''
def __init__(self, filename=None, resaveOnDeletion=True):
'''The given file is assumed to be a text file with three lines
(key, secret, nonce) per entry.'''
if not resaveOnDeletion:
warnings.warn("The resaveOnDeletion argument to KeyHandler will"
" default to True in future versions.")
self._keys = {}
self.resaveOnDeletion = False
self.filename = filename
if filename is not None:
self.resaveOnDeletion = resaveOnDeletion
f = open(filename, "rt")
while True:
key = f.readline().strip()
if not key:
break
secret = f.readline().strip()
nonce = int(f.readline().strip())
self.addKey(key, secret, nonce)
def __del__(self):
self.close()
def close(self):
if self.resaveOnDeletion:
self.save(self.filename)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def keys(self):
return self._keys.keys()
def getKeys(self):
return self._keys.keys()
def save(self, filename):
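        # write key/secret/nonce triplets back in the same three-lines-per-entry format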
f = open(filename, "wt")
for k, data in self._keys.items():
f.write("%s\n%s\n%d\n" % (k, data.secret, data.nonce))
def addKey(self, key, secret, next_nonce):
self._keys[key] = KeyData(secret, next_nonce)
def getNextNonce(self, key):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
nonce = data.nonce
data.nonce += 1
return nonce
def getSecret(self, key):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
return data.secret
def setNextNonce(self, key, next_nonce):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
data.nonce = next_nonce
| mit | 1,563,550,528,482,904,300 | 27.976471 | 76 | 0.56151 | false |
GLolol/lightdm-gtk-greeter-settings-deb | lightdm_gtk_greeter_settings/IndicatorPropertiesDialog.py | 1 | 12548 | #!/usr/bin/env python3
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# LightDM GTK Greeter Settings
# Copyright (C) 2014 Andrew P. <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from copy import deepcopy
from glob import iglob
from gi.repository import Gtk
from lightdm_gtk_greeter_settings import (
IconEntry,
OptionEntry)
from lightdm_gtk_greeter_settings.helpers import (
C_,
bool2string,
string2bool,
get_data_path,
get_greeter_version,
SimpleEnum,
WidgetsEnum,
WidgetsWrapper)
from lightdm_gtk_greeter_settings.IndicatorsEntry import (
EmptyIndicators,
Indicators,
LayoutSet,
Option)
__all__ = ['IndicatorPropertiesDialog']
class IndicatorPath(OptionEntry.StringPathEntry):
class Row(SimpleEnum):
Title = ()
Type = ()
Icon = ()
class IndicatorIconEntry(IconEntry.IconEntry):
DefaultValue = ()
def __init__(self, widgets):
self._label = widgets['label']
super().__init__(widgets)
def _set_value(self, value):
super()._set_value(self.DefaultValue if value is None else value)
self._label.set_markup(self._current_item.menuitem.get_label())
self._image.props.visible = value not in (None, self.DefaultValue)
def _get_value(self):
return super()._get_value() or None
def _get_items(self):
for item in super()._get_items():
yield item
yield -1, (self._update_default, self._ask_default)
def _update_default(self, value, just_label):
if just_label or value is not self.DefaultValue:
return C_('option-entry|indicators', 'Use default value...'), None
self._image.props.icon_name = ''
label = C_('option-entry|indicators', '<b>Using default value</b>')
return label, label
def _ask_default(self, oldvalue):
return self.DefaultValue
class IndicatorTypeEntry(OptionEntry.BaseEntry):
def __init__(self, widgets):
super().__init__(widgets)
self._types = widgets['types']
self._indicator_choice = widgets['indicator_choice']
self._spacer_choice = widgets['spacer_choice']
self._separator_choice = widgets['separator_choice']
self._types.connect('changed', self._emit_changed)
self._indicator_choice.connect('toggled', self._on_choice_changed, None,
(self._types, widgets['indicator_box']))
self._spacer_choice.connect('toggled', self._on_choice_changed, Indicators.Spacer)
self._separator_choice.connect('toggled', self._on_choice_changed, Indicators.Separator)
self._value = None
def add_type(self, name, title):
if name not in EmptyIndicators:
self._types.append(name, title or name)
def _get_value(self):
if self._indicator_choice.props.active:
return self._types.props.active_id
else:
return self._value
def _set_value(self, value):
if value == Indicators.Spacer:
button = self._spacer_choice
elif value == Indicators.Separator:
button = self._separator_choice
else:
button = self._indicator_choice
self._value = value
self._types.set_active_id(value)
if button.props.active:
button.toggled()
else:
button.props.active = True
def _on_choice_changed(self, button, value, widgets=[]):
for w in widgets:
w.props.sensitive = button.props.active
if button.props.active:
self._value = value if value else self._types.props.active_id
self._emit_changed()
class IndicatorPropertiesDialog(Gtk.Dialog):
__gtype_name__ = 'IndicatorPropertiesDialog'
class Widgets(WidgetsEnum):
add = 'add_button'
ok = 'ok_button'
infobar = 'infobar'
message = 'message'
common_options = 'common_options_box'
custom_options = 'custom_options_box'
path = 'option_path_combo'
path_model = 'option_path_model'
hide_disabled = 'option_power_hide_disabled'
def __new__(cls, *args, **kwargs):
builder = Gtk.Builder()
builder.add_from_file(get_data_path('%s.ui' % cls.__name__))
window = builder.get_object('indicator_properties_dialog')
window.builder = builder
builder.connect_signals(window)
window.init_window(*args, **kwargs)
return window
def init_window(self, is_duplicate=None, get_defaults=None, get_name=str):
self._widgets = self.Widgets(builder=self.builder)
self._get_defaults = get_defaults
self._add_indicator = None
self._is_duplicate = is_duplicate
self._get_name = get_name
self._indicator_loaded = False
self._name = None
self._reversed = False
self._name2page = {}
for i in range(0, self._widgets.custom_options.get_n_pages()):
page = self._widgets.custom_options.get_nth_page(i)
name = Gtk.Buildable.get_name(page)
self._name2page['~' + name.rsplit('_')[-1]] = i
if get_greeter_version() < 0x020100:
self._widgets.common_options.props.visible = False
self._name2page = {
Indicators.External: self._name2page[Indicators.External],
Indicators.Text: self._name2page[Indicators.Text]}
text_prefix = 'option_text_fallback'
else:
self._name2page[Indicators.Text] = -1
text_prefix = 'option_text'
self._option_type = IndicatorTypeEntry(WidgetsWrapper(self.builder, 'option_type'))
self._option_text = OptionEntry.StringEntry(WidgetsWrapper(self.builder, text_prefix))
self._option_image = IndicatorIconEntry(WidgetsWrapper(self.builder, 'option_image'))
self._option_path = IndicatorPath(WidgetsWrapper(self.builder, 'option_path'))
self._option_hide_disabled = \
OptionEntry.BooleanEntry(WidgetsWrapper(self.builder, 'option_hide_disabled'))
for entry in (self._option_type, self._option_path):
entry.changed.connect(self._on_option_changed)
for name in Indicators:
self._option_type.add_type(name, self._get_name(name))
# Hiding first column created by Gtk.ComboBoxText
self._widgets.path.get_cells()[0].props.visible = False
for path in sorted(iglob(os.path.join(sys.prefix, 'share', 'unity', 'indicators', '*'))):
name = os.path.basename(path)
parts = name.rsplit('.', maxsplit=1)
if len(parts) == 2 and parts[0] == 'com.canonical.indicator':
name = parts[1]
row = IndicatorPath.Row._make(Type=IndicatorPath.ItemType.Value,
Title=name,
Icon='application-x-executable')
self._widgets.path_model.append(row)
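        # also offer locally installed indicator plugin libraries (*.so)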
for path in sorted(iglob(os.path.join(sys.prefix, 'lib', 'indicators3', '7', '*.so'))):
row = IndicatorPath.Row._make(Type=IndicatorPath.ItemType.Value,
Title=os.path.basename(path),
Icon='application-x-executable')
self._widgets.path_model.append(row)
def _on_option_changed(self, entry=None):
if not self._indicator_loaded:
return
name = self._option_type.value
error = None
warning = None
if name == Indicators.External:
if not str(self._option_path.value).strip():
error = C_('option-entry|indicators', 'Path/Service field is not filled')
elif name != self._name:
if self._is_duplicate and self._is_duplicate(name):
warning = C_('option-entry|indicators',
'Indicator "{name}" is already in the list.\n'
'It will be overwritten.').format(name=self._get_name(name, name))
self._widgets.ok.props.sensitive = error is None
self._widgets.add.props.sensitive = error is None
self._widgets.infobar.props.visible = error or warning
self._widgets.message.props.label = error or warning
if error:
self._widgets.infobar.props.message_type = Gtk.MessageType.WARNING
elif warning:
self._widgets.infobar.props.message_type = Gtk.MessageType.INFO
else:
self._widgets.infobar.props.message_type = Gtk.MessageType.OTHER
def on_option_type_types_changed(self, combo):
current = self._widgets.custom_options.props.page
if current != -1:
self._widgets.custom_options.get_nth_page(current).props.visible = False
current = self._name2page.get(combo.props.active_id, -1)
if current != -1:
self._widgets.custom_options.get_nth_page(current).props.visible = True
self._widgets.custom_options.props.page = current
if self._indicator_loaded:
defaults = self._get_defaults(combo.props.active_id)
self._option_text.enabled = Option.Text in defaults
self._option_image.enabled = Option.Image in defaults
def on_add_clicked(self, widget):
self._add_callback(self.get_indicator())
self._options = deepcopy(self._options)
self._on_option_changed()
@property
def add_callback(self):
return self._add_callback
@add_callback.setter
def add_callback(self, value):
self._add_callback = value
self._widgets.add.props.visible = value is not None
def set_indicator(self, options):
self._indicator_loaded = False
self._options = deepcopy(options)
self._name = options[Option.Name]
self._option_type.value = options[Option.Name]
self._option_path.value = options.get(Option.Path)
self._option_text.value = options.get(Option.Text, '')
self._option_text.enabled = Option.Text in options
self._option_image.value = options.get(Option.Image)
self._option_image.enabled = Option.Image in options
self._reversed = Option.Layout in options and LayoutSet.Reversed in options[Option.Layout]
hide_disabled = options.get(Option.HideDisabled, bool2string(False))
self._option_hide_disabled.value = hide_disabled or bool2string(True)
self._indicator_loaded = True
self._on_option_changed()
def get_indicator(self):
options = self._options
name = self._option_type.value
options[Option.Name] = name
options[Option.Layout] = set()
if name not in EmptyIndicators:
if self._option_text.enabled:
options[Option.Text] = self._option_text.value or None
options[Option.Layout].add(LayoutSet.Text)
if self._option_image.enabled:
options[Option.Image] = self._option_image.value or None
options[Option.Layout].add(LayoutSet.Image)
if self._option_text.enabled and self._option_image.enabled and self._reversed:
options[Option.Layout].add(LayoutSet.Reversed)
if LayoutSet.Text not in options[Option.Layout] and Option.Text in options:
del options[Option.Text]
if LayoutSet.Image not in options[Option.Layout] and Option.Image in options:
del options[Option.Image]
if name == Indicators.External:
options[Option.Path] = self._option_path.value
else:
options.pop(Option.Path, None)
if name == Indicators.Power and string2bool(self._option_hide_disabled.value):
options[Option.HideDisabled] = None
elif Option.HideDisabled in options:
options.pop(Option.HideDisabled, None)
return options
| gpl-3.0 | -3,611,849,266,747,698,000 | 36.681682 | 98 | 0.618664 | false |
kdheepak89/fono | fono/run.py | 1 | 4290 | #!/usr/bin/env python
"""Run module."""
import click
import data
import ReferenceModel
import solve
import version
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--folder', type=click.Path(), help='Path to data folder')
@click.option('--quantity', type=click.Path(), help='Path to quantity.csv file')
@click.option('--price', type=click.Path(), help='Path to price.csv file')
@click.option('--shipping', type=click.Path(), help='Path to shipping.csv file')
@click.option('--mipgap', type=click.FLOAT, default=0.001, help='Value of mipgap')
@click.option('--color', default='white', help='Color of solution (e.g. --color=red)')
@click.option('--fono-color', default='green', help='Color of solution (e.g. --fono-color=blue)')
@click.version_option(version.__version__, '-v', '--version')
def main(**kwargs):
"""'Find Optimal Number of Orders' aka fono."""
color = kwargs.pop('color')
fono_color = kwargs.pop('fono_color')
try:
if not any([kwargs[key] for key in kwargs]):
help_str = "{}".format(click.get_current_context().get_help())
click.secho(help_str)
click.get_current_context().exit()
def show_item(item):
if item is not None:
return item
click.echo("")
click.secho("Find the Optimal Number of Orders:", fg=fono_color, bold=True)
click.echo("")
with click.progressbar(
('Getting data', 'Creating model', 'Solving', 'Finished'),
label='fono:',
item_show_func=show_item) as bar:
for item in bar:
if item == 'Getting data':
if kwargs['folder']:
price, quantity, shipping = data.get_input(kwargs['folder'])
elif kwargs['quantity'] and kwargs['price'] and kwargs['shipping']:
quantity = data.get_quantity(kwargs['quantity'])
price = data.get_price(kwargs['price'])
shipping = data.get_shipping(kwargs['shipping'])
elif item == 'Creating model':
model = ReferenceModel.create_model(price, quantity, shipping)
elif item == 'Solving':
mipgap = kwargs.get('mipgap')
                    solve.solve_instance(model, mipgap=mipgap)
# solve.display_results(solve.solve_instance(model), model)
click.echo("")
click.secho("fono results:", fg=fono_color, bold=True)
for website in sorted(model.Websites):
click.secho("")
click.secho("{}".format(website), fg=color, bold=True, nl=False)
click.secho(":")
for item in sorted(model.Items):
if model.Quantity[website, item].value > 0:
click.echo("Buy ", nl=False)
click.secho("{} ".format(int(model.Quantity[website, item].value)), fg=color, bold=True, nl=False)
click.echo("item(s) of ", nl=False)
click.secho("{} ".format(item), fg=color, bold=True, nl=False)
click.echo("for a total of ", nl=False)
click.secho("{} ".format(price[(website, item)] * model.Quantity[website, item].value),
fg=color,
bold=True,
nl=False)
click.echo("dollars", nl=False)
click.secho(".")
click.echo("")
item_costs = model.Cost['Item'].value
shipping_costs = model.Cost['Shipping'].value
total_costs = item_costs + shipping_costs
click.secho("Total product costs = {} dollars".format(item_costs), bold=True)
click.secho("Total shipping costs = {} dollars".format(shipping_costs), bold=True)
click.echo("")
click.secho("Total costs = {} dollars".format(total_costs), fg=fono_color, bold=True)
click.echo("")
except Exception as e:
click.echo('')
raise click.ClickException("{}\n\nCheck the help (--help) on how to use fono or contact the developer.".format(
e.message))
if __name__ == '__main__':
main()
| bsd-3-clause | -4,993,770,995,784,116,000 | 41.058824 | 119 | 0.557576 | false |
chandrikas/sm | tests/test_refcounter.py | 1 | 3908 | import unittest
import testlib
import os
import mock
import errno
import refcounter
class TestRefCounter(unittest.TestCase):
@testlib.with_context
def test_get_whencalled_creates_namespace(self, context):
os.makedirs(refcounter.RefCounter.BASE_DIR)
refcounter.RefCounter.get('not-important', False, 'somenamespace')
self.assertEquals(
['somenamespace'],
os.listdir(os.path.join(refcounter.RefCounter.BASE_DIR)))
@testlib.with_context
def test_get_whencalled_returns_counters(self, context):
os.makedirs(refcounter.RefCounter.BASE_DIR)
result = refcounter.RefCounter.get(
'not-important', False, 'somenamespace')
self.assertEquals(1, result)
@testlib.with_context
def test_get_whencalled_creates_refcounter_file(self, context):
os.makedirs(refcounter.RefCounter.BASE_DIR)
refcounter.RefCounter.get('someobject', False, 'somenamespace')
self.assertEquals(
['someobject'],
os.listdir(os.path.join(
refcounter.RefCounter.BASE_DIR, 'somenamespace')))
@testlib.with_context
def test_get_whencalled_refcounter_file_contents(self, context):
os.makedirs(refcounter.RefCounter.BASE_DIR)
refcounter.RefCounter.get('someobject', False, 'somenamespace')
path_to_refcounter = os.path.join(
refcounter.RefCounter.BASE_DIR, 'somenamespace', 'someobject')
refcounter_file = open(path_to_refcounter, 'r')
contents = refcounter_file.read()
refcounter_file.close()
self.assertEquals('1 0\n', contents)
@testlib.with_context
def test_put_is_noop_if_already_zero(self, context):
os.makedirs(refcounter.RefCounter.BASE_DIR)
result = refcounter.RefCounter.put(
'someobject', False, 'somenamespace')
self.assertEquals(0, result)
@testlib.with_context
def test_writeCount_returns_true_if_file_found(self, context):
os.makedirs('/existing')
result = refcounter.RefCounter._writeCount('/existing/file', 1, 1)
self.assertTrue(result)
@testlib.with_context
def test_writeCount_returns_false_if_file_not_found(self, context):
result = refcounter.RefCounter._writeCount('/nonexisting/file', 1, 1)
self.assertFalse(result)
@mock.patch('os.rmdir', autospec=True)
@mock.patch('os.unlink', autospec=True)
@mock.patch('util.pathexists', autospec=True)
def test_removeObject_ignores_if_directory_already_removed(self,
pathexists,
unlink,
rmdir):
rmdir.side_effect = OSError(errno.ENOENT, 'ignored')
refcounter.RefCounter._removeObject('namespace', 'obj')
rmdir.assert_called_once_with(
os.path.join(refcounter.RefCounter.BASE_DIR, 'namespace'))
@mock.patch('os.rmdir', autospec=True)
@mock.patch('os.unlink', autospec=True)
@mock.patch('util.pathexists', autospec=True)
def test_removeObject_ignores_if_directory_not_empty(self,
pathexists,
unlink,
rmdir):
rmdir.side_effect = OSError(errno.ENOTEMPTY, 'ignored')
refcounter.RefCounter._removeObject('namespace', 'obj')
rmdir.assert_called_once_with(
os.path.join(refcounter.RefCounter.BASE_DIR, 'namespace'))
# Re-use legacy tests embedded in refcounter
testcase = unittest.FunctionTestCase(refcounter.RefCounter._runTests)
with mock.patch.object(refcounter.RefCounter, "BASE_DIR", "./fakesm/refcount"):
unittest.TextTestRunner().run(testcase)
| lgpl-2.1 | -4,773,335,430,316,501,000 | 34.207207 | 79 | 0.617707 | false |
urschrei/simplification | benchmark_runner.py | 1 | 1269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Standalone benchmark runner
"""
import cProfile
import pstats
import profile
import numpy as np
print("Running Rust + Cython benchmarks")
# calibrate
pr = profile.Profile()
calibration = np.mean([pr.calibrate(100000) for x in range(5)])
# add the bias
profile.Profile.bias = calibration
with open("simplification/test/cprofile_rust_cython.py", "rb") as f1:
c1 = f1.read()
with open("simplification/test/cprofile_rust_cython_complex.py", "rb") as f2:
c2 = f2.read()
with open("simplification/test/cprofile_rust_cython_shapely.py", "rb") as f3:
c3 = f3.read()
cProfile.run(c1, "simplification/test/output_stats_rust_cython")
rust_cython = pstats.Stats("simplification/test/output_stats_rust_cython")
cProfile.run(c2, "simplification/test/output_stats_rust_cython_complex")
rust_cython_c = pstats.Stats("simplification/test/output_stats_rust_cython_complex")
cProfile.run(c3, "simplification/test/output_stats_rust_cython_shapely")
shapely = pstats.Stats("simplification/test/output_stats_rust_cython_shapely")
print("Rust Cython Benchmarks\n")
rust_cython.sort_stats("cumulative").print_stats(5)
rust_cython_c.sort_stats("cumulative").print_stats(5)
shapely.sort_stats("cumulative").print_stats(20)
| mit | -8,462,080,342,845,628,000 | 29.95122 | 84 | 0.746257 | false |
corbinq27/priceTweeter | product_extractor.py | 1 | 1197 | __author__ = 'corbinq27'
import re
import json
import urllib2
#fairly specialized python script to extract prices from specific pages on wholesalegaming.biz
class ProductExtractor():
def __init__(self):
pass
def product_extractor(self):
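        # the regex captures each product's relative URL and display name
        # from the rows of a wholesalegaming.biz listing page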
the_magic_regex_string = '<tr bgcolor="#FFFFFF">\r\n <td align="left"><font color="black" face="Arial, Helvetica"'+ \
' size="2"><a CLASS="anylink" href="([^\"]+)">([^<]+)</a></font></td>'
list_of_urls = {}
with open("/tmp/hills_urls.json", "rb") as urls:
list_of_urls = json.loads(urls.read())
dict_of_pages_to_check = {"urls": []}
for each_page in list_of_urls["urls"]:
response = urllib2.urlopen(each_page)
page_source = response.read()
m = re.finditer(the_magic_regex_string, page_source)
for each_group in m:
url = "%s%s" % (each_page, each_group.group(1))
print url
dict_of_pages_to_check["urls"].append(url)
with open("/tmp/pages_to_check.json", "w") as fp:
json.dump(dict_of_pages_to_check, fp, sort_keys=True, indent=4)
| mit | -4,489,230,412,158,461,400 | 34.205882 | 126 | 0.555556 | false |
bafana5/wKRApp | db/db_create.py | 1 | 1448 | from wKRApp.views import db
from wKRApp.models import Users
# create the database and the db tables
db.create_all()
# insert in Users
db.session.add(Users("123456",
"Brian",
"Nobody",
"[email protected]",
"0812345678",
"Admin",
"C4",
"RAD",
"Business",
"bnobody",
"123456",
"No",
"No"))
db.session.add(Users("451263",
"Person",
"Someone",
"[email protected]",
"0823456789",
"Supervisor",
"D4",
"RAD",
"Business",
"psomeone",
"654321",
"No",
"No"))
db.session.add(Users("987456",
"Test",
"Sometest",
"[email protected]",
"0823456789",
"User",
"C1",
"RAD",
"Business",
"tsometest",
"456789",
"No",
"No"))
# commit the changes
db.session.commit() | mit | -4,020,737,145,296,546,000 | 28.571429 | 46 | 0.301796 | false |
portnov/sverchok | nodes/vector/formula_deform.py | 1 | 2365 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from math import *
from bpy.props import StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (updateNode)
class SvFormulaDeformNode(bpy.types.Node, SverchCustomTreeNode):
''' Deform Verts by Math '''
bl_idname = 'SvFormulaDeformNode'
bl_label = 'Deform by formula'
bl_icon = 'OUTLINER_OB_EMPTY'
ModeX = StringProperty(name='formulaX', default='x', update=updateNode)
ModeY = StringProperty(name='formulaY', default='y', update=updateNode)
ModeZ = StringProperty(name='formulaZ', default='z', update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', 'Verts')
self.outputs.new('VerticesSocket', 'Verts')
def draw_buttons(self, context, layout):
for element in 'XYZ':
row = layout.row()
split = row.split(percentage=0.15)
split.label(element)
split.split().prop(self, "Mode"+element, text='')
def process(self):
Io = self.inputs[0]
Oo = self.outputs[0]
if Oo.is_linked:
out = []
V = Io.sv_get()
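            # build a per-vertex list comprehension that is eval'd below;
            # x, y, z are the original coordinates and i is the vertex index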
Value = "[("+self.ModeX+","+self.ModeY+","+self.ModeZ+") for (x, y, z),i in zip(L, I)]"
for L in V:
I = range(len(L))
out.append(eval(Value))
Oo.sv_set(out)
def update_socket(self, context):
self.update()
def register():
bpy.utils.register_class(SvFormulaDeformNode)
def unregister():
bpy.utils.unregister_class(SvFormulaDeformNode)
| gpl-3.0 | 7,887,637,199,607,015,000 | 33.779412 | 99 | 0.654123 | false |
bmerry/entropy | entropy/__init__.py | 1 | 1900 | # Entropy: pauses Rhythmbox when the play queue is finished
# Copyright (C) 2014 Bruce Merry <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from gi.repository import GObject, RB, Peas
import gettext
gettext.install('rhythmbox', RB.locale_dir())
class EntropyPlugin(GObject.Object, Peas.Activatable):
object = GObject.property(type = GObject.Object)
def __init__(self):
super(EntropyPlugin, self).__init__()
def get_shell_player(self):
return self.object.props.shell_player
def song_changed(self, entry, user_data):
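        # stop playback as soon as the player falls off the play queue,
        # i.e. the queue has just finished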
shell_player = self.get_shell_player()
if shell_player.props.playing and self.playing_from_queue and not shell_player.props.playing_from_queue:
shell_player.stop()
self.playing_from_queue = shell_player.props.playing_from_queue
def do_activate(self):
'''
Plugin activation
'''
shell_player = self.get_shell_player()
self.playing_from_queue = shell_player.props.playing_from_queue
self.song_changed_id = shell_player.connect('playing-song-changed', self.song_changed)
def do_deactivate(self):
shell_player = self.get_shell_player()
shell_player.disconnect(self.song_changed_id)
| gpl-3.0 | -4,271,626,382,035,553,300 | 37.77551 | 112 | 0.71 | false |
hale36/SRTV | sickbeard/providers/generic.py | 1 | 26955 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import itertools
import urllib
from random import shuffle
from base64 import b16encode, b32decode
import requests
from hachoir_parser import createParser
import sickbeard
from sickbeard import helpers, classes, logger, db
from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT
from sickbeard import tvcache
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.common import Quality
from sickbeard.common import user_agents
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickbeard import show_name_helpers
class GenericProvider:
NZB = "nzb"
TORRENT = "torrent"
def __init__(self, name):
# these need to be set in the subclass
self.providerType = None
self.name = name
self.proxy = ProviderProxy()
self.proxyGlypeProxySSLwarning = None
self.urls = {}
self.url = ''
self.public = False
self.show = None
self.supportsBacklog = False
self.supportsAbsoluteNumbering = False
self.anime_only = False
self.search_mode = None
self.search_fallback = False
self.enabled = False
self.enable_daily = False
self.enable_backlog = False
self.cache = tvcache.TVCache(self)
self.session = requests.Session()
shuffle(user_agents)
self.headers = {'User-Agent': user_agents[0]}
self.btCacheURLS = [
'http://torcache.net/torrent/{torrent_hash}.torrent',
'http://thetorrent.org/torrent/{torrent_hash}.torrent',
'http://btdig.com/torrent/{torrent_hash}.torrent',
# 'http://torrage.com/torrent/{torrent_hash}.torrent',
# 'http://itorrents.org/torrent/{torrent_hash}.torrent',
]
shuffle(self.btCacheURLS)
self.proper_strings = ['PROPER|REPACK']
def getID(self):
return GenericProvider.makeID(self.name)
@staticmethod
def makeID(name):
return re.sub(r"[^\w\d_]", "_", name.strip().lower())
def imageName(self):
return self.getID() + '.png'
def _checkAuth(self):
return True
def _doLogin(self):
return True
def isActive(self):
if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
return self.isEnabled()
elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
return self.isEnabled()
else:
return False
def isEnabled(self):
"""
This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
"""
return False
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
if self.providerType == GenericProvider.NZB:
result = classes.NZBSearchResult(episodes)
elif self.providerType == GenericProvider.TORRENT:
result = classes.TorrentSearchResult(episodes)
else:
result = classes.SearchResult(episodes)
result.provider = self
return result
def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
if self.proxy.isEnabled():
self.headers.update({'Referer': self.proxy.getProxyURL()})
self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL() + 'includes/process.php?action=sslagree&submit=Continue anyway...'
else:
if 'Referer' in self.headers:
self.headers.pop('Referer')
self.proxyGlypeProxySSLwarning = None
return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
session=self.session, json=json, proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)
def _makeURL(self, result):
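        # turn a magnet link into a list of candidate BT cache .torrent URLs
        # (or keep the direct URL as-is) and build the local download filename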
urls = []
filename = u''
if result.url.startswith('magnet'):
try:
torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()
try:
torrent_name = re.findall('dn=([^&]+)', result.url)[0]
except:
torrent_name = 'NO_DOWNLOAD_NAME'
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
except:
logger.log("Unable to extract torrent hash or name from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
else:
urls = [result.url]
if self.providerType == GenericProvider.TORRENT:
filename = ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
elif self.providerType == GenericProvider.NZB:
filename = ek(os.path.join, sickbeard.NZB_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
return urls, filename
def downloadResult(self, result):
"""
Save the result to disk.
"""
# check for auth
if not self._doLogin():
return False
urls, filename = self._makeURL(result)
if self.proxy.isEnabled():
self.headers.update({'Referer': self.proxy.getProxyURL()})
elif 'Referer' in self.headers:
self.headers.pop('Referer')
for url in urls:
if 'NO_DOWNLOAD_NAME' in url:
continue
if not self.proxy.isEnabled() and url.startswith('http'):
# Let's just set a referer for every .torrent/.nzb, should work as a cover-all without side-effects
self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})
logger.log(u"Downloading a result from " + self.name + " at " + url)
# Support for Jackett/TorzNab
if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT
if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
if self._verify_download(filename):
logger.log(u"Saved result to " + filename, logger.INFO)
return True
else:
logger.log(u"Could not download %s" % url, logger.WARNING)
helpers._remove_file_failed(filename)
if len(urls):
logger.log(u"Failed to download any results", logger.WARNING)
return False
def _verify_download(self, file_name=None):
"""
Checks the saved file to see if it was actually valid, if not then consider the download a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if file_name.endswith(GenericProvider.TORRENT):
try:
parser = createParser(file_name)
if parser:
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
logger.log(u"Result is not a valid torrent file", logger.DEBUG)
return False
return True
def searchRSS(self, episodes):
return self.cache.findNeededEpisodes(episodes)
def getQuality(self, item, anime=False):
"""
Figures out the quality of the given RSS item node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item)
quality = Quality.sceneQuality(title, anime)
return quality
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
return []
def _get_season_search_strings(self, episode):
return []
def _get_episode_search_strings(self, eb_obj, add_string=''):
return []
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('title', '')
if title:
title = u'' + title.replace(' ', '.')
url = item.get('link', '')
if url:
url = url.replace('&', '&').replace('%26tr%3D', '&tr=')
return title, url
def _get_size(self, item):
"""Gets the size from the item"""
logger.log(u"Provider type doesn't have _get_size() implemented yet", logger.ERROR)
return -1
def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
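        # main search entry point: check the cache first, then query the
        # provider, parse/validate each result and group them per episode number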
self._checkAuth()
self.show = show
results = {}
itemList = []
searched_scene_season = None
for epObj in episodes:
# search cache for episode result
cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
if cacheResult:
if epObj.episode not in results:
results[epObj.episode] = cacheResult
else:
results[epObj.episode].extend(cacheResult)
# found result, search next episode
continue
# skip if season already searched
if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
continue
# mark season searched for season pack searches so we can skip later on
searched_scene_season = epObj.scene_season
search_strings = []
if len(episodes) > 1 and search_mode == 'sponly':
# get season search results
search_strings = self._get_season_search_strings(epObj)
elif search_mode == 'eponly':
# get single episode search results
search_strings = self._get_episode_search_strings(epObj)
first = search_strings and isinstance(search_strings[0], dict) and 'rid' in search_strings[0]
if first:
logger.log(u'First search_string has rid', logger.DEBUG)
for curString in search_strings:
itemList += self._doSearch(curString, search_mode, len(episodes), epObj=epObj)
if first:
first = False
if itemList:
logger.log(u'First search_string had rid, and returned results, skipping query by string', logger.DEBUG)
break
else:
logger.log(u'First search_string had rid, but returned no results, searching with string query', logger.DEBUG)
# if we found what we needed already from cache then return results and exit
if len(results) == len(episodes):
return results
# sort list by quality
if len(itemList):
items = {}
itemsUnknown = []
for item in itemList:
quality = self.getQuality(item, anime=show.is_anime)
if quality == Quality.UNKNOWN:
itemsUnknown += [item]
else:
if quality not in items:
items[quality] = [item]
else:
items[quality].append(item)
itemList = list(itertools.chain(*[v for (k, v) in sorted(items.iteritems(), reverse=True)]))
itemList += itemsUnknown if itemsUnknown else []
# filter results
cl = []
for item in itemList:
(title, url) = self._get_title_and_url(item)
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(title)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
continue
except InvalidShowException:
logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
continue
showObj = parse_result.show
quality = parse_result.quality
release_group = parse_result.release_group
version = parse_result.version
addCacheEntry = False
if not (showObj.air_by_date or showObj.sports):
if search_mode == 'sponly':
if len(parse_result.episode_numbers):
logger.log(
u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
logger.DEBUG)
addCacheEntry = True
if len(parse_result.episode_numbers) and (
parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
ep.scene_episode in parse_result.episode_numbers]):
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
else:
if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
if not addCacheEntry:
# we just use the existing info for normal searches
actual_season = parse_result.season_number
actual_episodes = parse_result.episode_numbers
else:
if not parse_result.is_air_by_date:
logger.log(
u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
logger.DEBUG)
addCacheEntry = True
else:
airdate = parse_result.air_date.toordinal()
myDB = db.DBConnection()
sql_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
[showObj.indexerid, airdate])
if len(sql_results) != 1:
logger.log(
u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
logger.WARNING)
addCacheEntry = True
if not addCacheEntry:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
# add parsed result to cache for usage later on
if addCacheEntry:
logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
if ci is not None:
cl.append(ci)
continue
# make sure we want the episode
wantEp = True
for epNo in actual_episodes:
if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
wantEp = False
break
if not wantEp:
logger.log(
u"Ignoring result " + title + " because we don't want an episode that is " +
Quality.qualityStrings[
quality], logger.INFO)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
# make a result object
epObj = []
for curEp in actual_episodes:
epObj.append(showObj.getEpisode(actual_season, curEp))
result = self.getResult(epObj)
result.show = showObj
result.url = url
result.name = title
result.quality = quality
result.release_group = release_group
result.version = version
result.content = None
result.size = self._get_size(item)
if len(epObj) == 1:
epNum = epObj[0].episode
logger.log(u"Single episode result.", logger.DEBUG)
elif len(epObj) > 1:
epNum = MULTI_EP_RESULT
logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
parse_result.episode_numbers), logger.DEBUG)
elif len(epObj) == 0:
epNum = SEASON_RESULT
logger.log(u"Separating full season result to check for later", logger.DEBUG)
if epNum not in results:
results[epNum] = [result]
else:
results[epNum].append(result)
# check if we have items to add to cache
if len(cl) > 0:
myDB = self.cache._getDB()
myDB.mass_action(cl)
return results
def findPropers(self, search_date=None):
results = self.cache.listPropers(search_date)
return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
results]
def seedRatio(self):
'''
Provider should override this value if custom seed ratio enabled
It should return the value of the provider seed ratio
'''
return ''
class NZBProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.NZB
def _get_size(self, item):
try:
size = item.get('links')[1].get('length', -1)
except IndexError:
size = -1
if not size:
logger.log(u"Size was not found in your provider response", logger.DEBUG)
return int(size)
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.TORRENT
def _get_title_and_url(self, item):
from feedparser.feedparser import FeedParserDict
if isinstance(item, (dict, FeedParserDict)):
title = item.get('title', '')
download_url = item.get('url', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
title = item[0]
download_url = item[1]
# Temp global block `DIAMOND` releases
if title.endswith('DIAMOND'):
logger.log(u'Skipping DIAMOND release for mass fake releases.')
title = download_url = u'FAKERELEASE'
if title:
title = self._clean_title_from_provider(title)
if download_url:
download_url = download_url.replace('&', '&')
return (title, download_url)
def _get_size(self, item):
size = -1
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024*1024:
size = -1
return size
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName.SXX
search_string['Season'].append(ep_string.encode('utf-8').strip())
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name + ' '
if ep_obj.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', ' ')
elif ep_obj.show.sports:
ep_string += str(ep_obj.airdate).replace('-', ' ') + ('|', ' ')[len(self.proper_strings) > 1] + ep_obj.airdate.strftime('%b')
elif ep_obj.show.anime:
ep_string += "%02d" % int(ep_obj.scene_absolute_number)
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string = ep_string + ' %s' % add_string
search_string['Episode'].append(ep_string.encode('utf-8').strip())
return [search_string]
def _clean_title_from_provider(self, title):
return (title or '').replace(' ', '.')
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST]) + ')'
)
for sqlshow in sqlResults or []:
show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if show:
curEp = show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
for term in self.proper_strings:
searchString = self._get_episode_search_strings(curEp, add_string=term)
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), show))
return results
class ProviderProxy:
def __init__(self):
self.Type = 'GlypeProxy'
self.param = 'browse.php?u='
self.option = '&b=32&f=norefer'
self.enabled = False
self.url = None
self.urls = {
'getprivate.eu (NL)': 'http://getprivate.eu/',
'hideme.nl (NL)': 'http://hideme.nl/',
'proxite.eu (DE)': 'http://proxite.eu/',
'interproxy.net (EU)': 'http://interproxy.net/',
}
def isEnabled(self):
""" Return True if we Choose to call TPB via Proxy """
return self.enabled
def getProxyURL(self):
""" Return the Proxy URL Choosen via Provider Setting """
return str(self.url)
def _buildURL(self, url):
""" Return the Proxyfied URL of the page """
if self.isEnabled():
url = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
logger.log(u"Proxified URL: " + url, logger.DEBUG)
return url
def _buildRE(self, regx):
""" Return the Proxyfied RE string """
if self.isEnabled():
regx = re.sub('//1', self.option, regx).replace('&', '&')
logger.log(u"Proxified REGEX: " + regx, logger.DEBUG)
else:
regx = re.sub('//1', '', regx)
return regx
| gpl-3.0 | -8,025,768,007,482,514,000 | 37.234043 | 189 | 0.555147 | false |
owlabs/incubator-airflow | airflow/contrib/operators/gcp_translate_speech_operator.py | 1 | 6372 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from google.protobuf.json_format import MessageToDict
from airflow import AirflowException
from airflow.contrib.hooks.gcp_speech_to_text_hook import GCPSpeechToTextHook
from airflow.contrib.hooks.gcp_translate_hook import CloudTranslateHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GcpTranslateSpeechOperator(BaseOperator):
"""
Recognizes speech in audio input and translates it.
Note that it uses the first result from the recognition api response - the one with the highest confidence
In order to see other possible results please use
:ref:`howto/operator:GcpSpeechToTextRecognizeSpeechOperator`
and
:ref:`howto/operator:CloudTranslateTextOperator`
separately
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GcpTranslateSpeechOperator`
See https://cloud.google.com/translate/docs/translating-text
Execute method returns string object with the translation
This is a list of dictionaries queried value.
Dictionary typically contains three keys (though not
all will be present in all cases).
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
Dictionary is set as XCom return value.
:param audio: audio data to be recognized. See more:
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:type audio: dict or google.cloud.speech_v1.types.RecognitionAudio
:param config: information to the recognizer that specifies how to process the request. See more:
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:type config: dict or google.cloud.speech_v1.types.RecognitionConfig
:param target_language: The language to translate results into. This is required by the API and defaults
to the target language of the current instance.
Check the list of available languages here: https://cloud.google.com/translate/docs/languages
:type target_language: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type format_: str or None
:param source_language: (Optional) The language of the text to
be translated.
:type source_language: str or None
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:type model: str or None
:param project_id: Optional, Google Cloud Platform Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is
used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Platform. Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
"""
# [START translate_speech_template_fields]
template_fields = ('target_language', 'format_', 'source_language', 'model', 'project_id', 'gcp_conn_id')
# [END translate_speech_template_fields]
@apply_defaults
def __init__(
self,
audio,
config,
target_language,
format_,
source_language,
model,
project_id=None,
gcp_conn_id='google_cloud_default',
*args,
**kwargs
):
super(GcpTranslateSpeechOperator, self).__init__(*args, **kwargs)
self.audio = audio
self.config = config
self.target_language = target_language
self.format_ = format_
self.source_language = source_language
self.model = model
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
_speech_to_text_hook = GCPSpeechToTextHook(gcp_conn_id=self.gcp_conn_id)
_translate_hook = CloudTranslateHook(gcp_conn_id=self.gcp_conn_id)
recognize_result = _speech_to_text_hook.recognize_speech(
config=self.config, audio=self.audio
)
recognize_dict = MessageToDict(recognize_result)
self.log.info("Recognition operation finished")
if len(recognize_dict['results']) == 0:
self.log.info("No recognition results")
return {}
self.log.debug("recognition result: %s", recognize_dict)
try:
transcript = recognize_dict['results'][0]['alternatives'][0]['transcript']
except KeyError as key:
raise AirflowException("Wrong response '{}' returned - it should contain {} field"
.format(recognize_dict, key))
try:
translation = _translate_hook.translate(
values=transcript,
target_language=self.target_language,
format_=self.format_,
source_language=self.source_language,
model=self.model
)
self.log.info('translated output: %s', translation)
return translation
except ValueError as e:
self.log.error('An error has been thrown from translate speech method:')
self.log.error(e)
raise AirflowException(e)
| apache-2.0 | 3,434,754,156,602,138,000 | 39.075472 | 137 | 0.679849 | false |
abhitrip/tensorflowPractice | conv_toy.py | 1 | 1318 | # import tensorflow as tf
# input = tf.placeholder(tf.float32,(None,32,32,3))
# filter_weights = tf.placeholder(tf.truncated_normal(8,8,3,20))
# filter_bias = tf.placeholder(tf.zeros(20))
# strides = [1,2,2,1] # (batch,height,width,depth)
# padding = 'SAME'
# conv = tf.nn.conv2d(input,filter_weights,strides,padding)+filter_bias
import tensorflow as tf
# output depth
k_output = 64
# Image properties
image_width = 10
image_height = 10
color_channels = 3
# Convolution filter
filter_size_width = 5
filter_size_height = 5
# Input/Image
# NHWC placeholder: [batch, height, width, channels] (the original repeated image_width, which conv2d would reject)
input = tf.placeholder(tf.float32,shape=[None,image_height,image_width,
                       color_channels])
# Weight and bias
Weight = tf.Variable(tf.truncated_normal([filter_size_height,
filter_size_width,color_channels
,k_output]))
bias = tf.Variable(tf.zeros(k_output))
# Apply convolution
conv_layer = tf.nn.conv2d(input,Weight,strides=[1,2,2,1],padding='SAME')
# Add bias
conv_layer = tf.nn.bias_add(conv_layer,bias)
# Apply activation function
conv_layer = tf.nn.relu(conv_layer)
# Apply pooling
conv_layer = tf.nn.max_pool(
conv_layer,
ksize=[1,2,2,1],
strides=[1,2,2,1],
padding='SAME'
)
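# Shape walk-through (added sketch based on the values above, not in the original):
# conv2d with 'SAME' padding and stride 2 maps the 10x10 input to ceil(10/2) = 5,
# so conv_layer is [batch, 5, 5, 64]; max_pool with ksize/strides of 2 and 'SAME'
# padding then gives ceil(5/2) = 3, i.e. [batch, 3, 3, 64].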
| mit | -6,546,808,897,618,189,000 | 27.042553 | 72 | 0.624431 | false |
codingforentrepreneurs/digital-marketplace | src/sellers/mixins.py | 1 | 1520 | import datetime
from django.db.models import Count, Min, Sum, Avg, Max
from billing.models import Transaction
from digitalmarket.mixins import LoginRequiredMixin
from products.models import Product
from .models import SellerAccount
class SellerAccountMixin(LoginRequiredMixin, object):
account = None
products = []
transactions = []
def get_account(self):
user = self.request.user
accounts = SellerAccount.objects.filter(user=user)
if accounts.exists() and accounts.count() == 1:
self.account = accounts.first()
return accounts.first()
return None
def get_products(self):
account = self.get_account()
products = Product.objects.filter(seller=account)
self.products = products
return products
def get_transactions(self):
products = self.get_products()
transactions = Transaction.objects.filter(product__in=products)
return transactions
def get_transactions_today(self):
today = datetime.date.today()
today_min = datetime.datetime.combine(today, datetime.time.min)
today_max = datetime.datetime.combine(today, datetime.time.max)
return self.get_transactions().filter(timestamp__range=(today_min, today_max))
def get_total_sales(self):
transactions = self.get_transactions().aggregate(Sum("price"), Avg("price"))
print transactions
total_sales = transactions["price__sum"]
return total_sales
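	# Added note (not in the original): Django's aggregate() returns a plain dict
	# keyed by '<field>__<func>', e.g.
	#   {'price__sum': Decimal('42.00'), 'price__avg': Decimal('21.00')}
	# so total_sales above is None when no transactions exist.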
def get_today_sales(self):
transactions = self.get_transactions_today().aggregate(Sum("price"))
total_sales = transactions["price__sum"]
return total_sales
| mit | 8,608,003,941,597,075,000 | 26.142857 | 80 | 0.748684 | false |
atlasapi/atlas-deer | atlas-api/src/main/python/generate-load-test-urls.py | 1 | 3768 | #!/usr/bin/env python
# ./generate-load-test-urls.py --number-of-urls=100 --atlas-url=stage.atlas.metabroadcast.com --target-host=host-to-test --api-key=api-key --source=pressassociation.com --num-channels-source=100 --num-channels=10 --platform=hkyn --start-date=2015-02-01 --end-date=2015-02-10
import argparse
import datetime
import dateutil.parser
import httplib
import json
import random
import sys  # needed for the sys.exit() calls below; missing from the original imports
arg_parser = argparse.ArgumentParser(description='Generate URL for load testing')
arg_parser.add_argument('--number-of-urls', required=True, dest='n', type=int, metavar='n', help='Number of url to generate')
arg_parser.add_argument('--atlas-url', required=True, dest='atlas_url', metavar='atlas_url', help='Atlas host')
arg_parser.add_argument('--target-host', required=True, dest='target_host', metavar='target_host', help='Target host')
arg_parser.add_argument('--api-key', required=True, dest='api_key', metavar='api_key', help='Atlas API key')
arg_parser.add_argument('--num-channels-source', required=True, type=int, dest='num_channels_source', metavar='num_channels_source', help='Number of channels to choose from')
arg_parser.add_argument('--num-channels', required=True, type=int, dest='num_channels', metavar='num_channels', help='Number of channels to use in request')
arg_parser.add_argument('--platform', required=True, dest='platform', metavar='platform', help='platform')
arg_parser.add_argument('--source', required=True, metavar='source', help='source of the schedules to bootstrap')
arg_parser.add_argument('--start-date', required=True, metavar='start_date', help='Start date')
arg_parser.add_argument('--end-date', required=True, metavar='end_date', help='End date')
args = arg_parser.parse_args()
args.start_date = dateutil.parser.parse(args.start_date)
args.end_date = dateutil.parser.parse(args.end_date)
class Atlas:
def __init__(self, host, port):
self.host = host
self.port = port
def get(self, resource):
conn = httplib.HTTPConnection(self.host, self.port)
request = "GET http://%s:%s%s" % (self.host, self.port, resource)
conn.request('GET', resource)
resp = conn.getresponse()
if not resp.status == 200:
if resp.status == 400:
print "request failed for %s: %s" % (resource, resp.reason)
if resp.status == 404:
print "resource %s doesn't appear to exist" % (resource)
if resp.status >= 500:
print "problem with %s? %s %s" % (self.host, resp.status, resp.reason)
resp.read()
conn.close()
sys.exit()
body = resp.read()
try:
response = json.loads(body)
except Exception as e:
print "couldn't decode response to %s: %s" % (request, e)
print body
sys.exit()
return (request, response)
atlas = Atlas(args.atlas_url, 80)
req, platform = atlas.get("/4/channel_groups/%s.json?key=%s&annotations=channels" % (args.platform, args.api_key))
def get_days(start,end):
ds = []
cur = start
while cur <= end:
ds.append(cur)
cur = cur + datetime.timedelta(1)
return ds
channels = map((lambda c: c['channel']['id']),platform['channel_group']['channels'][:args.num_channels_source])
days = get_days(args.start_date, args.end_date)
for x in range(0, args.n):
channels_string = ",".join(random.sample(channels, args.num_channels))
day = random.choice(days)
print "/4/schedules.json?id=%s&annotations=channel,content_detail&from=%s&to=%s&key=%s&source=%s" % (
# args.target_host,
channels_string,
day.isoformat(),
(day + datetime.timedelta(1)).isoformat(),
args.api_key,
args.source
)
| apache-2.0 | -617,069,677,216,576,400 | 41.818182 | 275 | 0.650743 | false |
Rhoana/butterfly | bfly/CoreLayer/AccessLayer/Websocket.py | 1 | 2717 | import yaml
import json
import logging as log
import tornado.websocket
from QueryLayer import InfoQuery
from RequestHandler import RequestHandler
from NDStore import get_config
websockets = []
class Websocket(tornado.websocket.WebSocketHandler):
INPUT = RequestHandler.INPUT
RUNTIME = RequestHandler.RUNTIME
OUTPUT = RequestHandler.OUTPUT
OPEN_API = [
'token',
'channel',
]
def initialize(self, _core, _db, _config, _root=''):
self.core = _core;
self.BFLY_CONFIG = _config
# Get keys for interface
error_key = self.RUNTIME.IMAGE.ERROR.NAME
format_key = self.INPUT.INFO.FORMAT.NAME
method_key = self.INPUT.METHODS.NAME
        # Initialize empty query
self.query = InfoQuery(**{
method_key: 'websocket:restore',
format_key: 'json',
error_key: '',
})
def check_origin(self, origin):
# Allow anyone to send messages
return True
def open(self, request, **kwargs):
# Get the path keywords
args = request.split('/')
keywords = dict(zip(self.OPEN_API, args))
# Get path information from token
config = get_config(self.BFLY_CONFIG, keywords, True)
# Update the query with the parameters
self.query.update_keys(config)
# Get message from the core
content = self.core.get_info(self.query)
# Send welcome only via this websocket
self.write_message(content)
# Add to list
if self not in websockets:
websockets.append(self)
def on_close(self):
# Remove from list
if self in websockets:
websockets.remove(self)
def on_message(self, json_msg):
# Interpret the message
message = json.loads(json_msg)
# Get keys for interface
method_key = self.INPUT.METHODS.NAME
error_key = self.RUNTIME.IMAGE.ERROR.NAME
# Get current method
action_val = message.get('action', '')
method_val = 'websocket:{}'.format(action_val)
# Set the action from the message
self.query.update_keys({
method_key: method_val,
error_key: '',
})
# Log request
log_msg = {'Incoming Message': message}
log.warning(yaml.safe_dump(log_msg))
# Get reply from the core
reply = self.core.get_edits(self.query, message)
self.send(reply)
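    # Illustrative only (the message schema is an assumption, not documented here):
    # a client frame such as {"action": "save", ...} becomes the query method
    # 'websocket:save' above before being handed to the core.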
def send(self, message):
# Log response
log_msg = """Outgoing Broadcast:
{}""".format(message)
log.warning(log_msg)
# Send to all in list
for ws in websockets:
ws.write_message(message)
| mit | 5,140,380,614,656,622,000 | 28.532609 | 61 | 0.596982 | false |
kronenpj/python-for-android | ci/constants.py | 1 | 2655 | from enum import Enum
class TargetPython(Enum):
python2 = 0
python3crystax = 1
python3 = 2
# recipes that currently break the build
# a recipe could be broken for a target Python and not for the other,
# hence we're maintaining one list per Python target
BROKEN_RECIPES_PYTHON2 = set([
# pythonhelpers.h:12:18: fatal error: string: No such file or directory
'atom',
# https://github.com/kivy/python-for-android/issues/550
'audiostream',
'brokenrecipe',
'evdev',
# distutils.errors.DistutilsError
# Could not find suitable distribution for Requirement.parse('cython')
'ffpyplayer',
'flask',
'groestlcoin_hash',
'hostpython3crystax',
# https://github.com/kivy/python-for-android/issues/1354
'kiwisolver',
'libmysqlclient',
'libsecp256k1',
'libtribler',
'ndghttpsclient',
'm2crypto',
# ImportError: No module named setuptools
'netifaces',
'Pillow',
# depends on cffi that still seems to have compilation issues
'protobuf_cpp',
'xeddsa',
'x3dh',
'pynacl',
'doubleratchet',
'omemo',
# requires `libpq-dev` system dependency e.g. for `pg_config` binary
'psycopg2',
# most likely some setup in the Docker container, because it works in host
'pyjnius', 'pyopenal',
'pyproj',
'pysdl2',
'pyzmq',
'secp256k1',
'shapely',
# mpmath package with a version >= 0.19 required
'sympy',
'twisted',
'vlc',
'websocket-client',
'zeroconf',
'zope',
'matplotlib', # https://github.com/kivy/python-for-android/issues/1900
])
BROKEN_RECIPES_PYTHON3 = set([
'brokenrecipe',
# enum34 is not compatible with Python 3.6 standard library
# https://stackoverflow.com/a/45716067/185510
'enum34',
# build_dir = glob.glob('build/lib.*')[0]
# IndexError: list index out of range
'secp256k1',
'ffpyplayer',
'icu',
# requires `libpq-dev` system dependency e.g. for `pg_config` binary
'psycopg2',
'protobuf_cpp',
# most likely some setup in the Docker container, because it works in host
'pyjnius', 'pyopenal',
# SyntaxError: invalid syntax (Python2)
'storm',
# mpmath package with a version >= 0.19 required
'sympy',
'vlc',
'matplotlib', # https://github.com/kivy/python-for-android/issues/1900
])
BROKEN_RECIPES = {
TargetPython.python2: BROKEN_RECIPES_PYTHON2,
TargetPython.python3: BROKEN_RECIPES_PYTHON3,
}
# recipes that were already built will be skipped
CORE_RECIPES = set([
'pyjnius', 'kivy', 'openssl', 'requests', 'sqlite3', 'setuptools',
'numpy', 'android', 'python2', 'python3',
])
| mit | 1,589,547,520,252,471,800 | 27.858696 | 78 | 0.652354 | false |
soft-matter/slicerator | slicerator/_version.py | 1 | 18452 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "slicerator/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| bsd-3-clause | 3,485,111,997,770,358,300 | 34.484615 | 79 | 0.575276 | false |
kquick/Thespian | thespian/test/test_requireCapability.py | 1 | 3504 | from thespian.actors import requireCapability
class TestUnitRequireCapability(object):
@requireCapability('asdf')
class req1: pass
def test_oneReq(self):
capcheck = TestUnitRequireCapability.req1.actorSystemCapabilityCheck
assert not capcheck({}, 0)
assert not capcheck({'asdf':False}, 0)
assert capcheck({'asdf':True}, 0)
assert capcheck({'asdf':True,'qwer':False}, 0)
assert capcheck({'asdf':True,'qwer':True}, 0)
assert capcheck({'qwer':False,'asdf':True}, 0)
assert capcheck({'qwer':True,'asdf':True}, 0)
assert not capcheck({'qwer':False,'asdf':False}, 0)
assert not capcheck({'qwer':True,'asdf':False}, 0)
def test_oneReq_no_value_spec_must_be_truthy(self):
capcheck = TestUnitRequireCapability.req1.actorSystemCapabilityCheck
assert capcheck({'asdf':'truthy string'}, 0)
assert capcheck({'asdf':1}, 0)
assert not capcheck({'asdf':''}, 0)
assert not capcheck({'asdf':0}, 0)
@requireCapability('asdf')
@requireCapability('qwer')
class req2: pass
def test_twoReq(self):
capcheck = TestUnitRequireCapability.req2.actorSystemCapabilityCheck
assert not capcheck({}, 0)
assert not capcheck({'asdf':False}, 0)
assert not capcheck({'asdf':True}, 0)
assert not capcheck({'asdf':True,'qwer':False}, 0)
assert capcheck({'asdf':True,'qwer':True}, 0)
assert not capcheck({'qwer':False,'asdf':True}, 0)
assert capcheck({'qwer':True,'asdf':True}, 0)
assert not capcheck({'qwer':False,'asdf':False}, 0)
assert not capcheck({'qwer':True,'asdf':False}, 0)
@requireCapability('qwer')
@requireCapability('asdf')
class req2rev: pass
def test_twoReqReverse(self):
capcheck = TestUnitRequireCapability.req2rev.actorSystemCapabilityCheck
assert not capcheck({}, 0)
assert not capcheck({'asdf':False}, 0)
assert not capcheck({'asdf':True}, 0)
assert not capcheck({'asdf':True,'qwer':False}, 0)
assert capcheck({'asdf':True,'qwer':True}, 0)
assert not capcheck({'qwer':False,'asdf':True}, 0)
assert capcheck({'qwer':True,'asdf':True}, 0)
assert not capcheck({'qwer':False,'asdf':False}, 0)
assert not capcheck({'qwer':True,'asdf':False}, 0)
@requireCapability('frog', 'ribbet')
class req3rev: pass
def test_threeReq(self):
check3 = TestUnitRequireCapability.req3rev.actorSystemCapabilityCheck
assert check3({'frog':'ribbet'}, 0)
assert not check3({'frog':'moo'}, 0)
assert not check3({'frog':True}, 0)
assert not check3({'frog':False}, 0)
assert not check3({'frog':1}, 0)
assert not check3({'frog':0}, 0)
assert not check3({'frog':None}, 0)
assert not check3({'Frog':'ribbet'}, 0)
class TestUnitRequireRequirements(object):
class req1:
@staticmethod
def actorSystemCapabilityCheck(cap, req):
return req.get('foo', 'bar') == 'woof'
def test_ActorReqs(self):
reqCheck = TestUnitRequireRequirements.req1.actorSystemCapabilityCheck
assert not reqCheck({}, {})
assert not reqCheck({}, {'foo':None})
assert not reqCheck({}, {'foo':True})
assert not reqCheck({}, {'foo':'boo'})
assert not reqCheck({}, {'dog':'woof'})
assert reqCheck({}, {'foo':'woof'})
assert reqCheck({}, {'foo':'woof', 'bar':'foo'})
| mit | 4,632,017,876,383,767,000 | 37.505495 | 79 | 0.616438 | false |
NarlikarLab/DIVERSITY | plotFigures.py | 1 | 2017 | ##################### DIVERSITY #####################
# DIVERSITY is a tool to explore multiple ways of protein-DNA
# binding in the genome. More information can be found in the README file.
# Copyright (C) 2015 Sneha Mitra, Anushua Biswas and Leelavati Narlikar
# DIVERSITY is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# DIVERSITY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
######################################################
# Plot likelihood values
import os
from config import *
# plot a single likelihood file
def plotSingleFile(d, dirname):
f1 = dirname + "/" + likelihoodFile
f2 = dirname + "/" + likelihoodPlotFile
os.system("gnuplot -e 'filename=\"" + f1 + "\"; var=\"" + f2 + "\"' " + d['-v'])
os.system("rm " + f1)
# plot likelihood for all modes in different files
def plotLikelihood(d):
for i in range(d['-minMode'], d['-maxMode'] + 1):
f1 = d['-o'][1] + "/" + modeDir.format(str(i)) + "/" + likelihoodFile
f2 = d['-o'][1] + "/" + modeDir.format(str(i)) + "/" + likelihoodPlotFile
os.system("gnuplot -e 'filename=\"" + f1 + "\"; var=\"" + f2 + "\"' " + d['-v'])
os.system("rm " + f1)
def plotLikelihoodMode(d, mode):
f1 = d['-o'][1] + "/" + modeDir.format(str(mode)) + "/" + likelihoodFile
f2 = d['-o'][1] + "/" + modeDir.format(str(mode)) + "/" + likelihoodPlotFile
os.system("gnuplot -e 'filename=\"" + f1 + "\"; var=\"" + f2 + "\"' " + d['-v'])
os.system("rm " + f1)
| gpl-3.0 | 5,421,648,628,846,587,000 | 41.914894 | 88 | 0.597422 | false |
75651/kbengine_cloud | demo/res/scripts/client/interfaces/State.py | 1 | 1642 | # -*- coding: utf-8 -*-
#
"""
"""
import json
import GlobalDefine
from KBEDebug import *
class State:
"""
"""
def __init__(self):
"""
"""
pass
# ----------------------------------------------------------------
# public
# ----------------------------------------------------------------
def getState(self):
return self.state
def isState(self, state):
return self.state == state
def isForbid(self, forbid):
"""
scdefine.FORBID_***
"""
return self.forbids & forbid
# ----------------------------------------------------------------
# callback
# ----------------------------------------------------------------
def onStateChanged_(self, oldState, newState):
"""
virtual method.
"""
		# Notify the presentation layer that the state display has changed
KBEngine.fireEvent("set_state", json.dumps((self.id, newState)))
def onForbidChanged_(self, oldForbids, newForbids):
"""
virtual method.
"""
pass
# ----------------------------------------------------------------
# property method
# ----------------------------------------------------------------
def set_state(self, oldValue):
DEBUG_MSG("%s::set_state: %i changed:%s->%s" % (self.getScriptName(), self.id, oldValue, self.state))
self.onStateChanged_(oldValue, self.state)
def set_effStates(self, oldValue):
DEBUG_MSG("%s::set_effStates: %i changed:%s->%s" % (self.getScriptName(), self.id, oldValue, self.effStates))
self.onEffectStateChanged_(oldValue, self.effStates)
def set_forbids(self, oldValue):
DEBUG_MSG("%s::set_forbids: %i changed:%s->%s" % (self.getScriptName(), self.id, oldValue, self.forbids))
self.onForbidChanged_(oldValue, self.forbids)
| lgpl-3.0 | 6,644,067,555,815,716,000 | 25.622951 | 111 | 0.497537 | false |
adzanette/scf-extractor | scf-extractor/lib/peewee.py | 1 | 67648 | # (\
# ( \ /(o)\ caw!
# ( \/ ()/ /)
# ( `;.))'".)
# `(/////.-'
# =====))=))===()
# ///'
# //
# '
from __future__ import with_statement
import datetime
import decimal
import logging
import operator
import os
import re
import threading
import time
from collections import deque, namedtuple
from copy import deepcopy
__all__ = [
'IntegerField', 'BigIntegerField', 'PrimaryKeyField', 'FloatField', 'DoubleField',
'DecimalField', 'CharField', 'TextField', 'DateTimeField', 'DateField', 'TimeField',
'BooleanField', 'ForeignKeyField', 'Model', 'DoesNotExist', 'ImproperlyConfigured',
'DQ', 'fn', 'SqliteDatabase', 'MySQLDatabase', 'PostgresqlDatabase', 'Field',
'JOIN_LEFT_OUTER', 'JOIN_INNER', 'JOIN_FULL',
]
try:
import sqlite3
except ImportError:
sqlite3 = None
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql
except ImportError:
try:
import pymysql as mysql
except ImportError:
mysql = None
class ImproperlyConfigured(Exception):
pass
if sqlite3 is None and psycopg2 is None and mysql is None:
raise ImproperlyConfigured('Either sqlite3, psycopg2 or MySQLdb must be installed')
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, str)
sqlite3.register_adapter(datetime.date, str)
sqlite3.register_adapter(datetime.time, str)
sqlite3.register_converter('decimal', lambda v: decimal.Decimal(v))
if psycopg2:
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
logger = logging.getLogger('peewee')
OP_AND = 0
OP_OR = 1
OP_ADD = 10
OP_SUB = 11
OP_MUL = 12
OP_DIV = 13
OP_AND = 14
OP_OR = 15
OP_XOR = 16
OP_USER = 19
OP_EQ = 20
OP_LT = 21
OP_LTE = 22
OP_GT = 23
OP_GTE = 24
OP_NE = 25
OP_IN = 26
OP_IS = 27
OP_LIKE = 28
OP_ILIKE = 29
DJANGO_MAP = {
'eq': OP_EQ,
'lt': OP_LT,
'lte': OP_LTE,
'gt': OP_GT,
'gte': OP_GTE,
'ne': OP_NE,
'in': OP_IN,
'is': OP_IS,
'like': OP_LIKE,
'ilike': OP_ILIKE,
}
JOIN_INNER = 1
JOIN_LEFT_OUTER = 2
JOIN_FULL = 3
def dict_update(orig, extra):
new = {}
new.update(orig)
new.update(extra)
return new
class Leaf(object):
def __init__(self):
self.negated = False
self._alias = None
def __invert__(self):
self.negated = not self.negated
return self
def alias(self, a):
self._alias = a
return self
def asc(self):
return Ordering(self, True)
def desc(self):
return Ordering(self, False)
def _e(op, inv=False):
def inner(self, rhs):
if inv:
return Expr(rhs, op, self)
return Expr(self, op, rhs)
return inner
__and__ = _e(OP_AND)
__or__ = _e(OP_OR)
__add__ = _e(OP_ADD)
__sub__ = _e(OP_SUB)
__mul__ = _e(OP_MUL)
__div__ = _e(OP_DIV)
__xor__ = _e(OP_XOR)
__radd__ = _e(OP_ADD, inv=True)
__rsub__ = _e(OP_SUB, inv=True)
__rmul__ = _e(OP_MUL, inv=True)
__rdiv__ = _e(OP_DIV, inv=True)
__rand__ = _e(OP_AND, inv=True)
__ror__ = _e(OP_OR, inv=True)
__rxor__ = _e(OP_XOR, inv=True)
__eq__ = _e(OP_EQ)
__lt__ = _e(OP_LT)
__le__ = _e(OP_LTE)
__gt__ = _e(OP_GT)
__ge__ = _e(OP_GTE)
__ne__ = _e(OP_NE)
__lshift__ = _e(OP_IN)
__rshift__ = _e(OP_IS)
__mod__ = _e(OP_LIKE)
__pow__ = _e(OP_ILIKE)
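    # Illustrative note (not in the original source): with these overloads an
    # expression like (SomeModel.name == 'x') | (SomeModel.id << [1, 2]) does not
    # evaluate to a bool; it builds an Expr tree (OP_EQ / OP_IN nodes joined by
    # OP_OR) that the QueryCompiler later renders to SQL.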
class Expr(Leaf):
def __init__(self, lhs, op, rhs, negated=False):
super(Expr, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
self.negated = negated
def clone(self):
return Expr(self.lhs, self.op, self.rhs, self.negated)
class DQ(Leaf):
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
def clone(self):
return DQ(**self.query)
class Param(Leaf):
def __init__(self, data):
self.data = data
super(Param, self).__init__()
class Func(Leaf):
def __init__(self, name, *params):
self.name = name
self.params = params
super(Func, self).__init__()
def clone(self):
return Func(self.name, *self.params)
def __getattr__(self, attr):
def dec(*args, **kwargs):
return Func(attr, *args, **kwargs)
return dec
fn = Func(None)
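# Illustrative usage (hypothetical model and fields, not part of this module):
#   fn.Count(Tweet.id) or fn.Lower(fn.Substr(User.username, 1, 1))
# attribute access on `fn` builds arbitrary SQL function calls via Func.__getattr__.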
class FieldDescriptor(object):
def __init__(self, field):
self.field = field
self.att_name = self.field.name
def __get__(self, instance, instance_type=None):
if instance:
return instance._data.get(self.att_name)
return self.field
def __set__(self, instance, value):
instance._data[self.att_name] = value
Ordering = namedtuple('Ordering', ('param', 'asc'))
R = namedtuple('R', ('value',))
class Field(Leaf):
_field_counter = 0
_order = 0
db_field = 'unknown'
template = '%(column_type)s'
def __init__(self, null=False, index=False, unique=False, verbose_name=None,
help_text=None, db_column=None, default=None, choices=None,
primary_key=False, sequence=None, *args, **kwargs):
self.null = null
self.index = index
self.unique = unique
self.verbose_name = verbose_name
self.help_text = help_text
self.db_column = db_column
self.default = default
self.choices = choices
self.primary_key = primary_key
self.sequence = sequence
self.attributes = self.field_attributes()
self.attributes.update(kwargs)
Field._field_counter += 1
self._order = Field._field_counter
super(Field, self).__init__()
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
self.db_column = self.db_column or self.name
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
setattr(model_class, name, FieldDescriptor(self))
def field_attributes(self):
return {}
def get_db_field(self):
return self.db_field
def coerce(self, value):
return value
def db_value(self, value):
return value if value is None else self.coerce(value)
def python_value(self, value):
return value if value is None else self.coerce(value)
class IntegerField(Field):
db_field = 'int'
def coerce(self, value):
return int(value)
class BigIntegerField(IntegerField):
db_field = 'bigint'
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
def __init__(self, *args, **kwargs):
kwargs['primary_key'] = True
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
db_field = 'float'
def coerce(self, value):
return float(value)
class DoubleField(FloatField):
db_field = 'double'
class DecimalField(Field):
db_field = 'decimal'
template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)'
def field_attributes(self):
return {
'max_digits': 10,
'decimal_places': 5,
'auto_round': False,
'rounding': decimal.DefaultContext.rounding,
}
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.attributes['auto_round']:
exp = D(10)**(-self.attributes['decimal_places'])
return D(str(value)).quantize(exp, rounding=self.attributes['rounding'])
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
def format_unicode(s, encoding='utf-8'):
if isinstance(s, unicode):
return s
elif isinstance(s, basestring):
return s.decode(encoding)
elif hasattr(s, '__unicode__'):
return s.__unicode__()
else:
return unicode(bytes(s), encoding)
class CharField(Field):
db_field = 'string'
template = '%(column_type)s(%(max_length)s)'
def field_attributes(self):
return {'max_length': 255}
def coerce(self, value):
value = format_unicode(value or '')
return value[:self.attributes['max_length']]
class TextField(Field):
db_field = 'text'
def coerce(self, value):
return format_unicode(value or '')
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
class DateTimeField(Field):
db_field = 'datetime'
def field_attributes(self):
return {
'formats': [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.attributes['formats'])
return value
class DateField(Field):
db_field = 'date'
def field_attributes(self):
return {
'formats': [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.attributes['formats'], pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
class TimeField(Field):
db_field = 'time'
def field_attributes(self):
return {
'formats': [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.attributes['formats'], pp)
elif value and isinstance(value, datetime.datetime):
return value.time()
return value
class BooleanField(Field):
db_field = 'bool'
def coerce(self, value):
return bool(value)
class RelationDescriptor(FieldDescriptor):
def __init__(self, field, rel_model):
self.rel_model = rel_model
super(RelationDescriptor, self).__init__(field)
def get_object_or_id(self, instance):
rel_id = instance._data.get(self.att_name)
if rel_id is not None or self.att_name in instance._obj_cache:
if self.att_name not in instance._obj_cache:
obj = self.rel_model.get(self.rel_model._meta.primary_key==rel_id)
instance._obj_cache[self.att_name] = obj
return instance._obj_cache[self.att_name]
elif not self.field.null:
raise self.rel_model.DoesNotExist
return rel_id
def __get__(self, instance, instance_type=None):
if instance:
return self.get_object_or_id(instance)
return self.field
def __set__(self, instance, value):
if isinstance(value, self.rel_model):
instance._data[self.att_name] = value.get_id()
instance._obj_cache[self.att_name] = value
else:
instance._data[self.att_name] = value
class ReverseRelationDescriptor(object):
def __init__(self, field):
self.field = field
self.rel_model = field.model_class
def __get__(self, instance, instance_type=None):
if instance:
return self.rel_model.select().where(self.field==instance.get_id())
return self
class ForeignKeyField(IntegerField):
def __init__(self, rel_model, null=False, related_name=None, cascade=False, extra=None, *args, **kwargs):
self.rel_model = rel_model
self._related_name = related_name
self.cascade = cascade
self.extra = extra
kwargs.update(dict(
cascade='ON DELETE CASCADE' if self.cascade else '',
extra=extra or '',
))
super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
self.db_column = self.db_column or '%s_id' % self.name
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
self.related_name = self._related_name or '%s_set' % (model_class._meta.name)
if self.rel_model == 'self':
self.rel_model = self.model_class
if self.related_name in self.rel_model._meta.fields:
raise AttributeError('Foreign key: %s.%s related name "%s" collision with field of same name' % (
self.model_class._meta.name, self.name, self.related_name))
setattr(model_class, name, RelationDescriptor(self, self.rel_model))
setattr(self.rel_model, self.related_name, ReverseRelationDescriptor(self))
model_class._meta.rel[self.name] = self
self.rel_model._meta.reverse_rel[self.related_name] = self
def get_db_field(self):
to_pk = self.rel_model._meta.primary_key
if not isinstance(to_pk, PrimaryKeyField):
return to_pk.get_db_field()
return super(ForeignKeyField, self).get_db_field()
def coerce(self, value):
return self.rel_model._meta.primary_key.coerce(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = value.get_id()
return self.rel_model._meta.primary_key.db_value(value)
class QueryCompiler(object):
field_map = {
'int': 'INTEGER',
'bigint': 'INTEGER',
'float': 'REAL',
'double': 'REAL',
'decimal': 'DECIMAL',
'string': 'VARCHAR',
'text': 'TEXT',
'datetime': 'DATETIME',
'date': 'DATE',
'time': 'TIME',
'bool': 'SMALLINT',
'primary_key': 'INTEGER',
}
op_map = {
OP_EQ: '=',
OP_LT: '<',
OP_LTE: '<=',
OP_GT: '>',
OP_GTE: '>=',
OP_NE: '!=',
OP_IN: 'IN',
OP_IS: 'IS',
OP_LIKE: 'LIKE',
OP_ILIKE: 'ILIKE',
OP_ADD: '+',
OP_SUB: '-',
OP_MUL: '*',
OP_DIV: '/',
OP_XOR: '^',
OP_AND: 'AND',
OP_OR: 'OR',
}
join_map = {
JOIN_INNER: 'INNER',
JOIN_LEFT_OUTER: 'LEFT OUTER',
JOIN_FULL: 'FULL',
}
def __init__(self, quote_char='"', interpolation='?', field_overrides=None,
op_overrides=None):
self.quote_char = quote_char
self.interpolation = interpolation
self._field_map = dict_update(self.field_map, field_overrides or {})
self._op_map = dict_update(self.op_map, op_overrides or {})
def quote(self, s):
return ''.join((self.quote_char, s, self.quote_char))
def get_field(self, f):
return self._field_map[f]
def get_op(self, q):
return self._op_map[q]
def _max_alias(self, am):
max_alias = 0
if am:
for a in am.values():
i = int(a.lstrip('t'))
if i > max_alias:
max_alias = i
return max_alias + 1
def parse_expr(self, expr, alias_map=None):
s = self.interpolation
p = [expr]
if isinstance(expr, Expr):
lhs, lparams = self.parse_expr(expr.lhs, alias_map)
rhs, rparams = self.parse_expr(expr.rhs, alias_map)
s = '(%s %s %s)' % (lhs, self.get_op(expr.op), rhs)
p = lparams + rparams
elif isinstance(expr, Field):
s = self.quote(expr.db_column)
if alias_map and expr.model_class in alias_map:
s = '.'.join((alias_map[expr.model_class], s))
p = []
elif isinstance(expr, Func):
p = []
exprs = []
for param in expr.params:
parsed, params = self.parse_expr(param, alias_map)
exprs.append(parsed)
p.extend(params)
s = '%s(%s)' % (expr.name, ', '.join(exprs))
elif isinstance(expr, Param):
s = self.interpolation
p = [expr.data]
elif isinstance(expr, Ordering):
s, p = self.parse_expr(expr.param, alias_map)
s += ' ASC' if expr.asc else ' DESC'
elif isinstance(expr, R):
s = expr.value
p = []
elif isinstance(expr, SelectQuery):
max_alias = self._max_alias(alias_map)
clone = expr.clone()
if not expr._explicit_selection:
clone._select = (clone.model_class._meta.primary_key,)
subselect, p = self.parse_select_query(clone, max_alias, alias_map)
s = '(%s)' % subselect
elif isinstance(expr, (list, tuple)):
exprs = []
p = []
for i in expr:
e, v = self.parse_expr(i, alias_map)
exprs.append(e)
p.extend(v)
s = '(%s)' % ','.join(exprs)
elif isinstance(expr, Model):
s = self.interpolation
p = [expr.get_id()]
if isinstance(expr, Leaf):
if expr.negated:
s = 'NOT %s' % s
if expr._alias:
s = ' '.join((s, 'AS', expr._alias))
return s, p
def parse_query_node(self, qnode, alias_map):
if qnode is not None:
return self.parse_expr(qnode, alias_map)
return '', []
def parse_joins(self, joins, model_class, alias_map):
parsed = []
seen = set()
def _traverse(curr):
if curr not in joins or curr in seen:
return
seen.add(curr)
for join in joins[curr]:
from_model = curr
to_model = join.model_class
field = from_model._meta.rel_for_model(to_model, join.on)
if field:
left_field = field.db_column
right_field = to_model._meta.primary_key.db_column
else:
field = to_model._meta.rel_for_model(from_model, join.on)
left_field = from_model._meta.primary_key.db_column
right_field = field.db_column
join_type = join.join_type or JOIN_INNER
lhs = '%s.%s' % (alias_map[from_model], self.quote(left_field))
rhs = '%s.%s' % (alias_map[to_model], self.quote(right_field))
parsed.append('%s JOIN %s AS %s ON %s = %s' % (
self.join_map[join_type],
self.quote(to_model._meta.db_table),
alias_map[to_model],
lhs,
rhs,
))
_traverse(to_model)
_traverse(model_class)
return parsed
def parse_expr_list(self, s, alias_map):
parsed = []
data = []
for expr in s:
expr_str, vars = self.parse_expr(expr, alias_map)
parsed.append(expr_str)
data.extend(vars)
return ', '.join(parsed), data
def calculate_alias_map(self, query, start=1):
alias_map = {query.model_class: 't%s' % start}
for model, joins in query._joins.items():
if model not in alias_map:
start += 1
alias_map[model] = 't%s' % start
for join in joins:
if join.model_class not in alias_map:
start += 1
alias_map[join.model_class] = 't%s' % start
return alias_map
def parse_select_query(self, query, start=1, alias_map=None):
model = query.model_class
db = model._meta.database
alias_map = alias_map or {}
alias_map.update(self.calculate_alias_map(query, start))
parts = ['SELECT']
params = []
if query._distinct:
parts.append('DISTINCT')
selection = query._select
select, s_params = self.parse_expr_list(selection, alias_map)
parts.append(select)
params.extend(s_params)
parts.append('FROM %s AS %s' % (self.quote(model._meta.db_table), alias_map[model]))
joins = self.parse_joins(query._joins, query.model_class, alias_map)
if joins:
parts.append(' '.join(joins))
where, w_params = self.parse_query_node(query._where, alias_map)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
if query._group_by:
group_by, g_params = self.parse_expr_list(query._group_by, alias_map)
parts.append('GROUP BY %s' % group_by)
params.extend(g_params)
if query._having:
having, h_params = self.parse_query_node(query._having, alias_map)
parts.append('HAVING %s' % having)
params.extend(h_params)
if query._order_by:
order_by, _ = self.parse_expr_list(query._order_by, alias_map)
parts.append('ORDER BY %s' % order_by)
if query._limit or (query._offset and not db.empty_limit):
limit = query._limit or -1
parts.append('LIMIT %s' % limit)
if query._offset:
parts.append('OFFSET %s' % query._offset)
if query._for_update:
parts.append('FOR UPDATE')
return ' '.join(parts), params
def _parse_field_dictionary(self, d):
sets, params = [], []
for field, expr in d.items():
field_str, _ = self.parse_expr(field)
val_str, val_params = self.parse_expr(expr)
val_params = [field.db_value(vp) for vp in val_params]
sets.append((field_str, val_str))
params.extend(val_params)
return sets, params
def parse_update_query(self, query):
model = query.model_class
parts = ['UPDATE %s SET' % self.quote(model._meta.db_table)]
sets, params = self._parse_field_dictionary(query._update)
parts.append(', '.join('%s=%s' % (f, v) for f, v in sets))
where, w_params = self.parse_query_node(query._where, None)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
return ' '.join(parts), params
def parse_insert_query(self, query):
model = query.model_class
parts = ['INSERT INTO %s' % self.quote(model._meta.db_table)]
sets, params = self._parse_field_dictionary(query._insert)
parts.append('(%s)' % ', '.join(s[0] for s in sets))
parts.append('VALUES (%s)' % ', '.join(s[1] for s in sets))
return ' '.join(parts), params
def parse_delete_query(self, query):
model = query.model_class
parts = ['DELETE FROM %s' % self.quote(model._meta.db_table)]
params = []
where, w_params = self.parse_query_node(query._where, None)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
return ' '.join(parts), params
def field_sql(self, field):
attrs = field.attributes
attrs['column_type'] = self.get_field(field.get_db_field())
template = field.template
if isinstance(field, ForeignKeyField):
to_pk = field.rel_model._meta.primary_key
if not isinstance(to_pk, PrimaryKeyField):
template = to_pk.template
attrs.update(to_pk.attributes)
parts = [self.quote(field.db_column), template]
if not field.null:
parts.append('NOT NULL')
if field.primary_key:
parts.append('PRIMARY KEY')
if isinstance(field, ForeignKeyField):
ref_mc = (
self.quote(field.rel_model._meta.db_table),
self.quote(field.rel_model._meta.primary_key.db_column),
)
parts.append('REFERENCES %s (%s)' % ref_mc)
parts.append('%(cascade)s%(extra)s')
elif field.sequence:
parts.append("DEFAULT NEXTVAL('%s')" % self.quote(field.sequence))
return ' '.join(p % attrs for p in parts)
def parse_create_table(self, model_class, safe=False):
parts = ['CREATE TABLE']
if safe:
parts.append('IF NOT EXISTS')
parts.append(self.quote(model_class._meta.db_table))
columns = ', '.join(self.field_sql(f) for f in model_class._meta.get_fields())
parts.append('(%s)' % columns)
return parts
def create_table(self, model_class, safe=False):
return ' '.join(self.parse_create_table(model_class, safe))
def drop_table(self, model_class, fail_silently=False, cascade=False):
parts = ['DROP TABLE']
if fail_silently:
parts.append('IF EXISTS')
parts.append(self.quote(model_class._meta.db_table))
if cascade:
parts.append('CASCADE')
return ' '.join(parts)
def parse_create_index(self, model_class, fields, unique):
tbl_name = model_class._meta.db_table
colnames = [f.db_column for f in fields]
parts = ['CREATE %s' % ('UNIQUE INDEX' if unique else 'INDEX')]
parts.append(self.quote('%s_%s' % (tbl_name, '_'.join(colnames))))
parts.append('ON %s' % self.quote(tbl_name))
parts.append('(%s)' % ', '.join(map(self.quote, colnames)))
return parts
def create_index(self, model_class, fields, unique):
return ' '.join(self.parse_create_index(model_class, fields, unique))
def create_sequence(self, sequence_name):
return 'CREATE SEQUENCE %s;' % self.quote(sequence_name)
def drop_sequence(self, sequence_name):
return 'DROP SEQUENCE %s;' % self.quote(sequence_name)
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
- converts rows from the database into model instances
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor, meta=None):
self.model = model
self.cursor = cursor
self.naive = not meta
if self.naive:
cols = []
non_cols = []
for i in range(len(self.cursor.description)):
col = self.cursor.description[i][0]
if col in model._meta.columns:
cols.append((i, model._meta.columns[col]))
else:
non_cols.append((i, col))
self._cols = cols
self._non_cols = non_cols
else:
self.column_meta, self.join_meta = meta
self.__ct = 0
self.__idx = 0
self._result_cache = []
self._populated = False
def simple_iter(self, row):
instance = self.model()
for i, f in self._cols:
setattr(instance, f.name, f.python_value(row[i]))
for i, f in self._non_cols:
setattr(instance, f, row[i])
return instance
def construct_instance(self, row):
# we have columns, models, and a graph of joins to reconstruct
collected_models = {}
cols = [c[0] for c in self.cursor.description]
for i, expr in enumerate(self.column_meta):
value = row[i]
if isinstance(expr, Field):
model = expr.model_class
else:
model = self.model
if model not in collected_models:
collected_models[model] = model()
instance = collected_models[model]
if isinstance(expr, Field):
setattr(instance, expr.name, expr.python_value(value))
elif isinstance(expr, Expr) and expr._alias:
setattr(instance, expr._alias, value)
else:
setattr(instance, cols[i], value)
return self.follow_joins(self.join_meta, collected_models, self.model)
def follow_joins(self, joins, collected_models, current):
inst = collected_models[current]
if current not in joins:
return inst
for joined_model, _, _ in joins[current]:
if joined_model in collected_models:
joined_inst = self.follow_joins(joins, collected_models, joined_model)
fk_field = current._meta.rel_for_model(joined_model)
if not fk_field:
continue
if joined_inst.get_id() is None and fk_field.name in inst._data:
rel_inst_id = inst._data[fk_field.name]
joined_inst.set_id(rel_inst_id)
setattr(inst, fk_field.name, joined_inst)
return inst
def __iter__(self):
self.__idx = 0
if not self._populated:
return self
else:
return iter(self._result_cache)
def iterate(self):
row = self.cursor.fetchone()
if not row:
self._populated = True
raise StopIteration
if self.naive:
return self.simple_iter(row)
else:
return self.construct_instance(row)
def iterator(self):
while 1:
yield self.iterate()
def next(self):
if self.__idx < self.__ct:
inst = self._result_cache[self.__idx]
self.__idx += 1
return inst
instance = self.iterate()
instance.prepared() # <-- model prepared hook
self._result_cache.append(instance)
self.__ct += 1
self.__idx += 1
return instance
def fill_cache(self, n=None):
n = n or float('Inf')
self.__idx = self.__ct
while not self._populated and (n > self.__ct):
try:
self.next()
except StopIteration:
break
def returns_clone(func):
def inner(self, *args, **kwargs):
clone = self.clone()
func(clone, *args, **kwargs)
return clone
inner.call_local = func
return inner
def not_allowed(fn):
def inner(self, *args, **kwargs):
raise NotImplementedError('%s is not allowed on %s instances' % (
fn, type(self).__name__,
))
return inner
Join = namedtuple('Join', ('model_class', 'join_type', 'on'))
class Query(object):
require_commit = True
def __init__(self, model_class):
self.model_class = model_class
self.database = model_class._meta.database
self._dirty = True
self._query_ctx = model_class
self._joins = {self.model_class: []} # adjacency graph
self._where = None
def clone(self):
query = type(self)(self.model_class)
if self._where is not None:
query._where = self._where.clone()
query._joins = self.clone_joins()
query._query_ctx = self._query_ctx
return query
def clone_joins(self):
return dict(
(mc, list(j)) for mc, j in self._joins.items()
)
@returns_clone
def where(self, *q_or_node):
if self._where is None:
self._where = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._where &= piece
@returns_clone
def join(self, model_class, join_type=None, on=None):
if not self._query_ctx._meta.rel_exists(model_class):
raise ValueError('No foreign key between %s and %s' % (
self._query_ctx, model_class,
))
if on and isinstance(on, basestring):
on = self._query_ctx._meta.fields[on]
self._joins.setdefault(self._query_ctx, [])
self._joins[self._query_ctx].append(Join(model_class, join_type, on))
self._query_ctx = model_class
@returns_clone
def switch(self, model_class=None):
self._query_ctx = model_class or self.model_class
def ensure_join(self, lm, rm, on=None):
ctx = self._query_ctx
for join in self._joins.get(lm, []):
if join.model_class == rm:
return self
query = self.switch(lm).join(rm, on=on).switch(ctx)
return query
def convert_dict_to_node(self, qdict):
accum = []
joins = []
for key, value in sorted(qdict.items()):
curr = self.model_class
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
else:
op = OP_EQ
for piece in key.split('__'):
model_attr = getattr(curr, piece)
if isinstance(model_attr, (ForeignKeyField, ReverseRelationDescriptor)):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(Expr(model_attr, op, value))
return accum, joins
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
dq_node = Leaf()
if args:
dq_node &= reduce(operator.and_, [a.clone() for a in args])
if kwargs:
dq_node &= DQ(**kwargs)
# dq_node should now be an Expr, lhs = Leaf(), rhs = ...
q = deque([dq_node])
dq_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expr):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
dq_joins.update(joins)
setattr(curr, side, reduce(operator.and_, query))
else:
q.append(piece)
dq_node = dq_node.rhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model_class, field.rel_model
field_obj = field
elif isinstance(field, ReverseRelationDescriptor):
lm, rm = field.field.rel_model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
def sql(self, compiler):
raise NotImplementedError()
def execute(self):
raise NotImplementedError
class RawQuery(Query):
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
self._qr = None
super(RawQuery, self).__init__(model)
def clone(self):
return RawQuery(self.model_class, self._sql, *self._params)
def sql(self, compiler):
return self._sql, self._params
join = not_allowed('joining')
where = not_allowed('where')
switch = not_allowed('switch')
def execute(self):
if self._qr is None:
self._qr = QueryResultWrapper(self.model_class, self.database.execute(self), None)
return self._qr
def __iter__(self):
return iter(self.execute())
class SelectQuery(Query):
require_commit = False
def __init__(self, model_class, *selection):
self._explicit_selection = len(selection) > 0
self._select = self._model_shorthand(selection or model_class._meta.get_fields())
self._group_by = None
self._having = None
self._order_by = None
self._limit = None
self._offset = None
self._distinct = False
self._for_update = False
self._naive = False
self._qr = None
super(SelectQuery, self).__init__(model_class)
def clone(self):
query = super(SelectQuery, self).clone()
query._explicit_selection = self._explicit_selection
query._select = list(self._select)
if self._group_by is not None:
query._group_by = list(self._group_by)
if self._having:
query._having = self._having.clone()
if self._order_by is not None:
query._order_by = list(self._order_by)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._for_update = self._for_update
query._naive = self._naive
return query
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Leaf):
accum.append(arg)
elif issubclass(arg, Model):
accum.extend(arg._meta.get_fields())
return accum
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def having(self, *q_or_node):
if self._having is None:
self._having = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._having &= piece
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def distinct(self, is_distinct=True):
self._distinct = is_distinct
@returns_clone
def for_update(self, for_update=True):
self._for_update = for_update
@returns_clone
def naive(self, naive=True):
self._naive = naive
def annotate(self, rel_model, annotation=None):
annotation = annotation or fn.Count(rel_model._meta.primary_key).alias('count')
query = self.clone()
query = query.ensure_join(query._query_ctx, rel_model)
if not query._group_by:
query._group_by = list(query._select)
query._select = tuple(query._select) + (annotation,)
return query
def _aggregate(self, aggregation=None):
aggregation = aggregation or fn.Count(self.model_class._meta.primary_key)
query = self.order_by()
query._select = (aggregation,)
return query
def aggregate(self, aggregation=None):
query = self._aggregate(aggregation)
compiler = self.database.get_compiler()
sql, params = query.sql(compiler)
curs = query.database.execute_sql(sql, params, require_commit=False)
return curs.fetchone()[0]
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
clone = self.order_by()
clone._limit = clone._offset = None
clone._select = [fn.Count(clone.model_class._meta.primary_key)]
res = clone.database.execute(clone)
return (res.fetchone() or [0])[0]
def wrapped_count(self):
clone = self.order_by()
clone._limit = clone._offset = None
compiler = self.database.get_compiler()
sql, params = clone.sql(compiler)
query = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
res = clone.database.execute_sql(query, params, require_commit=False)
return res.fetchone()[0]
def exists(self):
clone = self.paginate(1, 1)
clone._select = [self.model_class._meta.primary_key]
res = self.database.execute(clone)
return bool(res.fetchone())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
self.sql(self.database.get_compiler())
))
def sql(self, compiler):
return compiler.parse_select_query(self)
def verify_naive(self):
for expr in self._select:
if isinstance(expr, Field) and expr.model_class != self.model_class:
return False
return True
def execute(self):
if self._dirty or not self._qr:
if self._naive or not self._joins or self.verify_naive():
query_meta = None
else:
query_meta = [self._select, self._joins]
self._qr = QueryResultWrapper(self.model_class, self.database.execute(self), query_meta)
self._dirty = False
return self._qr
else:
return self._qr
def __iter__(self):
return iter(self.execute())
def __getitem__(self, value):
offset = limit = None
if isinstance(value, slice):
if value.start:
offset = value.start
if value.stop:
limit = value.stop - (value.start or 0)
else:
if value < 0:
raise ValueError('Negative indexes are not supported, try ordering in reverse')
offset = value
limit = 1
if self._limit != limit or self._offset != offset:
self._qr = None
self._limit = limit
self._offset = offset
res = list(self)
return limit == 1 and res[0] or res
class UpdateQuery(Query):
def __init__(self, model_class, update=None):
self._update = update
super(UpdateQuery, self).__init__(model_class)
def clone(self):
query = super(UpdateQuery, self).clone()
query._update = dict(self._update)
return query
join = not_allowed('joining')
def sql(self, compiler):
return compiler.parse_update_query(self)
def execute(self):
result = self.database.execute(self)
return self.database.rows_affected(result)
class InsertQuery(Query):
def __init__(self, model_class, insert=None):
mm = model_class._meta
query = dict((mm.fields[f], v) for f, v in mm.get_default_dict().items())
query.update(insert)
self._insert = query
super(InsertQuery, self).__init__(model_class)
def clone(self):
query = super(InsertQuery, self).clone()
query._insert = dict(self._insert)
return query
join = not_allowed('joining')
where = not_allowed('where clause')
def sql(self, compiler):
return compiler.parse_insert_query(self)
def execute(self):
result = self.database.execute(self)
return self.database.last_insert_id(result, self.model_class)
class DeleteQuery(Query):
join = not_allowed('joining')
def sql(self, compiler):
return compiler.parse_delete_query(self)
def execute(self):
result = self.database.execute(self)
return self.database.rows_affected(result)
class Database(object):
commit_select = False
compiler_class = QueryCompiler
empty_limit = False
field_overrides = {}
for_update = False
interpolation = '?'
op_overrides = {}
quote_char = '"'
reserved_tables = []
sequences = False
subquery_delete_same_table = True
def __init__(self, database, threadlocals=False, autocommit=True,
fields=None, ops=None, **connect_kwargs):
self.init(database, **connect_kwargs)
if threadlocals:
self.__local = threading.local()
else:
self.__local = type('DummyLocal', (object,), {})
self._conn_lock = threading.Lock()
self.autocommit = autocommit
self.field_overrides = dict_update(self.field_overrides, fields or {})
self.op_overrides = dict_update(self.op_overrides, ops or {})
def init(self, database, **connect_kwargs):
self.deferred = database is None
self.database = database
self.connect_kwargs = connect_kwargs
def connect(self):
with self._conn_lock:
if self.deferred:
raise Exception('Error, database not properly initialized before opening connection')
self.__local.conn = self._connect(self.database, **self.connect_kwargs)
self.__local.closed = False
def close(self):
with self._conn_lock:
if self.deferred:
raise Exception('Error, database not properly initialized before closing connection')
self._close(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if not hasattr(self.__local, 'closed') or self.__local.closed:
self.connect()
return self.__local.conn
def is_closed(self):
return getattr(self.__local, 'closed', True)
def get_cursor(self):
return self.get_conn().cursor()
def _close(self, conn):
conn.close()
def _connect(self, database, **kwargs):
raise NotImplementedError
@classmethod
def register_fields(cls, fields):
cls.field_overrides = dict_update(cls.field_overrides, fields)
@classmethod
def register_ops(cls, ops):
cls.op_overrides = dict_update(cls.op_overrides, ops)
def last_insert_id(self, cursor, model):
if model._meta.auto_increment:
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
def get_compiler(self):
return self.compiler_class(
self.quote_char, self.interpolation, self.field_overrides,
self.op_overrides)
def execute(self, query):
sql, params = query.sql(self.get_compiler())
if isinstance(query, (SelectQuery, RawQuery)):
commit = self.commit_select
else:
commit = query.require_commit
return self.execute_sql(sql, params, commit)
def execute_sql(self, sql, params=None, require_commit=True):
cursor = self.get_cursor()
res = cursor.execute(sql, params or ())
if require_commit and self.get_autocommit():
self.commit()
logger.debug((sql, params))
return cursor
def begin(self):
pass
def commit(self):
self.get_conn().commit()
def rollback(self):
self.get_conn().rollback()
def set_autocommit(self, autocommit):
self.__local.autocommit = autocommit
def get_autocommit(self):
if not hasattr(self.__local, 'autocommit'):
self.set_autocommit(self.autocommit)
return self.__local.autocommit
def get_tables(self):
raise NotImplementedError
def get_indexes_for_table(self, table):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_table(self, model_class):
qc = self.get_compiler()
return self.execute_sql(qc.create_table(model_class))
def create_index(self, model_class, fields, unique=False):
qc = self.get_compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('fields passed to "create_index" must be a list or tuple: "%s"' % fields)
field_objs = [model_class._meta.fields[f] if isinstance(f, basestring) else f for f in fields]
return self.execute_sql(qc.create_index(model_class, field_objs, unique))
def create_foreign_key(self, model_class, field):
if not field.primary_key:
return self.create_index(model_class, [field], field.unique)
def create_sequence(self, seq):
if self.sequences:
qc = self.get_compiler()
return self.execute_sql(qc.create_sequence(seq))
def drop_table(self, model_class, fail_silently=False):
qc = self.get_compiler()
return self.execute_sql(qc.drop_table(model_class, fail_silently))
def drop_sequence(self, seq):
if self.sequences:
qc = self.get_compiler()
return self.execute_sql(qc.drop_sequence(seq))
def transaction(self):
return transaction(self)
def commit_on_success(self, func):
def inner(*args, **kwargs):
orig = self.get_autocommit()
self.set_autocommit(False)
self.begin()
try:
res = func(*args, **kwargs)
self.commit()
except:
self.rollback()
raise
else:
return res
finally:
self.set_autocommit(orig)
return inner
class SqliteDatabase(Database):
op_overrides = {
OP_LIKE: 'GLOB',
OP_ILIKE: 'LIKE',
}
def _connect(self, database, **kwargs):
if not sqlite3:
raise ImproperlyConfigured('sqlite3 must be installed on the system')
return sqlite3.connect(database, **kwargs)
def get_indexes_for_table(self, table):
res = self.execute_sql('PRAGMA index_list(%s);' % self.quote(table))
rows = sorted([(r[1], r[2] == 1) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute_sql('select name from sqlite_master where type="table" order by name')
return [r[0] for r in res.fetchall()]
class PostgresqlDatabase(Database):
commit_select = True
empty_limit = True
field_overrides = {
'bigint': 'BIGINT',
'bool': 'BOOLEAN',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'primary_key': 'SERIAL',
}
for_update = True
interpolation = '%s'
reserved_tables = ['user']
sequences = True
def _connect(self, database, **kwargs):
if not psycopg2:
raise ImproperlyConfigured('psycopg2 must be installed on the system')
return psycopg2.connect(database=database, **kwargs)
def last_insert_id(self, cursor, model):
seq = model._meta.primary_key.sequence
if seq:
cursor.execute("SELECT CURRVAL('\"%s\"')" % (seq))
return cursor.fetchone()[0]
elif model._meta.auto_increment:
cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
model._meta.db_table, model._meta.primary_key.db_column))
return cursor.fetchone()[0]
def get_indexes_for_table(self, table):
res = self.execute_sql("""
SELECT c2.relname, i.indisprimary, i.indisunique
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))
return sorted([(r[0], r[1]) for r in res.fetchall()])
def get_tables(self):
res = self.execute_sql("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY c.relname""")
return [row[0] for row in res.fetchall()]
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*)
FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
def set_search_path(self, *search_path):
path_params = ','.join(['%s'] * len(search_path))
self.execute_sql('SET search_path TO %s' % path_params, search_path)
class MySQLDatabase(Database):
commit_select = True
field_overrides = {
'bigint': 'BIGINT',
'boolean': 'BOOL',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'float': 'FLOAT',
'primary_key': 'INTEGER AUTO_INCREMENT',
'text': 'LONGTEXT',
}
for_update = True
interpolation = '%s'
op_overrides = {OP_LIKE: 'LIKE BINARY', OP_ILIKE: 'LIKE'}
quote_char = '`'
subquery_delete_same_table = False
def _connect(self, database, **kwargs):
if not mysql:
raise ImproperlyConfigured('MySQLdb must be installed on the system')
conn_kwargs = {
'charset': 'utf8',
'use_unicode': True,
}
conn_kwargs.update(kwargs)
return mysql.connect(db=database, **conn_kwargs)
def create_foreign_key(self, model_class, field):
compiler = self.get_compiler()
framing = """
ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s
FOREIGN KEY (%(field)s) REFERENCES %(to)s(%(to_field)s)%(cascade)s;
"""
db_table = model_class._meta.db_table
constraint = 'fk_%s_%s_%s' % (
db_table,
field.rel_model._meta.db_table,
field.db_column,
)
query = framing % {
'table': compiler.quote(db_table),
'constraint': compiler.quote(constraint),
'field': compiler.quote(field.db_column),
'to': compiler.quote(field.rel_model._meta.db_table),
'to_field': compiler.quote(field.rel_model._meta.primary_key.db_column),
'cascade': ' ON DELETE CASCADE' if field.cascade else '',
}
self.execute_sql(query)
return super(MySQLDatabase, self).create_foreign_key(model_class, field)
def get_indexes_for_table(self, table):
res = self.execute_sql('SHOW INDEXES IN `%s`;' % table)
rows = sorted([(r[2], r[1] == 0) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute_sql('SHOW TABLES;')
return [r[0] for r in res.fetchall()]
class transaction(object):
def __init__(self, db):
self.db = db
def __enter__(self):
self._orig = self.db.get_autocommit()
self.db.set_autocommit(False)
self.db.begin()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.db.rollback()
else:
self.db.commit()
self.db.set_autocommit(self._orig)
class DoesNotExist(Exception):
pass
# raised when doing an IN on an empty set
class EmptyResultException(Exception):
pass
default_database = SqliteDatabase('peewee.db')
class ModelOptions(object):
def __init__(self, cls, database=None, db_table=None, indexes=None,
order_by=None, primary_key=None):
self.model_class = cls
self.name = cls.__name__.lower()
self.fields = {}
self.columns = {}
self.defaults = {}
self.database = database or default_database
self.db_table = db_table
self.indexes = indexes or []
self.order_by = order_by
self.primary_key = primary_key
self.auto_increment = None
self.rel = {}
self.reverse_rel = {}
def prepared(self):
for field in self.fields.values():
if field.default is not None:
self.defaults[field] = field.default
if self.order_by:
norm_order_by = []
for clause in self.order_by:
field = self.fields[clause.lstrip('-')]
if clause.startswith('-'):
norm_order_by.append(field.desc())
else:
norm_order_by.append(field.asc())
self.order_by = norm_order_by
def get_default_dict(self):
dd = {}
for field, default in self.defaults.items():
if callable(default):
dd[field.name] = default()
else:
dd[field.name] = default
return dd
def get_sorted_fields(self):
return sorted(self.fields.items(), key=lambda (k,v): (v is self.primary_key and 1 or 2, v._order))
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def rel_for_model(self, model, field_obj=None):
for field in self.get_fields():
if isinstance(field, ForeignKeyField) and field.rel_model == model:
if field_obj is None or field_obj.name == field.name:
return field
def reverse_rel_for_model(self, model):
return model._meta.rel_for_model(self.model_class)
def rel_exists(self, model):
return self.rel_for_model(model) or self.reverse_rel_for_model(model)
class BaseModel(type):
inheritable_options = ['database', 'indexes', 'order_by', 'primary_key']
def __new__(cls, name, bases, attrs):
if not bases:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
meta_options.update((k, v) for k, v in meta.__dict__.items() if not k.startswith('_'))
# inherit any field descriptors by deep copying the underlying field obj
# into the attrs of the new model, additionally see if the bases define
# inheritable model options and swipe them
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = getattr(b, '_meta')
for (k, v) in base_meta.__dict__.items():
if k in cls.inheritable_options and k not in meta_options:
meta_options[k] = v
for (k, v) in b.__dict__.items():
if isinstance(v, FieldDescriptor) and k not in attrs:
if not v.field.primary_key:
attrs[k] = deepcopy(v.field)
# initialize the new class and set the magic attributes
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
cls._data = None
primary_key = None
# replace the fields with field descriptors, calling the add_to_class hook
for name, attr in cls.__dict__.items():
cls._meta.indexes = list(cls._meta.indexes)
if isinstance(attr, Field):
attr.add_to_class(cls, name)
if attr.primary_key:
primary_key = attr
if not primary_key:
primary_key = PrimaryKeyField(primary_key=True)
primary_key.add_to_class(cls, 'id')
cls._meta.primary_key = primary_key
cls._meta.auto_increment = isinstance(primary_key, PrimaryKeyField) or primary_key.sequence
if not cls._meta.db_table:
cls._meta.db_table = re.sub('[^\w]+', '_', cls.__name__.lower())
# create a repr and error class before finalizing
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %r>' % (
cls.__name__, self.__unicode__()))
exception_class = type('%sDoesNotExist' % cls.__name__, (DoesNotExist,), {})
cls.DoesNotExist = exception_class
cls._meta.prepared()
return cls
class Model(object):
__metaclass__ = BaseModel
def __init__(self, *args, **kwargs):
self._data = self._meta.get_default_dict()
self._obj_cache = {} # cache of related objects
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def select(cls, *selection):
query = SelectQuery(cls, *selection)
if cls._meta.order_by:
query = query.order_by(*cls._meta.order_by)
return query
@classmethod
def update(cls, **update):
fdict = dict((cls._meta.fields[f], v) for f, v in update.items())
return UpdateQuery(cls, fdict)
@classmethod
def insert(cls, **insert):
fdict = dict((cls._meta.fields[f], v) for f, v in insert.items())
return InsertQuery(cls, fdict)
@classmethod
def delete(cls):
return DeleteQuery(cls)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
return inst
@classmethod
def get(cls, *query, **kwargs):
sq = cls.select().naive()
if query:
sq = sq.where(*query)
if kwargs:
sq = sq.filter(**kwargs)
return sq.get()
@classmethod
def get_or_create(cls, **kwargs):
sq = cls.select().filter(**kwargs)
try:
return sq.get()
except cls.DoesNotExist:
return cls.create(**kwargs)
@classmethod
def filter(cls, *dq, **query):
return cls.select().filter(*dq, **query)
@classmethod
def table_exists(cls):
return cls._meta.db_table in cls._meta.database.get_tables()
@classmethod
def create_table(cls, fail_silently=False):
if fail_silently and cls.table_exists():
return
db = cls._meta.database
pk = cls._meta.primary_key
if db.sequences and pk.sequence and not db.sequence_exists(pk.sequence):
db.create_sequence(pk.sequence)
db.create_table(cls)
for field_name, field_obj in cls._meta.fields.items():
if isinstance(field_obj, ForeignKeyField):
db.create_foreign_key(cls, field_obj)
elif field_obj.index or field_obj.unique:
db.create_index(cls, [field_obj], field_obj.unique)
if cls._meta.indexes:
for fields, unique in cls._meta.indexes:
db.create_index(cls, fields, unique)
@classmethod
def drop_table(cls, fail_silently=False):
cls._meta.database.drop_table(cls, fail_silently)
def get_id(self):
return getattr(self, self._meta.primary_key.name)
def set_id(self, id):
setattr(self, self._meta.primary_key.name, id)
def prepared(self):
pass
def save(self, force_insert=False):
field_dict = dict(self._data)
pk = self._meta.primary_key
if self.get_id() is not None and not force_insert:
field_dict.pop(pk.name)
update = self.update(
**field_dict
).where(pk == self.get_id())
update.execute()
else:
if self._meta.auto_increment:
field_dict.pop(pk.name, None)
insert = self.insert(**field_dict)
new_pk = insert.execute()
if self._meta.auto_increment:
self.set_id(new_pk)
def dependencies(self, search_nullable=False):
stack = [(type(self), self.select().where(self._meta.primary_key == self.get_id()))]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for rel_name, fk in klass._meta.reverse_rel.items():
rel_model = fk.model_class
expr = fk << query
if not fk.null or search_nullable:
stack.append((rel_model, rel_model.select().where(expr)))
yield (expr, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
for query, fk in reversed(list(self.dependencies(delete_nullable))):
if fk.null and not delete_nullable:
fk.model_class.update(**{fk.name: None}).where(query).execute()
else:
fk.model_class.delete().where(query).execute()
return self.delete().where(self._meta.primary_key == self.get_id()).execute()
def __eq__(self, other):
return other.__class__ == self.__class__ and \
self.get_id() is not None and \
other.get_id() == self.get_id()
def __ne__(self, other):
return not self == other
def create_model_tables(models, **create_table_kwargs):
"""Create tables for all given models (in the right order)."""
for m in sort_models_topologically(models):
m.create_table(**create_table_kwargs)
def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
m.drop_table(**drop_table_kwargs)
def sort_models_topologically(models):
"""Sort models topologically so that parents will precede children."""
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for foreign_key in model._meta.reverse_rel.values():
dfs(foreign_key.model_class)
ordering.append(model) # parent will follow descendants
# order models by name and table initially to guarantee a total ordering
names = lambda m: (m._meta.name, m._meta.db_table)
for m in sorted(models, key=names, reverse=True):
dfs(m)
return list(reversed(ordering)) # want parents first in output ordering
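# Hypothetical illustration of the two helpers above (Blog and Entry are example
# models, not defined here), assuming Entry has a ForeignKeyField to Blog:
#
#   create_model_tables([Entry, Blog])   # creates Blog's table first, then Entry's
#   drop_model_tables([Entry, Blog])     # drops Entry's table first, then Blog's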
def raw_sql(query):
db = query.model_class._meta.database
return query.sql(db.get_compiler()) | mit | 656,235,991,233,999,200 | 30.508409 | 114 | 0.543091 | false |
gratipay/gratipay.com | tests/py/test_team_closing.py | 1 | 1325 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import Harness, T
class TestTeamClosing(Harness):
def test_teams_can_be_closed_via_python(self):
team = self.make_team()
team.close()
assert team.is_closed
def test_teams_can_be_closed_via_http(self):
self.make_team()
response = self.client.PxST('/TheEnterprise/edit/close', auth_as='picard')
assert response.headers['Location'] == '/~picard/'
assert response.code == 302
assert T('TheEnterprise').is_closed
def test_but_not_by_anon(self):
self.make_team()
response = self.client.PxST('/TheEnterprise/edit/close')
assert response.code == 401
def test_nor_by_turkey(self):
self.make_participant('turkey')
self.make_team()
response = self.client.PxST('/TheEnterprise/edit/close', auth_as='turkey')
assert response.code == 403
def test_admin_is_cool_though(self):
self.make_participant('Q', is_admin=True)
self.make_team()
response = self.client.PxST('/TheEnterprise/edit/close', auth_as='Q')
assert response.headers['Location'] == '/~Q/'
assert response.code == 302
assert T('TheEnterprise').is_closed
| mit | -666,542,625,447,780,000 | 33.868421 | 82 | 0.632453 | false |
QuinnSong/JPG-Tools | src/background.py | 1 | 1161 | # Modified based on Phatch
#---PIL modules import
from shadow import fill_background_color, generate_layer, \
remove_alpha, has_transparency, get_alpha, paste
from PIL import Image
#from reflection import HTMLColorToRGBA
FILL_CHOICES = ('Color', 'Image')
def background(image, fill, mark, color,
horizontal_offset=None, vertical_offset=None,
horizontal_justification=None, vertical_justification=None,
orientation=None, method=None, opacity=100):
"""color is RGB"""
if not has_transparency(image):
return image
if image.mode == 'P':
image = image.convert('RGBA')
if fill == FILL_CHOICES[0]:
opacity = (255 * opacity) / 100
r,g,b = color
return fill_background_color(image, (r,g,b, opacity))
elif fill == FILL_CHOICES[1]:
layer = generate_layer(image.size, mark, method,
horizontal_offset, vertical_offset,
horizontal_justification,
vertical_justification,
orientation, opacity)
paste(layer, image, mask=image)
return layer | gpl-3.0 | -3,725,835,973,732,491,000 | 36.483871 | 67 | 0.605512 | false |
javahust/dotamax | dataIngest/crawler.py | 1 | 4059 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import dota2api
import datetime
import time
import thread
api = dota2api.Initialise()
seed_user_id = 161877399
#seed_user_id = 98887913
hero_list = api.get_heroes()['heroes']
item_list = api.get_game_items()['items']
# heroes
# {
# count - Number of results
# status - HTTP status code
# [heroes]
# {
# id - Unique hero ID
# name - Hero's name
# localized_name - Localized version of hero's name
# url_full_portrait - URL to full-size hero portrait (256x144)
# url_large_portrait - URL to large hero portrait (205x115)
# url_small_portrait - URL to small hero portrait (59x33)
# url_vertical_portrait - URL to vertical hero portrait (235x272)
# }
# }
#items:
# {
# count - Number of results
#     status          - HTTP status response
# [items]
# {
# id - Unique item ID
# name - Item's name
# cost - Item's gold cost in game, 0 if recipe
# localized_name - Item's localized name
# recipe - True if item is a recipe item, false otherwise
# secret_shop - True if item is bought at the secret shop, false otherwise
# side_shop - True if item is bought at the side shop, false otherwise
# }
# }
# getmatchhistory Parameters:
# account_id – (int, optional)
# hero_id – (int, optional)
# game_mode – (int, optional) see ref/modes.json
# skill – (int, optional) see ref/skill.json
# min_players – (int, optional) only return matches with minimum amount of players
# league_id – (int, optional) for ids use get_league_listing()
# start_at_match_id – (int, optional) start at matches equal to or older than this match id
# matches_requested – (int, optional) defaults to 100
# tournament_games_only – (str, optional) limit results to tournament matches only
# response
# {
# num_results - Number of matches within a single response
# total_results - Total number of matches for this query
# results_remaining - Number of matches remaining to be retrieved with subsequent API calls
# [matches] - List of matches for this response
# {
# match_id - Unique match ID
# match_seq_num - Number indicating position in which this match was recorded
# start_time - Unix timestamp of beginning of match
# lobby_type - See lobby_type table
# [player] - List of players in the match
# {
# account_id - Unique account ID
# player_slot - Player's position within the team
# hero_id - Unique hero ID
# }
# }
# }
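# For example, a single page of results for one hero could be requested like this
# (parameter values shown are illustrative; see the field list above):
#
#   batch = api.get_match_history(account_id=seed_user_id,
#                                 hero_id=1, matches_requested=100)
#   for match in batch['matches']:
#       print(match['match_id'])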
def traverse_user_match(user_id):
match_set = set()
for hero in hero_list:
id = hero['id']
count = len(match_set)
batch_matches = api.get_match_history(account_id=user_id, hero_id=id)
while True:
# do process
matches = batch_matches['matches']
for match in matches:
match_set.add(match['match_id'])
if batch_matches['num_results'] < 100:
break
batch_matches = api.get_match_history(account_id=user_id, start_at_match_id=matches[-1]['match_id'], hero_id=id)
print "play {0} for {1} matches".format(hero['name'], (len(match_set) - count))
print len(match_set)
print match_set
def date_to_timestamp(date_str):
return int(time.mktime(datetime.datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S").timetuple()))
def timestamp_to_date(timestamp):
return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
def main():
a = datetime.datetime.now()
traverse_user_match(seed_user_id)
b = datetime.datetime.now()
print(b - a)
if __name__ == "__main__":
main()
| apache-2.0 | 1,302,979,220,849,933,000 | 35.080357 | 124 | 0.579065 | false |
priyom/priyomdb | PriyomHTTP/Server/Resources/API/InstanciateSchedules.py | 1 | 2595 | """
File name: InstanciateSchedules.py
This file is part of: priyomdb
LICENSE
The contents of this file are subject to the Mozilla Public License
Version 1.1 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
License for the specific language governing rights and limitations under
the License.
Alternatively, the contents of this file may be used under the terms of
the GNU General Public license (the "GPL License"), in which case the
provisions of GPL License are applicable instead of those above.
FEEDBACK & QUESTIONS
For feedback and questions about priyomdb please e-mail one of the
authors:
Jonas Wielicki <[email protected]>
"""
from WebStack.Generic import ContentType
import time
from datetime import datetime, timedelta
from libPriyom import *
from libPriyom.Formatting import priyomdate
from PriyomHTTP.Server.limits import queryLimits
from PriyomHTTP.Server.Resources.API.API import API, CallSyntax, Argument
class InstanciateSchedulesAPI(API):
title = u"instanciateSchedules"
shortDescription = u"instanciate schedules"
docArgs = [
Argument(u"stationId", u"station ID", u"Restrict the instanciation to a single station", metavar="stationid", optional=True),
]
docCallSyntax = CallSyntax(docArgs, u"?{0}")
docRequiredPrivilegues = u"instanciate"
def __init__(self, model):
super(InstanciateSchedulesAPI, self).__init__(model)
self.allowedMethods = frozenset(("POST", "GET", "HEAD"))
def handle(self, trans):
stationId = self.getQueryIntDefault("stationId", None, "must be integer")
trans.set_content_type(ContentType("text/plain", self.encoding))
if self.head:
return
if trans.get_request_method() == "GET":
print >>self.out, u"failed: Call this resource with POST to perform instanciation.".encode(self.encoding)
return
generatedUntil = 0
if stationId is None:
generatedUntil = self.priyomInterface.scheduleMaintainer.updateSchedules(None)
else:
generatedUntil = self.priyomInterface.scheduleMaintainer.updateSchedule(self.store.get(Station, stationId), None)
print >>self.out, u"success: valid until {0}".format(datetime.fromtimestamp(generatedUntil).strftime(priyomdate)).encode(self.encoding)
| gpl-3.0 | -271,381,137,207,109,060 | 37.161765 | 143 | 0.721773 | false |
alephobjects/Cura2 | tests/TestMachineAction.py | 1 | 3513 | #Todo: Write tests
import pytest
# QtApplication needs to be imported first to prevent import errors.
from UM.Qt.QtApplication import QtApplication
from cura.MachineAction import MachineAction
from cura.MachineActionManager import MachineActionManager, NotUniqueMachineActionError, UnknownMachineActionError
class Machine:
def __init__(self, key = ""):
self._key = key
def getKey(self):
return self._key
def test_addMachineAction():
machine_manager = MachineActionManager()
test_action = MachineAction(key = "test_action")
test_action_2 = MachineAction(key = "test_action_2")
test_machine = Machine("test_machine")
machine_manager.addMachineAction(test_action)
machine_manager.addMachineAction(test_action_2)
assert machine_manager.getMachineAction("test_action") == test_action
assert machine_manager.getMachineAction("key_that_doesnt_exist") is None
# Adding the same machine action is not allowed.
with pytest.raises(NotUniqueMachineActionError):
machine_manager.addMachineAction(test_action)
# Check that the machine has no supported actions yet.
assert machine_manager.getSupportedActions(test_machine) == list()
# Check if adding a supported action works.
machine_manager.addSupportedAction(test_machine, "test_action")
assert machine_manager.getSupportedActions(test_machine) == [test_action, ]
    # Check that adding an unknown action doesn't change anything.
machine_manager.addSupportedAction(test_machine, "key_that_doesnt_exist")
assert machine_manager.getSupportedActions(test_machine) == [test_action, ]
# Check if adding multiple supported actions works.
machine_manager.addSupportedAction(test_machine, "test_action_2")
assert machine_manager.getSupportedActions(test_machine) == [test_action, test_action_2]
# Check that the machine has no required actions yet.
assert machine_manager.getRequiredActions(test_machine) == set()
## Ensure that only known actions can be added.
with pytest.raises(UnknownMachineActionError):
machine_manager.addRequiredAction(test_machine, "key_that_doesnt_exist")
## Check if adding single required action works
machine_manager.addRequiredAction(test_machine, "test_action")
assert machine_manager.getRequiredActions(test_machine) == [test_action, ]
# Check if adding multiple required actions works.
machine_manager.addRequiredAction(test_machine, "test_action_2")
assert machine_manager.getRequiredActions(test_machine) == [test_action, test_action_2]
# Ensure that firstStart actions are empty by default.
assert machine_manager.getFirstStartActions(test_machine) == []
    # Check if adding multiple (the same) actions to first start actions works.
machine_manager.addFirstStartAction(test_machine, "test_action")
machine_manager.addFirstStartAction(test_machine, "test_action")
assert machine_manager.getFirstStartActions(test_machine) == [test_action, test_action]
# Check if inserting an action works
machine_manager.addFirstStartAction(test_machine, "test_action_2", index = 1)
assert machine_manager.getFirstStartActions(test_machine) == [test_action, test_action_2, test_action]
    # Check that adding an unknown action doesn't change anything.
machine_manager.addFirstStartAction(test_machine, "key_that_doesnt_exist", index = 1)
assert machine_manager.getFirstStartActions(test_machine) == [test_action, test_action_2, test_action]
| lgpl-3.0 | -1,673,280,500,255,820,000 | 43.468354 | 114 | 0.750356 | false |
rzarzynski/tempest | tempest/services/image/v2/json/image_client.py | 1 | 7421 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
import jsonschema
from tempest_lib import exceptions as lib_exc
from tempest.common import glance_http
from tempest.common import service_client
class ImageClientV2JSON(service_client.ServiceClient):
def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
build_interval=None, build_timeout=None,
disable_ssl_certificate_validation=None, ca_certs=None,
**kwargs):
super(ImageClientV2JSON, self).__init__(
auth_provider,
catalog_type,
region,
endpoint_type=endpoint_type,
build_interval=build_interval,
build_timeout=build_timeout,
disable_ssl_certificate_validation=(
disable_ssl_certificate_validation),
ca_certs=ca_certs,
**kwargs)
self._http = None
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
def _get_http(self):
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=self.dscv,
ca_certs=self.ca_certs)
def _validate_schema(self, body, type='image'):
if type in ['image', 'images']:
schema = self.get_schema(type)
else:
raise ValueError("%s is not a valid schema type" % type)
jsonschema.validate(body, schema)
@property
def http(self):
if self._http is None:
self._http = self._get_http()
return self._http
def update_image(self, image_id, patch):
data = json.dumps(patch)
self._validate_schema(data)
headers = {"Content-Type": "application/openstack-images-v2.0"
"-json-patch"}
resp, body = self.patch('v2/images/%s' % image_id, data, headers)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def create_image(self, name, container_format, disk_format, **kwargs):
params = {
"name": name,
"container_format": container_format,
"disk_format": disk_format,
}
for option in kwargs:
value = kwargs.get(option)
if isinstance(value, dict) or isinstance(value, tuple):
params.update(value)
else:
params[option] = value
data = json.dumps(params)
self._validate_schema(data)
resp, body = self.post('v2/images', data)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_image(self, image_id):
url = 'v2/images/%s' % image_id
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def image_list(self, params=None):
url = 'v2/images'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
self._validate_schema(body, type='images')
return service_client.ResponseBodyList(resp, body['images'])
def get_image(self, image_id):
url = 'v2/images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.get_image(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image'
def store_image(self, image_id, data):
url = 'v2/images/%s/file' % image_id
headers = {'Content-Type': 'application/octet-stream'}
resp, body = self.http.raw_request('PUT', url, headers=headers,
body=data)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def get_image_file(self, image_id):
url = 'v2/images/%s/file' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBodyData(resp, body)
def add_image_tag(self, image_id, tag):
url = 'v2/images/%s/tags/%s' % (image_id, tag)
resp, body = self.put(url, body=None)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def delete_image_tag(self, image_id, tag):
url = 'v2/images/%s/tags/%s' % (image_id, tag)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def get_image_membership(self, image_id):
url = 'v2/images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_member(self, image_id, member_id):
url = 'v2/images/%s/members' % image_id
data = json.dumps({'member': member_id})
resp, body = self.post(url, data)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_member_status(self, image_id, member_id, status):
"""Valid status are: ``pending``, ``accepted``, ``rejected``."""
url = 'v2/images/%s/members/%s' % (image_id, member_id)
data = json.dumps({'status': status})
resp, body = self.put(url, data)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_member(self, image_id, member_id):
url = 'v2/images/%s/members/%s' % (image_id, member_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def remove_member(self, image_id, member_id):
url = 'v2/images/%s/members/%s' % (image_id, member_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def get_schema(self, schema):
url = 'v2/schemas/%s' % schema
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
| apache-2.0 | 8,774,955,404,667,336,000 | 35.55665 | 79 | 0.599245 | false |
google/offline-content-packager | third_party/nkata/scripts/utils/ISOconverter.py | 1 | 2430 | # Copyright 2015 The Offline Content Packager Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ISO converter script.
"""
import logging
from os import link
from os import makedirs
from os import system
from os import unlink
from os.path import dirname
from os.path import isdir
from os.path import isfile
from os.path import join
from sys import platform
from tempfile import mkdtemp
import click
def to_iso(source, destination, filelist=None):
"""ISO converter utility.
  Convert contents to ISO format, choosing the conversion command based on the
  platform where the tool is being run.
Args:
source: path to directory with content to be converted
destination: path to destination where the ISO file is written
    filelist: optional list of file paths; when given, only these files are
      hard-linked into a temporary directory that is used as the ISO source
"""
# overwrite existing ISO file
if isfile(destination):
unlink(destination)
if filelist:
# create tmp dir
tmpdir = mkdtemp()
for item in filelist:
rel = item[len(source)+1:]
dst = join(tmpdir, rel)
if not isdir(dirname(dst)):
makedirs(dirname(dst))
if not isdir(item):
link(item, dst)
source = tmpdir
if platform.startswith("darwin"):
system("hdiutil makehybrid -iso -joliet -o %s %s"%(destination, source))
click.echo("Finished!")
elif platform.startswith("linux"):
system("mkisofs -r -J -o %s %s"%(destination, source))
click.echo("Finished!")
else:
click.echo(platform + (" not supported for converting to ISO files."
"Try to download ISO maker tool from "
"'http://www.magiciso.com/tutorials/"
"miso-iso-creator.htm'"))
logging.debug(platform + (" not supported for converting to ISO files."
"Try to download ISO maker tool from "
"'http://www.magiciso.com/tutorials/"
"miso-iso-creator.htm'"))
| apache-2.0 | 9,064,200,964,084,471,000 | 30.973684 | 77 | 0.665432 | false |
jaantoots/bridgeview | render/textures.py | 1 | 4059 | """Provide methods for texturing the scene for rendering."""
import json
import numpy as np
import bpy # pylint: disable=import-error
from . import helpers
class Textures():
"""Identify parts by name, organise into texturing groups and texture.
Initialise with list of objects to be textured.
Run: read groups and textures from JSON file & call `texture` to
assign (random) textures to objects
Test or setup: group parts to always have the same texture, add
available textures to groups (or ungrouped parts) & write groups
and textures to JSON file
"""
def __init__(self, objects: list):
"""Create Textures object for Blender objects list."""
self.objects = objects[:]
self.textures = helpers.Dict()
self.groups = helpers.Dict()
def read(self, texture_file: str):
"""Read texturing from file."""
with open(texture_file) as file:
data = json.load(file)
self.textures = helpers.Dict(data['textures'])
self.groups = helpers.Dict(data['groups'])
def write(self, texture_file: str):
"""Write texturing to file."""
with open(texture_file, 'w') as file:
data = {'textures': self.textures, 'groups': self.groups}
json.dump(data, file)
def smart_project_all(self):
"""Initialize objects for texturing using UV smart project (for testing only).
Usually need to prepare the model by choosing the best
projection for each part manually. Cube projection seems to
work well most of the time.
"""
for obj in self.objects:
bpy.data.scenes[0].objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.uv.smart_project()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.data.scenes[0].objects.active = None
def cube_project_all(self):
"""Initialize objects for texturing using cube project.
Usually need to prepare the model by choosing the best
projection for each part manually. Cube projection seems to
work well most of the time.
"""
for obj in self.objects:
print(obj.name)
bpy.data.scenes[0].objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.uv.cube_project()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.data.scenes[0].objects.active = None
def add_textures(self, group: str, textures: list):
"""Add available textures to group (or part if no group).
It is possible to add multiple textures per group to have one
chosen randomly when textures are applied to objects.
"""
self.textures[group] += textures
def add_parts_to_group(self, group: str, parts: list):
"""Assign parts to belong in a group that gets textured the same."""
self.groups[group] += parts
def texture(self):
"""Texture all objects (assumes all parts have been UV projected)."""
for group, textures in self.textures.items():
texture = np.random.choice(textures)
if group in self.groups:
for part in self.groups[group]:
self._texture_parts(part, texture)
else:
self._texture_parts(group, texture)
def _texture_parts(self, part: str, texture: str):
"""Texture all instances of a part, or all objects if part is ''."""
instances = helpers.all_instances(part, self.objects)
for obj in instances:
texture_object(obj, texture)
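# A minimal usage sketch (the JSON file name is hypothetical, and the material
# names referenced by it are assumed to already exist in the .blend file):
#
#   textures = Textures(list(bpy.data.objects))
#   textures.cube_project_all()
#   textures.read('textures.json')
#   textures.texture()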
def texture_object(obj, texture: str):
"""Texture an object with texture.
Find a material with the name `texture` and make this the active
material of the object
"""
material = bpy.data.materials[texture]
# Assign the material to object
for _ in range(len(obj.material_slots)):
bpy.ops.object.material_slot_remove({'object': obj})
obj.data.materials.clear()
obj.active_material = material
| gpl-3.0 | 7,488,986,955,036,146,000 | 34.605263 | 86 | 0.626016 | false |
dcherian/configs | .ipython/profile_default/ipython_notebook_config.py | 1 | 16516 | # Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = '127.0.0.1'
# The base URL for the notebook server
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The base URL for the kernel server
# c.NotebookApp.base_kernel_url = '/'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Whether to prevent editing/execution of notebooks.
# c.NotebookApp.read_only = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The hostname for the websocket server.
# c.NotebookApp.websocket_host = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u'/home/deepak/.config/ipython'
# Set the log level by value or name.
# c.NotebookApp.log_level = 20
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
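#
# For example, the line below shows the expected shape of the value
# (placeholders only, not a real hash -- generate your own with passwd()):
#
#   c.NotebookApp.password = u'sha1:yoursalt:yourhashedpassword'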
# c.NotebookApp.password = u''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s] %(message)s'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: KernelApp, BaseIPythonApplication,
# Application, InteractiveShellApp
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = '127.0.0.1'
#
# c.IPKernelApp.parent_appname = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (DEALER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/deepak/.config/ipython'
# ONLY USED ON WINDOWS Interrupt this process when the parent is signalled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s] %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = 'deepak'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = u''
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The max raw message size accepted from the browser over a WebSocket
# connection.
# c.MappingKernelManager.max_msg_size = 65536
# Kernel heartbeat interval in seconds.
# c.MappingKernelManager.time_to_dead = 3.0
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.zmq.blockingkernelmanager.BlockingKernelManager'
# Delay (in seconds) before sending first heartbeat.
# c.MappingKernelManager.first_beat = 5.0
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.NotebookManager.save_script = False
# The directory to use for notebooks.
# c.NotebookManager.notebook_dir = u'/media/data/Work/eddyshelf'
| mit | 524,242,366,591,500,540 | 35.783964 | 411 | 0.697203 | false |
woodymit/millstone | genome_designer/genome_finish/insertion_placement_read_trkg.py | 1 | 22010 | from collections import defaultdict
import os
import pickle
import re
import subprocess
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from django.conf import settings
import pysam
from genome_finish.contig_display_utils import Junction
from genome_finish.jbrowse_genome_finish import add_contig_reads_bam_track
from genome_finish.jbrowse_genome_finish import maybe_create_reads_to_contig_bam
from main.models import Dataset
from pipeline.read_alignment_util import ensure_bwa_index
from pipeline.read_alignment_util import has_bwa_index
from utils.bam_utils import index_bam
from utils.bam_utils import sort_bam_by_coordinate
from utils.import_util import add_dataset_to_entity
ENDPOINT_MODE_DIFFERENCE_FACTOR_CUTOFF = 0.5
REVERSED_COMPLEMENTARITY_FRACTION_CUTOFF = 0.75
ENDPOINT_FRACTION = 0.8
def get_insertion_placement_positions(contig, strategy='all_reads'):
def _get_contig_reads_using_strategy(strategy):
if strategy == 'all_reads':
return extract_contig_reads(contig, 'all')
elif strategy == 'mapped_mates_of_unmapped':
return mapped_mates_of_unmapped_reads(contig)
else:
raise Exception(str(strategy) + ' not a recognized strategy')
contig_reads = _get_contig_reads_using_strategy(strategy)
if len(contig_reads) == 0:
return {'error_string':
'No clipped reads were assembled into the contig'}
contig_reads_dataset_exists = bool(
contig.dataset_set.filter(
type=Dataset.TYPE.BWA_SV_INDICANTS).count())
if strategy == 'all_reads' and not contig_reads_dataset_exists:
make_contig_reads_dataset(contig, contig_reads)
# Add bam track
add_contig_reads_bam_track(contig, Dataset.TYPE.BWA_SV_INDICANTS)
# Align extracted reads to contig, check if assembled as reverse
# complement relative to the reference
maybe_create_reads_to_contig_bam(contig)
reads_to_contig_bam = contig.dataset_set.get(
type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
reads_to_contig_dict = dictify(pysam.AlignmentFile(reads_to_contig_bam))
reads_to_ref_dict = dictify(contig_reads)
is_reverse = is_contig_reverse_complement(reads_to_ref_dict,
reads_to_contig_dict)
contig.metadata['is_reverse'] = is_reverse
if is_reverse:
write_contig_reverse_complement(contig)
extracted_clipped_read_dicts = extract_left_and_right_clipped_read_dicts(
contig_reads)
left_clipped = extracted_clipped_read_dicts['left_clipped']
right_clipped = extracted_clipped_read_dicts['right_clipped']
# Right clipped reads indicate left endpoint
left_ref_endpoints = get_top_clipped_locs(right_clipped)
# Left clipped reads indicate right endpoint
right_ref_endpoints = get_top_clipped_locs(left_clipped)
left_junctions = []
for ref_endpoint, ref_count in left_ref_endpoints:
contig_endpoint, contig_count = find_contig_endpoint(
contig, right_clipped[ref_endpoint], 'right')
left_junctions.append(Junction(
ref_endpoint, ref_count, contig_endpoint, contig_count))
right_junctions = []
for ref_endpoint, ref_count in right_ref_endpoints:
contig_endpoint, contig_count = find_contig_endpoint(
contig, left_clipped[ref_endpoint], 'left')
right_junctions.append(Junction(
ref_endpoint, ref_count, contig_endpoint, contig_count))
contig.metadata['left_junctions'] = left_junctions
contig.metadata['right_junctions'] = right_junctions
contig.metadata['potential_reference_endpoints'] = {
'left': left_ref_endpoints,
'right': right_ref_endpoints
}
contig.save()
ref_insertion_endpoints = {}
if are_ref_endpoints_placeable(left_ref_endpoints):
ref_insertion_endpoints['left'] = left_ref_endpoints[0][0]
else:
ref_insertion_endpoints['left'] = None
if are_ref_endpoints_placeable(right_ref_endpoints):
ref_insertion_endpoints['right'] = right_ref_endpoints[0][0]
else:
ref_insertion_endpoints['right'] = None
# Handle case of no endpoints found
error = None
if (not ref_insertion_endpoints['left'] and
not ref_insertion_endpoints['right']):
error = {'error_string': ('Could not find left or right reference ' +
'insertion endpoints using ' + str(len(contig_reads)) +
' clipped reads')}
elif not ref_insertion_endpoints['left']:
error = {'error_string': ('Could not find left reference ' +
'insertion endpoint using ' + str(len(contig_reads)) +
' clipped reads')}
elif not ref_insertion_endpoints['right']:
error = {'error_string': ('Could not find right reference ' +
'insertion endpoint using ' + str(len(contig_reads)) +
' clipped reads')}
elif (ref_insertion_endpoints['left'] - ref_insertion_endpoints['right'] >
0.5 * contig.num_bases):
error = {'error_string': ('Left insertion endpoint found too far ' +
'before right insertion endpoint')}
elif (ref_insertion_endpoints['right'] - ref_insertion_endpoints['left'] >
10 * contig.num_bases):
error = {'error_string': ('Distance between left and right ' +
                'reference insertion endpoints more than 10x contig ' +
'length')}
if error:
return error
left_clipped_same_end = left_clipped[ref_insertion_endpoints['right']]
right_clipped_same_end = right_clipped[ref_insertion_endpoints['left']]
contig_insertion_endpoints = find_contig_insertion_endpoints(
contig, left_clipped_same_end,
right_clipped_same_end)
    # Propagate error upwards
if 'error_string' in contig_insertion_endpoints:
return contig_insertion_endpoints
if contig_insertion_endpoints['left'] is None:
return {'error_string': ('Could not find left contig endpoint')}
if contig_insertion_endpoints['right'] is None:
return {'error_string': ('Could not find right contig endpoint')}
# Set contig metadata fields and return endpoints
insertion_placement_positions = {
'reference': ref_insertion_endpoints,
'contig': contig_insertion_endpoints
}
contig.metadata['contig_insertion_endpoints'] = (
insertion_placement_positions['contig']['left'],
insertion_placement_positions['contig']['right'])
contig.metadata['reference_insertion_endpoints'] = (
insertion_placement_positions['reference']['left'],
insertion_placement_positions['reference']['right'])
contig.save()
return insertion_placement_positions
def mapped_mates_of_unmapped_reads(contig):
unmapped_contig_reads = extract_contig_reads(
contig, read_category='unmapped')
print len(unmapped_contig_reads), 'unmapped reads in contig'
original_align = contig.experiment_sample_to_alignment.dataset_set.get(
type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
original_alignmentfile = pysam.AlignmentFile(original_align)
found_mates = []
for read in unmapped_contig_reads:
if not read.mate_is_unmapped:
mate = original_alignmentfile.mate(read)
found_mates.append(mate)
original_alignmentfile.close()
print len(found_mates), 'mapped mates found'
return found_mates
def dictify(reads_iterator):
id_to_reads = defaultdict(list)
for read in reads_iterator:
id_to_reads[read.qname].append(read)
return id_to_reads
def only_primary(reads):
return [read for read in reads if not
(read.is_supplementary or read.is_secondary)]
def is_contig_reverse_complement(reads_to_ref_dict, reads_to_contig_dict):
direction_agreement = 0
direction_disagreement = 0
for qname, reads in reads_to_ref_dict.items():
reads = only_primary(reads)
if all([read.is_unmapped for read in reads]):
continue
same_reads_to_contig = only_primary(
reads_to_contig_dict[reads[0].qname])
for read in reads:
if read.is_unmapped:
continue
if read.is_read1:
correspondant = next((read for read in same_reads_to_contig
if read.is_read1), None)
else:
correspondant = next((read for read in same_reads_to_contig
if read.is_read2), None)
if correspondant:
if read.is_reverse == correspondant.is_reverse:
direction_agreement += 1
else:
direction_disagreement += 1
if not (direction_agreement or direction_disagreement):
return False
    return (float(direction_disagreement) / (direction_disagreement +
direction_agreement) > REVERSED_COMPLEMENTARITY_FRACTION_CUTOFF)
def extract_contig_reads(contig, read_category='all'):
READ_CATEGORY_TO_FILENAME_DICT = {
'without_mates': 'bwa_align.SV_indicants_no_dups.bam',
'clipped': 'bwa_align.clipped.bam',
'split': 'bwa_align.split.bam',
'unmapped': 'bwa_align.unmapped.bam'
}
def _read_category_to_filename(read_category):
if read_category in READ_CATEGORY_TO_FILENAME_DICT:
return READ_CATEGORY_TO_FILENAME_DICT[read_category]
elif read_category == 'all':
assembly_metadata_file = os.path.join(
contig.metadata['assembly_dir'],
'metadata.txt')
with open(assembly_metadata_file) as fh:
assembly_metadata_obj = pickle.load(fh)
return assembly_metadata_obj['sv_indicants_bam']
elif read_category == 'mates_of_unmapped':
return mapped_mates_of_unmapped_reads(contig)
else:
raise Exception('read category not recognized')
extract_contig_reads_executable = os.path.join(
settings.TOOLS_DIR,
'velvet/extractContigReads.pl')
assembly_dir = contig.metadata['assembly_dir']
contig_node_number = contig.metadata['node_number']
cmd = [extract_contig_reads_executable, str(contig_node_number),
assembly_dir]
cmd = ' '.join(cmd)
contig_reads_fasta = os.path.join(
contig.get_model_data_dir(),
'extracted_reads.fa')
if not os.path.exists(contig_reads_fasta):
with open(contig_reads_fasta, 'w') as fh:
subprocess.call(cmd, shell=True, stdout=fh)
    p1 = re.compile(r'>(\S+)/(\d)')
contig_reads = defaultdict(list)
with open(contig_reads_fasta) as fh:
for line in fh:
m1 = p1.match(line)
if m1:
read_id = m1.group(1)
read_number = int(m1.group(2))
contig_reads[read_id].append(read_number)
sv_indicant_reads_path = os.path.join(
contig.experiment_sample_to_alignment.get_model_data_dir(),
_read_category_to_filename(read_category))
sam_file = pysam.AlignmentFile(sv_indicant_reads_path)
sv_indicant_reads_in_contig = []
for read in sam_file:
if read.is_read1:
read_number = 1
elif read.is_read2:
read_number = 2
else:
raise Exception('Read is neither read1 nor read2')
contig_read_numbers = contig_reads.get(read.query_name, [])
if read_number in contig_read_numbers:
sv_indicant_reads_in_contig.append(read)
# HACK: Set chromosome here while sam file is open
# so AlignmentFile.getrname(tid) can be called
ref_id_to_count = {}
mapped_count = 0
for read in sv_indicant_reads_in_contig:
if not read.is_unmapped:
mapped_count += 1
if read.reference_id not in ref_id_to_count:
ref_id_to_count[read.reference_id] = 1
else:
ref_id_to_count[read.reference_id] += 1
if mapped_count:
tid_count_sorted = sorted(
ref_id_to_count.items(), key=lambda x: x[1], reverse=True)
mode_chrom_tid = tid_count_sorted[0][0]
mode_chrom_percentage = (tid_count_sorted[0][1] /
float(mapped_count))
# Set field
if mode_chrom_percentage > 0.8:
contig_seqrecord_id = sam_file.getrname(mode_chrom_tid)
contig.metadata['chromosome'] = contig_seqrecord_id
contig.save()
sam_file.close()
return sv_indicant_reads_in_contig
def make_contig_reads_dataset(contig, sv_indicant_reads_in_contig):
# Get bam filename
extracted_reads_bam_file = os.path.join(
contig.get_model_data_dir(),
'sv_indicants.bam')
bwa_align_bam = contig.experiment_sample_to_alignment.dataset_set.get(
type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
sam_file = pysam.AlignmentFile(bwa_align_bam)
# Write extracted reads into bam file
extracted_reads_alignment_file = pysam.AlignmentFile(
extracted_reads_bam_file, "wb", template=sam_file)
sam_file.close()
for read in sv_indicant_reads_in_contig:
extracted_reads_alignment_file.write(read)
extracted_reads_alignment_file.close()
coordinate_sorted_bam = (os.path.splitext(extracted_reads_bam_file)[0] +
'.coordinate_sorted.bam')
sort_bam_by_coordinate(extracted_reads_bam_file, coordinate_sorted_bam)
index_bam(coordinate_sorted_bam)
# Add the bam file to contig as BWA_SV_INDICANTS dataset, overwriting it
# if it already exists
dataset_query = contig.dataset_set.filter(
type=Dataset.TYPE.BWA_SV_INDICANTS)
if dataset_query.count():
dataset_query[0].delete()
add_dataset_to_entity(contig,
Dataset.TYPE.BWA_SV_INDICANTS,
Dataset.TYPE.BWA_SV_INDICANTS,
filesystem_location=coordinate_sorted_bam)
def extract_left_and_right_clipped_read_dicts(sv_indicant_reads_in_contig,
clipping_threshold=0):
SOFT_CLIP = 4
HARD_CLIP = 5
CLIP = [SOFT_CLIP, HARD_CLIP]
# Separate left and right clipped reads
left_clipped = defaultdict(list)
right_clipped = defaultdict(list)
for read in sv_indicant_reads_in_contig:
if read.cigartuples is not None:
left_clipping = (read.cigartuples[0][1]
if read.cigartuples[0][0] in CLIP else 0)
right_clipping = (read.cigartuples[-1][1]
if read.cigartuples[-1][0] in CLIP else 0)
if max(left_clipping, right_clipping) > clipping_threshold:
is_left_clipped = left_clipping > right_clipping
is_right_clipped = right_clipping > left_clipping
if is_left_clipped:
left_clipped[read.reference_start].append(read)
elif is_right_clipped:
right_clipped[read.reference_end].append(read)
return {
'left_clipped': left_clipped,
'right_clipped': right_clipped
}
def are_ref_endpoints_placeable(endpoints):
"""endpoints is a list of tuples of the form
    (loc, clipped_read_count) sorted by decreasing clipped_read_count
"""
first = endpoints[0][1] if len(endpoints) > 0 else 0
second = endpoints[1][1] if len(endpoints) > 1 else 0
if not first * (1 - ENDPOINT_MODE_DIFFERENCE_FACTOR_CUTOFF) > second:
return False
return True
def get_top_clipped_locs(clipped_dict):
"""clipped_dict is a dictionary with clipping locations as
keys and a list of reads as values
"""
# Convert the dictionary into a list of tuples of the form
# (loc, #reads) sorted in decreasing order of #reads
clipped_count_list = sorted(
[(loc, len(reads)) for loc, reads in clipped_dict.items()],
key=lambda t: t[1], reverse=True)
# Count up the total number of reads
total = sum(count for loc, count in clipped_count_list)
# Return the list that comprises ENDPOINT_FRACTION of the total reads
included = 0
i = 0
while included < ENDPOINT_FRACTION * total:
included += clipped_count_list[i][1]
i += 1
return clipped_count_list[:i]
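# Worked example (hypothetical numbers, for illustration only): with
# ENDPOINT_FRACTION = 0.8 and a clipped_dict whose three locations hold 6, 3
# and 1 reads, clipped_count_list is [(loc_a, 6), (loc_b, 3), (loc_c, 1)] and
# the total is 10; the loop stops once 6 + 3 >= 8, so only the first two
# locations are returned as candidate endpoints.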
def write_read_query_alignments_to_fastq(reads, fastq_path,
read_attr_class='query_alignment'):
"""Writes the aligned portion of each read into a fastq
"""
read_attr_funcs = {
'query_alignment': {
'seq': lambda x: x.query_alignment_sequence,
'qual': lambda x: x.query_alignment_qualities
},
'query': {
'seq': lambda x: x.query_sequence,
'qual': lambda x: x.query_qualities
}
}
assert read_attr_class in read_attr_funcs
get_read_attr = read_attr_funcs[read_attr_class]
query_alignment_seqrecords = []
for read in reads:
query_alignment_seqrecords.append(SeqRecord(
Seq(get_read_attr['seq'](read), IUPAC.ambiguous_dna),
letter_annotations={
'phred_quality': get_read_attr['qual'](read)},
id=read.query_name,
description=''))
with open(fastq_path, 'w') as fastq_handle:
SeqIO.write(query_alignment_seqrecords, fastq_handle, 'fastq')
def simple_align_with_bwa_mem(reads_fq, reference_fasta, output_bam_path):
# Assert reference fasta is indexed
assert has_bwa_index(reference_fasta)
# Align clipped query alignment fastq to contig
align_input_args = ' '.join([
'%s/bwa/bwa' % settings.TOOLS_DIR,
'mem',
reference_fasta,
reads_fq])
# Bwa mem calls reads clipped slightly at the end of the genome
# as unmapped, so filter these out with -F 0x004
# To skip saving the SAM file to disk directly, pipe output directly to
# make a BAM file.
align_input_args += (' | ' + settings.SAMTOOLS_BINARY +
' view -F 0x004 -bS -')
# Run alignment
with open(output_bam_path, 'w') as fh:
subprocess.check_call(
align_input_args, stdout=fh,
shell=True, executable=settings.BASH_PATH)
def get_reads_with_mode_attribute(clipped_alignment_bam, get_attr_function):
alignment_ref_clip_positions = defaultdict(list)
sam_file = pysam.AlignmentFile(clipped_alignment_bam)
for read in sam_file:
alignment_ref_clip_positions[get_attr_function(read)].append(read)
alignment_ref_clip_positions_sorted = sorted(
alignment_ref_clip_positions.items(),
key=lambda x: len(x[1]), reverse=True)
highest_consensus = (len(alignment_ref_clip_positions_sorted[0][1])
if len(alignment_ref_clip_positions_sorted) > 0 else 0)
second_highest_consensus = (len(alignment_ref_clip_positions_sorted[1][1])
if len(alignment_ref_clip_positions_sorted) > 1 else 0)
if (highest_consensus - second_highest_consensus >
(ENDPOINT_MODE_DIFFERENCE_FACTOR_CUTOFF *
highest_consensus)):
endpoint = (alignment_ref_clip_positions_sorted[0][0],
highest_consensus)
else:
endpoint = None, None
return endpoint
def get_contig_rc_fasta_path(contig):
contig_fasta = contig.dataset_set.get(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
return (os.path.splitext(contig_fasta)[0] +
'.reverse_complement.fa')
def write_contig_reverse_complement(contig):
contig_fasta = contig.dataset_set.get(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
rc_contig_fasta = get_contig_rc_fasta_path(contig)
contig_seqrecord = SeqIO.parse(contig_fasta, 'fasta').next()
contig_seqrecord.seq = contig_seqrecord.seq.reverse_complement()
SeqIO.write(contig_seqrecord, rc_contig_fasta, 'fasta')
return rc_contig_fasta
def find_contig_endpoint(contig, clipped_same_end, direction):
assert direction in ['left', 'right']
# Write clipped query alignment sequences to fastq
contig_dir = contig.get_model_data_dir()
clipped_query_alignment_fq = os.path.join(
contig_dir,
'clipped_query_alignment_seqs.fq')
write_read_query_alignments_to_fastq(
clipped_same_end,
clipped_query_alignment_fq)
# Get BAM filename for alignment
clipped_to_contig_bam = os.path.join(
contig_dir,
'clipped_to_contig.bwa_align.bam')
# Get contig fasta
contig_fasta = contig.dataset_set.get(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
if contig.is_reverse:
align_to = get_contig_rc_fasta_path(contig)
else:
align_to = contig_fasta
if align_to:
ensure_bwa_index(align_to)
simple_align_with_bwa_mem(
clipped_query_alignment_fq, align_to,
clipped_to_contig_bam)
# Find contig endpoints
if direction == 'right':
return get_reads_with_mode_attribute(
clipped_to_contig_bam, lambda r: r.reference_end)
else:
return get_reads_with_mode_attribute(
clipped_to_contig_bam, lambda r: r.reference_start)
def find_contig_insertion_endpoints(contig,
left_clipped_same_end, right_clipped_same_end):
""" left_clipped_same_end/right_clipped_same_end are lists of
left and right clipped reads all with the same left/right
alignment endpoint, corresponding to the reference insertion
right/left endpoint
"""
contig_ins_left_end, _ = find_contig_endpoint(contig,
right_clipped_same_end, 'right')
contig_ins_right_end, _ = find_contig_endpoint(contig,
left_clipped_same_end, 'left')
return {
'left': contig_ins_left_end,
'right': contig_ins_right_end
}
| mit | -2,248,555,978,764,441,900 | 35.500829 | 80 | 0.636256 | false |
thinkle/gourmet | gourmet/plugins/key_editor/keyEditorPluggable.py | 1 | 3327 | # This library provides a pluggable that lets plugins that *use* our
# key editor to provide extra information based on the ingredient
# key. This will be used to show info in both the key editor and
# recipe card view and possibly to allow editing etc.
from gourmet.plugin_loader import Pluggable
from gourmet.plugin import PluginPlugin
from gourmet import gdebug
# Here's our template -- those implementing will have to take this as
# boilerplate code rather than subclassing it, since it's not possible
# to reliably access one plugin's module from another.
# Begin boilerplate...
#
# For a fuller example, see shopping_associations
class KeyEditorPlugin (PluginPlugin):
target_pluggable = 'KeyEditorPlugin'
selected_ingkeys = []
def setup_treeview_column (self, ike, key_col, instant_apply=False):
'''Set up a treeview column to display your data.
The key_col is the column in the treemodel which will contain
your data in the model. It\'s your responsibility to get
whatever other data you need yourself.
If you make this editable, it\'s up to you to apply the
changes as well to the database. If instant_apply is True,
then apply them instantly; if False, apply them when this
class\'s save method is called.
'''
raise NotImplementedError
def save (self):
'''Save any data the user has entered in your treeview column.
'''
pass
def offers_edit_widget (self):
'''Return True if this plugin provides an edit button for
editing data (if you need more than an editable cellrenderer
to let users edit your data, or would like to act on multiple
rows.
'''
return False
def setup_edit_widget (self):
'''Return an edit button to let users edit your data.
'''
raise NotImplementedError
def selection_changed (self, ingkeys):
'''Selected ingkeys have changed -- currently ingkeys are
        selected (and should be acted on by our edit_widget).
'''
self.selected_ingkeys = ingkeys
# End boilerplate
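# A hypothetical example plugin (not part of the original module): the class
# name and the `pending` dict are invented for illustration, and
# setup_treeview_column is left as a stub because the concrete cell renderer
# depends on the GUI toolkit in use.
class ExampleKeyNotePlugin (PluginPlugin):
    target_pluggable = 'KeyEditorPlugin'
    pending = {} # ingkey -> note text, applied when save() is called
    def setup_treeview_column (self, ike, key_col, instant_apply=False):
        # Build and return a toolkit-specific column bound to key_col here.
        raise NotImplementedError
    def save (self):
        # A real plugin would write self.pending to the database here.
        self.pending.clear()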
class KeyEditorPluginManager (Pluggable):
'''Manage plugins that provide users the ability to edit extra
associations, such as nutritional information, shopping list
categories, etc.'''
title = 'Title of Whatever we Do'
targets = ['KeyEditorPlugin']
__single = None
@classmethod
def instance(cls):
if KeyEditorPluginManager.__single is None:
KeyEditorPluginManager.__single = cls()
return KeyEditorPluginManager.__single
def __init__ (self):
Pluggable.__init__(self,[PluginPlugin])
def get_treeview_columns (self, ike, key_col, instant_apply=False):
return [p.setup_treeview_column(ike, key_col,instant_apply) for p in self.plugins]
def get_edit_buttons (self, ike):
buttons = []
for p in self.plugins:
            if p.offers_edit_widget():
                try:
                    buttons.append(p.setup_edit_widget())
                except:
                    print('Trouble initializing edit button for plugin %s' % p)
import traceback; traceback.print_exc()
return buttons
def get_key_editor_plugin_manager ():
return KeyEditorPluginManager.instance()
| gpl-2.0 | -7,379,759,485,367,620,000 | 32.606061 | 90 | 0.661858 | false |
japsu/voitto | tests/helpers.py | 1 | 1027 |
#
# Voitto - a simple yet efficient double ledger bookkeeping system
# Copyright (C) 2010 Santtu Pajukanta <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Testing related helpers.
"""
from functools import wraps
from nose.plugins.skip import SkipTest
def skipped(func):
@wraps(func)
def _wrapper(*args, **kwargs):
            raise SkipTest("Test {0} is skipped".format(func.__name__))
return _wrapper
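# Illustrative usage (the test function below is invented for demonstration):
# nose reports any test decorated with @skipped as skipped instead of running
# its body.
@skipped
def _example_test_not_ready():
    assert False # never reached; the wrapper raises SkipTest first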
| gpl-3.0 | -3,821,549,126,503,750,000 | 28.342857 | 71 | 0.735151 | false |
SetBased/py-etlt | etlt/dimension/Type2ReferenceDimension.py | 1 | 5466 | """
ETLT
Copyright 2016 Set Based IT Consultancy
Licence MIT
"""
import abc
import datetime
class Type2ReferenceDimension(metaclass=abc.ABCMeta):
"""
Abstract class for type2 dimensions for which the reference data is supplied with date intervals.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Object constructor.
"""
self._key_key = ''
"""
The key in the dict returned by call_stored_procedure holding the technical ID.
:type: str
"""
self._key_date_start = ''
"""
The key in the dict returned by call_stored_procedure holding the start date.
:type: str
"""
self._key_date_end = ''
"""
The key in the dict returned by call_stored_procedure holding the end date.
:type: str
"""
self._map = {}
"""
The map from natural keys to lists of tuples with start date, end date, and technical keys. The dates must be in
ISO 8601 (YYYY-MM-DD) format.
:type: dict[T, list[(str,str,int|None)]]
"""
# Pre-load look up data in to the map.
self.pre_load_data()
# ------------------------------------------------------------------------------------------------------------------
def get_id(self, natural_key, date, enhancement=None):
"""
Returns the technical ID for a natural key at a date or None if the given natural key is not valid.
:param T natural_key: The natural key.
:param str date: The date in ISO 8601 (YYYY-MM-DD) format.
:param T enhancement: Enhancement data of the dimension row.
:rtype: int|None
"""
if not date:
return None
# If the natural key is known return the technical ID immediately.
if natural_key in self._map:
for row in self._map[natural_key]:
if row[0] <= date <= row[1]:
return row[2]
# The natural key is not in the map of this dimension. Call a stored procedure for translating the natural key
# to a technical key.
self.pre_call_stored_procedure()
success = False
try:
row = self.call_stored_procedure(natural_key, date, enhancement)
# Convert dates to strings in ISO 8601 format.
if isinstance(row[self._key_date_start], datetime.date):
row[self._key_date_start] = row[self._key_date_start].isoformat()
if isinstance(row[self._key_date_end], datetime.date):
row[self._key_date_end] = row[self._key_date_end].isoformat()
success = True
finally:
self.post_call_stored_procedure(success)
# Make sure the natural key is in the map.
if natural_key not in self._map:
self._map[natural_key] = []
if row[self._key_key]:
self._map[natural_key].append((row[self._key_date_start],
row[self._key_date_end],
row[self._key_key]))
else:
self._map[natural_key].append((date, date, None))
return row[self._key_key]
# ------------------------------------------------------------------------------------------------------------------
@abc.abstractmethod
def call_stored_procedure(self, natural_key, date, enhancement):
"""
Call a stored procedure for getting the technical key of a natural key at a date. Returns the technical ID or
None if the given natural key is not valid.
:param T natural_key: The natural key.
:param str date: The date in ISO 8601 (YYYY-MM-DD) format.
:param T enhancement: Enhancement data of the dimension row.
:rtype: dict
"""
raise NotImplementedError()
# ------------------------------------------------------------------------------------------------------------------
def pre_load_data(self):
"""
Can be overridden to pre-load lookup data from a dimension table.
:rtype: None
"""
pass
# ------------------------------------------------------------------------------------------------------------------
def pre_call_stored_procedure(self):
"""
This method is invoked before call the stored procedure for getting the technical key of a natural key.
In a concurrent environment override this method to acquire a lock on the dimension or dimension hierarchy.
:rtype: None
"""
pass
# ------------------------------------------------------------------------------------------------------------------
def post_call_stored_procedure(self, success):
"""
This method is invoked after calling the stored procedure for getting the technical key of a natural key.
In a concurrent environment override this method to release a lock on the dimension or dimension hierarchy and
to commit or rollback the transaction.
:param bool success: True: the stored procedure is executed successfully. False: an exception has occurred.
:rtype: None
"""
pass
# ----------------------------------------------------------------------------------------------------------------------
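# A hypothetical concrete subclass (for illustration only; the class name,
# column names and sample row below are invented). It answers lookups from an
# in-memory dict instead of calling a real stored procedure.
class ExampleCountryDimension(Type2ReferenceDimension):
    _rows = {'NL': {'cdm_id': 1,
                    'date_start': '2016-01-01',
                    'date_end': '9999-12-31'}}
    def __init__(self):
        Type2ReferenceDimension.__init__(self)
        self._key_key = 'cdm_id'
        self._key_date_start = 'date_start'
        self._key_date_end = 'date_end'
    def call_stored_procedure(self, natural_key, date, enhancement):
        # A real implementation would execute a stored procedure here.
        return dict(self._rows.get(
            natural_key,
            {'cdm_id': None, 'date_start': date, 'date_end': date}))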
| mit | -5,448,622,367,667,991,000 | 34.960526 | 120 | 0.497988 | false |
ArcherSys/ArcherSys | skulpt/src/lib/pythonds/trees/bst.py | 1 | 8740 | #!/bin/env python3.1
# Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005, 2010
#
class BinarySearchTree:
'''
Author: Brad Miller
Date: 1/15/2005
    Description: Implement a binary search tree with the following interface
functions:
__contains__(y) <==> y in x
__getitem__(y) <==> x[y]
__init__()
__len__() <==> len(x)
__setitem__(k,v) <==> x[k] = v
clear()
get(k)
items()
keys()
values()
put(k,v)
in
del <==>
'''
def __init__(self):
self.root = None
self.size = 0
def put(self,key,val):
if self.root:
self._put(key,val,self.root)
else:
self.root = TreeNode(key,val)
self.size = self.size + 1
def _put(self,key,val,currentNode):
if key < currentNode.key:
if currentNode.hasLeftChild():
self._put(key,val,currentNode.leftChild)
else:
currentNode.leftChild = TreeNode(key,val,parent=currentNode)
else:
if currentNode.hasRightChild():
self._put(key,val,currentNode.rightChild)
else:
currentNode.rightChild = TreeNode(key,val,parent=currentNode)
def __setitem__(self,k,v):
self.put(k,v)
def get(self,key):
if self.root:
res = self._get(key,self.root)
if res:
return res.payload
else:
return None
else:
return None
def _get(self,key,currentNode):
if not currentNode:
return None
elif currentNode.key == key:
return currentNode
elif key < currentNode.key:
return self._get(key,currentNode.leftChild)
else:
return self._get(key,currentNode.rightChild)
def __getitem__(self,key):
res = self.get(key)
if res:
return res
else:
raise KeyError('Error, key not in tree')
def __contains__(self,key):
if self._get(key,self.root):
return True
else:
return False
def length(self):
return self.size
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def delete(self,key):
if self.size > 1:
nodeToRemove = self._get(key,self.root)
if nodeToRemove:
self.remove(nodeToRemove)
self.size = self.size-1
else:
raise KeyError('Error, key not in tree')
elif self.size == 1 and self.root.key == key:
self.root = None
self.size = self.size - 1
else:
raise KeyError('Error, key not in tree')
def __delitem__(self,key):
self.delete(key)
def remove(self,currentNode):
if currentNode.isLeaf(): #leaf
if currentNode == currentNode.parent.leftChild:
currentNode.parent.leftChild = None
else:
currentNode.parent.rightChild = None
elif currentNode.hasBothChildren(): #interior
succ = currentNode.findSuccessor()
succ.spliceOut()
currentNode.key = succ.key
currentNode.payload = succ.payload
else: # this node has one child
if currentNode.hasLeftChild():
if currentNode.isLeftChild():
currentNode.leftChild.parent = currentNode.parent
currentNode.parent.leftChild = currentNode.leftChild
elif currentNode.isRightChild():
currentNode.leftChild.parent = currentNode.parent
currentNode.parent.rightChild = currentNode.leftChild
else:
currentNode.replaceNodeData(currentNode.leftChild.key,
currentNode.leftChild.payload,
currentNode.leftChild.leftChild,
currentNode.leftChild.rightChild)
else:
if currentNode.isLeftChild():
currentNode.rightChild.parent = currentNode.parent
currentNode.parent.leftChild = currentNode.rightChild
elif currentNode.isRightChild():
currentNode.rightChild.parent = currentNode.parent
currentNode.parent.rightChild = currentNode.rightChild
else:
currentNode.replaceNodeData(currentNode.rightChild.key,
currentNode.rightChild.payload,
currentNode.rightChild.leftChild,
currentNode.rightChild.rightChild)
def inorder(self):
self._inorder(self.root)
def _inorder(self,tree):
if tree != None:
self._inorder(tree.leftChild)
print(tree.key)
self._inorder(tree.rightChild)
def postorder(self):
self._postorder(self.root)
def _postorder(self, tree):
if tree:
self._postorder(tree.rightChild)
self._postorder(tree.leftChild)
print(tree.key)
def preorder(self):
        self._preorder(self.root)
def _preorder(self,tree):
if tree:
print(tree.key)
self._preorder(tree.leftChild)
self._preorder(tree.rightChild)
class TreeNode:
def __init__(self,key,val,left=None,right=None,parent=None):
self.key = key
self.payload = val
self.leftChild = left
self.rightChild = right
self.parent = parent
self.balanceFactor = 0
def hasLeftChild(self):
return self.leftChild
def hasRightChild(self):
return self.rightChild
def isLeftChild(self):
return self.parent and self.parent.leftChild == self
def isRightChild(self):
return self.parent and self.parent.rightChild == self
def isRoot(self):
return not self.parent
def isLeaf(self):
return not (self.rightChild or self.leftChild)
def hasAnyChildren(self):
return self.rightChild or self.leftChild
def hasBothChildren(self):
return self.rightChild and self.leftChild
def replaceNodeData(self,key,value,lc,rc):
self.key = key
self.payload = value
self.leftChild = lc
self.rightChild = rc
if self.hasLeftChild():
self.leftChild.parent = self
if self.hasRightChild():
self.rightChild.parent = self
def findSuccessor(self):
succ = None
if self.hasRightChild():
succ = self.rightChild.findMin()
else:
if self.parent:
if self.isLeftChild():
succ = self.parent
else:
self.parent.rightChild = None
succ = self.parent.findSuccessor()
self.parent.rightChild = self
return succ
def spliceOut(self):
if self.isLeaf():
if self.isLeftChild():
self.parent.leftChild = None
else:
self.parent.rightChild = None
elif self.hasAnyChildren():
if self.hasLeftChild():
if self.isLeftChild():
self.parent.leftChild = self.leftChild
else:
self.parent.rightChild = self.leftChild
self.leftChild.parent = self.parent
else:
if self.isLeftChild():
self.parent.leftChild = self.rightChild
else:
self.parent.rightChild = self.rightChild
self.rightChild.parent = self.parent
def findMin(self):
current = self
while current.hasLeftChild():
current = current.leftChild
return current
def __iter__(self):
"""The standard inorder traversal of a binary tree."""
if self:
if self.hasLeftChild():
for elem in self.leftChild:
yield elem
yield self.key
if self.hasRightChild():
for elem in self.rightChild:
yield elem
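# Illustrative usage sketch (added example; the sample keys and payloads are
# arbitrary). It exercises the dict-style interface listed in the
# BinarySearchTree docstring.
if __name__ == '__main__':
    mytree = BinarySearchTree()
    mytree[3] = "red"
    mytree[4] = "blue"
    mytree[6] = "yellow"
    mytree[2] = "at"
    print(mytree[6])                  # yellow
    print(3 in mytree)                # True
    del mytree[3]
    print(len(mytree))                # 3
    print([key for key in mytree])    # inorder traversal: [2, 4, 6]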
| mit | 2,014,519,436,415,015,200 | 30.781818 | 77 | 0.515904 | false |
vlegoff/tsunami | src/primaires/scripting/actions/desequiper.py | 1 | 3400 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action desequiper."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
from primaires.objet.conteneur import SurPoids
class ClasseAction(Action):
"""Fait déséquiper un personnage."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.desequiper_objet, "Personnage", "Objet")
@staticmethod
def desequiper_objet(personnage, objet):
"""Force un personnage à déséquiper l'objet précisé.
Cette syntaxe de l'action se rapproche davantage de la commande
**retirer/remove**. Elle demande à un personnage de déséquiper un
objet qu'il équipe. L'objet est ensuite placé dans l'inventaire
du personnage, ou sur le sol si ce n'est pas possible.
Paramètres à préciser :
* personnage : le personnage que l'on souhaite déséquiper
* objet : l'objet que l'on souhaite déséquiper.
Exemple d'utilisation :
sabre = equipe(personnage, "sabre_bois")
desequiper personnage sabre
"""
if objet.contenu is not personnage.equipement.equipes:
raise ErreurExecution("{} n'équipe pas {}".format(
personnage.nom_unique, objet.identifiant))
        # Try to unequip the object
try:
personnage.equipement.equipes.retirer(objet)
except ValueError:
raise ErreurExecution("{} ne peut retirer {}".format(
personnage.nom_unique, objet.identifiant))
else:
try:
personnage.ramasser(objet=objet)
except SurPoids:
personnage.equipement.tenir_objet(objet=objet)
objet.script["retire"].executer(objet=objet,
personnage=personnage)
| bsd-3-clause | 675,812,069,121,602,700 | 40.195122 | 79 | 0.706927 | false |
chrys87/fenrir | play zone/detectDevices.py | 1 | 1911 | #!/bin/python
import evdev
iDevices = {}
iDeviceNo = 0
def updateInputDevices(force = False, init = False):
    global iDevices, iDeviceNo
if init:
iDevices = {}
iDeviceNo = 0
deviceFileList = evdev.list_devices()
if not force:
if len(deviceFileList) == iDeviceNo:
return
iDeviceNo = len(deviceFileList)
mode = 'ALL'
iDevicesFiles = []
for device in iDevices:
iDevicesFiles.append(iDevices[device].fn)
print(len(iDevicesFiles),len(deviceFileList))
if len(iDevicesFiles) == len(deviceFileList):
return
for deviceFile in deviceFileList:
try:
if deviceFile in iDevicesFiles:
print('skip')
continue
open(deviceFile)
# 3 pos absolute
# 2 pos relative
# 1 Keys
currDevice = evdev.InputDevice(deviceFile)
cap = currDevice.capabilities()
if mode in ['ALL','NOMICE']:
if 1 in cap:
if 116 in cap[1] and len(cap[1]) < 5:
print('power')
continue
if mode == 'ALL':
iDevices[currDevice.fd] = currDevice
print('Device added:' + iDevices[currDevice.fd].name)
elif mode == 'NOMICE':
if not ((2 in cap) or (3 in cap)):
iDevices[currDevice.fd] = currDevice
print('Device added:' + iDevices[currDevice.fd].name)
elif currDevice.name.upper() in mode.split(','):
iDevices[currDevice.fd] = currDevice
print('Device added:' + iDevices[currDevice.fd].name)
except Exception as e:
print("Skip Inputdevice : " + deviceFile +' ' + str(e))
updateInputDevices()
| lgpl-3.0 | -7,624,252,007,267,087,000 | 37.22 | 88 | 0.508111 | false |
rchatterjee/nocrack | newcode/honeyvault_config.py | 1 | 3284 | # The following dictionaries should be provided to buildcfg.py
# 1: base dictionary //only character words will be considered
# 2: tweak set file
# 3: dictionary with count // PCFG will be built over this
# 4: output PCFG file name/path
# 5: output Trie file name/path
# empty lines and line beginning with '#' will be discarded
# exact dicionary path should be given.
import math
import os
import random
DEBUG = os.environ.get("DEBUG", False)
BASE_DIR = os.getcwd()
thisdir = os.path.dirname(os.path.abspath(__file__))
# DIC_TRIE_FILE = 'data/english.tri'
# DICTIONARY_DAWG = '{}/Dictionary_Store/dictionary1.1.dawg.gz'.format(thisdir)
# STANDARD_DIC_FILE = "{}/Dictionary_Store/standard_english.tri.gz".format(thisdir)
# GRAMMAR_OUTPUT_FILE = "{}/data/combined.gmr.bz2".format(thisdir)
# GRAMMAR_INPUT_FILE = "{}/data/combined.tri.bz2".format(thisdir)
# HANDGRAMMAR_FILE = "{}/data/grammar.txt".format(thisdir)
STATIC_DIR = os.path.join(thisdir, 'static')
TRAINED_GRAMMAR_FILE = os.path.join(STATIC_DIR, 'grammar.cfg.gz')
if DEBUG:
TRAINED_GRAMMAR_FILE += '~orig'
VAULT_DIST_FILE = os.path.join(STATIC_DIR, 'vault_dist.cfg')
# Don't change
EPSILON = '|_|'
GRAMMAR_R = 0
MEMLIMMIT = 1024 # 1024 MB, 1GB
MIN_COUNT = 2
PRODUCTION = 1
NONTERMINAL = 1
TERMINAL = 1 - NONTERMINAL
REPR_SIZE = 4 # number of bytes to represent an integer. normally 4 bytes. But
# we might go for higher values for better security.
MAX_INT = 256 ** REPR_SIZE # value of maximum integer in this representation.
PASSWORD_LENGTH = 100 # length of the password encoding
HONEY_VAULT_GRAMMAR_SIZE = 500 # 400 bytes, 50 integers/rules
# This controls the size of the NoCrack vault. Refer to the Oakland 15 paper
# (NoCrack) for more details. If you change this remember to delete
# static/vault.db to see the effect. Needless to say, you will lose all your
# passwords. Export/import operations are on their way. (TODO: Import-Export
# functions)
HONEY_VAULT_S1 = 1000
HONEY_VAULT_S2 = 1000
HONEY_VAULT_STORAGE_SIZE = HONEY_VAULT_S1 + HONEY_VAULT_S2
# For each password there is 1 bit saying whether the password is m/c or human
# generated. '1' --> m/c or '0' --> human generated pw.
# TODO: move it to more succinct repr, Google's protobuf!
HONEY_VAULT_MACHINE_PASS_SET_SIZE = int(math.ceil(HONEY_VAULT_STORAGE_SIZE / 8))
HONEY_VAULT_ENCODING_SIZE = HONEY_VAULT_GRAMMAR_SIZE + \
HONEY_VAULT_STORAGE_SIZE * PASSWORD_LENGTH
HONEY_VAULT_TOTAL_CIPHER_SIZE = HONEY_VAULT_ENCODING_SIZE + \
int(math.ceil(HONEY_VAULT_MACHINE_PASS_SET_SIZE / 4)) + \
8 # PBKDF1 salt size
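# A quick sanity check of the numbers above (illustrative only): with the
# defaults, HONEY_VAULT_STORAGE_SIZE = 2000, so the machine-pass bitmap takes
# ceil(2000 / 8) = 250 bytes, HONEY_VAULT_ENCODING_SIZE = 500 + 2000 * 100 =
# 200500, and HONEY_VAULT_TOTAL_CIPHER_SIZE evaluates to 200500 + ceil(250 / 4)
# + 8 = 200571 under Python 3 (200570 under Python 2's integer division).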
SECURITY_PARAM = 16
SECURITY_PARAM_IN_BASE64 = (SECURITY_PARAM * 4) / 3 + 1
# Static domain mapping list
STATIC_DOMAIN_LIST = '{}/server/static_domain_map.txt'.format(thisdir)
STATIC_DOMAIN_HASH_LIST = '{}/static/static_domain_hashes.txt'.format(thisdir)
# Machine generated password probability in a set of 1000
MACHINE_GENRATED_PASS_PROB = 10
# Required by honey_client
HONEY_SERVER_URL = "http://localhost:5000/"
VAULT_FILE = 'static/vault.db'
L33T = {
'3': 'e', '4': 'a', '@': 'a',
'$': 's', '0': 'o', '1': 'i',
'z': 's'
}
if DEBUG:
random.seed(123456)
else:
random.seed(os.urandom(4))
| mit | -7,326,233,119,189,157,000 | 32.510204 | 89 | 0.68849 | false |
andrewyoung1991/abjad | abjad/tools/documentationtools/ReSTDirective.py | 1 | 3357 | # -*- encoding: utf-8 -*-
import abc
from abjad.tools.datastructuretools.TreeContainer import TreeContainer
class ReSTDirective(TreeContainer):
r'''A ReST directive.
'''
### INITIALIZER ###
def __init__(
self,
argument=None,
children=None,
directive=None,
name=None,
options=None,
):
TreeContainer.__init__(self, children=children, name=name)
assert isinstance(options, (dict, type(None)))
self._argument = argument
self._options = {}
if options is not None:
self._options.update(options)
self._directive = directive
### PRIVATE PROPERTIES ###
@property
def _children_rest_format_contributions(self):
result = []
for child in self.children:
result.append('')
contribution = child._rest_format_contributions
for x in contribution:
if x:
result.append(' ' + x)
else:
result.append(x)
return result
@property
def _rest_format_contributions(self):
if self.argument:
result = ['.. {}:: {}'.format(self.directive, self.argument)]
else:
result = ['.. {}::'.format(self.directive)]
for key, value in sorted(self.options.items()):
option = ' :{}:'.format(key)
if value is True:
pass
elif value is None or value is False:
continue
elif isinstance(value, (list, tuple)):
option += ' ' + ', '.join(str(x) for x in value)
elif isinstance(value, (int, float, str)):
option += ' ' + str(value)
result.append(option)
result.extend(self._children_rest_format_contributions)
return result
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
return systemtools.StorageFormatSpecification(
self,
keywords_ignored_when_false=(
'children',
'name',
'options',
),
)
### PUBLIC PROPERTIES ###
@property
def argument(self):
r'''Gets and sets argument of ReST directive.
'''
return self._argument
@argument.setter
def argument(self, arg):
assert isinstance(arg, (str, type(None)))
self._argument = arg
@property
def directive(self):
r'''Gets and sets directive of ReST directive.
'''
return self._directive
@directive.setter
def directive(self, expr):
self._directive = str(expr)
@property
def node_class(self):
r'''Node class of ReST directive.
'''
from abjad.tools import documentationtools
return (
documentationtools.ReSTDirective,
documentationtools.ReSTHeading,
documentationtools.ReSTHorizontalRule,
documentationtools.ReSTParagraph,
)
@property
def options(self):
r'''Options of ReST directive.
'''
return self._options
@property
def rest_format(self):
r'''ReST format of ReST directive.
'''
return '\n'.join(self._rest_format_contributions) | gpl-3.0 | -1,021,700,211,659,350,400 | 26.983333 | 73 | 0.542746 | false |
mozilla/olympia | src/olympia/zadmin/tests/test_views.py | 1 | 7143 | # -*- coding: utf-8 -*-
import json
from unittest import mock
from django.urls import reverse
from pyquery import PyQuery as pq
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.amo.tests import TestCase, user_factory
from olympia.files.models import File
from olympia.users.models import UserProfile
from olympia.versions.models import Version
class TestHomeAndIndex(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestHomeAndIndex, self).setUp()
self.client.login(email='[email protected]')
def test_get_home(self):
url = reverse('admin:index')
response = self.client.get(url, follow=True)
assert response.status_code == 200
assert response.context['user'].username == 'admin'
assert response.context['user'].email == '[email protected]'
def test_django_index(self):
# Can access with full admin.
url = reverse('admin:index')
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert len(modules) == 20 # Increment as we add new admin modules.
# Redirected because no permissions if not logged in.
self.client.logout()
response = self.client.get(url)
self.assert3xx(response, '/en-US/admin/models/login/?next=/en-US/admin/models/')
# Redirected when logged in without enough permissions.
user = user_factory(username='staffperson', email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
self.assert3xx(response, '/en-US/admin/models/login/?next=/en-US/admin/models/')
# Can access with a "is_staff" user.
user.update(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
# Admin:Something doesn't give access to anything, so they can log in
# but they don't see any modules.
assert len(modules) == 0
@mock.patch('olympia.accounts.utils.default_fxa_login_url')
def test_django_login_page(self, default_fxa_login_url):
login_url = 'https://example.com/fxalogin'
default_fxa_login_url.return_value = login_url
# Check we can actually access the /login page - django admin uses it.
url = reverse('admin:login')
response = self.client.get(url)
# if you're already logged in, redirect to the index
self.assert3xx(response, '/en-US/admin/models/')
# Redirected to fxa because no permissions if not logged in.
self.client.logout()
response = self.client.get(url)
self.assert3xx(response, login_url)
# But if logged in and not enough permissions return a 403.
user = user_factory(username='staffperson', email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
assert response.status_code == 403
# But can access with a "is_staff" user.
user.update(email='[email protected]')
response = self.client.get(url)
self.assert3xx(response, '/en-US/admin/models/')
@mock.patch('olympia.accounts.utils.default_fxa_login_url')
def test_django_login_page_with_next(self, default_fxa_login_url):
login_url = 'https://example.com/fxalogin'
default_fxa_login_url.return_value = login_url
# if django admin passes on a next param, check we use it.
url = reverse('admin:login') + '?next=/en-US/admin/models/addon/'
response = self.client.get(url)
# redirect to the correct page
self.assert3xx(response, '/en-US/admin/models/addon/')
# Same with an "is_staff" user.
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
self.assert3xx(response, '/en-US/admin/models/addon/')
def test_django_admin_logout(self):
url = reverse('admin:logout')
response = self.client.get(url, follow=False)
self.assert3xx(response, '/', status_code=302)
class TestRecalculateHash(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super().setUp()
self.client.login(email='[email protected]')
@mock.patch.object(
File,
'file_path',
amo.tests.AMOPaths().file_fixture_path('delicious_bookmarks-2.1.106-fx.xpi'),
)
def test_regenerate_hash(self):
version = Version.objects.create(addon_id=3615)
file = File.objects.create(
filename='delicious_bookmarks-2.1.106-fx.xpi', version=version
)
r = self.client.post(reverse('zadmin.recalc_hash', args=[file.id]))
assert json.loads(r.content)['success'] == 1
file = File.objects.get(pk=file.id)
assert file.size, 'File size should not be zero'
assert file.hash, 'File hash should not be empty'
@mock.patch.object(
File,
'file_path',
amo.tests.AMOPaths().file_fixture_path('delicious_bookmarks-2.1.106-fx.xpi'),
)
def test_regenerate_hash_get(self):
""" Don't allow GET """
version = Version.objects.create(addon_id=3615)
file = File.objects.create(
filename='delicious_bookmarks-2.1.106-fx.xpi', version=version
)
r = self.client.get(reverse('zadmin.recalc_hash', args=[file.id]))
assert r.status_code == 405 # GET out of here
class TestPerms(TestCase):
fixtures = ['base/users']
FILE_ID = '1234567890abcdef1234567890abcdef'
def assert_status(self, view, status, follow=False, **kw):
"""Check that requesting the named view returns the expected status."""
assert (
self.client.get(reverse(view, kwargs=kw), follow=follow).status_code
== status
)
def test_admin_user(self):
# Admin should see views with Django's perm decorator and our own.
assert self.client.login(email='[email protected]')
self.assert_status('admin:index', 200, follow=True)
def test_staff_user(self):
# Staff users have some privileges.
user = UserProfile.objects.get(email='[email protected]')
group = Group.objects.create(name='Staff', rules='Admin:*')
GroupUser.objects.create(group=group, user=user)
assert self.client.login(email='[email protected]')
self.assert_status('admin:index', 200, follow=True)
def test_unprivileged_user(self):
# Unprivileged user.
assert self.client.login(email='[email protected]')
self.assert_status('admin:index', 403, follow=True)
# Anonymous users should get a login redirect.
self.client.logout()
self.assert3xx(
self.client.get(reverse('admin:index')),
'/en-US/admin/models/login/?next=/en-US/admin/models/',
)
| bsd-3-clause | 8,470,127,421,560,938,000 | 36.793651 | 88 | 0.639647 | false |
google/mirandum | alerts/streamtip/migrations/0002_migrate_updater.py | 1 | 1372 | # -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
def migrate_updater(apps, schema_editor):
StreamtipEvent = apps.get_model("streamtip", "StreamtipEvent")
UpdaterEvent = apps.get_model("main", "UpdaterEvent")
for event in StreamtipEvent.objects.all():
try:
ue = UpdaterEvent.objects.get(pk=event.updaterevent_ptr_id)
ue.base_updater = event.updater.updater_ptr
ue.save()
except Exception:
pass
class Migration(migrations.Migration):
dependencies = [
('main', '0009_updaterevent_base_updater'),
('streamtip', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_updater)
]
| apache-2.0 | -8,171,553,838,232,492,000 | 33.3 | 75 | 0.682216 | false |
ninefold/libcloud | libcloud/compute/drivers/voxel.py | 1 | 11150 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Voxel VoxCloud driver
"""
import datetime
import hashlib
from libcloud.utils.py3 import b
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
VOXEL_API_HOST = "api.voxel.net"
class VoxelResponse(XmlResponse):
def __init__(self, response, connection):
self.parsed = None
super(VoxelResponse, self).__init__(response=response,
connection=connection)
def parse_body(self):
if not self.body:
return None
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
return self.parsed
def parse_error(self):
err_list = []
if not self.body:
return None
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
for err in self.parsed.findall('err'):
code = err.get('code')
err_list.append("(%s) %s" % (code, err.get('msg')))
# From voxel docs:
# 1: Invalid login or password
# 9: Permission denied: user lacks access rights for this method
if code == "1" or code == "9":
# sucks, but only way to detect
# bad authentication tokens so far
raise InvalidCredsError(err_list[-1])
return "\n".join(err_list)
def success(self):
if not self.parsed:
self.parsed = super(VoxelResponse, self).parse_body()
stat = self.parsed.get('stat')
if stat != "ok":
return False
return True
class VoxelConnection(ConnectionUserAndKey):
"""
Connection class for the Voxel driver
"""
host = VOXEL_API_HOST
responseCls = VoxelResponse
def add_default_params(self, params):
params = dict([(k, v) for k, v in list(params.items())
if v is not None])
params["key"] = self.user_id
params["timestamp"] = datetime.datetime.utcnow().isoformat()+"+0000"
keys = list(params.keys())
keys.sort()
md5 = hashlib.md5()
md5.update(b(self.key))
for key in keys:
if params[key]:
if not params[key] is None:
md5.update(b("%s%s"% (key, params[key])))
else:
md5.update(b(key))
params['api_sig'] = md5.hexdigest()
return params
VOXEL_INSTANCE_TYPES = {}
RAM_PER_CPU = 2048
NODE_STATE_MAP = {
'IN_PROGRESS': NodeState.PENDING,
'QUEUED': NodeState.PENDING,
'SUCCEEDED': NodeState.RUNNING,
'shutting-down': NodeState.TERMINATED,
'terminated': NodeState.TERMINATED,
'unknown': NodeState.UNKNOWN,
}
class VoxelNodeDriver(NodeDriver):
"""
Voxel VoxCLOUD node driver
"""
connectionCls = VoxelConnection
type = Provider.VOXEL
name = 'Voxel VoxCLOUD'
website = 'http://www.voxel.net/'
def _initialize_instance_types():
for cpus in range(1,14):
if cpus == 1:
name = "Single CPU"
else:
name = "%d CPUs" % cpus
id = "%dcpu" % cpus
ram = cpus * RAM_PER_CPU
VOXEL_INSTANCE_TYPES[id]= {
'id': id,
'name': name,
'ram': ram,
'disk': None,
'bandwidth': None,
'price': None}
features = {"create_node": [],
"list_sizes": ["variable_disk"]}
_initialize_instance_types()
def list_nodes(self):
params = {"method": "voxel.devices.list"}
result = self.connection.request('/', params=params).object
return self._to_nodes(result)
def list_sizes(self, location=None):
return [ NodeSize(driver=self.connection.driver, **i)
for i in list(VOXEL_INSTANCE_TYPES.values()) ]
def list_images(self, location=None):
params = {"method": "voxel.images.list"}
result = self.connection.request('/', params=params).object
return self._to_images(result)
def create_node(self, **kwargs):
"""Create Voxel Node
@keyword name: the name to assign the node (mandatory)
@type name: C{str}
@keyword image: distribution to deploy
@type image: L{NodeImage}
@keyword size: the plan size to create (mandatory)
Requires size.disk (GB) to be set manually
@type size: L{NodeSize}
@keyword location: which datacenter to create the node in
@type location: L{NodeLocation}
@keyword ex_privateip: Backend IP address to assign to node;
must be chosen from the customer's
private VLAN assignment.
@type ex_privateip: C{str}
@keyword ex_publicip: Public-facing IP address to assign to node;
must be chosen from the customer's
public VLAN assignment.
@type ex_publicip: C{str}
@keyword ex_rootpass: Password for root access; generated if unset.
@type ex_rootpass: C{str}
@keyword ex_consolepass: Password for remote console;
generated if unset.
@type ex_consolepass: C{str}
@keyword ex_sshuser: Username for SSH access
@type ex_sshuser: C{str}
@keyword ex_sshpass: Password for SSH access; generated if unset.
@type ex_sshpass: C{str}
@keyword ex_voxel_access: Allow access Voxel administrative access.
Defaults to False.
@type ex_voxel_access: C{bool}
"""
# assert that disk > 0
if not kwargs["size"].disk:
raise ValueError("size.disk must be non-zero")
# convert voxel_access to string boolean if needed
voxel_access = kwargs.get("ex_voxel_access", None)
if voxel_access is not None:
voxel_access = "true" if voxel_access else "false"
params = {
'method': 'voxel.voxcloud.create',
'hostname': kwargs["name"],
'disk_size': int(kwargs["size"].disk),
'facility': kwargs["location"].id,
'image_id': kwargs["image"].id,
'processing_cores': kwargs["size"].ram / RAM_PER_CPU,
'backend_ip': kwargs.get("ex_privateip", None),
'frontend_ip': kwargs.get("ex_publicip", None),
'admin_password': kwargs.get("ex_rootpass", None),
'console_password': kwargs.get("ex_consolepass", None),
'ssh_username': kwargs.get("ex_sshuser", None),
'ssh_password': kwargs.get("ex_sshpass", None),
'voxel_access': voxel_access,
}
object = self.connection.request('/', params=params).object
if self._getstatus(object):
return Node(
id = object.findtext("device/id"),
name = kwargs["name"],
state = NODE_STATE_MAP[object.findtext("device/status")],
public_ips = kwargs.get("publicip", None),
private_ips = kwargs.get("privateip", None),
driver = self.connection.driver
)
else:
return None
def reboot_node(self, node):
"""
Reboot the node by passing in the node object
"""
params = {'method': 'voxel.devices.power',
'device_id': node.id,
'power_action': 'reboot'}
return self._getstatus(self.connection.request('/', params=params).object)
def destroy_node(self, node):
"""
Destroy node by passing in the node object
"""
params = {'method': 'voxel.voxcloud.delete',
'device_id': node.id}
return self._getstatus(self.connection.request('/', params=params).object)
def list_locations(self):
params = {"method": "voxel.voxcloud.facilities.list"}
result = self.connection.request('/', params=params).object
nodes = self._to_locations(result)
return nodes
def _getstatus(self, element):
status = element.attrib["stat"]
return status == "ok"
def _to_locations(self, object):
return [NodeLocation(element.attrib["label"],
element.findtext("description"),
element.findtext("description"),
self)
for element in object.findall('facilities/facility')]
def _to_nodes(self, object):
nodes = []
for element in object.findall('devices/device'):
if element.findtext("type") == "Virtual Server":
try:
state = self.NODE_STATE_MAP[element.attrib['status']]
except KeyError:
state = NodeState.UNKNOWN
public_ip = private_ip = None
ipassignments = element.findall("ipassignments/ipassignment")
for ip in ipassignments:
if ip.attrib["type"] =="frontend":
public_ip = ip.text
elif ip.attrib["type"] == "backend":
private_ip = ip.text
nodes.append(Node(id= element.attrib['id'],
name=element.attrib['label'],
state=state,
public_ips= public_ip,
private_ips= private_ip,
driver=self.connection.driver))
return nodes
def _to_images(self, object):
images = []
for element in object.findall("images/image"):
images.append(NodeImage(id = element.attrib["id"],
name = element.attrib["summary"],
driver = self.connection.driver))
return images
| apache-2.0 | 557,125,822,919,852,400 | 35.319218 | 82 | 0.553184 | false |
Entscheider/SeamEater | ImgLib/Poisson.py | 1 | 4047 | # -*- coding: utf-8 -*-
# Functions for Poisson-Reconstruction
import numpy as np
from ImgLib.MyFilter import myfilter as filter
# Some explanations: http://eric-yuan.me/poisson-blending/
def jacobi(A, b, N=25, x=None, progressFunc = None, stopFunc=None):
"""
Solving A*x =b for x by using the Jacobi-method.
@param A The Matrix
@param b The solution A*x=b
@param N the iterations for solving.
@param x A guess value for beginning.
@param progressFunc A function for showing the progress.
@param stopFunc Function. Stopping when evaluated to true
@return The solution x
"""
# Create an initial guess if needed
if x is None:
x = np.zeros(len(A[0]))
# Create a vector of the diagonal elements of A
# and subtract them from A
D = np.diag(A)
R = A - np.diagflat(D)
# Iterate for N times
for i in range(N):
if (progressFunc):
progressFunc(i*100/N)
if stopFunc and stopFunc():
return x
x = (b - np.dot(R, x)) / D
return x
def laplace_div(array):
'''
Calculating the Laplace derivative
@param array The Image
@return The numpy array of the Laplace derivative
'''
kern=-np.array([[0,1,0],[1,-4,1],[0,1,0]])
return filter(array,kern)
# Inspired by http://pebbie.wordpress.com/2012/04/04/python-poisson-image-editing/
def poissonInsertMask(m, mask, div, iterations=20, progressFunc = None, stopFunc=None):
'''
Computes from the Laplace derivative div and the picture m
a new picture. That picture blends them together using Poisson.
@param m The target picture
@param mask mask[x,y]=1 => Reconstruct this pixel.
mask[x,y]=0 => Use the value from m for this pixel
0<mask[x,y]<1 => Mix both picture
@param div The Laplace derivative for reconstruction. (numpy Array)
@param iterations Number of iteration for solving the linear system of equations.
iterations <=0 => Use the exact solution
@param progressFunc A function for showing the progress.
@param stopFunc Function. Stopping when evaluated to true
@return the reconstructed picture.
'''
h, w = mask.shape
r, c = mask.nonzero()
N = len(r)
idx = np.zeros(mask.shape, dtype=np.uint32)
for i in range(N):
idx.itemset((r.item(i), c.item(i)), i + 1)
b_r = np.zeros(N)
A = np.zeros((N, N))
for i in range(N):
if (progressFunc):
progressFunc(i*100//(2*N))
if stopFunc and stopFunc():
return
y, x = r.item(i), c.item(i)
b_r.itemset(i, div.item((y, x)))
p = i
Np = 0
if y > 0 and mask.item((y - 1, x)):
q = idx.item((y - 1, x)) - 1
A[p, q] = -1.
Np += 1
if x > 0 and mask.item((y, x - 1)):
q = idx.item((y, x - 1)) - 1
A[p, q] = -1.
Np += 1
if y < h - 1 and mask.item((y + 1, x)):
q = idx.item((y + 1, x)) - 1
A[p, q] = -1.
Np += 1
if x < w - 1 and mask.item((y, x + 1)):
q = idx.item((y, x + 1)) - 1
A[p, q] = -1
Np += 1
A[p, p] = Np * 1.
guess = None
x = 0
if (iterations <= 0):
x = np.linalg.solve(A,b_r).astype("uint8")
else:
if (progressFunc):
x = jacobi(A, b_r, x=guess, N=iterations, progressFunc = lambda k:progressFunc(50+k/2), stopFunc = stopFunc)
else:
x = jacobi(A, b_r, x=guess, N=iterations, stopFunc = stopFunc)
if stopFunc and stopFunc():
return None
for i in range(N):
yy, xx = r.item(i), c.item(i)
v = m[yy, xx] - x[i]
if v < 0:
v = 0
elif v > 255:
v = 255
if (iterations >0): # mixing
m[yy, xx] = v * mask[yy, xx] + m[yy, xx] * (1 - mask[yy, xx])
else: # no mixing needed ?!
m[yy, xx] = v
return m
| gpl-3.0 | -1,879,845,899,330,050,600 | 32.172131 | 123 | 0.538424 | false |
alhashash/odoo | addons/hr_timesheet_invoice/hr_timesheet_invoice.py | 2 | 18970 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_timesheet_invoice_factor(osv.osv):
_name = "hr_timesheet_invoice.factor"
_description = "Invoice Rate"
_order = 'factor'
_columns = {
'name': fields.char('Internal Name', required=True, translate=True),
'customer_name': fields.char('Name', help="Label for the customer"),
'factor': fields.float('Discount (%)', required=True, help="Discount in percentage"),
}
_defaults = {
'factor': lambda *a: 0.0,
}
class account_analytic_account(osv.osv):
def _invoiced_calc(self, cr, uid, ids, name, arg, context=None):
obj_invoice = self.pool.get('account.invoice')
res = {}
cr.execute('SELECT account_id as account_id, l.invoice_id '
'FROM hr_analytic_timesheet h LEFT JOIN account_analytic_line l '
'ON (h.line_id=l.id) '
'WHERE l.account_id = ANY(%s)', (ids,))
account_to_invoice_map = {}
for rec in cr.dictfetchall():
account_to_invoice_map.setdefault(rec['account_id'], []).append(rec['invoice_id'])
for account in self.browse(cr, uid, ids, context=context):
invoice_ids = filter(None, list(set(account_to_invoice_map.get(account.id, []))))
for invoice in obj_invoice.browse(cr, uid, invoice_ids, context=context):
res.setdefault(account.id, 0.0)
res[account.id] += invoice.amount_untaxed
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
_inherit = "account.analytic.account"
_columns = {
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist',
help="The product to invoice is defined on the employee form, the price will be deducted by this pricelist on the product."),
'amount_max': fields.float('Max. Invoice Price',
help="Keep empty if this contract is not limited to a total fixed price."),
'amount_invoiced': fields.function(_invoiced_calc, string='Invoiced Amount',
help="Total invoiced"),
'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Timesheet Invoicing Ratio',
help="You usually invoice 100% of the timesheets. But if you mix fixed price and timesheet invoicing, you may use another ratio. For instance, if you do a 20% advance invoice (fixed price, based on a sales order), you should invoice the rest on timesheet with a 80% ratio."),
}
_defaults = {
'pricelist_id': lambda self, cr, uid, c: self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'product.list0')
}
def on_change_partner_id(self, cr, uid, ids, partner_id, name, context=None):
res = super(account_analytic_account, self).on_change_partner_id(cr, uid, ids, partner_id, name, context=context)
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
if pricelist:
res['value']['pricelist_id'] = pricelist
return res
def set_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice', ondelete="set null", copy=False),
'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Invoiceable', help="It allows to set the discount while making invoice, keep empty if the activities should not be invoiced."),
}
def _default_journal(self, cr, uid, context=None):
proxy = self.pool.get('hr.employee')
record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context)
if record_ids:
employee = proxy.browse(cr, uid, record_ids[0], context=context)
return employee.journal_id and employee.journal_id.id or False
return False
def _default_general_account(self, cr, uid, context=None):
proxy = self.pool.get('hr.employee')
record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context)
if record_ids:
employee = proxy.browse(cr, uid, record_ids[0], context=context)
if employee.product_id and employee.product_id.property_account_income:
return employee.product_id.property_account_income.id
return False
_defaults = {
'journal_id' : _default_journal,
'general_account_id' : _default_general_account,
}
def write(self, cr, uid, ids, vals, context=None):
self._check_inv(cr, uid, ids, vals)
return super(account_analytic_line,self).write(cr, uid, ids, vals,
context=context)
def _check_inv(self, cr, uid, ids, vals):
select = ids
if isinstance(select, (int, long)):
select = [ids]
if ( not vals.has_key('invoice_id')) or vals['invoice_id' ] == False:
for line in self.browse(cr, uid, select):
if line.invoice_id:
raise UserError(_('You cannot modify an invoiced analytic line!'))
return True
def _get_invoice_price(self, cr, uid, account, product_id, user_id, qty, context = {}):
pro_price_obj = self.pool.get('product.pricelist')
if account.pricelist_id:
pl = account.pricelist_id.id
price = pro_price_obj.price_get(cr,uid,[pl], product_id, qty or 1.0, account.partner_id.id, context=context)[pl]
else:
price = 0.0
return price
def _prepare_cost_invoice(self, cr, uid, partner, company_id, currency_id, analytic_lines, group_by_partner=False, context=None):
""" returns values used to create main invoice from analytic lines"""
account_payment_term_obj = self.pool['account.payment.term']
if group_by_partner:
invoice_name = partner.name
else:
invoice_name = analytic_lines[0].account_id.name
date_due = False
if partner.property_payment_term:
pterm_list = account_payment_term_obj.compute(cr, uid,
partner.property_payment_term.id, value=1,
date_ref=time.strftime('%Y-%m-%d'))
if pterm_list:
pterm_list = [line[0] for line in pterm_list]
pterm_list.sort()
date_due = pterm_list[-1]
return {
'name': "%s - %s" % (time.strftime('%d/%m/%Y'), invoice_name),
'partner_id': partner.id,
'company_id': company_id,
'payment_term': partner.property_payment_term.id or False,
'account_id': partner.property_account_receivable.id,
'currency_id': currency_id,
'date_due': date_due,
'fiscal_position': partner.property_account_position.id
}
def _prepare_cost_invoice_line(self, cr, uid, invoice_id, product_id, uom, user_id,
factor_id, account, analytic_lines, journal_type, data, context=None):
product_obj = self.pool['product.product']
uom_context = dict(context or {}, uom=uom)
total_price = sum(l.amount for l in analytic_lines)
total_qty = sum(l.unit_amount for l in analytic_lines)
if data.get('product'):
# force product, use its public price
if isinstance(data['product'], (tuple, list)):
product_id = data['product'][0]
else:
product_id = data['product']
unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context)
elif journal_type == 'general' and product_id:
# timesheets, use sale price
unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context)
else:
# expenses, using price from amount field
unit_price = total_price*-1.0 / total_qty
factor = self.pool['hr_timesheet_invoice.factor'].browse(cr, uid, factor_id, context=uom_context)
factor_name = factor.customer_name
curr_invoice_line = {
'price_unit': unit_price,
'quantity': total_qty,
'product_id': product_id,
'discount': factor.factor,
'invoice_id': invoice_id,
'name': factor_name,
'uos_id': uom,
'account_analytic_id': account.id,
}
if product_id:
product = product_obj.browse(cr, uid, product_id, context=uom_context)
factor_name = product_obj.name_get(cr, uid, [product_id], context=uom_context)[0][1]
if factor.customer_name:
factor_name += ' - ' + factor.customer_name
general_account = product.property_account_income or product.categ_id.property_account_income_categ
if not general_account:
raise UserError(_("Configuration Error!") + '\n' + _("Please define income account for product '%s'.") % product.name)
taxes = product.taxes_id or general_account.tax_ids
tax = self.pool['account.fiscal.position'].map_tax(cr, uid, account.partner_id.property_account_position, taxes)
curr_invoice_line.update({
'invoice_line_tax_id': [(6, 0, tax)],
'name': factor_name,
'invoice_line_tax_id': [(6, 0, tax)],
'account_id': general_account.id,
})
note = []
for line in analytic_lines:
# set invoice_line_note
details = []
if data.get('date', False):
details.append(line['date'])
if data.get('time', False):
if line['product_uom_id']:
details.append("%s %s" % (line.unit_amount, line.product_uom_id.name))
else:
details.append("%s" % (line['unit_amount'], ))
if data.get('name', False):
details.append(line['name'])
if details:
note.append(u' - '.join(map(lambda x: unicode(x) or '', details)))
if note:
curr_invoice_line['name'] += "\n" + ("\n".join(map(lambda x: unicode(x) or '', note)))
return curr_invoice_line
def invoice_cost_create(self, cr, uid, ids, data=None, context=None):
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoices = []
if context is None:
context = {}
if data is None:
data = {}
# use key (partner/account, company, currency)
# creates one invoice per key
invoice_grouping = {}
# grouping on partner instead of analytic account
group_by_partner = data.get('group_by_partner', False)
currency_id = False
# prepare for iteration on journal and accounts
for line in self.browse(cr, uid, ids, context=context):
# check if currency is the same in different accounts when grouping by partner
if not currency_id :
currency_id = line.account_id.pricelist_id.currency_id.id
if line.account_id.pricelist_id and line.account_id.pricelist_id.currency_id:
if line.account_id.pricelist_id.currency_id.id != currency_id and group_by_partner:
raise UserError(_('You cannot group invoices having different currencies on different analytic accounts for the same partner.'))
if group_by_partner:
key = (line.account_id.partner_id.id,
line.account_id.company_id.id,
line.account_id.pricelist_id.currency_id.id)
invoice_grouping.setdefault(key, []).append(line)
else:
key = (line.account_id.id,
line.account_id.company_id.id,
line.account_id.pricelist_id.currency_id.id)
invoice_grouping.setdefault(key, []).append(line)
for (key_id, company_id, currency_id), analytic_lines in invoice_grouping.items():
# key_id is either an account.analytic.account, either a res.partner
# don't really care, what's important is the analytic lines that
# will be used to create the invoice lines
partner = analytic_lines[0].account_id.partner_id # will be the same for every line
curr_invoice = self._prepare_cost_invoice(cr, uid, partner, company_id, currency_id, analytic_lines, group_by_partner, context=context)
invoice_context = dict(context,
lang=partner.lang,
force_company=company_id, # set force_company in context so the correct product properties are selected (eg. income account)
company_id=company_id) # set company_id in context, so the correct default journal will be selected
last_invoice = invoice_obj.create(cr, uid, curr_invoice, context=invoice_context)
invoices.append(last_invoice)
# use key (product, uom, user, invoiceable, analytic account, journal type)
# creates one invoice line per key
invoice_lines_grouping = {}
for analytic_line in analytic_lines:
account = analytic_line.account_id
if (not partner) or not (account.pricelist_id):
raise UserError(_('Contract incomplete. Please fill in the Customer and Pricelist fields for %s.') % (account.name))
if not analytic_line.to_invoice:
raise UserError(_('Trying to invoice non invoiceable line for %s.') % (analytic_line.product_id.name))
key = (analytic_line.product_id.id,
analytic_line.product_uom_id.id,
analytic_line.user_id.id,
analytic_line.to_invoice.id,
analytic_line.account_id,
analytic_line.journal_id.type)
invoice_lines_grouping.setdefault(key, []).append(analytic_line)
# finally creates the invoice line
for (product_id, uom, user_id, factor_id, account, journal_type), lines_to_invoice in invoice_lines_grouping.items():
curr_invoice_line = self._prepare_cost_invoice_line(cr, uid, last_invoice,
product_id, uom, user_id, factor_id, account, lines_to_invoice,
journal_type, data, context=context)
invoice_line_obj.create(cr, uid, curr_invoice_line, context=context)
self.write(cr, uid, [l.id for l in analytic_lines], {'invoice_id': last_invoice}, context=context)
invoice_obj.button_reset_taxes(cr, uid, [last_invoice], context)
return invoices
class hr_analytic_timesheet(osv.osv):
_inherit = "hr.analytic.timesheet"
def on_change_account_id(self, cr, uid, ids, account_id, user_id=False):
res = {}
if not account_id:
return res
res.setdefault('value',{})
acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id)
st = acc.to_invoice.id
res['value']['to_invoice'] = st or False
if acc.state=='pending':
res['warning'] = {
'title': 'Warning',
'message': 'The analytic account is in pending state.\nYou should not work on this account !'
}
return res
class account_invoice(osv.osv):
_inherit = "account.invoice"
def _get_analytic_lines(self, cr, uid, ids, context=None):
iml = super(account_invoice, self)._get_analytic_lines(cr, uid, ids, context=context)
inv = self.browse(cr, uid, ids, context=context)[0]
if inv.type == 'in_invoice':
obj_analytic_account = self.pool.get('account.analytic.account')
for il in iml:
if il['account_analytic_id']:
# *-* browse (or refactor to avoid read inside the loop)
to_invoice = obj_analytic_account.read(cr, uid, [il['account_analytic_id']], ['to_invoice'], context=context)[0]['to_invoice']
if to_invoice:
il['analytic_lines'][0][2]['to_invoice'] = to_invoice[0]
return iml
class account_move_line(osv.osv):
_inherit = "account.move.line"
def create_analytic_lines(self, cr, uid, ids, context=None):
res = super(account_move_line, self).create_analytic_lines(cr, uid, ids,context=context)
analytic_line_obj = self.pool.get('account.analytic.line')
for move_line in self.browse(cr, uid, ids, context=context):
#For customer invoice, link analytic line to the invoice so it is not proposed for invoicing in Bill Tasks Work
invoice_id = move_line.invoice and move_line.invoice.type in ('out_invoice','out_refund') and move_line.invoice.id or False
for line in move_line.analytic_lines:
analytic_line_obj.write(cr, uid, line.id, {
'invoice_id': invoice_id,
'to_invoice': line.account_id.to_invoice and line.account_id.to_invoice.id or False
}, context=context)
return res
| agpl-3.0 | -3,447,457,845,333,173,000 | 47.516624 | 287 | 0.59009 | false |
Diksha-Rathi/find-my-place | find-my-place/settings.py | 1 | 2706 | """
Django settings for find-my-place project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '**************************************************'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'find-my-place.urls'
WSGI_APPLICATION = 'find-my-place.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'../static'),)
TEMPLATE_DIRS = ( os.path.join(BASE_DIR, '../templates'),)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
EMAIL_HOST = 'smtp.domain.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = '********'
EMAIL_PORT = 587 | mit | 4,452,227,457,241,505,000 | 23.834862 | 71 | 0.705839 | false |
RustoriaRu/hipster_api | hipster_api/tests/tests_fields_json.py | 1 | 1136 | # -*- coding: utf-8 -*-
from django.test import TestCase
from hipster_api import fields
class FiledJsonTestCase(TestCase):
def get_value(self, obj):
obj.to_python()
obj.to_rules(None)
return obj.value
def test_field(self):
obj = fields.JsonField(default={})
self.assertDictEqual(self.get_value(obj), {})
obj.setitem('asdasd')
self.assertDictEqual(self.get_value(obj), {})
obj.setitem('{"id": 123, "name": "Проверка"}')
self.assertDictEqual(self.get_value(obj), {u'id': 123, u'name': u'Проверка'})
obj.setitem('{"id": 123, "list": [1,2,3] , "name": "Проверка"}')
self.assertDictEqual(self.get_value(obj), {u'id': 123, u'name': u'Проверка', u'list': [1, 2, 3]})
obj.setitem('[1,2,3, "Проверка", "list"]')
self.assertListEqual(self.get_value(obj), [1, 2, 3, u"Проверка", u"list"])
obj.setitem('[1,2,3, "Проверка", {"name": "post"}, "list"]')
self.assertListEqual(self.get_value(obj), [1, 2, 3, u"Проверка", {u'name': u'post'}, u"list"])
| mit | -7,616,783,169,586,005,000 | 33.580645 | 105 | 0.581157 | false |
smerkousdavid/rem-sphinx | logger.py | 1 | 2066 | # -*- coding: utf-8 -*-
"""RemSphinx speech to text logger
This module is designed to just handle logging. There's nothing more to it
Just printing and logging to files
Developed By: David Smerkous
"""
from logging import getLogger, INFO, Formatter, FileHandler, StreamHandler
from os.path import dirname, realpath, isdir, exists
from os import makedirs
from time import strftime
from sys import stdout
# Define logging characteristics
LOGGER_NAME = "RemSphinx"
LOGGER_LEVEL = INFO
LOGGER_FORMAT = Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
LOGGER_FILE_PATH = "%s/logs" % dirname(realpath(__file__))
LOGGER_FILE_DATE = strftime("%d-%m-%y--%H-%M-%S")
LOGGER_FILE_FORMAT = "%s/%s.log" % (LOGGER_FILE_PATH, LOGGER_FILE_DATE)
if not isdir(LOGGER_FILE_PATH):
print("Creating new log location %s..." % LOGGER_FILE_PATH),
makedirs(LOGGER_FILE_PATH)
print("Done")
if not exists(LOGGER_FILE_FORMAT):
print("Creating new log file %s..." % LOGGER_FILE_FORMAT),
open(LOGGER_FILE_FORMAT, 'w').close()
print("Done")
LOGGER_FILE_HANDLER = FileHandler(LOGGER_FILE_FORMAT)
LOGGER_FILE_HANDLER.setFormatter(LOGGER_FORMAT)
LOGGER_CONSOLE_HANDLER = StreamHandler(stdout)
LOGGER_CONSOLE_HANDLER.setFormatter(LOGGER_FORMAT)
LOGGER = getLogger(LOGGER_NAME)
LOGGER.addHandler(LOGGER_FILE_HANDLER)
# Uncomment when not using tornado, which already has a console handler
# LOGGER.addHandler(LOGGER_CONSOLE_HANDLER)
class logger(object):
def __init__(self, name_space, logger_level=LOGGER_LEVEL):
LOGGER.setLevel(logger_level)
LOGGER.debug("Starting logger!")
self._name_space = name_space
def __base_log(self, to_log):
return "|%s|: %s" % (self._name_space, str(to_log))
def info(self, to_log):
LOGGER.info(self.__base_log(to_log))
def debug(self, to_log):
LOGGER.debug(self.__base_log(to_log))
def warning(self, to_log):
LOGGER.warning(self.__base_log(to_log))
def error(self, to_log):
LOGGER.error(self.__base_log(to_log))
| gpl-3.0 | -8,818,605,115,806,239,000 | 30.784615 | 96 | 0.693611 | false |
cabanm/project-euler | myMath.py | 1 | 3540 | from time import time
from math import sqrt
# Time some code
def timeIt(code):
start = time()
exec code
return time()-start
# Find primes up to a certain number and output a dictionary with them as keys
def primes(top):
sieve = [0]*top
for m in range(2, top+1):
if sieve[m-1] == 0: # if m prime
for n in range(m, top//m+1):
p = m*n
sieve[p-1] = 1
primes = {}
for n in range(2,top+1):
if sieve[n-1] == 0: primes[n] = 0
return primes
# Find Pythagorean triplets with short sides up to and equal to max side
def pythTrips(maxSide):
triples = []
for a in range(1, maxSide+1):
for b in range(1, a):
c = sqrt(a**2+b**2)
if c == int(c): triples.append((a,b,int(c)))
return triples
# Find Pythagorean triplets with max perimeter specified
def pythTripsPerim(p):
triples = []
for a in range(1, p):
for b in range(1, a):
c = sqrt(a**2+b**2)
if c == int(c) and a+b+c <= p: triples.append((a,b,int(c)))
return triples
# Checks if the input string is a pandigital number
def isPandigital(n):
if n.count('0') != 0: return 0
for digit in range(1,10):
if n.count(str(digit)) > 1:
return 0
return 1
# Checks if input number is prime
def isPrime(n):
n = abs(n)
if n==0 or n==1: return 0
#print 'Checking primality:', n
maxm = int(sqrt(n))+1
for d in range(2, maxm):
#if (d*100//maxm)%10 == 0: print d/1.0/maxm
if n%d == 0: return 0
return 1
# Returns the prime factors of a number given a set of primes
def pFactors(n,primes):
i = 0
divs = []
while n != 1:
p = primes[i]
if n%p == 0:
divs.append(p)
n = n/p
i = 0
else:
i += 1
return divs
# Returns the number of unique prime factors for numbers up to and incl. top
def pFactors2(top):
sieve = [0]*top
sieve[0] = 1
for m in range(2, top+1):
if sieve[m-1] == 0: # if m is prime
for n in range(2, top//m+1):
p = m*n
sieve[p-1] += 1
return sieve
# Checks if a number is pentagonal
def isPent(n):
d = sqrt(1.+24*n)
if d%1 == 0 and (d+1)%6 == 0: return 1
return 0
# Returns a list of the amount of each digit a number has
# Note: a method with purely mathematical operations took longer than using strings!!!!
def digSig(n):
sig = [0]*10
for d in str(n):
sig[int(d)] += 1
return sig
# Returns the set of digits in a number
def digits(n):
return set([int(ch) for ch in str(n)])
# Returns the number of digits in a number
def digNum(n):
return len(str(n))
# Returns factorial of number
def factorial(n):
out=1
for x in range(1,abs(n)+1):
out = out*x
return out
# The combinatoric formula, that will work well for large n and reasonable r
def nCr(n,r):
if n<r:
return "n must be leq r"
out=1
for x in range(n-r+1,n+1):
out = out*x
return out/factorial(r)
# Returns all possible combinations of a list
def combinations(s): # Rename to subsets!!!!!
yield []
for i, d in enumerate(s):
for comb in combinations(s[i+1:]):
yield [d] + comb
# Returns whether a number is a palindrome
def isPalindromic(n):
n=str(n)
if n==''.join([n[-i-1] for i in range(len(n))]): return 1
return 0
# Returns the reverse of an integer
def reverse(n):
n=str(n)
return int(''.join([n[-i-1] for i in range(len(n))]))
# Returns the digital sum of a number
def digSum(n):
total = 0
for m,n in enumerate(digSig(n)):
total += m*n
return total
# Returns whether a number is square
def isSquare(n):
# Perfect squares end in 0, 1, 4, 9 in hexadecimal
# Thus we check this first, then apply general method
if hex(n)[-1] in ['0','1','4','9']:
if int(n**0.5)**2 == n: return 1
return 0
| gpl-2.0 | 9,074,495,308,821,205,000 | 21.987013 | 87 | 0.641243 | false |
kiliakis/BLonD | beams/beams.py | 1 | 5676 |
# Copyright 2015 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Module containing the fundamental beam class with methods to compute beam statistics**
:Authors: **Danilo Quartullo**, **Helga Timko**, **ALexandre Lasheen**
'''
from __future__ import division
import numpy as np
from trackers.utilities import is_in_separatrix
class Beam(object):
'''
*Object containing the beam coordinates and beam properties such as mass,
charge, synchronous energy, momentum, etc.
The beam coordinate 'dt' is defined as the particle arrival time to the RF
station w.r.t. the reference time that is the sum of turns.
The beam coordiate 'dE' is defined as the particle energy offset w.r.t. the
energy of the synchronous particle.*
'''
def __init__(self, GeneralParameters, n_macroparticles, intensity):
#: *Import particle mass [eV] (from GeneralParameters)*
self.mass = GeneralParameters.mass
#: *Import particle charge [e] (from GeneralParameters)*
self.charge = GeneralParameters.charge
#: *Import synchronous relativistic beta [1] (from GeneralParameters)*
self.beta = GeneralParameters.beta[0][0]
#: *Import synchronous relativistic gamma [1] (from GeneralParameters)*
self.gamma = GeneralParameters.gamma[0][0]
#: *Import synchronous total energy [eV] (from GeneralParameters)*
self.energy = GeneralParameters.energy[0][0]
#: *Import synchronous momentum [eV] (from GeneralParameters)*
self.momentum = GeneralParameters.momentum[0][0]
#: *Import ring radius [m] (from GeneralParameters)*
#self.ring_radius = GeneralParameters.ring_radius
#: | *Beam arrival time with respect to reference time [s]*
self.dt = np.zeros([n_macroparticles])
#: | *Beam energy offset with respect to synchronous energy [eV]*
self.dE = np.zeros([n_macroparticles])
#: | *Average beam arrival time [s]*
self.mean_dt = 0
#: | *Average beam energy offset [eV]*
self.mean_dE = 0
#: | *Standard deviation of beam arrival time [s]*
self.sigma_dt = 0
#: | *Standard deviation of beam energy offset [eV]*
self.sigma_dE = 0
#: | *Total beam intensity [1]*
self.intensity = intensity
#: | *Total number of macro-particles in the beam [1]*
self.n_macroparticles = int(n_macroparticles)
#: | *This ratio should be in general constant during the simulation*
self.ratio = self.intensity/self.n_macroparticles
#: | *Number of macro-particles marked as 'lost' [1]*
#: | *Losses defined via loss mechanisms chosen by user*
self.n_macroparticles_lost = 0
#: | *Number of transmitted macro-particles (= total - lost) [1]*
#self.n_macroparticles_alive = self.n_macroparticles - self.n_macroparticles_lost
#: | *Unique macro-particle ID number; zero if particle is 'lost'*
self.id = np.arange(1, self.n_macroparticles + 1, dtype=int)
@property
def n_macroparticles_alive(self):
'''
*Number of transmitted macro-particles.*
'''
return self.n_macroparticles - self.n_macroparticles_lost
def statistics(self):
'''
*Calculation of the mean and standard deviation of beam coordinates,
as well as beam emittance using different definitions.*
'''
# Statistics only for particles that are not flagged as lost
itemindex = np.where(self.id != 0)[0]
self.mean_dt = np.mean(self.dt[itemindex])
self.mean_dE = np.mean(self.dE[itemindex])
self.sigma_dt = np.std(self.dt[itemindex])
self.sigma_dE = np.std(self.dE[itemindex])
# R.m.s. emittance in Gaussian approximation
self.epsn_rms_l = np.pi*self.sigma_dE*self.sigma_dt # in eVs
# Losses
self.n_macroparticles_lost = len( np.where( self.id == 0 )[0] )
def losses_separatrix(self, GeneralParameters, RFSectionParameters, Beam):
'''
*Beam losses based on separatrix.*
'''
itemindex = np.where(is_in_separatrix(GeneralParameters,
RFSectionParameters,
Beam, self.dt, self.dE)
== False)[0]
if itemindex.size != 0:
self.id[itemindex] = 0
def losses_longitudinal_cut(self, dt_min, dt_max):
'''
*Beam losses based on longitudinal cuts.*
'''
itemindex = np.where( (self.dt - dt_min)*(dt_max - self.dt) < 0 )[0]
if itemindex.size != 0:
self.id[itemindex] = 0
def losses_energy_cut(self, dE_min, dE_max):
'''
*Beam losses based on energy cuts, e.g. on collimators.*
'''
itemindex = np.where( (self.dE - dE_min)*(dE_max - self.dE) < 0 )[0]
if itemindex.size != 0:
self.id[itemindex] = 0
| gpl-3.0 | -2,657,791,008,127,920,000 | 35.152866 | 90 | 0.586328 | false |
sternshus/arelle2.7 | svr-2.7/arelle/CntlrCmdLine.py | 1 | 64081 | u'''
Created on Oct 3, 2010
This module is Arelle's controller in command line non-interactive mode
(This module can be a pattern for custom integration of Arelle into an application.)
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
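# Illustrative command line (hedged: the entry script name arelleCmdLine.py and the file
# path are assumptions, not fixed by this module):
#   python arelleCmdLine.py -f /path/to/instance.xml --validate --logFile validation.log
# The options accepted are those declared in parseAndRun() below.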
from arelle import PythonUtil # define 2.x or 3.x string types
import gettext, time, datetime, os, shlex, sys, traceback, fnmatch
from optparse import OptionParser, SUPPRESS_HELP
import re
from arelle import (Cntlr, FileSource, ModelDocument, XmlUtil, Version,
ViewFileDTS, ViewFileFactList, ViewFileFactTable, ViewFileConcepts,
ViewFileFormulae, ViewFileRelationshipSet, ViewFileTests, ViewFileRssFeed,
ViewFileRoleTypes,
ModelManager)
from arelle.ModelValue import qname
from arelle.Locale import format_string
from arelle.ModelFormulaObject import FormulaOptions
from arelle import PluginManager
from arelle.PluginManager import pluginClassMethods
from arelle.WebCache import proxyTuple
import logging
from lxml import etree
win32file = None
def main():
u"""Main program to initiate application from command line or as a separate process (e.g, java Runtime.getRuntime().exec). May perform
a command line request, or initiate a web server on specified local port.
:param argv: Command line arguments. (Currently supported arguments can be displayed by the parameter *--help*.)
:type message: [str]
"""
envArgs = os.getenv(u"ARELLE_ARGS")
if envArgs:
args = shlex.split(envArgs)
else:
args = sys.argv[1:]
gettext.install(u"arelle") # needed for options messages
parseAndRun(args)
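# Example (a sketch; the file path is hypothetical): the same arguments may be supplied
# through the ARELLE_ARGS environment variable instead of the command line:
#
#   import os
#   os.environ["ARELLE_ARGS"] = "-f /path/to/instance.xml --validate"
#   main()   # shlex.split() expands the string into the argument list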
def xbrlTurtleGraphModel(furi='http://www.sec.gov/Archives/edgar/data/66740/000155837015002024/mmm-20150930.xml'):
    u"""Convenience entry point: loads the XBRL instance at furi (the default is a sample SEC
    EDGAR filing), using the efm-strict-all-years disclosure system, and stores the model as an
    RDF Turtle graph (turtle.rdf) through the xbrlDB plug-in."""
    args = ['--plugins', 'xbrlDB', '-f', furi, '--disclosureSystem', 'efm-strict-all-years', '--store-to-XBRL-DB',
            'rdfTurtleFile,None,None,None,turtle.rdf,None,rdfDB']
    gettext.install("arelle") # needed for options messages
    return parseAndRun(args)
def wsgiApplication():
return parseAndRun([u"--webserver=::wsgi"])
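# Example (a sketch, assuming parseAndRun returns the WSGI callable when given
# --webserver=::wsgi): a minimal WSGI entry module for a server such as gunicorn
# could expose the application object as
#
#   from arelle.CntlrCmdLine import wsgiApplication
#   application = wsgiApplication()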
def parseAndRun(args):
u"""interface used by Main program and py.test (arelle_test.py)
"""
try:
from arelle import webserver
hasWebServer = True
except ImportError:
hasWebServer = False
    cntlr = CntlrCmdLine() # need controller for plug-ins to be loaded
usage = u"usage: %prog [options]"
parser = OptionParser(usage,
version=u"Arelle(r) {0}bit {1}".format(cntlr.systemWordSize, Version.version),
conflict_handler=u"resolve") # allow reloading plug-in options without errors
parser.add_option(u"-f", u"--file", dest=u"entrypointFile",
help=_(u"FILENAME is an entry point, which may be "
u"an XBRL instance, schema, linkbase file, "
u"inline XBRL instance, testcase file, "
u"testcase index file. FILENAME may be "
u"a local file or a URI to a web located file."))
parser.add_option(u"--username", dest=u"username",
help=_(u"user name if needed (with password) for web file retrieval"))
parser.add_option(u"--password", dest=u"password",
help=_(u"password if needed (with user name) for web retrieval"))
# special option for web interfaces to suppress closing an opened modelXbrl
parser.add_option(u"--keepOpen", dest=u"keepOpen", action=u"store_true", help=SUPPRESS_HELP)
parser.add_option(u"-i", u"--import", dest=u"importFiles",
help=_(u"FILENAME is a list of files to import to the DTS, such as "
u"additional formula or label linkbases. "
u"Multiple file names are separated by a '|' character. "))
parser.add_option(u"-d", u"--diff", dest=u"diffFile",
help=_(u"FILENAME is a second entry point when "
u"comparing (diffing) two DTSes producing a versioning report."))
parser.add_option(u"-r", u"--report", dest=u"versReportFile",
help=_(u"FILENAME is the filename to save as the versioning report."))
parser.add_option(u"-v", u"--validate",
action=u"store_true", dest=u"validate",
help=_(u"Validate the file according to the entry "
u"file type. If an XBRL file, it is validated "
u"according to XBRL validation 2.1, calculation linkbase validation "
u"if either --calcDecimals or --calcPrecision are specified, and "
u"SEC EDGAR Filing Manual (if --efm selected) or Global Filer Manual "
u"disclosure system validation (if --gfm=XXX selected). "
u"If a test suite or testcase, the test case variations "
u"are individually so validated. "
u"If formulae are present they will be validated and run unless --formula=none is specified. "
))
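    # Example (illustrative; the file path is hypothetical): XBRL 2.1 validation with
    # calculation linkbase checking, driven programmatically through parseAndRun:
    #   parseAndRun(["-f", "/path/to/instance.xml", "--validate", "--calcDecimals"])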
parser.add_option(u"--calcDecimals", action=u"store_true", dest=u"calcDecimals",
help=_(u"Specify calculation linkbase validation inferring decimals."))
parser.add_option(u"--calcdecimals", action=u"store_true", dest=u"calcDecimals", help=SUPPRESS_HELP)
parser.add_option(u"--calcPrecision", action=u"store_true", dest=u"calcPrecision",
help=_(u"Specify calculation linkbase validation inferring precision."))
parser.add_option(u"--calcprecision", action=u"store_true", dest=u"calcPrecision", help=SUPPRESS_HELP)
parser.add_option(u"--efm", action=u"store_true", dest=u"validateEFM",
help=_(u"Select Edgar Filer Manual (U.S. SEC) disclosure system validation (strict)."))
parser.add_option(u"--gfm", action=u"store", dest=u"disclosureSystemName", help=SUPPRESS_HELP)
parser.add_option(u"--disclosureSystem", action=u"store", dest=u"disclosureSystemName",
help=_(u"Specify a disclosure system name and"
u" select disclosure system validation. "
u"Enter --disclosureSystem=help for list of names or help-verbose for list of names and descriptions. "))
parser.add_option(u"--disclosuresystem", action=u"store", dest=u"disclosureSystemName", help=SUPPRESS_HELP)
parser.add_option(u"--hmrc", action=u"store_true", dest=u"validateHMRC",
help=_(u"Select U.K. HMRC disclosure system validation."))
parser.add_option(u"--utr", action=u"store_true", dest=u"utrValidate",
help=_(u"Select validation with respect to Unit Type Registry."))
parser.add_option(u"--utrUrl", action=u"store", dest=u"utrUrl",
help=_(u"Override disclosure systems Unit Type Registry location (URL or file path)."))
parser.add_option(u"--utrurl", action=u"store", dest=u"utrUrl", help=SUPPRESS_HELP)
parser.add_option(u"--infoset", action=u"store_true", dest=u"infosetValidate",
help=_(u"Select validation with respect testcase infosets."))
parser.add_option(u"--labelLang", action=u"store", dest=u"labelLang",
help=_(u"Language for labels in following file options (override system settings)"))
parser.add_option(u"--labellang", action=u"store", dest=u"labelLang", help=SUPPRESS_HELP)
parser.add_option(u"--labelRole", action=u"store", dest=u"labelRole",
help=_(u"Label role for labels in following file options (instead of standard label)"))
parser.add_option(u"--labelrole", action=u"store", dest=u"labelRole", help=SUPPRESS_HELP)
parser.add_option(u"--DTS", u"--csvDTS", action=u"store", dest=u"DTSFile",
help=_(u"Write DTS tree into FILE (may be .csv or .html)"))
parser.add_option(u"--facts", u"--csvFacts", action=u"store", dest=u"factsFile",
help=_(u"Write fact list into FILE"))
parser.add_option(u"--factListCols", action=u"store", dest=u"factListCols",
help=_(u"Columns for fact list file"))
parser.add_option(u"--factTable", u"--csvFactTable", action=u"store", dest=u"factTableFile",
help=_(u"Write fact table into FILE"))
parser.add_option(u"--concepts", u"--csvConcepts", action=u"store", dest=u"conceptsFile",
help=_(u"Write concepts into FILE"))
parser.add_option(u"--pre", u"--csvPre", action=u"store", dest=u"preFile",
help=_(u"Write presentation linkbase into FILE"))
parser.add_option(u"--cal", u"--csvCal", action=u"store", dest=u"calFile",
help=_(u"Write calculation linkbase into FILE"))
parser.add_option(u"--dim", u"--csvDim", action=u"store", dest=u"dimFile",
help=_(u"Write dimensions (of definition) linkbase into FILE"))
parser.add_option(u"--formulae", u"--htmlFormulae", action=u"store", dest=u"formulaeFile",
help=_(u"Write formulae linkbase into FILE"))
parser.add_option(u"--viewArcrole", action=u"store", dest=u"viewArcrole",
help=_(u"Write linkbase relationships for viewArcrole into viewFile"))
parser.add_option(u"--viewarcrole", action=u"store", dest=u"viewArcrole", help=SUPPRESS_HELP)
parser.add_option(u"--viewFile", action=u"store", dest=u"viewFile",
help=_(u"Write linkbase relationships for viewArcrole into viewFile"))
parser.add_option(u"--viewfile", action=u"store", dest=u"viewFile", help=SUPPRESS_HELP)
parser.add_option(u"--roleTypes", action=u"store", dest=u"roleTypesFile",
help=_(u"Write defined role types into FILE"))
parser.add_option(u"--roletypes", action=u"store", dest=u"roleTypesFile", help=SUPPRESS_HELP)
parser.add_option(u"--arcroleTypes", action=u"store", dest=u"arcroleTypesFile",
help=_(u"Write defined arcrole types into FILE"))
parser.add_option(u"--arcroletypes", action=u"store", dest=u"arcroleTypesFile", help=SUPPRESS_HELP)
parser.add_option(u"--testReport", u"--csvTestReport", action=u"store", dest=u"testReport",
help=_(u"Write test report of validation (of test cases) into FILE"))
parser.add_option(u"--testreport", u"--csvtestreport", action=u"store", dest=u"testReport", help=SUPPRESS_HELP)
parser.add_option(u"--testReportCols", action=u"store", dest=u"testReportCols",
help=_(u"Columns for test report file"))
parser.add_option(u"--testreportcols", action=u"store", dest=u"testReportCols", help=SUPPRESS_HELP)
parser.add_option(u"--rssReport", action=u"store", dest=u"rssReport",
help=_(u"Write RSS report into FILE"))
parser.add_option(u"--rssreport", action=u"store", dest=u"rssReport", help=SUPPRESS_HELP)
parser.add_option(u"--rssReportCols", action=u"store", dest=u"rssReportCols",
help=_(u"Columns for RSS report file"))
parser.add_option(u"--rssreportcols", action=u"store", dest=u"rssReportCols", help=SUPPRESS_HELP)
parser.add_option(u"--skipDTS", action=u"store_true", dest=u"skipDTS",
help=_(u"Skip DTS activities (loading, discovery, validation), useful when an instance needs only to be parsed."))
parser.add_option(u"--skipdts", action=u"store_true", dest=u"skipDTS", help=SUPPRESS_HELP)
parser.add_option(u"--skipLoading", action=u"store", dest=u"skipLoading",
help=_(u"Skip loading discovered or schemaLocated files matching pattern (unix-style file name patterns separated by '|'), useful when not all linkbases are needed."))
parser.add_option(u"--skiploading", action=u"store", dest=u"skipLoading", help=SUPPRESS_HELP)
parser.add_option(u"--logFile", action=u"store", dest=u"logFile",
help=_(u"Write log messages into file, otherwise they go to standard output. "
u"If file ends in .xml it is xml-formatted, otherwise it is text. "))
parser.add_option(u"--logfile", action=u"store", dest=u"logFile", help=SUPPRESS_HELP)
parser.add_option(u"--logFormat", action=u"store", dest=u"logFormat",
help=_(u"Logging format for messages capture, otherwise default is \"[%(messageCode)s] %(message)s - %(file)s\"."))
parser.add_option(u"--logformat", action=u"store", dest=u"logFormat", help=SUPPRESS_HELP)
parser.add_option(u"--logLevel", action=u"store", dest=u"logLevel",
help=_(u"Minimum level for messages capture, otherwise the message is ignored. "
u"Current order of levels are debug, info, info-semantic, warning, warning-semantic, warning, assertion-satisfied, inconsistency, error-semantic, assertion-not-satisfied, and error. "))
parser.add_option(u"--loglevel", action=u"store", dest=u"logLevel", help=SUPPRESS_HELP)
parser.add_option(u"--logLevelFilter", action=u"store", dest=u"logLevelFilter",
help=_(u"Regular expression filter for logLevel. "
u"(E.g., to not match *-semantic levels, logLevelFilter=(?!^.*-semantic$)(.+). "))
parser.add_option(u"--loglevelfilter", action=u"store", dest=u"logLevelFilter", help=SUPPRESS_HELP)
parser.add_option(u"--logCodeFilter", action=u"store", dest=u"logCodeFilter",
help=_(u"Regular expression filter for log message code."))
parser.add_option(u"--logcodefilter", action=u"store", dest=u"logCodeFilter", help=SUPPRESS_HELP)
parser.add_option(u"--statusPipe", action=u"store", dest=u"statusPipe", help=SUPPRESS_HELP)
parser.add_option(u"--outputAttribution", action=u"store", dest=u"outputAttribution", help=SUPPRESS_HELP)
parser.add_option(u"--outputattribution", action=u"store", dest=u"outputAttribution", help=SUPPRESS_HELP)
parser.add_option(u"--showOptions", action=u"store_true", dest=u"showOptions", help=SUPPRESS_HELP)
parser.add_option(u"--parameters", action=u"store", dest=u"parameters", help=_(u"Specify parameters for formula and validation (name=value[,name=value])."))
parser.add_option(u"--parameterSeparator", action=u"store", dest=u"parameterSeparator", help=_(u"Specify parameters separator string (if other than comma)."))
parser.add_option(u"--parameterseparator", action=u"store", dest=u"parameterSeparator", help=SUPPRESS_HELP)
parser.add_option(u"--formula", choices=(u"validate", u"run", u"none"), dest=u"formulaAction",
help=_(u"Specify formula action: "
u"validate - validate only, without running, "
u"run - validate and run, or "
u"none - prevent formula validation or running when also specifying -v or --validate. "
u"if this option is not specified, -v or --validate will validate and run formulas if present"))
parser.add_option(u"--formulaParamExprResult", action=u"store_true", dest=u"formulaParamExprResult", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulaparamexprresult", action=u"store_true", dest=u"formulaParamExprResult", help=SUPPRESS_HELP)
parser.add_option(u"--formulaParamInputValue", action=u"store_true", dest=u"formulaParamInputValue", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulaparaminputvalue", action=u"store_true", dest=u"formulaParamInputValue", help=SUPPRESS_HELP)
parser.add_option(u"--formulaCallExprSource", action=u"store_true", dest=u"formulaCallExprSource", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulacallexprsource", action=u"store_true", dest=u"formulaCallExprSource", help=SUPPRESS_HELP)
parser.add_option(u"--formulaCallExprCode", action=u"store_true", dest=u"formulaCallExprCode", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulacallexprcode", action=u"store_true", dest=u"formulaCallExprCode", help=SUPPRESS_HELP)
parser.add_option(u"--formulaCallExprEval", action=u"store_true", dest=u"formulaCallExprEval", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulacallexpreval", action=u"store_true", dest=u"formulaCallExprEval", help=SUPPRESS_HELP)
parser.add_option(u"--formulaCallExprResult", action=u"store_true", dest=u"formulaCallExprResult", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulacallexprtesult", action=u"store_true", dest=u"formulaCallExprResult", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarSetExprEval", action=u"store_true", dest=u"formulaVarSetExprEval", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarsetexpreval", action=u"store_true", dest=u"formulaVarSetExprEval", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarSetExprResult", action=u"store_true", dest=u"formulaVarSetExprResult", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarsetexprresult", action=u"store_true", dest=u"formulaVarSetExprResult", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarSetTiming", action=u"store_true", dest=u"timeVariableSetEvaluation", help=_(u"Specify showing times of variable set evaluation."))
parser.add_option(u"--formulavarsettiming", action=u"store_true", dest=u"timeVariableSetEvaluation", help=SUPPRESS_HELP)
parser.add_option(u"--formulaAsserResultCounts", action=u"store_true", dest=u"formulaAsserResultCounts", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulaasserresultcounts", action=u"store_true", dest=u"formulaAsserResultCounts", help=SUPPRESS_HELP)
parser.add_option(u"--formulaSatisfiedAsser", action=u"store_true", dest=u"formulaSatisfiedAsser", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulasatisfiedasser", action=u"store_true", dest=u"formulaSatisfiedAsser", help=SUPPRESS_HELP)
parser.add_option(u"--formulaUnsatisfiedAsser", action=u"store_true", dest=u"formulaUnsatisfiedAsser", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulaunsatisfiedasser", action=u"store_true", dest=u"formulaUnsatisfiedAsser", help=SUPPRESS_HELP)
parser.add_option(u"--formulaUnsatisfiedAsserError", action=u"store_true", dest=u"formulaUnsatisfiedAsserError", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulaunsatisfiedassererror", action=u"store_true", dest=u"formulaUnsatisfiedAsserError", help=SUPPRESS_HELP)
parser.add_option(u"--formulaFormulaRules", action=u"store_true", dest=u"formulaFormulaRules", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulaformularules", action=u"store_true", dest=u"formulaFormulaRules", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarsOrder", action=u"store_true", dest=u"formulaVarsOrder", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarsorder", action=u"store_true", dest=u"formulaVarsOrder", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarExpressionSource", action=u"store_true", dest=u"formulaVarExpressionSource", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarexpressionsource", action=u"store_true", dest=u"formulaVarExpressionSource", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarExpressionCode", action=u"store_true", dest=u"formulaVarExpressionCode", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarexpressioncode", action=u"store_true", dest=u"formulaVarExpressionCode", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarExpressionEvaluation", action=u"store_true", dest=u"formulaVarExpressionEvaluation", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarexpressionevaluation", action=u"store_true", dest=u"formulaVarExpressionEvaluation", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarExpressionResult", action=u"store_true", dest=u"formulaVarExpressionResult", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarexpressionresult", action=u"store_true", dest=u"formulaVarExpressionResult", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarFilterWinnowing", action=u"store_true", dest=u"formulaVarFilterWinnowing", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarfilterwinnowing", action=u"store_true", dest=u"formulaVarFilterWinnowing", help=SUPPRESS_HELP)
parser.add_option(u"--formulaVarFiltersResult", action=u"store_true", dest=u"formulaVarFiltersResult", help=_(u"Specify formula tracing."))
parser.add_option(u"--formulavarfiltersresult", action=u"store_true", dest=u"formulaVarFiltersResult", help=SUPPRESS_HELP)
parser.add_option(u"--formulaRunIDs", action=u"store", dest=u"formulaRunIDs", help=_(u"Specify formula/assertion IDs to run, separated by a '|' character."))
parser.add_option(u"--formularunids", action=u"store", dest=u"formulaRunIDs", help=SUPPRESS_HELP)
parser.add_option(u"--uiLang", action=u"store", dest=u"uiLang",
help=_(u"Language for user interface (override system settings, such as program messages). Does not save setting."))
parser.add_option(u"--uilang", action=u"store", dest=u"uiLang", help=SUPPRESS_HELP)
parser.add_option(u"--proxy", action=u"store", dest=u"proxy",
help=_(u"Modify and re-save proxy settings configuration. "
u"Enter 'system' to use system proxy setting, 'none' to use no proxy, "
u"'http://[user[:password]@]host[:port]' "
u" (e.g., http://192.168.1.253, http://example.com:8080, http://joe:[email protected]:8080), "
u" or 'show' to show current setting, ." ))
parser.add_option(u"--internetConnectivity", choices=(u"online", u"offline"), dest=u"internetConnectivity",
help=_(u"Specify internet connectivity: online or offline"))
parser.add_option(u"--internetconnectivity", action=u"store", dest=u"internetConnectivity", help=SUPPRESS_HELP)
parser.add_option(u"--internetTimeout", type=u"int", dest=u"internetTimeout",
help=_(u"Specify internet connection timeout in seconds (0 means unlimited)."))
parser.add_option(u"--internettimeout", type=u"int", action=u"store", dest=u"internetTimeout", help=SUPPRESS_HELP)
parser.add_option(u"--internetRecheck", choices=(u"weekly", u"daily", u"never"), dest=u"internetRecheck",
help=_(u"Specify rechecking cache files (weekly is default)"))
parser.add_option(u"--internetrecheck", choices=(u"weekly", u"daily", u"never"), action=u"store", dest=u"internetRecheck", help=SUPPRESS_HELP)
parser.add_option(u"--internetLogDownloads", action=u"store_true", dest=u"internetLogDownloads",
help=_(u"Log info message for downloads to web cache."))
parser.add_option(u"--internetlogdownloads", action=u"store_true", dest=u"internetLogDownloads", help=SUPPRESS_HELP)
parser.add_option(u"--xdgConfigHome", action=u"store", dest=u"xdgConfigHome",
help=_(u"Specify non-standard location for configuration and cache files (overrides environment parameter XDG_CONFIG_HOME)."))
parser.add_option(u"--plugins", action=u"store", dest=u"plugins",
help=_(u"Modify plug-in configuration. "
u"Re-save unless 'temp' is in the module list. "
u"Enter 'show' to show current plug-in configuration. "
u"Commands show, and module urls are '|' separated: "
u"+url to add plug-in by its url or filename, ~name to reload a plug-in by its name, -name to remove a plug-in by its name, "
u"relative URLs are relative to installation plug-in directory, "
u" (e.g., '+http://arelle.org/files/hello_web.py', '+C:\Program Files\Arelle\examples\plugin\hello_dolly.py' to load, "
u"or +../examples/plugin/hello_dolly.py for relative use of examples directory, "
u"~Hello Dolly to reload, -Hello Dolly to remove). "
u"If + is omitted from .py file nothing is saved (same as temp). "
u"Packaged plug-in urls are their directory's url. " ))
parser.add_option(u"--packages", action=u"store", dest=u"packages",
help=_(u"Modify taxonomy packages configuration. "
u"Re-save unless 'temp' is in the module list. "
u"Enter 'show' to show current packages configuration. "
u"Commands show, and module urls are '|' separated: "
u"+url to add package by its url or filename, ~name to reload package by its name, -name to remove a package by its name, "
u"URLs are full absolute paths. "
u"If + is omitted from package file nothing is saved (same as temp). " ))
parser.add_option(u"--packageManifestName", action=u"store", dest=u"packageManifestName",
help=_(u"Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). "
u"Uses unix file name pattern matching. "
u"Multiple manifest files are supported in archive (such as oasis catalogs). "
u"(Replaces search for either .taxonomyPackage.xml or catalog.xml). " ))
parser.add_option(u"--abortOnMajorError", action=u"store_true", dest=u"abortOnMajorError", help=_(u"Abort process on major error, such as when load is unable to find an entry or discovered file."))
parser.add_option(u"--showEnvironment", action=u"store_true", dest=u"showEnvironment", help=_(u"Show Arelle's config and cache directory and host OS environment parameters."))
parser.add_option(u"--showenvironment", action=u"store_true", dest=u"showEnvironment", help=SUPPRESS_HELP)
parser.add_option(u"--collectProfileStats", action=u"store_true", dest=u"collectProfileStats", help=_(u"Collect profile statistics, such as timing of validation activities and formulae."))
if hasWebServer:
parser.add_option(u"--webserver", action=u"store", dest=u"webserver",
help=_(u"start web server on host:port[:server] for REST and web access, e.g., --webserver locahost:8080, "
u"or specify nondefault a server name, such as cherrypy, --webserver locahost:8080:cherrypy. "
u"(It is possible to specify options to be defaults for the web server, such as disclosureSystem and validations, but not including file names.) "))
pluginOptionsIndex = len(parser.option_list)
# install any dynamic plugins so their command line options can be parsed if present
for i, arg in enumerate(args):
if arg.startswith(u'--plugins'):
if len(arg) > 9 and arg[9] == u'=':
preloadPlugins = arg[10:]
elif i < len(args) - 1:
preloadPlugins = args[i+1]
else:
preloadPlugins = u""
for pluginCmd in preloadPlugins.split(u'|'):
cmd = pluginCmd.strip()
if cmd not in (u"show", u"temp") and len(cmd) > 0 and cmd[0] not in (u'-', u'~', u'+'):
moduleInfo = PluginManager.addPluginModule(cmd)
if moduleInfo:
cntlr.preloadedPlugins[cmd] = moduleInfo
PluginManager.reset()
break
# add plug-in options
for optionsExtender in pluginClassMethods(u"CntlrCmdLine.Options"):
optionsExtender(parser)
pluginLastOptionIndex = len(parser.option_list)
parser.add_option(u"-a", u"--about",
action=u"store_true", dest=u"about",
help=_(u"Show product version, copyright, and license."))
if not args and cntlr.isGAE:
args = [u"--webserver=::gae"]
elif cntlr.isCGI:
args = [u"--webserver=::cgi"]
elif cntlr.isMSW:
# if called from java on Windows any empty-string arguments are lost, see:
# http://bugs.sun.com/view_bug.do?bug_id=6518827
# insert needed arguments
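        # Illustrative example of the repair below: java handing over
        # ["--logFile", "--validate"] (the empty value meant for --logFile was lost)
        # is rebuilt as ["--logFile", "", "--validate"] so optparse still sees a value.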
sourceArgs = args
args = []
namedOptions = set()
optionsWithArg = set()
for option in parser.option_list:
names = unicode(option).split(u'/')
namedOptions.update(names)
if option.action == u"store":
optionsWithArg.update(names)
priorArg = None
for arg in sourceArgs:
if priorArg in optionsWithArg and arg in namedOptions:
# probable java/MSFT interface bug 6518827
args.append(u'') # add empty string argument
args.append(arg)
priorArg = arg
(options, leftoverArgs) = parser.parse_args(args)
if options.about:
print _(u"\narelle(r) {0}bit {1}\n\n"
u"An open source XBRL platform\n"
u"(c) 2010-2014 Mark V Systems Limited\n"
u"All rights reserved\nhttp://www.arelle.org\[email protected]\n\n"
u"Licensed under the Apache License, Version 2.0 (the \"License\"); "
u"you may not \nuse this file except in compliance with the License. "
u"You may obtain a copy \nof the License at "
u"'http://www.apache.org/licenses/LICENSE-2.0'\n\n"
u"Unless required by applicable law or agreed to in writing, software \n"
u"distributed under the License is distributed on an \"AS IS\" BASIS, \n"
u"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n"
u"See the License for the specific language governing permissions and \n"
u"limitations under the License."
u"\n\nIncludes:"
u"\n Python(r) {3[0]}.{3[1]}.{3[2]} (c) 2001-2013 Python Software Foundation"
u"\n PyParsing (c) 2003-2013 Paul T. McGuire"
u"\n lxml {4[0]}.{4[1]}.{4[2]} (c) 2004 Infrae, ElementTree (c) 1999-2004 by Fredrik Lundh"
u"\n xlrd (c) 2005-2013 Stephen J. Machin, Lingfo Pty Ltd, (c) 2001 D. Giffin, (c) 2000 A. Khan"
u"\n xlwt (c) 2007 Stephen J. Machin, Lingfo Pty Ltd, (c) 2005 R. V. Kiseliov"
u"{2}"
).format(cntlr.systemWordSize, Version.version,
_(u"\n Bottle (c) 2011-2013 Marcel Hellkamp") if hasWebServer else u"",
sys.version_info, etree.LXML_VERSION)
elif options.disclosureSystemName in (u"help", u"help-verbose"):
text = _(u"Disclosure system choices: \n{0}").format(u' \n'.join(cntlr.modelManager.disclosureSystem.dirlist(options.disclosureSystemName)))
try:
print text
except UnicodeEncodeError:
print text.encode(u"ascii", u"replace").decode(u"ascii")
elif len(leftoverArgs) != 0 and (not hasWebServer or options.webserver is None):
parser.error(_(u"unrecognized arguments: {}".format(u', '.join(leftoverArgs))))
elif (options.entrypointFile is None and
((not options.proxy) and (not options.plugins) and
(not any(pluginOption for pluginOption in parser.option_list[pluginOptionsIndex:pluginLastOptionIndex])) and
(not hasWebServer or options.webserver is None))):
parser.error(_(u"incorrect arguments, please try\n python CntlrCmdLine.py --help"))
elif hasWebServer and options.webserver:
# webserver incompatible with file operations
if any((options.entrypointFile, options.importFiles, options.diffFile, options.versReportFile,
options.factsFile, options.factListCols, options.factTableFile,
options.conceptsFile, options.preFile, options.calFile, options.dimFile, options.formulaeFile, options.viewArcrole, options.viewFile,
options.roleTypesFile, options.arcroleTypesFile
)):
parser.error(_(u"incorrect arguments with --webserver, please try\n python CntlrCmdLine.py --help"))
else:
cntlr.startLogging(logFileName=u'logToBuffer')
from arelle import CntlrWebMain
app = CntlrWebMain.startWebserver(cntlr, options)
if options.webserver == u'::wsgi':
return app
else:
# parse and run the FILENAME
cntlr.startLogging(logFileName=(options.logFile or u"logToPrint"),
logFormat=(options.logFormat or u"[%(messageCode)s] %(message)s - %(file)s"),
logLevel=(options.logLevel or u"DEBUG"))
success = cntlr.run(options)
return success
class CntlrCmdLine(Cntlr.Cntlr):
u"""
    .. class:: CntlrCmdLine()
Initialization sets up for platform via Cntlr.Cntlr.
"""
def __init__(self, logFileName=None):
super(CntlrCmdLine, self).__init__(hasGui=False)
self.preloadedPlugins = {}
def run(self, options, sourceZipStream=None):
u"""Process command line arguments or web service request, such as to load and validate an XBRL document, or start web server.
When a web server has been requested, this method may be called multiple times, once for each web service (REST) request that requires processing.
Otherwise (when called for a command line request) this method is called only once for the command line arguments request.
:param options: OptionParser options from parse_args of main argv arguments (when called from command line) or corresponding arguments from web service (REST) request.
:type options: optparse.Values
"""
if options.statusPipe:
try:
global win32file
import win32file, pywintypes
self.statusPipe = win32file.CreateFile(u"\\\\.\\pipe\\{}".format(options.statusPipe),
win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, win32file.FILE_FLAG_NO_BUFFERING, None)
self.showStatus = self.showStatusOnPipe
self.lastStatusTime = 0.0
except ImportError: # win32 not installed
self.addToLog(u"--statusPipe {} cannot be installed, packages for win32 missing".format(options.statusPipe))
except pywintypes.error: # named pipe doesn't exist
self.addToLog(u"--statusPipe {} has not been created by calling program".format(options.statusPipe))
if options.showOptions: # debug options
for optName, optValue in sorted(options.__dict__.items(), key=lambda optItem: optItem[0]):
self.addToLog(u"Option {0}={1}".format(optName, optValue), messageCode=u"info")
self.addToLog(u"sys.argv {0}".format(sys.argv), messageCode=u"info")
if options.uiLang: # set current UI Lang (but not config setting)
self.setUiLanguage(options.uiLang)
if options.proxy:
if options.proxy != u"show":
proxySettings = proxyTuple(options.proxy)
self.webCache.resetProxies(proxySettings)
self.config[u"proxySettings"] = proxySettings
self.saveConfig()
self.addToLog(_(u"Proxy configuration has been set."), messageCode=u"info")
useOsProxy, urlAddr, urlPort, user, password = self.config.get(u"proxySettings", proxyTuple(u"none"))
if useOsProxy:
self.addToLog(_(u"Proxy configured to use {0}.").format(
_(u'Microsoft Windows Internet Settings') if sys.platform.startswith(u"win")
else (_(u'Mac OS X System Configuration') if sys.platform in (u"darwin", u"macos")
else _(u'environment variables'))), messageCode=u"info")
elif urlAddr:
self.addToLog(_(u"Proxy setting: http://{0}{1}{2}{3}{4}").format(
user if user else u"",
u":****" if password else u"",
u"@" if (user or password) else u"",
urlAddr,
u":{0}".format(urlPort) if urlPort else u""), messageCode=u"info")
else:
self.addToLog(_(u"Proxy is disabled."), messageCode=u"info")
if options.plugins:
resetPlugins = False
savePluginChanges = True
showPluginModules = False
for pluginCmd in options.plugins.split(u'|'):
cmd = pluginCmd.strip()
if cmd == u"show":
showPluginModules = True
elif cmd == u"temp":
savePluginChanges = False
elif cmd.startswith(u"+"):
moduleInfo = PluginManager.addPluginModule(cmd[1:])
if moduleInfo:
self.addToLog(_(u"Addition of plug-in {0} successful.").format(moduleInfo.get(u"name")),
messageCode=u"info", file=moduleInfo.get(u"moduleURL"))
resetPlugins = True
if u"CntlrCmdLine.Options" in moduleInfo[u"classMethods"]:
addedPluginWithCntlrCmdLineOptions = True
else:
self.addToLog(_(u"Unable to load plug-in."), messageCode=u"info", file=cmd[1:])
elif cmd.startswith(u"~"):
if PluginManager.reloadPluginModule(cmd[1:]):
self.addToLog(_(u"Reload of plug-in successful."), messageCode=u"info", file=cmd[1:])
resetPlugins = True
else:
self.addToLog(_(u"Unable to reload plug-in."), messageCode=u"info", file=cmd[1:])
elif cmd.startswith(u"-"):
if PluginManager.removePluginModule(cmd[1:]):
self.addToLog(_(u"Deletion of plug-in successful."), messageCode=u"info", file=cmd[1:])
resetPlugins = True
else:
self.addToLog(_(u"Unable to delete plug-in."), messageCode=u"info", file=cmd[1:])
else: # assume it is a module or package (may also have been loaded before for option parsing)
savePluginChanges = False
if cmd in self.preloadedPlugins:
moduleInfo = self.preloadedPlugins[cmd] # already loaded, add activation message to log below
else:
moduleInfo = PluginManager.addPluginModule(cmd)
if moduleInfo:
resetPlugins = True
if moduleInfo:
self.addToLog(_(u"Activation of plug-in {0} successful.").format(moduleInfo.get(u"name")),
messageCode=u"info", file=moduleInfo.get(u"moduleURL"))
else:
self.addToLog(_(u"Unable to load {0} as a plug-in or {0} is not recognized as a command. ").format(cmd), messageCode=u"info", file=cmd)
if resetPlugins:
PluginManager.reset()
if savePluginChanges:
PluginManager.save(self)
if showPluginModules:
self.addToLog(_(u"Plug-in modules:"), messageCode=u"info")
for i, moduleItem in enumerate(sorted(PluginManager.pluginConfig.get(u"modules", {}).items())):
moduleInfo = moduleItem[1]
self.addToLog(_(u"Plug-in: {0}; author: {1}; version: {2}; status: {3}; date: {4}; description: {5}; license {6}.").format(
moduleItem[0], moduleInfo.get(u"author"), moduleInfo.get(u"version"), moduleInfo.get(u"status"),
moduleInfo.get(u"fileDate"), moduleInfo.get(u"description"), moduleInfo.get(u"license")),
messageCode=u"info", file=moduleInfo.get(u"moduleURL"))
if options.packages:
from arelle import PackageManager
savePackagesChanges = True
showPackages = False
for packageCmd in options.packages.split(u'|'):
cmd = packageCmd.strip()
if cmd == u"show":
showPackages = True
elif cmd == u"temp":
savePackagesChanges = False
elif cmd.startswith(u"+"):
packageInfo = PackageManager.addPackage(cmd[1:], options.packageManifestName)
if packageInfo:
self.addToLog(_(u"Addition of package {0} successful.").format(packageInfo.get(u"name")),
messageCode=u"info", file=packageInfo.get(u"URL"))
else:
self.addToLog(_(u"Unable to load plug-in."), messageCode=u"info", file=cmd[1:])
elif cmd.startswith(u"~"):
if PackageManager.reloadPackageModule(cmd[1:]):
self.addToLog(_(u"Reload of package successful."), messageCode=u"info", file=cmd[1:])
else:
self.addToLog(_(u"Unable to reload package."), messageCode=u"info", file=cmd[1:])
elif cmd.startswith(u"-"):
if PackageManager.removePackageModule(cmd[1:]):
self.addToLog(_(u"Deletion of package successful."), messageCode=u"info", file=cmd[1:])
else:
self.addToLog(_(u"Unable to delete package."), messageCode=u"info", file=cmd[1:])
else: # assume it is a module or package
savePackagesChanges = False
packageInfo = PackageManager.addPackage(cmd, options.packageManifestName)
if packageInfo:
self.addToLog(_(u"Activation of package {0} successful.").format(packageInfo.get(u"name")),
messageCode=u"info", file=packageInfo.get(u"URL"))
resetPlugins = True
else:
self.addToLog(_(u"Unable to load {0} as a package or {0} is not recognized as a command. ").format(cmd), messageCode=u"info", file=cmd)
if savePackagesChanges:
PackageManager.save(self)
if showPackages:
self.addToLog(_(u"Taxonomy packages:"), messageCode=u"info")
for packageInfo in PackageManager.orderedPackagesConfig()[u"packages"]:
self.addToLog(_(u"Package: {0}; version: {1}; status: {2}; date: {3}; description: {4}.").format(
packageInfo.get(u"name"), packageInfo.get(u"version"), packageInfo.get(u"status"),
packageInfo.get(u"fileDate"), packageInfo.get(u"description")),
messageCode=u"info", file=packageInfo.get(u"URL"))
if options.showEnvironment:
self.addToLog(_(u"Config directory: {0}").format(self.configDir))
self.addToLog(_(u"Cache directory: {0}").format(self.userAppDir))
for envVar in (u"XDG_CONFIG_HOME",):
if envVar in os.environ:
self.addToLog(_(u"XDG_CONFIG_HOME={0}").format(os.environ[envVar]))
return True
# run utility command line options that don't depend on entrypoint Files
hasUtilityPlugin = False
for pluginXbrlMethod in pluginClassMethods(u"CntlrCmdLine.Utility.Run"):
hasUtilityPlugin = True
try:
pluginXbrlMethod(self, options, sourceZipStream=sourceZipStream)
except SystemExit: # terminate operation, plug in has terminated all processing
return True # success
# if no entrypointFile is applicable, quit now
if options.proxy or options.plugins or hasUtilityPlugin:
if not options.entrypointFile:
return True # success
self.username = options.username
self.password = options.password
self.entrypointFile = options.entrypointFile
if self.entrypointFile:
filesource = FileSource.openFileSource(self.entrypointFile, self, sourceZipStream)
else:
filesource = None
if options.validateEFM:
if options.disclosureSystemName:
self.addToLog(_(u"both --efm and --disclosureSystem validation are requested, proceeding with --efm only"),
messageCode=u"info", file=self.entrypointFile)
self.modelManager.validateDisclosureSystem = True
self.modelManager.disclosureSystem.select(u"efm")
elif options.disclosureSystemName:
self.modelManager.validateDisclosureSystem = True
self.modelManager.disclosureSystem.select(options.disclosureSystemName)
elif options.validateHMRC:
self.modelManager.validateDisclosureSystem = True
self.modelManager.disclosureSystem.select(u"hmrc")
else:
self.modelManager.disclosureSystem.select(None) # just load ordinary mappings
self.modelManager.validateDisclosureSystem = False
if options.utrUrl: # override disclosureSystem utrUrl
self.modelManager.disclosureSystem.utrUrl = options.utrUrl
# can be set now because the utr is first loaded at validation time
if options.skipDTS: # skip DTS loading, discovery, etc
self.modelManager.skipDTS = True
if options.skipLoading: # skip loading matching files (list of unix patterns)
self.modelManager.skipLoading = re.compile(
u'|'.join(fnmatch.translate(f) for f in options.skipLoading.split(u'|')))
# disclosure system sets logging filters, override disclosure filters, if specified by command line
if options.logLevelFilter:
self.setLogLevelFilter(options.logLevelFilter)
if options.logCodeFilter:
self.setLogCodeFilter(options.logCodeFilter)
if options.calcDecimals:
if options.calcPrecision:
self.addToLog(_(u"both --calcDecimals and --calcPrecision validation are requested, proceeding with --calcDecimals only"),
messageCode=u"info", file=self.entrypointFile)
self.modelManager.validateInferDecimals = True
self.modelManager.validateCalcLB = True
elif options.calcPrecision:
self.modelManager.validateInferDecimals = False
self.modelManager.validateCalcLB = True
if options.utrValidate:
self.modelManager.validateUtr = True
if options.infosetValidate:
self.modelManager.validateInfoset = True
if options.abortOnMajorError:
self.modelManager.abortOnMajorError = True
if options.collectProfileStats:
self.modelManager.collectProfileStats = True
if options.outputAttribution:
self.modelManager.outputAttribution = options.outputAttribution
if options.internetConnectivity == u"offline":
self.webCache.workOffline = True
elif options.internetConnectivity == u"online":
self.webCache.workOffline = False
if options.internetTimeout is not None:
self.webCache.timeout = (options.internetTimeout or None) # use None if zero specified to disable timeout
if options.internetLogDownloads:
self.webCache.logDownloads = True
fo = FormulaOptions()
if options.parameters:
parameterSeparator = (options.parameterSeparator or u',')
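            # Illustrative note: with the default separator, --parameters "p1=3,p2=x"
            # becomes {qname('p1'): (None, '3'), qname('p2'): (None, 'x')}; each key is a
            # QName (no prefix means no namespace) and the None slot is the parameter type.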
fo.parameterValues = dict(((qname(key, noPrefixIsNoNamespace=True),(None,value))
for param in options.parameters.split(parameterSeparator)
for key,sep,value in (param.partition(u'='),) ) )
if options.formulaParamExprResult:
fo.traceParameterExpressionResult = True
if options.formulaParamInputValue:
fo.traceParameterInputValue = True
if options.formulaCallExprSource:
fo.traceCallExpressionSource = True
if options.formulaCallExprCode:
fo.traceCallExpressionCode = True
if options.formulaCallExprEval:
fo.traceCallExpressionEvaluation = True
if options.formulaCallExprResult:
fo.traceCallExpressionResult = True
if options.formulaVarSetExprEval:
fo.traceVariableSetExpressionEvaluation = True
if options.formulaVarSetExprResult:
fo.traceVariableSetExpressionResult = True
if options.formulaAsserResultCounts:
fo.traceAssertionResultCounts = True
if options.formulaSatisfiedAsser:
fo.traceSatisfiedAssertions = True
if options.formulaUnsatisfiedAsser:
fo.traceUnsatisfiedAssertions = True
if options.formulaUnsatisfiedAsserError:
fo.errorUnsatisfiedAssertions = True
if options.formulaFormulaRules:
fo.traceFormulaRules = True
if options.formulaVarsOrder:
fo.traceVariablesOrder = True
if options.formulaVarExpressionSource:
fo.traceVariableExpressionSource = True
if options.formulaVarExpressionCode:
fo.traceVariableExpressionCode = True
if options.formulaVarExpressionEvaluation:
fo.traceVariableExpressionEvaluation = True
if options.formulaVarExpressionResult:
fo.traceVariableExpressionResult = True
if options.timeVariableSetEvaluation:
fo.timeVariableSetEvaluation = True
if options.formulaVarFilterWinnowing:
fo.traceVariableFilterWinnowing = True
if options.formulaVarFiltersResult:
fo.traceVariableFiltersResult = True
if options.formulaRunIDs:
fo.runIDs = options.formulaRunIDs
self.modelManager.formulaOptions = fo
timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
firstStartedAt = startedAt = time.time()
modelDiffReport = None
success = True
        modelXbrl = None
        g = None  # result of the last CntlrCmdLine.Xbrl.Run plug-in, if any runs (returned below)
try:
if filesource:
modelXbrl = self.modelManager.load(filesource, _(u"views loading"))
except ModelDocument.LoadingException:
pass
except Exception, err:
self.addToLog(_(u"[Exception] Failed to complete request: \n{0} \n{1}").format(
err,
traceback.format_tb(sys.exc_info()[2])))
success = False # loading errors, don't attempt to utilize loaded DTS
if modelXbrl and modelXbrl.modelDocument:
loadTime = time.time() - startedAt
modelXbrl.profileStat(_(u"load"), loadTime)
self.addToLog(format_string(self.modelManager.locale,
_(u"loaded in %.2f secs at %s"),
(loadTime, timeNow)),
messageCode=u"info", file=self.entrypointFile)
if options.importFiles:
for importFile in options.importFiles.split(u"|"):
fileName = importFile.strip()
if sourceZipStream is not None and not (fileName.startswith(u'http://') or os.path.isabs(fileName)):
fileName = os.path.dirname(modelXbrl.uri) + os.sep + fileName # make relative to sourceZipStream
ModelDocument.load(modelXbrl, fileName)
loadTime = time.time() - startedAt
self.addToLog(format_string(self.modelManager.locale,
_(u"import in %.2f secs at %s"),
(loadTime, timeNow)),
messageCode=u"info", file=importFile)
modelXbrl.profileStat(_(u"import"), loadTime)
if modelXbrl.errors:
success = False # loading errors, don't attempt to utilize loaded DTS
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods(u"Testcases.Start"):
pluginXbrlMethod(self, options, modelXbrl)
else: # not a test case, probably instance or DTS
for pluginXbrlMethod in pluginClassMethods(u"CntlrCmdLine.Xbrl.Loaded"):
pluginXbrlMethod(self, options, modelXbrl)
else:
success = False
if success and options.diffFile and options.versReportFile:
try:
diffFilesource = FileSource.FileSource(options.diffFile,self)
startedAt = time.time()
modelXbrl2 = self.modelManager.load(diffFilesource, _(u"views loading"))
if modelXbrl2.errors:
if not options.keepOpen:
modelXbrl2.close()
success = False
else:
loadTime = time.time() - startedAt
modelXbrl.profileStat(_(u"load"), loadTime)
self.addToLog(format_string(self.modelManager.locale,
_(u"diff comparison DTS loaded in %.2f secs"),
loadTime),
messageCode=u"info", file=self.entrypointFile)
startedAt = time.time()
modelDiffReport = self.modelManager.compareDTSes(options.versReportFile)
diffTime = time.time() - startedAt
modelXbrl.profileStat(_(u"diff"), diffTime)
self.addToLog(format_string(self.modelManager.locale,
_(u"compared in %.2f secs"),
diffTime),
messageCode=u"info", file=self.entrypointFile)
except ModelDocument.LoadingException:
success = False
except Exception, err:
success = False
self.addToLog(_(u"[Exception] Failed to doad diff file: \n{0} \n{1}").format(
err,
traceback.format_tb(sys.exc_info()[2])))
if success:
try:
modelXbrl = self.modelManager.modelXbrl
hasFormulae = modelXbrl.hasFormulae
isAlreadyValidated = False
for pluginXbrlMethod in pluginClassMethods(u"ModelDocument.IsValidated"):
if pluginXbrlMethod(modelXbrl): # e.g., streaming extensions already has validated
isAlreadyValidated = True
if options.validate and not isAlreadyValidated:
startedAt = time.time()
if options.formulaAction: # don't automatically run formulas
modelXbrl.hasFormulae = False
self.modelManager.validate()
if options.formulaAction: # restore setting
modelXbrl.hasFormulae = hasFormulae
self.addToLog(format_string(self.modelManager.locale,
_(u"validated in %.2f secs"),
time.time() - startedAt),
messageCode=u"info", file=self.entrypointFile)
if (options.formulaAction in (u"validate", u"run") and # do nothing here if "none"
not isAlreadyValidated): # formulas can't run if streaming has validated the instance
from arelle import ValidateXbrlDimensions, ValidateFormula
startedAt = time.time()
if not options.validate:
ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
                    # set up fresh parameters from the formula options
modelXbrl.parameters = fo.typedParameters()
ValidateFormula.validate(modelXbrl, compileOnly=(options.formulaAction != u"run"))
self.addToLog(format_string(self.modelManager.locale,
_(u"formula validation and execution in %.2f secs")
if options.formulaAction == u"run"
else _(u"formula validation only in %.2f secs"),
time.time() - startedAt),
messageCode=u"info", file=self.entrypointFile)
if options.testReport:
ViewFileTests.viewTests(self.modelManager.modelXbrl, options.testReport, options.testReportCols)
if options.rssReport:
ViewFileRssFeed.viewRssFeed(self.modelManager.modelXbrl, options.rssReport, options.rssReportCols)
if options.DTSFile:
ViewFileDTS.viewDTS(modelXbrl, options.DTSFile)
if options.factsFile:
ViewFileFactList.viewFacts(modelXbrl, options.factsFile, labelrole=options.labelRole, lang=options.labelLang, cols=options.factListCols)
if options.factTableFile:
ViewFileFactTable.viewFacts(modelXbrl, options.factTableFile, labelrole=options.labelRole, lang=options.labelLang)
if options.conceptsFile:
ViewFileConcepts.viewConcepts(modelXbrl, options.conceptsFile, labelrole=options.labelRole, lang=options.labelLang)
if options.preFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.preFile, u"Presentation Linkbase", u"http://www.xbrl.org/2003/arcrole/parent-child", labelrole=options.labelRole, lang=options.labelLang)
if options.calFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.calFile, u"Calculation Linkbase", u"http://www.xbrl.org/2003/arcrole/summation-item", labelrole=options.labelRole, lang=options.labelLang)
if options.dimFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.dimFile, u"Dimensions", u"XBRL-dimensions", labelrole=options.labelRole, lang=options.labelLang)
if options.formulaeFile:
ViewFileFormulae.viewFormulae(modelXbrl, options.formulaeFile, u"Formulae", lang=options.labelLang)
if options.viewArcrole and options.viewFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.viewFile, os.path.basename(options.viewArcrole), options.viewArcrole, labelrole=options.labelRole, lang=options.labelLang)
if options.roleTypesFile:
ViewFileRoleTypes.viewRoleTypes(modelXbrl, options.roleTypesFile, u"Role Types", isArcrole=False, lang=options.labelLang)
if options.arcroleTypesFile:
ViewFileRoleTypes.viewRoleTypes(modelXbrl, options.arcroleTypesFile, u"Arcrole Types", isArcrole=True, lang=options.labelLang)
for pluginXbrlMethod in pluginClassMethods(u"CntlrCmdLine.Xbrl.Run"):
g = pluginXbrlMethod(self, options, modelXbrl)
except (IOError, EnvironmentError), err:
self.addToLog(_(u"[IOError] Failed to save output:\n {0}").format(err),
messageCode=u"IOError",
file=options.entrypointFile,
level=logging.CRITICAL)
success = False
except Exception, err:
self.addToLog(_(u"[Exception] Failed to complete request: \n{0} \n{1}").format(
err,
traceback.format_tb(sys.exc_info()[2])),
messageCode=err.__class__.__name__,
file=options.entrypointFile,
level=logging.CRITICAL)
success = False
if modelXbrl:
modelXbrl.profileStat(_(u"total"), time.time() - firstStartedAt)
if options.collectProfileStats and modelXbrl:
modelXbrl.logProfileStats()
if not options.keepOpen:
if modelDiffReport:
self.modelManager.close(modelDiffReport)
elif modelXbrl:
self.modelManager.close(modelXbrl)
self.username = self.password = None #dereference password
if options.statusPipe and getattr(self, u"statusPipe", None) is not None:
win32file.WriteFile(self.statusPipe, " ") # clear status
win32file.FlushFileBuffers(self.statusPipe)
win32file.SetFilePointer(self.statusPipe, 0, win32file.FILE_BEGIN) # hangs on close without this
win32file.CloseHandle(self.statusPipe)
self.statusPipe = None # dereference
return (success, modelXbrl, g)
# default web authentication password
def internet_user_password(self, host, realm):
return (self.username, self.password)
# special show status for named pipes
def showStatusOnPipe(self, message, clearAfter=None):
# now = time.time() # seems ok without time-limiting writes to the pipe
if self.statusPipe is not None: # max status updates 3 per second now - 0.3 > self.lastStatusTime and
# self.lastStatusTime = now
win32file.WriteFile(self.statusPipe, (message or u"").encode(u"utf8"))
win32file.FlushFileBuffers(self.statusPipe)
win32file.SetFilePointer(self.statusPipe, 0, win32file.FILE_BEGIN) # hangs on close without this
if __name__ == u"__main__":
u'''
if '--COMserver' in sys.argv:
from arelle import CntlrComServer
CntlrComServer.main()
else:
main()
'''
main()
| apache-2.0 | 8,177,756,817,281,012,000 | 67.052802 | 221 | 0.603814 | false |
GhostshipSoftware/avaloria | game/gamesrc/chargen.py | 1 | 12872 | """
Contribution - Griatch 2011
[Note - with the advent of MULTISESSION_MODE=2, this is not really
as necessary anymore - the ooclook and @charcreate commands in that
mode replaces this module with better functionality.]
This is a simple character creation commandset. A suggestion is to
test this together with menu_login, which doesn't create a Character
on its own. This shows some more info and gives the Player the option
to create a character without any more customizations than their name
(further options are unique for each game anyway).
Since this extends the OOC cmdset, logging in from the menu will
automatically drop the Player into this cmdset unless they logged off
while puppeting a Character already before.
Installation:
Read the instructions in game/gamesrc/commands/examples/cmdset.py in
order to create a new default cmdset module for Evennia to use (copy
the template up one level, and change the settings file's relevant
variables to point to the cmdsets inside). If you already have such
a module you should of course use that.
Next import this module in your custom cmdset module and add the
following line to the end of OOCCmdSet's at_cmdset_creation():
self.add(chargen.OOCCmdSetCharGen)
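
For illustration only (the cmdset module and class names below are assumptions
modelled on the example template - adapt them to your own module), the end
result might look like:

    from ev import default_cmds
    from game.gamesrc import chargen

    class OOCCmdSet(default_cmds.OOCCmdSet):
        def at_cmdset_creation(self):
            super(OOCCmdSet, self).at_cmdset_creation()
            self.add(chargen.OOCCmdSetCharGen)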
"""
from django.conf import settings
from ev import Command, create_object, utils, CmdSet
from ev import default_cmds, managers
from game.gamesrc.menu_login import *
from game.gamesrc.objects import copyreader
CHARACTER_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
class CmdOOCLook(default_cmds.CmdLook):
"""
ooc look
Usage:
look
look <character>
This is an OOC version of the look command. Since a Player doesn't
have an in-game existence, there is no concept of location or
"self".
If any characters are available for you to control, you may look
at them with this command.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
help_cateogory = "General"
def func(self):
"""
Implements the ooc look command
We use an attribute _character_dbrefs on the player in order
to figure out which characters are "theirs". A drawback of this
        is that only the CmdOOCCharacterCreate command adds this attribute,
and thus e.g. player #1 will not be listed (although it will work).
Existence in this list does not depend on puppeting rights though,
that is checked by the @ic command directly.
"""
# making sure caller is really a player
self.character = None
if utils.inherits_from(self.caller, "src.objects.objects.Object"):
# An object of some type is calling. Convert to player.
#print self.caller, self.caller.__class__
self.character = self.caller
if hasattr(self.caller, "player"):
self.caller = self.caller.player
if not self.character:
# ooc mode, we are players
avail_chars = self.caller.db._character_dbrefs
if self.args:
# Maybe the caller wants to look at a character
if not avail_chars:
self.caller.msg("You have no characters to look at. Why not create one?")
return
objs = managers.objects.get_objs_with_key_and_typeclass(self.args.strip(), CHARACTER_TYPECLASS)
objs = [obj for obj in objs if obj.id in avail_chars]
if not objs:
self.caller.msg("You cannot see this Character.")
return
self.caller.msg(objs[0].return_appearance(self.caller))
return
# not inspecting a character. Show the OOC info.
charobjs = []
charnames = []
if self.caller.db._character_dbrefs:
dbrefs = self.caller.db._character_dbrefs
charobjs = [managers.objects.get_id(dbref) for dbref in dbrefs]
charnames = [charobj.key for charobj in charobjs if charobj]
if charnames:
charlist = "The following Character(s) are available:\n\n"
charlist += "\n\r".join(["{w %s{n" % charname for charname in charnames])
charlist += "\n\n Use {w@ic <character name>{n to switch to that Character."
else:
charlist = "You have no Characters."
string = \
""" You, %s, are an {wOOC ghost{n without form. The world is hidden
from you and besides chatting on channels your options are limited.
You need to have a Character in order to interact with the world.
%s
Use {wcreate <name>{n to create a new character and {whelp{n for a
list of available commands.""" % (self.caller.key, charlist)
self.caller.msg(string)
else:
# not ooc mode - leave back to normal look
# we have to put this back for normal look to work.
self.caller = self.character
super(CmdOOCLook, self).func()
class CmdOOCCharacterCreate(Command):
"""
creates a character
Usage:
create <character name>
This will create a new character, assuming
the given character name does not already exist.
"""
key = "create"
locks = "cmd:all()"
def func(self):
"""
Tries to create the Character object. We also put an
attribute on ourselves to remember it.
"""
# making sure caller is really a player
self.character = None
if utils.inherits_from(self.caller, "src.objects.objects.Object"):
# An object of some type is calling. Convert to player.
#print self.caller, self.caller.__class__
self.character = self.caller
if hasattr(self.caller, "player"):
self.caller = self.caller.player
if not self.args:
self.caller.msg("Usage: create <character name>")
return
charname = self.args.strip()
old_char = managers.objects.get_objs_with_key_and_typeclass(charname, CHARACTER_TYPECLASS)
if old_char:
self.caller.msg("Character {c%s{n already exists." % charname)
return
# create the character
new_character = create_object(CHARACTER_TYPECLASS, key=charname)
if not new_character:
self.caller.msg("{rThe Character couldn't be created. This is a bug. Please contact an admin.")
return
# make sure to lock the character to only be puppeted by this player
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, self.caller.id))
# save dbref
avail_chars = self.caller.db._character_dbrefs
if avail_chars:
avail_chars.append(new_character.id)
else:
avail_chars = [new_character.id]
self.caller.db._character_dbrefs = avail_chars
self.caller.msg("{gThe Character {c%s{g was successfully created!" % charname)
self.caller.obj = new_character
attributes = new_character.db.attributes
nodes = []
copy_dir = '/Users/geoffrey/gitrepos/avaloria/game/gamesrc/copy/'
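        # Build the character-generation menu tree: START links to the race menu;
        # every race/deity/gender choice gets its own node plus a matching confirm-*
        # node whose code string calls the corresponding setter (set_race, set_deity,
        # set_gender) on the newly created character. The 'alignment' option has no
        # branch below and so currently contributes no menu nodes.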
for option in ['race', 'deity', 'alignment', 'gender']:
if 'race' in option:
                for race in ['bardok', 'erelania', 'earthen', 'gerdling']:  # 'the unknowns' dropped: no description file, confirm branch or menu link exists for it
confirm_node = MenuNode("confirm-%s" % race, links=['deity'], linktexts=['Choose your deity.'], code="self.caller.obj.set_race('%s')" % race)
nodes.append(confirm_node)
if 'bardok' in race:
text = copyreader.read_file("%s/races/bardok_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-bardok', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
elif 'erelania' in race:
text = copyreader.read_file("%s/races/erelania_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-erelania', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
elif 'gerdling' in race:
text = copyreader.read_file("%s/races/gerdling_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-gerdling', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
elif 'earthen' in race:
text = copyreader.read_file("%s/races/earthen_desc.txt" % copy_dir)
race_node = MenuNode("%s" % race, text=text, links=['confirm-earthen', 'race'], linktexts=['Confirm Race Selection', 'Back to Races'])
nodes.append(race_node)
text = copyreader.read_file("%s/races/races_desc.txt" % copy_dir)
root_race_node = MenuNode("%s" % option, text=text, links=['bardok', 'erelania', 'gerdling', 'earthen'], linktexts=['The Bardok', 'The Erelania', 'The Gerdling', 'The Earthen'])
nodes.append(root_race_node)
elif 'deity' in option:
deities = ['ankarith', 'slyth', 'green warden', 'kaylynne']
for deity in deities:
confirm_node = MenuNode('confirm-%s' % deity, links=['gender'], linktexts=['Choose your gender.'], code="self.caller.obj.set_deity('%s')" % deity)
nodes.append(confirm_node)
if 'karith' in deity:
text = copyreader.read_file("%s/deities/ankarith_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-ankarith', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
#self.obj.msg("links: %s, linktexts: %s" % (deity_node.links, deity_node.linktexts))
elif 'slyth' in deity:
text = copyreader.read_file("%s/deities/slyth_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-slyth', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
elif 'green warden' in deity:
text = copyreader.read_file("%s/deities/greenwarden_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-green warden', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
elif 'kaylynne' in deity:
text = copyreader.read_file("%s/deities/kaylynne_desc.txt" % copy_dir)
deity_node = MenuNode("%s" % deity, text=text, links=['confirm-kaylynne', 'deity'], linktexts=['Confirm Deity Selection', 'Back to Deities'])
nodes.append(deity_node)
deity_node_text = copyreader.read_file("%s/deities/deities_desc.txt" % copy_dir)
root_deity_node = MenuNode("deity", text=deity_node_text, links=['ankarith', 'slyth', 'green warden', 'kaylynne'],
linktexts=['An\'Karith', 'Slyth of the Glade', 'The Green Warden', 'Kaylynne'])
nodes.append(root_deity_node)
elif 'gender' in option:
confirm_male = MenuNode("confirm-gender-male", links=['END'], linktexts=["Go forth"], code="self.caller.obj.set_gender('male')")
confirm_female = MenuNode("confirm-gender-female", links=['END'], linktexts=["Go forth"], code="self.caller.obj.set_gender('female')")
nodes.append(confirm_male)
nodes.append(confirm_female)
text = """
--{rGender Selection{n--
Please select which gender you would like to be:
"""
gender_node = MenuNode("gender", text=text, links=['confirm-gender-male', 'confirm-gender-female'],
linktexts=['Male', 'Female'])
nodes.append(gender_node)
start_node = MenuNode("START", text="{bWelcome to Avaloria. Please proceed through the menu to customize your character.{n",
links=['race' ], linktexts=['Choose your race.'])
nodes.append(start_node)
node_string = ' '.join([node.key for node in nodes])
self.caller.msg("{mDEBUG: nodes: %s{n" % node_string)
menutree = MenuTree(caller=self.caller, nodes=nodes)
menutree.start()
class OOCCmdSetCharGen(CmdSet):
"""
Extends the default OOC cmdset.
"""
def at_cmdset_creation(self):
"Install everything from the default set, then overload"
#super(OOCCmdSetCharGen, self).at_cmdset_creation()
self.add(CmdOOCLook())
self.add(CmdOOCCharacterCreate()) | bsd-3-clause | 8,105,016,762,496,736,000 | 47.213483 | 194 | 0.601849 | false |
lrt512/emol | emol/emol/models/privacy_acceptance.py | 1 | 4199 | # -*- coding: utf-8 -*-
"""Model to record combatants' acceptance of the privacy policy."""
# standard library imports
# pylint complains about the uuid import but it is used for Required(uuid.UUID)
# pylint: disable=unused-import
import uuid
from datetime import datetime
# third-party imports
from flask import url_for, current_app as app
# application imports
from emol.mail import Emailer
from emol.utility.database import default_uuid
__all__ = ['PrivacyAcceptance']
class PrivacyAcceptance(app.db.Model):
"""Record indicating acceptance of the privacy policy.
When a Combatant record is inserted into the database, the listener
event creates a matching PrivacyAccepted record. Any combatant who has
a PrivacyAccepted record that is not resolved cannot use the system
until they accept the privacy policy
When the combatant accepts the privacy policy, the PrivacyAccepted record
is resolved by noting the datetime that the privacy policy was accepted
If the combatant declines the privacy policy, the Combatant record and the
related PrivacyAcceptance is deleted from the database and the MoL is
informed
Attributes:
id: Identity PK for the table
uuid: A reference to the record with no intrinsic meaning
accepted: Date the combatant accepted the privacy policy
combatant_id: ID of the related combatant
combatant: ORM relationship to the Combatant identified by combatant_id
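
    Typical flow (illustrative sketch only, assuming an existing combatant):

        PrivacyAcceptance.create(combatant)      # emails the acceptance link
        record = combatant.privacy_acceptance
        record.resolve(accepted=True)            # or accepted=False to decline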
"""
id = app.db.Column(app.db.Integer, primary_key=True)
combatant_id = app.db.Column(app.db.Integer, app.db.ForeignKey('combatant.id'))
combatant = app.db.relationship(
'Combatant',
backref=app.db.backref('privacy_acceptance', uselist=False, cascade="all, delete-orphan")
)
uuid = app.db.Column(app.db.String(36), default=default_uuid)
accepted = app.db.Column(app.db.DateTime)
@classmethod
def create(cls, combatant, no_email=False):
"""Generate a PrivacyAccepted record for a combatant.
Generates and saves the PrivacyAccepted record, then sends out the
email to prompt the combatant to visit eMoL and read (heh) and accept
the privacy policy
Attributes:
combatant: A combatant
no_email: Should be used for unit testing only
"""
privacy_acceptance = cls(combatant=combatant)
app.db.session.add(privacy_acceptance)
app.db.session.commit()
        if not no_email:
            emailer = Emailer()
            emailer.send_privacy_policy_acceptance(privacy_acceptance)
@property
def privacy_policy_url(self):
"""Generate the URL for a user to visit to accept the privacy policy.
Uses the uuid member to uniquely identify this privacy accepted record,
and through it the combatant.
Returns:
String containing the URL
"""
return url_for('privacy_policy.index', uuid=self.uuid, _external=True)
def resolve(self, accepted):
if accepted is True:
# Combatant accepted the privacy policy. Note the time of
# acceptance, generate their card_id and email them the
# link to their card
self.accepted = datetime.utcnow()
self.combatant.generate_card_id()
emailer = Emailer()
emailer.send_card_request(self.combatant)
app.logger.debug('Sent card request email to {0}'.format(
self.combatant.email
))
has_sca_name = self.combatant.sca_name is not None
return {
'accepted': True,
'card_url': self.combatant.card_url,
'has_sca_name': has_sca_name
}
else:
# Combatant declined the privacy policy, delete the Combatant
# record for them and notify the MoL
combatant = self.combatant
app.db.session.delete(self)
app.db.session.delete(combatant)
            app.logger.info('Deleted combatant {0}'.format(
                combatant.email))
# TODO: Notify the MoL
app.db.session.commit()
return {'accepted': False}
| mit | 8,481,351,691,787,836,000 | 34.584746 | 97 | 0.656109 | false |
openpaul/DNApy | output.py | 1 | 5753 | #!/usr/bin/python
#This file is part of DNApy. DNApy is a DNA editor written purely in python.
#The program is intended to be an intuitive, fully featured,
#extendable, editor for molecular and synthetic biology.
#Enjoy!
#
#Copyright (C) 2014 Martin Engqvist |
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#LICENSE:
#This file is part of DNApy.
#
#DNApy is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License, or
#(at your option) any later version.
#
#DNApy is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Library General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software Foundation,
#Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#Get source code at: https://github.com/0b0bby0/DNApy
#
import wx.richtext as rt
import wx
from base_class import DNApyBaseClass
#from wx.lib.pubsub import pub
class create(DNApyBaseClass):
'''A class to print colored output to a rich textctrl'''
def __init__(self, parent, style):
super(create, self).__init__(parent, style)
self.rtc = rt.RichTextCtrl(self)
self.rtc.SetEditable(False) #make it not editable
font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')
self.rtc.SetFont(font)
# self.rtc.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress)
#determing which listening group from which to recieve messages about UI updates
# self.listening_group = 'placeholder'
# pub.Publisher.subscribe(self.listen_to_updateUI, self.listening_group)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(item=self.rtc, proportion=-1, flag=wx.EXPAND)
self.SetSizer(sizer)
####### Modify methods from base class to fit current needs #########
def update_globalUI(self):
'''Method should be modified as to update other panels in response to changes in own panel.
Preferred use is through sending a message using the pub module.
Example use is: pub.Publisher.sendMessage('feature_list_updateUI', '').
The first string is the "listening group" and deterimines which listeners get the message.
The second string is the message and is unimportant for this implementation.
The listening group assigned here (to identify recipients) must be different from the listening group assigned in __init__ (to subscribe to messages).'''
pass
def update_ownUI(self):
'''Updates all fields depending on which feature is chosen'''
pass
#####################################################################
def OnKeyPress(self, evt):
print('keypress')
def clear(self):
'''Remove any text already in the Output Panel'''
self.rtc.Clear()
def write(self, string, stringtype):
'''General method for printing to the Output Panel'''
if stringtype == 'DNA':
self.rtc.BeginTextColour('#009999')
        elif stringtype == 'DNAcolor':
            # colour each base individually (slow for long sequences)
            insertionpoint = self.rtc.GetInsertionPoint()
            self.rtc.WriteText(string)
            attr = rt.RichTextAttr()
            string = string.upper()
            for i, base in enumerate(string):
                start = insertionpoint + i
                end = start + 1
                if base == '-': color = '#000000'
                elif base == 'A': color = '#33CC00'
                elif base == 'T': color = '#CC0000'
                elif base == 'C': color = '#0066CC'
                elif base == 'G': color = '#000000'
                elif base == 'N': color = '#FF00CC'
                else: color = '#FF6600'
                attr.SetTextColour(color)
                self.rtc.SetStyleEx(rt.RichTextRange(start, end), attr)
            return    # the string has already been written and styled
elif stringtype == 'Protein':
self.rtc.BeginTextColour('#CC6600')
elif stringtype == 'Text':
self.rtc.BeginTextColour('#333333')
elif stringtype == 'File':
self.rtc.BeginTextColour('#330099')
elif stringtype == 'Barcode':
self.rtc.BeginTextColour('#FF00FF')
self.rtc.WriteText(string)
self.rtc.EndTextColour()
if stringtype == 'Replace':
self.rtc.BeginTextColour('#333333')
self.rtc.SetValue(string)
self.rtc.EndTextColour()
def write_image(self, image):
'''General method for printing images to the Output Panel'''
pass
# self.WriteImage(images._rt_smiley.GetImage())
#
# bool WriteBitmap(self, bitmap, bitmapType)
# Write a bitmap at the current insertion point.
# bool WriteImage(self, image, bitmapType)
# Write an image at the current insertion point.
# bool WriteImageBlock(self, imageBlock)
# Write an image block at the current insertion point.
# bool WriteImageFile(self, filename, bitmapType)
# Load an image from file and write at the current insertion point.
if __name__ == '__main__': #if script is run by itself and not loaded
app = wx.App() # creation of the wx.App object (initialisation of the wxpython toolkit)
frame = wx.Frame(None, title="Output Panel") # creation of a Frame with a title
frame.output = create(frame, style=wx.VSCROLL|wx.HSCROLL) # creation of a richtextctrl in the frame
frame.output.write('CACC', 'DNA') #testing..
frame.Show() # frames are invisible by default so we use Show() to make them visible
app.MainLoop() # here the app enters a loop waiting for user input
| gpl-3.0 | 1,293,076,373,429,063,700 | 34.732919 | 204 | 0.682079 | false |
aino/django-aislug | tests/aislug_tests/models.py | 1 | 1161 | from aislug import AISlugField
from django.db import models
class Item(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField()
class ItemUpdateFalse(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(update=False)
class ItemSlugify(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(slugify=lambda x: x)
class ItemInvalidList(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(invalid=['invalid'])
class ItemInvalidCallback(models.Model):
title = models.CharField(max_length=100)
slug = AISlugField(invalid=lambda: ['invalid'])
class ItemPopulateFromProperty(models.Model):
name = models.CharField(max_length=100)
slug = AISlugField(populate_from='name')
class ItemPopulateFromMethod(models.Model):
name = models.CharField(max_length=100)
slug = AISlugField(populate_from='meth')
def meth(self):
return self.name
class ItemUniqueFor(models.Model):
title = models.CharField(max_length=100)
category = models.CharField(max_length=100)
slug = AISlugField(unique_for=['category'])
| bsd-3-clause | 2,764,682,284,138,301,000 | 28.025 | 51 | 0.727821 | false |
Eric89GXL/vispy | examples/demo/gloo/grayscott.py | 2 | 7375 | # -*- coding: utf-8 -*-
# vispy: gallery 2000
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: Nicolas P .Rougier
# Date: 06/03/2014
# Abstract: GPU computing using the framebuffer
# Keywords: framebuffer, GPU computing, reaction-diffusion
# -----------------------------------------------------------------------------
from __future__ import division
import numpy as np
from vispy.gloo import (Program, FrameBuffer, RenderBuffer, set_viewport,
clear, set_state)
from vispy import app
render_vertex = """
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
v_texcoord = texcoord;
}
"""
render_fragment = """
uniform int pingpong;
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
float v;
if( pingpong == 0 )
v = texture2D(texture, v_texcoord).r;
else
v = texture2D(texture, v_texcoord).b;
gl_FragColor = vec4(1.0-v, 1.0-v, 1.0-v, 1.0);
}
"""
compute_vertex = """
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
v_texcoord = texcoord;
}
"""
compute_fragment = """
uniform int pingpong;
uniform sampler2D texture; // U,V:= r,g, other channels ignored
uniform sampler2D params; // rU,rV,f,k := r,g,b,a
uniform float dx; // horizontal distance between texels
uniform float dy; // vertical distance between texels
uniform float dd; // unit of distance
uniform float dt; // unit of time
varying vec2 v_texcoord;
void main(void)
{
float center = -(4.0+4.0/sqrt(2.0)); // -1 * other weights
float diag = 1.0/sqrt(2.0); // weight for diagonals
vec2 p = v_texcoord; // center coordinates
vec2 c,l;
if( pingpong == 0 ) {
c = texture2D(texture, p).rg; // central value
// Compute Laplacian
l = ( texture2D(texture, p + vec2(-dx,-dy)).rg
+ texture2D(texture, p + vec2( dx,-dy)).rg
+ texture2D(texture, p + vec2(-dx, dy)).rg
+ texture2D(texture, p + vec2( dx, dy)).rg) * diag
+ texture2D(texture, p + vec2(-dx, 0.0)).rg
+ texture2D(texture, p + vec2( dx, 0.0)).rg
+ texture2D(texture, p + vec2(0.0,-dy)).rg
+ texture2D(texture, p + vec2(0.0, dy)).rg
+ c * center;
} else {
c = texture2D(texture, p).ba; // central value
// Compute Laplacian
l = ( texture2D(texture, p + vec2(-dx,-dy)).ba
+ texture2D(texture, p + vec2( dx,-dy)).ba
+ texture2D(texture, p + vec2(-dx, dy)).ba
+ texture2D(texture, p + vec2( dx, dy)).ba) * diag
+ texture2D(texture, p + vec2(-dx, 0.0)).ba
+ texture2D(texture, p + vec2( dx, 0.0)).ba
+ texture2D(texture, p + vec2(0.0,-dy)).ba
+ texture2D(texture, p + vec2(0.0, dy)).ba
+ c * center;
}
float u = c.r; // compute some temporary
float v = c.g; // values which might save
float lu = l.r; // a few GPU cycles
float lv = l.g;
float uvv = u * v * v;
vec4 q = texture2D(params, p).rgba;
float ru = q.r; // rate of diffusion of U
float rv = q.g; // rate of diffusion of V
float f = q.b; // some coupling parameter
float k = q.a; // another coupling parameter
float du = ru * lu / dd - uvv + f * (1.0 - u); // Gray-Scott equation
float dv = rv * lv / dd + uvv - (f + k) * v; // diffusion+-reaction
u += du * dt;
v += dv * dt;
if( pingpong == 1 ) {
gl_FragColor = vec4(clamp(u, 0.0, 1.0), clamp(v, 0.0, 1.0), c);
} else {
gl_FragColor = vec4(c, clamp(u, 0.0, 1.0), clamp(v, 0.0, 1.0));
}
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Grayscott Reaction-Diffusion',
size=(512, 512), keys='interactive')
self.scale = 4
self.comp_size = self.size
comp_w, comp_h = self.comp_size
dt = 1.0
dd = 1.5
species = {
# name : [r_u, r_v, f, k]
'Bacteria 1': [0.16, 0.08, 0.035, 0.065],
'Bacteria 2': [0.14, 0.06, 0.035, 0.065],
'Coral': [0.16, 0.08, 0.060, 0.062],
'Fingerprint': [0.19, 0.05, 0.060, 0.062],
'Spirals': [0.10, 0.10, 0.018, 0.050],
'Spirals Dense': [0.12, 0.08, 0.020, 0.050],
'Spirals Fast': [0.10, 0.16, 0.020, 0.050],
'Unstable': [0.16, 0.08, 0.020, 0.055],
'Worms 1': [0.16, 0.08, 0.050, 0.065],
'Worms 2': [0.16, 0.08, 0.054, 0.063],
'Zebrafish': [0.16, 0.08, 0.035, 0.060]
}
P = np.zeros((comp_h, comp_w, 4), dtype=np.float32)
P[:, :] = species['Unstable']
UV = np.zeros((comp_h, comp_w, 4), dtype=np.float32)
UV[:, :, 0] = 1.0
r = 32
UV[comp_h // 2 - r:comp_h // 2 + r,
comp_w // 2 - r:comp_w // 2 + r, 0] = 0.50
UV[comp_h // 2 - r:comp_h // 2 + r,
comp_w // 2 - r:comp_w // 2 + r, 1] = 0.25
UV += np.random.uniform(0.0, 0.01, (comp_h, comp_w, 4))
UV[:, :, 2] = UV[:, :, 0]
UV[:, :, 3] = UV[:, :, 1]
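        # The four texture channels hold two copies of the (U, V) state:
        # (r, g) and (b, a). Each frame the compute shader reads one pair and
        # writes the other ("ping-pong"), selected by the pingpong uniform.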
self.pingpong = 1
self.compute = Program(compute_vertex, compute_fragment, 4)
self.compute["params"] = P
self.compute["texture"] = UV
self.compute["position"] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)]
self.compute["texcoord"] = [(0, 0), (0, 1), (1, 0), (1, 1)]
self.compute['dt'] = dt
self.compute['dx'] = 1.0 / comp_w
self.compute['dy'] = 1.0 / comp_h
self.compute['dd'] = dd
self.compute['pingpong'] = self.pingpong
self.render = Program(render_vertex, render_fragment, 4)
self.render["position"] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)]
self.render["texcoord"] = [(0, 0), (0, 1), (1, 0), (1, 1)]
self.render["texture"] = self.compute["texture"]
self.render['pingpong'] = self.pingpong
self.fbo = FrameBuffer(self.compute["texture"],
RenderBuffer(self.comp_size))
set_state(depth_test=False, clear_color='black')
self._timer = app.Timer('auto', connect=self.update, start=True)
self.show()
def on_draw(self, event):
with self.fbo:
set_viewport(0, 0, *self.comp_size)
self.compute["texture"].interpolation = 'nearest'
self.compute.draw('triangle_strip')
clear(color=True)
set_viewport(0, 0, *self.physical_size)
self.render["texture"].interpolation = 'linear'
self.render.draw('triangle_strip')
self.pingpong = 1 - self.pingpong
self.compute["pingpong"] = self.pingpong
self.render["pingpong"] = self.pingpong
def on_resize(self, event):
set_viewport(0, 0, *self.physical_size)
if __name__ == '__main__':
canvas = Canvas()
app.run()
| bsd-3-clause | -7,063,744,697,492,145,000 | 34.119048 | 79 | 0.510644 | false |
stonemary/lintcode_solutions | search-a-2d-matrix/1.py | 1 | 1214 | # iterative
# time: went over the time limit at first due to a bug in the helper function. ~18 mins
class Solution:
"""
@param matrix, a list of lists of integers
@param target, an integer
@return a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
if matrix is None or matrix == []:
return False
# width: m, height: n
n = len(matrix)
m = len(matrix[0])
start = 0
end = m * n - 1
mid = (start + end) / 2
while start + 1 < end:
mid_value = self.get_element(matrix, mid, m)
if mid_value == target:
return True
if mid_value < target:
start = mid
if mid_value > target:
end = mid
mid = (start + end) / 2
else:
if self.get_element(matrix, start, m) == target:
return True
if self.get_element(matrix, end, m) == target:
return True
return False
def get_element(self, matrix, i, m):
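        # treat the m-column matrix as one flat sorted list:
        # row = i // m, column = i % m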
index_1 = i / m
index_2 = i % m
return matrix[index_1][index_2]
| apache-2.0 | 3,740,729,000,668,610,000 | 27.904762 | 62 | 0.476936 | false |
OmkarPathak/pygorithm | pygorithm/data_structures/quadtree.py | 1 | 23006 | """
Author: Timothy Moore
Created On: 31th August 2017
Defines a two-dimensional quadtree of arbitrary
depth and bucket size.
"""
import inspect
import math
from collections import deque
from pygorithm.geometry import (vector2, polygon2, rect2)
class QuadTreeEntity(object):
"""
This is the minimum information required for an object to
be usable in a quadtree as an entity. Entities are the
things that you are trying to compare in a quadtree.
:ivar aabb: the axis-aligned bounding box of this entity
:type aabb: :class:`pygorithm.geometry.rect2.Rect2`
"""
def __init__(self, aabb):
"""
Create a new quad tree entity with the specified aabb
:param aabb: axis-aligned bounding box
:type aabb: :class:`pygorithm.geometry.rect2.Rect2`
"""
self.aabb = aabb
def __repr__(self):
"""
Create an unambiguous representation of this entity.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
_ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5))
# prints quadtreeentity(aabb=rect2(width=5, height=5, mincorner=vector2(x=0, y=0)))
print(repr(_ent))
:returns: unambiguous representation of this quad tree entity
:rtype: string
"""
return "quadtreeentity(aabb={})".format(repr(self.aabb))
def __str__(self):
"""
Create a human readable representation of this entity
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
_ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5))
# prints entity(at rect(5x5 at <0, 0>))
print(str(_ent))
:returns: human readable representation of this entity
:rtype: string
"""
return "entity(at {})".format(str(self.aabb))
class QuadTree(object):
"""
A quadtree is a sorting tool for two-dimensional space, most
commonly used to reduce the number of required collision
calculations in a two-dimensional scene. In this context,
the scene is stepped without collision detection, then a
quadtree is constructed from all of the boundaries
.. caution::
Just because a quad tree has split does not mean entities will be empty. Any
entities which overlay any of the lines of the split will be included in the
parent of the quadtree.
.. tip::
It is important to tweak bucket size and depth to the problem, but a common error
is too small a bucket size. It is typically not reasonable to have a bucket size
smaller than 16; A good starting point is 64, then modify as appropriate. Larger
buckets reduce the overhead of the quad tree which could easily exceed the improvement
from reduced collision checks. The max depth is typically just a sanity check since
depth greater than 4 or 5 would either indicate a badly performing quadtree (too
dense objects, use an r-tree or kd-tree) or a very large world (where an iterative
quadtree implementation would be appropriate).
:ivar bucket_size: maximum number objects per bucket (before :py:attr:`.max_depth`)
:type bucket_size: int
:ivar max_depth: maximum depth of the quadtree
:type max_depth: int
:ivar depth: the depth of this node (0 being the topmost)
:type depth: int
:ivar location: where this quad tree node is situated
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:ivar entities: the entities in this quad tree and in NO OTHER related quad tree
:type entities: list of :class:`.QuadTreeEntity`
:ivar children: either None or the 4 :class:`.QuadTree` children of this node
:type children: None or list of :class:`.QuadTree`
"""
def __init__(self, bucket_size, max_depth, location, depth = 0, entities = None):
"""
Initialize a new quad tree.
.. warning::
Passing entities to this quadtree will NOT cause it to split automatically!
You must call :py:meth:`.think` for that. This allows for more predictable
performance per line.
:param bucket_size: the number of entities in this quadtree
:type bucket_size: int
:param max_depth: the maximum depth for automatic splitting
:type max_depth: int
:param location: where this quadtree is located
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:param depth: the depth of this node
:type depth: int
:param entities: the entities to initialize this quadtree with
:type entities: list of :class:`.QuadTreeEntity` or None for empty list
"""
self.bucket_size = bucket_size
self.max_depth = max_depth
self.location = location
self.depth = depth
self.entities = entities if entities is not None else []
self.children = None
def think(self, recursive = False):
"""
Call :py:meth:`.split` if appropriate
Split this quad tree if it has not split already and it has more
entities than :py:attr:`.bucket_size` and :py:attr:`.depth` is
less than :py:attr:`.max_depth`.
If `recursive` is True, think is called on the :py:attr:`.children` with
recursive set to True after splitting.
:param recursive: if `think(True)` should be called on :py:attr:`.children` (if there are any)
:type recursive: bool
"""
if not self.children and self.depth < self.max_depth and len(self.entities) > self.bucket_size:
self.split()
if recursive:
if self.children:
for child in self.children:
child.think(True)
def split(self):
"""
Split this quadtree.
.. caution::
A call to split will always split the tree or raise an error. Use
:py:meth:`.think` if you want to ensure the quadtree is operating
efficiently.
.. caution::
This function will not respect :py:attr:`.bucket_size` or
:py:attr:`.max_depth`.
:raises ValueError: if :py:attr:`.children` is not empty
"""
if self.children:
raise ValueError("cannot split twice")
_cls = type(self)
def _cstr(r):
return _cls(self.bucket_size, self.max_depth, r, self.depth + 1)
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
self.children = [
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y + _halfheight))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y + _halfheight))) ]
_newents = []
for ent in self.entities:
quad = self.get_quadrant(ent)
if quad < 0:
_newents.append(ent)
else:
self.children[quad].entities.append(ent)
self.entities = _newents
def get_quadrant(self, entity):
"""
Calculate the quadrant that the specified entity belongs to.
Touching a line is considered overlapping a line. Touching is
determined using :py:meth:`math.isclose`
Quadrants are:
- -1: None (it overlaps 2 or more quadrants)
- 0: Top-left
- 1: Top-right
- 2: Bottom-right
- 3: Bottom-left
.. caution::
This function does not verify the entity is contained in this quadtree.
This operation takes O(1) time.
:param entity: the entity to place
:type entity: :class:`.QuadTreeEntity`
:returns: quadrant
:rtype: int
"""
_aabb = entity.aabb
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
if math.isclose(_aabb.mincorner.x, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.x + _aabb.width, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.y, _y + _halfheight):
return -1
if math.isclose(_aabb.mincorner.y + _aabb.height, _y + _halfheight):
return -1
_leftside_isleft = _aabb.mincorner.x < _x + _halfwidth
_rightside_isleft = _aabb.mincorner.x + _aabb.width < _x + _halfwidth
if _leftside_isleft != _rightside_isleft:
return -1
_topside_istop = _aabb.mincorner.y < _y + _halfheight
_botside_istop = _aabb.mincorner.y + _aabb.height < _y + _halfheight
if _topside_istop != _botside_istop:
return -1
_left = _leftside_isleft
_top = _topside_istop
if _left:
if _top:
return 0
else:
return 3
else:
if _top:
return 1
else:
return 2
def insert_and_think(self, entity):
"""
Insert the entity into this or the appropriate child.
        This also acts as thinking (recursively). Inserting entities one at a time
        with :py:meth:`.insert_and_think` is slightly less efficient than loading
        all entities up front and then calling :py:meth:`.think`, but its
        performance is more predictable. Both approaches may exceed the recursion
        limit if :py:attr:`.max_depth` is too large.
:param entity: the entity to insert
:type entity: :class:`.QuadTreeEntity`
"""
if not self.children and len(self.entities) == self.bucket_size and self.depth < self.max_depth:
self.split()
quad = self.get_quadrant(entity) if self.children else -1
if quad < 0:
self.entities.append(entity)
else:
self.children[quad].insert_and_think(entity)
def retrieve_collidables(self, entity, predicate = None):
"""
Find all entities that could collide with the specified entity.
.. warning::
If entity is, itself, in the quadtree, it will be returned. The
predicate may be used to prevent this using your preferred equality
method.
The predicate takes 1 positional argument (the entity being considered)
and returns `False` if the entity should never be returned, even if it
might collide with the entity. It should return `True` otherwise.
:param entity: the entity to find collidables for
:type entity: :class:`.QuadTreeEntity`
:param predicate: the predicate
:type predicate: :class:`types.FunctionType` or None
        :returns: potential collidables (never `None`)
:rtype: list of :class:`.QuadTreeEntity`
"""
result = list(filter(predicate, self.entities))
quadrant = self.get_quadrant(entity) if self.children else -1
if quadrant >= 0:
result.extend(self.children[quadrant].retrieve_collidables(entity, predicate))
elif self.children:
for child in self.children:
touching, overlapping, alwaysNone = rect2.Rect2.find_intersection(entity.aabb, child.location, find_mtv=False)
if touching or overlapping:
result.extend(child.retrieve_collidables(entity, predicate))
return result
def _iter_helper(self, pred):
"""
Calls pred on each child and childs child, iteratively.
pred takes one positional argument (the child).
:param pred: function to call
:type pred: `types.FunctionType`
"""
_stack = deque()
_stack.append(self)
while _stack:
curr = _stack.pop()
if curr.children:
for child in curr.children:
_stack.append(child)
pred(curr)
def find_entities_per_depth(self):
"""
Calculate the number of nodes and entities at each depth level in this
quad tree. Only returns for depth levels at or equal to this node.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of entities
:rtype: dict int: int
"""
container = { 'result': {} }
def handler(curr, container=container):
container['result'][curr.depth] = container['result'].get(curr.depth, 0) + len(curr.entities)
self._iter_helper(handler)
return container['result']
def find_nodes_per_depth(self):
"""
Calculate the number of nodes at each depth level.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of nodes
:rtype: dict int: int
"""
nodes_per_depth = {}
self._iter_helper(lambda curr, d=nodes_per_depth: d.update({ (curr.depth, d.get(curr.depth, 0) + 1) }))
return nodes_per_depth
def sum_entities(self, entities_per_depth=None):
"""
Sum the number of entities in this quad tree and all lower quad trees.
If `entities_per_depth` is not None, that array is used to calculate the sum
of entities rather than traversing the tree. Either way, this is implemented
iteratively. See :py:meth:`.__str__` for usage example.
:param entities_per_depth: the result of :py:meth:`.find_entities_per_depth`
:type entities_per_depth: `dict int: (int, int)` or None
:returns: number of entities in this and child nodes
:rtype: int
"""
if entities_per_depth is not None:
return sum(entities_per_depth.values())
container = { 'result': 0 }
def handler(curr, container=container):
container['result'] += len(curr.entities)
self._iter_helper(handler)
return container['result']
def calculate_avg_ents_per_leaf(self):
"""
Calculate the average number of entities per leaf node on this and child
quad trees.
In the ideal case, the average entities per leaf is equal to the bucket size,
implying maximum efficiency. Note that, as always with averages, this might
be misleading if this tree has reached its max depth.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: average number of entities at each leaf node
:rtype: :class:`numbers.Number`
"""
container = { 'leafs': 0, 'total': 0 }
def handler(curr, container=container):
if not curr.children:
container['leafs'] += 1
container['total'] += len(curr.entities)
self._iter_helper(handler)
return container['total'] / container['leafs']
def calculate_weight_misplaced_ents(self, sum_entities=None):
"""
Calculate a rating for misplaced entities.
A misplaced entity is one that is not on a leaf node. That weight is multiplied
by 4*remaining maximum depth of that node, to indicate approximately how
many additional calculations are required.
The result is then divided by the total number of entities on this node (either
calculated using :py:meth:`.sum_entities` or provided) to get the approximate
cost of the misplaced nodes in comparison with the placed nodes. A value greater
than 1 implies a different tree type (such as r-tree or kd-tree) should probably be
used.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:param sum_entities: the number of entities on this node
:type sum_entities: int or None
:returns: weight of misplaced entities
:rtype: :class:`numbers.Number`
"""
# this iteration requires more context than _iter_helper provides.
# we must keep track of parents as well in order to correctly update
# weights
nonleaf_to_max_child_depth_dict = {}
# stack will be (quadtree, list (of parents) or None)
_stack = deque()
_stack.append((self, None))
while _stack:
curr, parents = _stack.pop()
if parents:
for p in parents:
nonleaf_to_max_child_depth_dict[p] = max(nonleaf_to_max_child_depth_dict.get(p, 0), curr.depth)
if curr.children:
new_parents = list(parents) if parents else []
new_parents.append(curr)
for child in curr.children:
_stack.append((child, new_parents))
_weight = 0
for nonleaf, maxchilddepth in nonleaf_to_max_child_depth_dict.items():
_weight += len(nonleaf.entities) * 4 * (maxchilddepth - nonleaf.depth)
_sum = self.sum_entities() if sum_entities is None else sum_entities
return _weight / _sum
def __repr__(self):
"""
Create an unambiguous representation of this quad tree.
This is implemented iteratively.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(1, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(bucket_size=1, max_depth=5, location=rect2(width=100, height=100, mincorner=vector2(x=0, y=0)), depth=0, entities=[], children=[quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=5, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=95, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=50.0)), depth=1, entities=[], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=50.0)), depth=1, entities=[], children=None)])
:returns: unambiguous, recursive representation of this quad tree
:rtype: string
"""
return "quadtree(bucket_size={}, max_depth={}, location={}, depth={}, entities={}, children={})".format(self.bucket_size, self.max_depth, repr(self.location), self.depth, self.entities, self.children)
def __str__(self):
"""
Create a human-readable representation of this quad tree
.. caution::
Because of the complexity of quadtrees it takes a fair amount of calculation to
produce something somewhat legible. All returned statistics have paired functions.
This uses only iterative algorithms to calculate statistics.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(2, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(at rect(100x100 at <0, 0>) with 0 entities here (2 in total); (nodes, entities) per depth: [ 0: (1, 0), 1: (4, 2) ] (allowed max depth: 5, actual: 1), avg ent/leaf: 0.5 (target 1), misplaced weight 0.0 (0 best, >1 bad)
print(_tree)
:returns: human-readable representation of this quad tree
:rtype: string
"""
nodes_per_depth = self.find_nodes_per_depth()
_ents_per_depth = self.find_entities_per_depth()
_nodes_ents_per_depth_str = "[ {} ]".format(', '.join("{}: ({}, {})".format(dep, nodes_per_depth[dep], _ents_per_depth[dep]) for dep in nodes_per_depth.keys()))
_sum = self.sum_entities(entities_per_depth=_ents_per_depth)
_max_depth = max(_ents_per_depth.keys())
_avg_ent_leaf = self.calculate_avg_ents_per_leaf()
_mispl_weight = self.calculate_weight_misplaced_ents(sum_entities=_sum)
return "quadtree(at {} with {} entities here ({} in total); (nodes, entities) per depth: {} (allowed max depth: {}, actual: {}), avg ent/leaf: {} (target {}), misplaced weight {} (0 best, >1 bad)".format(self.location, len(self.entities), _sum, _nodes_ents_per_depth_str, self.max_depth, _max_depth, _avg_ent_leaf, self.bucket_size, _mispl_weight)
@staticmethod
def get_code():
"""
Get the code for the QuadTree class
:returns: code for QuadTree
:rtype: string
"""
return inspect.getsource(QuadTree) | mit | 3,215,054,708,170,512,000 | 39.937722 | 899 | 0.586325 | false |
maheshgaya/lips-with-maps | machine-learning/python/first_model.py | 1 | 4736 | import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn import preprocessing
# importing data and munging
constant_data = pd.read_csv('full_library_xt875.csv')
#normalizing data
#normalization = lambda df: (df - df.mean()) / (df.max() - df.min())
#constant_data = normalization(constant_data)
t_data = constant_data[:2787]
pred_data = t_data[['xPosition', 'yPosition']]
t_data = t_data.drop('Latitude', 1)
t_data = t_data.drop('Longitude', 1)
t_data = t_data.drop('xPosition', 1)
t_data = t_data.drop('yPosition', 1)
tp_data = constant_data[2789:]
pred_t_data = tp_data[['xPosition', 'yPosition']]
tp_data = tp_data.drop('Latitude', 1)
tp_data = tp_data.drop('Longitude', 1)
tp_data = tp_data.drop('xPosition', 1)
tp_data = tp_data.drop('yPosition', 1)
example = constant_data[2789:2791]
testing = example[['xPosition', 'yPosition']]
example = example.drop('Latitude',1)
example = example.drop('Longitude',1)
example = example.drop('xPosition',1)
example = example.drop('yPosition',1)
print(len(tp_data))
print(pred_data)
#paramters
learning_rate = 0.005
training_epochs = 100000
batch_size = 100
display_step = 1
#network paramters
n_input = 170
n_classes = 2
n_hidden_1 = 86
n_hidden_2 = 52
n_hidden_3 = 21
n_hidden_4 = 13
#tf Graph input
x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float', [None, n_classes])
#create model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with relu activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with relu activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with relu activation
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with relu activation
'''layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)'''
#output layer with linear activation
out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
#'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'b3': tf.Variable(tf.random_normal([n_hidden_3])),
#'b4': tf.Variable(tf.random_normal([n_hidden_4])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
'''Training converges at a loss of about 779.46.
Normalizing the input data (see the commented-out normalization above)
may reduce this error further.
'''
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
saver = tf.train.Saver()
# Initializing the variables
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
total_batch_test = int(len(t_data)/batch_size)
#training cycle
for epoch in range(training_epochs):
ptr = 0
avg_cost = 0.0
total_batch = int(len(t_data)/batch_size)
#loop over all batches
for i in range(total_batch):
            inp, out = t_data[ptr:ptr+batch_size], pred_data[ptr:ptr+batch_size]
ptr+=batch_size
_, c = sess.run([optimizer, cost], feed_dict={x: inp, y: out})
# Compute average loss
avg_cost += c / total_batch
#print(avg_cost)
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%06d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
if avg_cost <= 0.11 and epoch > 100:
break
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#print("Accuracy:", accuracy.eval({x: tp_data[0:length],
# y: pred_t_data[0:length]}))
feed_dict = {x: example, y: testing}
#classi = pred.eval(feed_dict)
print(sess.run(pred, feed_dict))
saver.save(sess, '/Users/Joel/Desktop/Research/lgps.ckpt')
# Ignore Latitude and Longitude
# Predict formula for converstion of x and y position infer Latitude and Longitude
| apache-2.0 | -4,449,675,825,437,727,000 | 29.753247 | 82 | 0.644848 | false |
mablae/weblate | weblate/trans/south_migrations/0027_auto__chg_field_subproject_template.py | 1 | 15556 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (
('lang', '0003_auto__add_field_language_plural_type'),
)
def forwards(self, orm):
# Changing field 'SubProject.template'
db.alter_column('trans_subproject', 'template', self.gf('django.db.models.fields.CharField')(max_length=200, null=False))
def backwards(self, orm):
# Changing field 'SubProject.template'
db.alter_column('trans_subproject', 'template', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lang.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'direction': ('django.db.models.fields.CharField', [], {'default': "'ltr'", 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'plural_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'trans.change': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Change'},
'action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'trans.check': {
'Meta': {'object_name': 'Check'},
'check': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"})
},
'trans.comment': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Comment'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.dictionary': {
'Meta': {'ordering': "['source']", 'object_name': 'Dictionary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'trans.indexupdate': {
'Meta': {'object_name': 'IndexUpdate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']"})
},
'trans.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project'},
'commit_message': ('django.db.models.fields.TextField', [], {'default': "'Translated using Weblate (%(language_name)s)\\n\\nCurrently translated at %(translated_percent)s%% (%(translated)s of %(total)s strings)'"}),
'committer_email': ('django.db.models.fields.EmailField', [], {'default': "'[email protected]'", 'max_length': '75'}),
'committer_name': ('django.db.models.fields.CharField', [], {'default': "'Weblate'", 'max_length': '200'}),
'enable_acl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'merge_style': ('django.db.models.fields.CharField', [], {'default': "'merge'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'new_lang': ('django.db.models.fields.CharField', [], {'default': "'contact'", 'max_length': '10'}),
'push_on_commit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'set_translation_team': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'web': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'trans.subproject': {
'Meta': {'ordering': "['project__name', 'name']", 'unique_together': "(('project', 'name'), ('project', 'slug'))", 'object_name': 'SubProject'},
'allow_translation_propagation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'branch': ('django.db.models.fields.CharField', [], {'default': "'master'", 'max_length': '50'}),
'file_format': ('django.db.models.fields.CharField', [], {'default': "'auto'", 'max_length': '50'}),
'filemask': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'push': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'report_source_bugs': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'repoweb': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'trans.suggestion': {
'Meta': {'object_name': 'Suggestion'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'target': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.translation': {
'Meta': {'ordering': "['language__name']", 'object_name': 'Translation'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'fuzzy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'lock_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lock_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'subproject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.SubProject']"}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'translated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
},
'trans.unit': {
'Meta': {'ordering': "['position']", 'object_name': 'Unit'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'flags': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'fuzzy': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'previous_source': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {}),
'target': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'translated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']"})
}
}
complete_apps = ['trans']
| gpl-3.0 | -2,850,618,282,719,229,000 | 74.868293 | 227 | 0.553462 | false |
NoRedInk/elm-ops-tooling | elm_deps_upgrade.py | 1 | 4701 | #! /usr/bin/env python
from __future__ import print_function
import sys
import json
import requests
import struct
import argparse
def load_all_packages(elm_version, url=None):
if url is None:
url = "http://package.elm-lang.org/all-packages?elm-package-version="
payload = requests.get("{url}{elm_version}".format(
url=url,
elm_version=elm_version
))
return { item['name'] : item for item in payload.json() }
def load_versions(package_name, url=None):
if url is None:
url = "http://package.elm-lang.org/versions?name="
payload = requests.get("{url}{package_name}".format(
url=url,
package_name=package_name
))
return payload.content
def load_local_packages(elm_package):
with open(elm_package) as f:
return json.load(f)['dependencies']
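# Dependency constraints in elm-package.json are typically of the form
# "4.0.0 <= v < 5.0.0"; top_range() extracts the highest version still
# allowed by such a constraint.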
def top_range(field):
only_end = field[field.index('v'):]
if '=' in only_end:
return only_end.split('=')[-1].strip()
if '<' in only_end:
number = only_end.split('<')[-1].strip()
if patch(number) == 0:
if minor(number) == 0:
return '{maj}.{min}.{pat}'.format(
maj = major(number) - 1,
min = 9999999,
pat = 0 )
            # upper bound like "< 5.1.0": the highest allowed version is 5.0.x
            return '{maj}.{min}.{pat}'.format(
                maj = major(number),
                min = minor(number) - 1,
                pat = 0 )
        # upper bound like "< 5.1.3": the highest allowed version is 5.1.2
        return '{maj}.{min}.{pat}'.format(
            maj = major(number),
            min = minor(number),
            pat = patch(number) - 1 )
def major(version):
return int(version.split('.')[0])
def minor(version):
return int(version.split('.')[1])
def patch(version):
return int(version.split('.')[2])
def get_major_upgrades(top, versions):
major_top = major(top)
return [ version for version in versions if major(version) > major_top ]
def get_minor_upgrades(top, versions):
major_top = major(top)
minor_top = minor(top)
return [ version for version in versions if minor(version) > minor_top and major(version) == major_top ]
def get_patch_upgrades(top, versions):
major_top = major(top)
minor_top = minor(top)
patch_top = patch(top)
return [ version for version in versions
        if major(version) == major_top and minor_top == minor(version) and patch(version) > patch_top ]
def find_newer_versions(local_deps, remote_deps):
upgrade_suggestions = {}
for (dep, version) in local_deps.items():
if dep not in remote_deps:
continue
current_version = top_range(version)
patches = get_patch_upgrades(current_version, remote_deps[dep]['versions'])
minors = get_minor_upgrades(current_version, remote_deps[dep]['versions'])
majors = get_major_upgrades(current_version, remote_deps[dep]['versions'])
upgrade_suggestions[dep] = {
'patches': patches,
'minors': minors,
'majors': majors
}
return upgrade_suggestions
def newest_version(suggestions):
if suggestions['majors']:
return suggestions['majors'][-1]
elif suggestions['minors']:
        return suggestions['minors'][-1]
else:
return suggestions['patches'][-1]
def print_newer_versions(local_deps, remote_deps):
upgrade_suggestions = []
for (dep, suggestions) in find_newer_versions(local_deps, remote_deps).items():
patches = suggestions['patches']
minors = suggestions['minors']
majors = suggestions['majors']
if len(patches) > 0:
upgrade_suggestions.append(
'Patches available for {dep}: [{patches}]'.format(dep=dep, patches=', '.join(patches))
)
if len(minors) > 0:
upgrade_suggestions.append(
'Minors available for {dep}: [{minors}]'.format(dep=dep, minors=', '.join(minors))
)
if len(majors) > 0:
upgrade_suggestions.append(
'Majors available for {dep}: [{majors}]'.format(dep=dep, majors=', '.join(majors))
)
if not upgrade_suggestions:
print('No upgrades available')
else:
print('\n'.join(upgrade_suggestions))
def main():
parser = argparse.ArgumentParser(description='Check deps file for possible upgrades')
parser.add_argument('--elm-version', help='specify your current elm version', default='0.18')
parser.add_argument('local')
args = parser.parse_args()
local = load_local_packages(args.local)
remote = load_all_packages(args.elm_version)
print_newer_versions(local, remote)
if __name__ == '__main__':
main()
| bsd-3-clause | 6,115,484,445,367,418,000 | 27.840491 | 108 | 0.589236 | false |
ledtvavs/repository.ledtv | script.module.nanscrapers/lib/nanscrapers/scraperplugins/onlinemovies.py | 7 | 2640 | import re
import requests
from ..scraper import Scraper
class Onlinemovies(Scraper):
domains = ['onlinemovies.tube']
name = "onlinemovies"
sources = []
def __init__(self):
self.base_link = 'http://onlinemovies.tube/'
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_url = self.base_link+'watch/'+title.replace(' ','-')+'-'+year+'/'
html = requests.get(start_url).text
match = re.compile('<iframe.+?src="(.+?)"').findall(html)
for url in match:
if 'google' in url:
pass
elif 'youtube' in url:
pass
elif 'openload' in url:
pass
elif 'estream' in url:
self.sources.append({'source': 'estream', 'quality': 'SD', 'scraper': self.name, 'url': url,'direct': False})
elif 'clxmovies' in url:
html2 = requests.get(url).text
match2 = re.compile('{file: "(.+?)",label:"(.+?)",type: ".+?"}').findall(html2)
for url2,p in match2:
self.sources.append({'source': 'google', 'quality': p, 'scraper': self.name, 'url': url2,'direct': True})
except:
pass
return self.sources
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
if len(season) == 1:
season = '0'+str(season)
if len(episode) == 1:
episode = '0'+str(episode)
start_url = self.base_link+'episode/'+title.replace(' ','-')+'-s'+season+'e'+episode+'/'
html = requests.get(start_url).text
match = re.compile('<iframe.+?src="(.+?)"').findall(html)
for url in match:
if 'google' in url:
pass
elif 'youtube' in url:
pass
elif 'openload' in url:
pass
elif 'estream' in url:
self.sources.append({'source': 'estream', 'quality': 'SD', 'scraper': self.name, 'url': url,'direct': False})
elif 'clxmovies' in url:
html2 = requests.get(url).text
match2 = re.compile('{file: "(.+?)",label:"(.+?)",type: ".+?"}').findall(html2)
for url2,p in match2:
self.sources.append({'source': 'google', 'quality': p, 'scraper': self.name, 'url': url2,'direct': True})
return self.sources
except:
pass
| gpl-3.0 | 1,984,699,316,680,473,300 | 40.25 | 129 | 0.475 | false |
jmrodri/sm-photo-tool | src/cookietransport.py | 1 | 2041 | #
# code copied from
# http://rocketscience.itteco.org/2010/01/10/sending-cookie-via-xmlrpclib/
# by Nazar Leush
#
# changes added by jesus m. rodriguez
#
import xmlrpclib
from xmlrpclib import ProtocolError, Fault
from Cookie import _quote
class CookieTransport(xmlrpclib.Transport):
def __init__(self, cookies=None, *args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.cookies = cookies
##
# Send a complete request, and parse the response.
#
# @param host Target host.
# @param handler Target PRC handler.
# @param request_body XML-RPC request body.
# @param verbose Debugging flag.
# @return Parsed response.
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
# Custom cookies.
self.send_cookies(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except Fault:
raise
except Exception:
# All unexpected errors leave connection in
# a strange state, so we clear it.
self.close()
raise
#discard any response data and raise exception
if (response.getheader("content-length", 0)):
response.read()
raise ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
def send_cookies(self, connection):
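        # Attach any stored cookies to the outgoing request as Cookie headers.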
if self.cookies:
for k, v in self.cookies.iteritems():
connection.putheader(
"Cookie", ";".join(["%s=%s" % (k, _quote(v))]))
| gpl-2.0 | -4,333,939,322,342,350,000 | 27.746479 | 74 | 0.579128 | false |
Reddone/CarIncidentJupyter | main.py | 1 | 1735 | import os
import pandas as pd
import utils
# Prepare the dataset for the analysis
sem_path = r"0_Sem_2014"
sem1_path = r"1_Sem_2014.csv"
sem2_path = r"2_Sem_2014.csv"
if not os.path.isfile(sem_path):
utils.join_dataframes(sem_path, sem1_path, sem2_path)
dataset = pd.read_pickle(sem_path)
# Assign correct values to columns
dataset = utils.assign_columns(dataset)
# Remove columns we don't need
dataset = utils.remove_columns(dataset)
# Fix broken values
dataset = utils.fix_columns(dataset)
# Create new features using DataOraIncidente column
dataset = utils.expand_DataOraIncidente(dataset)
# Create new features using NUM columns
dataset = utils.expand_NUM(dataset)
# Create new features using DecedutoDopo column
dataset = utils.expand_DecedutoDopo(dataset)
# Adjust NaturaIncidente column
dataset = utils.adjust_NaturaIncidente(dataset)
# Adjust ParticolaritaStrade column
dataset = utils.adjust_ParticolaritaStrade(dataset)
# Adjust FondoStradale column
dataset = utils.adjust_FondoStradale(dataset)
# Adjust Pavimentazione column
dataset = utils.adjust_Pavimentazione(dataset)
# Adjust CondizioneAtmosferica column
dataset = utils.adjust_CondizioneAtmosferica(dataset)
# Adjust Traffico column
dataset = utils.adjust_Traffico(dataset)
# Adjust TipoVeicolo column
dataset = utils.adjust_TipoVeicolo(dataset)
# Adjust TipoPersona column
dataset = utils.adjust_TipoPersona(dataset)
# Adjust AnnoNascita column
dataset = utils.adjust_AnnoNascita(dataset)
# Adjust Sesso column
dataset = utils.adjust_Sesso(dataset)
# Adjust TipoLesione column
dataset = utils.adjust_TipoLesione(dataset)
save_path = r"0_CarIncident_2014"
save_path_csv = r"0_CarIncident_2014.csv"
dataset.to_pickle(save_path)
dataset.to_csv(save_path_csv)
| mit | 8,604,081,585,900,436,000 | 31.12963 | 57 | 0.796542 | false |
thinmanj/clean-bj | zbj.py | 1 | 5351 | import random
class Card(object):
SUITS = ('C', 'S', 'H', 'D')
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
def __init__(self, suit, rank):
if (suit in self.SUITS) and (rank in self.VALUES.keys()):
self.suit = suit
self.rank = rank
self.value = self.VALUES[rank]
else:
self.suit = None
self.rank = None
self.value = 0
print "Invalid card: ", suit, rank
def __str__(self):
return "%s(%s)" % (self.rank, self.suit, )
def get_suit(self):
return self.suit
def get_rank(self):
return self.rank
def get_value(self):
return self.value
class Hand(object):
def __init__(self):
self.clear()
def __len__(self):
return len(self.hand)
def __str__(self):
return ', '.join([str(card) for card in self.hand])
def add_card(self, card):
self.hand.append(card)
if card.get_rank() == 'A':
self.ace = True
self.value += card.get_value()
if self.value > 21:
self.value = 0
self.ace = 0
raise Exception("Busted!")
def get_value(self):
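        # Count one ace as 11 (i.e. add 10) whenever that does not bust the hand.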
if self.ace and self.value <= 10:
return self.value + 10
else:
return self.value
def hit(self, deck):
card = deck.deal_card()
print str(card)
self.add_card(card)
    def busted(self):
        return self.get_value() > 21
def clear(self):
self.hand = []
self.ace = False
self.value = 0
class Deck(object):
def __init__(self):
self.clear()
def __len__(self):
return len(self.deck)
def __str__(self):
return ', '.join([str(card) for card in self.deck])
def shuffle(self):
random.shuffle(self.deck)
def deal_card(self):
return self.deck.pop()
def clear(self):
self.deck = [Card(s, r) for s in Card.SUITS for r in Card.VALUES.keys()]
self.shuffle()
class Game(object):
def __init__(self):
self.in_play = True
self.player_chips = 100
self.game_chips = 0
self.deal()
def __str__(self):
if not self.in_play:
return ""
return """
Game:
Game chips: %d
Dealer hand: %s
Player hand: %s
Player chips: %s
""" % (self.game_chips, str(self.dealer), str(self.player), self.player_chips)
def won(self):
print "You won!"
self.player_chips += 2 * self.game_chips
self.game_chips = 0
self.in_play = False
def lost(self):
print "You lost!"
self.game_chips = 0
self.in_play = False
def tie(self):
print "It's a tie!"
self.player_chips += self.game_chips
self.game_chips = 0
self.in_play = False
def deal(self, chips=1):
if self.player_chips <= 0 or chips > self.player_chips:
raise Exception("No enough chips.")
self.in_play = True
self.deck = Deck()
self.dealer = Hand()
self.player = Hand()
self.player_chips -= chips
self.game_chips = chips
for x in range(2):
self.dealer.hit(self.deck)
self.player.hit(self.deck)
def hit(self):
if not self.in_play:
return
try:
self.player.hit(self.deck)
except Exception:
self.in_play = False
print "You went bust!"
self.lost()
raise Exception("Lost!")
if self.player.get_value() == 21:
print "Black Jack!"
self.won()
raise Exception("Won!")
def stand(self):
if not self.in_play:
return
while self.in_play and self.dealer.get_value() < 17:
try:
self.dealer.hit(self.deck)
except Exception:
print "Dealer Busted!"
self.in_play = False
if self.player.get_value() > self.dealer.get_value():
self.won()
elif self.player.get_value() < self.dealer.get_value():
self.lost()
else:
self.tie()
def play():
game = Game()
try:
while True:
try:
while True:
print str(game)
selection = raw_input("Do you whan to (h)it or to (s)tand: ")
if selection == "h":
game.hit()
elif selection == "s":
game.stand()
raise Exception
else:
print "Wrong selection..."
except:
selection_flag = True
while selection_flag:
selection = raw_input("New (d)eal? or (e)xit: ")
if selection == "d":
game.deal(1)
selection_flag = False
elif selection == "e":
raise Exception
else:
print "Wrong selection..."
except:
print "See you next time"
if __name__ == "__main__":
play()
| apache-2.0 | -2,957,766,947,423,949,300 | 23.888372 | 108 | 0.470566 | false |
diegojromerolopez/djanban | src/djanban/apps/members/migrations/0001_initial.py | 1 | 1626 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-09 18:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('api_key', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello API key')),
('api_secret', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello API secret')),
('token', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello token')),
('token_secret', models.CharField(default=None, max_length=128, null=True, verbose_name='Trello token secret')),
('uuid', models.CharField(max_length=128, unique=True, verbose_name='Trello member uuid')),
('trello_username', models.CharField(max_length=128, verbose_name='Trello username')),
('initials', models.CharField(max_length=8, verbose_name='User initials in Trello')),
('user', models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='member', to=settings.AUTH_USER_MODEL, verbose_name='Associated user')),
],
),
]
| mit | 8,655,522,309,848,991,000 | 48.272727 | 201 | 0.643911 | false |
neviim/forza | getarq.py | 1 | 2158 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
import json
import pymongo
from pymongo import MongoClient
# mongodb
def get_db():
client = MongoClient('localhost:27017')
db = client.forza6db
return db
def add_dados(db, data):
db.countries.insert(data)
def get_country(db, colecao):
return db.get_collection(colecao).find({}).count()
# ---
# generate csv file
def gera_csv(localFilePath):
    # check whether the file has a .csv extension
if (os.path.isfile(localFilePath) and localFilePath.endswith(".csv")):
        # generate csv file
with open(localFilePath, 'r', encoding='utf-8') as csvfile:
#sniff
fileDialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
            # create a CSV reader
myReader = csv.reader(csvfile, dialect=fileDialect)
for row in myReader:
print(row)
# generate json file
def gera_json(localFilePath):
if (os.path.isfile(localFilePath) and localFilePath.endswith(".csv")):
        # open the forza database
db = get_db()
        # generate json file
with open(localFilePath, 'r', encoding='utf-8') as csvfile:
            # sniff to detect the file format
fileDialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
            # read the CSV file from the directory.
dictReader = csv.DictReader(csvfile, dialect=fileDialect)
for row in dictReader:
                # for the cars collection
if get_country(db, 'carros') == 0:
db.carros.insert(row)
#print(row)
return
# read the files
def leArquivos(filePath):
#get all files in the given folder
fileListing = os.listdir(filePath)
for myFile in fileListing:
        # get the file path
localFilePath = os.path.join(filePath, myFile)
gera_json(localFilePath)
return
# initialize...
if __name__ == '__main__':
currentPath = os.path.dirname(__file__)
filePath = os.path.abspath(os.path.join(currentPath, os.pardir,os.pardir,'_github/forza/csv'))
leArquivos(filePath)
| mit | 1,924,125,129,973,735,400 | 24.666667 | 98 | 0.610853 | false |
SambaDemon/python_vantiv | vantiv/request/model/identification.py | 1 | 1178 | from ..schemas import Schema, fields
from ..utils import FrozenMixin
from ..enums import (EnumField, CustomerTypeEnum, CurrencyEnum,
ResidenceStatusEnum)
class IdentificationSchema(Schema):
Ssn = fields.String()
BirthDate = fields.Date()
CustomerRegistrationDate = fields.Date()
IncomeAmount = fields.Decimal()
CustomerCheckingAccount = fields.String()
CustomerSavingsAccount = fields.String()
EmployerName = fields.String()
CustomerWorkTelephone = fields.String()
YearsAtResidence = fields.String()
YearsAtEmployer = fields.String()
CustomerType = EnumField(CustomerTypeEnum, by_value=True)
IncomeCurrency = EnumField(CurrencyEnum, by_value=True)
ResidenceStatus = EnumField(ResidenceStatusEnum, by_value=True)
class Identification(FrozenMixin):
Ssn = None
BirthDate = None
CustomerRegistrationDate = None
IncomeAmount = None
CustomerCheckingAccount = None
CustomerSavingsAccount = None
EmployerName = None
CustomerWorkTelephone = None
YearsAtResidence = None
YearsAtEmployer = None
CustomerType = None
IncomeCurrency = None
ResidenceStatus = None
| mit | 5,504,557,734,375,287,000 | 31.722222 | 67 | 0.729202 | false |
Fantomas42/veliberator | veliberator/xml_wrappers.py | 1 | 1279 | """Xml functions for helping in convertion of data"""
def xml_station_status_wrapper(xmlnode):
"""Convert Station status xml
to a usable dict"""
def node_value(name):
return xmlnode.getElementsByTagName(name)[0].childNodes[0].data
return {'total': int(node_value('total')),
'available': int(node_value('available')),
'free': int(node_value('free')),
'ticket': int(node_value('ticket')) == 1}
def xml_station_information_wrapper(xmlnode):
"""Convert Station information xml
to a usable dict"""
city = ''
postal_code = ''
address = xmlnode.getAttribute('address')[:-1].strip()
address_parts = xmlnode.getAttribute('fullAddress').split()
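    # Heuristic: the first 5-digit token of the full address is taken as the
    # postal code and everything after it as the city name.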
for p in address_parts:
if len(p) == 5 and p.isdigit():
postal_code = p
city = ' '.join(address_parts[address_parts.index(p) + 1:])
break
return {'id': int(xmlnode.getAttribute('number')),
'address': address,
'postal_code': postal_code,
'city': city,
'lat': xmlnode.getAttribute('lat'),
'lng': xmlnode.getAttribute('lng'),
'opened': xmlnode.getAttribute('open') == '1',
'bonus': xmlnode.getAttribute('bonus') == '1'}
| bsd-3-clause | 3,755,493,492,201,253,000 | 33.567568 | 71 | 0.577795 | false |
tlemoult/spectroDb | tools/clean-travail-ISIS.py | 1 | 1036 | import sys,os
import urllib.request, urllib.parse, urllib.error,glob
import astropy.io.fits as fits
import zipfile
import shutil
def listdirectory(path):
fichier=[]
l = glob.glob(path+'\\*')
for i in l:
if os.path.isdir(i): fichier.extend(listdirectory(i))
else: fichier.append(i)
return fichier
def listdirectory2(path):
a=[]
l = glob.glob(path+'\\*')
for i in l:
if os.path.isdir(i):
f=listdirectory(i)
a.append(f)
return a
print("Cleanup script for the ISIS working folders")
BasePath=sys.path[0]
dbSourcePath=BasePath
PathWeb=BasePath
dirList= os.listdir(dbSourcePath)
dirList=sorted(dirList)
for path in listdirectory(BasePath):
file=os.path.basename(path)
if (file.startswith('blaze_') or file.startswith('calib_') or file.startswith('flat_') or file.startswith('#') or (file.endswith('.dat') and not file.startswith('reponse')) or (file.startswith('@') and not file.startswith('@pro')) ):
print(('remove:'+path))
os.remove(path)
| mit | 5,727,460,823,284,896,000 | 26.263158 | 234 | 0.671815 | false |
nickgu/pydev | py3dev.py | 1 | 5350 | #! /bin/env python3
# encoding=utf-8
# author: nickgu
#
# Compatible with python3
#
import sys
import argparse
import logging
class ColorString:
TC_NONE ="\033[m"
TC_RED ="\033[0;32;31m"
TC_LIGHT_RED ="\033[1;31m"
TC_GREEN ="\033[0;32;32m"
TC_LIGHT_GREEN ="\033[1;32m"
TC_BLUE ="\033[0;32;34m"
TC_LIGHT_BLUE ="\033[1;34m"
TC_DARY_GRAY ="\033[1;30m"
TC_CYAN ="\033[0;36m"
TC_LIGHT_CYAN ="\033[1;36m"
TC_PURPLE ="\033[0;35m"
TC_LIGHT_PURPLE ="\033[1;35m"
TC_BROWN ="\033[0;33m"
TC_YELLOW ="\033[1;33m"
TC_LIGHT_GRAY ="\033[0;37m"
TC_WHITE ="\033[1;37m"
def __init__(self):
pass
@staticmethod
def colors(s, color):
return color + s + ColorString.TC_NONE
@staticmethod
def red(s): return ColorString.colors(s, ColorString.TC_RED)
@staticmethod
def yellow(s): return ColorString.colors(s, ColorString.TC_YELLOW)
@staticmethod
def green(s): return ColorString.colors(s, ColorString.TC_GREEN)
@staticmethod
def blue(s): return ColorString.colors(s, ColorString.TC_BLUE)
@staticmethod
def cyan(s): return ColorString.colors(s, ColorString.TC_CYAN)
def error(*args, on_screen=True):
if on_screen:
        tag = ColorString.yellow('[ERROR] ')
else:
tag = '[ERROR] '
print(tag, *args, file=sys.stderr)
def info(*args):
tag = '[INFO] '
print(tag, *args, file=sys.stderr)
class Arg(object):
'''
Sample code:
ag=Arg()
ag.str_opt('f', 'file', 'this arg is for file')
opt = ag.init_arg()
# todo with opt, such as opt.file
'''
def __init__(self, help='Lazy guy, no help'):
self.is_parsed = False;
#help = help.decode('utf-8').encode('gb18030')
self.__parser = argparse.ArgumentParser(description=help)
self.__args = None;
# -l --log
self.str_opt('log', 'l', 'logging level default=[error]', meta='[debug|info|error]');
def __default_tip(self, default_value=None):
if default_value==None:
return ''
return ' default=[%s]'%default_value
def bool_opt(self, name, iname, help=''):
#help = help.decode('utf-8').encode('gb18030')
self.__parser.add_argument(
'-'+iname,
'--'+name,
action='store_const',
const=1,
default=0,
help=help);
return
def str_opt(self, name, iname, help='', default=None, meta=None):
help = (help + self.__default_tip(default))#.decode('utf-8').encode('gb18030')
self.__parser.add_argument(
'-'+iname,
'--'+name,
metavar=meta,
help=help,
default=default);
pass
def var_opt(self, name, meta='', help='', default=None):
        help = (help + self.__default_tip(default))#.decode('utf-8').encode('gb18030')
if meta=='':
meta=name
self.__parser.add_argument(name,
metavar=meta,
help=help,
default=default)
pass
def init_arg(self, input_args=None):
if not self.is_parsed:
if input_args is not None:
self.__args = self.__parser.parse_args(input_args)
else:
self.__args = self.__parser.parse_args()
self.is_parsed = True;
if self.__args.log:
format='%(asctime)s %(levelname)8s [%(filename)18s:%(lineno)04d]: %(message)s'
if self.__args.log=='debug':
logging.basicConfig(level=logging.DEBUG, format=format)
logging.debug('log level set to [%s]'%(self.__args.log));
elif self.__args.log=='info':
logging.basicConfig(level=logging.INFO, format=format)
logging.info('log level set to [%s]'%(self.__args.log));
elif self.__args.log=='error':
logging.basicConfig(level=logging.ERROR, format=format)
logging.info('log level set to [%s]'%(self.__args.log));
else:
logging.error('log mode invalid! [%s]'%self.__args.log)
return self.__args
@property
def args(self):
if not self.is_parsed:
self.__args = self.__parser.parse_args()
self.is_parsed = True;
return self.__args;
def dp_to_generate_answer_range(data):
'''
data shape: (batch, clen, 2),
last dim indicates start/end prob.
'''
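    # dp[i] holds the best start probability among positions before i and
    # dp_sidx[i] the index it came from, so each end position is paired with
    # the best start seen so far.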
ans = []
l = data.shape[1]
data = data.cpu().numpy()
dp = [0.] * (l+1)
dp_sidx = [-1] * (l+1)
for b in data:
max_prob = 0
max_range = (0, 0)
dp[0] = 0
dp_sidx[0] = -1
for idx in range(l):
sp, ep = b[idx]
cur_end_prob = dp[idx] * ep
if cur_end_prob > max_prob:
max_prob = cur_end_prob
max_range = (dp_sidx[idx], idx)
if sp>dp[idx]:
dp[idx+1] = sp
dp_sidx[idx+1] = idx
else:
dp[idx+1] = dp[idx]
dp_sidx[idx+1] = dp_sidx[idx]
ans.append(max_range)
return ans
if __name__=='__main__':
pass
| mit | 2,808,130,532,025,652,700 | 28.558011 | 93 | 0.509907 | false |
AloneRoad/Inforlearn | common/component.py | 1 | 1678 | import logging
import os.path
from django.conf import settings
# public variable with the intent of referencing it in templates
# and allowing tests to easily adjust the values
loaded = {}
def install_components():
global loaded
root_dir = os.path.dirname(os.path.dirname(__file__))
component_dir = os.path.join(root_dir, 'components')
possibles = os.listdir(component_dir)
logging.info("Trying to load components in %s...", possibles)
for p in possibles:
# verify that we haven't manually disabled this in settings
is_enabled = getattr(settings, 'COMPONENT_%s_DISABLED' % (p.upper()), True)
if not is_enabled:
continue
path = os.path.join(component_dir, p)
if not os.path.isdir(path):
logging.debug("Not a dir %s", p)
continue
try:
loaded[p] = __import__('components.%s' % p, {}, {}, p)
logging.debug('Loaded component: %s', p)
except ValueError:
# bad module name, things like .svn and whatnot trigger this
continue
except ImportError:
import traceback
logging.debug('Exception loading component: %s', traceback.format_exc())
continue
def include(*names):
global loaded
for name in names:
rv = loaded.get(name)
if rv:
return rv
return rv
def require(*names):
mod = include(*names)
if not mod:
raise Exception("Ultimate doom")
return mod
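# Dict-like helper that returns a loaded component, falling back to its "dummy_<name>" counterpart.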
class LoadedOrDummy(object):
def __getitem__(self, key):
rv = include(key, "dummy_%s" % key)
if not rv:
raise KeyError(key)
return rv
def __contains__(self, key):
rv = include(key, "dummy_%s" % key)
if rv:
return True
return False
best = LoadedOrDummy()
| apache-2.0 | -5,935,113,229,335,788,000 | 24.424242 | 79 | 0.652563 | false |
Noahs-ARK/ARKcat | src/tokenizer.py | 1 | 3104 | import re
import nltk.data
from nltk.stem import WordNetLemmatizer
def split_sentences(text, decorate=False):
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = sent_tokenizer.sentences_from_text(text, realign_boundaries=True)
if decorate:
sentences = [sent + ' <SE>' for sent in sentences]
return sentences
def split_into_words(text, lemmatize=False, reattach=True, replace_numbers=True, split_off_quotes=True,
fix_semicolon_mistakes=True):
if fix_semicolon_mistakes:
text = fix_semicolons(text)
word_tokenizer = nltk.tokenize.TreebankWordTokenizer()
# get rid of certain character so that we can use those for special purposes
tokens = word_tokenizer.tokenize(text)
if reattach:
tokens = reattach_clitics(tokens)
if split_off_quotes:
tokens = split_off_quote_marks(tokens)
if lemmatize:
lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(token) for token in tokens]
if replace_numbers:
pattern = '^[0-9]+$'
tokens = [t if re.search(pattern, t) is None else '__NUM__' for t in tokens]
tokens = split_tokens(tokens, '.,')
return tokens
def reattach_clitics(tokens):
#clitic_pattern = '^\'(s|S|d|D|ve|VE|t|T|m|M|re|RE|ll|LL)'
#clitic_pattern = "^(n't|'ll|'re|'ve)"
clitic_pattern = "^((n't)|('s)|('m)|('re)|('ve)|('ll)|('d)|('l)|('t))$"
pop_list = []
# append clitics to previous words
for i in range(1, len(tokens)):
if re.search(clitic_pattern, tokens[i]):
tokens[i-1] += tokens[i]
if i not in pop_list:
pop_list.append(i)
pop_list.sort()
pop_list.reverse()
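    # Pop the merged clitic tokens from the end first so earlier indices stay valid.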
for i in pop_list:
tokens.pop(i)
return tokens
def split_off_quote_marks(tokens):
i = 0
pattern1 = r"^('+)(.+)"
while i < len(tokens):
token = tokens[i]
match = re.search(pattern1, token)
if match is not None:
tokens[i] = match.group(1)
tokens.insert(i+1, match.group(2))
i += 1
return tokens
def fix_semicolons(text):
pattern = "([a-z]+;(t|s|m))[^a-z]"
match = re.search(pattern, text)
if match is not None:
repl = re.sub(';', "'", match.group(1))
text = re.sub(match.group(1), repl, text)
return text
def make_ngrams(text, n, lemmatize=False, reattach=True, replace_numbers=True, split_off_quotes=True):
tokens = split_into_words(text, lemmatize=lemmatize, reattach=reattach, replace_numbers=replace_numbers,
split_off_quotes=split_off_quotes)
if n > 1:
N = len(tokens)
grams = [tokens[k:N-(n-1-k)] for k in range(n)]
tokens = map(u'_'.join, zip(*grams))
return tokens
def split_tokens(tokens, delimiters):
# split on and keep periods
tokens = [re.split('([' + delimiters + '])', token) for token in tokens]
# flatten
tokens = [token for sublist in tokens for token in sublist]
tokens = [token for token in tokens if token != '']
return tokens
| apache-2.0 | -1,482,344,033,208,116,700 | 28.846154 | 108 | 0.607281 | false |
FrankNagel/qlc | src/webapp/quanthistling/quanthistling/dictdata/languages.py | 1 | 51670 |
# -*- coding: utf8 -*-
list = [
{
'name' : u'Spanish',
'langcode': u'spa',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=spa'
},
{
'name' : u'English',
'langcode': u'eng',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=eng'
},
{
'name' : u'Portuguese',
'langcode': u'por',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=por'
},
{
'name' : u'Bora',
'langcode': u'boa',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=boa'
},
{
'name' : u'Huitoto Minica',
'langcode': u'hto',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=hto'
},
{
'name' : u'Huitoto Nüpode',
'langcode': u'hux',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=hux'
},
{
'name' : u'Huitoto Murui',
'langcode': u'huu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=huu'
},
{
'name' : u'Muinane',
'langcode': u'bmr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bmr'
},
{
'name' : u'Ocaina',
'langcode': u'oca',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=oca'
},
{
'name' : u'Achuar-Shiwiar',
'langcode': u'acu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=acu'
},
{
'name' : u'Aguaruna',
'langcode': u'agr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=agr'
},
{
'name' : u'Huambisa',
'langcode': u'hub',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=hub'
},
{
'name' : u'Shuar',
'langcode': u'jiv',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=jiv'
},
{
'name' : u'Achagua',
'langcode': u'aca',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=aca'
},
{
'name' : u'Awa-Cuaiquer',
'langcode': u'kwi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kwi'
},
{
'name' : u'Barí',
'langcode': u'mot',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mot'
},
{
'name' : u'Baniva',
'langcode': u'bvv',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bvv'
},
{
'name' : u'Waimaha',
'langcode': u'bao',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bao'
},
{
'name' : u'Barasana-Eduria',
'langcode': u'bsn',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bsn'
},
{
'name' : u'Cabiyarí',
'langcode': u'cbb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbb'
},
{
'name' : u'Chachi',
'langcode': u'cbi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbi'
},
{
'name' : u'Cuiba',
'langcode': u'cui',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cui'
},
{
'name' : u'Carijona',
'langcode': u'cbd',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbd'
},
{
'name' : u'Chimila',
'langcode': u'cbg',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbg'
},
{
'name' : u'Emberá-Chamí',
'langcode': u'cmi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cmi'
},
{
'name' : u'Carapana',
'langcode': u'cbc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbc'
},
{
'name' : u'Curripaco',
'langcode': u'kpc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kpc'
},
{
'name' : u'Emberá-Catío',
'langcode': u'cto',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cto'
},
{
'name' : u'Cubeo',
'langcode': u'cub',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cub'
},
{
'name' : u'Desano',
'langcode': u'des',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=des'
},
{
'name' : u'Malayo',
'langcode': u'mbp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mbp'
},
{
'name' : u'Emberá, Northern',
'langcode': u'emp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=emp'
},
{
'name' : u'Epena',
'langcode': u'sja',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/sja'
},
{
'name' : u'Guahibo',
'langcode': u'guh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=guh'
},
{
'name' : u'Guambiano',
'langcode': u'gum',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gum'
},
{
'name' : u'Guayabero',
'langcode': u'guo',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=guo'
},
{
'name' : u'Arhuaco',
'langcode': u'arh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=arh'
},
{
'name' : u'Inga',
'langcode': u'inb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=inb'
},
{
'name' : u'Hupdë',
'langcode': u'jup',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=jup'
},
{
'name' : u'Macaguán',
'langcode': u'mbn',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mbn'
},
{
'name' : u'Koreguaje',
'langcode': u'coe',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=coe'
},
{
'name' : u'Cacua',
'langcode': u'cbv',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbv'
},
{
'name' : u'Kogi',
'langcode': u'kog',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kog'
},
{
'name' : u'Camsá',
'langcode': u'kbh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kbh'
},
{
'name' : u'Macuna',
'langcode': u'myy',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=myy'
},
{
'name' : u'Nukak Makú',
'langcode': u'mbr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mbr'
},
{
'name' : u'Orejón',
'langcode': u'ore',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ore'
},
{
'name' : u'Páez',
'langcode': u'pbb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pbb'
},
{
'name' : u'Playero',
'langcode': u'gob',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gob'
},
{
'name' : u'Piapoco',
'langcode': u'pio',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pio'
},
{
'name' : u'Puinave',
'langcode': u'pui',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pui'
},
{
'name' : u'Piratapuyo',
'langcode': u'pir',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pir'
},
{
'name' : u'Resígaro',
'langcode': u'rgr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=rgr'
},
{
'name' : u'Secoya',
'langcode': u'sey',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=sey'
},
{
'name' : u'Siona',
'langcode': u'snn',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=snn'
},
{
'name' : u'Sáliba',
'langcode': u'slc',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/slc'
},
{
'name' : u'Siriano',
'langcode': u'sri',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=sri'
},
{
'name' : u'Tatuyo',
'langcode': u'tav',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tav'
},
{
'name' : u'Tucano',
'langcode': u'tuo',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tuo'
},
{
'name' : u'Emberá-Tadó',
'langcode': u'tdc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tdc'
},
{
'name' : u'Tanimuca-Retuarã',
'langcode': u'tnc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tnc'
},
{
'name' : u'Tunebo, Central',
'langcode': u'tuf',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tuf'
},
{
'name' : u'Tariana',
'langcode': u'tae',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tae'
},
{
'name' : u'Colorado',
'langcode': u'cof',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cof'
},
{
'name' : u'Totoro',
'langcode': u'ttk',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ttk'
},
{
'name' : u'Tuyuca',
'langcode': u'tue',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tue'
},
{
'name' : u'Woun Meu',
'langcode': u'noa',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=noa'
},
{
'name' : u'Guanano',
'langcode': u'gvc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gvc'
},
{
'name' : u'Wayuu',
'langcode': u'guc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=guc'
},
{
'name' : u'Yucuna',
'langcode': u'ycn',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ycn'
},
{
'name' : u'Yukpa',
'langcode': u'yup',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=yup'
},
{
'name' : u'Wajiara',
'langcode': u'yui',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=yui'
},
{
'name' : u'Záparo',
'langcode': u'zro',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=zro'
},
{
'name' : u'Cashibo-Cacataibo',
'langcode': u'cbr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbr'
},
{
'name' : u'Kaxararí',
'langcode': u'ktx',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ktx'
},
{
'name' : u'Shipibo-Conibo',
'langcode': u'shp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=shp'
},
{
'name' : u'Matsés',
'langcode': u'mcf',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mcf'
},
{
'name' : u'Kashinawa',
'langcode': u'cbs',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbs'
},
{
'name' : u'Amahuaca',
'langcode': u'amc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=amc'
},
{
'name' : u'Musak',
'langcode': u'mmq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mmq'
},
{
'name' : u'Apali',
'langcode': u'ena',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ena'
},
{
'name' : u'Paynamar',
'langcode': u'pmr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pmr'
},
{
'name' : u'Mum',
'langcode': u'kqa',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kqa'
},
{
'name' : u'Nend',
'langcode': u'anh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=anh'
},
{
'name' : u'Sileibi',
'langcode': u'sbq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=sbq'
},
{
'name' : u'Sinsauru',
'langcode': u'snz',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=snz'
},
{
'name' : u'Asas',
'langcode': u'asd',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=asd'
},
{
'name' : u'Sausi',
'langcode': u'ssj',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ssj'
},
{
'name' : u'Kesawai',
'langcode': u'xes',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=xes'
},
{
'name' : u'Dumpu',
'langcode': u'wtf',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=wtf'
},
{
'name' : u'Arawum',
'langcode': u'awm',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=awm'
},
{
'name' : u'Kolom',
'langcode': u'klm',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=klm'
},
{
'name' : u'Siroi',
'langcode': u'ssd',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ssd'
},
{
'name' : u'Lemio',
'langcode': u'lei',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=lei'
},
{
'name' : u'Pulabu',
'langcode': u'pup',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pup'
},
{
'name' : u'Yabong',
'langcode': u'ybo',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ybo'
},
{
'name' : u'Ganglau',
'langcode': u'ggl',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ggl'
},
{
'name' : u'Saep',
'langcode': u'spd',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=spd'
},
{
'name' : u'Sop',
'langcode': u'urw',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=urw'
},
{
'name' : u'Sumau',
'langcode': u'six',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=six'
},
{
'name' : u'Urigina',
'langcode': u'urg',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=urg'
},
{
'name' : u'Danaru',
'langcode': u'dnr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=dnr'
},
{
'name' : u'Uya',
'langcode': u'usu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=usu'
},
{
'name' : u'Ogea',
'langcode': u'eri',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=eri'
},
{
'name' : u'Duduela',
'langcode': u'duk',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=duk'
},
{
'name' : u'Kwato',
'langcode': u'kop',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kop'
},
{
'name' : u'Rerau',
'langcode': u'rea',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=rea'
},
{
'name' : u'Jilim',
'langcode': u'jil',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=jil'
},
{
'name' : u'Yangulam',
'langcode': u'ynl',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ynl'
},
{
'name' : u'Anjam',
'langcode': u'boj',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=boj'
},
{
'name' : u'Male',
'langcode': u'mdc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mdc'
},
{
'name' : u'Bongu',
'langcode': u'bpu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bpu'
},
{
'name' : u'Sam',
'langcode': u'snx',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=snx'
},
{
'name' : u'Bargam',
'langcode': u'mlp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mlp'
},
{
'name' : u'Dimir',
'langcode': u'dmc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=dmc'
},
{
'name' : u'Malas',
'langcode': u'mkr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mkr'
},
{
'name' : u'Brem',
'langcode': u'buq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=buq'
},
{
'name' : u'Korak',
'langcode': u'koz',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=koz'
},
{
'name' : u'Waskia',
'langcode': u'wsk',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=wsk'
},
{
'name' : u'Mala',
'langcode': u'ped',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ped'
},
{
'name' : u'Maia',
'langcode': u'sks',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=sks'
},
{
'name' : u'Maiani',
'langcode': u'tnh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tnh'
},
{
'name' : u'Mauwake',
'langcode': u'mhl',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mhl'
},
{
'name' : u'Bepour',
'langcode': u'bie',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bie'
},
{
'name' : u'Moere',
'langcode': u'mvq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mvq'
},
{
'name' : u'Kowaki',
'langcode': u'xow',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=xow'
},
{
'name' : u'Mawak',
'langcode': u'mjj',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mjj'
},
{
'name' : u'Pamosu',
'langcode': u'hih',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=hih'
},
{
'name' : u'Musar',
'langcode': u'mmi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mmi'
},
{
'name' : u'Wanambre',
'langcode': u'wnb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=wnb'
},
{
'name' : u'Kobol',
'langcode': u'kgu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kgu'
},
{
'name' : u'Pal',
'langcode': u'abw',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=abw'
},
{
'name' : u'Usan',
'langcode': u'wnu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=wnu'
},
{
'name' : u'Yaben',
'langcode': u'ybm',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ybm'
},
{
'name' : u'Yarawata',
'langcode': u'yrw',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=yrw'
},
{
'name' : u'Bilakura',
'langcode': u'bql',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bql'
},
{
'name' : u'Parawen',
'langcode': u'prw',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=prw'
},
{
'name' : u'Ukuriguma',
'langcode': u'ukg',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ukg'
},
{
'name' : u'Amaimon',
'langcode': u'ali',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ali'
},
{
'name' : u'Kare',
'langcode': u'kmf',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kmf'
},
{
'name' : u'Girawa',
'langcode': u'bbr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bbr'
},
{
'name' : u'Munit',
'langcode': u'mtc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mtc'
},
{
'name' : u'Kein',
'langcode': u'bmh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bmh'
},
{
'name' : u'Sihan',
'langcode': u'snr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=snr'
},
{
'name' : u'Gumalu',
'langcode': u'gmu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gmu'
},
{
'name' : u'Isebe',
'langcode': u'igo',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=igo'
},
{
'name' : u'Amele',
'langcode': u'aey',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=aey'
},
{
'name' : u'Bau',
'langcode': u'bbd',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bbd'
},
{
'name' : u'Panim',
'langcode': u'pnr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pnr'
},
{
'name' : u'Rapting',
'langcode': u'rpt',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=rpt'
},
{
'name' : u'Wamas',
'langcode': u'wmc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=wmc'
},
{
'name' : u'Samosa',
'langcode': u'swm',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=swm'
},
{
'name' : u'Murupi',
'langcode': u'mqw',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mqw'
},
{
'name' : u'Saruga',
'langcode': u'sra',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=sra'
},
{
'name' : u'Nake',
'langcode': u'nbk',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=nbk'
},
{
'name' : u'Mosimo',
'langcode': u'mqv',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mqv'
},
{
'name' : u'Garus',
'langcode': u'gyb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gyb'
},
{
'name' : u'Yoidik',
'langcode': u'ydk',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ydk'
},
{
'name' : u'Rempi',
'langcode': u'rmp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=rmp'
},
{
'name' : u'Bagupi',
'langcode': u'bpi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bpi'
},
{
'name' : u'Silopi',
'langcode': u'xsp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=xsp'
},
{
'name' : u'Utu',
'langcode': u'utu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=utu'
},
{
'name' : u'Mawan',
'langcode': u'mcz',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mcz'
},
{
'name' : u'Baimak',
'langcode': u'bmx',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bmx'
},
{
'name' : u'Matepi',
'langcode': u'mqe',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mqe'
},
{
'name' : u'Gal',
'langcode': u'gap',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gap'
},
{
'name' : u'Nobonob',
'langcode': u'gaw',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gaw'
},
{
'name' : u'Wagi',
'langcode': u'fad',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=fad'
},
{
'name' : u'Utarmbung',
'langcode': u'omo',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=omo'
},
{
'name' : u'Anam',
'langcode': u'pda',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pda'
},
{
'name' : u'Anamgura',
'langcode': u'imi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=imi'
},
{
'name' : u'Moresada',
'langcode': u'msx',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=msx'
},
{
'name' : u'Wadaginam',
'langcode': u'wdg',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=wdg'
},
{
'name' : u'Atemble',
'langcode': u'ate',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ate'
},
{
'name' : u'Isabi',
'langcode': u'isa',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=isa'
},
{
'name' : u'Biyom',
'langcode': u'bpm',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bpm'
},
{
'name' : u'Tauya',
'langcode': u'tya',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tya'
},
{
'name' : u'Faita',
'langcode': u'faj',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=faj'
},
{
'name' : u'Yawanawa',
'langcode': u'ywn',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ywn'
},
{
'name' : u'Nonuya',
'langcode': u'noj',
'description' : u'',
'url' : u'http://www.sil.org/iso639-3/documentation.asp?id=noj'
},
{
'name' : u'French',
'langcode': u'fra',
'description' : u'',
'url' : u'http://www.sil.org/iso639-3/documentation.asp?id=fra'
},
{
'name' : u'Panobo',
'langcode': u'pno',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pno'
},
{
'name' : u'Sirionó',
'langcode': u'srq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=srq'
},
{
'name' : u'Matís',
'langcode': u'mpq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mpq'
},
{
'name' : u'Chácobo',
'langcode': u'cao',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cao'
},
{
'name' : u'Capanahua',
'langcode': u'kaq',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kaq'
},
{
'name' : u'Sharanahua',
'langcode': u'mcd',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mcd'
},
{
'name' : u'Ashéninka, Pichis',
'langcode': u'cpu',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cpu'
},
{
'name' : u'Yine',
'langcode': u'pib',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pib'
},
{
'name' : u'Iñapari',
'langcode': u'inp',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=inp'
},
{
'name' : u'Machiguenga',
'langcode': u'mcb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mcb'
},
{
'name' : u'Chayahuita',
'langcode': u'cbt',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=cbt'
},
{
'name' : u'Yanesha’',
'langcode': u'ame',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ame'
},
{
'name' : u'Nomatsiguenga',
'langcode': u'not',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=not'
},
{
'name' : u'Kadiwéu',
'langcode': u'kbc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kbc'
},
{
'name' : u'Chamicuro',
'langcode': u'ccc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ccc'
},
{
'name' : u'Dení',
'langcode': u'dny',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=dny'
},
{
'name' : u'Tenharim',
'langcode': u'pah',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pah'
},
{
'name' : u'Apalaí',
'langcode': u'apy',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=apy'
},
{
'name' : u'Xavánte',
'langcode': u'xav',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=xav'
},
{
'name' : u'Guaraní',
'langcode': u'gun',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gun'
},
{
'name' : u'Kaapor',
'langcode': u'urb',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=urb'
},
{
'name' : u'Nambikuára, Southern',
'langcode': u'nab',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=nab'
},
{
'name' : u'Karipuna Creole French',
'langcode': u'kmv',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kmv'
},
{
'name' : u'Wayampi',
'langcode': u'oym',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=oym'
},
{
'name' : u'Arabela',
'langcode': u'arl',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=arl'
},
{
'name' : u'Hausa',
'langcode': u'hau',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=hau'
},
{
'name' : u'Ngas',
'langcode': u'anc',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=anc'
},
{
'name' : u'Mwaghavul',
'langcode': u'sur',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=sur'
},
{
'name' : u'Miship',
'langcode': u'mjs',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mjs'
},
{
'name' : u'Goemai',
'langcode': u'ank',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ank'
},
{
'name' : u'Karekare',
'langcode': u'kai',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kai'
},
{
'name' : u'Gera',
'langcode': u'gew',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gew'
},
{
'name' : u'Bole',
'langcode': u'bol',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bol'
},
{
'name' : u'Ngamo',
'langcode': u'nbh',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=nbh'
},
{
'name' : u'Pero',
'langcode': u'pip',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=pip'
},
{
'name' : u'Tangale',
'langcode': u'tan',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=tan'
},
{
'name' : u'Dera',
'langcode': u'kna',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=kna'
},
{
'name' : u'Miya',
'langcode': u'mkf',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=mkf'
},
{
'name' : u'Diri',
'langcode': u'dwa',
'description' : u'',
    'url' : u'http://www.ethnologue.com/show_language.asp?code=dwa'
},
{
'name' : u'Boghom',
'langcode': u'bux',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bux'
},
{
'name' : u'Geji',
'langcode': u'gji',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=gji'
},
{
'name' : u'Polci',
'langcode': u'plj',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=plj'
},
{
'name' : u'Saya',
'langcode': u'say',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=say'
},
{
'name' : u'Dass',
'langcode': u'dot',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=dot'
},
{
'name' : u'Ngizim',
'langcode': u'ngi',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ngi'
},
{
'name' : u'Bade',
'langcode': u'bde',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=bde'
},
{
'name' : u'Tera',
'langcode': u'ttr',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=ttr'
},
{
'name' : u'Hwana',
'langcode': u'hwo',
'description' : u'',
'url' : u'http://www.ethnologue.com/show_language.asp?code=hwo'
},
{
'name' : u'Ga\'anda',
'langcode': u'gqa',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/gqa'
},
{
'name' : u'Boga',
'langcode': u'bvw',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/bvw'
},
{
'name' : u'Bura-Pabir',
'langcode': u'bwr',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/bwr'
},
{
'name' : u'Cibak',
'langcode': u'ckl',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/ckl'
},
{
'name' : u'Putai',
'langcode': u'mfl',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mfl'
},
{
'name' : u'Nggwahyi',
'langcode': u'ngx',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/ngx'
},
{
'name' : u'Huba',
'langcode': u'hbb',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/hbb'
},
{
'name' : u'Margi South',
'langcode': u'mfm',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mfm'
},
{
'name' : u'Margi Central',
'langcode': u'mrt',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mrt'
},
{
'name' : u'Kwame',
'langcode': u'hig',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/hig'
},
{
'name' : u'Psikye',
'langcode': u'kvj',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/kvj'
},
{
'name' : u'Hya',
'langcode': u'hya',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/hya'
},
{
'name' : u'Kirya-Konzel',
'langcode': u'fkk',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/fkk'
},
{
'name' : u'Bana',
'langcode': u'bcw',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/bcw'
},
{
'name' : u'Zizilivakan',
'langcode': u'ziz',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/ziz'
},
{
'name' : u'Gude',
'langcode': u'gde',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/gde'
},
{
'name' : u'Fali',
'langcode': u'fli',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/fli'
},
{
'name' : u'Nzanyi',
'langcode': u'nja',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/nja'
},
{
'name' : u'Bata',
'langcode': u'bta',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/bta'
},
{
'name' : u'Bacama',
'langcode': u'bcy',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/bcy'
},
{
'name' : u'Gudu',
'langcode': u'gdu',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/gdu'
},
{
'name' : u'Wandala',
'langcode': u'mfi',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mfi'
},
{
'name' : u'Glavda',
'langcode': u'glw',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/glw'
},
{
'name' : u'Dghwede',
'langcode': u'dgh',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/dgh'
},
{
'name' : u'Guduf-Gava',
'langcode': u'gdf',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/gdf'
},
{
'name' : u'Cineni',
'langcode': u'cie',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/cie'
},
{
'name' : u'Mafa',
'langcode': u'maf',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/maf'
},
{
'name' : u'Daba',
'langcode': u'dbq',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/dbq'
},
{
'name' : u'Masana',
'langcode': u'mcn',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mcn'
},
{
'name' : u'Musey',
'langcode': u'mse',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mse'
},
{
'name' : u'Lame',
'langcode': u'bma',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/bma'
},
{
'name' : u'Pévé',
'langcode': u'lme',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/lme'
},
{
'name' : u'Mesme',
'langcode': u'zim',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/zim'
},
{
'name' : u'Labir',
'langcode': u'jku',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/jku'
},
{
'name' : u'Dagaari Dioula',
'langcode': u'dgd',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/dgd'
},
{
'name' : u'Bankal',
'langcode': u'jjr',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/jjr'
},
{
'name' : u'Fali, North',
'langcode': u'fll',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/fll'
},
{
'name' : u'Karitiâna',
'langcode': u'ktn',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/ktn'
},
{
'name' : u'Parecís',
'langcode': u'pab',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/pab'
},
{
'name' : u'Rikbaktsa',
'langcode': u'rkb',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/rkb'
},
{
'name' : u'Amarakaeri',
'langcode': u'amr',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/amr'
},
{
'name' : u'Candoshi-Shapra',
'langcode': u'cbu',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/cbu'
},
{
'name' : u'Kreye',
'langcode': u'xre',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/xre'
},
{
'name' : u'Ignaciano',
'langcode': u'ign',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/ign'
},
{
'name' : u'Páez',
'langcode': u'pbb',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/pbb'
},
{
'name' : u'Mamaindé',
'langcode': u'wmd',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/wmd'
},
{
'name' : u'Latundê',
'langcode': u'ltn',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/ltn'
},
{
'name' : u'Kaxuiâna',
'langcode': u'kbb',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/kbb'
},
{
'name' : u'Kanoé',
'langcode': u'kxo',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/kxo'
},
{
'name' : u'Apurinã',
'langcode': u'apu',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/apu'
},
{
'name' : u'Wapishana',
'langcode': u'wap',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/wap'
},
{
'name' : u'Macushi',
'langcode': u'mbc',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mbc'
},
{
'name' : u'Araona',
'langcode': u'aro',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/aro'
},
{
'name' : u'Maxakalí',
'langcode': u'mbl',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/mbl'
},
{
'name' : u'Tupinikin',
'langcode': u'tpk',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/tpk'
},
{
'name' : u'Hupdë',
'langcode': u'jup',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/jup'
},
{
'name' : u'Paumarí',
'langcode': u'pad',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/pad'
},
{
'name' : u'Cavineña',
'langcode': u'cav',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/cav'
},
{
'name' : u'German, Standard',
'langcode': u'deu',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/deu'
},
{
'name' : u'Jupua',
'langcode': u'jupu1235',
'description' : u'',
'url' : u''
},
{
'name' : u'Cofán',
'langcode': u'con',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/con'
},
{
'name' : u'Waorani',
'langcode': u'auc',
'description' : u'',
'url' : u'http://www.ethnologue.com/language/auc'
},
] | gpl-3.0 | -6,831,153,465,114,668,000 | 24.513611 | 71 | 0.449438 | false |
evrenesat/genesis | genesis/lab/admin.py | 1 | 26626 | from datetime import datetime
# import dbsettings
from functools import partial
from uuid import uuid4
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.dispatch import receiver
from django.forms import BaseInlineFormSet
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
import django.dispatch
# Register your models here.
from django.apps import apps
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
from django_ace import AceWidget
# from grappelli_autocomplete_fk_edit_link import AutocompleteEditLinkAdminMixin
from lab.utils import tlower, tupper
from .models import *
from com.models import *
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Permission
admin.site.register(Permission)
UserAdmin.add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'first_name', 'last_name')}
),
)
# admin.ModelAdmin.change_list_template = "admin/change_list_filter_sidebar.html"
def finish_selected_value(modeladmin, request, queryset):
for value_item in queryset:
value_item.analyse.mark_finished(request, True)
finish_selected_value.short_description = _("Mark as Finished")
def approve_selected_value(modeladmin, request, queryset):
for value_item in queryset:
value_item.analyse.mark_approved(request, True)
approve_selected_value.short_description = _("Mark as Approved")
def finish_selected(modeladmin, request, queryset):
for analyse in queryset:
analyse.mark_finished(request, True)
finish_selected.short_description = _("Mark as Finished")
def accept_selected(modeladmin, request, queryset):
for analyse in queryset:
analyse.mark_accepted(request, True)
accept_selected.short_description = _("Mark as Accepted")
def approve_selected(modeladmin, request, queryset):
for analyse in queryset:
analyse.mark_approved(request, True)
approve_selected.short_description = _("Mark as Approved")
@admin.register(ParameterValue)
class AdminParameterValue(admin.ModelAdmin):
list_editable = ('value',)
actions = [finish_selected_value, approve_selected_value]
list_display = (
'code', 'patient_name', 'analyse_name', 'key', 'value', 'analyse_state', 'keyid')
# search_fields = ('analyse__group_relation', 'analyse__type__name', 'analyse__admission__id')
search_fields = ('analyse__group_relation', )
def get_actions(self, request):
actions = super().get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
# def get_search_results(self, request, queryset, search_term):
# # integer search_term means we want to list values of a certain admission
# try:
# search_term_as_int = int(search_term)
# return ParameterValue.objects.filter(analyse__admission=search_term_as_int), False
# except ValueError:
# return super().get_search_results(request, queryset, search_term)
def message_user(self, *args, **kwargs):
super().message_user(*args, **kwargs)
# this is a pure hack!
# we are harnessing the fact that message_user will be called
        # once, after all objects are saved
if hasattr(self, 'updated_analysis'):
for analyse in self.updated_analysis[0].admission.analyse_set.all():
analyse.save_result()
def log_change(self, request, object, message):
# by overriding log_change we can catch the changed objects
# and accumulate their analyse ids
if request.method == "POST" and '_save' in request.POST:
        if not hasattr(self, 'updated_analysis'):
self.updated_analysis = []
self.updated_analysis.append(object.analyse)
super().log_change(request, object, message)
def get_form(self, request, obj=None, **kwargs):
kwargs['formfield_callback'] = partial(self.formfield_for_dbfield, request=request, obj=obj)
return super().get_form(request, obj, **kwargs)
def get_formset(self, request, obj=None, **kwargs):
kwargs['formfield_callback'] = partial(self.formfield_for_dbfield, request=request, obj=obj)
return super().get_formset(request, obj, **kwargs)
def formfield_for_dbfield(self, db_field, **kwargs):
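        # If the parameter key defines presets, constrain the free-text value
        # field to a choice list built from those presets.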
p_value = kwargs.pop('obj', None)
if p_value and db_field.name == "value" and p_value.key.presets:
db_field.choices = p_value.key.preset_choices()
return super().formfield_for_dbfield(db_field, **kwargs)
# def formfield_for_choice_field(self, db_field, request=None, **kwargs):
# if db_field.name == "value":
# kwargs['choices'] = (
# ('accepted', 'Accepted'),
# ('denied', 'Denied'),
# )
# return super().formfield_for_choice_field(db_field, request, **kwargs)
class ParameterValueInline(admin.TabularInline):
classes = ('grp-collapse grp-closed analyse_box result_parameters',)
model = ParameterValue
extra = 0
ordering = ('code',)
readonly_fields = ('key', 'keydata')
max_num = 0
fields = ('key', 'value', 'keydata')
def has_add_permission(self, request, obj=None):
return False
def keydata(self, obj):
return obj.keyid()
keydata.allow_tags = True
class ParameterKeyInline(admin.TabularInline):
model = ParameterKey
extra = 0
classes = ('grp-collapse grp-closed',)
class AdmissionSampleInline(admin.TabularInline):
model = AdmissionSample
extra = 1
classes = ('grp-collapse',)
class ParameterInline(admin.TabularInline):
model = Parameter.analyze_type.through
extra = 0
classes = ('grp-collapse',) # grp-closed
class InstitutionAnalyseInline(admin.TabularInline):
model = InstitutionAnalyse
extra = 0
classes = ('grp-collapse',) # grp-closed
class ProcessLogicForm(forms.ModelForm):
class Meta:
model = ProcessLogic
widgets = {
'code': AceWidget(mode='python', theme='twilight', width="900px", height="700px"),
}
fields = '__all__'
@admin.register(ProcessLogic)
class AdminProcessLogic(admin.ModelAdmin):
form = ProcessLogicForm
@admin.register(AnalyseType)
class AdminAnalyseType(admin.ModelAdmin):
list_filter = ('group_type', 'category',)
search_fields = ('name',)
list_display = (
'name', 'code', 'group_type', 'category', 'method', 'price', 'external', 'order')
list_editable = ('category', 'method', 'price', 'code', 'order')
filter_horizontal = ('subtypes',)
readonly_fields = ('group_type',)
fieldsets = (
(None, {
            'fields': (('name', 'code', 'group_type',), ('sample_type', 'category', 'method'),
                       'process_time', 'footnote', 'barcode_count',
('price', 'alternative_price', 'no_of_groups'),
('external_lab', 'external_price'),)
}),
(_('Advanced'),
{'classes': ('grp-collapse', 'grp-closed'),
'fields': ('subtypes', 'process_logic',)
})
)
inlines = [ParameterInline, InstitutionAnalyseInline]
class Media:
js = [
'/static/tinymce/tinymce.min.js',
'/static/tinymce/setup.js',
]
def get_search_results(self, request, queryset, search_term):
# integer search_term means we want to list values of a certain admission
return queryset.filter(Q(name__contains=tupper(search_term))|Q(name__contains=tlower(search_term))), False
def save_related(self, request, form, formsets, change):
super().save_related(request, form, formsets, change)
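        # Keep the group_type flag in sync with whether any subtypes are selected.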
if form.instance.subtypes.exists():
if not form.instance.group_type:
form.instance.group_type = True
form.instance.save()
else:
if form.instance.group_type:
form.instance.group_type = False
form.instance.save()
@admin.register(StateDefinition)
class AdminStateDefinition(admin.ModelAdmin):
list_filter = ('type',)
search_fields = ('name',)
filter_horizontal = ('type',)
@admin.register(State)
class AdminState(admin.ModelAdmin):
list_filter = (
'definition', 'group', 'sample_type', 'analyse__type', 'analyse__type__category',
'timestamp',
'current_state')
list_display = (
'definition', 'comment', 'sample_type', 'analyse_info', 'timestamp', 'current_state',
'group',
'tdt')
search_fields = ('definition__name', 'comment')
date_hierarchy = 'timestamp'
change_list_template = "admin/change_list_filter_sidebar.html"
def analyse_info(self, obj):
return "%s %s" % (obj.analyse.type.name, obj.analyse.admission.patient.full_name(15))
def tdt(self, obj):
return str(int(obj.timestamp.timestamp()))
@admin.register(Parameter)
class AdminParameter(admin.ModelAdmin):
# list_filter = (,)
# search_fields = (,)
def save_model(self, request, obj, form, change):
obj.save()
if obj.parameter_definition.strip():
obj.create_update_parameter_keys()
filter_horizontal = ('analyze_type',)
inlines = (ParameterKeyInline,)
fieldsets = (
(None, {
'fields': ('name', 'process_logic', 'analyze_type')
}),
(_('Quick parameter definition'), {
'classes': ('grp-collapse',), # grp-closed
'fields': ('parameter_definition',),
}),
)
class StateFormSet(BaseInlineFormSet):
def save_new(self, form, commit=True):
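        # New state rows are attributed to the current user; definitions flagged
        # finish/accept/approve also trigger the matching analyse transition.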
obj = super().save_new(form, commit=False)
if not obj.personnel_id:
obj.personnel = self.request.user.profile
# if obj.personnel != form._request.user.profile:
# here you can add anything you need from the request
if obj.definition.finish:
obj.analyse.mark_finished(self.request)
if obj.definition.accept:
obj.analyse.mark_accepted(self.request)
if obj.definition.approve:
obj.analyse.mark_approved(self.request)
if commit:
obj.save()
return obj
# def clean(self):
# super().clean()
# for form in self.forms:
# if not hasattr(form, 'cleaned_data'):
# continue
# if form.cleaned_data.get('DELETE'):
# raise ValidationError('Error')
class StateInline(admin.TabularInline):
model = State
extra = 1
can_delete = False
formset = StateFormSet
classes = ('grp-collapse analyse_box analyse_states',)
radio_fields = {"group": admin.VERTICAL}
fields = ('current_state', 'group', 'definition', 'comment', 'timestamp', 'personnel')
readonly_fields = ('current_state', 'timestamp', 'personnel')
ordering = ("-timestamp",)
def get_formset(self, request, obj=None, **kwargs):
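        # Expose the request on the formset so StateFormSet.save_new can reach the current user.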
formset = super().get_formset(request, obj, **kwargs)
formset.request = request
return formset
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'comment':
kwargs['widget'] = forms.Textarea()
return super().formfield_for_dbfield(db_field, **kwargs)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
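        # request._obj_ is the Analyse being edited (stashed by AdminAnalyse.get_form);
        # limit the selectable state definitions to the ones applicable to it.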
field = super().formfield_for_foreignkey(db_field, request, **kwargs)
if db_field.name == 'definition':
if request._obj_ is not None:
field.queryset = field.queryset.filter(id__in=request._obj_.applicable_states_ids())
else:
field.queryset = field.queryset.none()
return field
class AnalyseAdminForm(forms.ModelForm):
class Meta:
model = Analyse
widgets = {
'group_relation': forms.HiddenInput()
}
fields = '__all__'
@admin.register(Patient)
class AdminPatient(admin.ModelAdmin):
list_display = ("name", 'surname', 'tcno', 'birthdate', 'timestamp')
date_hierarchy = 'timestamp'
# list_filter = []
search_fields = ('name', 'surname', 'tcno')
@admin.register(Analyse)
class AdminAnalyse(admin.ModelAdmin):
form = AnalyseAdminForm
raw_id_fields = ("type", 'admission')
actions = [finish_selected, approve_selected]
date_hierarchy = 'timestamp'
search_fields = ('admission__id', 'type__name', 'admission__patient__name',
'admission__patient__tcno', 'admission__patient__surname')
readonly_fields = ('id', 'approver', 'approved', 'approve_time', 'finished', 'analyser',
'completion_time', 'doctor_institution', 'patient', 'analyse_type',
'result_json')
autocomplete_lookup_fields = {
'fk': ['type', 'admission'],
}
fieldsets = (
(_('Admission Information'),
{'classes': ('grp-collapse analyse_box admission_info',),
'fields': (('analyse_type', 'doctor_institution', 'patient'),
('sample_type', 'sample_amount', 'sample_unit'),
('no_of_groups', 'medium_amount', 'medium_type', 'group_relation')
)
},
),
("State Inline", {"classes": ("placeholder state_set-group",), "fields": ()}),
("Result Inline", {"classes": ("placeholder parametervalue_set-group",), "fields": ()}),
(_('Analyse Result'),
{'classes': ('grp-collapse', 'grp-closed', 'analyse_box', 'analyse_result'),
'fields': (('short_result', 'comment'),
('finished', 'analyser', 'completion_time'),
('approved', 'approver', 'approve_time'),
)}),
(_('Advanced'),
{'classes': ('grp-collapse', 'grp-closed', 'analyse_box advanced_details'),
'fields': (
'report_override',
'result', 'result_json', 'template', 'admission', 'type',
'external_lab')
},
))
list_filter = ('finished', 'timestamp', 'type')
list_display = ('id', 'type', 'admission', 'timestamp', 'finished', 'approved')
list_display_links = ('id', 'type')
inlines = [
StateInline, ParameterValueInline
]
def get_search_results(self, request, queryset, search_term):
# integer search_term means we want to list values of a certain record
try:
search_term_as_int = int(search_term)
return Analyse.objects.filter(pk=search_term_as_int), False
except ValueError:
if len(search_term) == 32 and ' ' not in search_term:
# checking if the search term is a hash or not,
# a weak solution but should work for most cases
return Analyse.objects.filter(group_relation=search_term), False
return super().get_search_results(request, queryset, search_term)
def get_actions(self, request):
actions = super().get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def doctor_institution(self, obj):
adm = obj.admission
return '%s / %s' % (adm.institution.name, adm.doctor.full_name() if adm.doctor else '')
doctor_institution.short_description = _("Institution / Doctor")
def patient(self, obj):
return '<a href="/admin/lab/admission/%s/">%s - %s</a>' % (obj.admission.id,
obj.admission.patient.full_name(
30),
obj.admission.timestamp)
patient.short_description = _("Patient info")
patient.allow_tags = True
def analyse_type(self, obj):
external = ' | %s:%s' % (_('Ext.Lab'), obj.external_lab) if obj.external else ''
return '<span style="font-size:16px">#%s</span> / %s %s' % (obj.id, obj.type.name, external)
analyse_type.short_description = _("Analyse")
analyse_type.allow_tags = True
def save_model(self, request, obj, form, change):
# is_new = not bool(obj.id)
# if is_new:
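        # Prepare empty result values for the analyse's parameters before saving.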
obj.create_empty_values()
super().save_model(request, obj, form, change)
def save_related(self, request, form, formset, change):
super().save_related(request, form, formset, change)
form.instance.save_result()
# def get_queryset(self, request):
# return super().get_queryset(request).exclude(group_relation='GRP')
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name == "template":
kwargs["queryset"] = request._obj_.type.reporttemplate_set.all()
return super().formfield_for_foreignkey(db_field, request, **kwargs)
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
request._obj_ = obj
return super().get_form(request, obj, **kwargs)
# def changelist_view(self, request, extra_context=None):
# if not request.META['QUERY_STRING'] and \
# not request.META.get('HTTP_REFERER', '').startswith(request.build_absolute_uri()):
# return HttpResponseRedirect(request.path + "?finished__exact=0")
# return super().changelist_view(request, extra_context=extra_context)
class Media:
js = [
'/static/tinymce/tinymce.min.js',
'/static/tinymce/setup.js',
]
@admin.register(ReportTemplate)
class ReportTemplateAdmin(admin.ModelAdmin):
filter_horizontal = ('analyse_type',)
save_as = True
class Media:
js = [
'/static/tinymce/tinymce.min.js',
'/static/tinymce/setup.js',
]
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
search_fields = ('name', 'surname')
raw_id_fields = ('institution',)
autocomplete_lookup_fields = {
'fk': ['institution', ],
}
def save_model(self, request, obj, form, change):
if not obj.institution:
            # create a clinic record for doctors who don't
# belong to an institution
ins = Institution(name="%s %s" % (obj.name, obj.surname), type=30)
ins.save()
obj.institution = ins
obj.save()
class InstitutePricingInline(admin.TabularInline):
model = InstitutePricing
classes = ('grp-collapse',)
class AnalysePricingInline(admin.TabularInline):
model = AnalysePricing
classes = ('grp-collapse',)
fields = ('analyse_type', 'price', 'discount_rate')
@admin.register(Institution)
class InstitutionAdmin(admin.ModelAdmin):
search_fields = ('name', 'id', 'code')
inlines = [InstitutePricingInline, AnalysePricingInline]
class AnalyseInline(admin.TabularInline):
model = Analyse
extra = 1
classes = ('grp-collapse',)
# autocomplete_lookup_fields = {
# 'type_fk': ['type'],
# }
# show_change_link = True
raw_id_fields = ("type",)
readonly_fields = ('get_state', 'finished', 'ext_lab')
fields = ('get_state', 'type', 'sample_type', 'sample_amount', 'sample_unit', 'medium_amount', 'medium_type',
'ext_lab')
# list_filter = ('category__name',)
autocomplete_lookup_fields = {
'fk': ['type'],
}
def get_state(self, obj):
states = obj.state_set.filter(current_state=True)
if len(states) == 1:
return states[0].definition.name
else:
return '<br/>'.join('%s - %s' % (st.group, st.definition.name) for st in states)
get_state.short_description = _('Analyse state')
get_state.allow_tags = True
def ext_lab(self, obj):
return obj.external_lab if obj.external else ''
ext_lab.short_description = _('Ext.Lab')
def get_extra(self, request, obj=None, **kwargs):
return 0 if obj else self.extra
def get_queryset(self, request):
return super().get_queryset(request).exclude(group_relation='GRP')
class AdmissionStateInline(admin.TabularInline):
model = AdmissionState
extra = 1
classes = ('grp-collapse',)
post_admission_save = django.dispatch.Signal(providing_args=["instance", ])
@admin.register(Admission)
class AdminAdmission(admin.ModelAdmin):
date_hierarchy = 'timestamp'
search_fields = ('patient__name', 'patient__surname')
list_display = ('id', 'patient', 'institution', 'analyse_state', 'timestamp')
list_display_links = ('id', 'patient')
readonly_fields = ('id', ) #'timestamp'
raw_id_fields = ('patient', 'institution', 'doctor')
fields = (('id', 'timestamp'), ('patient', 'is_urgent'), ('doctor', 'institution'),
('week', 'upd_week', 'lmp_date'),
('indications', 'history'),
)
autocomplete_lookup_fields = {
'fk': ['patient', 'institution', 'doctor'],
}
inlines = [AnalyseInline, AdmissionStateInline] # AdmissionSampleInline,
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
request._obj_ = obj
return super().get_form(request, obj, **kwargs)
def save_model(self, request, obj, form, change):
obj.save()
# if obj.parameter_definition.strip():
# obj.create_update_parameter_keys()
def _create_payment_item(self):
pass
def get_search_results(self, request, queryset, search_term):
# integer search_term means we want to list values of a certain admission
try:
search_term_as_int = int(search_term)
if len(search_term) < 6:
queryset = queryset.filter(pk=search_term_as_int)
else:
queryset = queryset.filter(patient__tcno__contains=search_term_as_int)
except ValueError:
queryset = queryset.filter(Q(patient__name__icontains=search_term)|
Q(patient__surname__icontains=search_term))
return queryset, False
def _save_analyses(self, admission, analyses):
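        # Save each analyse; group-type analyses are expanded into one child
        # analyse per subtype, linked back to the grouper through group_relation.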
for analyse in analyses:
if not analyse.type:
continue
is_new = not analyse.id
analyse.save()
if analyse.type.group_type:
analyse.group_relation = 'GRP' # this is a group
rand_group_code = uuid4().hex
for sub_analyse_type in analyse.type.subtypes.all():
anl = Analyse(type=sub_analyse_type,
sample_type=analyse.sample_type,
grouper=analyse,
group_relation=analyse.id,
external=sub_analyse_type.external,
external_lab=sub_analyse_type.external_lab,
admission=admission)
anl.save()
anl._set_state_for(self._request.user, first=True)
if analyse.type.external:
analyse.external = analyse.type.external
analyse.external_lab = analyse.type.external_lab
analyse.save()
if is_new:
analyse._set_state_for(self._request.user, first=True)
post_admission_save.send(sender=Admission, instance=admission)
def save_related(self, request, form, formsets, change):
"""
- expand group-type analyses
- create payment and payment-items
"""
form.save_m2m()
self._request = request
if not change:
adm = form.instance
# payment = Payment(admission=adm, patient=adm.patient)
# if adm.institution.preferred_payment_method == 20:
# payment.institution = adm.institution
# else:
# payment.patient = adm.patient
for formset in formsets:
if formset.model == Analyse:
self._save_analyses(formset.instance, formset.save(commit=False))
formset.save()
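        # (Re)compute the admission pricing and process the related payments.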
customer_charge, new = AdmissionPricing.objects.get_or_create(admission=form.instance)
customer_charge.process_payments()
class MethodAdminForm(forms.ModelForm):
analysetype_set = forms.ModelMultipleChoiceField(
queryset=AnalyseType.objects.all(),
required=False,
widget=FilteredSelectMultiple(
verbose_name=_('Analyse Types'),
is_stacked=False
)
)
class Meta:
model = AnalyseType
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance and self.instance.pk:
self.fields['analysetype_set'].initial = self.instance.analysetype_set.all()
def save(self, *args, **kwargs):
kwargs['commit'] = True
return super().save(*args, **kwargs)
def save_m2m(self):
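        # Maintain the reverse M2M by hand: replace the instance's analyse types
        # with the ones selected in the form.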
self.instance.analysetype_set.clear()
self.instance.analysetype_set.add(*self.cleaned_data['analysetype_set'])
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'code',)
form = MethodAdminForm
filter_horizontal = ('states',)
@admin.register(Method)
class AdminMethod(admin.ModelAdmin):
list_display = ('name', 'code',)
form = MethodAdminForm
@admin.register(MediumType)
class AdminMedium(admin.ModelAdmin):
list_display = ('name', 'code', 'order')
list_editable = ('code', 'order',)
@receiver(post_admission_save, sender=Admission)
def create_payment_objects(sender, instance, **kwargs):
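    # Despite its name, this handler currently (re)creates the empty result
    # parameter values for every non-group analyse of the saved admission.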
# instance.analyse_set.filter(group_relation='GRP').delete()
for analyse in instance.analyse_set.exclude(group_relation='GRP'):
analyse.create_empty_values()
# instance.analyse_set.filter(group_relation='GRP').delete()
@admin.register(Setting)
class AdminSetting(admin.ModelAdmin):
search_fields = ('name',)
list_display = ('name', 'value', 'key')
list_editable = ('value', )
readonly_fields = ('name', 'key')
app_models = apps.get_app_config('lab').get_models()
for model in app_models:
try:
admin.site.register(model)
except AlreadyRegistered:
pass
| gpl-3.0 | 6,820,900,275,454,348,000 | 33.624187 | 114 | 0.606512 | false |