repo_name (string, len 5–92) | path (string, len 4–232) | copies (19 classes) | size (string, len 4–7) | content (string, len 721–1.04M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 … 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
douglaswei/stock | script/stockit/function.py | 1 | 5830 | #! /usr/bin/python
# -*- coding:utf-8 -*-
import sys
import numpy as np
import random
import logging
__all__ = [
'TransFeatFromFloats',
'Mean',
'Gradient',
'GradientAngle',
'ConNum',
'ContinousIncrease',
'PairsIncrease',
'CoutNonNeg',
'GradientsBySample',
'ConinousPotiveCount',
'GenRate',
'HoriCmp',
'VehiCmp',
'Variance',
'CountDaysShangshenTongDao',
'GradMaxMin',
]
def TransFeatFromFloats(featname, feats):
'''
translate featnames into format: featurename_idx
'''
return ["%s_%d:%f" % (featname, idx, feats[idx]) for idx in range(len(feats))]
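# A quick illustration (hypothetical values): TransFeatFromFloats('ma', [1.5, 2.0])
# returns ['ma_0:1.500000', 'ma_1:2.000000'].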
def Mean(records, beg, length):
'''
mean value of values in the records
'''
return np.mean(records[beg : beg + length])
def Gradient(records, cur, prev, frame_size=1, prev_frame_size=1):
'''
Gradient(cur_pos, prev_pos, frame_size, prev_frame_size)
= ( Mean(c,f) - Mean(p,pf) ) / Mean(p,pf)
'''
cur_val = Mean(records, cur, frame_size)
pre_val = Mean(records, prev, prev_frame_size)
if (pre_val == 0) :
pre_val += 0.000001
return (cur_val - pre_val) / pre_val
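# Example (assumed inputs): with records = [12.0, 10.0], Gradient(records, 0, 1)
# compares Mean(records, 0, 1) = 12.0 against Mean(records, 1, 1) = 10.0 and
# returns (12.0 - 10.0) / 10.0 = 0.2, i.e. the newer value is 20% higher.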
def GradientAngle(records, cur, prev, frame_size=1, prev_frame_size=1):
'''
the angle value of gradient : when Gradient>1, it increases too quick
'''
return np.arctan(Gradient(records, cur, prev, frame_size, prev_frame_size))
def ConNum(records, li):
    '''
    ConNum(list): count of consecutive increases over window lengths [1, 3, 5, 10, 20 ...]
    '''
res = 0
for idx in range(len(li) - 1):
cur = Mean(records, 0, li[idx])
prev = Mean(records, 0, li[idx + 1])
if cur < prev:
res += 1
else:
break
return res
def ContinousIncrease(records, li, thres):
    '''
    return 1 or 0 depending on whether the list increases continuously
    '''
for idx in range(len(li)):
if Gradient(records, idx, 1, idx+1, 1) < thres:
return 0
return 1
def PairsIncrease(records, pairs):
    '''
    return True if records increase for every pair in pairs
    '''
for a,b in pairs:
if records[a] < records[b]:
return False
return True
def CoutNonNeg(records, n):
    '''
    number of non-negative records among the first n
    '''
    filtered_records = [item for item in records[:n] if item >= 0]
    return len(filtered_records)
def GradientsBySample(records, sample_rate, grad_func=Gradient):
'''
gradients of records sample by sample_rate
'''
    sample_records = [Mean(records, idx * sample_rate, sample_rate)
                      for idx in range(len(records) // sample_rate)]
sample_gradient = []
for idx in range(len(sample_records) - 1):
for idy in range(idx + 1, len(sample_records)):
sample_gradient.append(Gradient(sample_records ,idx ,idy))
mean_value = np.mean(sample_records)
    sample_gradient.append(np.sum(np.power(np.asarray(sample_records) - mean_value, 2)))
return np.asarray(sample_gradient)
def ConinousPotiveCount(records):
    '''
    count of leading consecutive positive records
    '''
    res = 0
for item in records:
if item > 0:
res += 1
else:
break
return res
def GenRate(records):
    '''
    mean interval (in steps) between sign changes of the gradient
    '''
gradient_list = [Gradient(records, idx, idx+1, 1, 1) > 0 for idx in range(len(records) - 1)]
freq_list = []
pre = None
for idx in range(len(gradient_list) - 1):
idn = idx + 1
        if gradient_list[idx] != gradient_list[idn]: # gradient's sign changes
cur = idx
if pre != None:
freq_list.append(cur - pre)
pre = cur
    if len(freq_list) == 0:
        return 0
    return np.mean(freq_list)
def HoriCmp(records, ma_list, beg_pos):
    '''
    return 1 if the mean over cur is >= the mean over prev for every
    [cur, prev] window pair in ma_list, else 0
    '''
for [cur, prev] in ma_list:
if Mean(records, beg_pos, cur) < Mean(records, beg_pos, prev):
return 0
return 1
def VehiCmp(records, big_window, small_window):
    '''
    return 1 if each of the first big_window records is >= the record
    small_window steps later, else 0
    '''
for beg_pos in range(big_window):
        end_pos = beg_pos + small_window
        if records[beg_pos] < records[end_pos]:
return 0
return 1
def Variance(records):
    '''
    variance (note: the square root is returned, i.e. the standard deviation)
    '''
mean_value = Mean(records, 0, 10)
variance = sum(np.power(records - mean_value, 2)) / len(records)
variance = np.sqrt(variance)
return variance
def CountDaysShangshenTongDao(records, window_size):
    '''
    count days within window_size that satisfy the rising-channel
    (shangsheng tongdao) moving-average condition below
    '''
# (ma5>ma20 and ma10>ma20 and ma20>ma30 and ma30>ma60)
ma_list = [[5,20], [10,20], [20,30], [30,60]]
return sum([HoriCmp(records, ma_list, beg) for beg in range(window_size)])
def GradMaxMin(records, beg, length, span=1):
    '''
    get the max/min gradient over the window
    '''
max_gradient = -1
min_gradient = 1
for pos in range(beg, beg+length, span):
next_pos = pos + span
grad = Gradient(records, pos, next_pos, span, span)
if grad > max_gradient:
max_gradient = grad
if grad < min_gradient:
min_gradient = grad
return (max_gradient, min_gradient)
def get_fuquan_rate(ma_cur, ma_prev, ma_gradient):
    '''
    compute the price adjustment (fuquan) ratio used to restore comparable prices
    '''
if ma_prev is None:
return 1
ma_gradient = 1 + (float(ma_gradient)/100)
org_price = ma_prev / ma_gradient
rate = org_price / ma_cur
rate = round(rate, 2)
return rate
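# Worked example (assumed numbers): with ma_prev = 10.0, ma_cur = 5.02 and a
# reported ma_gradient of 0.4 (percent), org_price = 10.0 / 1.004 ~= 9.96 and
# rate = round(9.96 / 5.02, 2) = 1.98, roughly undoing a 2-for-1 adjustment.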
def adjust_gegu_records(records):
    '''
    apply the adjustment (fuquan) to the price fields
    '''
rate = 1.0
prev_ma = None
ma_gradient = None
for record in records:
ma = record.ma
new_rate = get_fuquan_rate(ma, prev_ma, ma_gradient)
if prev_ma:
logging.debug('%s:%s:%f:%f:%f' % (record.code, str(record.cdate), ma, prev_ma, new_rate))
rate *= new_rate
record.ma = round(record.ma*rate, 3)
ma_gradient = record.ma_gradient
prev_ma = ma
| gpl-2.0 | 7,780,616,057,856,976,000 | 22.85124 | 101 | 0.577616 | false |
yackj/GameAnalysis | test/gpgame_test.py | 1 | 2451 | import numpy as np
import pytest
from gameanalysis import gamegen
from gameanalysis import gpgame
from gameanalysis import rsgame
GAMES = [
([1], 1),
([1], 2),
([2], 1),
([2], 2),
([2], 5),
([5], 2),
([5], 5),
(2 * [1], 1),
(2 * [1], 2),
(2 * [2], 1),
(2 * [2], 2),
(5 * [1], 2),
(2 * [1], 5),
(2 * [2], 5),
(2 * [5], 2),
(2 * [5], 5),
(3 * [3], 3),
(5 * [1], 5),
([170], 2),
([180], 2),
([1, 2], 2),
([1, 2], [2, 1]),
(2, [1, 2]),
([3, 4], [2, 3]),
([2, 3, 4], [4, 3, 2]),
]
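# Each tuple above is read as (players per role, strategies per role) and is
# expanded via rsgame.basegame(*game_params) in the tests below; scalars and
# lists are broadcast across roles (an assumption based on the usage here).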
@pytest.mark.parametrize('game_params', GAMES)
@pytest.mark.parametrize('num_devs', range(5))
def test_nearby_profiles(game_params, num_devs):
    # TODO There is probably a better way to test this, but it requires moving
    # nearby_profs out of a game that requires enough data for x-validation
base = rsgame.basegame(*game_params)
game_data = gamegen.add_profiles(base, min(base.num_all_profiles,
3 * base.num_strategies.max()))
if np.any(np.sum(game_data.profiles > 0, 0) < 3):
# We need at least 3 profiles per strategy for x-validation
return
game = gpgame.NeighborGPGame(game_data)
prof = game.random_profiles()
nearby = game.nearby_profs(prof, num_devs)
diff = nearby - prof
devs_from = game.role_reduce((diff < 0) * -diff)
devs_to = game.role_reduce((diff > 0) * diff)
assert np.all(devs_to.sum(1) <= num_devs)
assert np.all(devs_from.sum(1) <= num_devs)
assert np.all(devs_to == devs_from)
assert np.all(game.verify_profile(nearby))
def test_basic_functions():
"""Test that all functions can be called without breaking"""
base = rsgame.basegame([4, 3], [3, 4])
game = gamegen.add_profiles(base, 200)
gpbase = gpgame.BaseGPGame(game)
mix = game.random_mixtures()
assert np.all(gpbase.min_payoffs() == game.min_payoffs())
assert np.all(gpbase.max_payoffs() == game.max_payoffs())
assert gpbase.is_complete()
gppoint = gpgame.PointGPGame(gpbase)
gppoint.deviation_payoffs(mix)
gpsample = gpgame.SampleGPGame(gpbase, num_samples=100)
gpsample.deviation_payoffs(mix)
gpneighbor = gpgame.NeighborGPGame(gpbase)
gpneighbor.deviation_payoffs(mix)
gpdpr = gpgame.DprGPGame(gpbase)
gpdpr.deviation_payoffs(mix)
gpfull = gpgame.FullGPGame(gpbase)
gpfull.deviation_payoffs(mix)
| apache-2.0 | 4,100,504,288,013,826,000 | 28.53012 | 78 | 0.587923 | false |
EMS-TU-Ilmenau/fastmat | fastmat/inspect/common.py | 1 | 35900 | # -*- coding: utf-8 -*-
# Copyright 2018 Sebastian Semper, Christoph Wagner
# https://www.tu-ilmenau.de/it-ems/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import itertools
import os
import re
import six
import struct
import numpy as np
from scipy import sparse
from platform import system as pfSystem
from ..core.types import isInteger
try:
from itertools import izip
except ImportError: # python 3.x
izip = zip
currentOS = pfSystem()
################################################################################
################################################## CONSTANT definition classes
class ALIGNMENT(object):
"""Short summary."""
DONTCARE = '-'
FCONT = 'F'
CCONT = 'C'
STRIDE = 'S'
################################################################################
################################################## Permutation funcs and classes
class AccessDict(dict):
"""Short summary."""
def __getattr__(self, key):
if key in self:
return self[key]
else:
found = [kk for kk in sorted(self.keys())
if kk.startswith(key)]
return ([self[kk] for kk in found] if len(found) > 2
else (None if len(found) == 0
else self[found[0]]))
def __repr__(self):
return str(self.keys())
def convertToAccessDicts(level):
"""Short summary.
Parameters
----------
level : type
Description of parameter `level`.
Returns
-------
type
Description of returned object.
"""
for key, value in level.items():
        if isinstance(value, dict) and (value is not level):  # skip self-references
level[key] = convertToAccessDicts(value)
return AccessDict(level)
################################################## class uniqueNameDict
class uniqueNameDict(dict):
"""Short summary."""
'''
Modified dictionary: suffixes key names with integer to maintain uniqueness.
'''
def __setitem__(self, key, value):
index = 1
strKey = key
while True:
if strKey not in self:
break
index += 1
strKey = "%s_%03d" %(key, index)
dict.__setitem__(self, strKey, value)
################################################## class paramDict
reFormatString = re.compile(r'%\(.+\)')
class paramDict(dict):
"""Short summary."""
def __getattr__(self, key):
# evaluate nested format-string parameters, update format results
value, lastValue = super(paramDict, self).__getitem__(key), None
while id(lastValue) != id(value):
lastValue = value
if isinstance(value, str):
if value in self and value != key:
value = getattr(self, value)
elif reFormatString.search(value):
value = value %self
elif (inspect.isroutine(value) and
not isinstance(value, IgnoreFunc)):
value = value(self)
self[key] = value
return value
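# Illustration (hypothetical parameters): given
#   p = paramDict({'num': 4, 'size': 'num', 'tag': 'N%(num)d'})
# p.size follows the link to 4 and p.tag evaluates the format string to 'N4';
# both results are cached back into the dictionary.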
################################################## class Permutation
class Permutation(list):
"""Short summary."""
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
super(Permutation, self).__repr__())
def __str__(self):
return "%s(%s)" % (self.__class__.__name__,
super(Permutation, self).__str__())
################################################## class VariantPermutation
class VariantPermutation(Permutation):
pass
################################################## class IgnoreDict
class IgnoreDict(dict):
pass
################################################## class IgnoreFunc
class IgnoreFunc(object):
def __init__(self, fun):
self._fun = fun
def __call__(self, *args, **kwargs):
return self._fun(*args, **kwargs)
################################################## paramApplyDefaults()
def paramApplyDefaults(
params,
templates=None,
templateKey=None,
extraArgs=None
):
"""Short summary.
Parameters
----------
params : type
Description of parameter `params`.
templates : type
Description of parameter `templates`.
templateKey : type
Description of parameter `templateKey`.
extraArgs : type
Description of parameter `extraArgs`.
Returns
-------
type
Description of returned object.
"""
# have some defaults from templates
# first, fetch the template as defaults, then update with target,
# then assign the whole dict to target
result = {}
if templates is not None:
# 1. COMMON - section of templates (lowest-priority)
result.update(templates.get(NAME.COMMON, {}))
# 2. the templates-section corresponding to the templateKey
result.update(templates.get(templateKey, {}))
# 3. specific reference to a template by the 'template' key in params
if NAME.TEMPLATE in params:
result.update(templates.get(params[NAME.TEMPLATE], {}))
# 4. actual parameters (params)
result.update(params)
# 5. extraArgs (which usually come from command-line) (top-priority)
if extraArgs is not None:
for p in extraArgs:
for pp in list(p.split(',')):
tokens = pp.split('=')
if len(tokens) >= 2:
string = "=".join(tokens[1:])
try:
val = int(string)
except ValueError:
try:
val = float(string)
except ValueError:
val = string
result[tokens[0]] = val
return paramDict(result)
################################################## paramPermute()
def paramPermute(dictionary, copy=True, PermutationClass=Permutation):
"""Short summary.
Parameters
----------
dictionary : type
Description of parameter `dictionary`.
copy : type
Description of parameter `copy`.
PermutationClass : type
Description of parameter `PermutationClass`.
Returns
-------
type
Description of returned object.
"""
'''
Return a list of cartesian-product combination of all dictionary values
holding a Permutation list object. If copy is True, the resulting list will
be applied back to input dictionary copies before returning, resulting in
a list of copies with the permutation replacements being made on the input
dictionary. Parameter permutations must be indicated by wrapping a list in
a Permutation class instance.
'''
# isolate the permutation parameters from the dictionary
parameters = {key: list(value)
for key, value in dictionary.items()
if isinstance(value, PermutationClass)}
# perform a cartesian product -> list of permuted instances
permutations = [dict(izip(parameters, x))
for x in itertools.product(*six.itervalues(parameters))]
if len(permutations) == 1:
permutations = [{}]
# apply the permutations to the input dictionary (generating copies)
def dictCopyAndMerge(source, merge):
result = source.copy()
result.update(merge)
return paramDict(result)
return [dictCopyAndMerge(dictionary, permutation)
for permutation in permutations] if copy else permutations
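# Illustration (hypothetical parameters):
#   paramPermute({'a': Permutation([1, 2]), 'b': 'x'})
# returns two paramDict copies, {'a': 1, 'b': 'x'} and {'a': 2, 'b': 'x'},
# i.e. the cartesian product over all Permutation-wrapped values.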
################################################## paramDereferentiate()
def paramDereferentiate(currentLevel, paramDict=None):
"""Short summary.
Parameters
----------
currentLevel : type
Description of parameter `currentLevel`.
paramDict : type
Description of parameter `paramDict`.
Returns
-------
type
Description of returned object.
"""
'''
Replace all text value identifyers matching a target key name with the
key values it's pointing to allowing parameter links. Then, recurse through
an arbitrary depth of container types (dicts, lists, tuples) found in the
first stage, continuing dereferentiation.
Returns the modified currentLevel container.
'''
# set paramDict in first level, determine iterator for currentLevel
iterator = currentLevel.items() \
if isinstance(currentLevel, dict) else enumerate(currentLevel)
if paramDict is None:
paramDict = currentLevel
# STAGE 1: Replace all dictionary values matching a key name in paramDict
# with the corresponding value of that paramDict-entry. Also build
# a container list for stage two.
dictIterables = {}
for key, value in iterator:
if isinstance(value, (list, tuple, dict)):
dictIterables[key] = value
else:
if not isinstance(value, str):
continue
try:
paramValue = paramDict.get(value, None)
if (paramValue is not None and
not isinstance(paramValue, Permutation)):
currentLevel[key] = paramValue
if isinstance(paramValue, (list, tuple, dict)):
dictIterables[key] = paramValue
except TypeError:
continue
# STAGE 2: Crawl the containers found in stage 1, repeating the process.
# Note that nested containers are copied in the process.
# The parameter dictionary paramDict stays the same for all levels.
for key, iterable in dictIterables.items():
# copy the container to allow modification of values
newIterable = copy.copy(iterable)
if isinstance(iterable, tuple):
# cast the immutable tuple type to list allowing modifications, cast
# back to tuple afterwards
newIterable = list(newIterable)
paramDereferentiate(newIterable, paramDict)
newIterable = tuple(newIterable)
else:
paramDereferentiate(newIterable, paramDict)
# overwrite former iterable with new copy
currentLevel[key] = newIterable
return currentLevel
################################################## paramEvaluate()
def paramEvaluate(currentLevel, paramDict=None):
"""Short summary.
Parameters
----------
currentLevel : type
Description of parameter `currentLevel`.
paramDict : type
Description of parameter `paramDict`.
Returns
-------
type
Description of returned object.
"""
'''
Evaluate all functions found in currentLevel with paramDict as argument.
Repeat the process for nested containers.
Returns the modified currentLevel container.
'''
# set paramDict in first level, determine iterator for currentLevel
iterator = currentLevel.items() \
if isinstance(currentLevel, dict) else enumerate(currentLevel)
if paramDict is None:
paramDict = currentLevel
# STAGE 1: Evaluate the functions found in currentLevel. Also build a
# container list for stage two.
dictIterables = {}
for key, value in iterator:
if isinstance(value, (list, tuple, dict)):
if not isinstance(value, IgnoreDict):
dictIterables[key] = value
elif inspect.isroutine(value):
currentLevel[key] = value(paramDict)
# STAGE 2: Crawl the containers found in stage 1, repeating the process
# The parameter dictionary paramDict stays the same for all levels.
for key, iterable in dictIterables.items():
if isinstance(iterable, tuple):
# cast the immutable tuple type to list allowing modifications, cast
# back to tuple afterwards
newIterable = list(currentLevel[key])
paramEvaluate(newIterable, paramDict)
currentLevel[key] = tuple(newIterable)
else:
paramEvaluate(currentLevel[key], paramDict)
return currentLevel
################################################## mergeDicts()
# a little helper to safely merge two dictionaries
def mergeDicts(a, b):
"""Short summary.
Parameters
----------
a : type
Description of parameter `a`.
b : type
Description of parameter `b`.
Returns
-------
type
Description of returned object.
"""
'''
Merge the dictionaries a and b such that entries in b have priority and the
input Dictionary a remains unchanged.
'''
c = a.copy()
c.update(b)
return c
################################################################################
################################################## array generators, test distr.
################################################## arrTestDist()
def arrTestDist(shape, dtype, center=0):
"""Short summary.
Parameters
----------
shape : type
Description of parameter `shape`.
dtype : type
Description of parameter `dtype`.
center : type
Description of parameter `center`.
Returns
-------
type
Description of returned object.
"""
def _draw(shape):
'''
Draw a random floating-point number from a test distribution.
Remove the part around zero from the distribution and keep the distance
between minimal and maximal absolute values (dynamics) relatively small
'''
return (np.random.uniform(2., 1., size=shape) *
np.random.choice([-1, 1], shape))
if np.prod(shape) < 1:
return np.array([])
if dtype in (np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64):
result = np.random.choice(
[center - 2, center - 1, center + 1, center + 2], shape).astype(
dtype)
else:
if dtype in (np.float32, np.float64):
result = _draw(shape).astype(dtype) + center
elif dtype in (np.complex64, np.complex128):
result = (_draw(shape) + np.real(center) +
1j * (_draw(shape) + np.imag(center))).astype(dtype)
else:
raise TypeError("arrTestDist: unsupported type %s" % (dtype))
# increase the largest element in magnitude a little bit more to avoid
# too close neighbours to the largest element in the distribution
# this helps at least largestSingularValue in Diag matrices to converge ;)
idxMax = np.unravel_index(np.abs(result).argmax(), result.shape)
if isInteger(dtype):
result[idxMax] += np.sign(result[idxMax])
else:
result[idxMax] *= 1.5
return result
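# For example, arrTestDist((3, 3), np.float64) draws values with magnitudes in
# roughly [1, 2] and random signs, then scales the largest-magnitude entry by
# 1.5 to keep it clear of the rest of the distribution.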
################################################## arrSparseTestDist()
def arrSparseTestDist(shape, dtype,
density=0.1, center=0, compactFullyOccupied=False):
"""Short summary.
Parameters
----------
shape : type
Description of parameter `shape`.
dtype : type
Description of parameter `dtype`.
density : type
Description of parameter `density`.
center : type
Description of parameter `center`.
compactFullyOccupied : type
Description of parameter `compactFullyOccupied`.
Returns
-------
type
Description of returned object.
"""
numSize = np.prod(shape)
if compactFullyOccupied:
# draw just enough samples randomly such that every row and column is
# occupied with at least one element. Ignore the density parameter
numElements = max(shape)
# draw mm and nn coordinates from coordinate space. Modulo operation
# wraps indices larger than the actual row or column dimension
suppX = np.mod(
np.random.choice(numElements, numElements, replace=False), shape[0])
suppY = np.mod(
np.random.choice(numElements, numElements, replace=False), shape[1])
else:
# draw a relative amount of samples randomly over vectorized index space
numElements = int(numSize * density)
supp = np.random.choice(np.arange(numSize), numElements, replace=False)
suppX = np.mod(supp, shape[0])
suppY = np.divide(supp, shape[0])
# determine the actual element values distributed over the sparse array
# from arrTestDist with a 1D-array spanning the required element count
arrElements = arrTestDist((numElements, ), dtype, center=center)
return sparse.coo_matrix(
(arrElements, (suppX, suppY)), shape=shape, dtype=dtype)
################################################## arrAlign()
def arrAlign(arr, alignment=ALIGNMENT.DONTCARE):
"""Short summary.
Parameters
----------
arr : type
Description of parameter `arr`.
alignment : type
Description of parameter `alignment`.
Returns
-------
type
Description of returned object.
"""
if alignment == ALIGNMENT.DONTCARE:
return np.asanyarray(arr)
elif alignment == ALIGNMENT.FCONT:
return np.asfortranarray(arr)
elif alignment == ALIGNMENT.CCONT:
return np.ascontiguousarray(arr)
elif alignment == ALIGNMENT.STRIDE:
# define spacing between elements
spacing = 3
# determine maximum value
try:
maxValue = np.iinfo(arr.dtype).max
except ValueError:
try:
maxValue = np.finfo(arr.dtype).max
except ValueError:
maxValue = 1.
# generate large random array with maximized data type utilization
arrFill = (maxValue * (np.random.rand(
*(dim * spacing for dim in arr.shape)) - 0.5)).astype(arr.dtype)
# fill-in the array data and return a view of the to-be-aligned array
arrPart = arrFill[(np.s_[1::spacing], ) * arrFill.ndim]
arrPart[:] = arr
return arrPart
else:
raise ValueError("Unknown alignment identificator '%s'" %(alignment))
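# Example: arrAlign(np.zeros((4, 4)), ALIGNMENT.FCONT) returns a
# Fortran-contiguous copy, whereas ALIGNMENT.STRIDE embeds the data in a larger
# random buffer and returns a strided view spaced three elements apart.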
################################################## arrayGenerator
class ArrayGenerator(dict):
@property
def forwardData(self):
if NAME.FWDATA not in self:
# generate random array and set specific alignment style
self[NAME.FWDATA] = arrAlign(
arrTestDist(self[NAME.SHAPE], self[NAME.DTYPE],
center=self.get(NAME.CENTER, 0)),
alignment=self.get(NAME.ALIGN, ALIGNMENT.DONTCARE))
return self[NAME.FWDATA]
@property
def backwardData(self):
if NAME.BWDATA not in self:
# generate random array and set specific alignment style
self[NAME.BWDATA] = arrAlign(
arrTestDist(self[NAME.SHAPE_T], self[NAME.DTYPE],
center=self.get(NAME.CENTER, 0)),
alignment=self.get(NAME.ALIGN, ALIGNMENT.DONTCARE))
return self[NAME.BWDATA]
def __call__(self):
return self.forwardData
def __str__(self):
'''Compose a compact description of the represented array.'''
tags = []
# generate shape-token: check for both shape variants (fw and bw).
# if they differ, print both as "fw/bw", otherwise print the dim only
fwShape = self[NAME.SHAPE] if NAME.SHAPE in self else ()
bwShape = self[NAME.SHAPE_T] if NAME.SHAPE_T in self else ()
def printDim(fw, bw):
if fw is None:
return '-' if bw is None else str(bw)
else:
return (str(fw) if bw is None
else "%s/%s" %(fw, bw) if (fw != bw) else str(fw))
strShape = 'x'.join([
printDim(fw, bw)
for fw, bw in six.moves.zip_longest(fwShape, bwShape)])
if len(strShape) > 0:
tags.append(strShape)
# print the data type of the array
value = self.get(NAME.DTYPE, '')
if isinstance(value, type):
tags.append(NAME.TYPENAME.get(value, str(value)))
value = self.get(NAME.ALIGN, None)
if value in NAME.ALLALIGNMENTS:
tags.append(value)
return str("[%s]" % (','.join(tags)))
def __repr__(self):
return self.__str__()
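# Illustration (hypothetical parameters): ArrayGenerator({NAME.SHAPE: (4, 3),
# NAME.DTYPE: np.float32, NAME.ALIGN: ALIGNMENT.FCONT}) prints as '[4x3,f32,F]'
# and lazily materializes its forwardData array on first access.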
################################################################################
################################################## inspection routines
def showContent(instance, seen=None, prefix=""):
"""Short summary.
Parameters
----------
instance : type
Description of parameter `instance`.
seen : type
Description of parameter `seen`.
prefix : type
Description of parameter `prefix`.
Returns
-------
type
Description of returned object.
"""
'''
Print a readable dependency tree of fastmat class instances
Parameters
----------
instance : :py:class:`fastmat.Matrix`
Matrix instance to get inspected
Notes
-----
The function outputs instance as top-level node and then walks through all
elements of instance.content, recursively calling itself with extended
prefix. To avoid endless loops, the function ensures the recursion is only
applied once to every element by keeping track already visited elements.
>>> showContent(Eye(4) * -1 * -1 + Eye(4) - Eye(4))
<Sum[4x4]:0x7fe447f40770>
+-<Product[4x4]:0x7fe447f3d188>
| +-<Eye[4x4]:0x7fe447f402b0>
+-<Eye[4x4]:0x7fe447f403e0>
+-<Product[4x4]:0x7fe447f3da10>
+-<Eye[4x4]:0x7fe447f40640>
'''
if seen is None:
seen = set([])
print((prefix[:-2] + "+-" if len(prefix) > 0 else "") + repr(instance))
if instance not in seen:
seen.add(instance)
last = len(instance)
for ii, item in enumerate(instance):
showContent(item, seen=seen,
prefix=prefix + ("| " if ii < last - 1 else " "))
################################################################################
################################################## CONSTANT definitions
################################################## class TypeDict
class TypeDict(dict):
def __getitem__(self, key):
if key not in self:
key = key.type if isinstance(key, np.dtype) else None
return dict(self).get(key, '???')
class COLOR():
'''Give escape format strings (color, face) a name.'''
END = "\033[0m"
BOLD = "\033[1m"
LINE = "\033[4m"
RED = "\033[91m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
PURPLE = "\033[95m"
AQUA = "\033[96m"
def fmtStr(string, color):
"""Short summary.
Parameters
----------
string : type
Description of parameter `string`.
color : type
Description of parameter `color`.
Returns
-------
type
Description of returned object.
"""
'''Print a string quoted by some format specifiers.'''
# colored output only supported with linux
return ("%s%s%s" %(color, string, COLOR.END)
if currentOS == 'Linux'
else string)
def fmtGreen(string):
"""Short summary.
Parameters
----------
string : type
Description of parameter `string`.
Returns
-------
type
Description of returned object.
"""
'''Print string in green.'''
return fmtStr(string, COLOR.GREEN)
def fmtRed(string):
"""Short summary.
Parameters
----------
string : type
Description of parameter `string`.
Returns
-------
type
Description of returned object.
"""
'''Print string in red.'''
return fmtStr(string, COLOR.RED)
def fmtYellow(string):
"""Short summary.
Parameters
----------
string : type
Description of parameter `string`.
Returns
-------
type
Description of returned object.
"""
'''Print string in yellow.'''
return fmtStr(string, COLOR.YELLOW)
def fmtBold(string):
"""Short summary.
Parameters
----------
string : type
Description of parameter `string`.
Returns
-------
type
Description of returned object.
"""
'''Print string in bold face.'''
return fmtStr(string, COLOR.BOLD)
reAnsiEscape = None
def fmtEscape(string):
"""Short summary.
Parameters
----------
string : type
Description of parameter `string`.
Returns
-------
type
Description of returned object.
"""
'''Return a string with all ASCII escape sequences removed.'''
global reAnsiEscape
if reAnsiEscape is None:
reAnsiEscape = re.compile(r'\x1b[^m]*m')
return reAnsiEscape.sub('', string)
def dynFormat(s, *keys):
"""Short summary.
Parameters
----------
s : type
Description of parameter `s`.
*keys : type
Description of parameter `*keys`.
Returns
-------
type
Description of returned object.
"""
return s.replace('%', '%%(%s)') % keys
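# Example: dynFormat('%s=%d', 'name', 'value') returns '%(name)s=%(value)d',
# i.e. the positional specifiers become named mapping keys in argument order.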
################################################## getConsoleSize()
fallbackConsoleSize = (80, 25)
if (currentOS in ['Linux', 'Darwin']) or currentOS.startswith('CYGWIN'):
import fcntl
import termios
# source: https://gist.github.com/jtriley/1108174
def getConsoleSize():
def ioctl_GWINSZ(fd):
try:
                return struct.unpack(
                    'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'1234'))
except EnvironmentError:
return None
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except EnvironmentError:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except (EnvironmentError, KeyError):
cr = fallbackConsoleSize
return int(cr[0]), int(cr[1])
elif currentOS == 'Windows':
def getConsoleSize():
cr = fallbackConsoleSize
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(left, top, right, bottom) = struct.unpack("10x4h4x", csbi.raw)
cr = (right - left, bottom - top)
except (ImportError, EnvironmentError):
pass
return cr
else:
def getConsoleSize():
return fallbackConsoleSize
################################################## worker's CONSTANT classes
class NAME(object):
DATA = 'data'
FWDATA = 'dataForward'
BWDATA = 'dataBackward'
DTYPE = 'dtype'
SHAPE = 'shape'
SHAPE_T = 'shapeBackward'
ALIGN = 'align'
CENTER = 'center'
FORMAT = 'format'
NAME = 'name'
CLASS = 'class'
TARGET = 'target'
FILENAME = 'filename'
CAPTION = 'caption'
BENCHMARK = 'bench'
DOCU = 'docu'
TEST = 'test'
COMMON = 'common'
TEMPLATE = 'template'
VARIANT = 'variant'
RESULT = 'result'
HEADER = 'header'
TYPENAME = TypeDict({
np.int8: 'i08', np.int16: 'i16', np.int32: 'i32',
np.int64: 'i64', np.float32: 'f32', np.float64: 'f64',
np.complex64: 'c32', np.complex128: 'c64', None: '???'
})
ALLTYPES = [np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64,
np.complex64, np.complex128]
FEWTYPES = [np.int32, np.float32, np.complex64]
SINGLETYPES = [np.int32, np.int16, np.float32, np.complex64]
DOUBLETYPES = [np.int64, np.float64, np.complex128]
INTTYPES = [np.int8, np.int16, np.int32, np.int64]
FLOATTYPES = [np.float32, np.float64]
COMPLEXTYPES = [np.complex64, np.complex128]
LARGETYPES = [np.int32, np.int64, np.float32, np.float64,
np.complex64, np.complex128]
ALLALIGNMENTS = [ALIGNMENT.FCONT, ALIGNMENT.CCONT, ALIGNMENT.STRIDE]
# repeat some of the definitions in this unit to allow compact imports
ALIGNMENT = ALIGNMENT
ArrayGenerator = ArrayGenerator
Permutation = Permutation
IgnoreFunc = IgnoreFunc
################################################################################
################################################## class Runner
class Worker(object):
"""Short summary."""
'''
options - dictionary structure containing options for multiple targets.
{ 'nameOfTarget': {'parameter': 123,
'anotherParameter': 456
}, NAME.DEFAULTS: {'parameter': default parameter unless overwritten
within the selected target
}}
results - output of each target's as specified in options
'''
cbStatus=None
cbResult=None
target=None
def __init__(self, targetClass, **options):
"""Short summary.
Parameters
----------
targetClass : type
Description of parameter `targetClass`.
**options : type
Description of parameter `**options`.
Returns
-------
type
Description of returned object.
"""
'''
Setup an inspection environment on a fastmat class specified in target.
Along the way an empty instance of target will be created and aside
the default options, which may be specified in runnerDefaults, an
arbitrarily named method of target may be specified to return more
specific options when called.
extraOptions may be specified to overwrite any parameter with highest
priority.
Both extraOptions and runnerDefaults must be specified as a two-level
dictionary with the outer level specifying target names as keys and the
inner level the actual parameter for the target.
'''
# the test target is a fastmat class to be instantiated in the runners
if not inspect.isclass(targetClass):
raise ValueError("target in init of Runner must be a class type")
# set defaults for options
targetOptionMethod = options.get('targetOptionMethod', None)
runnerDefaults = options.get('runnerDefaults', {})
extraOptions = options.get('extraOptions', {})
self.target=targetClass.__new__(targetClass)
# targetOptionMethod specifies a method name of target which will
# return a dictionary with class-specific options if called. If this
# functionality is not needed, targetOptionsMethod or runnerDefaults
# may be left to their default values.
targetOptionMethod=getattr(self.target, targetOptionMethod, None)
options={name: mergeDicts(target, extraOptions)
for name, target in ({} if targetOptionMethod is None
else targetOptionMethod()).items()}
# determine keys for final output
keys=[name for name in options.keys() if name != NAME.COMMON]
# start with the defaults for the selected keys as a baseline
common=runnerDefaults.get(NAME.COMMON, {})
self.options={name: mergeDicts(common, runnerDefaults.get(name, {}))
for name in keys}
# a NAME.TEMPLATE parameter in a target will cause the specified
# target to be extend by the default options of another target.
# Priority order: extraOptions > options > defaults > COMMON
for name, target in self.options.items():
template=(options[name][NAME.TEMPLATE]
if name in options and NAME.TEMPLATE in options[name]
else (runnerDefaults[name][NAME.TEMPLATE]
if (name in runnerDefaults and
NAME.TEMPLATE in runnerDefaults[name])
else None))
if template is not None:
target.update(
runnerDefaults.get(template, {}))
# now add our specific options from options and extraoptions
common=options.get(NAME.COMMON, {})
self.options={name: mergeDicts(self.options.get(name, {}),
mergeDicts(common, options[name]))
for name in keys}
# finally, tag each target with its target name so that parameter links
# may address it over the key [NAME.TARGET]. Also, write down the name
# of the class in [NAME.CLASS]
for name, target in self.options.items():
target[NAME.TARGET]=name
target[NAME.CLASS]=targetClass.__name__
# initialize output
self.results=AccessDict({})
def emitStatus(self, *args):
"""Short summary.
Parameters
----------
*args : type
Description of parameter `*args`.
Returns
-------
type
Description of returned object.
"""
if self.cbStatus is not None:
self.cbStatus(*args)
def emitResult(self, *args):
"""Short summary.
Parameters
----------
*args : type
Description of parameter `*args`.
Returns
-------
type
Description of returned object.
"""
if self.cbResult is not None:
self.cbResult(*args)
def run(self, *targetNames, **extraArgs):
"""Short summary.
Parameters
----------
*targetNames : type
Description of parameter `*targetNames`.
**extraArgs : type
Description of parameter `**extraArgs`.
Returns
-------
type
Description of returned object.
"""
'''
Execute all selected targets by a list of targetNames.
If *targetNames is empty all targets will be run.
The output of each :math:`TARGET` will be written to
self.results[:math:`TARGET`]
'''
# determine console width
self.consoleWidth=getConsoleSize()[1]
if len(targetNames) == 0:
targetNames=self.options.keys()
targets={name: self.options[name]
for name in targetNames if name in self.options}
for name, target in sorted(targets.items()):
options=target.copy()
if len(extraArgs) > 0:
options.update(extraArgs)
result = self._run(name, options)
# make all result dicts of type accessDict for easy access
self.results[name] = convertToAccessDicts(result)
if self.cbResult is not None:
self.cbResult(name, result)
| apache-2.0 | 8,123,056,730,716,135,000 | 29.868444 | 80 | 0.560808 | false |
ankittare/NLP_QuestionGeneration | src/util/RDR_POS/SCRDRlearner/Node.py | 1 | 4600 | def tabStr(length):
s = ""
for i in xrange(length):
s += "\t"
return s
class Node:
"""
Class representing a node of SCRDR tree
"""
    def __init__(self, condition, conclusion, father = None, exceptChild = None, elseChild = None, cornerstoneCases = None, depth = 0):
"""
rule: python code - rule of the node
conclusion: python code - conclusion of the node if the rule is satisfied
father: Node - father of the node
exceptChild, elseChild: Node - two children of the node
cornerstoneCases: list of instances of Object class which are the cornerstone cases of the node
depth: depth of node in tree
"""
self.condition = condition
self.conclusion = conclusion
self.exceptChild = exceptChild
self.elseChild = elseChild
        # avoid the shared-mutable-default pitfall: each node gets its own list
        self.cornerstoneCases = cornerstoneCases if cornerstoneCases is not None else []
self.father = father
self.depth = depth
def satisfied(self, object):
return eval(self.condition)
def executeConclusion(self, object):
exec(self.conclusion)
def appendCornerstoneCase(self, object):
self.cornerstoneCases.append(object)
def check(self, object):
if self.satisfied(object):
self.executeConclusion(object)
if self.exceptChild != None:
self.exceptChild.check(object)
else:
if self.elseChild != None:
self.elseChild.check(object)
def checkDepth(self, object, length):
if self.depth <= length:
if self.satisfied(object):
self.executeConclusion(object)
if self.exceptChild != None:
self.exceptChild.checkDepth(object, length)
else:
if self.elseChild != None:
self.elseChild.checkDepth(object, length)
"""
Find the node which the current node is exception of
"""
def findRealFather(self):
node = self
fatherNode = node.father
while True and fatherNode != None:
if fatherNode.exceptChild == node:
break
node = fatherNode
fatherNode = node.father
return fatherNode
"""
Add a else-node to the current node
Check if new rule fire the cornerstone cases of the real father node
- real father is the node which the current node is exception of
"""
    def addElseChild(self, node):
        fatherNode = self.findRealFather()
        # iterate over a copy since matching cornerstone cases are removed
        for object in fatherNode.cornerstoneCases[:]:
            if node.satisfied(object):
                print "Error while adding new else node: the new rule fires the cornerstone cases of the exception-father node"
                print "Condition: %s" % node.condition
                print "Object: %s" % object.toStr()
                fatherNode.cornerstoneCases.remove(object)
        self.elseChild = node
        return True
"""
Add a exception-node to the current node
Check if new rule fire the cornerstone cases of the father node
"""
    def addExceptChild(self, node):
        # iterate over a copy since matching cornerstone cases are removed
        for object in self.cornerstoneCases[:]:
            if node.satisfied(object):
                print "Error while adding new except node: the new rule fires the cornerstone cases of the exception-father node"
                print "Condition: %s" % node.condition
                print "Object: %s" % object.toStr()
                self.cornerstoneCases.remove(object)
        self.exceptChild = node
        return True
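    """
    Minimal usage sketch (hypothetical condition/conclusion strings; real ones
    are generated by the SCRDR learner and reference attributes of the object
    being tagged):
        root = Node("True", "object.conclusion = 'NN'")
        child = Node("object.word == 'run'", "object.conclusion = 'VB'",
                     father=root, depth=1)
        root.addExceptChild(child)
        root.check(obj)  # fires root, then tries the exception branch
    """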
"""
Write to file
"""
def writeToFile(self, out, depth):
space = tabStr(depth)
out.write(space + self.condition + " : " + self.conclusion + "\n")
for case in self.cornerstoneCases:
out.write(" " + space + "cc: " + case.toStr() + "\n")
if self.exceptChild != None:
self.exceptChild.writeToFile(out, depth + 1)
if self.elseChild != None:
self.elseChild.writeToFile(out, depth)
"""
Write to file without seen/cornerstone cases
"""
def writeToFileWithoutSeenCases(self, out, depth):
space = tabStr(depth)
out.write(space + self.condition + " : " + self.conclusion + "\n")
if self.exceptChild != None:
self.exceptChild.writeToFileWithoutSeenCases(out, depth + 1)
if self.elseChild != None:
self.elseChild.writeToFileWithoutSeenCases(out, depth)
| mit | 642,892,481,107,128,700 | 35.8 | 133 | 0.58 | false |
willu47/pyrate | docs/conf.py | 1 | 8874 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# Support markdown
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../pyrate")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# source_suffix = '.rst'  # superseded by the ['.rst', '.md'] definition above
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrate'
copyright = u'2016, Julia Schaumeier, Sam Macbeth, Will Usher'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from pyrate import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrate-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'pyrate Documentation',
u'Will Usher', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit | -4,127,852,748,272,299,500 | 33.395349 | 85 | 0.690219 | false |
cblop/tropic | instal-linux/instal/factparser/FactParser.py | 1 | 2172 | import os
from io import StringIO
# noinspection PyUnresolvedReferences
from instal.clingo import parse_term
from instal.instaljsonhelpers import dict_funs_to_list, trace_dicts_from_file
from instal.instalexceptions import InstalCompileError
class FactParser(object):
"""
FactParser
See __init__.py for more details.
"""
def __init__(self):
pass
def get_facts(self, fact_filenames: list) -> list:
"""
input: a set of fact filenames
output: a list of Function objects that are true at the first timestep
"""
initlist = []
for f in fact_filenames:
name, ext = os.path.splitext(f)
with open(f, "rt") as factfile:
if ext == ".iaf":
initlist += self.parse_iaf_factfile(factfile.read())
if ext == ".json":
initlist += self.parse_json_factfile(factfile)
return initlist
def parse_json_factfile(self, json_file: "File") -> list:
"""
input: a json file
output: a list of the facts true in the *last* step in that json file.
(This allows restarting from an old trace.)
"""
jsons = trace_dicts_from_file(json_file)
if len(jsons) == 0:
return []
return dict_funs_to_list(jsons[-1], keys=["holdsat"])
def parse_iaf_factfile(self, iaf_text: str) -> list:
"""
input: an iaf text (as a string)
output: a list of the facts
"""
initlist = []
iafIO = StringIO(iaf_text)
for init in iafIO.readlines():
init = init.rstrip()
if init == '':
continue
term = parse_term(init)
if term.name in ["initially"]:
where = term.arguments[1]
what = term.arguments[0]
initlist = [
"holdsat(" + str(what) + "," + str(where) + ")"] + initlist
else:
raise InstalCompileError(
".iaf file should be in the format initially({holdsat}, {institution})")
        return [parse_term(i) for i in initlist]  # a real list, also on Python 3 where map() is lazy
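# Illustration (hypothetical institution/fluent names): the .iaf line
#     initially(perm(go), myInst)
# is rewritten to holdsat(perm(go),myInst) and returned as a parsed clingo term.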
| epl-1.0 | 4,603,804,719,699,574,300 | 31.909091 | 92 | 0.541436 | false |
poppogbr/genropy | gnrpy/gnr/xtnd/sync4Dapp_new.py | 1 | 16906 | # -*- coding: UTF-8 -*-
# Genro
# Copyright (c) 2004 Softwell sas - Milano see LICENSE for details
# Author Giovanni Porcari, Francesco Cavazzana, Saverio Porcari, Francesco Porcari
import os
import time, datetime
from logging.handlers import TimedRotatingFileHandler
from logging import Formatter
import logging
gnrlogger = logging.getLogger(__name__)
from gnr.core.gnrlang import errorLog
from gnr.core.gnrbag import Bag, DirectoryResolver
from gnr.app.gnrapp import GnrApp
from gnr.sql.gnrsql_exceptions import NotMatchingModelError
from gnr.sql.gnrsqlmodel import DbModelSrc
from gnr.xtnd.sync4Dtransaction import TransactionManager4D
class GnrSync4DException(Exception):
pass
class Struct4D(object):
def __init__(self, app, packages_folder=None):
self.app = app
self.instance_folder = app.instanceFolder
self.folder4dstructBag = Bag(self.folder4dstruct + '/')['structure']
self.names4d = self.buildNames4d()
self.packages_folder = packages_folder
@property
def folder4d(self):
return self.app.folder4d
@property
def folder4dstruct(self):
path = os.path.join(self.folder4d, 'structure')
if not os.path.isdir(path):
os.mkdir(path)
return path
def areaFolder(self, area):
path = os.path.join(self.packages_folder, area)
print 'areaFolder:', path
if not os.path.isdir(path):
os.mkdir(path)
return path
def modelFolder(self, area):
path = os.path.join(self.areaFolder(area), 'model')
if not os.path.isdir(path):
os.mkdir(path)
return path
def fromNameAs(self, name):
if ' AS ' in name:
name4d, nameSql = name.split(' AS ', 1)
else:
name4d, nameSql = name, name
return name4d.strip(), nameSql.strip().lstrip('_').lower()
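    # Example: fromNameAs('CLIENTS AS _Clients') returns ('CLIENTS', 'clients');
    # the 4D name is kept verbatim while the SQL name drops the leading
    # underscore and is lowercased.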
def nameConverter(self, table='', field='', fullname=None, mode='4D'):
if not fullname:
fullname = '%s_[%s]%s' % (mode, table, field)
else:
fullname = '%s_%s' % (mode, fullname)
return self.names4d.get(fullname.lower())
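    # Example (hypothetical table/field): nameConverter(table='CLIENTS',
    # field='Name') looks up '4d_[clients]name' in self.names4d and returns its
    # (area, sql_table, sql_field) triple, or None if the name is unknown.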
def get4dTables(self):
return [k[4:-1] for k, v in self.names4d.items() if k.startswith('4d') and not v[2]]
def buildNames4d(self):
result = {}
namesdict = {}
for sname, sbag in self.folder4dstructBag.items():
sname = sname.lower()
tablesbag = sbag['tables4d']
if tablesbag:
for tbag in tablesbag.values():
table4D, tableSql = self.fromNameAs(tbag['name'])
path4d = '4D_[%s]' % table4D
                    # store the SQL table name (with area prefix) to avoid multi-area duplicates
                    namesdict[tableSql] = None
if tableSql.startswith('%s_' % sname): #strip the area code from table name
tableSql = tableSql.lower().split('_', 1)[1]
result[path4d.lower()] = (sname, tableSql, None)
for fldbag in tbag['fields'].values():
name4D, nameSql = self.fromNameAs(fldbag['name'])
path4d = '4D_[%s]%s' % (table4D, name4D)
result[path4d.lower()] = (sname, tableSql, nameSql)
tablesbag = sbag['tablesGnr']
if tablesbag:
for tbag in tablesbag.values():
table4D, tableSql = self.fromNameAs(tbag['name'])
path4d = 'GNR_[%s]' % table4D
if tableSql in namesdict:
tableSql = tableSql + '_tbl'
if tableSql.startswith('%s_' % sname): #strip the area code from table name
tableSql = tableSql.lower().lstrip('_').split('_', 1)[1]
result[path4d.lower()] = (sname, tableSql, None)
for fldbag in tbag['fields'].values():
name4D, nameSql = self.fromNameAs(fldbag['name'])
path4d = 'GNR_[%s]%s' % (table4D, name4D)
result[path4d.lower()] = (sname, tableSql, nameSql)
return result
def build(self, configBag):
for sname, sbag in self.folder4dstructBag.items():
sname = sname.lower()
area = self.buildArea(sname, sbag)
if area: #if an area has no tables don't build the folder at all
sqlstructfile = os.path.join(self.modelFolder(sname), 'config_db.xml')
area.toXml(sqlstructfile)
configBag.setItem('packages.%s' % sname, None, alias=sname)
def buildArea(self, sname, sbag):
result = DbModelSrc.makeRoot()
pkg = result.package(name=sname)
tablesbag = sbag['tables4d']
exportArea = False
if tablesbag:
exportArea = True
for tbag in tablesbag.values():
self.buildTable(pkg, tbag)
tablesbag = sbag['tablesGnr']
if tablesbag:
exportArea = True
for tbag in tablesbag.values():
self.buildTable(pkg, tbag, mode='GNR')
if exportArea:
return result
def buildTable(self, pkg, tbag, mode='4D'):
name4D, name = self.fromNameAs(tbag['name'])
name = self.nameConverter(table=name4D, mode=mode)[1]
name_short = name
comment = 'imported from 4d %s' % name4D
pkey4d = tbag['pkey.name4d']
pkey = None
if pkey4d:
pkey = self.nameConverter(table=name4D, field=tbag['pkey.name4d'], mode=mode)[2]
table = pkg.table(name=name, comment=comment,
name_short=name_short,
pkey=pkey)
for fldbag in tbag['fields'].values():
self.buildField(table, fldbag)
if 'extrafields' in tbag:
for fldbag in tbag['extrafields'].values():
self.buildField(table, fldbag)
def buildField(self, table, fldbag):
name4D, name = self.fromNameAs(fldbag['name'])
comment = 'imported from 4d %s' % name4D
dtype = fldbag['type']
len_max = None
size = None
if dtype.startswith('A'):
len_max = dtype[1:].strip().strip('_')
len_max = str(int(len_max) + 2)
dtype = 'A'
if len_max:
size = '0:%s' % len_max
fld = table.column(name, dtype=dtype, name_long=name4D, comment=comment,
unique=fldbag['unique'], indexed=fldbag['indexed'],
size=size)
if fldbag['relate']:
case_insensitive = False
sqltarget = self.nameConverter(table=fldbag['relate.table4d'], field=fldbag['relate.field4d'])
if sqltarget:
if (dtype == 'A' and int(len_max) > 10 and sqltarget[2].lower() != 'sy_id'):
case_insensitive = True
else: # no link to 4d table, try genro table
sqltarget = self.nameConverter(table=fldbag['relate.tablegnr'], field=fldbag['relate.fieldgnr'],
mode='GNR')
case_insensitive = True
if sqltarget:
target = '%s.%s.%s' % (sqltarget[0], sqltarget[1], sqltarget[2])
fld.relation(target)
else:
print "Error: missing field \n%s" % str(fldbag['relate'])
class GnrAppSync4D(GnrApp):
def __init__(self, *args, **kwargs):
self.sync4d_name = kwargs.pop('sync4d_name','sync4d')
super(GnrAppSync4D,self).__init__(*args,**kwargs)
def onIniting(self):
basepath = self.config.getAttr('packages', 'path')
if not basepath:
basepath = os.path.normpath(os.path.join(self.instanceFolder, '..', '..', 'packages'))
if not os.path.isdir(basepath):
raise GnrSync4DException('missing package path')
self.s4d = Struct4D(self, basepath)
self.checkChanges = False
if not self.config['packages']:
self.rebuildRecipe()
def onInited(self):
self._startLog()
gnrpkg = self.db.package('gnr')
self.sync4d_timing = int(gnrpkg.getAttr('sync4d_timing', 0)) or 4
self.area_zz = self.config.getAttr('packages', 'area_zz')
self.transaction4d = TransactionManager4D(self, 'gnr')
def _startLog(self):
logdir = os.path.join(self.instanceFolder, 'logs')
if not os.path.isdir(logdir):
os.makedirs(logdir)
logfile = os.path.join(logdir, 'gnrsync4d.log')
loghandler = TimedRotatingFileHandler(logfile, 'MIDNIGHT', 1, 28)
loghandler.setLevel(logging.DEBUG)
formatter = Formatter('%(asctime)s - %(name)-12s: %(levelname)-8s %(message)s')
loghandler.setFormatter(formatter)
rootlogger = logging.getLogger('')
rootlogger.setLevel(logging.DEBUG)
rootlogger.addHandler(loghandler)
if 'admin' in self.db.packages:
self.db.package('admin').mailLog(self.processName)
def _get_processName(self):
return 'sync4d daemon: %s' % self.instanceFolder
processName = property(_get_processName)
@property
def folder4d(self):
path = os.path.join(self.instanceFolder, self.sync4d_name)
if not os.path.isdir(path):
os.mkdir(path)
return path
def _get_folderdialog4d(self):
path = os.path.join(self.folder4d, 'dialog4d')
if not os.path.isdir(path):
os.mkdir(path)
return path
folderdialog4d = property(_get_folderdialog4d)
def _get_folder4dDataIn(self):
path = os.path.join(self.folder4d, 'data')
if not os.path.isdir(path):
os.mkdir(path)
return path
folder4dDataIn = property(_get_folder4dDataIn)
def _get_folder4dDataOut(self):
path = os.path.join(self.folder4d, 'imported')
if not os.path.isdir(path):
os.mkdir(path)
return path
folder4dDataOut = property(_get_folder4dDataOut)
def beforeLoop(self):
if self.checkChanges:
changes = self.db.checkDb()
if changes:
raise NotMatchingModelError('\n'.join(self.db.model.modelChanges))
self.running = True
def loop(self):
self.beforeLoop()
while self.running:
self.do()
time.sleep(self.sync4d_timing)
def do(self):
try:
self.lookFor4dFiles()
self.lookForBackSync()
return True
except:
tb_text = errorLog(self.processName)
gnrlogger.error(tb_text)
raise
def lookForBackSync(self):
l = self.db.table('gnr.sync_out').query(columns='*',
where="$client = :client",
client='sync4d',
order_by="$request", limit=10).fetch()
while l:
for t in l:
self.syncOutTransaction(t)
l = self.db.table('gnr.sync_out').query(columns='*',
where="$client = :client",
client='sync4d',
order_by="$request", limit=10).fetch()
def syncOutTransaction(self, transaction):
fname = '%s_%s_%s_%s.xml' % (transaction['request'].strftime('%Y-%m-%d_%H%M%S'),
transaction['request'].microsecond,
transaction['maintable'],
transaction['action'])
fname = os.path.join(self.folderdialog4d, 'test', fname)
trbag = Bag()
trbag['command'] = 'sync_in'
trbag['maintable'] = transaction['maintable']
trbag['action'] = transaction['action']
trbag['data'] = Bag(transaction['data'])
trbag.toXml(fname)
self.db.table('gnr.sync_out').delete(transaction)
self.db.commit()
def lookFor4dFiles(self):
dataInPath = self.folder4dDataIn
folders = [f for f in os.listdir(dataInPath) if not f.startswith('.')]
if folders:
folders.sort()
for folder in folders:
self.importFolder(dataInPath, folder)
if folder != datetime.date.today().strftime("%Y-%m-%d"):
path = os.path.join(dataInPath, folder)
l = os.listdir(path)
for f in l:
if f.startswith('.'): os.remove(os.path.join(path, f))
if not os.listdir(path):
os.rmdir(path)
def importFolder(self, dataInPath, folder):
folderpath = os.path.join(dataInPath, folder)
names = [f for f in os.listdir(folderpath) if not f.startswith('.')]
names.sort()
for fname in names:
fullname = os.path.join(folderpath, fname)
self.importFile(fullname)
dataOutPath = os.path.join(self.folder4dDataOut, folder)
if not os.path.exists(dataOutPath):
os.mkdir(dataOutPath)
os.rename(fullname, os.path.join(dataOutPath, fname))
def importFile(self, fullname):
try:
b = Bag(fullname)
except:
time.sleep(10) # 4D may be still writing the file, wait some seconds and try again
b = Bag(fullname)
if 'transaction' in b:
for tr, attr in b.digest('#v,#a'):
n = tr.getNode('trigger')
attr.update(n.getAttr())
self.writeTransaction(n.value, attr, file_name=fullname)
else:
self.writeImport(b, file_name=fullname)
def writeTransaction(self, data, attr, file_name=None):
if not attr['from']:
return
request_ts = None
if attr.get('sy_date') and attr.get('sy_time'):
request_ts = datetime.datetime.combine(attr.get('sy_date'), attr.get('sy_time'))
if self.area_zz:
pkg = self.area_zz
tbl = attr['from'].lower()
else:
pkg, tbl = attr['from'].lower().lstrip('_').split('_', 1)
self.setSubTriggerSchemata(data)
self.transaction4d.writeTransaction(mode='sync', action=attr['mode'],
maintable='%s.%s' % (pkg, tbl),
data=data.toXml(),
request_id=attr.get('sy_id'),
file_name=file_name,
queue_id='sync4d',
request_ts=request_ts
)
gnrlogger.info("%s --> %s - %s" % (file_name, attr['mode'], '%s.%s' % (pkg, tbl)))
def writeImport(self, b, file_name=None):
if self.area_zz:
pkg = self.area_zz
tbl = b['FROM'].lower()
else:
pkg, tbl = b['FROM'].lower().lstrip('_').split('_', 1)
self.transaction4d.writeTransaction(mode='import', action=b['MODE'],
maintable='%s.%s' % (pkg, tbl),
data=b['DATA'].toXml(),
file_name=file_name,
queue_id='sync4d'
)
gnrlogger.info("%s --> %s - %s" % (file_name, 'import', '%s.%s' % (pkg, tbl)))
def setSubTriggerSchemata(self, data):
for k, tr, attr in data.digest('#k,#v,#a'):
if k != 'data':
tbl = attr['from']
if not tbl:
return
if not '.' in tbl:
if self.area_zz:
pkg = self.area_zz
tbl = tbl.lower()
else:
pkg, tbl = tbl.lower().lstrip('_').split('_', 1)
attr['from'] = '%s.%s' % (pkg, tbl)
self.setSubTriggerSchemata(tr)
def rebuildRecipe(self):
self.s4d.build(self.config)
self.config.toXml(os.path.join(self.instanceFolder, 'instanceconfig.xml'))
def importTable(self, tbl):
if len(tbl) > 23:
tbl = tbl[:23]
cmdpath = os.path.join(self.folderdialog4d, 'exp_%s.xml' % tbl)
b = Bag()
b['command'] = 'Export4dTables'
b['t4d.tbl'] = tbl
b.toXml(cmdpath)
def firstImport(self):
tables = self.s4d.get4dTables()
tables.sort()
for tbl in tables:
self.importTable(tbl)
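# Bootstrap sketch (hedged -- the constructor arguments below follow generic
# GnrApp conventions and are assumptions here, not documented API):
#
#   app = GnrAppSync4D('/path/to/instance', sync4d_name='sync4d')
#   app.firstImport()   # queue an export request for every known 4D table
#   app.loop()          # then poll the exchange folders every sync4d_timing s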
| lgpl-2.1 | 441,285,359,815,141,440 | 38.410256 | 112 | 0.524488 | false |
KaydenIvanov/random-meme-bot | RandomMeme.py | 1 | 14387 |
#Made by Andre Augusto Leite de Almeida
import os
import random
import collections
from PIL import Image, ImageDraw
from numpy import array
from skimage import img_as_float
from skimage.measure import compare_ssim
#Fast, but supports only one image slot per template
class Basic_Meme:
def __init__(self,folder):
self.folder = folder
#Choose the random images
    # Images, Templates, and Result folders need to exist
def random_image(self):
#Choose a random image from the folder "Images"
path = os.path.join(self.folder,"Images")
random_filename = random.choice([
x for x in os.listdir(path)
if os.path.isfile(os.path.join(path, x))
])
imagefile = os.path.join(path, random_filename)
#Choose a random image from the folder "Templates"
path2 = os.path.join(self.folder,"Templates")
random_filename2 = random.choice([
x for x in os.listdir(path2)
if os.path.isfile(os.path.join(path2, x))
])
templatefile = os.path.join(path2, random_filename2)
return templatefile, imagefile
#Calculates the alpha on the template, actually only works for 1 blank space
def view_transparency(self):
img = Image.open(self.templatefile)
img = img.convert("RGBA")
pixels = img.load()
alpha = (255,255,255,0)
xlist = []
ylist = []
for y in range(img.size[1]):
for x in range(img.size[0]):
if pixels[x, y] == alpha:
xlist.append(x)
ylist.append(y)
xleft = min(xlist)
xright = max(xlist)
ytop = min(ylist)
ybot = max(ylist)
return xleft, ytop, xright, ybot
#Test the template alpha location, debug tool
def test_templates(self):
files = self.random_image()
self.templatefile = files[0]
image = Image.open(self.templatefile)
dr = ImageDraw.Draw(image)
alpha = (self.view_transparency()) # (x1,y1, x2,y2)
line = (alpha[0],alpha[1],alpha[0],alpha[3])
dr.line(line, fill="orange", width=10)
line = (alpha[0],alpha[1],alpha[2],alpha[1])
dr.line(line, fill="orange", width=10)
line = (alpha[0],alpha[3],alpha[2],alpha[3])
dr.line(line, fill="orange", width=10)
line = (alpha[2],alpha[1],alpha[2],alpha[3])
dr.line(line, fill="orange", width=10)
image.show()
#Finally make the meme
def make(self):
#Get the random files and view transparency
files = self.random_image()
self.templatefile = files[0]
self.imagefile = files[1]
local = self.view_transparency()
#Get alpha values
xleft = local[0]
xright = local[2]
ytop = local[1]
ybot = local[3]
#Get the size from variables
x = xright - xleft
y = ybot - ytop
size = (x,y)
#Open the images and resize to fit template
image = Image.open(self.imagefile)
image = image.resize(size)
#Open two templates, one for background, other for foreground
template = Image.open(self.templatefile)
template2 = Image.open(self.templatefile)
#Convert to RGB Alpha
image = image.convert("RGBA")
template = template.convert("RGBA")
#Finally paste the images
template.paste(image, (local[0],local[1]))
template.paste(template2, (0,0), template2)
#Save out
out = os.path.join(self.folder,"Result",str(random.randrange(100000,999999)) + ".jpg") #Random name
print(out)
template.save(out)
#And return the location
return out
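# Minimal usage sketch (not part of the original module; the folder is assumed
# to contain the Images/, Templates/ and Result/ subfolders described above):
#
#   meme = Basic_Meme('/path/to/meme/folder')   # hypothetical path
#   result_path = meme.make()
#   print(result_path)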
#Slower, but can handle templates with any number of spaces
class Advanced_Meme:
def __init__(self,folder):
self.folder = folder
#Random image function
def random_image(self):
path = os.path.join(self.folder,"Images")
random_filename = random.choice([
x for x in os.listdir(path)
if os.path.isfile(os.path.join(path, x))
])
imagefile = os.path.join(path, random_filename)
path2 = os.path.join(self.folder,"Templates")
random_filename2 = random.choice([
x for x in os.listdir(path2)
if os.path.isfile(os.path.join(path2, x))
])
templatefile = os.path.join(path2, random_filename2)
return templatefile, imagefile
#Find the borders of template squares
def follow_border(self,im, x, y, used):
work = [(x, y)]
border = []
while work:
x, y = work.pop()
used.add((x, y))
border.append((x, y))
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1),
(1, 1), (-1, -1), (1, -1), (-1, 1)):
px, py = x + dx, y + dy
try:
if im[px, py] == 255 or (px, py) in used:
continue
work.append((px, py))
except:
pass
return border
#Find template spaces
def find_template_local(self):
# Opening a random template
self.template = self.random_image()[0]
self.original = Image.open(self.template)
orig = self.original
orig_bin = orig.point(lambda x: 0 if x < 128 else 255)
im = orig_bin.load()
border = Image.new('1', orig.size, 'white')
width, height = orig.size
bim = border.load()
# Keep only border points
for x in range(width):
for y in range(height):
if im[x, y][3] == 255:
continue
try:
if im[x + 1, y] or im[x - 1, y] or im[x, y + 1] or im[x, y - 1]:
bim[x, y] = 0
else:
bim[x, y] = 255
except:
pass
used = set()
border = []
for x in range(width):
for y in range(height):
if bim[x, y] == 255 or (x, y) in used:
continue
b = self.follow_border(bim, x, y, used)
border.append(b)
# Find the corners of each space
template_place = []
for b in border:
xmin, xmax, ymin, ymax = width, 0, height, 0
mean_x, mean_y = 0, 0
b = sorted(b)
top_left, bottom_right = b[0], b[-1]
for x, y in b:
mean_x += x
mean_y += y
b = sorted(b, key=lambda x: x[1])
curr = 0
while b[curr][1] == b[curr + 1][1]:
curr += 1
top_right = b[curr]
curr = len(b) - 1
while b[curr][1] == b[curr - 1][1]:
curr -= 1
bottom_left = b[curr]
template_place.append(
[top_left, top_right, bottom_right, bottom_left])
return template_place
def make(self):
template_places = self.find_template_local()
images = []
for s in template_places:
image = self.random_image()[1]
foreground = Image.open(image)
images.append(image)
xleft = min(s[0][0],s[3][0])
xright = max(s[1][0],s[2][0])
ytop = min(s[0][1],s[1][1])
ybot = max(s[2][1],s[3][1])
x = xright - xleft
y = ybot - ytop
#resize_factor = foreground.size[0]/foreground.size[1]
#x = y*resize_factor
size = (int(x),y)
original2 = Image.open(self.template)
foreground = foreground.resize(size)
self.original.paste(foreground, (xleft,ytop))
self.original.paste(original2, (0, 0), original2)
# Random name and save
out = os.path.join(self.folder, "Result", str(random.randrange(100000, 999999)) + ".jpg")
self.original.save(out)
# And return location
print(images)
return out, images
#Slowest, but can handle multiple images and picks the best-fitting images for the template
class Conscious_Meme(Advanced_Meme):
#Calculate SSIM
def ssim(self, image1, image2):
image1 = Image.open(image1).convert('RGB')
if image1.size[0] > 300:
new_size = (300, int(image1.size[1] / image1.size[0] * 300))
else:
new_size = image1.size
print(image1.size, new_size)
image1 = image1.resize(new_size)
image2 = Image.open(image2).resize(new_size).convert('RGB')
image1 = array(image1)
image2 = array(image2)
img1 = img_as_float(image1)
img2 = img_as_float(image2)
return compare_ssim(img1, img2, win_size=None, gradient=False, multichannel=True)
# Convert RGB to Hue
def RGBtoHue(self,r, g, b):
        R = r / 255.0
        G = g / 255.0
        B = b / 255.0
RGB = {'R': R, 'G': G, 'B': B}
Max = max(RGB, key=RGB.get)
Min = min(RGB, key=RGB.get)
try:
            if Max == 'R':
                Hue = (RGB['G'] - RGB['B']) / (RGB[Max] - RGB[Min])
            elif Max == 'G':
                Hue = 2.0 + (RGB['B'] - RGB['R']) / (RGB[Max] - RGB[Min])
            elif Max == 'B':
                Hue = 4.0 + (RGB['R'] - RGB['G']) / (RGB[Max] - RGB[Min])
except ZeroDivisionError:
Hue = 0.0
Hue = Hue * 60
if Hue < 0:
Hue = Hue + 360
return Hue
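    # Quick sanity check of the conversion above (illustrative values, not in
    # the original code): pure red, green and blue map to hues 0, 120 and 240,
    # and a grey pixel (R == G == B) hits the ZeroDivisionError branch -> 0.0.
    #
    #   m = Conscious_Meme('.')     # any folder; RGBtoHue does not use it
    #   m.RGBtoHue(255, 0, 0)       # -> 0.0
    #   m.RGBtoHue(0, 255, 0)       # -> 120.0
    #   m.RGBtoHue(0, 0, 255)       # -> 240.0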
    # Calculates the most common hues of an image
def ImageCommonHue(self,image):
image = Image.open(image).convert('RGB')
ratio = image.size[1] / image.size[0]
image = image.resize((400, int(400 * ratio)))
image = image.convert('P', palette=Image.ADAPTIVE, colors=3)
image = image.convert('RGB')
pixels = image.load()
hueList = []
n = 0
for y in range(image.size[1]):
for x in range(image.size[0]):
rgb = pixels[x, y]
hue = self.RGBtoHue(rgb[0], rgb[1], rgb[2])
hueList.append(hue)
n += 1
hueList = [int(x / 10) * 10 for x in hueList]
hueDict = collections.Counter(hueList)
returning = [None] * 3
        # Take up to the three most common hues; slots left as None are
        # zeroed out below.
        for n, (hue, _count) in enumerate(hueDict.most_common(3)):
            returning[n] = hue
for hue in range(len(returning)):
if returning[hue] is None:
returning[hue] = 0
return returning
#Calculate the difference of two images by hue
def ImageHueDiff(self,image1, image2, calculeImage1=True):
if calculeImage1 is True:
hue1 = self.ImageCommonHue(image1)
elif calculeImage1 is False:
hue1 = image1
hue2 = self.ImageCommonHue(image2)
diffHue = [None] * 3
n = 0
for hue in hue1:
diffHue[n] = hue - hue2[n]
n += 1
diffHue = [abs(x) for x in diffHue]
diff = sum(diffHue)
return diff
# Make a list of image semelhance
def compare_image(self, image1):
files = [f for f in os.listdir(os.path.join(self.folder, 'Images')) if
os.path.isfile(os.path.join(os.path.join(self.folder, 'Images'), f))]
choosen = []
results = {}
        i = 0
        # Cap at the number of available files so this sampling loop cannot
        # spin forever when fewer than 100 images exist.
        while i < min(100, len(files)):
            image = random.choice(files)
            if image not in choosen:
                choosen.append(image)
                i += 1
hueImage1 = self.ImageCommonHue(image1)
        if max(hueImage1) == 0:
for image2 in choosen:
image2 = os.path.join(self.folder,'Images',image2)
results[image2] = self.ssim(image1, os.path.join(image2))
resultKeys = [x for x in results.keys()]
for x in resultKeys:
                if results[x] == 1.0:
del results[x]
diffList = sorted(results.keys(), key=lambda k: results[k], reverse=True)
else:
n=0
for image2 in choosen:
image2 = os.path.join(self.folder,'Images',image2)
results[image2] = self.ImageHueDiff(hueImage1, image2, calculeImage1=False)
n += 1
print(str(n) + '=' + str(results[image2]))
resultKeys = [x for x in results.keys()]
for x in resultKeys:
                if results[x] == 0:
del results[x]
diffList = sorted(results.keys(), key=lambda k: results[k])
return diffList
#Make the meme
def make(self):
template_places = self.find_template_local()
images = []
i = 0
image1 = self.random_image()[1]
        if len(template_places) == 1:
images = [image1]
else:
images = [image1] + self.compare_image(image1)
for s in template_places:
image = images[i]
foreground = Image.open(image)
images.append(image)
xleft = min(s[0][0], s[3][0])
xright = max(s[1][0], s[2][0])
ytop = min(s[0][1], s[1][1])
ybot = max(s[2][1], s[3][1])
x = xright - xleft
y = ybot - ytop
size = (int(x), y)
original2 = Image.open(self.template)
foreground = foreground.resize(size)
self.original.paste(foreground, (xleft, ytop))
self.original.paste(original2, (0, 0), original2)
i += 1
# Random name and save
out = os.path.join(self.folder, "Result", str(random.randrange(100000, 999999)) + ".jpg")
self.original.save(out)
# And return location
return out, images
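# End-to-end sketch (hedged; same folder layout assumption as above). Unlike
# Basic_Meme, the remaining template slots are filled with hue/SSIM-matched
# images rather than purely random picks:
#
#   meme = Conscious_Meme('/path/to/meme/folder')   # hypothetical path
#   out_path, used_images = meme.make()
#   print(out_path, used_images)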
| mit | -374,368,781,760,749,100 | 30.61978 | 107 | 0.500938 | false |
mgerstner/backintime | common/askpass.py | 1 | 1662 |
# Copyright (C) 2012-2017 Germar Reitze
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
try:
import gtk
except:
pass
import password
import password_ipc
import tools
import config
if __name__ == '__main__':
"""
return password.
"""
cfg = config.Config()
tools.envLoad(cfg.cronEnvFile())
profile_id = os.getenv('ASKPASS_PROFILE_ID', '1')
mode = os.getenv('ASKPASS_MODE', 'local')
if mode == 'USER':
prompt = os.getenv('ASKPASS_PROMPT', None)
pw = password.Password(cfg)
print(pw.passwordFromUser(None, prompt = prompt))
sys.exit(0)
temp_file = os.getenv('ASKPASS_TEMP')
if temp_file is None:
#normal mode, get password from module password
pw = password.Password(cfg)
print(pw.password(None, profile_id, mode))
sys.exit(0)
#temp mode
fifo = password_ipc.FIFO(temp_file)
pw = fifo.read(5)
if pw:
print(pw)
| gpl-2.0 | 3,322,878,482,488,838,000 | 28.678571 | 76 | 0.663057 | false |
skosukhin/spack | lib/spack/spack/cmd/load.py | 1 | 1770 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "add package to environment using `module load`"
section = "environment"
level = "short"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help="spec of package to load with modules "
"(if -, read specs from STDIN)")
def load(parser, args):
print_module_placeholder_help()
| lgpl-2.1 | -8,432,269,622,799,385,000 | 40.162791 | 78 | 0.676271 | false |
cjaymes/expatriate | tests/test_importable.py | 1 | 1223 |
# Copyright 2016 Casey Jaymes
# This file is part of Expatriate.
#
# Expatriate is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Expatriate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Expatriate. If not, see <http://www.gnu.org/licenses/>.
import importlib
import pkgutil
import sys
import expatriate
def iter_packages(pkg):
if sys.platform != 'win32' and 'windows' in pkg.__name__.lower():
# windows modules frequently fail to import on non-windows
return
for m_finder, m_name, m_ispkg in pkgutil.iter_modules(path=pkg.__path__):
mod = importlib.import_module(pkg.__name__ + '.' + m_name, pkg.__name__)
if m_ispkg:
iter_packages(mod)
def test_importable():
iter_packages(expatriate)
| lgpl-3.0 | -5,404,626,518,550,559,000 | 32.972222 | 80 | 0.715454 | false |
nttks/edx-platform | lms/djangoapps/courseware/tests/helpers.py | 1 | 6577 |
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from courseware.access import has_access
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import Registration
def get_request_for_user(user):
"""Create a request object for user."""
request = RequestFactory()
request.user = user
request.COOKIES = {}
request.META = {}
request.is_secure = lambda: True
request.get_host = lambda: "edx.org"
request.method = 'GET'
return request
class LoginEnrollmentTestCase(TestCase):
"""
Provides support for user creation,
activation, login, and course enrollment.
"""
user = None
def setup_user(self):
"""
Create a user account, activate, and log in.
"""
self.email = '[email protected]'
self.password = 'bar'
self.username = 'test'
self.user = self.create_account(
self.username,
self.email,
self.password,
)
self.activate_user(self.email)
self.login(self.email, self.password)
# Update local user data
self.user.refresh_from_db()
def assert_request_status_code(self, status_code, url, method="GET", **kwargs):
make_request = getattr(self.client, method.lower())
response = make_request(url, **kwargs)
self.assertEqual(
response.status_code, status_code,
"{method} request to {url} returned status code {actual}, "
"expected status code {expected}".format(
method=method, url=url,
actual=response.status_code, expected=status_code
)
)
return response
# ============ User creation and login ==============
def login(self, email, password):
"""
Login, check that the corresponding view's response has a 200 status code.
"""
resp = self.client.post(reverse('login'),
{'email': email, 'password': password})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertTrue(data['success'])
def logout(self):
"""
Logout; check that the HTTP response code indicates redirection
as expected.
"""
# should redirect
self.assert_request_status_code(302, reverse('logout'))
def create_account(self, username, email, password):
"""
Create the account and check that it worked.
"""
url = reverse('create_account')
request_data = {
'username': username,
'email': email,
'password': password,
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
resp = self.assert_request_status_code(200, url, method="POST", data=request_data)
data = json.loads(resp.content)
self.assertEqual(data['success'], True)
# Check both that the user is created, and inactive
user = User.objects.get(email=email)
self.assertFalse(user.is_active)
return user
def activate_user(self, email):
"""
Look up the activation key for the user, then hit the activate view.
No error checking.
"""
activation_key = Registration.objects.get(user__email=email).activation_key
# and now we try to activate
url = reverse('activate', kwargs={'key': activation_key})
self.assert_request_status_code(200, url)
# Now make sure that the user is now actually activated
self.assertTrue(User.objects.get(email=email).is_active)
def enroll(self, course, verify=False):
"""
Try to enroll and return boolean indicating result.
`course` is an instance of CourseDescriptor.
`verify` is an optional boolean parameter specifying whether we
want to verify that the student was successfully enrolled
in the course.
"""
resp = self.client.post(reverse('change_enrollment'), {
'enrollment_action': 'enroll',
'course_id': course.id.to_deprecated_string(),
'check_access': True,
})
result = resp.status_code == 200
if verify:
self.assertTrue(result)
return result
def unenroll(self, course):
"""
Unenroll the currently logged-in user, and check that it worked.
`course` is an instance of CourseDescriptor.
"""
url = reverse('change_enrollment')
request_data = {
'enrollment_action': 'unenroll',
'course_id': course.id.to_deprecated_string(),
}
self.assert_request_status_code(200, url, method="POST", data=request_data)
class CourseAccessTestMixin(TestCase):
"""
Utility mixin for asserting access (or lack thereof) to courses.
If relevant, also checks access for courses' corresponding CourseOverviews.
"""
def assertCanAccessCourse(self, user, action, course):
"""
Assert that a user has access to the given action for a given course.
Test with both the given course and with a CourseOverview of the given
course.
Arguments:
user (User): a user.
action (str): type of access to test.
course (CourseDescriptor): a course.
"""
self.assertTrue(has_access(user, action, course))
self.assertTrue(has_access(user, action, CourseOverview.get_from_id(course.id)))
def assertCannotAccessCourse(self, user, action, course):
"""
        Assert that a user lacks access to the given action for the given course.
Test with both the given course and with a CourseOverview of the given
course.
Arguments:
user (User): a user.
action (str): type of access to test.
course (CourseDescriptor): a course.
Note:
It may seem redundant to have one method for testing access
and another method for testing lack thereof (why not just combine
them into one method with a boolean flag?), but it makes reading
stack traces of failed tests easier to understand at a glance.
"""
self.assertFalse(has_access(user, action, course))
self.assertFalse(has_access(user, action, CourseOverview.get_from_id(course.id)))
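# Hedged illustration (not in the original file) of how the two helpers above
# combine; CourseFactory is assumed to come from the usual modulestore test
# factories.
#
#   class ExampleEnrollmentTest(LoginEnrollmentTestCase, CourseAccessTestMixin):
#       def test_enroll_and_load(self):
#           course = CourseFactory.create()   # hypothetical fixture
#           self.setup_user()
#           self.enroll(course, verify=True)
#           self.assertCanAccessCourse(self.user, 'load', course)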
| agpl-3.0 | 6,258,002,230,087,288,000 | 34.360215 | 90 | 0.609548 | false |
Herst/selenium | py/test/selenium/webdriver/common/form_handling_tests.py | 1 | 8868 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
class TestFormHandling(object):
def testShouldClickOnSubmitInputElements(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_id("submitButton").click()
driver.implicitly_wait(5)
assert driver.title == "We Arrive Here"
def testClickingOnUnclickableElementsDoesNothing(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_xpath("//body").click()
def testShouldBeAbleToClickImageButtons(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_id("imageButton").click()
driver.implicitly_wait(5)
assert driver.title == "We Arrive Here"
def testShouldBeAbleToSubmitForms(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_name("login").submit()
driver.implicitly_wait(5)
assert driver.title == "We Arrive Here"
def testShouldSubmitAFormWhenAnyInputElementWithinThatFormIsSubmitted(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_id("checky").submit()
driver.implicitly_wait(5)
assert driver.title == "We Arrive Here"
def testShouldSubmitAFormWhenAnyElementWihinThatFormIsSubmitted(self, driver, pages):
pages.load("formPage.html")
driver.find_element_by_xpath("//form/p").submit()
driver.implicitly_wait(5)
assert driver.title == "We Arrive Here"
def testShouldNotBeAbleToSubmitAFormThatDoesNotExist(self, driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element_by_name("there is no spoon").submit()
def testShouldBeAbleToEnterTextIntoATextAreaBySettingItsValue(self, driver, pages):
pages.load("javascriptPage.html")
textarea = driver.find_element_by_id("keyUpArea")
cheesey = "Brie and cheddar"
textarea.send_keys(cheesey)
assert textarea.get_attribute("value") == cheesey
def testShouldEnterDataIntoFormFields(self, driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
originalValue = element.get_attribute("value")
assert originalValue == "change"
element.clear()
element.send_keys("some text")
element = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
newFormValue = element.get_attribute("value")
assert newFormValue == "some text"
def testShouldBeAbleToSelectACheckBox(self, driver, pages):
pages.load("formPage.html")
checkbox = driver.find_element_by_id("checky")
assert checkbox.is_selected() is False
checkbox.click()
assert checkbox.is_selected() is True
checkbox.click()
assert checkbox.is_selected() is False
def testShouldToggleTheCheckedStateOfACheckbox(self, driver, pages):
pages.load("formPage.html")
checkbox = driver.find_element_by_id("checky")
assert checkbox.is_selected() is False
checkbox.click()
assert checkbox.is_selected() is True
checkbox.click()
assert checkbox.is_selected() is False
def testTogglingACheckboxShouldReturnItsCurrentState(self, driver, pages):
pages.load("formPage.html")
checkbox = driver.find_element_by_id("checky")
assert checkbox.is_selected() is False
checkbox.click()
assert checkbox.is_selected() is True
checkbox.click()
assert checkbox.is_selected() is False
def testShouldBeAbleToSelectARadioButton(self, driver, pages):
pages.load("formPage.html")
radioButton = driver.find_element_by_id("peas")
assert radioButton.is_selected() is False
radioButton.click()
assert radioButton.is_selected() is True
def testShouldBeAbleToSelectARadioButtonByClickingOnIt(self, driver, pages):
pages.load("formPage.html")
radioButton = driver.find_element_by_id("peas")
assert radioButton.is_selected() is False
radioButton.click()
assert radioButton.is_selected() is True
def testShouldReturnStateOfRadioButtonsBeforeInteration(self, driver, pages):
pages.load("formPage.html")
radioButton = driver.find_element_by_id("cheese_and_peas")
assert radioButton.is_selected() is True
radioButton = driver.find_element_by_id("cheese")
assert radioButton.is_selected() is False
# [ExpectedException(typeof(NotImplementedException))]
# def testShouldThrowAnExceptionWhenTogglingTheStateOfARadioButton(self, driver, pages):
# pages.load("formPage.html")
# radioButton = driver.find_element_by_id("cheese"))
# radioButton.click()
# [IgnoreBrowser(Browser.IE, "IE allows toggling of an option not in a multiselect")]
# [ExpectedException(typeof(NotImplementedException))]
# def testTogglingAnOptionShouldThrowAnExceptionIfTheOptionIsNotInAMultiSelect(self, driver, pages):
# pages.load("formPage.html")
# select = driver.find_element_by_name("selectomatic"))
# option = select.find_elements_by_tag_name("option"))[0]
# option.click()
def testTogglingAnOptionShouldToggleOptionsInAMultiSelect(self, driver, pages):
pages.load("formPage.html")
select = driver.find_element_by_name("multi")
option = select.find_elements_by_tag_name("option")[0]
selected = option.is_selected()
option.click()
assert not selected == option.is_selected()
option.click()
assert selected == option.is_selected()
def testShouldThrowAnExceptionWhenSelectingAnUnselectableElement(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element_by_xpath("//title")
with pytest.raises(WebDriverException):
element.click()
def testSendingKeyboardEventsShouldAppendTextInInputs(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element_by_id("working")
element.send_keys("Some")
value = element.get_attribute("value")
assert value == "Some"
element.send_keys(" text")
value = element.get_attribute("value")
assert value == "Some text"
def testShouldBeAbleToClearTextFromInputElements(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element_by_id("working")
element.send_keys("Some text")
value = element.get_attribute("value")
assert len(value) > 0
element.clear()
value = element.get_attribute("value")
assert len(value) == 0
def testEmptyTextBoxesShouldReturnAnEmptyStringNotNull(self, driver, pages):
pages.load("formPage.html")
emptyTextBox = driver.find_element_by_id("working")
assert emptyTextBox.get_attribute("value") == ""
emptyTextArea = driver.find_element_by_id("emptyTextArea")
assert emptyTextArea.get_attribute("value") == ""
def testShouldBeAbleToClearTextFromTextAreas(self, driver, pages):
pages.load("formPage.html")
element = driver.find_element_by_id("withText")
element.send_keys("Some text")
value = element.get_attribute("value")
assert len(value) > 0
element.clear()
value = element.get_attribute("value")
assert len(value) == 0
def testRadioShouldNotBeSelectedAfterSelectingSibling(self, driver, pages):
pages.load("formPage.html")
cheese = driver.find_element_by_id("cheese")
peas = driver.find_element_by_id("peas")
cheese.click()
assert cheese.is_selected() is True
assert peas.is_selected() is False
peas.click()
assert cheese.is_selected() is False
assert peas.is_selected() is True
| apache-2.0 | 3,953,924,403,034,526,000 | 39.493151 | 104 | 0.680311 | false |
openprocurement/openprocurement.tender.openua | openprocurement/tender/openua/views/award_document.py | 1 | 2070 |
# -*- coding: utf-8 -*-
from openprocurement.api.utils import raise_operation_error, error_handler
from openprocurement.tender.belowthreshold.views.award_document import TenderAwardDocumentResource
from openprocurement.tender.core.utils import optendersresource
@optendersresource(name='aboveThresholdUA:Tender Award Documents',
collection_path='/tenders/{tender_id}/awards/{award_id}/documents',
path='/tenders/{tender_id}/awards/{award_id}/documents/{document_id}',
procurementMethodType='aboveThresholdUA',
description="Tender award documents")
class TenderUaAwardDocumentResource(TenderAwardDocumentResource):
def validate_award_document(self, operation):
""" TODO move validators
This class is inherited from below package, but validate_award_document function has different validators.
For now, we have no way to use different validators on methods according to procedure type.
"""
if self.request.validated['tender_status'] != 'active.qualification':
raise_operation_error(self.request, 'Can\'t {} document in current ({}) tender status'.format(operation, self.request.validated['tender_status']))
if any([i.status != 'active' for i in self.request.validated['tender'].lots if i.id == self.request.validated['award'].lotID]):
raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))
if any([any([c.status == 'accepted' for c in i.complaints]) for i in self.request.validated['tender'].awards if i.lotID == self.request.validated['award'].lotID]):
            raise_operation_error(self.request, 'Can\'t {} document with accepted complaint'.format(operation))
if operation == 'update' and self.request.authenticated_role != (self.context.author or 'tender_owner'):
self.request.errors.add('url', 'role', 'Can update document only author')
self.request.errors.status = 403
raise error_handler(self.request.errors)
return True
| apache-2.0 | -913,681,379,639,213,000 | 70.37931 | 171 | 0.694686 | false |
david-jarman/CookieRower | websocket_server.py | 1 | 1483 |
import pyrow
import json
import asyncio
from autobahn.asyncio.websocket import WebSocketServerProtocol
from autobahn.asyncio.websocket import WebSocketServerFactory
class MyServerProtocol(WebSocketServerProtocol):
    def __init__(self):
        super().__init__()
        self.is_open = False
    def onOpen(self):
        # onOpen is called synchronously by autobahn; using 'yield from' here
        # would silently turn the handler into an unused generator, so the
        # streaming coroutine is scheduled on the event loop instead.
        self.is_open = True
        asyncio.ensure_future(self.start_sending_rowing_info())
def onClose(self, wasClean, code, reason):
print("closing time")
self.is_open = False
@asyncio.coroutine
def start_sending_rowing_info(self):
machines = list(pyrow.find())
if len(machines) > 0:
rowing_machine = machines[0]
erg = pyrow.pyrow(rowing_machine)
while self.is_open:
monitor = erg.get_monitor(forceplot=True)
message = json.dumps(monitor).encode('utf8')
try:
self.sendMessage(message, isBinary=False)
except:
print("couldn't send message")
yield from asyncio.sleep(2)
else:
print('No machines connected')
factory = WebSocketServerFactory()
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '0.0.0.0', 9000)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
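# Manual test sketch (hedged; the 'websockets' client package is an assumed
# third-party dependency used only for probing, not required by this server):
#
#   import asyncio, websockets
#
#   async def probe():
#       async with websockets.connect('ws://localhost:9000') as ws:
#           print(await ws.recv())   # one JSON monitor frame roughly every 2s
#
#   asyncio.get_event_loop().run_until_complete(probe())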
| bsd-2-clause | -8,962,818,085,957,830,000 | 24.135593 | 62 | 0.610249 | false |
jericks/geoscript-py | geoscript/render/mapwindow.py | 1 | 2620 |
from java import awt
from java.awt import image
from javax import swing
from geoscript import geom, proj, style
from org.geotools.geometry.jts import ReferencedEnvelope
from org.geotools.map import DefaultMapContext, DefaultMapLayer
from org.geotools.renderer.lite import StreamingRenderer
from org.geotools.swing import JMapPane
from org.geotools.swing.action import *
from org.geotools.swing.control import *
class MapWindow:
"""
Renderer that produces a map window based on JMapPane.
The map window provides basic map viewing controls such as pan, zoom, etc..
"""
def __init__(self):
pass
def render(self, layers, styles, bounds, size, **options):
self.map = DefaultMapContext(bounds.proj._crs)
self.map.setAreaOfInterest(bounds)
for i in range(len(layers)):
self.map.addLayer(DefaultMapLayer(layers[i]._source,styles[i]._style()))
w,h = (size[0], size[1])
hints = {}
hints [awt.RenderingHints.KEY_ANTIALIASING] = awt.RenderingHints.VALUE_ANTIALIAS_ON
renderer = StreamingRenderer()
renderer.java2DHints = awt.RenderingHints(hints)
mappane = JMapPane(self.map)
mappane.renderer = renderer
mappane.size = (w,h)
mappane.visible = True
f = Frame(mappane)
f.setSize(w,h)
f.setVisible(True)
def dispose(self):
if self.map:
self.map.dispose()
class Frame(swing.JFrame):
def __init__(self, mappane):
self.init(mappane)
def init(self,mappane):
self.add(mappane,awt.BorderLayout.CENTER)
statusBar = JMapStatusBar.createDefaultStatusBar(mappane)
#statusBar.addItem(CRSStatusBarItem(mappane))
#statusBar.addItem(ExtentStatusBarItem(mappane))
self.add(statusBar, awt.BorderLayout.SOUTH)
toolBar = swing.JToolBar()
toolBar.setOrientation(swing.JToolBar.HORIZONTAL)
toolBar.setFloatable(False)
cursorToolGrp = swing.ButtonGroup()
zoomInBtn = swing.JButton(ZoomInAction(mappane))
toolBar.add(zoomInBtn)
cursorToolGrp.add(zoomInBtn)
zoomOutBtn = swing.JButton(ZoomOutAction(mappane))
toolBar.add(zoomOutBtn)
cursorToolGrp.add(zoomOutBtn)
toolBar.addSeparator()
panBtn = swing.JButton(PanAction(mappane))
toolBar.add(panBtn)
cursorToolGrp.add(panBtn)
toolBar.addSeparator()
resetBtn = swing.JButton(ResetAction(mappane))
toolBar.add(resetBtn)
toolBar.addSeparator()
infoBtn = swing.JButton(InfoAction(mappane))
toolBar.add(infoBtn)
self.add( toolBar, awt.BorderLayout.NORTH )
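# Minimal rendering sketch (hedged; 'layer' and 'style' are assumed to be
# geoscript Layer and Style objects created elsewhere):
#
#   win = MapWindow()
#   win.render([layer], [style], layer.bounds(), (800, 600))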
| mit | -5,584,584,365,890,114,000 | 27.478261 | 89 | 0.688168 | false |
ContinuumIO/xdata-feat | feat/pumps.py | 1 | 15715 |
#
# Copyright 2015, Jack Poulson
# All rights reserved.
#
# This file is part of Pumps, which was produced as part of the DARPA XDATA
# project, and is under the BSD 2-Clause License, which can be found at
# http://opensource.org/licenses/BSD-2-Clause
#
import bz2, gzip, os, pandas, time
# import utils
home_dir = os.path.expanduser("~")
def resample_daily(filename,new_filename='',return_df=False,suffix='_daily'):
"""
Reduce an NxCore CSV file to its back-filled mean daily prices.
"""
# >>> import pumps
# >>> pumps.resample_daily('/path/to/file.csv')
# >>> pumps.resample_daily('/path/to/file.csv.bz2')
# >>> pumps.resample_daily('/path/to/file.csv.gz')
# >>> df = pumps.resample_daily('/path/to/file.csv.gz',return_df=True)
# >>> read_time, resample_time, write_time = pumps.resample_daily('file.csv')
if filename.endswith('.csv'):
date_tags = {"DateTime":['Date']}
kept_cols = ['Date','Close']
else:
date_tags = {'DateTime':['System Date','System Time','System Time Zone']}
kept_cols = ['System Date','System Time','System Time Zone','Trade Price']
tokens = filename.split('.')
if len(tokens) < 2:
print "WARNING: Skipping %s because too few tokens" % filename
return 0, 0, 0
if tokens[-1] == 'csv':
base = tokens[0]
for j in xrange(1,len(tokens)-1):
base = base + '.' + tokens[j]
print "Processing plain CSV:", filename
print " reading..."
start_read_time = time.clock()
df = pandas.read_csv(filename,parse_dates=date_tags,index_col='DateTime',
usecols=kept_cols)
read_time = time.clock() - start_read_time
elif tokens[-2] == 'csv' and tokens[-1] == 'bz2':
base = tokens[0]
for j in xrange(1,len(tokens)-2):
base = base + '.' + tokens[j]
print "Processing bzip2 CSV:", filename
print " reading..."
start_read_time = time.clock()
df = pandas.read_csv(filename,parse_dates=date_tags,compression='bz2',
index_col='DateTime',usecols=kept_cols)
read_time = time.clock() - start_read_time
elif tokens[-2] == 'csv' and tokens[-1] == 'gz':
base = tokens[0]
for j in xrange(1,len(tokens)-2):
base = base + '.' + tokens[j]
print "Processing gzipped CSV:", filename
print " reading..."
start_read_time = time.clock()
df = pandas.read_csv(filename,parse_dates=date_tags,compression='gzip',
index_col='DateTime',usecols=kept_cols)
read_time = time.clock() - start_read_time
else:
print "WARNING: Skipping", filename
return 0, 0, 0
print " resampling..."
start_resample_time = time.clock()
df.sort_index(inplace=True)
new_df = df.resample('D',how='mean',fill_method='bfill')
resample_time = time.clock() - start_resample_time
if new_filename == '':
new_filename = base+suffix+'.csv'
new_filedir = os.path.dirname(new_filename)
if new_filedir == '':
# The path was relative, e.g., 'ePTOO.csv', and should live in '.'
new_filedir = '.'
if os.access(new_filedir,os.W_OK|os.X_OK):
print " writing..."
start_write_time = time.clock()
new_df.to_csv(new_filename)
write_time = time.clock() - start_write_time
else:
print " did not have write permissions for", new_filedir
write_time = 0
if return_df:
return new_df
else:
return read_time, resample_time, write_time
def resample_daily_dir(directory,silent=False):
"""
Back-fill the NxCore CSV files in a directory with their mean daily prices.
"""
# >>> import pumps
# >>> pumps.resample_daily_dir('/home/poulson/trade_data/P/')
if not os.path.isdir(directory):
print "ERROR: directory", directory, "does not exist"
return
start_time = time.clock()
filename_list = os.listdir(directory)
num_files = 0
  for filename in filename_list:
    # os.listdir() returns bare names, so join with the directory before testing
    if os.path.isfile(os.path.join(directory, filename)):
      num_files += 1
total_files = 0
total_read_time = 0
total_resample_time = 0
total_write_time = 0
  for filename in filename_list:
    filepath = os.path.join(directory, filename)
    if not os.path.isfile(filepath):
      continue
    if not silent:
      print "Processing file %d/%d" % (total_files,num_files)
    total_files += 1
    read_time, resample_time, write_time = resample_daily(filepath)
total_read_time += read_time
total_resample_time += resample_time
total_write_time += write_time
if not silent:
print ""
print "Total time:", time.clock()-start_time, "seconds"
print " reading: ", total_read_time, "seconds"
print " resampling:", total_resample_time, "seconds"
print " writing: ", total_write_time, "seconds"
print ""
# >>> import pumps
# >>> pumps.find_pumps('ePTOO_daily.csv')
# Candidate pump: (2014-04-12,2014-04-24,2014-05-20)=(0.550000,0.575000,0.887500)
# (('2014-04-12',), ('2014-04-24',), ('2014-05-20',), (0.55000000000000004,), (0.57499999999999996,), (0.88749999999999996,))
#
# >>> pumps.find_pumps('ePTOO_daily.csv',growth_tol=0.25)
#Candidate pump: (2014-02-14,2014-03-04,2014-03-14)=(0.750000,0.754286,1.093333)
#Candidate pump: (2014-04-12,2014-04-24,2014-05-23)=(0.550000,0.575000,0.760000)
#Candidate pump: (2014-05-02,2014-05-12,2014-06-11)=(0.610000,0.610000,0.790000)
#(('2014-02-14', '2014-04-12', '2014-05-02'), ('2014-03-04', '2014-04-24', '2014-05-12'), ('2014-03-14', '2014-05-23', '2014-06-11'), (0.75, 0.55000000000000004, 0.60999999999999999), (0.75428571428571434, 0.57499999999999996, 0.60999999999999999), (1.0933333333333335, 0.76000000000000001, 0.79000000000000004))
#
# >>> pumps.find_pumps('ePTOO_daily.csv',growth_tol=0.25,silent=True)
#(('2014-02-14', '2014-04-12', '2014-05-02'), ('2014-03-04', '2014-04-24', '2014-05-12'), ('2014-03-14', '2014-05-23', '2014-06-11'), (0.75, 0.55000000000000004, 0.60999999999999999), (0.75428571428571434, 0.57499999999999996, 0.60999999999999999), (1.0933333333333335, 0.76000000000000001, 0.79000000000000004))
#
# If the 'verbose=True' argument is added, then the details of each search
# are summarized.
#
def find_pumps(filename,process=False,resampled_filename='',min_quiet_days=10,quiet_tol=0.05,
min_growth_days=1,max_growth_days=30,growth_tol=0.5,
silent=False,verbose=False):
if process:
df = resample_daily(filename,resampled_filename,return_df=True)
else:
tokens = filename.split('.')
if len(tokens) < 2:
print "ERROR: Invalid filename,", filename
return
if tokens[-1] == 'csv':
df = pandas.read_csv(filename,index_col='DateTime')
elif tokens[-2] == 'csv' and tokens[-1] == 'gz':
df = pandas.read_csv(filename,index_col='DateTime',compression='gzip')
elif tokens[-2] == 'csv' and tokens[-1] == 'bz2':
df = pandas.read_csv(filename,index_col='DateTime',compression='bz2')
else:
print "ERROR: Invalid filename,", filename
return
if min_quiet_days < 0:
print "ERROR: min_quiet_days must be non-negative"
return
  if quiet_tol <= 0.:
    print "ERROR: quiet_tol must be positive"
    return
  if min_growth_days <= 0:
    print "ERROR: min_growth_days must be positive"
    return
  if max_growth_days < min_growth_days:
    print "ERROR: max_growth_days must be at least as large as min_growth_days"
    return
  if growth_tol <= 0.:
    print "ERROR: growth_tol must be positive"
    return
# Search for the intervals where the mean price did not deviate by more than
# a given percentage, for a given number of days, and then has an overall
# growth of more than a given percentage for a given number of days.
#
# The current search works in a way which might appear to be quadratic
# complexity, but should be expected to have near-linear complexity since
# each search forward from a starting point should ideally be O(1) in length
# (with the only possible exception being if the stock is essentially only
# quiet). It should be possible to reduce the complexity in such cases with
# minor changes.
#
# Also note that the current algorithm can be viewed as being
# 'forward greedy', as it will search for the maximum quiet time period,
# followed by the maximum allowable pump period, and that it would be possible
# to formulate a time series which is *almost* detected via the current scheme
# but would be detected by a scheme which was not greedy with the initial
# quiet period (e.g., if there is a slight up-tick in the last quiet day,
# it should be easy to construct a case where the subsequent raise in price
# is too small to be considered a pump, but it would pass the threshold if the
# pump was considered to have started the day before the "quiet" up-tick).
# But since these cases should be borderline, they should not be worth
# initially worrying about.
#
# It is also possible to debate which data point the deviation should be
# formulated relative to when deciding whether a period of (e.g., 10 days)
# is sufficiently 'quiet'. The current scheme uses the starting price, whereas
# a slightly better approach might be to use the central price over each
# candidate quiet period.
#
# TODO: Consider allowing the growth tolerance to be based upon the average
# daily relative growth rather than a simple relative growth bound.
#
start_dates = ()
last_quiet_dates = ()
end_dates = ()
start_prices = ()
last_quiet_prices = ()
end_prices = ()
num_days = len(df.ix[:,0])
start_day = 0
while start_day+max(1,min_quiet_days) < num_days:
start_price = df.ix[start_day,0]
start_date = df.index[start_day]
if start_price <= 0.:
print "ERROR: Price on %s was %f" % (start_date,start_price)
start_day += 1
continue
if verbose:
print "Searching from start_date=%s (start_price=%f)" % \
(start_date,start_price)
stayed_quiet = True
for day in xrange(start_day+1,num_days):
price = df.ix[day,0]
if abs(price-start_price)/start_price > quiet_tol:
stayed_quiet = False
if verbose:
print " quiet threshold exceeded on %s with price=%f" % \
(df.index[day],price)
break
if stayed_quiet or day-start_day < min_quiet_days:
if verbose:
print " insufficient quiet time"
start_day += 1
continue
# Now look for a sufficiently fast rise so that we have a pump candidate
last_quiet_day = day-1
last_quiet_date = df.index[last_quiet_day]
last_quiet_price = df.ix[last_quiet_day,0]
# This assertion shouldn't be necessary, but just in case...
if last_quiet_price <= 0.:
print "ERROR: Price on %s was %f" % (last_quiet_day,last_quiet_price)
start_day += 1
continue
fast_rise = False
search_start_day = last_quiet_day + min_growth_days
search_stop_day = min(last_quiet_day+max_growth_days+1,num_days)
for day in xrange(search_start_day,search_stop_day):
price = df.ix[day,0]
if (price-last_quiet_price)/last_quiet_price > growth_tol:
fast_rise = True
end_day = day
end_price = price
end_date = df.index[end_day]
if verbose:
print " setting end_date=%s and end_price=%f" % (end_date,end_price)
if fast_rise:
start_dates += (start_date,)
last_quiet_dates += (last_quiet_date,)
end_dates += (end_date,)
start_prices += (start_price,)
last_quiet_prices += (last_quiet_price,)
end_prices += (end_price,)
# 'quiet' is already in use as another technical term...so use 'silent'
if not silent:
print "Candidate pump: (%s,%s,%s)=(%f,%f,%f)" % \
(start_date, last_quiet_date, end_date,
start_price,last_quiet_price,end_price)
# Conservatively advance the starting day of the next search
start_day = last_quiet_day+1
else:
if verbose:
print " no sufficiently fast rise detected in time band"
start_day += 1
return start_dates, last_quiet_dates, end_dates, \
start_prices, last_quiet_prices, end_prices
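# A tiny synthetic check of the greedy search above (illustrative, not part of
# the original module): ten flat "quiet" days followed by a sharp rise should
# yield exactly one candidate.
# >>> import pandas
# >>> idx = pandas.date_range('2014-01-01', periods=15, freq='D')
# >>> prices = [1.0] * 11 + [1.2, 1.4, 1.6, 1.8]
# >>> pandas.DataFrame({'Close': prices}, index=idx).to_csv(
# ...     'toy_daily.csv', index_label='DateTime')
# >>> find_pumps('toy_daily.csv', min_quiet_days=10, growth_tol=0.5)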
# @utils.lru_cache(150)
def find_pumps_easy(symbol,orig_dir='/team6/trade',cache_dir=home_dir+'/.team6/resampled/',suffix='_daily',
min_quiet_days=10,quiet_tol=0.05,
min_growth_days=1,max_growth_days=30,growth_tol=0.5,
silent=False,verbose=False):
"""
A thin wrapper around find_pumps which provides a few conveniences at the
expense of requiring more hard-coded information.
"""
# >>> import pumps
# >>> pumps.find_pumps_easy('eIFT',growth_tol=0.1)
# Will use gzipped CSV /team6/trade/eIFT.csv.gz
# Processing gzipped CSV: /team6/trade/eIFT.csv.gz
# reading...
# resampling...
# did not have write permissions for /team6/trade/eIFT_daily.csv
# Candidate pump: (2014-02-22 00:00:00,2014-03-13 00:00:00,2014-04-10 00:00:00)=(5.491670,5.673065,6.289004)
# Candidate pump: (2014-03-14 00:00:00,2014-04-07 00:00:00,2014-05-07 00:00:00)=(5.817291,5.978300,6.786954)
# ((Timestamp('2014-02-22 00:00:00', offset='D'), Timestamp('2014-03-14 00:00:00', offset='D')), (Timestamp('2014-03-13 00:00:00', offset='D'), Timestamp('2014-04-07 00:00:00', offset='D')), (Timestamp('2014-04-10 00:00:00', offset='D'), Timestamp('2014-05-07 00:00:00', offset='D')), (5.4916701754386024, 5.8172905405405189), (5.6730647911338368, 5.9782997050147282), (6.2890035868006358, 6.7869537671232765))
#
# Start by ensuring that, if the default cache_dir was specified, that the
# necessary folders exist
if not os.path.exists(home_dir+'/.team6'):
if not silent:
print "Creating %s" % (home_dir+'/.team6')
os.mkdir(home_dir+'/.team6')
if not os.path.exists(home_dir+'/.team6/resampled'):
if not silent:
print "Creating %s" % (home_dir+'/.team6/resampled')
os.mkdir(home_dir+'/.team6/resampled')
cached_file = cache_dir+'/'+symbol+suffix+'.csv'
plain_file = orig_dir+'/'+symbol+'.csv'
gzip_file = orig_dir+'/'+symbol+'.csv.gz'
bz2_file = orig_dir+'/'+symbol+'.csv.bz2'
if os.path.isfile(cached_file):
if not silent:
print "Will use cached processed CSV,", cached_file
candidates = find_pumps(cached_file,False,'',
min_quiet_days,quiet_tol,
min_growth_days,max_growth_days,growth_tol,
silent,verbose)
elif os.path.isfile(plain_file):
if not silent:
print "Will use plain CSV,", plain_file
candidates = find_pumps(plain_file,True,cached_file,
min_quiet_days,quiet_tol,
min_growth_days,max_growth_days,growth_tol,
silent,verbose)
elif os.path.isfile(gzip_file):
if not silent:
print "Will use gzipped CSV", gzip_file
candidates = find_pumps(gzip_file,True,cached_file,
min_quiet_days,quiet_tol,
min_growth_days,max_growth_days,growth_tol,
silent,verbose)
elif os.path.isfile(bz2_file):
if not silent:
print "Will use bz2 CSV", bz2_file
candidates = find_pumps(bz2_file,True,cached_file,
min_quiet_days,quiet_tol,
min_growth_days,max_growth_days,growth_tol,
silent,verbose)
else:
print "ERROR: Could not find csv for symbol", symbol
return
return candidates
def to_dicts(candidates):
  """
  Converts find_pumps results (tuple of 6 tuples) into a dict of lists.
  """
  import utils
  sds, lqds, eds, sps, lqps, eps = candidates
  res = {
      'start': [utils.to_seconds(s) for s in sds],
      'end': [utils.to_seconds(e) for e in eds],
      'last_quiet_date': lqds,
      'start_prices': sps,
      'last_quiet_price': lqps,
      'end_price': eps,
  }
  return res
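# Example of chaining the helpers above (hedged; follows the doctest style of
# this module):
# >>> import pumps
# >>> candidates = pumps.find_pumps_easy('eIFT', growth_tol=0.1, silent=True)
# >>> pumps.to_dicts(candidates)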
| mit | -1,317,769,979,153,420,300 | 39.294872 | 412 | 0.651098 | false |
amol9/wallp | wallp/server/protocol/wp_change_message.py | 1 | 1346 |
from time import time
from mayloop.imported.twisted.internet_protocol import Protocol
from ..wallpaper_image import WPImageError
from ...util.logger import log
class WPState():
NONE = 0
READY = 1
CHANGING = 2
ERROR = 3
def __init__(self):
self.state = self.NONE
class WPChangeMessage(Protocol, object):
def __init__(self, wp_state, wp_image):
self._wp_state = wp_state
self._wp_image = wp_image
def dataReceived(self, data):
self.messageReceived(data)
def messageReceived(self, message):
if message == WPState.READY:
log.debug('new image ready')
#self._server_shared_state.last_change = int(time())
self._wp_state.state = WPState.READY
#self._server_shared_state.abort_image_producers() ???
elif message == WPState.CHANGING:
self._wp_state.state = WPState.CHANGING
elif message == WPState.ERROR:
if self._wp_image is not None:
self._wp_state.state = WPState.READY
else:
self._wp_state.state = WPState.NONE
elif type(message) == str:
if self._wp_state.state == WPState.READY:
try:
self._wp_image.path = message
				except WPImageError as e:
					log.error('error loading new image: %s' % e)
					self._wp_state.state = WPState.NONE
else:
self.messageError('wallpaper path received when not expected, %s'%message)
def messageError(self, reason):
log.error(reason)
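# Minimal wiring sketch (illustrative, not part of the original module): the
# change daemon first reports state transitions, then the new wallpaper path.
#
#   state = WPState()
#   proto = WPChangeMessage(state, wp_image)   # wp_image: a WallpaperImage
#   proto.messageReceived(WPState.CHANGING)
#   proto.messageReceived(WPState.READY)
#   proto.messageReceived('/path/to/new/wallpaper.jpg')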
| mit | 7,965,046,102,829,231,000 | 22.206897 | 78 | 0.687221 | false |
caspartse/QQ-Groups-Spider | vendor/pyexcel/plugins/sources/file_output.py | 1 | 1308 |
"""
pyexcel.plugins.sources.file_output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Representation of output file sources
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel.internal import RENDERER
from pyexcel.source import AbstractSource
from pyexcel.plugins import find_file_type_from_file_name
# pylint: disable=W0223
class WriteSheetToFile(AbstractSource):
"""Pick up 'file_name' field and do single sheet based read and write
"""
def __init__(self, file_name=None, renderer_library=None, **keywords):
AbstractSource.__init__(self, **keywords)
self._file_name = file_name
self.__file_type = find_file_type_from_file_name(file_name, 'write')
self._renderer = RENDERER.get_a_plugin(
self.__file_type, renderer_library)
def write_data(self, sheet):
self._renderer.render_sheet_to_file(self._file_name,
sheet, **self._keywords)
# pylint: disable=W0223
class WriteBookToFile(WriteSheetToFile):
"""Pick up 'file_name' field and do multiple sheet based read and write
"""
def write_data(self, book):
self._renderer.render_book_to_file(self._file_name, book,
**self._keywords)
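
# Hedged usage sketch (illustrative): `sheet` and `book` are assumed to be
# pyexcel Sheet / Book instances supplied by the caller.
# WriteSheetToFile(file_name='out.csv').write_data(sheet)
# WriteBookToFile(file_name='out.xlsx').write_data(book)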
| mit | 5,851,435,033,963,996,000 | 33.421053 | 76 | 0.620031 | false |
DBuildService/atomic-reactor | atomic_reactor/plugins/build_source_container.py | 1 | 3849 | """
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os
import subprocess
import tempfile
from atomic_reactor.build import BuildResult
from atomic_reactor.constants import (PLUGIN_SOURCE_CONTAINER_KEY, EXPORTED_SQUASHED_IMAGE_NAME,
IMAGE_TYPE_DOCKER_ARCHIVE, PLUGIN_FETCH_SOURCES_KEY)
from atomic_reactor.plugin import BuildStepPlugin
from atomic_reactor.util import get_exported_image_metadata
class SourceContainerPlugin(BuildStepPlugin):
"""
Build source container image using
https://github.com/containers/BuildSourceImage
"""
key = PLUGIN_SOURCE_CONTAINER_KEY
def export_image(self, image_output_dir):
output_path = os.path.join(tempfile.mkdtemp(), EXPORTED_SQUASHED_IMAGE_NAME)
cmd = ['skopeo', 'copy']
source_img = 'oci:{}'.format(image_output_dir)
dest_img = 'docker-archive:{}'.format(output_path)
cmd += [source_img, dest_img]
self.log.info("Calling: %s", ' '.join(cmd))
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.log.error("failed to save docker-archive :\n%s", e.output)
raise
img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE)
self.workflow.exported_image_sequence.append(img_metadata)
def run(self):
"""Build image inside current environment.
Returns:
BuildResult
"""
fetch_sources_result = self.workflow.prebuild_results.get(PLUGIN_FETCH_SOURCES_KEY, {})
source_data_dir = fetch_sources_result.get('image_sources_dir')
remote_source_data_dir = fetch_sources_result.get('remote_sources_dir')
source_exists = source_data_dir and os.path.isdir(source_data_dir)
remote_source_exists = remote_source_data_dir and os.path.isdir(remote_source_data_dir)
if not source_exists and not remote_source_exists:
err_msg = "No SRPMs directory '{}' available".format(source_data_dir)
err_msg += "\nNo Remote source directory '{}' available".format(remote_source_data_dir)
self.log.error(err_msg)
return BuildResult(logs=err_msg, fail_reason=err_msg)
if source_exists and not os.listdir(source_data_dir):
self.log.warning("SRPMs directory '%s' is empty", source_data_dir)
if remote_source_exists and not os.listdir(remote_source_data_dir):
self.log.warning("Remote source directory '%s' is empty", remote_source_data_dir)
image_output_dir = tempfile.mkdtemp()
cmd = ['bsi', '-d']
drivers = []
if source_exists:
drivers.append('sourcedriver_rpm_dir')
cmd.append('-s')
cmd.append('{}'.format(source_data_dir))
if remote_source_exists:
drivers.append('sourcedriver_extra_src_dir')
cmd.append('-e')
cmd.append('{}'.format(remote_source_data_dir))
driver_str = ','.join(drivers)
cmd.insert(2, driver_str)
cmd.append('-o')
cmd.append('{}'.format(image_output_dir))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.log.error("BSI failed with output:\n%s", e.output)
return BuildResult(logs=e.output, fail_reason='BSI utility failed build source image')
self.log.debug("Build log:\n%s\n", output)
self.export_image(image_output_dir)
return BuildResult(
logs=output,
oci_image_path=image_output_dir,
skip_layer_squash=True
)
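
# Hedged sketch of the external commands this plugin shells out to, with
# hypothetical paths (the real arguments are assembled in run() and
# export_image() above):
#   bsi -d sourcedriver_rpm_dir,sourcedriver_extra_src_dir \
#       -s /build/srpms -e /build/remote -o /build/oci
#   skopeo copy oci:/build/oci docker-archive:/tmp/build/image.tar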
| bsd-3-clause | -4,306,701,115,934,783,500 | 36.735294 | 99 | 0.638348 | false |
acil-bwh/SlicerCIP | Scripted/CIP_/CIP/logic/EventsTrigger.py | 1 | 2466 | class EventsTrigger(object):
""" 'Abstract' class that has a mechanism to subscribe and trigger events
"""
def __init__(self):
self.__events__ = []
self.__eventsCallbacks__ = {}
self.__eventsCount__ = 0
@property
def events(self):
return self.__events__
def setEvents(self, eventsList):
""" Set the events that the class is handling
:param eventsList:
:return:
"""
self.__events__ = eventsList
def addObservable(self, eventTypeId, callbackFunction):
""" Add a function that will be invoked when the corresponding event is triggered.
Ex: myWidget.addObservable(myWidget.EVENT_BEFORE_NEXT, self.onBeforeNextClicked)
        :param eventTypeId: public id of the event exposed by the class
:param callbackFunction: function that will be invoked when the event is triggered
:return: identifier for this observable (that can be used to remove it)
"""
if eventTypeId not in self.events:
raise Exception("Event not recognized. Make sure that the event belongs to the class and you called the function 'setEvents'")
        # Add the event to the list of functions that will be called when the matching event is triggered
self.__eventsCallbacks__[self.__eventsCount__] = (eventTypeId, callbackFunction)
self.__eventsCount__ += 1
return self.__eventsCount__ - 1
def removeObservable(self, eventId):
""" Remove an observable from the list of callbacks to be invoked when an event is triggered
:param eventId: internal id that was given when the observable was created
"""
if eventId in self.__eventsCallbacks__:
self.__eventsCallbacks__.pop(eventId)
def removeAllObservables(self):
""" Remove all the current observables. No events will be captured anymore
"""
self.__eventsCallbacks__.clear()
def getAllObservables(self):
""" Get a list of (id, tuple) with all the current observables
:return:
"""
return list(self.__eventsCallbacks__.items())
def triggerEvent(self, eventType, *params):
"""Trigger one of the possible events from the object.
        Ex: self.triggerEvent(self.EVENT_BEFORE_NEXT) """
for callbackFunction in (item[1] for item in self.__eventsCallbacks__.values() if item[0] == eventType):
callbackFunction(*params)
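
# Hedged usage sketch: a minimal, hypothetical subclass exercising the observer
# mechanism above. The event name, callback and argument are illustrative only.
if __name__ == "__main__":
    class _Demo(EventsTrigger):
        EVENT_PING = "ping"

        def __init__(self):
            EventsTrigger.__init__(self)
            self.setEvents([self.EVENT_PING])

    demo = _Demo()
    # Register an observer, fire the event with one argument, then detach.
    observable_id = demo.addObservable(demo.EVENT_PING, lambda value: None)
    demo.triggerEvent(demo.EVENT_PING, 42)
    demo.removeObservable(observable_id)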
| bsd-3-clause | -5,451,498,751,310,841,000 | 42.263158 | 138 | 0.644769 | false |
Fleeg/fleeg-platform | link/migrations/0001_initial.py | 1 | 3024 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-21 18:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('account', '__first__'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Account')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField()),
('type', models.CharField(max_length=200)),
('title', models.CharField(max_length=200)),
('summary', models.CharField(max_length=250, null=True)),
('text', models.TextField(null=True)),
('image_url', models.CharField(max_length=500, null=True)),
('tags', models.CharField(max_length=500, null=True)),
('publish_date', models.DateTimeField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='own_posts', to='account.Account')),
('publisher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='account.Account')),
('origin', models.ForeignKey(to='link.Post', related_name='adds', null=True)),
],
),
migrations.CreateModel(
name='Reaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(default='LIKE', max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Account')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reactions', to='link.Post')),
],
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='link.Post'),
),
]
| agpl-3.0 | -4,756,169,835,546,349,000 | 47 | 138 | 0.574735 | false |
mrcslws/nupic.research | projects/dynamic_sparse/validation/test_iterative_pruning.py | 1 | 1996 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import numpy as np
from nupic.research.frameworks.dynamic_sparse.common.utils import run_ray
exp_config = dict(
device="cuda",
network="MLP",
dataset_name="MNIST",
input_size=784,
hidden_sizes=[50, 50, 50],
model="IterativePruningModel",
epochs=2,
train_batches_per_epoch=2,
# ---- sparsity related
experiment_type="IterativePruning",
# 0.2, 0.4, 0.6, 0.8, 1.0
iterative_pruning_schedule=list(np.arange(0.2, 1.01, 0.20)),
sparse_start=None,
sparse_end=None,
on_perc=1.0,
# ---- optimizer related
optim_alg="SGD",
learning_rate=0.1,
weight_decay=0,
)
# run
tune_config = dict(
name=os.path.basename(__file__).replace(".py", "") + "_lt",
num_samples=1,
local_dir=os.path.expanduser("~/nta/results"),
checkpoint_freq=0,
checkpoint_at_end=False,
resources_per_trial={"cpu": 1, "gpu": 1},
verbose=0,
)
run_ray(tune_config, exp_config, fix_seed=True)
# 10/31 - ran script, working ok, results as expected
| agpl-3.0 | -3,114,277,414,524,010,500 | 30.68254 | 73 | 0.641283 | false |
minghuascode/pyj | library/pyjamas/ui/HTMLLinkPanel.py | 1 | 1988 | from pyjamas.ui.HTMLPanel import HTMLPanel
from pyjamas.ui.Hyperlink import Hyperlink
from pyjamas import Window
from pyjamas import DOM
class HTMLLinkPanel(HTMLPanel):
def __init__(self, html="", **kwargs):
self.hyperlinks = []
HTMLPanel.__init__(self, html, **kwargs)
def setHTML(self, html):
self._clear_hyperlinks()
HTMLPanel.setHTML(self, html)
def _clear_hyperlinks(self):
while self.hyperlinks:
hl = self.hyperlinks.pop()
el = hl.getElement()
parent = DOM.getParent(el)
if parent is not None:
parent.removeChild(el)
hl.setParent(None)
def replaceLinks(self, tagname="a", use_page_href=True, ajaxify=False):
""" replaces <tag href="#pagename">sometext</tag> with:
Hyperlink("sometext", "pagename"). Hyperlinks use
the History module so the notification will come
in on an onHistoryChanged.
"""
self._clear_hyperlinks()
tags = self.findTags(tagname)
pageloc = Window.getLocation()
pagehref = pageloc.getPageHref()
for el in tags:
href = el.href
l = href.split("#")
if len(l) != 2:
continue
if use_page_href and not l[0].startswith(pagehref):
continue
token = l[1]
if not token:
continue
html = DOM.getInnerHTML(el)
parent = DOM.getParent(el)
index = DOM.getChildIndex(parent, el)
if ajaxify:
token = '!%s' % token
hl = Hyperlink(TargetHistoryToken=token,
HTML=html,
Element=DOM.createSpan())
DOM.insertChild(parent, hl.getElement(), index)
parent.removeChild(el)
self.children.insert(index, hl)
hl.setParent(self)
self.hyperlinks.append(hl)
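
# Hedged usage sketch (illustrative markup; "#contact" is a hypothetical page
# token): the anchor below would be swapped for a History-aware Hyperlink.
# panel = HTMLLinkPanel('<a href="#contact">Contact</a>')
# panel.replaceLinks()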
| apache-2.0 | -3,861,831,791,995,955,000 | 32.694915 | 75 | 0.545775 | false |
nsubiron/SublimeSuricate | lib/settings.py | 1 | 3540 | # Sublime Suricate, Copyright (C) 2013 N. Subiron
#
# This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
# are welcome to redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
"""Commands to manage settings files. `settings_file` should include a file name
and extension, but not a path. If none is given, use the default Suricate
settings file."""
from contextlib import contextmanager
import os
import sublime
import suricate
from . import sublime_wrapper
suricate.reload_module(sublime_wrapper)
@contextmanager
def load_save_settings(settings_file):
"""Context manager to load and save settings."""
if not settings_file:
settings_file = suricate.get_variable('suricate_settings_file_base_name')
settings = sublime.load_settings(settings_file)
if not settings:
message = 'Settings file "%s" not found!' % settings_file
sublime.error_message(message)
raise Exception(message)
# Do not try/catch, don't save if fails.
yield settings
sublime.save_settings(settings_file)
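
# Hedged usage sketch (hypothetical settings file and key; changes are saved
# automatically when the block exits):
# with load_save_settings('Preferences.sublime-settings') as settings:
#     settings.set('font_size', 12)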
def _make_list(obj):
if obj is None:
return []
elif isinstance(obj, (list, tuple, range, set, frozenset)):
return list(obj)
elif isinstance(obj, dict):
return [[key, value] for key, value in obj.items()]
else:
return [obj]
def toggle_boolean(key, settings_file=None):
"""Toggle the value of key in settings_file."""
with load_save_settings(settings_file) as settings:
value = settings.get(key)
if isinstance(value, bool):
settings.set(key, not value)
else:
sublime.error_message(
'Cannot toggle a non-boolean object "%s"' %
key)
def append_value_to_array(key, value, settings_file=None):
"""Append value to key in settings_file."""
with load_save_settings(settings_file) as settings:
lst = settings.get(key, [])
if isinstance(lst, list):
lst.append(value)
settings.set(key, lst)
else:
sublime.error_message(
'Cannot append value to a non-array object "%s"' %
key)
def set_key_value(key, value, settings_file=None):
"""Set value for key in settings_file."""
with load_save_settings(settings_file) as settings:
settings.set(key, value)
def set_from_resources(
key,
patterns,
settings_file=None,
set_mode='file',
window=None):
"""Set the key in settings_file from a list of resources found based on
patterns. Available values for `set_mode`:
* "file": `Packages/Default/Preferences.sublime-settings`
* "file_name": `Preferences.sublime-settings`
* "file_base_name": `Preferences`
"""
resources = set()
if set_mode == 'file':
clean = lambda x: x
elif set_mode == 'file_name':
clean = os.path.basename
elif set_mode == 'file_base_name':
clean = lambda x: os.path.splitext(os.path.basename(x))[0]
else:
sublime.error_message('Unknown set_mode "%s".' % set_mode)
return
for pattern in _make_list(patterns):
resources.update(clean(x) for x in sublime.find_resources(pattern))
on_done = lambda picked: set_key_value(key, picked, settings_file)
sublime_wrapper.show_quick_panel(sorted(list(resources)), on_done, window)
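
# Hedged usage sketches (key names and patterns are hypothetical; omitting
# `settings_file` targets the default Suricate settings file):
# toggle_boolean('debug')
# append_value_to_array('ignored_packages', 'Vintage', 'Preferences.sublime-settings')
# set_from_resources('color_scheme', '*.tmTheme', set_mode='file')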
| gpl-3.0 | -4,090,290,248,179,325,000 | 32.084112 | 81 | 0.648588 | false |
tristeen/RTS-Simulator | rts/mState.py | 1 | 3697 | import math
import random
import mMap
from misc.mLogger import log
from misc import utils
import random
class RTSState(mMap.mMap):
def __init__(self, d, camp):
super(RTSState, self).__init__(d)
self.camp = camp
def Clone(self):
st = RTSState(self.to_dict(), self.camp)
return st
def equal(self, state):
if cmp(self.to_dict(), state.to_dict()):
return False
print 'maps of state, k are the same'
return self.camp == state.camp
def DoMove(self, move):
# print 'DoMove 1'
# print move
# for u, i, j in self:
# if u.type:
# print 'id %s, type %d, camp %d, pos (%d, %d)'%(u.id, u.type, u.camp_, i, j)
# print 'DoMove 2'
for _id, action in move.iteritems():
if not action:
continue
try:
action[0](self.get_unit(_id), action[1])
except TypeError:
log.info('UCT unit %s has been killed.' % _id)
self.camp = int(not self.camp)
def GetMoves(self):
actions = []
_ids = []
_availables = []
for u, i, j in self:
if u.camp_ != self.camp:
continue
available = u.available_actions()
if not available:
continue
_ids.append(u.id)
_availables.append(available)
for i in utils.product(_availables):
acts = [j[0] for j in i]
args = [j[1] for j in i]
for j in utils.product(args):
actions.append(dict(zip(_ids, zip(acts, j))))
return actions
# def GetRandomMove(self):
# action = {}
# for u, i, j in self:
# if u.camp_ != self.camp_:
# continue
# available = u.available_actions()
# if not available:
# continue
# not uniform exactly
# _act = random.choice(available)
# action[u.id] = (_act[0], random.choice(_act[1]))
# return action
def GetRandomMove(self):
state = self.Clone()
action = {}
# for u, i, j in self:
for u, i, j in state:
if u.camp_ != state.camp:
continue
if not self.get_unit(u.id):
continue
available = u.available_actions()
if not available:
continue
# not uniform exactly
_act = random.choice(available)
_arg = random.choice(_act[1])
action[u.id] = (_act[0], _arg)
action[u.id][0](u, _arg)
state = self.Clone()
for _id, act in action.iteritems():
# print act, _id
# print self.get_unit(_id)
# print act
# print state.get_unit(_id)
act[0](state.get_unit(_id), act[1])
return action
    # conflicting states?
def GetMovesNum(self):
num = 1
for u, i, j in self:
if u.camp_ != self.camp:
continue
available = u.available_actions()
if not available:
continue
n = 0
for a in available:
n += len(a[1])
num *= n
return num
def GetResult(self, playerjm):
scores = {playerjm: self.calc_score(playerjm)[0], (not playerjm): self.calc_score(not playerjm)[0]}
#return max(0, round(1.0 * (scores[playerjm] - scores[not playerjm]) / scores[not playerjm], 2))
return scores[playerjm] - 1.5 * scores[not playerjm]
#return scores[playerjm]
# if scores[playerjm] > scores[not playerjm]:
# return 1
# elif scores[playerjm] == scores[not playerjm]:
# return 0.5
# else:
# return 0
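
# Hedged usage sketch (illustrative; `map_dict` is a hypothetical serialized
# map as consumed by mMap.mMap, with camp 0 to move):
# state = RTSState(map_dict, 0)
# state.DoMove(random.choice(state.GetMoves()))
# print state.GetResult(0)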
class ObjectFunc(RTSState):
dist = 3
def __init__(self, d, camp):
super(ObjectFunc, self).__init__(d, camp)
self.args_ = []
self.init_args()
def init_args(self):
for u, i, j in self:
if u.camp_ == self.camp and u.type:
self.args_.append(u)
def evaluate(self, values):
return 1
def args_num(self):
return len(self.args_)
def space_size(self):
return reduce(operator.mul, map(lambda x: x.size(), self.args_))
def arg_size(self, i):
return self.args_[i].size()
def random(self):
return [i.random() for i in self.args_]
def vibrate(self, values, p2):
return [self.args_[i].vibrate(v, ObjectFunc.dist, p2) for i, v in enumerate(values)]
| mit | -6,769,616,184,281,385,000 | 22.547771 | 101 | 0.626454 | false |
KelSolaar/sIBL_GUI | sibl_gui/components/core/templates_outliner/templates_outliner.py | 1 | 75391 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**templates_outliner.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines the :class:`TemplatesOutliner` Component Interface class.
**Others:**
"""
from __future__ import unicode_literals
import os
import platform
import re
import sys
if sys.version_info[:2] <= (2, 6):
from ordereddict import OrderedDict
else:
from collections import OrderedDict
from PyQt4.QtCore import QMargins
from PyQt4.QtCore import QString
from PyQt4.QtCore import QUrl
from PyQt4.QtCore import Qt
from PyQt4.QtCore import pyqtSignal
from PyQt4.QtGui import QAction
from PyQt4.QtGui import QDesktopServices
from PyQt4.QtGui import QFileDialog
from PyQt4.QtGui import QMessageBox
import foundations.common
import foundations.exceptions
import foundations.strings
import foundations.verbose
import foundations.walkers
import sibl_gui.components.core.database.exceptions
import sibl_gui.components.core.database.operations
import umbra.engine
import umbra.exceptions
import umbra.ui.common
import umbra.ui.nodes
import umbra.ui.widgets.message_box as message_box
from manager.QWidget_component import QWidgetComponentFactory
from sibl_gui.components.core.database.nodes import CollectionNode
from sibl_gui.components.core.database.nodes import TemplateNode
from sibl_gui.components.core.templates_outliner.models import TemplatesModel
from sibl_gui.components.core.templates_outliner.nodes import SoftwareNode
from sibl_gui.components.core.templates_outliner.views import Templates_QTreeView
from sibl_gui.components.core.database.types import Template
from umbra.globals.constants import Constants
from umbra.globals.runtime_globals import RuntimeGlobals
from umbra.globals.ui_constants import UiConstants
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["LOGGER", "COMPONENT_UI_FILE", "TemplatesOutliner"]
LOGGER = foundations.verbose.install_logger()
COMPONENT_UI_FILE = os.path.join(os.path.dirname(__file__), "ui", "Templates_Outliner.ui")
class TemplatesOutliner(QWidgetComponentFactory(ui_file=COMPONENT_UI_FILE)):
"""
| Defines the :mod:`sibl_gui.components.core.templates_outliner.templates_outliner` Component Interface class.
| It defines methods for Database Templates management.
"""
# Custom signals definitions.
refresh_nodes = pyqtSignal()
"""
    This signal is emitted by the :class:`TemplatesOutliner` class when :obj:`TemplatesOutliner.model` class property
    model Nodes need to be refreshed.
"""
def __init__(self, parent=None, name=None, *args, **kwargs):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param name: Component name.
:type name: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
super(TemplatesOutliner, self).__init__(parent, name, *args, **kwargs)
# --- Setting class attributes. ---
self.deactivatable = False
self.__ui_resources_directory = "resources"
self.__ui_software_affixe = "_Software.png"
self.__ui_unknown_software_image = "Unknown_Software.png"
self.__dock_area = 1
self.__engine = None
self.__settings = None
self.__settings_section = None
self.__settings_separator = ","
self.__script_editor = None
self.__database = None
self.__model = None
self.__view = None
        self.__headers = OrderedDict([("Templates", "name"),
("Release", "release"),
("Software Version", "version")])
self.__extension = "sIBLT"
self.__default_collections = None
self.__factory_collection = "Factory"
self.__user_collection = "User"
self.__tree_view_inner_margins = QMargins(0, 0, 0, 12)
self.__templates_informations_default_text = \
"<center><h4>* * *</h4>Select a Template to display related informations!<h4>* * *</h4></center>"
self.__templates_informations_text = """
<h4><center>{0}</center></h4>
<p>
<b>Date:</b> {1}
<br/>
<b>Author:</b> {2}
<br/>
<b>Email:</b> <a href="mailto:{3}">
<span style=" text-decoration: underline; color:#e0e0e0;">{3}</span></a>
<br/>
<b>Url:</b> <a href="{4}">
<span style=" text-decoration: underline; color:#e0e0e0;">{4}</span></a>
<br/>
<b>Output script:</b> {5}
<p>
<b>Comment:</b> {6}
</p>
<p>
<b>Help file:</b> <a href="{7}">
<span style=" text-decoration: underline; color:#e0e0e0;">
Template Manual</span></a>
</p>
</p>
"""
@property
def ui_resources_directory(self):
"""
Property for **self.__ui_resources_directory** attribute.
:return: self.__ui_resources_directory.
:rtype: unicode
"""
return self.__ui_resources_directory
@ui_resources_directory.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_resources_directory(self, value):
"""
Setter for **self.__ui_resources_directory** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "ui_resources_directory"))
@ui_resources_directory.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_resources_directory(self):
"""
Deleter for **self.__ui_resources_directory** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "ui_resources_directory"))
@property
def ui_software_affixe(self):
"""
Property for **self.__ui_software_affixe** attribute.
:return: self.__ui_software_affixe.
:rtype: unicode
"""
return self.__ui_software_affixe
@ui_software_affixe.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_software_affixe(self, value):
"""
Setter for **self.__ui_software_affixe** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "ui_software_affixe"))
@ui_software_affixe.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_software_affixe(self):
"""
Deleter for **self.__ui_software_affixe** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "ui_software_affixe"))
@property
def ui_unknown_software_image(self):
"""
Property for **self.__ui_unknown_software_image** attribute.
:return: self.__ui_unknown_software_image.
:rtype: unicode
"""
return self.__ui_unknown_software_image
@ui_unknown_software_image.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_unknown_software_image(self, value):
"""
Setter for **self.__ui_unknown_software_image** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "ui_unknown_software_image"))
@ui_unknown_software_image.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_unknown_software_image(self):
"""
Deleter for **self.__ui_unknown_software_image** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "ui_unknown_software_image"))
@property
def dock_area(self):
"""
Property for **self.__dock_area** attribute.
:return: self.__dock_area.
:rtype: int
"""
return self.__dock_area
@dock_area.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def dock_area(self, value):
"""
Setter for **self.__dock_area** attribute.
:param value: Attribute value.
:type value: int
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "dock_area"))
@dock_area.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def dock_area(self):
"""
Deleter for **self.__dock_area** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "dock_area"))
@property
def engine(self):
"""
Property for **self.__engine** attribute.
:return: self.__engine.
:rtype: QObject
"""
return self.__engine
@engine.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def engine(self, value):
"""
Setter for **self.__engine** attribute.
:param value: Attribute value.
:type value: QObject
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "engine"))
@engine.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def engine(self):
"""
Deleter for **self.__engine** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "engine"))
@property
def settings(self):
"""
Property for **self.__settings** attribute.
:return: self.__settings.
:rtype: QSettings
"""
return self.__settings
@settings.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def settings(self, value):
"""
Setter for **self.__settings** attribute.
:param value: Attribute value.
:type value: QSettings
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "settings"))
@settings.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def settings(self):
"""
Deleter for **self.__settings** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "settings"))
@property
def settings_section(self):
"""
Property for **self.__settings_section** attribute.
:return: self.__settings_section.
:rtype: unicode
"""
return self.__settings_section
@settings_section.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def settings_section(self, value):
"""
Setter for **self.__settings_section** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "settings_section"))
@settings_section.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def settings_section(self):
"""
Deleter for **self.__settings_section** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "settings_section"))
@property
def settings_separator(self):
"""
Property for **self.__settings_separator** attribute.
:return: self.__settings_separator.
:rtype: unicode
"""
return self.__settings_separator
@settings_separator.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def settings_separator(self, value):
"""
Setter for **self.__settings_separator** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "settings_separator"))
@settings_separator.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def settings_separator(self):
"""
Deleter for **self.__settings_separator** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "settings_separator"))
@property
def script_editor(self):
"""
Property for **self.__script_editor** attribute.
:return: self.__script_editor.
:rtype: QWidget
"""
return self.__script_editor
@script_editor.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def script_editor(self, value):
"""
Setter for **self.__script_editor** attribute.
:param value: Attribute value.
:type value: QWidget
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "script_editor"))
@script_editor.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def script_editor(self):
"""
Deleter for **self.__script_editor** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "script_editor"))
@property
def database(self):
"""
Property for **self.__database** attribute.
:return: self.__database.
:rtype: object
"""
return self.__database
@database.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def database(self, value):
"""
Setter for **self.__database** attribute.
:param value: Attribute value.
:type value: object
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "database"))
@database.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def database(self):
"""
Deleter for **self.__database** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "database"))
@property
def model(self):
"""
Property for **self.__model** attribute.
:return: self.__model.
:rtype: TemplatesModel
"""
return self.__model
@model.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def model(self, value):
"""
Setter for **self.__model** attribute.
:param value: Attribute value.
:type value: TemplatesModel
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "model"))
@model.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def model(self):
"""
Deleter for **self.__model** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "model"))
@property
def view(self):
"""
Property for **self.__view** attribute.
:return: self.__view.
:rtype: QWidget
"""
return self.__view
@view.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def view(self, value):
"""
Setter for **self.__view** attribute.
:param value: Attribute value.
:type value: QWidget
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "view"))
@view.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def view(self):
"""
Deleter for **self.__view** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "view"))
@property
def headers(self):
"""
Property for **self.__headers** attribute.
:return: self.__headers.
:rtype: OrderedDict
"""
return self.__headers
@headers.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def headers(self, value):
"""
Setter for **self.__headers** attribute.
:param value: Attribute value.
:type value: OrderedDict
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "headers"))
@headers.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def headers(self):
"""
Deleter for **self.__headers** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "headers"))
@property
def extension(self):
"""
Property for **self.__extension** attribute.
:return: self.__extension.
:rtype: unicode
"""
return self.__extension
@extension.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def extension(self, value):
"""
Setter for **self.__extension** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "extension"))
@extension.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def extension(self):
"""
Deleter for **self.__extension** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "extension"))
@property
def default_collections(self):
"""
Property for **self.__default_collections** attribute.
:return: self.__default_collections.
:rtype: dict
"""
return self.__default_collections
@default_collections.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def default_collections(self, value):
"""
Setter for **self.__default_collections** attribute.
:param value: Attribute value.
:type value: dict
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "default_collections"))
@default_collections.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def default_collections(self):
"""
Deleter for **self.__default_collections** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "default_collections"))
@property
def factory_collection(self):
"""
Property for **self.__factory_collection** attribute.
:return: self.__factory_collection.
:rtype: unicode
"""
return self.__factory_collection
@factory_collection.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def factory_collection(self, value):
"""
Setter for **self.__factory_collection** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "factory_collection"))
@factory_collection.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def factory_collection(self):
"""
Deleter for **self.__factory_collection** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "factory_collection"))
@property
def user_collection(self):
"""
Property for **self.__user_collection** attribute.
:return: self.__user_collection.
:rtype: unicode
"""
return self.__user_collection
@user_collection.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def user_collection(self, value):
"""
Setter for **self.__user_collection** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "user_collection"))
@user_collection.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def user_collection(self):
"""
Deleter for **self.__user_collection** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "user_collection"))
@property
def templates_informations_default_text(self):
"""
Property for **self.__templates_informations_default_text** attribute.
:return: self.__templates_informations_default_text.
:rtype: unicode
"""
return self.__templates_informations_default_text
@templates_informations_default_text.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def templates_informations_default_text(self, value):
"""
Setter for **self.__templates_informations_default_text** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__,
"templates_informations_default_text"))
@templates_informations_default_text.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def templates_informations_default_text(self):
"""
Deleter for **self.__templates_informations_default_text** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__,
"templates_informations_default_text"))
@property
def tree_view_inner_margins(self):
"""
Property for **self.__tree_view_inner_margins** attribute.
:return: self.__tree_view_inner_margins.
:rtype: int
"""
return self.__tree_view_inner_margins
@tree_view_inner_margins.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def tree_view_inner_margins(self, value):
"""
Setter for **self.__tree_view_inner_margins** attribute.
:param value: Attribute value.
:type value: int
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "tree_view_inner_margins"))
@tree_view_inner_margins.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def tree_view_inner_margins(self):
"""
Deleter for **self.__tree_view_inner_margins** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "tree_view_inner_margins"))
@property
def templates_informations_text(self):
"""
Property for **self.__templates_informations_text** attribute.
:return: self.__templates_informations_text.
:rtype: unicode
"""
return self.__templates_informations_text
@templates_informations_text.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def templates_informations_text(self, value):
"""
Setter for **self.__templates_informations_text** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templates_informations_text"))
@templates_informations_text.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def templates_informations_text(self):
"""
Deleter for **self.__templates_informations_text** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templates_informations_text"))
def activate(self, engine):
"""
Activates the Component.
:param engine: Engine to attach the Component to.
:type engine: QObject
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))
self.__ui_resources_directory = os.path.join(os.path.dirname(__file__), self.__ui_resources_directory)
self.__engine = engine
self.__settings = self.__engine.settings
self.__settings_section = self.name
self.__script_editor = self.__engine.components_manager["factory.script_editor"]
self.__database = self.__engine.components_manager["core.database"]
RuntimeGlobals.templates_factory_directory = umbra.ui.common.get_resource_path(Constants.templates_directory)
RuntimeGlobals.templates_user_directory = os.path.join(self.__engine.user_application_data_directory,
Constants.templates_directory)
self.__default_collections = {self.__factory_collection: RuntimeGlobals.templates_factory_directory,
self.__user_collection: RuntimeGlobals.templates_user_directory}
self.activated = True
return True
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def deactivate(self):
"""
Deactivates the Component.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' Component cannot be deactivated!".format(self.__class__.__name__, self.__name))
def initialize_ui(self):
"""
Initializes the Component ui.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))
self.__engine.parameters.database_read_only and \
LOGGER.info("{0} | Model edition deactivated by '{1}' command line parameter value!".format(
self.__class__.__name__, "database_read_only"))
self.__model = TemplatesModel(self, horizontal_headers=self.__headers)
self.set_templates()
self.Templates_Outliner_treeView.setParent(None)
self.Templates_Outliner_treeView = Templates_QTreeView(self,
self.__model,
self.__engine.parameters.database_read_only,
"No Template to view!")
self.Templates_Outliner_treeView.setObjectName("Templates_Outliner_treeView")
self.Templates_Outliner_gridLayout.setContentsMargins(self.__tree_view_inner_margins)
self.Templates_Outliner_gridLayout.addWidget(self.Templates_Outliner_treeView, 0, 0)
self.__view = self.Templates_Outliner_treeView
self.__view.setContextMenuPolicy(Qt.ActionsContextMenu)
self.__view_add_actions()
self.Template_Informations_textBrowser.setText(self.__templates_informations_default_text)
self.Template_Informations_textBrowser.setOpenLinks(False)
self.Templates_Outliner_splitter.setSizes([16777215, 1])
# Signals / Slots.
self.__engine.images_caches.QIcon.content_added.connect(self.__view.viewport().update)
self.__view.selectionModel().selectionChanged.connect(self.__view_selectionModel__selectionChanged)
self.Template_Informations_textBrowser.anchorClicked.connect(
self.__Template_Informations_textBrowser__anchorClicked)
self.refresh_nodes.connect(self.__model__refresh_nodes)
if not self.__engine.parameters.database_read_only:
self.__engine.file_system_events_manager.file_changed.connect(
self.__engine_file_system_events_manager__file_changed)
self.__engine.content_dropped.connect(self.__engine__content_dropped)
else:
LOGGER.info("{0} | Templates file system events ignored by '{1}' command line parameter value!".format(
self.__class__.__name__, "database_read_only"))
self.initialized_ui = True
return True
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def uninitialize_ui(self):
"""
Uninitializes the Component ui.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' Component ui cannot be uninitialized!".format(self.__class__.__name__, self.name))
def add_widget(self):
"""
Adds the Component Widget to the engine.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Adding '{0}' Component Widget.".format(self.__class__.__name__))
self.__engine.addDockWidget(Qt.DockWidgetArea(self.__dock_area), self)
return True
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def remove_widget(self):
"""
Removes the Component Widget from the engine.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' Component Widget cannot be removed!".format(self.__class__.__name__, self.name))
def on_startup(self):
"""
Defines the slot triggered by Framework startup.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Calling '{0}' Component Framework 'on_startup' method.".format(self.__class__.__name__))
if not self.__engine.parameters.database_read_only:
# Adding default Templates.
self.add_default_templates()
# Wizard if Templates table is empty.
if not self.get_templates():
if message_box.message_box("Question", "Question",
"The Database has no Templates, would you like to add some?",
buttons=QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes:
directory = umbra.ui.common.store_last_browsed_path((QFileDialog.getExistingDirectory(self,
"Add Content:",
RuntimeGlobals.last_browsed_path)))
if directory:
if not self.add_directory(directory):
raise Exception(
"{0} | Exception raised while adding '{1}' directory content to the Database!".format(
self.__class__.__name__, directory))
# Templates table integrity checking.
erroneous_templates = sibl_gui.components.core.database.operations.check_templates_table_integrity()
for template, exceptions in erroneous_templates.iteritems():
if sibl_gui.components.core.database.exceptions.MissingTemplateFileError in exceptions:
choice = message_box.message_box("Question", "Error",
"{0} | '{1}' Template file is missing, would you like to update it's location?".format(
self.__class__.__name__, template.name),
QMessageBox.Critical, QMessageBox.Yes | QMessageBox.No,
custom_buttons=((QString("No To All"), QMessageBox.RejectRole),))
if choice == 0:
break
if choice == QMessageBox.Yes:
if self.update_template_location_ui(template):
# TODO: Check updated Template file integrity.
continue
for exception in exceptions:
self.__engine.notifications_manager.warnify(
"{0} | '{1}' {2}".format(self.__class__.__name__,
template.name,
sibl_gui.components.core.database.operations.DATABASE_EXCEPTIONS[
exception]))
else:
LOGGER.info("{0} | Database default Templates wizard and Templates integrity checking method deactivated\
by '{1}' command line parameter value!".format(self.__class__.__name__, "database_read_only"))
active_collections_identities = foundations.strings.to_string(self.__settings.get_key(
self.__settings_section, "active_collections").toString())
LOGGER.debug("> Stored '{0}' active Collections selection: '{1}'.".format(self.__class__.__name__,
active_collections_identities))
self.__view.model_selection["collections"] = active_collections_identities and [int(identity)
for identity in
active_collections_identities.split(
self.__settings_separator)] or []
active_softwares = foundations.strings.to_string(
self.__settings.get_key(self.__settings_section, "active_softwares").toString())
LOGGER.debug("> Stored '{0}' active softwares selection: '{1}'.".format(
self.__class__.__name__, active_softwares))
self.__view.model_selection["Softwares"] = active_softwares and active_softwares.split(
self.__settings_separator) or []
active_templates_identities = foundations.strings.to_string(
self.__settings.get_key(self.__settings_section, "activeTemplates").toString())
LOGGER.debug("> '{0}' View stored selected Templates identities '{1}'.".format(self.__class__.__name__,
active_templates_identities))
self.__view.model_selection["templates"] = active_templates_identities and [int(identity)
for identity in
active_templates_identities.split(
self.__settings_separator)] or []
self.__view.restore_model_selection()
return True
def on_close(self):
"""
Defines the slot triggered by Framework close.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Calling '{0}' Component Framework 'on_close' method.".format(self.__class__.__name__))
self.__view.store_model_selection()
self.__settings.set_key(self.__settings_section,
"activeTemplates",
self.__settings_separator.join(foundations.strings.to_string(identity)
for identity in
self.__view.model_selection["templates"]))
self.__settings.set_key(self.__settings_section,
"active_collections",
self.__settings_separator.join(foundations.strings.to_string(identity)
for identity in
self.__view.model_selection["collections"]))
self.__settings.set_key(self.__settings_section,
"active_softwares",
self.__settings_separator.join(foundations.strings.to_string(name)
for name in self.__view.model_selection["Softwares"]))
return True
def __model__refresh_nodes(self):
"""
Defines the slot triggered by the Model when nodes need refresh.
"""
self.set_templates()
def __view_add_actions(self):
"""
Sets the View actions.
"""
if not self.__engine.parameters.database_read_only:
self.__view.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|core.templates_outliner|Add Template ...",
slot=self.__view_add_template_action__triggered))
self.__view.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|core.templates_outliner|Remove Template(s) ...",
slot=self.__view_remove_templates_action__triggered))
separator_action = QAction(self.__view)
separator_action.setSeparator(True)
self.__view.addAction(separator_action)
self.__view.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|core.templates_outliner|Import Default Templates",
slot=self.__view_import_default_templates_action__triggered))
self.__view.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|core.templates_outliner|Filter Templates Versions",
slot=self.__view_filter_templates_versions_action__triggered))
separator_action = QAction(self.__view)
separator_action.setSeparator(True)
self.__view.addAction(separator_action)
else:
LOGGER.info("{0} | Templates Database alteration capabilities deactivated\
by '{1}' command line parameter value!".format(self.__class__.__name__, "database_read_only"))
self.__view.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|core.templates_outliner|Display Help File(s) ...",
slot=self.__view_display_help_files_action__triggered))
separator_action = QAction(self.__view)
separator_action.setSeparator(True)
self.__view.addAction(separator_action)
def __view_add_template_action__triggered(self, checked):
"""
        Defines the slot triggered by \*\*'Actions|Umbra|Components|core.templates_outliner|Add Template ...'\*\* action.
:param checked: Action checked state.
:type checked: bool
:return: Method success.
:rtype: bool
"""
return self.add_template_ui()
def __view_remove_templates_action__triggered(self, checked):
"""
        Defines the slot triggered by \*\*'Actions|Umbra|Components|core.templates_outliner|Remove Template(s) ...'\*\* action.
:param checked: Action checked state.
:type checked: bool
:return: Method success.
:rtype: bool
"""
return self.remove_templates_ui()
def __view_import_default_templates_action__triggered(self, checked):
"""
        Defines the slot triggered by \*\*'Actions|Umbra|Components|core.templates_outliner|Import Default Templates'\*\* action.
:param checked: Action checked state.
:type checked: bool
:return: Method success.
:rtype: bool
"""
return self.import_default_templates_ui()
def __view_display_help_files_action__triggered(self, checked):
"""
        Defines the slot triggered by \*\*'Actions|Umbra|Components|core.templates_outliner|Display Help File(s) ...'\*\* action.
:param checked: Action checked state.
:type checked: bool
:return: Method success.
:rtype: bool
"""
return self.display_help_files_ui()
def __view_filter_templates_versions_action__triggered(self, checked):
"""
        Defines the slot triggered by \*\*'Actions|Umbra|Components|core.templates_outliner|Filter Templates Versions'\*\* action.
:param checked: Action checked state.
:type checked: bool
:return: Method success.
:rtype: bool
"""
return self.filter_templates_versions_ui()
def __view_selectionModel__selectionChanged(self, selected_items, deselected_items):
"""
Sets the **Template_Informations_textEdit** Widget.
:param selected_items: Selected items.
:type selected_items: QItemSelection
:param deselected_items: Deselected items.
:type deselected_items: QItemSelection
"""
LOGGER.debug("> Initializing '{0}' Widget.".format("Template_Informations_textEdit"))
selected_templates = self.get_selected_templates()
content = []
if selected_templates:
for template in selected_templates:
help_file = template.help_file or umbra.ui.common.get_resource_path(UiConstants.invalid_link_html_file)
content.append(self.__templates_informations_text.format(template.title,
template.date,
template.author,
template.email,
template.url,
template.output_script,
template.comment,
QUrl.fromLocalFile(help_file).toString()))
else:
content.append(self.__templates_informations_default_text)
separator = "" if len(content) == 1 else "<p><center>* * *<center/></p>"
self.Template_Informations_textBrowser.setText(separator.join(content))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler,
foundations.exceptions.UserError)
@umbra.engine.show_processing("Retrieving Templates ...")
def __engine__content_dropped(self, event):
"""
Defines the slot triggered when content is dropped into the engine.
:param event: Event.
:type event: QEvent
"""
if not event.mimeData().hasUrls():
return
LOGGER.debug("> Drag event urls list: '{0}'!".format(event.mimeData().urls()))
if not self.__engine.parameters.database_read_only:
for url in event.mimeData().urls():
path = foundations.strings.to_string(url.path())
LOGGER.debug("> Handling dropped '{0}' file.".format(path))
path = (platform.system() == "Windows" or platform.system() == "Microsoft") and \
re.search(r"^\/[A-Z]:", path) and path[1:] or path
if re.search(r"\.{0}$".format(self.__extension), path):
name = foundations.strings.get_splitext_basename(path)
choice = message_box.message_box("Question", "Question",
"'{0}' Template file has been dropped, would you like to 'Add' it to the Database or \
'Edit' it in the Script Editor?".format(name),
buttons=QMessageBox.Cancel,
custom_buttons=((QString("Add"), QMessageBox.AcceptRole),
(QString("Edit"), QMessageBox.AcceptRole)))
if choice == 0:
self.add_template(name, path)
elif choice == 1:
self.__script_editor.load_file(path) and self.__script_editor.restore_development_layout()
else:
if not os.path.isdir(path):
return
if not list(foundations.walkers.files_walker(path, ("\.{0}$".format(self.__extension),), ("\._",))):
return
if message_box.message_box("Question", "Question",
"Would you like to add '{0}' directory Template(s) file(s) to the Database?".format(
path),
buttons=QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes:
self.add_directory(path)
self.__engine.process_events()
else:
raise foundations.exceptions.UserError(
"{0} | Cannot perform action, Database has been set read only!".format(self.__class__.__name__))
def __engine_file_system_events_manager__file_changed(self, file):
"""
Defines the slot triggered by the **file_system_events_manager** when a file is changed.
:param file: File changed.
:type file: unicode
"""
template = foundations.common.get_first_item(filter(lambda x: x.path == file, self.get_templates()))
if not template:
return
if sibl_gui.components.core.database.operations.update_template_content(template):
self.__engine.notifications_manager.notify(
"{0} | '{1}' Template file has been reparsed and associated database object updated!".format(
self.__class__.__name__, template.title))
self.refresh_nodes.emit()
def __Template_Informations_textBrowser__anchorClicked(self, url):
"""
Defines the slot triggered by **Template_Informations_textBrowser** Widget when a link is clicked.
:param url: Url to explore.
:type url: QUrl
"""
QDesktopServices.openUrl(url)
def __get_candidate_collection_id(self, path=None):
"""
Returns a Collection id.
:param path: Template path.
:type path: unicode
:return: Collection id.
:rtype: int
"""
collection = self.get_collection_by_name(self.__user_collection)
identity = collection and collection.id or None
        factory_collection_path = self.__default_collections[self.__factory_collection]
        if path and factory_collection_path:
            if os.path.normpath(factory_collection_path) in os.path.normpath(path):
collection = self.get_collection_by_name(self.__factory_collection)
identity = collection and collection.id or None
return identity
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
@umbra.engine.show_processing("Adding Template ...")
def add_template_ui(self):
"""
Adds an user defined Template to the Database.
:return: Method success.
:rtype: bool
:note: May require user interaction.
"""
path = umbra.ui.common.store_last_browsed_path((QFileDialog.getOpenFileName(self,
"Add Template:",
RuntimeGlobals.last_browsed_path,
"sIBLT files (*.{0})".format(
self.__extension))))
if not path:
return
if not self.template_exists(path):
LOGGER.debug("> Chosen Template path: '{0}'.".format(path))
if self.add_template(foundations.strings.get_splitext_basename(path), path):
return True
else:
raise Exception("{0} | Exception raised while adding '{1}' Template to the Database!".format(
self.__class__.__name__, path))
else:
self.__engine.notifications_manager.warnify(
"{0} | '{1}' Template already exists in Database!".format(self.__class__.__name__, path))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
@umbra.engine.encapsulate_processing
def remove_templates_ui(self):
"""
Removes user selected Templates from the Database.
:return: Method success.
:rtype: bool
:note: May require user interaction.
"""
selected_nodes = self.get_selected_nodes()
selected_collections = []
selected_softwares = []
for item in selected_nodes:
if item.family == "Collection":
selected_collections.append(item.name)
elif item.family == "Software":
selected_softwares.append(item.name)
selected_collections and self.__engine.notifications_manager.warnify(
"{0} | '{1}' Collection(s) cannot be removed!".format(self.__class__.__name__,
", ".join(selected_collections)))
selected_softwares and self.__engine.notifications_manager.warnify(
"{0} | '{1}' software(s) cannot be removed!".format(self.__class__.__name__, ", ".join(selected_softwares)))
selected_templates = self.get_selected_templates()
if not selected_templates:
return False
if message_box.message_box("Question", "Question",
"Are you sure you want to remove '{0}' Template(s)?".format(
", ".join([foundations.strings.to_string(template.name) for template in
selected_templates])),
buttons=QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes:
self.__engine.start_processing("Removing Templates ...", len(selected_templates))
success = True
for template in selected_templates:
success *= umbra.ui.common.signals_blocker(self, self.remove_template, template) or False
self.__engine.step_processing()
self.__engine.stop_processing()
self.refresh_nodes.emit()
if success:
return True
else:
raise Exception("{0} | Exception raised while removing '{1}' Templates from the Database!".format(
self.__class__.__name__, ", ".join((template.name for template in selected_templates))))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler,
sibl_gui.components.core.database.exceptions.DatabaseOperationError)
def update_template_location_ui(self, template):
"""
Updates given Template location.
:param template: Template to update.
:type template: Template
:return: Method success.
:rtype: bool
:note: May require user interaction.
"""
file = umbra.ui.common.store_last_browsed_path((QFileDialog.getOpenFileName(self,
"Updating '{0}' Template Location:".format(
template.name),
RuntimeGlobals.last_browsed_path,
"Template files (*{0})".format(
self.__extension))))
if not file:
return False
LOGGER.info("{0} | Updating '{1}' Template with new location '{2}'!".format(self.__class__.__name__,
template.name, file))
if sibl_gui.components.core.database.operations.update_template_location(template, file):
self.refresh_nodes.emit()
return True
else:
raise sibl_gui.components.core.database.exceptions.DatabaseOperationError(
"{0} | Exception raised while updating '{1}' Template location!".format(self.__class__.__name__,
template.name))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
@umbra.engine.show_processing("Importing Default Templates ...")
def import_default_templates_ui(self):
"""
Imports default Templates into the Database.
:return: Method success.
:rtype: bool
:note: May require user interaction.
"""
if self.add_default_templates(force_import=True):
return True
else:
raise Exception("{0} | Exception raised while importing default Templates into the Database!".format(
self.__class__.__name__))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
@umbra.engine.encapsulate_processing
def display_help_files_ui(self):
"""
Displays user selected Templates help files.
:return: Method success.
:rtype: bool
"""
selected_templates = self.get_selected_templates()
if not selected_templates:
return False
self.__engine.start_processing("Displaying Templates Help Files ...", len(selected_templates))
success = True
for template in selected_templates:
success *= self.display_help_file(template) or False
self.__engine.step_processing()
self.__engine.stop_processing()
if success:
return True
else:
raise Exception(
"{0} | Exception raised while displaying Templates help files!".format(self.__class__.__name__))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
@umbra.engine.encapsulate_processing
def filter_templates_versions_ui(self):
"""
Filters Templates by versions.
:return: Method success.
:rtype: bool
:note: May require user interaction.
"""
templates = sibl_gui.components.core.database.operations.get_templates()
self.__engine.start_processing("Filtering Templates ...", len(templates.all()))
success = True
for template in templates:
matching_templates = sibl_gui.components.core.database.operations.filter_templates(
"^{0}$".format(template.name), "name")
if len(matching_templates) != 1:
for identity in sorted(
[(database_template.id, database_template.release) for database_template in matching_templates],
reverse=True,
key=lambda x: (foundations.strings.get_version_rank(x[1])))[1:]:
success *= sibl_gui.components.core.database.operations.remove_template(
foundations.common.get_first_item(identity)) or False
self.refresh_nodes.emit()
self.__engine.step_processing()
self.__engine.stop_processing()
if success:
return True
else:
raise Exception(
"{0} | Exception raised while filtering Templates by versions!".format(self.__class__.__name__))
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError,
sibl_gui.components.core.database.exceptions.DatabaseOperationError)
def add_template(self, name, path, collection_id=None):
"""
Adds a Template to the Database.
:param name: Template set name.
:type name: unicode
:param path: Template set path.
:type path: unicode
:param collection_id: Target Collection id.
:type collection_id: int
:return: Method success.
:rtype: bool
"""
if not sibl_gui.components.core.database.operations.filter_templates("^{0}$".format(re.escape(path)), "path"):
LOGGER.info("{0} | Adding '{1}' Template to the Database!".format(self.__class__.__name__, name))
if sibl_gui.components.core.database.operations.add_template(
name, path, collection_id or self.__get_candidate_collection_id(path)):
self.refresh_nodes.emit()
return True
else:
raise sibl_gui.components.core.database.exceptions.DatabaseOperationError(
"{0} | Exception raised while adding '{1}' Template to the Database!".format(
self.__class__.__name__, name))
else:
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' Template already exists in Database!".format(self.__class__.__name__, name))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
@umbra.engine.encapsulate_processing
def add_directory(self, directory, collection_id=None):
"""
Adds given directory Templates to the Database.
:param directory: Templates directory.
:type directory: unicode
:param collection_id: Collection id.
:type collection_id: int
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Initializing directory '{0}' files_walker.".format(directory))
files = list(foundations.walkers.files_walker(directory, ("\.{0}$".format(self.__extension),), ("\._",)))
self.__engine.start_processing("Adding Directory Templates ...", len(files))
success = True
for path in files:
if not self.template_exists(path):
success *= umbra.ui.common.signals_blocker(self,
self.add_template,
foundations.strings.get_splitext_basename(path),
path,
collection_id) or False
self.__engine.step_processing()
self.__engine.stop_processing()
self.refresh_nodes.emit()
if success:
return True
else:
raise Exception("{0} | Exception raised while adding '{1}' directory content to the Database!".format(
self.__class__.__name__, directory))
@foundations.exceptions.handle_exceptions(umbra.exceptions.notify_exception_handler, Exception)
def add_default_templates(self, force_import=False):
"""
Adds default Templates Collections / Templates to the Database.
:param force_import: Force Templates import.
:type force_import: bool
:return: Method success.
:rtype: bool
"""
if not force_import and self.get_templates():
return False
LOGGER.debug("> Adding default Templates to the Database.")
success = True
for collection, path in ((collection, path) for (collection, path) in self.__default_collections.iteritems() if
path):
if not foundations.common.path_exists(path):
continue
if not set(sibl_gui.components.core.database.operations.filter_collections(
"^{0}$".format(collection), "name")).intersection(
sibl_gui.components.core.database.operations.filter_collections("templates", "type")):
LOGGER.info("{0} | Adding '{1}' Collection to the Database!".format(
self.__class__.__name__, collection))
sibl_gui.components.core.database.operations.add_collection(
collection, "templates", "Template {0} Collection".format(collection))
success *= self.add_directory(path, self.get_collection_by_name(collection).id)
if success:
return True
else:
raise Exception("{0} | Exception raised while adding default Templates to the Database!".format(
self.__class__.__name__))
@foundations.exceptions.handle_exceptions(sibl_gui.components.core.database.exceptions.DatabaseOperationError)
def remove_template(self, template):
"""
Removes given Template from the Database.
        :param template: Template to remove.
        :type template: Template
:return: Method success.
:rtype: bool
"""
LOGGER.info("{0} | Removing '{1}' Template from the Database!".format(self.__class__.__name__, template.name))
if sibl_gui.components.core.database.operations.remove_template(foundations.strings.to_string(template.id)):
self.refresh_nodes.emit()
return True
else:
raise sibl_gui.components.core.database.exceptions.DatabaseOperationError(
"{0} | Exception raised while removing '{1}' Template from the Database!".format(
self.__class__.__name__,
template.name))
def template_exists(self, path):
"""
Returns if given Template path exists in the Database.
        :param path: Template path.
        :type path: unicode
:return: Template exists.
:rtype: bool
"""
return sibl_gui.components.core.database.operations.template_exists(path)
@foundations.exceptions.handle_exceptions(foundations.exceptions.FileExistsError)
def display_help_file(self, template):
"""
Displays given Templates help file.
:param template: Template to display help file.
:type template: Template
:return: Method success.
:rtype: bool
"""
help_file = template.help_file or umbra.ui.common.get_resource_path(UiConstants.invalid_link_html_file)
if foundations.common.path_exists(help_file):
LOGGER.info("{0} | Opening '{1}' Template help file: '{2}'.".format(self.__class__.__name__,
template.name,
help_file))
QDesktopServices.openUrl(QUrl.fromLocalFile(help_file))
return True
else:
raise foundations.exceptions.FileExistsError(
"{0} | Exception raised while displaying '{1}' Template help file: '{2}' file doesn't exists!".format(
self.__class__.__name__, template.name, help_file))
def get_collections(self):
"""
Returns Database Templates Collections.
:return: Database Templates Collections.
:rtype: list
"""
return sibl_gui.components.core.database.operations.get_collections_by_type("templates")
def filter_collections(self, pattern, attribute, flags=re.IGNORECASE):
"""
Filters the Database Templates Collections on given attribute using given pattern.
:param pattern: Filter pattern.
:type pattern: unicode
:param attribute: Attribute to filter on.
:type attribute: unicode
:param flags: Regex filtering flags.
:type flags: int
:return: Filtered Database Templates Collections.
:rtype: list
"""
try:
pattern = re.compile(pattern, flags)
except Exception:
return list()
return sibl_gui.components.core.database.operations.filter_templates_collections(
"{0}".format(foundations.strings.to_string(pattern.pattern)), attribute, flags)
def get_templates(self):
"""
Returns Database Templates.
:return: Database Templates.
:rtype: list
"""
return [template for template in sibl_gui.components.core.database.operations.get_templates()]
def filter_templates(self, pattern, attribute, flags=re.IGNORECASE):
"""
Filters the Database Templates on given attribute using given pattern.
:param pattern: Filter pattern.
:type pattern: unicode
:param attribute: Attribute to filter on.
:type attribute: unicode
:param flags: Regex filtering flags.
:type flags: int
:return: Filtered Database Templates.
:rtype: list
"""
try:
pattern = re.compile(pattern, flags)
except Exception:
return list()
return list(set(self.get_templates()).intersection(
sibl_gui.components.core.database.operations.filter_templates(
"{0}".format(foundations.strings.to_string(pattern.pattern)), attribute, flags)))
def list_templates(self):
"""
Lists Database Templates names.
:return: Database Templates names.
:rtype: list
"""
return [template.title for template in self.get_templates()]
def set_templates(self):
"""
Sets the Templates Model nodes.
"""
node_flags = attributes_flags = int(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
root_node = umbra.ui.nodes.DefaultNode(name="InvisibleRootNode")
collections = sibl_gui.components.core.database.operations.filter_collections("templates", "type")
for collection in collections:
softwares = set((foundations.common.get_first_item(software) for software in
sibl_gui.components.core.database.operations.query(Template.software).filter(
Template.collection == collection.id)))
if not softwares:
continue
collection_node = CollectionNode(collection,
name=collection.name,
parent=root_node,
node_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled),
attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
collection_node["release"] = sibl_gui.ui.nodes.GraphModelAttribute(name="release",
flags=int(
Qt.ItemIsSelectable | Qt.ItemIsEnabled))
collection_node["version"] = sibl_gui.ui.nodes.GraphModelAttribute(name="version",
flags=int(
Qt.ItemIsSelectable | Qt.ItemIsEnabled))
for software in softwares:
templates = set((template for template in sibl_gui.components.core.database.operations.query(
Template).filter(Template.collection == collection.id).filter(
Template.software == software)))
if not templates:
continue
software_node = SoftwareNode(name=software,
parent=collection_node,
node_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled),
attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
icon_path = os.path.join(
self.__ui_resources_directory, "{0}{1}".format(software, self.__ui_software_affixe))
software_node.roles[Qt.DecorationRole] = icon_path if foundations.common.path_exists(icon_path) else \
os.path.join(self.__ui_resources_directory, self.__ui_unknown_software_image)
for template in templates:
template_node = TemplateNode(template,
name=foundations.strings.remove_strip(
template.title, template.software),
parent=software_node,
node_flags=node_flags,
attributes_flags=attributes_flags)
path = foundations.strings.to_string(template.path)
if not foundations.common.path_exists(path):
continue
not self.__engine.file_system_events_manager.is_path_registered(path) and \
self.__engine.file_system_events_manager.register_path(
path, modified_time=float(template.os_stats.split(",")[8]))
root_node.sort_children(attribute="title")
self.__model.initialize_model(root_node)
return True
def get_template_by_name(self, name):
"""
Returns Database Template with given name.
:param name: Template name.
:type name: unicode
:return: Database Template.
:rtype: Template
:note: The filtering is actually performed on 'title' attributes instead of 'name' attributes.
"""
templates = self.filter_templates(r"^{0}$".format(name), "title")
return foundations.common.get_first_item(templates)
def get_collection_by_name(self, name):
"""
Returns Templates Collection from given Collection name.
        :param name: Collection name.
        :type name: unicode
:return: Collection.
:rtype: Collection
"""
collections = self.filter_collections(r"^{0}$".format(name), "name")
return foundations.common.get_first_item(collections)
def get_collection_id(self, collection):
"""
Returns given Collection id.
:param collection: Collection to get the id from.
:type collection: unicode
:return: Provided Collection id.
:rtype: int
"""
children = self.__model.find_children(r"^{0}$".format(collection))
child = foundations.common.get_first_item(children)
return child and child.database_item.id or None
def get_selected_nodes(self):
"""
Returns the View selected nodes.
:return: View selected nodes.
:rtype: dict
"""
return self.__view.get_selected_nodes()
def get_selected_templates_nodes(self):
"""
Returns the View selected Templates nodes.
:return: View selected Templates nodes.
:rtype: list
"""
return [node for node in self.get_selected_nodes() if node.family == "Template"]
def get_selected_templates(self):
"""
Returns the View selected Templates.
:return: View selected Templates.
:rtype: list
"""
return [node.database_item for node in self.get_selected_templates_nodes()]
| gpl-3.0 | -2,440,541,239,006,415,400 | 39.016454 | 141 | 0.569033 | false |
runt18/nupic | tests/unit/nupic/encoders/logenc_test.py | 1 | 10561 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for logarithmic encoder"""
import numpy
import math
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
import tempfile
import unittest
from nupic.encoders.logenc import LogEncoder
from nupic.encoders.scalar import ScalarEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.log_capnp import LogEncoderProto
class LogEncoderTest(unittest.TestCase):
"""Unit tests for LogEncoder class"""
def testLogEncoder(self):
# Create the encoder
# use of forced=True is not recommended, but is used in the example for
# readibility, see scalar.py
le = LogEncoder(w=5,
resolution=0.1,
minval=1,
maxval=10000,
name="amount",
forced=True)
# Verify we're setting the description properly
self.assertEqual(le.getDescription(), [("amount", 0)])
# Verify we're getting the correct field types
types = le.getDecoderOutputFieldTypes()
self.assertEqual(types[0], FieldMetaType.float)
# Verify the encoder ends up with the correct width
#
# 10^0 -> 10^4 => 0 -> 4; With a resolution of 0.1
# 41 possible values plus padding = 4 = width 45
self.assertEqual(le.getWidth(), 45)
# Verify we have the correct number of possible values
self.assertEqual(len(le.getBucketValues()), 41)
# Verify closeness calculations
testTuples = [([1], [10000], 0.0),
([1], [1000], 0.25),
([1], [1], 1.0),
([1], [-200], 1.0)]
for tp in testTuples:
expected = tp[0]
actual = tp[1]
expectedResult = tp[2]
self.assertEqual(le.closenessScores(expected, actual),
expectedResult,
"exp: {0!s} act: {1!s} expR: {2!s}".format(str(expected),
str(actual),
str(expectedResult)))
# Verify a value of 1.0 is encoded as expected
value = 1.0
output = le.encode(value)
# Our expected encoded representation of the value 1 is the first
# w bits on in an array of len width.
expected = [1, 1, 1, 1, 1] + 40 * [0]
# Convert to numpy array
expected = numpy.array(expected, dtype="uint8")
self.assertTrue(numpy.array_equal(output, expected))
# Test reverse lookup
decoded = le.decode(output)
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
self.assertTrue(numpy.array_equal(ranges[0], [1, 1]))
# Verify an input representing a missing value is handled properly
mvOutput = le.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(sum(mvOutput), 0)
# Test top-down for all values
value = le.minval
while value <= le.maxval:
output = le.encode(value)
topDown = le.topDownCompute(output)
# Do the scaling by hand here.
scaledVal = math.log10(value)
# Find the range of values that would also produce this top down
# output.
minTopDown = math.pow(10, (scaledVal - le.encoder.resolution))
maxTopDown = math.pow(10, (scaledVal + le.encoder.resolution))
# Verify the range surrounds this scaled val
self.assertGreaterEqual(topDown.value, minTopDown)
self.assertLessEqual(topDown.value, maxTopDown)
# Test bucket support
bucketIndices = le.getBucketIndices(value)
topDown = le.getBucketInfo(bucketIndices)[0]
# Verify our reconstructed value is in the valid range
self.assertGreaterEqual(topDown.value, minTopDown)
self.assertLessEqual(topDown.value, maxTopDown)
# Same for the scalar value
self.assertGreaterEqual(topDown.scalar, minTopDown)
self.assertLessEqual(topDown.scalar, maxTopDown)
# That the encoding portion of our EncoderResult matched the result of
# encode()
self.assertTrue(numpy.array_equal(topDown.encoding, output))
# Verify our reconstructed value is the same as the bucket value
bucketValues = le.getBucketValues()
self.assertEqual(topDown.value,
bucketValues[bucketIndices[0]])
# Next value
scaledVal += le.encoder.resolution / 4.0
value = math.pow(10, scaledVal)
# Verify next power of 10 encoding
output = le.encode(100)
# increase of 2 decades = 20 decibels
# bit 0, 1 are padding; bit 3 is 1, ..., bit 22 is 20 (23rd bit)
expected = 20 * [0] + [1, 1, 1, 1, 1] + 20 * [0]
expected = numpy.array(expected, dtype="uint8")
self.assertTrue(numpy.array_equal(output, expected))
# Test reverse lookup
decoded = le.decode(output)
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
self.assertTrue(numpy.array_equal(ranges[0], [100, 100]))
# Verify next power of 10 encoding
output = le.encode(10000)
expected = 40 * [0] + [1, 1, 1, 1, 1]
expected = numpy.array(expected, dtype="uint8")
self.assertTrue(numpy.array_equal(output, expected))
# Test reverse lookup
decoded = le.decode(output)
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
self.assertTrue(numpy.array_equal(ranges[0], [10000, 10000]))
def testGetBucketValues(self):
"""
Verify that the values of buckets are as expected for given
init params
"""
# Create the encoder
le = LogEncoder(w=5,
resolution=0.1,
minval=1,
maxval=10000,
name="amount",
forced=True)
# Build our expected values
inc = 0.1
exp = 0
expected = []
# Incrementing to exactly 4.0 runs into fp issues
while exp <= 4.0001:
val = 10 ** exp
expected.append(val)
exp += inc
expected = numpy.array(expected)
actual = numpy.array(le.getBucketValues())
numpy.testing.assert_almost_equal(expected, actual, 7)
def testInitWithRadius(self):
"""
Verifies you can use radius to specify a log encoder
"""
# Create the encoder
le = LogEncoder(w=1,
radius=1,
minval=1,
maxval=10000,
name="amount",
forced=True)
self.assertEqual(le.encoder.n, 5)
    # Verify a couple of powers of 10 are encoded as expected
value = 1.0
output = le.encode(value)
expected = [1, 0, 0, 0, 0]
# Convert to numpy array
expected = numpy.array(expected, dtype="uint8")
self.assertTrue(numpy.array_equal(output, expected))
value = 100.0
output = le.encode(value)
expected = [0, 0, 1, 0, 0]
# Convert to numpy array
expected = numpy.array(expected, dtype="uint8")
self.assertTrue(numpy.array_equal(output, expected))
def testInitWithN(self):
"""
Verifies you can use N to specify a log encoder
"""
# Create the encoder
n = 100
le = LogEncoder(n=n, forced=True)
self.assertEqual(le.encoder.n, n)
def testMinvalMaxVal(self):
"""
Verifies unusual instances of minval and maxval are handled properly
"""
self.assertRaises(ValueError, LogEncoder, n=100, minval=0, maxval=-100,
forced=True)
self.assertRaises(ValueError, LogEncoder, n=100, minval=0, maxval=1e-07,
forced=True)
le = LogEncoder(n=100, minval=42, maxval=1.3e12, forced=True)
expectedRadius = 0.552141792732
expectedResolution = 0.110428358546
self.assertAlmostEqual(le.encoder.radius, expectedRadius)
self.assertAlmostEqual(le.encoder.resolution, expectedResolution)
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testReadWrite(self):
le = LogEncoder(w=5,
resolution=0.1,
minval=1,
maxval=10000,
name="amount",
forced=True)
originalValue = le.encode(1.0)
proto1 = LogEncoderProto.new_message()
le.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = LogEncoderProto.read(f)
encoder = LogEncoder.read(proto2)
self.assertIsInstance(encoder, LogEncoder)
self.assertEqual(encoder.minScaledValue, le.minScaledValue)
self.assertEqual(encoder.maxScaledValue, le.maxScaledValue)
self.assertEqual(encoder.minval, le.minval)
self.assertEqual(encoder.maxval, le.maxval)
self.assertEqual(encoder.name, le.name)
self.assertEqual(encoder.verbosity, le.verbosity)
self.assertEqual(encoder.clipInput, le.clipInput)
self.assertEqual(encoder.width, le.width)
self.assertEqual(encoder.description, le.description)
self.assertIsInstance(encoder.encoder, ScalarEncoder)
self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue))
self.assertEqual(le.decode(encoder.encode(1)),
encoder.decode(le.encode(1)))
# Feed in a new value and ensure the encodings match
result1 = le.encode(10)
result2 = encoder.encode(10)
self.assertTrue(numpy.array_equal(result1, result2))
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 3,334,153,091,080,537,600 | 31.495385 | 80 | 0.633179 | false |
wgergely/After-Effects | Maya/standalone.py | 1 | 2206 | # -*- coding: utf-8 -*-
# pylint: disable=E1101, I1101, C0103, C0301, R0913, E0401, C0413
"""Maya standalone context."""
import os
import sys
MAYA_LOCATION = r'C:\Program Files\Autodesk\Maya2018'
MAYA_BIN = r'C:\Program Files\Autodesk\Maya2018\bin'
MTOA_EXTENSIONS_PATH = r'C:\solidangle\mtoadeploy\2018\extensions'
QTDIR = r'C:\Python27\Lib\site-packages\PySide2'
QT_QPA_PLATFORM_PLUGIN_PATH = r'C:\Program Files\Autodesk\Maya2018\qt-plugins\platforms'
PYTHON_DLLS = r'C:\Program Files\Autodesk\Maya2018\Python\DLLs'
PYTHON_PACKAGES = r'C:\Program Files\Autodesk\Maya2018\Python\Lib\site-packages'
PYTHON_ROOT = r'C:\Program Files\Autodesk\Maya2018\Python'
MAYA_PLUGINS = (
'AbcExport',
'AbcImport',
'MayaMuscle',
'AbcImport',
'deformerEvaluator',
'OneClick',
'objExport',
'GPUBuiltInDeformer',
'stereoCamera',
'Unfold3D',
'fbxmaya',
'modelingToolkit',
# 'renderSetup', # I don't seem to be able to import this module. Keeps crashing :/
)
MEL_SCRIPTS = (
# 'defaultRunTimeCommands.res.mel', # sourced automatically
# 'defaultRunTimeCommands.mel', # sourced automatically
'createPreferencesOptVars.mel',
'initAddAttr.mel',
'createGlobalOptVars.mel',
'initialStartup.mel',
# 'initialPlugins.mel',
'namedCommandSetup.mel',
)
os.environ['MAYA_LOCATION'] = MAYA_LOCATION
os.environ['PYMEL_SKIP_MEL_INIT'] = '0'
os.environ['MAYA_SKIP_USERSETUP_PY'] = '1'
os.environ["PATH"] = MAYA_LOCATION + os.pathsep + os.environ['PATH']
os.environ["PATH"] = MAYA_BIN + os.pathsep + os.environ['PATH']
sys.path.insert(0, MAYA_BIN)
sys.path.insert(0, PYTHON_DLLS)
sys.path.insert(0, PYTHON_PACKAGES)
sys.path.insert(0, PYTHON_ROOT)
def initialize():
    """Loads the development environment needed to
    test and build extensions for maya."""
    import maya.standalone
    maya.standalone.initialize(name='python')
    from maya import cmds as MayaCmds
    from maya.mel import eval as MelEval
    # Smoke test: load the Alembic exporter so its `AbcExport` command
    # resolves, then invoke it to verify plug-in commands are callable.
    MayaCmds.loadPlugin('AbcExport', quiet=True)
    MayaCmds.AbcExport('-jobArg')
    # for script in MEL_SCRIPTS:
    #     MelEval('source "{}"'.format(script))
    # for plugin in MAYA_PLUGINS:
    #     MayaCmds.loadPlugin(plugin, quiet=True)
initialize()
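# Illustrative usage from another script (a hedged sketch; assumes this file
# is importable as `standalone` from the Maya/ directory):
#   import standalone            # bootstraps maya.standalone on import
#   from maya import cmds
#   print cmds.about(version=True)   # regular Maya commands now work headless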
| mit | 6,119,666,044,479,041,000 | 28.026316 | 88 | 0.693563 | false |
stefanv/aandete | app/lib/formencode/validators.py | 1 | 108132 | ## FormEncode, a Form processor
## Copyright (C) 2003, Ian Bicking <[email protected]>
"""
Validator/Converters for use with FormEncode.
"""
import cgi
import locale
import re
import warnings
from encodings import idna
try: # import dnspython
import dns.resolver
import dns.exception
except (IOError, ImportError):
have_dns = False
else:
have_dns = True
# These are only imported when needed
httplib = None
random = None
sha1 = None
socket = None
urlparse = None
from .api import (FancyValidator, Identity, Invalid, NoDefault, Validator,
deprecation_warning, is_empty)
assert Identity and Invalid and NoDefault # silence unused import warnings
# Dummy i18n translation function, nothing is translated here.
# Instead this is actually done in api.message.
# The surrounding _('string') of the strings is only for extracting
# the strings automatically.
# If you run pygettext with this source comment this function out temporarily.
_ = lambda s: s
############################################################
## Utility methods
############################################################
# These all deal with accepting both datetime and mxDateTime modules and types
datetime_module = None
mxDateTime_module = None
def import_datetime(module_type):
global datetime_module, mxDateTime_module
module_type = module_type.lower() if module_type else 'datetime'
if module_type == 'datetime':
if datetime_module is None:
import datetime as datetime_module
return datetime_module
elif module_type == 'mxdatetime':
if mxDateTime_module is None:
from mx import DateTime as mxDateTime_module
return mxDateTime_module
else:
raise ImportError('Invalid datetime module %r' % module_type)
def datetime_now(module):
if module.__name__ == 'datetime':
return module.datetime.now()
else:
return module.now()
def datetime_makedate(module, year, month, day):
if module.__name__ == 'datetime':
return module.date(year, month, day)
else:
try:
return module.DateTime(year, month, day)
except module.RangeError as e:
raise ValueError(str(e))
def datetime_time(module):
if module.__name__ == 'datetime':
return module.time
else:
return module.Time
def datetime_isotime(module):
if module.__name__ == 'datetime':
return module.time.isoformat
else:
return module.ISO.Time
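# Illustrative use of the shims above (a hedged sketch; mirrors how
# DateValidator below calls them, shown for the standard datetime module):
#   dt_mod = import_datetime('datetime')   # lazy import of the module
#   now = datetime_now(dt_mod)             # datetime.datetime.now()
#   today = datetime_makedate(dt_mod, now.year, now.month, now.day)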
############################################################
## Wrapper Validators
############################################################
class ConfirmType(FancyValidator):
"""
Confirms that the input/output is of the proper type.
Uses the parameters:
subclass:
The class or a tuple of classes; the item must be an instance
of the class or a subclass.
type:
A type or tuple of types (or classes); the item must be of
the exact class or type. Subclasses are not allowed.
Examples::
>>> cint = ConfirmType(subclass=int)
>>> cint.to_python(True)
True
>>> cint.to_python('1')
Traceback (most recent call last):
...
Invalid: '1' is not a subclass of <type 'int'>
>>> cintfloat = ConfirmType(subclass=(float, int))
>>> cintfloat.to_python(1.0), cintfloat.from_python(1.0)
(1.0, 1.0)
>>> cintfloat.to_python(1), cintfloat.from_python(1)
(1, 1)
>>> cintfloat.to_python(None)
Traceback (most recent call last):
...
Invalid: None is not a subclass of one of the types <type 'float'>, <type 'int'>
>>> cint2 = ConfirmType(type=int)
>>> cint2(accept_python=False).from_python(True)
Traceback (most recent call last):
...
Invalid: True must be of the type <type 'int'>
"""
accept_iterator = True
subclass = None
type = None
messages = dict(
subclass=_('%(object)r is not a subclass of %(subclass)s'),
inSubclass=_('%(object)r is not a subclass of one of the types %(subclassList)s'),
inType=_('%(object)r must be one of the types %(typeList)s'),
type=_('%(object)r must be of the type %(type)s'))
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if self.subclass:
if isinstance(self.subclass, list):
self.subclass = tuple(self.subclass)
elif not isinstance(self.subclass, tuple):
self.subclass = (self.subclass,)
self._validate_python = self.confirm_subclass
if self.type:
if isinstance(self.type, list):
self.type = tuple(self.type)
elif not isinstance(self.type, tuple):
self.type = (self.type,)
self._validate_python = self.confirm_type
def confirm_subclass(self, value, state):
if not isinstance(value, self.subclass):
if len(self.subclass) == 1:
msg = self.message('subclass', state, object=value,
subclass=self.subclass[0])
else:
subclass_list = ', '.join(map(str, self.subclass))
msg = self.message('inSubclass', state, object=value,
subclassList=subclass_list)
raise Invalid(msg, value, state)
def confirm_type(self, value, state):
for t in self.type:
if type(value) is t:
break
else:
if len(self.type) == 1:
msg = self.message('type', state, object=value,
type=self.type[0])
else:
msg = self.message('inType', state, object=value,
typeList=', '.join(map(str, self.type)))
raise Invalid(msg, value, state)
return value
def is_empty(self, value):
return False
class Wrapper(FancyValidator):
"""
Used to convert functions to validator/converters.
You can give a simple function for `_convert_to_python`,
`_convert_from_python`, `_validate_python` or `_validate_other`.
If that function raises an exception, the value is considered invalid.
Whatever value the function returns is considered the converted value.
    Unlike validators, the `state` argument is not used. Functions
    like `int` that take a single argument can be used here.
Note that as Wrapper will generate a FancyValidator, empty
values (those who pass ``FancyValidator.is_empty)`` will return ``None``.
To override this behavior you can use ``Wrapper(empty_value=callable)``.
For example passing ``Wrapper(empty_value=lambda val: val)`` will return
the value itself when is considered empty.
Examples::
>>> def downcase(v):
... return v.lower()
>>> wrap = Wrapper(convert_to_python=downcase)
>>> wrap.to_python('This')
'this'
>>> wrap.from_python('This')
'This'
>>> wrap.to_python('') is None
True
>>> wrap2 = Wrapper(
... convert_from_python=downcase, empty_value=lambda value: value)
>>> wrap2.from_python('This')
'this'
>>> wrap2.to_python('')
''
>>> wrap2.from_python(1)
Traceback (most recent call last):
...
Invalid: 'int' object has no attribute 'lower'
>>> wrap3 = Wrapper(validate_python=int)
>>> wrap3.to_python('1')
'1'
>>> wrap3.to_python('a') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
Invalid: invalid literal for int()...
"""
func_convert_to_python = None
func_convert_from_python = None
func_validate_python = None
func_validate_other = None
_deprecated_methods = (
('func_to_python', 'func_convert_to_python'),
('func_from_python', 'func_convert_from_python'))
def __init__(self, *args, **kw):
# allow old method names as parameters
if 'to_python' in kw and 'convert_to_python' not in kw:
kw['convert_to_python'] = kw.pop('to_python')
if 'from_python' in kw and 'convert_from_python' not in kw:
kw['convert_from_python'] = kw.pop('from_python')
for n in ('convert_to_python', 'convert_from_python',
'validate_python', 'validate_other'):
if n in kw:
kw['func_%s' % n] = kw.pop(n)
FancyValidator.__init__(self, *args, **kw)
self._convert_to_python = self.wrap(self.func_convert_to_python)
self._convert_from_python = self.wrap(self.func_convert_from_python)
self._validate_python = self.wrap(self.func_validate_python)
self._validate_other = self.wrap(self.func_validate_other)
def wrap(self, func):
if not func:
return None
def result(value, state, func=func):
try:
return func(value)
except Exception as e:
raise Invalid(str(e), value, state)
return result
class Constant(FancyValidator):
"""
This converter converts everything to the same thing.
I.e., you pass in the constant value when initializing, then all
values get converted to that constant value.
This is only really useful for funny situations, like::
# Any evaluates sub validators in reverse order for to_python
fromEmailValidator = Any(
Constant('unknown@localhost'),
Email())
    In this case, if the email is not valid,
``'unknown@localhost'`` will be used instead. Of course, you
could use ``if_invalid`` instead.
Examples::
>>> Constant('X').to_python('y')
'X'
"""
__unpackargs__ = ('value',)
def _convert_to_python(self, value, state):
return self.value
_convert_from_python = _convert_to_python
############################################################
## Normal validators
############################################################
class MaxLength(FancyValidator):
"""
Invalid if the value is longer than `maxLength`. Uses len(),
so it can work for strings, lists, or anything with length.
Examples::
>>> max5 = MaxLength(5)
>>> max5.to_python('12345')
'12345'
>>> max5.from_python('12345')
'12345'
>>> max5.to_python('123456')
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5(accept_python=False).from_python('123456')
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.to_python([1, 2, 3])
[1, 2, 3]
>>> max5.to_python([1, 2, 3, 4, 5, 6])
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.to_python(5)
Traceback (most recent call last):
...
Invalid: Invalid value (value with length expected)
"""
__unpackargs__ = ('maxLength',)
messages = dict(
tooLong=_('Enter a value less than %(maxLength)i characters long'),
invalid=_('Invalid value (value with length expected)'))
def _validate_python(self, value, state):
try:
if value and len(value) > self.maxLength:
raise Invalid(
self.message('tooLong', state,
maxLength=self.maxLength), value, state)
else:
return None
except TypeError:
raise Invalid(
self.message('invalid', state), value, state)
class MinLength(FancyValidator):
"""
Invalid if the value is shorter than `minlength`. Uses len(), so
it can work for strings, lists, or anything with length. Note
that you **must** use ``not_empty=True`` if you don't want to
accept empty values -- empty values are not tested for length.
Examples::
>>> min5 = MinLength(5)
>>> min5.to_python('12345')
'12345'
>>> min5.from_python('12345')
'12345'
>>> min5.to_python('1234')
Traceback (most recent call last):
...
Invalid: Enter a value at least 5 characters long
>>> min5(accept_python=False).from_python('1234')
Traceback (most recent call last):
...
Invalid: Enter a value at least 5 characters long
>>> min5.to_python([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> min5.to_python([1, 2, 3])
Traceback (most recent call last):
...
Invalid: Enter a value at least 5 characters long
>>> min5.to_python(5)
Traceback (most recent call last):
...
Invalid: Invalid value (value with length expected)
"""
__unpackargs__ = ('minLength',)
messages = dict(
tooShort=_('Enter a value at least %(minLength)i characters long'),
invalid=_('Invalid value (value with length expected)'))
def _validate_python(self, value, state):
try:
if len(value) < self.minLength:
raise Invalid(
self.message('tooShort', state,
minLength=self.minLength), value, state)
except TypeError:
raise Invalid(
self.message('invalid', state), value, state)
class NotEmpty(FancyValidator):
"""
Invalid if value is empty (empty string, empty list, etc).
Generally for objects that Python considers false, except zero
which is not considered invalid.
Examples::
>>> ne = NotEmpty(messages=dict(empty='enter something'))
>>> ne.to_python('')
Traceback (most recent call last):
...
Invalid: enter something
>>> ne.to_python(0)
0
"""
not_empty = True
messages = dict(
empty=_('Please enter a value'))
def _validate_python(self, value, state):
if value == 0:
# This isn't "empty" for this definition.
return value
if not value:
raise Invalid(self.message('empty', state), value, state)
class Empty(FancyValidator):
"""
Invalid unless the value is empty. Use cleverly, if at all.
Examples::
>>> Empty.to_python(0)
Traceback (most recent call last):
...
Invalid: You cannot enter a value here
"""
messages = dict(
notEmpty=_('You cannot enter a value here'))
def _validate_python(self, value, state):
if value or value == 0:
raise Invalid(self.message('notEmpty', state), value, state)
class Regex(FancyValidator):
"""
Invalid if the value doesn't match the regular expression `regex`.
The regular expression can be a compiled re object, or a string
which will be compiled for you.
Use strip=True if you want to strip the value before validation,
and as a form of conversion (often useful).
Examples::
>>> cap = Regex(r'^[A-Z]+$')
>>> cap.to_python('ABC')
'ABC'
Note that ``.from_python()`` calls (in general) do not validate
the input::
>>> cap.from_python('abc')
'abc'
>>> cap(accept_python=False).from_python('abc')
Traceback (most recent call last):
...
Invalid: The input is not valid
>>> cap.to_python(1)
Traceback (most recent call last):
...
Invalid: The input must be a string (not a <type 'int'>: 1)
>>> Regex(r'^[A-Z]+$', strip=True).to_python(' ABC ')
'ABC'
>>> Regex(r'this', regexOps=('I',)).to_python('THIS')
'THIS'
"""
regexOps = ()
strip = False
regex = None
__unpackargs__ = ('regex',)
messages = dict(
invalid=_('The input is not valid'))
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if isinstance(self.regex, basestring):
ops = 0
assert not isinstance(self.regexOps, basestring), (
"regexOps should be a list of options from the re module "
"(names, or actual values)")
for op in self.regexOps:
if isinstance(op, basestring):
ops |= getattr(re, op)
else:
ops |= op
self.regex = re.compile(self.regex, ops)
def _validate_python(self, value, state):
self.assert_string(value, state)
if self.strip and isinstance(value, basestring):
value = value.strip()
if not self.regex.search(value):
raise Invalid(self.message('invalid', state), value, state)
def _convert_to_python(self, value, state):
if self.strip and isinstance(value, basestring):
return value.strip()
return value
class PlainText(Regex):
"""
Test that the field contains only letters, numbers, underscore,
and the hyphen. Subclasses Regex.
Examples::
>>> PlainText.to_python('_this9_')
'_this9_'
>>> PlainText.from_python(' this ')
' this '
>>> PlainText(accept_python=False).from_python(' this ')
Traceback (most recent call last):
...
Invalid: Enter only letters, numbers, or _ (underscore)
>>> PlainText(strip=True).to_python(' this ')
'this'
>>> PlainText(strip=True).from_python(' this ')
'this'
"""
regex = r"^[a-zA-Z_\-0-9]*$"
messages = dict(
invalid=_('Enter only letters, numbers, or _ (underscore)'))
class OneOf(FancyValidator):
"""
Tests that the value is one of the members of a given list.
If ``testValueList=True``, then if the input value is a list or
tuple, all the members of the sequence will be checked (i.e., the
input must be a subset of the allowed values).
Use ``hideList=True`` to keep the list of valid values out of the
error message in exceptions.
Examples::
>>> oneof = OneOf([1, 2, 3])
>>> oneof.to_python(1)
1
>>> oneof.to_python(4)
Traceback (most recent call last):
...
Invalid: Value must be one of: 1; 2; 3 (not 4)
>>> oneof(testValueList=True).to_python([2, 3, [1, 2, 3]])
[2, 3, [1, 2, 3]]
>>> oneof.to_python([2, 3, [1, 2, 3]])
Traceback (most recent call last):
...
Invalid: Value must be one of: 1; 2; 3 (not [2, 3, [1, 2, 3]])
"""
list = None
testValueList = False
hideList = False
__unpackargs__ = ('list',)
messages = dict(
invalid=_('Invalid value'),
notIn=_('Value must be one of: %(items)s (not %(value)r)'))
def _validate_python(self, value, state):
if self.testValueList and isinstance(value, (list, tuple)):
for v in value:
self._validate_python(v, state)
else:
if not value in self.list:
if self.hideList:
raise Invalid(self.message('invalid', state), value, state)
else:
try:
items = '; '.join(map(str, self.list))
except UnicodeError:
items = '; '.join(map(unicode, self.list))
raise Invalid(
self.message('notIn', state,
items=items, value=value), value, state)
@property
def accept_iterator(self):
return self.testValueList
class DictConverter(FancyValidator):
"""
Converts values based on a dictionary which has values as keys for
the resultant values.
If ``allowNull`` is passed, it will not balk if a false value
(e.g., '' or None) is given (it will return None in these cases).
to_python takes keys and gives values, from_python takes values and
gives keys.
If you give hideDict=True, then the contents of the dictionary
will not show up in error messages.
Examples::
>>> dc = DictConverter({1: 'one', 2: 'two'})
>>> dc.to_python(1)
'one'
>>> dc.from_python('one')
1
>>> dc.to_python(3)
Traceback (most recent call last):
....
Invalid: Enter a value from: 1; 2
>>> dc2 = dc(hideDict=True)
>>> dc2.hideDict
True
>>> dc2.dict
{1: 'one', 2: 'two'}
>>> dc2.to_python(3)
Traceback (most recent call last):
....
Invalid: Choose something
>>> dc.from_python('three')
Traceback (most recent call last):
....
Invalid: Nothing in my dictionary goes by the value 'three'. Choose one of: 'one'; 'two'
"""
messages = dict(
keyNotFound=_('Choose something'),
chooseKey=_('Enter a value from: %(items)s'),
valueNotFound=_('That value is not known'),
chooseValue=_('Nothing in my dictionary goes by the value %(value)s.'
' Choose one of: %(items)s'))
dict = None
hideDict = False
__unpackargs__ = ('dict',)
def _convert_to_python(self, value, state):
try:
return self.dict[value]
except KeyError:
if self.hideDict:
raise Invalid(self.message('keyNotFound', state), value, state)
else:
items = sorted(self.dict)
items = '; '.join(map(repr, items))
raise Invalid(self.message('chooseKey',
state, items=items), value, state)
def _convert_from_python(self, value, state):
for k, v in self.dict.iteritems():
if value == v:
return k
if self.hideDict:
raise Invalid(self.message('valueNotFound', state), value, state)
else:
items = '; '.join(map(repr, self.dict.itervalues()))
raise Invalid(
self.message('chooseValue', state,
value=repr(value), items=items), value, state)
class IndexListConverter(FancyValidator):
"""
Converts a index (which may be a string like '2') to the value in
the given list.
Examples::
>>> index = IndexListConverter(['zero', 'one', 'two'])
>>> index.to_python(0)
'zero'
>>> index.from_python('zero')
0
>>> index.to_python('1')
'one'
>>> index.to_python(5)
Traceback (most recent call last):
Invalid: Index out of range
>>> index(not_empty=True).to_python(None)
Traceback (most recent call last):
Invalid: Please enter a value
>>> index.from_python('five')
Traceback (most recent call last):
Invalid: Item 'five' was not found in the list
"""
list = None
__unpackargs__ = ('list',)
messages = dict(
integer=_('Must be an integer index'),
outOfRange=_('Index out of range'),
notFound=_('Item %(value)s was not found in the list'))
def _convert_to_python(self, value, state):
try:
value = int(value)
except (ValueError, TypeError):
raise Invalid(self.message('integer', state), value, state)
try:
return self.list[value]
except IndexError:
raise Invalid(self.message('outOfRange', state), value, state)
def _convert_from_python(self, value, state):
for i, v in enumerate(self.list):
if v == value:
return i
raise Invalid(
self.message('notFound', state, value=repr(value)), value, state)
class DateValidator(FancyValidator):
"""
Validates that a date is within the given range. Be sure to call
DateConverter first if you aren't expecting mxDateTime input.
``earliest_date`` and ``latest_date`` may be functions; if so,
they will be called each time before validating.
``after_now`` means a time after the current timestamp; note that
just a few milliseconds before now is invalid! ``today_or_after``
is more permissive, and ignores hours and minutes.
Examples::
>>> from datetime import datetime, timedelta
>>> d = DateValidator(earliest_date=datetime(2003, 1, 1))
>>> d.to_python(datetime(2004, 1, 1))
datetime.datetime(2004, 1, 1, 0, 0)
>>> d.to_python(datetime(2002, 1, 1))
Traceback (most recent call last):
...
Invalid: Date must be after Wednesday, 01 January 2003
>>> d.to_python(datetime(2003, 1, 1))
datetime.datetime(2003, 1, 1, 0, 0)
>>> d = DateValidator(after_now=True)
>>> now = datetime.now()
>>> d.to_python(now+timedelta(seconds=5)) == now+timedelta(seconds=5)
True
>>> d.to_python(now-timedelta(days=1))
Traceback (most recent call last):
...
Invalid: The date must be sometime in the future
>>> d.to_python(now+timedelta(days=1)) > now
True
>>> d = DateValidator(today_or_after=True)
>>> d.to_python(now) == now
True
"""
earliest_date = None
latest_date = None
after_now = False
# Like after_now, but just after this morning:
today_or_after = False
# Use None or 'datetime' for the datetime module in the standard lib,
# or 'mxDateTime' to force the mxDateTime module
datetime_module = None
messages = dict(
after=_('Date must be after %(date)s'),
before=_('Date must be before %(date)s'),
# Double %'s, because this will be substituted twice:
date_format=_('%%A, %%d %%B %%Y'),
future=_('The date must be sometime in the future'))
def _validate_python(self, value, state):
date_format = self.message('date_format', state)
if (str is not unicode # Python 2
and isinstance(date_format, unicode)):
# strftime uses the locale encoding, not Unicode
encoding = locale.getlocale(locale.LC_TIME)[1] or 'utf-8'
date_format = date_format.encode(encoding)
else:
encoding = None
if self.earliest_date:
if callable(self.earliest_date):
earliest_date = self.earliest_date()
else:
earliest_date = self.earliest_date
if value < earliest_date:
date_formatted = earliest_date.strftime(date_format)
if encoding:
date_formatted = date_formatted.decode(encoding)
raise Invalid(
self.message('after', state, date=date_formatted),
value, state)
if self.latest_date:
if callable(self.latest_date):
latest_date = self.latest_date()
else:
latest_date = self.latest_date
if value > latest_date:
date_formatted = latest_date.strftime(date_format)
if encoding:
date_formatted = date_formatted.decode(encoding)
raise Invalid(
self.message('before', state, date=date_formatted),
value, state)
if self.after_now:
dt_mod = import_datetime(self.datetime_module)
now = datetime_now(dt_mod)
if value < now:
date_formatted = now.strftime(date_format)
if encoding:
date_formatted = date_formatted.decode(encoding)
raise Invalid(
self.message('future', state, date=date_formatted),
value, state)
if self.today_or_after:
dt_mod = import_datetime(self.datetime_module)
now = datetime_now(dt_mod)
today = datetime_makedate(dt_mod,
now.year, now.month, now.day)
value_as_date = datetime_makedate(
dt_mod, value.year, value.month, value.day)
if value_as_date < today:
date_formatted = now.strftime(date_format)
if encoding:
date_formatted = date_formatted.decode(encoding)
raise Invalid(
self.message('future', state, date=date_formatted),
value, state)
class Bool(FancyValidator):
"""
Always Valid, returns True or False based on the value and the
    existence of the value.
If you want to convert strings like ``'true'`` to booleans, then
use ``StringBool``.
Examples::
>>> Bool.to_python(0)
False
>>> Bool.to_python(1)
True
>>> Bool.to_python('')
False
>>> Bool.to_python(None)
False
"""
if_missing = False
def _convert_to_python(self, value, state):
return bool(value)
_convert_from_python = _convert_to_python
def empty_value(self, value):
return False
class RangeValidator(FancyValidator):
"""This is an abstract base class for Int and Number.
It verifies that a value is within range. It accepts min and max
values in the constructor.
(Since this is an abstract base class, the tests are in Int and Number.)
"""
messages = dict(
tooLow=_('Please enter a number that is %(min)s or greater'),
tooHigh=_('Please enter a number that is %(max)s or smaller'))
min = None
max = None
def _validate_python(self, value, state):
if self.min is not None:
if value < self.min:
msg = self.message('tooLow', state, min=self.min)
raise Invalid(msg, value, state)
if self.max is not None:
if value > self.max:
msg = self.message('tooHigh', state, max=self.max)
raise Invalid(msg, value, state)
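# Illustrative sketch (hypothetical, not part of the API): RangeValidator
# only checks bounds, so a subclass merely adds the conversion step
# (error handling omitted):
#
#   class Percentage(RangeValidator):
#       min = 0
#       max = 100
#       def _convert_to_python(self, value, state):
#           return int(value)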
class Int(RangeValidator):
"""Convert a value to an integer.
Example::
>>> Int.to_python('10')
10
>>> Int.to_python('ten')
Traceback (most recent call last):
...
Invalid: Please enter an integer value
>>> Int(min=5).to_python('6')
6
>>> Int(max=10).to_python('11')
Traceback (most recent call last):
...
Invalid: Please enter a number that is 10 or smaller
"""
messages = dict(
integer=_('Please enter an integer value'))
def _convert_to_python(self, value, state):
try:
return int(value)
except (ValueError, TypeError):
raise Invalid(self.message('integer', state), value, state)
_convert_from_python = _convert_to_python
class Number(RangeValidator):
"""Convert a value to a float or integer.
Tries to convert it to an integer if no information is lost.
Example::
>>> Number.to_python('10')
10
>>> Number.to_python('10.5')
10.5
>>> Number.to_python('ten')
Traceback (most recent call last):
...
Invalid: Please enter a number
>>> Number.to_python([1.2])
Traceback (most recent call last):
...
Invalid: Please enter a number
>>> Number(min=5).to_python('6.5')
6.5
>>> Number(max=10.5).to_python('11.5')
Traceback (most recent call last):
...
Invalid: Please enter a number that is 10.5 or smaller
"""
messages = dict(
number=_('Please enter a number'))
def _convert_to_python(self, value, state):
try:
value = float(value)
try:
int_value = int(value)
except OverflowError:
int_value = None
if value == int_value:
return int_value
return value
except (ValueError, TypeError):
raise Invalid(self.message('number', state), value, state)
class ByteString(FancyValidator):
"""Convert to byte string, treating empty things as the empty string.
Under Python 2.x you can also use the alias `String` for this validator.
Also takes a `max` and `min` argument, and the string length must fall
in that range.
Also you may give an `encoding` argument, which will encode any unicode
that is found. Lists and tuples are joined with `list_joiner`
(default ``', '``) in ``from_python``.
::
>>> ByteString(min=2).to_python('a')
Traceback (most recent call last):
...
Invalid: Enter a value 2 characters long or more
>>> ByteString(max=10).to_python('xxxxxxxxxxx')
Traceback (most recent call last):
...
Invalid: Enter a value not more than 10 characters long
>>> ByteString().from_python(None)
''
>>> ByteString().from_python([])
''
>>> ByteString().to_python(None)
''
>>> ByteString(min=3).to_python(None)
Traceback (most recent call last):
...
Invalid: Please enter a value
>>> ByteString(min=1).to_python('')
Traceback (most recent call last):
...
Invalid: Please enter a value
"""
min = None
max = None
not_empty = None
encoding = None
list_joiner = ', '
messages = dict(
tooLong=_('Enter a value not more than %(max)i characters long'),
tooShort=_('Enter a value %(min)i characters long or more'))
def __initargs__(self, new_attrs):
if self.not_empty is None and self.min:
self.not_empty = True
def _convert_to_python(self, value, state):
if value is None:
value = ''
elif not isinstance(value, basestring):
try:
value = bytes(value)
except UnicodeEncodeError:
value = unicode(value)
if self.encoding is not None and isinstance(value, unicode):
value = value.encode(self.encoding)
return value
def _convert_from_python(self, value, state):
if value is None:
value = ''
elif not isinstance(value, basestring):
if isinstance(value, (list, tuple)):
value = self.list_joiner.join(
self._convert_from_python(v, state) for v in value)
try:
value = str(value)
except UnicodeEncodeError:
value = unicode(value)
if self.encoding is not None and isinstance(value, unicode):
value = value.encode(self.encoding)
if self.strip:
value = value.strip()
return value
def _validate_other(self, value, state):
if self.max is None and self.min is None:
return
if value is None:
value = ''
elif not isinstance(value, basestring):
try:
value = str(value)
except UnicodeEncodeError:
value = unicode(value)
if self.max is not None and len(value) > self.max:
raise Invalid(
self.message('tooLong', state, max=self.max), value, state)
if self.min is not None and len(value) < self.min:
raise Invalid(
self.message('tooShort', state, min=self.min), value, state)
def empty_value(self, value):
return ''
class UnicodeString(ByteString):
"""Convert things to unicode string.
This is implemented as a specialization of the ByteString class.
Under Python 3.x you can also use the alias `String` for this validator.
In addition to the String arguments, an encoding argument is also
accepted. By default the encoding will be utf-8. You can overwrite
this using the encoding parameter. You can also set inputEncoding
and outputEncoding differently. An inputEncoding of None means
"do not decode", an outputEncoding of None means "do not encode".
All converted strings are returned as Unicode strings.
::
>>> UnicodeString().to_python(None)
u''
>>> UnicodeString().to_python([])
u''
>>> UnicodeString(encoding='utf-7').to_python('Ni Ni Ni')
u'Ni Ni Ni'
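Byte strings are decoded using ``inputEncoding`` (UTF-8 by default)::
>>> UnicodeString().to_python('Ni')
u'Ni'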
"""
encoding = 'utf-8'
inputEncoding = NoDefault
outputEncoding = NoDefault
messages = dict(
badEncoding=_('Invalid data or incorrect encoding'))
def __init__(self, **kw):
ByteString.__init__(self, **kw)
if self.inputEncoding is NoDefault:
self.inputEncoding = self.encoding
if self.outputEncoding is NoDefault:
self.outputEncoding = self.encoding
def _convert_to_python(self, value, state):
if not value:
return u''
if isinstance(value, unicode):
return value
if not isinstance(value, unicode):
if hasattr(value, '__unicode__'):
value = unicode(value)
return value
if not (unicode is str # Python 3
and isinstance(value, bytes) and self.inputEncoding):
value = str(value)
if self.inputEncoding and not isinstance(value, unicode):
try:
value = unicode(value, self.inputEncoding)
except UnicodeDecodeError:
raise Invalid(self.message('badEncoding', state), value, state)
except TypeError:
raise Invalid(
self.message('badType', state,
type=type(value), value=value), value, state)
return value
def _convert_from_python(self, value, state):
if not isinstance(value, unicode):
if hasattr(value, '__unicode__'):
value = unicode(value)
else:
value = str(value)
if self.outputEncoding and isinstance(value, unicode):
value = value.encode(self.outputEncoding)
return value
def empty_value(self, value):
return u''
# Provide proper alias for native strings
String = UnicodeString if str is unicode else ByteString
class Set(FancyValidator):
"""
This is for when you think you may return multiple values for a
certain field.
This way the result will always be a list, even if there's only
one result. It's equivalent to ForEach(convert_to_list=True).
If you give ``use_set=True``, then it will return an actual
``set`` object.
::
>>> Set.to_python(None)
[]
>>> Set.to_python('this')
['this']
>>> Set.to_python(('this', 'that'))
['this', 'that']
>>> s = Set(use_set=True)
>>> s.to_python(None)
set([])
>>> s.to_python('this')
set(['this'])
>>> s.to_python(('this',))
set(['this'])
"""
use_set = False
if_missing = ()
accept_iterator = True
def _convert_to_python(self, value, state):
if self.use_set:
if isinstance(value, set):
return value
elif isinstance(value, (list, tuple)):
return set(value)
elif value is None:
return set()
else:
return set([value])
else:
if isinstance(value, list):
return value
elif isinstance(value, set):
return list(value)
elif isinstance(value, tuple):
return list(value)
elif value is None:
return []
else:
return [value]
def empty_value(self, value):
if self.use_set:
return set()
else:
return []
class Email(FancyValidator):
r"""
Validate an email address.
If you pass ``resolve_domain=True``, then it will try to resolve
the domain name to make sure it's valid. This takes longer, of
course. You must have the `dnspython <http://www.dnspython.org/>`__ module
installed to look up DNS (MX and A) records.
::
>>> e = Email()
>>> e.to_python(' [email protected] ')
'[email protected]'
>>> e.to_python('test')
Traceback (most recent call last):
...
Invalid: An email address must contain a single @
>>> e.to_python('test@foobar')
Traceback (most recent call last):
...
Invalid: The domain portion of the email address is invalid (the portion after the @: foobar)
>>> e.to_python('[email protected]')
Traceback (most recent call last):
...
Invalid: The domain portion of the email address is invalid (the portion after the @: foobar.com.5)
>>> e.to_python('[email protected]')
Traceback (most recent call last):
...
Invalid: The domain portion of the email address is invalid (the portion after the @: foo..bar.com)
>>> e.to_python('[email protected]')
Traceback (most recent call last):
...
Invalid: The domain portion of the email address is invalid (the portion after the @: .foo.bar.com)
>>> e.to_python('[email protected]')
'[email protected]'
>>> e.to_python('o*[email protected]')
'o*[email protected]'
>>> e = Email(resolve_domain=True)
>>> e.resolve_domain
True
>>> e.to_python('[email protected]')
'[email protected]'
>>> e.to_python('[email protected]')
'[email protected]'
>>> # NOTE: If you do not have dnspython installed this example won't work:
>>> e.to_python('[email protected]')
Traceback (most recent call last):
...
Invalid: The domain of the email address does not exist (the portion after the @: thisdomaindoesnotexistithinkforsure.com)
>>> e.to_python(u'[email protected]')
u'[email protected]'
>>> e = Email(not_empty=False)
>>> e.to_python('')
"""
resolve_domain = False
resolve_timeout = 10 # timeout in seconds when resolving domains
usernameRE = re.compile(r"^[\w!#$%&'*+\-/=?^`{|}~.]+$")
domainRE = re.compile(r'''
^(?:[a-z0-9][a-z0-9\-]{,62}\.)+ # subdomain
(?:[a-z]{2,63}|xn--[a-z0-9\-]{2,59})$ # top level domain
''', re.I | re.VERBOSE)
messages = dict(
empty=_('Please enter an email address'),
noAt=_('An email address must contain a single @'),
badUsername=_('The username portion of the email address is invalid'
' (the portion before the @: %(username)s)'),
socketError=_('An error occurred when trying to connect to the server:'
' %(error)s'),
badDomain=_('The domain portion of the email address is invalid'
' (the portion after the @: %(domain)s)'),
domainDoesNotExist=_('The domain of the email address does not exist'
' (the portion after the @: %(domain)s)'))
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if self.resolve_domain:
if not have_dns:
warnings.warn(
"dnspython <http://www.dnspython.org/> is not installed on"
" your system (or the dns.resolver package cannot be found)."
" I cannot resolve domain names in addresses")
raise ImportError("no module named dns.resolver")
def _validate_python(self, value, state):
if not value:
raise Invalid(self.message('empty', state), value, state)
value = value.strip()
splitted = value.split('@', 1)
try:
username, domain = splitted
except ValueError:
raise Invalid(self.message('noAt', state), value, state)
if not self.usernameRE.search(username):
raise Invalid(
self.message('badUsername', state, username=username),
value, state)
try:
idna_domain = [idna.ToASCII(p) for p in domain.split('.')]
if unicode is str: # Python 3
idna_domain = [p.decode('ascii') for p in idna_domain]
idna_domain = '.'.join(idna_domain)
except UnicodeError:
# UnicodeError: label empty or too long
# This exception might happen if we have an invalid domain name part
# (for example [email protected])
raise Invalid(
self.message('badDomain', state, domain=domain),
value, state)
if not self.domainRE.search(idna_domain):
raise Invalid(
self.message('badDomain', state, domain=domain),
value, state)
if self.resolve_domain:
assert have_dns, "dnspython should be available"
global socket
if socket is None:
import socket
try:
try:
dns.resolver.query(domain, 'MX')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
try:
dns.resolver.query(domain, 'A')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
raise Invalid(
self.message('domainDoesNotExist',
state, domain=domain), value, state)
except (socket.error, dns.exception.DNSException) as e:
raise Invalid(
self.message('socketError', state, error=e), value, state)
def _convert_to_python(self, value, state):
return value.strip()
class URL(FancyValidator):
"""
Validate a URL, either http://... or https://. If check_exists
is true, then we'll actually make a request for the page.
If add_http is true, then if no scheme is present we'll add
http://
::
>>> u = URL(add_http=True)
>>> u.to_python('foo.com')
'http://foo.com'
>>> u.to_python('http://hahaha.ha/bar.html')
'http://hahaha.ha/bar.html'
>>> u.to_python('http://xn--m7r7ml7t24h.com')
'http://xn--m7r7ml7t24h.com'
>>> u.to_python('http://xn--c1aay4a.xn--p1ai')
'http://xn--c1aay4a.xn--p1ai'
>>> u.to_python('http://foo.com/test?bar=baz&fleem=morx')
'http://foo.com/test?bar=baz&fleem=morx'
>>> u.to_python('http://foo.com/login?came_from=http%3A%2F%2Ffoo.com%2Ftest')
'http://foo.com/login?came_from=http%3A%2F%2Ffoo.com%2Ftest'
>>> u.to_python('http://foo.com:8000/test.html')
'http://foo.com:8000/test.html'
>>> u.to_python('http://foo.com/something\\nelse')
Traceback (most recent call last):
...
Invalid: That is not a valid URL
>>> u.to_python('https://test.com')
'https://test.com'
>>> u.to_python('http://test')
Traceback (most recent call last):
...
Invalid: You must provide a full domain name (like test.com)
>>> u.to_python('http://test..com')
Traceback (most recent call last):
...
Invalid: That is not a valid URL
>>> u = URL(add_http=False, check_exists=True)
>>> u.to_python('http://google.com')
'http://google.com'
>>> u.to_python('google.com')
Traceback (most recent call last):
...
Invalid: You must start your URL with http://, https://, etc
>>> u.to_python('http://www.formencode.org/does/not/exist/page.html')
Traceback (most recent call last):
...
Invalid: The server responded that the page could not be found
>>> u.to_python('http://this.domain.does.not.exist.example.org/test.html')
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
Invalid: An error occurred when trying to connect to the server: ...
If you want to allow addresses without a TLD (e.g., ``localhost``) you can do::
>>> URL(require_tld=False).to_python('http://localhost')
'http://localhost'
By default, internationalized domain names (IDNA) in Unicode will be
accepted and encoded to ASCII using Punycode (as described in RFC 3490).
You may set allow_idna to False to change this behavior::
>>> URL(allow_idna=True).to_python(
... u'http://\u0433\u0443\u0433\u043b.\u0440\u0444')
'http://xn--c1aay4a.xn--p1ai'
>>> URL(allow_idna=True, add_http=True).to_python(
... u'\u0433\u0443\u0433\u043b.\u0440\u0444')
'http://xn--c1aay4a.xn--p1ai'
>>> URL(allow_idna=False).to_python(
... u'http://\u0433\u0443\u0433\u043b.\u0440\u0444')
Traceback (most recent call last):
...
Invalid: That is not a valid URL
"""
add_http = True
allow_idna = True
check_exists = False
require_tld = True
url_re = re.compile(r'''
^(http|https)://
(?:[%:\w]*@)? # authenticator
(?: # ip or domain
(?P<ip>(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))|
(?P<domain>[a-z0-9][a-z0-9\-]{,62}\.)* # subdomain
(?P<tld>[a-z]{2,63}|xn--[a-z0-9\-]{2,59}) # top level domain
)
(?::[0-9]{1,5})? # port
# files/delims/etc
(?P<path>/[a-z0-9\-\._~:/\?#\[\]@!%\$&\'\(\)\*\+,;=]*)?
$
''', re.I | re.VERBOSE)
scheme_re = re.compile(r'^[a-zA-Z]+:')
messages = dict(
noScheme=_('You must start your URL with http://, https://, etc'),
badURL=_('That is not a valid URL'),
httpError=_('An error occurred when trying to access the URL:'
' %(error)s'),
socketError=_('An error occurred when trying to connect to the server:'
' %(error)s'),
notFound=_('The server responded that the page could not be found'),
status=_('The server responded with a bad status code (%(status)s)'),
noTLD=_('You must provide a full domain name (like %(domain)s.com)'))
def _convert_to_python(self, value, state):
value = value.strip()
if self.add_http:
if not self.scheme_re.search(value):
value = 'http://' + value
if self.allow_idna:
value = self._encode_idna(value)
match = self.scheme_re.search(value)
if not match:
raise Invalid(self.message('noScheme', state), value, state)
value = match.group(0).lower() + value[len(match.group(0)):]
match = self.url_re.search(value)
if not match:
raise Invalid(self.message('badURL', state), value, state)
if self.require_tld and not match.group('domain'):
raise Invalid(
self.message('noTLD', state, domain=match.group('tld')),
value, state)
if self.check_exists and value.startswith(('http://', 'https://')):
self._check_url_exists(value, state)
return value
def _encode_idna(self, url):
global urlparse
if urlparse is None:
import urlparse
try:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(
url)
except ValueError:
return url
try:
netloc = netloc.encode('idna')
if unicode is str: # Python 3
netloc = netloc.decode('ascii')
return str(urlparse.urlunparse((scheme, netloc,
path, params, query, fragment)))
except UnicodeError:
return url
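# For example (per the doctests above), u'http://гугл.рф' is rewritten to
# 'http://xn--c1aay4a.xn--p1ai' here before being matched against url_re.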
def _check_url_exists(self, url, state):
global httplib, urlparse, socket
if httplib is None:
import httplib
if urlparse is None:
import urlparse
if socket is None:
import socket
scheme, netloc, path, params, query, fragment = urlparse.urlparse(
url, 'http')
if scheme == 'https':
ConnClass = httplib.HTTPSConnection
else:
ConnClass = httplib.HTTPConnection
try:
conn = ConnClass(netloc)
if params:
path += ';' + params
if query:
path += '?' + query
conn.request('HEAD', path)
res = conn.getresponse()
except httplib.HTTPException as e:
raise Invalid(
self.message('httpError', state, error=e), state, url)
except socket.error as e:
raise Invalid(
self.message('socketError', state, error=e), state, url)
else:
if res.status == 404:
raise Invalid(
self.message('notFound', state), state, url)
if not 200 <= res.status < 500:
raise Invalid(
self.message('status', state, status=res.status),
state, url)
class XRI(FancyValidator):
r"""
Validator for XRIs.
It supports both i-names and i-numbers, of the first version of the XRI
standard.
::
>>> inames = XRI(xri_type="i-name")
>>> inames.to_python(" =John.Smith ")
'=John.Smith'
>>> inames.to_python("@Free.Software.Foundation")
'@Free.Software.Foundation'
>>> inames.to_python("Python.Software.Foundation")
Traceback (most recent call last):
...
Invalid: The type of i-name is not defined; it may be either individual or organizational
>>> inames.to_python("http://example.org")
Traceback (most recent call last):
...
Invalid: The type of i-name is not defined; it may be either individual or organizational
>>> inames.to_python("=!2C43.1A9F.B6F6.E8E6")
Traceback (most recent call last):
...
Invalid: "!2C43.1A9F.B6F6.E8E6" is an invalid i-name
>>> iname_with_schema = XRI(True, xri_type="i-name")
>>> iname_with_schema.to_python("=Richard.Stallman")
'xri://=Richard.Stallman'
>>> inames.to_python("=John Smith")
Traceback (most recent call last):
...
Invalid: "John Smith" is an invalid i-name
>>> inumbers = XRI(xri_type="i-number")
>>> inumbers.to_python("!!1000!de21.4536.2cb2.8074")
'!!1000!de21.4536.2cb2.8074'
>>> inumbers.to_python("@!1000.9554.fabd.129c!2847.df3c")
'@!1000.9554.fabd.129c!2847.df3c'
"""
iname_valid_pattern = re.compile(r"""
^
[\w]+ # A global alphanumeric i-name
(\.[\w]+)* # An i-name with dots
(\*[\w]+(\.[\w]+)*)* # A community i-name
$
""", re.VERBOSE | re.UNICODE)
iname_invalid_start = re.compile(r"^[\d\.-]", re.UNICODE)
"""@cvar: These characters must not be at the beggining of the i-name"""
inumber_pattern = re.compile(r"""
^
(
[=@]! # It's a personal or organization i-number
|
!! # It's a network i-number
)
[\dA-F]{1,4}(\.[\dA-F]{1,4}){0,3} # A global i-number
(![\dA-F]{1,4}(\.[\dA-F]{1,4}){0,3})* # Zero or more sub i-numbers
$
""", re.VERBOSE | re.IGNORECASE)
messages = dict(
noType=_('The type of i-name is not defined;'
' it may be either individual or organizational'),
repeatedChar=_('Dots and dashes may not be repeated consecutively'),
badIname=_('"%(iname)s" is an invalid i-name'),
badInameStart=_('i-names may not start with numbers'
' nor punctuation marks'),
badInumber=_('"%(inumber)s" is an invalid i-number'),
badType=_('The XRI must be a string (not a %(type)s: %(value)r)'),
badXri=_('"%(xri_type)s" is not a valid type of XRI'))
def __init__(self, add_xri=False, xri_type="i-name", **kwargs):
"""Create an XRI validator.
@param add_xri: Should the schema be added if not present?
Officially it's optional.
@type add_xri: C{bool}
@param xri_type: What type of XRI should be validated?
Possible values: C{i-name} or C{i-number}.
@type xri_type: C{str}
"""
self.add_xri = add_xri
assert xri_type in ('i-name', 'i-number'), (
'xri_type must be "i-name" or "i-number"')
self.xri_type = xri_type
super(XRI, self).__init__(**kwargs)
def _convert_to_python(self, value, state):
"""Prepend the 'xri://' schema if needed and remove trailing spaces"""
value = value.strip()
if self.add_xri and not value.startswith('xri://'):
value = 'xri://' + value
return value
def _validate_python(self, value, state=None):
"""Validate an XRI
@raise Invalid: If at least one of the following conditions is met:
- C{value} is not a string.
- The XRI is not a personal, organizational or network one.
- The relevant validator (i-name or i-number) considers the XRI
is not valid.
"""
if not isinstance(value, basestring):
raise Invalid(
self.message('badType', state,
type=str(type(value)), value=value), value, state)
# Let's remove the schema, if any
if value.startswith('xri://'):
value = value[6:]
if not value[0] in ('@', '=') and not (
self.xri_type == 'i-number' and value[0] == '!'):
raise Invalid(self.message('noType', state), value, state)
if self.xri_type == 'i-name':
self._validate_iname(value, state)
else:
self._validate_inumber(value, state)
def _validate_iname(self, iname, state):
"""Validate an i-name"""
# The type is not required here:
iname = iname[1:]
if '..' in iname or '--' in iname:
raise Invalid(self.message('repeatedChar', state), iname, state)
if self.iname_invalid_start.match(iname):
raise Invalid(self.message('badInameStart', state), iname, state)
if not self.iname_valid_pattern.match(iname) or '_' in iname:
raise Invalid(
self.message('badIname', state, iname=iname), iname, state)
def _validate_inumber(self, inumber, state):
"""Validate an i-number"""
if not self.__class__.inumber_pattern.match(inumber):
raise Invalid(
self.message('badInumber', state,
inumber=inumber, value=inumber), inumber, state)
class OpenId(FancyValidator):
r"""
OpenId validator.
::
>>> v = OpenId(add_schema=True)
>>> v.to_python(' example.net ')
'http://example.net'
>>> v.to_python('@TurboGears')
'xri://@TurboGears'
>>> w = OpenId(add_schema=False)
>>> w.to_python(' example.net ')
Traceback (most recent call last):
...
Invalid: "example.net" is not a valid OpenId (it is neither an URL nor an XRI)
>>> w.to_python('!!1000')
'!!1000'
>>> w.to_python('[email protected]')
Traceback (most recent call last):
...
Invalid: "[email protected]" is not a valid OpenId (it is neither an URL nor an XRI)
"""
messages = dict(
badId=_('"%(id)s" is not a valid OpenId'
' (it is neither an URL nor an XRI)'))
def __init__(self, add_schema=False, **kwargs):
"""Create an OpenId validator.
@param add_schema: Should the schema be added if not present?
@type add_schema: C{bool}
"""
self.url_validator = URL(add_http=add_schema)
self.iname_validator = XRI(add_schema, xri_type="i-name")
self.inumber_validator = XRI(add_schema, xri_type="i-number")
def _convert_to_python(self, value, state):
value = value.strip()
try:
return self.url_validator.to_python(value, state)
except Invalid:
try:
return self.iname_validator.to_python(value, state)
except Invalid:
try:
return self.inumber_validator.to_python(value, state)
except Invalid:
pass
# It's not an OpenId!
raise Invalid(self.message('badId', state, id=value), value, state)
def _validate_python(self, value, state):
self._convert_to_python(value, state)
def StateProvince(*kw, **kwargs):
deprecation_warning("please use formencode.national.USStateProvince")
from formencode.national import USStateProvince
return USStateProvince(*kw, **kwargs)
def PhoneNumber(*kw, **kwargs):
deprecation_warning("please use formencode.national.USPhoneNumber")
from formencode.national import USPhoneNumber
return USPhoneNumber(*kw, **kwargs)
def IPhoneNumberValidator(*kw, **kwargs):
deprecation_warning(
"please use formencode.national.InternationalPhoneNumber")
from formencode.national import InternationalPhoneNumber
return InternationalPhoneNumber(*kw, **kwargs)
class FieldStorageUploadConverter(FancyValidator):
"""
Handles cgi.FieldStorage instances that are file uploads.
This doesn't do any conversion, but it can detect empty upload
fields (which appear like normal fields, but have no filename when
no upload was given).
"""
def _convert_to_python(self, value, state=None):
if isinstance(value, cgi.FieldStorage):
if getattr(value, 'filename', None):
return value
raise Invalid('invalid', value, state)
else:
return value
def is_empty(self, value):
if isinstance(value, cgi.FieldStorage):
return not bool(getattr(value, 'filename', None))
return FancyValidator.is_empty(self, value)
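# Usage sketch (illustrative only; `fs` stands for a cgi.FieldStorage
# produced by a multipart/form-data POST):
#     FieldStorageUploadConverter().to_python(fs)  # passes through when
#                                                  # fs.filename is set
# An upload field without a filename is treated as empty, so if_missing /
# not_empty apply as they would for any blank field.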
class FileUploadKeeper(FancyValidator):
"""
Takes two inputs (a dictionary with keys ``static`` and
``upload``) and converts them into one value on the Python side (a
dictionary with ``filename`` and ``content`` keys). The upload
takes priority over the static value. The filename may be None if
it can't be discovered.
Handles uploads of both text and ``cgi.FieldStorage`` upload
values.
This is basically for use when you have an upload field, and you
want to keep the upload around even if the rest of the form
submission fails. When converting *back* to the form submission,
there may be extra values ``'original_filename'`` and
``'original_content'``, which you may want to use in your form to show
the user you still have their content around.
To use this, make sure you are using variabledecode, then use
something like::
<input type="file" name="myfield.upload">
<input type="hidden" name="myfield.static">
Then in your scheme::
class MyScheme(Scheme):
myfield = FileUploadKeeper()
Note that big file uploads mean big hidden fields, and lots of
bytes passed back and forth in the case of an error.
"""
upload_key = 'upload'
static_key = 'static'
def _convert_to_python(self, value, state):
upload = value.get(self.upload_key)
static = value.get(self.static_key, '').strip()
filename = content = None
if isinstance(upload, cgi.FieldStorage):
filename = upload.filename
content = upload.value
elif isinstance(upload, basestring) and upload:
filename = None
# @@: Should this encode upload if it is unicode?
content = upload
if not content and static:
filename, content = static.split(None, 1)
filename = '' if filename == '-' else filename.decode('base64')
content = content.decode('base64')
return {'filename': filename, 'content': content}
def _convert_from_python(self, value, state):
filename = value.get('filename', '')
content = value.get('content', '')
if filename or content:
result = self.pack_content(filename, content)
return {self.upload_key: '',
self.static_key: result,
'original_filename': filename,
'original_content': content}
else:
return {self.upload_key: '',
self.static_key: ''}
def pack_content(self, filename, content):
# Encode the filename here (newlines removed so the single space
# separator below stays unambiguous); '-' marks an empty filename.
enc_filename = (filename or '').encode('base64').replace('\n', '') or '-'
enc_content = (content or '').encode('base64')
result = '%s %s' % (enc_filename, enc_content)
return result
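# Packed wire format (both fields base64-encoded, separated by one space):
#     '<filename-b64 or "-"> <content-b64>'
# Round-trip sketch (hypothetical values):
#     keeper = FileUploadKeeper()
#     packed = keeper.pack_content('a.txt', 'hello')
#     # _convert_to_python() recovers {'filename': 'a.txt', 'content': 'hello'}
#     # from {'upload': '', 'static': packed}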
class DateConverter(FancyValidator):
"""
Validates and converts a string date, like mm/yy, dd/mm/yy,
dd-mm-yy, etc. Using ``month_style`` you can support
the three general styles ``mdy`` = ``us`` = ``mm/dd/yyyy``,
``dmy`` = ``euro`` = ``dd/mm/yyyy`` and
``ymd`` = ``iso`` = ``yyyy/mm/dd``.
Accepts English month names, also abbreviated. Returns value as a
datetime object (you can get mx.DateTime objects if you use
``datetime_module='mxDateTime'``). Two year dates are assumed to
be within 1950-2020, with dates from 21-49 being ambiguous and
signaling an error.
Use accept_day=False if you just want a month/year (like for a
credit card expiration date).
::
>>> d = DateConverter()
>>> d.to_python('12/3/09')
datetime.date(2009, 12, 3)
>>> d.to_python('12/3/2009')
datetime.date(2009, 12, 3)
>>> d.to_python('2/30/04')
Traceback (most recent call last):
...
Invalid: That month only has 29 days
>>> d.to_python('13/2/05')
Traceback (most recent call last):
...
Invalid: Please enter a month from 1 to 12
>>> d.to_python('1/1/200')
Traceback (most recent call last):
...
Invalid: Please enter a four-digit year after 1899
If you change ``month_style`` you can get European-style dates::
>>> d = DateConverter(month_style='dd/mm/yyyy')
>>> date = d.to_python('12/3/09')
>>> date
datetime.date(2009, 3, 12)
>>> d.from_python(date)
'12/03/2009'
"""
# set to False if you want only month and year
accept_day = True
# allowed month styles: 'mdy' = 'us', 'dmy' = 'euro', 'ymd' = 'iso'
# also allowed: 'mm/dd/yyyy', 'dd/mm/yyyy', 'yyyy/mm/dd'
month_style = 'mdy'
# preferred separator for reverse conversion: '/', '.' or '-'
separator = '/'
# Use 'datetime' to force the Python datetime module, or
# 'mxDateTime' to force the mxDateTime module (None means use
# datetime, or if not present mxDateTime)
datetime_module = None
_month_names = {
'jan': 1, 'january': 1,
'feb': 2, 'february': 2, 'febuary': 2,  # accept the common misspelling
'mar': 3, 'march': 3,
'apr': 4, 'april': 4,
'may': 5,
'jun': 6, 'june': 6,
'jul': 7, 'july': 7,
'aug': 8, 'august': 8,
'sep': 9, 'sept': 9, 'september': 9,
'oct': 10, 'october': 10,
'nov': 11, 'november': 11,
'dec': 12, 'december': 12,
}
_date_re = dict(
dmy=re.compile(
r'^\s*(\d\d?)[\-\./\\](\d\d?|%s)[\-\./\\](\d\d\d?\d?)\s*$'
% '|'.join(_month_names), re.I),
mdy=re.compile(
r'^\s*(\d\d?|%s)[\-\./\\](\d\d?)[\-\./\\](\d\d\d?\d?)\s*$'
% '|'.join(_month_names), re.I),
ymd=re.compile(
r'^\s*(\d\d\d?\d?)[\-\./\\](\d\d?|%s)[\-\./\\](\d\d?)\s*$'
% '|'.join(_month_names), re.I),
my=re.compile(
r'^\s*(\d\d?|%s)[\-\./\\](\d\d\d?\d?)\s*$'
% '|'.join(_month_names), re.I),
ym=re.compile(
r'^\s*(\d\d\d?\d?)[\-\./\\](\d\d?|%s)\s*$'
% '|'.join(_month_names), re.I))
_formats = dict(d='%d', m='%m', y='%Y')
_human_formats = dict(d=_('DD'), m=_('MM'), y=_('YYYY'))
# Feb. 29 is accepted here; invalid dates in non-leap years are caught
# later by the datetime module (ValueError -> invalidDate below)
_monthDays = {
1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31,
9: 30, 10: 31, 11: 30, 12: 31}
messages = dict(
badFormat=_('Please enter the date in the form %(format)s'),
monthRange=_('Please enter a month from 1 to 12'),
invalidDay=_('Please enter a valid day'),
dayRange=_('That month only has %(days)i days'),
invalidDate=_('That is not a valid day (%(exception)s)'),
unknownMonthName=_('Unknown month name: %(month)s'),
invalidYear=_('Please enter a number for the year'),
fourDigitYear=_('Please enter a four-digit year after 1899'),
wrongFormat=_('Please enter the date in the form %(format)s'))
def __init__(self, *args, **kw):
super(DateConverter, self).__init__(*args, **kw)
month_style = (self.month_style or DateConverter.month_style).lower()
accept_day = bool(self.accept_day)
self.accept_day = accept_day  # normalize to a real bool
if month_style in ('mdy',
'md', 'mm/dd/yyyy', 'mm/dd', 'us', 'american'):
month_style = 'mdy'
elif month_style in ('dmy',
'dm', 'dd/mm/yyyy', 'dd/mm', 'euro', 'european'):
month_style = 'dmy'
elif month_style in ('ymd',
'ym', 'yyyy/mm/dd', 'yyyy/mm', 'iso', 'china', 'chinese'):
month_style = 'ymd'
else:
raise TypeError('Bad month_style: %r' % month_style)
self.month_style = month_style
separator = self.separator
if not separator or separator == 'auto':
separator = dict(mdy='/', dmy='.', ymd='-')[month_style]
elif separator not in ('-', '.', '/', '\\'):
raise TypeError('Bad separator: %r' % separator)
self.separator = separator
self.format = separator.join(self._formats[part]
for part in month_style if part != 'd' or accept_day)
self.human_format = separator.join(self._human_formats[part]
for part in month_style if part != 'd' or accept_day)
def _convert_to_python(self, value, state):
self.assert_string(value, state)
month_style = self.month_style
if not self.accept_day:
month_style = 'ym' if month_style == 'ymd' else 'my'
match = self._date_re[month_style].search(value)
if not match:
raise Invalid(
self.message('badFormat', state,
format=self.human_format), value, state)
groups = match.groups()
if self.accept_day:
if month_style == 'mdy':
month, day, year = groups
elif month_style == 'dmy':
day, month, year = groups
else:
year, month, day = groups
day = int(day)
if not 1 <= day <= 31:
raise Invalid(self.message('invalidDay', state), value, state)
else:
day = 1
if month_style == 'my':
month, year = groups
else:
year, month = groups
month = self.make_month(month, state)
if not 1 <= month <= 12:
raise Invalid(self.message('monthRange', state), value, state)
if self._monthDays[month] < day:
raise Invalid(
self.message('dayRange', state,
days=self._monthDays[month]), value, state)
year = self.make_year(year, state)
dt_mod = import_datetime(self.datetime_module)
try:
return datetime_makedate(dt_mod, year, month, day)
except ValueError as v:
raise Invalid(
self.message('invalidDate', state,
exception=str(v)), value, state)
def make_month(self, value, state):
try:
return int(value)
except ValueError:
try:
return self._month_names[value.lower().strip()]
except KeyError:
raise Invalid(
self.message('unknownMonthName', state,
month=value), value, state)
def make_year(self, year, state):
try:
year = int(year)
except ValueError:
raise Invalid(self.message('invalidYear', state), year, state)
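# Two-digit years map into the assumed 1950-2020 window, e.g.
# '09' -> 2009 and '75' -> 1975; 21-49 fall outside the window and
# raise fourDigitYear, as do three-digit years like 200.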
if year <= 20:
year += 2000
elif 50 <= year < 100:
year += 1900
if 20 < year < 50 or 99 < year < 1900:
raise Invalid(self.message('fourDigitYear', state), year, state)
return year
def _convert_from_python(self, value, state):
if self.if_empty is not NoDefault and not value:
return ''
return value.strftime(self.format)
class TimeConverter(FancyValidator):
"""
Converts times in the format HH:MM:SSampm to (h, m, s).
Seconds are optional.
For ampm, set use_ampm = True. For seconds, use_seconds = True.
Use 'optional' for either of these to make them optional.
Examples::
>>> tim = TimeConverter()
>>> tim.to_python('8:30')
(8, 30)
>>> tim.to_python('20:30')
(20, 30)
>>> tim.to_python('30:00')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 0-23
>>> tim.to_python('13:00pm')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 1-12
>>> tim.to_python('12:-1')
Traceback (most recent call last):
...
Invalid: You must enter a minute in the range 0-59
>>> tim.to_python('12:02pm')
(12, 2)
>>> tim.to_python('12:02am')
(0, 2)
>>> tim.to_python('1:00PM')
(13, 0)
>>> tim.from_python((13, 0))
'13:00:00'
>>> tim2 = tim(use_ampm=True, use_seconds=False)
>>> tim2.from_python((13, 0))
'1:00pm'
>>> tim2.from_python((0, 0))
'12:00am'
>>> tim2.from_python((12, 0))
'12:00pm'
Examples with ``datetime.time``::
>>> v = TimeConverter(use_datetime=True)
>>> a = v.to_python('18:00')
>>> a
datetime.time(18, 0)
>>> b = v.to_python('30:00')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 0-23
>>> v2 = TimeConverter(prefer_ampm=True, use_datetime=True)
>>> v2.from_python(a)
'6:00:00pm'
>>> v3 = TimeConverter(prefer_ampm=True,
... use_seconds=False, use_datetime=True)
>>> a = v3.to_python('18:00')
>>> a
datetime.time(18, 0)
>>> v3.from_python(a)
'6:00pm'
>>> a = v3.to_python('18:00:00')
Traceback (most recent call last):
...
Invalid: You may not enter seconds
"""
use_ampm = 'optional'
prefer_ampm = False
use_seconds = 'optional'
use_datetime = False
# This can be set to make it prefer mxDateTime:
datetime_module = None
messages = dict(
noAMPM=_('You must indicate AM or PM'),
tooManyColon=_('There are too many :\'s'),
noSeconds=_('You may not enter seconds'),
secondsRequired=_('You must enter seconds'),
minutesRequired=_('You must enter minutes (after a :)'),
badNumber=_('The %(part)s value you gave is not a number: %(number)r'),
badHour=_('You must enter an hour in the range %(range)s'),
badMinute=_('You must enter a minute in the range 0-59'),
badSecond=_('You must enter a second in the range 0-59'))
def _convert_to_python(self, value, state):
result = self._to_python_tuple(value, state)
if self.use_datetime:
dt_mod = import_datetime(self.datetime_module)
time_class = datetime_time(dt_mod)
return time_class(*result)
else:
return result
def _to_python_tuple(self, value, state):
time = value.strip()
explicit_ampm = False
if self.use_ampm:
last_two = time[-2:].lower()
if last_two not in ('am', 'pm'):
if self.use_ampm != 'optional':
raise Invalid(self.message('noAMPM', state), value, state)
offset = 0
else:
explicit_ampm = True
offset = 12 if last_two == 'pm' else 0
time = time[:-2]
else:
offset = 0
parts = time.split(':', 3)
if len(parts) > 3:
raise Invalid(self.message('tooManyColon', state), value, state)
if len(parts) == 3 and not self.use_seconds:
raise Invalid(self.message('noSeconds', state), value, state)
if (len(parts) == 2
and self.use_seconds and self.use_seconds != 'optional'):
raise Invalid(self.message('secondsRequired', state), value, state)
if len(parts) == 1:
raise Invalid(self.message('minutesRequired', state), value, state)
try:
hour = int(parts[0])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[0], part='hour'), value, state)
if explicit_ampm:
if not 1 <= hour <= 12:
raise Invalid(
self.message('badHour', state,
number=hour, range='1-12'), value, state)
if hour == 12 and offset == 12:
# 12pm == 12
pass
elif hour == 12 and offset == 0:
# 12am == 0
hour = 0
else:
hour += offset
else:
if not 0 <= hour < 24:
raise Invalid(
self.message('badHour', state,
number=hour, range='0-23'), value, state)
try:
minute = int(parts[1])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[1], part='minute'), value, state)
if not 0 <= minute < 60:
raise Invalid(
self.message('badMinute', state, number=minute),
value, state)
if len(parts) == 3:
try:
second = int(parts[2])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[2], part='second'), value, state)
if not 0 <= second < 60:
raise Invalid(
self.message('badSecond', state, number=second),
value, state)
else:
second = None
if second is None:
return (hour, minute)
else:
return (hour, minute, second)
def _convert_from_python(self, value, state):
if isinstance(value, basestring):
return value
if hasattr(value, 'hour'):
hour, minute = value.hour, value.minute
second = value.second
elif len(value) == 3:
hour, minute, second = value
elif len(value) == 2:
hour, minute = value
second = 0
ampm = ''
if (self.use_ampm == 'optional' and self.prefer_ampm) or (
self.use_ampm and self.use_ampm != 'optional'):
ampm = 'am'
if hour > 12:
hour -= 12
ampm = 'pm'
elif hour == 12:
ampm = 'pm'
elif hour == 0:
hour = 12
if self.use_seconds:
return '%i:%02i:%02i%s' % (hour, minute, second, ampm)
else:
return '%i:%02i%s' % (hour, minute, ampm)
def PostalCode(*kw, **kwargs):
deprecation_warning("please use formencode.national.USPostalCode")
from formencode.national import USPostalCode
return USPostalCode(*kw, **kwargs)
class StripField(FancyValidator):
"""
Take a field from a dictionary, removing the key from the dictionary.
``name`` is the key. The field value and a new copy of the dictionary
with that field removed are returned.
>>> StripField('test').to_python({'a': 1, 'test': 2})
(2, {'a': 1})
>>> StripField('test').to_python({})
Traceback (most recent call last):
...
Invalid: The name 'test' is missing
"""
__unpackargs__ = ('name',)
messages = dict(
missing=_('The name %(name)s is missing'))
def _convert_to_python(self, valueDict, state):
v = valueDict.copy()
try:
field = v.pop(self.name)
except KeyError:
raise Invalid(
self.message('missing', state, name=repr(self.name)),
valueDict, state)
return field, v
def is_empty(self, value):
# empty dictionaries don't really apply here
return False
class StringBool(FancyValidator): # originally from TurboGears 1
"""
Converts a string to a boolean.
Values like 'true' and 'false' are considered True and False,
respectively; anything in ``true_values`` is true, anything in
``false_values`` is false (case-insensitive). The first item of
those lists is considered the preferred form.
::
>>> s = StringBool()
>>> s.to_python('yes'), s.to_python('no')
(True, False)
>>> s.to_python(1), s.to_python('N')
(True, False)
>>> s.to_python('ye')
Traceback (most recent call last):
...
Invalid: Value should be 'true' or 'false'
"""
true_values = ['true', 't', 'yes', 'y', 'on', '1']
false_values = ['false', 'f', 'no', 'n', 'off', '0']
messages = dict(
string=_('Value should be %(true)r or %(false)r'))
def _convert_to_python(self, value, state):
if isinstance(value, basestring):
value = value.strip().lower()
if value in self.true_values:
return True
if not value or value in self.false_values:
return False
raise Invalid(
self.message('string', state,
true=self.true_values[0], false=self.false_values[0]),
value, state)
return bool(value)
def _convert_from_python(self, value, state):
return (self.true_values if value else self.false_values)[0]
# Should deprecate:
StringBoolean = StringBool
class SignedString(FancyValidator):
"""
Encodes a string into a signed string: a base64 signature (SHA-1 of
secret + nonce + value) followed by the base64-encoded nonce and value.
It is up to you to provide a secret, and to keep the secret handy
and consistent.
"""
messages = dict(
malformed=_('Value does not contain a signature'),
badsig=_('Signature is not correct'))
secret = None
nonce_length = 4
def _convert_to_python(self, value, state):
global sha1
if not sha1:
from hashlib import sha1
assert self.secret is not None, "You must give a secret"
parts = value.split(None, 1)
if not parts or len(parts) == 1:
raise Invalid(self.message('malformed', state), value, state)
sig, rest = parts
sig = sig.decode('base64')
rest = rest.decode('base64')
nonce = rest[:self.nonce_length]
rest = rest[self.nonce_length:]
expected = sha1(str(self.secret) + nonce + rest).digest()
if expected != sig:
raise Invalid(self.message('badsig', state), value, state)
return rest
def _convert_from_python(self, value, state):
global sha1
if not sha1:
from hashlib import sha1
nonce = self.make_nonce()
value = str(value)
digest = sha1(self.secret + nonce + value).digest()
return self.encode(digest) + ' ' + self.encode(nonce + value)
def encode(self, value):
return value.encode('base64').strip().replace('\n', '')
def make_nonce(self):
global random
if not random:
import random
return ''.join(chr(random.randrange(256))
for _i in xrange(self.nonce_length))
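# Wire format produced by _convert_from_python (fields base64, one space):
#     b64(sha1(secret + nonce + value)) + ' ' + b64(nonce + value)
# Round-trip sketch (Python 2, like the rest of this module):
#     signer = SignedString(secret='s3cret')
#     token = signer.from_python('payload')
#     assert signer.to_python(token) == 'payload'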
class IPAddress(FancyValidator):
"""
Formencode validator to check whether a string is a correct IP address.
Examples::
>>> ip = IPAddress()
>>> ip.to_python('127.0.0.1')
'127.0.0.1'
>>> ip.to_python('299.0.0.1')
Traceback (most recent call last):
...
Invalid: The octets must be within the range of 0-255 (not '299')
>>> ip.to_python('192.168.0.1/1')
Traceback (most recent call last):
...
Invalid: Please enter a valid IP address (a.b.c.d)
>>> ip.to_python('asdf')
Traceback (most recent call last):
...
Invalid: Please enter a valid IP address (a.b.c.d)
"""
messages = dict(
badFormat=_('Please enter a valid IP address (a.b.c.d)'),
leadingZeros=_('The octets must not have leading zeros'),
illegalOctets=_('The octets must be within the range of 0-255'
' (not %(octet)r)'))
leading_zeros = False
def _validate_python(self, value, state=None):
try:
if not value:
raise ValueError
octets = value.split('.', 5)
# Only 4 octets?
if len(octets) != 4:
raise ValueError
# Correct octets?
for octet in octets:
if octet.startswith('0') and octet != '0':
if not self.leading_zeros:
raise Invalid(
self.message('leadingZeros', state), value, state)
# strip zeros so this won't be an octal number
octet = octet.lstrip('0')
if not 0 <= int(octet) < 256:
raise Invalid(
self.message('illegalOctets', state, octet=octet),
value, state)
# Splitting failed: wrong syntax
except ValueError:
raise Invalid(self.message('badFormat', state), value, state)
class CIDR(IPAddress):
"""
Formencode validator to check whether a string is in correct CIDR
notation (IP address, or IP address plus /mask).
Examples::
>>> cidr = CIDR()
>>> cidr.to_python('127.0.0.1')
'127.0.0.1'
>>> cidr.to_python('299.0.0.1')
Traceback (most recent call last):
...
Invalid: The octets must be within the range of 0-255 (not '299')
>>> cidr.to_python('192.168.0.1/1')
Traceback (most recent call last):
...
Invalid: The network size (bits) must be within the range of 8-32 (not '1')
>>> cidr.to_python('asdf')
Traceback (most recent call last):
...
Invalid: Please enter a valid IP address (a.b.c.d) or IP network (a.b.c.d/e)
"""
messages = dict(IPAddress._messages,
badFormat=_('Please enter a valid IP address (a.b.c.d)'
' or IP network (a.b.c.d/e)'),
illegalBits=_('The network size (bits) must be within the range'
' of 8-32 (not %(bits)r)'))
def _validate_python(self, value, state):
try:
# Split into octets and bits
if '/' in value: # a.b.c.d/e
addr, bits = value.split('/')
else: # a.b.c.d
addr, bits = value, 32
# Use IPAddress validator to validate the IP part
IPAddress._validate_python(self, addr, state)
# Bits (netmask) correct?
if not 8 <= int(bits) <= 32:
raise Invalid(
self.message('illegalBits', state, bits=bits),
value, state)
# Splitting failed: wrong syntax
except ValueError:
raise Invalid(self.message('badFormat', state), value, state)
class MACAddress(FancyValidator):
"""
Formencode validator to check whether a string is a correct hardware
(MAC) address.
Examples::
>>> mac = MACAddress()
>>> mac.to_python('aa:bb:cc:dd:ee:ff')
'aabbccddeeff'
>>> mac.to_python('aa:bb:cc:dd:ee:ff:e')
Traceback (most recent call last):
...
Invalid: A MAC address must contain 12 digits and A-F; the value you gave has 13 characters
>>> mac.to_python('aa:bb:cc:dd:ee:fx')
Traceback (most recent call last):
...
Invalid: MAC addresses may only contain 0-9 and A-F (and optionally :), not 'x'
>>> MACAddress(add_colons=True).to_python('aabbccddeeff')
'aa:bb:cc:dd:ee:ff'
"""
strip = True
valid_characters = '0123456789abcdefABCDEF'
add_colons = False
messages = dict(
badLength=_('A MAC address must contain 12 digits and A-F;'
' the value you gave has %(length)s characters'),
badCharacter=_('MAC addresses may only contain 0-9 and A-F'
' (and optionally :), not %(char)r'))
def _convert_to_python(self, value, state):
address = value.replace(':', '').lower() # remove colons
if len(address) != 12:
raise Invalid(
self.message('badLength', state,
length=len(address)), address, state)
for char in address:
if char not in self.valid_characters:
raise Invalid(
self.message('badCharacter', state,
char=char), address, state)
if self.add_colons:
address = '%s:%s:%s:%s:%s:%s' % (
address[0:2], address[2:4], address[4:6],
address[6:8], address[8:10], address[10:12])
return address
_convert_from_python = _convert_to_python
class FormValidator(FancyValidator):
"""
A FormValidator is something that can be chained with a Schema.
Unlike normal chaining the FormValidator can validate forms that
aren't entirely valid.
The important method is .validate(), of course. It gets passed a
dictionary of the (processed) values from the form. If you have
.validate_partial_form set to True, then it will get the incomplete
values as well -- check with the "in" operator if the form was able
to process any particular field.
Anyway, .validate() should return a string or a dictionary. If a
string, it's an error message that applies to the whole form. If
not, then it should be a dictionary of fieldName: errorMessage.
The special key "form" is the error message for the form as a whole
(i.e., a string is equivalent to {"form": string}).
Returns None on no errors.
"""
validate_partial_form = False
validate_partial_python = None
validate_partial_other = None
def is_empty(self, value):
return False
def field_is_empty(self, value):
return is_empty(value)
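# Minimal sketch of a concrete FormValidator (hypothetical field names;
# see FieldsMatch below for a full implementation of this pattern):
#     class PasswordsMatch(FormValidator):
#         def _validate_python(self, field_dict, state):
#             if field_dict.get('pw') != field_dict.get('pw2'):
#                 raise Invalid('Fields do not match', field_dict, state,
#                               error_dict={'pw2': 'Fields do not match'})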
class RequireIfMissing(FormValidator):
"""
Require one field based on another field being present or missing.
This validator is applied to a form, not an individual field (usually
using a Schema's ``pre_validators`` or ``chained_validators``) and is
available under both names ``RequireIfMissing`` and ``RequireIfPresent``.
If you provide a ``missing`` value (a string key name) then
if that field is missing the field must be entered.
This gives you an either/or situation.
If you provide a ``present`` value (another string key name) then
if that field is present, the required field must also be present.
::
>>> from formencode import validators
>>> v = validators.RequireIfPresent('phone_type', present='phone')
>>> v.to_python(dict(phone_type='', phone='510 420 4577'))
Traceback (most recent call last):
...
Invalid: You must give a value for phone_type
>>> v.to_python(dict(phone=''))
{'phone': ''}
Note that if you have a validator on the optionally-required
field, you should probably use ``if_missing=None``. This way you
won't get an error from the Schema about a missing value. For example::
class PhoneInput(Schema):
phone = PhoneNumber()
phone_type = String(if_missing=None)
chained_validators = [RequireIfPresent('phone_type', present='phone')]
"""
# Field that potentially is required:
required = None
# If this field is missing, then it is required:
missing = None
# If this field is present, then it is required:
present = None
__unpackargs__ = ('required',)
def _convert_to_python(self, value_dict, state):
is_empty = self.field_is_empty
if is_empty(value_dict.get(self.required)) and (
(self.missing and is_empty(value_dict.get(self.missing))) or
(self.present and not is_empty(value_dict.get(self.present)))):
raise Invalid(
_('You must give a value for %s') % self.required,
value_dict, state,
error_dict={self.required:
Invalid(self.message('empty', state),
value_dict.get(self.required), state)})
return value_dict
RequireIfPresent = RequireIfMissing
class RequireIfMatching(FormValidator):
"""
Require a list of fields based on the value of another field.
This validator is applied to a form, not an individual field (usually
using a Schema's ``pre_validators`` or ``chained_validators``).
You provide a field name, an expected value and a list of required fields
(a list of string key names). If the value of the field, if present,
matches the value of ``expected_value``, then the validator will raise an
``Invalid`` exception for every field in ``required_fields`` that is
missing.
::
>>> from formencode import validators
>>> v = validators.RequireIfMatching('phone_type', expected_value='mobile', required_fields=['mobile'])
>>> v.to_python(dict(phone_type='mobile'))
Traceback (most recent call last):
...
formencode.api.Invalid: You must give a value for mobile
>>> v.to_python(dict(phone_type='someothervalue'))
{'phone_type': 'someothervalue'}
"""
# Field that we will check for its value:
field = None
# Value that the field shall have
expected_value = None
# If this field is present, then these fields are required:
required_fields = []
__unpackargs__ = ('field', 'expected_value')
def _convert_to_python(self, value_dict, state):
is_empty = self.field_is_empty
if self.field in value_dict and value_dict.get(self.field) == self.expected_value:
for required_field in self.required_fields:
if required_field not in value_dict or is_empty(value_dict.get(required_field)):
raise Invalid(
_('You must give a value for %s') % required_field,
value_dict, state,
error_dict={required_field:
Invalid(self.message('empty', state),
value_dict.get(required_field), state)})
return value_dict
class FieldsMatch(FormValidator):
"""
Tests that the given fields match, i.e., are identical. Useful
for password+confirmation fields. Pass the list of field names in
as `field_names`.
::
>>> f = FieldsMatch('pass', 'conf')
>>> sorted(f.to_python({'pass': 'xx', 'conf': 'xx'}).items())
[('conf', 'xx'), ('pass', 'xx')]
>>> f.to_python({'pass': 'xx', 'conf': 'yy'})
Traceback (most recent call last):
...
Invalid: conf: Fields do not match
"""
show_match = False
field_names = None
validate_partial_form = True
__unpackargs__ = ('*', 'field_names')
messages = dict(
invalid=_('Fields do not match (should be %(match)s)'),
invalidNoMatch=_('Fields do not match'),
notDict=_('Fields should be a dictionary'))
def __init__(self, *args, **kw):
super(FieldsMatch, self).__init__(*args, **kw)
if len(self.field_names) < 2:
raise TypeError('FieldsMatch() requires at least two field names')
def validate_partial(self, field_dict, state):
for name in self.field_names:
if name not in field_dict:
return
self._validate_python(field_dict, state)
def _validate_python(self, field_dict, state):
try:
ref = field_dict[self.field_names[0]]
except TypeError:
# Generally because field_dict isn't a dict
raise Invalid(self.message('notDict', state), field_dict, state)
except KeyError:
ref = ''
errors = {}
for name in self.field_names[1:]:
if field_dict.get(name, '') != ref:
if self.show_match:
errors[name] = self.message('invalid', state,
match=ref)
else:
errors[name] = self.message('invalidNoMatch', state)
if errors:
error_list = sorted(errors.iteritems())
error_message = '<br>\n'.join(
'%s: %s' % (name, value) for name, value in error_list)
raise Invalid(error_message, field_dict, state, error_dict=errors)
class CreditCardValidator(FormValidator):
"""
Checks that credit card numbers are valid (if not real).
You pass in the name of the field that has the credit card
type and the field with the credit card number. The credit
card type should be one of "visa", "mastercard", "amex",
"dinersclub", "discover", "jcb".
You must check the expiration date yourself (there is no
relation between CC number/types and expiration dates).
::
>>> cc = CreditCardValidator()
>>> sorted(cc.to_python({'ccType': 'visa', 'ccNumber': '4111111111111111'}).items())
[('ccNumber', '4111111111111111'), ('ccType', 'visa')]
>>> cc.to_python({'ccType': 'visa', 'ccNumber': '411111111111111'})
Traceback (most recent call last):
...
Invalid: ccNumber: You did not enter a valid number of digits
>>> cc.to_python({'ccType': 'visa', 'ccNumber': '411111111111112'})
Traceback (most recent call last):
...
Invalid: ccNumber: You did not enter a valid number of digits
>>> cc().to_python({})
Traceback (most recent call last):
...
Invalid: The field ccType is missing
"""
validate_partial_form = True
cc_type_field = 'ccType'
cc_number_field = 'ccNumber'
__unpackargs__ = ('cc_type_field', 'cc_number_field')
messages = dict(
notANumber=_('Please enter only the number, no other characters'),
badLength=_('You did not enter a valid number of digits'),
invalidNumber=_('That number is not valid'),
missing_key=_('The field %(key)s is missing'))
def validate_partial(self, field_dict, state):
if not field_dict.get(self.cc_type_field, None) \
or not field_dict.get(self.cc_number_field, None):
return None
self._validate_python(field_dict, state)
def _validate_python(self, field_dict, state):
errors = self._validateReturn(field_dict, state)
if errors:
error_list = sorted(errors.iteritems())
raise Invalid(
'<br>\n'.join('%s: %s' % (name, value)
for name, value in error_list),
field_dict, state, error_dict=errors)
def _validateReturn(self, field_dict, state):
for field in self.cc_type_field, self.cc_number_field:
if field not in field_dict:
raise Invalid(
self.message('missing_key', state, key=field),
field_dict, state)
ccType = field_dict[self.cc_type_field].lower().strip()
number = field_dict[self.cc_number_field].strip()
number = number.replace(' ', '')
number = number.replace('-', '')
try:
long(number)
except ValueError:
return {self.cc_number_field: self.message('notANumber', state)}
assert ccType in self._cardInfo, (
"I can't validate that type of credit card")
foundValid = False
validLength = False
for prefix, length in self._cardInfo[ccType]:
if len(number) == length:
validLength = True
if number.startswith(prefix):
foundValid = True
break
if not validLength:
return {self.cc_number_field: self.message('badLength', state)}
if not foundValid:
return {self.cc_number_field: self.message('invalidNumber', state)}
if not self._validateMod10(number):
return {self.cc_number_field: self.message('invalidNumber', state)}
return None
def _validateMod10(self, s):
"""Check string with the mod 10 algorithm (aka "Luhn formula")."""
checksum, factor = 0, 1
for c in reversed(s):
for c in str(factor * int(c)):
checksum += int(c)
factor = 3 - factor
return checksum % 10 == 0
_cardInfo = {
"visa": [('4', 16),
('4', 13)],
"mastercard": [('51', 16),
('52', 16),
('53', 16),
('54', 16),
('55', 16)],
"discover": [('6011', 16)],
"amex": [('34', 15),
('37', 15)],
"dinersclub": [('300', 14),
('301', 14),
('302', 14),
('303', 14),
('304', 14),
('305', 14),
('36', 14),
('38', 14)],
"jcb": [('3', 16),
('2131', 15),
('1800', 15)],
}
class CreditCardExpires(FormValidator):
"""
Checks that credit card expiration date is valid relative to
the current date.
You pass in the name of the field that has the credit card
expiration month and the field with the credit card expiration
year.
::
>>> ed = CreditCardExpires()
>>> sorted(ed.to_python({'ccExpiresMonth': '11', 'ccExpiresYear': '2250'}).items())
[('ccExpiresMonth', '11'), ('ccExpiresYear', '2250')]
>>> ed.to_python({'ccExpiresMonth': '10', 'ccExpiresYear': '2005'})
Traceback (most recent call last):
...
Invalid: ccExpiresMonth: Invalid Expiration Date<br>
ccExpiresYear: Invalid Expiration Date
"""
validate_partial_form = True
cc_expires_month_field = 'ccExpiresMonth'
cc_expires_year_field = 'ccExpiresYear'
__unpackargs__ = ('cc_expires_month_field', 'cc_expires_year_field')
datetime_module = None
messages = dict(
notANumber=_('Please enter numbers only for month and year'),
invalidNumber=_('Invalid Expiration Date'))
def validate_partial(self, field_dict, state):
if not field_dict.get(self.cc_expires_month_field, None) \
or not field_dict.get(self.cc_expires_year_field, None):
return None
self._validate_python(field_dict, state)
def _validate_python(self, field_dict, state):
errors = self._validateReturn(field_dict, state)
if errors:
error_list = sorted(errors.iteritems())
raise Invalid(
'<br>\n'.join('%s: %s' % (name, value)
for name, value in error_list),
field_dict, state, error_dict=errors)
def _validateReturn(self, field_dict, state):
ccExpiresMonth = str(field_dict[self.cc_expires_month_field]).strip()
ccExpiresYear = str(field_dict[self.cc_expires_year_field]).strip()
try:
ccExpiresMonth = int(ccExpiresMonth)
ccExpiresYear = int(ccExpiresYear)
dt_mod = import_datetime(self.datetime_module)
now = datetime_now(dt_mod)
today = datetime_makedate(dt_mod, now.year, now.month, now.day)
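# A card is considered valid through the end of its expiration month,
# so compare today against the first day of the following month.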
next_month = ccExpiresMonth % 12 + 1
next_month_year = ccExpiresYear
if next_month == 1:
next_month_year += 1
expires_date = datetime_makedate(
dt_mod, next_month_year, next_month, 1)
assert expires_date > today
except ValueError:
return {self.cc_expires_month_field:
self.message('notANumber', state),
self.cc_expires_year_field:
self.message('notANumber', state)}
except AssertionError:
return {self.cc_expires_month_field:
self.message('invalidNumber', state),
self.cc_expires_year_field:
self.message('invalidNumber', state)}
class CreditCardSecurityCode(FormValidator):
"""
Checks that credit card security code has the correct number
of digits for the given credit card type.
You pass in the name of the field that has the credit card
type and the field with the credit card security code.
::
>>> code = CreditCardSecurityCode()
>>> sorted(code.to_python({'ccType': 'visa', 'ccCode': '111'}).items())
[('ccCode', '111'), ('ccType', 'visa')]
>>> code.to_python({'ccType': 'visa', 'ccCode': '1111'})
Traceback (most recent call last):
...
Invalid: ccCode: Invalid credit card security code length
"""
validate_partial_form = True
cc_type_field = 'ccType'
cc_code_field = 'ccCode'
__unpackargs__ = ('cc_type_field', 'cc_code_field')
messages = dict(
notANumber=_('Please enter numbers only for credit card security code'),
badLength=_('Invalid credit card security code length'))
def validate_partial(self, field_dict, state):
if (not field_dict.get(self.cc_type_field, None)
or not field_dict.get(self.cc_code_field, None)):
return None
self._validate_python(field_dict, state)
def _validate_python(self, field_dict, state):
errors = self._validateReturn(field_dict, state)
if errors:
            error_list = sorted(errors.items())
raise Invalid(
'<br>\n'.join('%s: %s' % (name, value)
for name, value in error_list),
field_dict, state, error_dict=errors)
def _validateReturn(self, field_dict, state):
ccType = str(field_dict[self.cc_type_field]).strip()
ccCode = str(field_dict[self.cc_code_field]).strip()
try:
int(ccCode)
except ValueError:
return {self.cc_code_field: self.message('notANumber', state)}
length = self._cardInfo[ccType]
if len(ccCode) != length:
return {self.cc_code_field: self.message('badLength', state)}
# key = credit card type, value = length of security code
_cardInfo = dict(visa=3, mastercard=3, discover=3, amex=4)
def validators():
"""Return the names of all validators in this module."""
    return [name for name, value in globals().items()
            if isinstance(value, type) and issubclass(value, Validator)]
__all__ = ['Invalid'] + validators()
| bsd-3-clause | 8,455,634,364,547,294,000 | 34.016839 | 130 | 0.553573 | false |
SINGROUP/pycp2k | pycp2k/classes/_force_matching1.py | 1 | 1399 | from pycp2k.inputsection import InputSection
from ._compare_energies1 import _compare_energies1
from ._compare_forces1 import _compare_forces1
class _force_matching1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Optimize_file_name = None
self.Ref_traj_file_name = None
self.Ref_force_file_name = None
self.Ref_cell_file_name = None
self.Group_size = None
self.Frame_start = None
self.Frame_stop = None
self.Frame_stride = None
self.Frame_count = None
self.Energy_weight = None
self.Shift_average = None
self.Shift_qm = None
self.Shift_mm = None
self.COMPARE_ENERGIES = _compare_energies1()
self.COMPARE_FORCES = _compare_forces1()
self._name = "FORCE_MATCHING"
self._keywords = {'Ref_force_file_name': 'REF_FORCE_FILE_NAME', 'Ref_cell_file_name': 'REF_CELL_FILE_NAME', 'Frame_start': 'FRAME_START', 'Optimize_file_name': 'OPTIMIZE_FILE_NAME', 'Shift_average': 'SHIFT_AVERAGE', 'Ref_traj_file_name': 'REF_TRAJ_FILE_NAME', 'Frame_stride': 'FRAME_STRIDE', 'Shift_qm': 'SHIFT_QM', 'Frame_stop': 'FRAME_STOP', 'Frame_count': 'FRAME_COUNT', 'Shift_mm': 'SHIFT_MM', 'Group_size': 'GROUP_SIZE', 'Energy_weight': 'ENERGY_WEIGHT'}
self._subsections = {'COMPARE_FORCES': 'COMPARE_FORCES', 'COMPARE_ENERGIES': 'COMPARE_ENERGIES'}
| lgpl-3.0 | -7,583,532,691,834,028,000 | 50.814815 | 467 | 0.645461 | false |
travel-intelligence/flasfka | setup.py | 1 | 1608 | # -*- coding: utf-8 -*-
"""
Push/Pull on Kafka over HTTP
"""
from setuptools import setup
from subprocess import check_output as run
from subprocess import CalledProcessError
import sys
import os
from flasfka import __version__
# Travis uploads our releases on pypi when the tests are passing and there
# is a new tag.
#
# When this happens, we want to make sure that the version in the code
# (see flasfka/__init__.py) is in sync with the git tag. This snippet
# performs the check.
if os.getenv("TRAVIS") is not None:
try:
GIT_VERSION = run(["git", "describe"]).decode().strip()
if not GIT_VERSION.startswith(__version__):
sys.exit("The git tag does not match the version. Please fix.")
except CalledProcessError:
pass
setup(
name="flasfka",
version=__version__,
description=__doc__,
long_description=open("README.rst").read(),
author="Christophe-Marie Duquesne",
author_email="[email protected]",
url="https://github.com/travel-intelligence/flasfka",
download_url="https://github.com/travel-intelligence/flasfka/" +
"archive/%s.tar.gz" % __version__,
packages=["flasfka"],
classifiers=[
'Operating System :: POSIX :: Linux',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
install_requires=[
"Flask >= 0.10",
"kafka-python >= 0.9.2",
],
scripts=["flasfka-serve"],
zip_safe=False,
include_package_data=True
)
| mit | -8,993,859,213,536,265,000 | 28.777778 | 75 | 0.629353 | false |
honnibal/spaCy | spacy/displacy/templates.py | 1 | 2571 | # coding: utf8
from __future__ import unicode_literals
# Setting explicit height and max-width: none on the SVG is required for
# Jupyter to render it properly in a cell
TPL_DEP_SVG = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="{lang}" id="{id}" class="displacy" width="{width}" height="{height}" direction="{dir}" style="max-width: none; height: {height}px; color: {color}; background: {bg}; font-family: {font}; direction: {dir}">{content}</svg>
"""
TPL_DEP_WORDS = """
<text class="displacy-token" fill="currentColor" text-anchor="middle" y="{y}">
<tspan class="displacy-word" fill="currentColor" x="{x}">{text}</tspan>
<tspan class="displacy-tag" dy="2em" fill="currentColor" x="{x}">{tag}</tspan>
</text>
"""
TPL_DEP_ARCS = """
<g class="displacy-arrow">
<path class="displacy-arc" id="arrow-{id}-{i}" stroke-width="{stroke}px" d="{arc}" fill="none" stroke="currentColor"/>
<text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px">
<textPath xlink:href="#arrow-{id}-{i}" class="displacy-label" startOffset="50%" side="{label_side}" fill="currentColor" text-anchor="middle">{label}</textPath>
</text>
<path class="displacy-arrowhead" d="{head}" fill="currentColor"/>
</g>
"""
TPL_FIGURE = """
<figure style="margin-bottom: 6rem">{content}</figure>
"""
TPL_TITLE = """
<h2 style="margin: 0">{title}</h2>
"""
TPL_ENTS = """
<div class="entities" style="line-height: 2.5; direction: {dir}">{content}</div>
"""
TPL_ENT = """
<mark class="entity" style="background: {bg}; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em;">
{text}
<span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">{label}</span>
</mark>
"""
TPL_ENT_RTL = """
<mark class="entity" style="background: {bg}; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em">
{text}
<span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-right: 0.5rem">{label}</span>
</mark>
"""
TPL_PAGE = """
<!DOCTYPE html>
<html lang="{lang}">
<head>
<title>displaCy</title>
</head>
<body style="font-size: 16px; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; padding: 4rem 2rem; direction: {dir}">{content}</body>
</html>
"""
| mit | 8,251,126,806,409,686,000 | 35.728571 | 312 | 0.649553 | false |
pualxiao/pyenv-installer | setup.py | 1 | 1030 | import distutils.core
from distutils.command.install import install
import subprocess
class PyenvInstall(install):
def run(self):
print(subprocess.check_output(['bash', 'bin/pyenv-installer']))
with open('README.rst') as file:
long_description = file.read()
distutils.core.setup(
version='20150113',
name='pyenv',
author='Yamashita, Yuu',
url='https://github.com/yyuu/pyenv-installer',
description='Tool to install pyenv and friends',
long_description=long_description,
license='MIT',
platforms=['UNIX'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Unix Shell',
'Topic :: Software Development :: Interpreters',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
cmdclass=dict(install=PyenvInstall),
)
| mit | -7,404,762,627,322,683,000 | 29.294118 | 71 | 0.647573 | false |
encukou/freeipa | ipatests/test_webui/test_group.py | 3 | 15435 | # Authors:
# Petr Vobornik <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Group tests
"""
from ipatests.test_webui.ui_driver import UI_driver
from ipatests.test_webui.ui_driver import screenshot
import ipatests.test_webui.data_group as group
import ipatests.test_webui.data_user as user
import ipatests.test_webui.data_netgroup as netgroup
import ipatests.test_webui.data_hbac as hbac
import ipatests.test_webui.test_rbac as rbac
import ipatests.test_webui.data_sudo as sudo
import pytest
try:
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
except ImportError:
pass
@pytest.mark.tier1
class test_group(UI_driver):
@screenshot
def test_crud(self):
"""
Basic CRUD: group
"""
self.init_app()
self.basic_crud(group.ENTITY, group.DATA,
default_facet=group.DEFAULT_FACET)
@screenshot
def test_group_types(self):
"""
Test group types in adder dialog
"""
self.init_app()
pkey = 'itest-group'
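        # Each 'add' step below is a (widget type, field name, value) tuple
        # replayed by the UI_driver harness; 'callback' entries call the given
        # function with the value as its argument.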
data = {
'pkey': pkey,
'add': [
('callback', self.check_posix_enabled, True),
('textbox', 'cn', pkey),
('textarea', 'description', 'test-group desc'),
('radio', 'type', 'nonposix'),
('callback', self.check_posix_enabled, False),
('radio', 'type', 'posix'),
('callback', self.check_posix_enabled, True),
('radio', 'type', 'external'),
('callback', self.check_posix_enabled, False),
('radio', 'type', 'posix'),
('callback', self.check_posix_enabled, True),
],
}
self.add_record(group.ENTITY, data)
self.delete(group.ENTITY, [data], navigate=False)
def check_posix_enabled(self, enabled):
self.assert_disabled("[name=gidnumber]", negative=enabled)
@screenshot
def test_add_group_negative(self):
"""
Negative test for adding groups
"""
self.init_app()
self.empty_group_name()
self.invalid_group_name()
self.duplicate_group_name()
self.tailing_spaces_in_group_description()
self.leading_spaces_in_group_description()
def empty_group_name(self):
self.navigate_to_entity(group.ENTITY)
self.facet_button_click('add')
self.dialog_button_click('add')
elem = self.find(".widget[name='cn']")
self.assert_field_validation_required(elem)
self.dialog_button_click('cancel')
def invalid_group_name(self):
expected_error = 'may only include letters, numbers, _, -, . and $'
pkey = ';test-gr@up'
self.navigate_to_entity(group.ENTITY)
self.facet_button_click('add')
self.fill_input('cn', pkey)
elem = self.find(".widget[name='cn']")
self.assert_field_validation(expected_error, parent=elem)
self.dialog_button_click('cancel')
def duplicate_group_name(self):
pkey = 'editors'
expected_error = 'group with name "editors" already exists'
self.navigate_to_entity(group.ENTITY)
self.facet_button_click('add')
self.fill_input('cn', pkey)
self.cancel_retry_dialog(expected_error)
def tailing_spaces_in_group_description(self):
pkey = 'itest_group0'
desc = 'with_trailing_space '
expected_error = 'invalid \'desc\': Leading and trailing ' \
'spaces are not allowed'
self.navigate_to_entity(group.ENTITY)
self.facet_button_click('add')
self.fill_input('cn', pkey)
self.fill_textarea('description', desc)
self.cancel_retry_dialog(expected_error)
def leading_spaces_in_group_description(self):
pkey = 'itest_group0'
desc = ' with_leading_space'
expected_error = 'invalid \'desc\': Leading and trailing' \
' spaces are not allowed'
self.navigate_to_entity(group.ENTITY)
self.facet_button_click('add')
self.fill_input('cn', pkey)
self.fill_textarea('description', desc)
self.cancel_retry_dialog(expected_error)
def cancel_retry_dialog(self, expected_error):
self.dialog_button_click('add')
dialog = self.get_last_error_dialog()
assert (expected_error in dialog.text)
self.wait_for_request()
# Key press for Retry
actions = ActionChains(self.driver)
actions.send_keys(Keys.ENTER).perform()
self.wait_for_request(n=2)
self.dialog_button_click('cancel')
self.wait_for_request(n=2)
self.dialog_button_click('cancel')
@screenshot
def test_add_multiple_group(self):
"""
Use 'add and add another' button to create multiple groups at one shot
"""
self.init_app()
# adding a POSIX and a Non-POSIX group
self.add_record(group.ENTITY, [group.DATA, group.DATA2])
# adding Two Non-POSIX groups
self.add_record(group.ENTITY, [group.DATA9, group.DATA10])
# adding Two POSIX groups
self.add_record(group.ENTITY, [group.DATA5, group.DATA6])
# delete multiple records
records = [group.DATA, group.DATA2, group.DATA5, group.DATA6,
group.DATA9, group.DATA10]
self.select_multiple_records(records)
self.facet_button_click('remove')
self.dialog_button_click('ok')
@screenshot
def test_add_and_edit_group(self):
"""
1. add and switch to edit mode
2. add and cancel
"""
self.init_app()
# add and edit record
self.add_record(group.ENTITY, group.DATA, dialog_btn='add_and_edit')
self.switch_to_facet('details')
self.delete_action()
# add then cancel
self.add_record(group.ENTITY, group.DATA, dialog_btn='cancel')
@screenshot
def test_actions(self):
"""
Test group actions
"""
self.init_app()
self.add_record(group.ENTITY, group.DATA)
self.navigate_to_record(group.PKEY)
self.switch_to_facet('details')
self.make_posix_action()
self.delete_action()
self.add_record(group.ENTITY, group.DATA, navigate=False)
self.navigate_to_record(group.PKEY)
self.switch_to_facet('details')
self.facet_button_click('refresh') # workaround for BUG: #3702
self.make_external_action()
self.delete_action()
def make_external_action(self):
self.action_list_action('make_external')
self.wait_for_request(n=2)
self.assert_no_error_dialog()
self.assert_text_field('external', 'External', element='span')
def make_posix_action(self):
self.action_list_action('make_posix')
self.wait_for_request(n=2)
self.assert_no_error_dialog()
self.assert_text_field('external', 'POSIX', element='span')
def delete_action(self, entity=group.ENTITY, pkey=group.PKEY):
self.action_list_action('delete')
self.wait_for_request(n=4)
self.assert_no_error_dialog()
self.assert_facet(entity, 'search')
self.assert_record(pkey, negative=True)
@screenshot
def test_associations(self):
"""
Test group associations
"""
self.init_app()
# prepare
# -------
self.add_record(group.ENTITY, [group.DATA, group.DATA2, group.DATA3])
self.add_record(user.ENTITY, [user.DATA, user.DATA2])
self.add_record(netgroup.ENTITY, [netgroup.DATA, netgroup.DATA2])
self.add_record(rbac.ROLE_ENTITY, rbac.ROLE_DATA)
self.add_record(hbac.RULE_ENTITY, hbac.RULE_DATA)
self.add_record(sudo.RULE_ENTITY, sudo.RULE_DATA)
# add & remove associations
# -------------------------
self.navigate_to_record(group.PKEY, entity=group.ENTITY)
# "members" add with multiple select
self.add_associations([group.PKEY2, group.PKEY3], facet='member_group',
delete=True)
self.add_associations([user.PKEY, user.PKEY2], facet='member_user',
delete=True)
# TODO: external
# "member of": add with search
self.add_associations([group.PKEY3, group.PKEY2],
facet='memberof_group', delete=True, search=True)
self.add_associations([netgroup.PKEY, netgroup.PKEY2],
facet='memberof_netgroup',
delete=True, search=True)
self.add_associations([rbac.ROLE_PKEY], facet='memberof_role',
delete=True)
self.add_associations([hbac.RULE_PKEY], facet='memberof_hbacrule',
delete=True)
self.navigate_to_record(group.PKEY, entity=group.ENTITY)
self.add_associations([sudo.RULE_PKEY], facet='memberof_sudorule',
delete=True, search=True)
# cleanup
# -------
self.delete(group.ENTITY, [group.DATA, group.DATA2, group.DATA3])
self.delete(user.ENTITY, [user.DATA, user.DATA2])
self.delete(netgroup.ENTITY, [netgroup.DATA, netgroup.DATA2])
self.delete(rbac.ROLE_ENTITY, [rbac.ROLE_DATA])
self.delete(hbac.RULE_ENTITY, [hbac.RULE_DATA])
self.delete(sudo.RULE_ENTITY, [sudo.RULE_DATA])
@screenshot
def test_indirect_associations(self):
"""
Group indirect associations
"""
self.init_app()
# add
# ---
self.add_record(group.ENTITY, [group.DATA, group.DATA2, group.DATA3,
group.DATA4, group.DATA5])
self.add_record(user.ENTITY, user.DATA)
# prepare indirect member
self.navigate_to_entity(group.ENTITY, 'search')
self.navigate_to_record(group.PKEY2)
self.add_associations([user.PKEY])
self.add_associations([group.PKEY3], 'member_group')
self.navigate_to_entity(group.ENTITY, 'search')
self.navigate_to_record(group.PKEY)
self.add_associations([group.PKEY2], 'member_group')
# prepare indirect memberof
self.navigate_to_entity(group.ENTITY, 'search')
self.navigate_to_record(group.PKEY4)
self.add_associations([group.PKEY], 'member_group')
self.add_associations([group.PKEY5], 'memberof_group')
self.add_record(netgroup.ENTITY, netgroup.DATA)
self.navigate_to_record(netgroup.PKEY)
self.add_table_associations('memberuser_group', [group.PKEY4])
self.add_record(rbac.ROLE_ENTITY, rbac.ROLE_DATA)
self.navigate_to_record(rbac.ROLE_PKEY)
self.add_associations([group.PKEY4], facet='member_group')
self.add_record(hbac.RULE_ENTITY, hbac.RULE_DATA)
self.navigate_to_record(hbac.RULE_PKEY)
self.add_table_associations('memberuser_group', [group.PKEY4])
self.add_record(sudo.RULE_ENTITY, sudo.RULE_DATA)
self.navigate_to_record(sudo.RULE_PKEY)
self.add_table_associations('memberuser_group', [group.PKEY4])
# check indirect associations
# ---------------------------
self.navigate_to_entity(group.ENTITY, 'search')
self.navigate_to_record(group.PKEY)
self.assert_indirect_record(user.PKEY, group.ENTITY, 'member_user')
self.assert_indirect_record(group.PKEY3, group.ENTITY, 'member_group')
self.assert_indirect_record(group.PKEY5, group.ENTITY,
'memberof_group')
self.assert_indirect_record(netgroup.PKEY, group.ENTITY,
'memberof_netgroup')
self.assert_indirect_record(rbac.ROLE_PKEY, group.ENTITY,
'memberof_role')
self.assert_indirect_record(hbac.RULE_PKEY, group.ENTITY,
'memberof_hbacrule')
self.assert_indirect_record(sudo.RULE_PKEY, group.ENTITY,
'memberof_sudorule')
# cleanup
# -------
self.delete(group.ENTITY, [group.DATA, group.DATA2, group.DATA3,
group.DATA4, group.DATA5])
self.delete(user.ENTITY, [user.DATA])
self.delete(netgroup.ENTITY, [netgroup.DATA])
self.delete(rbac.ROLE_ENTITY, [rbac.ROLE_DATA])
self.delete(hbac.RULE_ENTITY, [hbac.RULE_DATA])
self.delete(sudo.RULE_ENTITY, [sudo.RULE_DATA])
@screenshot
def test_member_manager_user(self):
"""
Test member manager user has permissions to add and remove group
members
"""
self.init_app()
self.add_record(user.ENTITY, [user.DATA_MEMBER_MANAGER, user.DATA])
self.add_record(group.ENTITY, group.DATA2)
self.navigate_to_record(group.PKEY2)
self.add_associations([user.PKEY_MEMBER_MANAGER],
facet='membermanager_user')
# try to add user to group with member manager permissions
self.logout()
self.login(user.PKEY_MEMBER_MANAGER, user.PASSWD_MEMBER_MANAGER)
self.navigate_to_record(group.PKEY2, entity=group.ENTITY)
self.add_associations([user.PKEY], delete=True)
# re-login as admin and clean up data
self.logout()
self.init_app()
self.delete(user.ENTITY, [user.DATA_MEMBER_MANAGER, user.DATA])
self.delete(group.ENTITY, [group.DATA2])
@screenshot
def test_member_manager_group(self):
"""
Test member managers group has permissions to add and remove group
members
"""
self.init_app()
self.add_record(user.ENTITY, [user.DATA_MEMBER_MANAGER, user.DATA])
self.add_record(group.ENTITY, [group.DATA2, group.DATA3])
self.navigate_to_record(group.PKEY2)
self.add_associations([user.PKEY_MEMBER_MANAGER], facet='member_user')
self.navigate_to_record(group.PKEY3, entity=group.ENTITY)
self.add_associations([group.PKEY2], facet='membermanager_group')
# try to add host to group with member manager permissions
self.logout()
self.login(user.PKEY_MEMBER_MANAGER, user.PASSWD_MEMBER_MANAGER)
self.navigate_to_record(group.PKEY3, entity=group.ENTITY)
self.add_associations([user.PKEY], delete=True)
# re-login as admin and clean up data
self.logout()
self.init_app()
self.delete(user.ENTITY, [user.DATA_MEMBER_MANAGER, user.DATA])
self.delete(group.ENTITY, [group.DATA2, group.DATA3])
| gpl-3.0 | -2,374,140,517,396,858,400 | 35.925837 | 79 | 0.606997 | false |
archlinux/archweb | news/views.py | 1 | 2543 | from django import forms
from django.conf import settings
from django.core.mail import EmailMessage
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
from django.views.decorators.http import require_POST
from django.views.generic import DetailView, ListView, CreateView, UpdateView, DeleteView
from .models import News
from main.utils import find_unique_slug, parse_markdown
class NewsForm(forms.ModelForm):
class Meta:
model = News
exclude = ('id', 'slug', 'author', 'postdate', 'safe_mode')
class NewsDetailView(DetailView):
queryset = News.objects.all().select_related('author')
template_name = "news/view.html"
class NewsListView(ListView):
queryset = News.objects.all().select_related('author').defer('content')
template_name = "news/list.html"
paginate_by = 50
class NewsCreateView(CreateView):
model = News
form_class = NewsForm
template_name = "news/add.html"
def form_valid(self, form):
# special logic, we auto-fill the author and slug fields
newsitem = form.save(commit=False)
newsitem.author = self.request.user
newsitem.slug = find_unique_slug(News, newsitem.title)
newsitem.save()
if newsitem.send_announce:
ctx = {
'news': newsitem,
}
headers = dict()
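            # A Mailman moderated list delivers a post without manual approval
            # when it carries an Approved: <list password> header.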
if settings.MAILMAN_PASSWORD:
headers['Approved'] = settings.MAILMAN_PASSWORD
template = loader.get_template('news/news_email_notification.txt')
EmailMessage(
subject=f'[arch-announce] {newsitem.title}',
body=template.render(ctx),
from_email=f'"Arch Linux: Recent news updates: {newsitem.author.get_full_name()}" <{settings.ANNOUNCE_EMAIL}>',
to=[settings.ANNOUNCE_EMAIL],
headers=headers).send()
return super(NewsCreateView, self).form_valid(form)
class NewsEditView(UpdateView):
model = News
form_class = NewsForm
template_name = "news/add.html"
class NewsDeleteView(DeleteView):
model = News
template_name = "news/delete.html"
success_url = "/news/"
def view_redirect(request, object_id):
newsitem = get_object_or_404(News, pk=object_id)
return redirect(newsitem, permanent=True)
@require_POST
def preview(request):
data = request.POST.get('data', '')
markup = parse_markdown(data)
return HttpResponse(markup)
# vim: set ts=4 sw=4 et:
| gpl-2.0 | 3,187,957,363,548,591,000 | 30.012195 | 127 | 0.66221 | false |
ChenglongChen/Kaggle_HomeDepot | Code/Chenglong/get_feature_conf_linear_stacking.py | 1 | 7469 | # -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <[email protected]>
@brief: generate feature conf for the following models (most of which are linear models)
- reg_skl_ridge
- reg_skl_bayesian_ridge
- reg_skl_lasso
- reg_skl_lsvr
- reg_xgb_linear
- reg_keras_dnn (nonlinear models)
@note:
- such features DO NOT INCLUDE "DocId_(search_term|product_title|product_color|product_brand)"
- one can tune the MANDATORY_FEATS and COMMENT_OUT_FEATS to generate different feature subset
"""
import re
import os
from optparse import OptionParser
import config
from utils import time_utils
INCLUDE_FEATS = [
".+"
]
COUNT_FEATS = [
"Freq",
"Len",
"Count",
"Size",
"Position",
]
# COUNT_FEATS = []
NOT_COUNT_FEATS = ["Norm", "Ratio"]
MANDATORY_FEATS = [
# # including product_uid according to
# # https://www.kaggle.com/c/home-depot-product-search-relevance/forums/t/20288/trends-in-relevances-by-row-ids/115886#post115886
# "DocIdEcho_product_uid",
# "ProductUidDummy1_product_uid",
# "ProductUidDummy2_product_uid",
# "IsInGoogleDict",
# "GroupRelevance_Size",
# "TSNE",
]
COMMENT_OUT_FEATS = [
#-------------- General --------------
"search_term_alt",
"Bigram",
"Trigram",
"UBgram",
"UBTgram",
"Median",
"Std",
".+(Bigram|Trigram)_.+_product_(brand|color)",
"TSNE",
#-------------- Basic --------------
"DocLogFreq",
"Digit",
"Unique",
"^DocIdOneHot",
"^DocId",
"DocLen_product_(brand|color)",
"DocLen_product_attribute_1D",
"DocFreq_product_description_1D",
"DocFreq_product_attribute_1D",
"Digit(Count|Ratio)_product_(brand|color)",
"Doc(Entropy|Len)_product_(brand|color)",
"Unique(Count|Ratio)_.+_product_(brand|color)",
#-------------- Distance --------------
"DiceDistance",
# "EditDistance",
"Compression",
#-------------- First and Last Ngram --------------
"FirstIntersectNormPosition",
"FirstIntersectPosition",
"LastIntersectNormPosition",
"LastIntersectPosition",
#-------------- Group --------------
"GroupRelevance_(Mean|Std|Max|Min|Median)",
"Group_\d+",
"GroupDistanceStat",
#-------------- Intersect Count & Position --------------
"IntersectPosition_.+_(Std|Max|Min|Median)",
"IntersectNormPosition_.+_(Std|Max|Min|Median)",
#-------------- Match --------------
"LongestMatchSize",
#-------------- StatCooc --------------
# since product_name is of length 2, it makes no difference for various aggregation as there is only one item
"StatCooc(TF|NormTF|TFIDF|NormTFIDF|BM25)_Bigram_(Std|Max|Min|Median)_search_term_product_name_x_product_title_product_name_1D",
"StatCooc(TF|NormTF|TFIDF|NormTFIDF|BM25)_Bigram_(Std|Max|Min|Median)_product_title_product_name_x_search_term_product_name_1D",
"NormTF",
"NormTFIDF",
#-------------- Vector Space --------------
# as TFIDF_Word_Trigram has the largest corr
"LSA\d+_Word_Unigram",
"LSA\d+_Word_Bigram",
"TFIDF_Word_Unigram",
"TFIDF_Word_Bigram",
# as TFIDF_Char_Fourgram has the largest corr
"LSA\d+_Char_Bigram",
"LSA\d+_Char_Trigram",
"LSA\d+_Char_Fivegram",
"TFIDF_Char_Bigram",
"TFIDF_Char_Trigram",
"TFIDF_Char_Fivegram",
"CharDistribution_Ratio",
#-------------- Word2Vec & Doc2Vec --------------
"_Vector_",
"_Vdiff_",
"Word2Vec_Wikipedia_D50",
"Word2Vec_Wikipedia_D100",
"Word2Vec_Wikipedia_D200",
# "Word2Vec_GoogleNews",
"Word2Vec_GoogleNews_D300_Vector",
# as all the words are used to train the model
"Word2Vec_Homedepot_D100_Importance",
"Word2Vec_Homedepot_D100_N_Similarity_Imp",
#-------------- Turing Test --------------
# d = {
# "df_basic_features.csv": "Basic",
# "df_brand_material_dummies.csv": "BrandMaterialDummy",
# "df_dist_new.csv": "Dist",
# "dld_features.csv": "DLD",
# "df_st_tfidf.csv": "StTFIDF",
# "df_tfidf_intersept_new.csv": "TFIDF",
# "df_thekey_dummies.csv": "TheKeyDummy",
# "df_word2vec_new.csv": "Word2Vec",
# }
# "TuringTest_Basic",
# "TuringTest_BrandMaterialDummy",
# "TuringTest_Dist",
# "TuringTest_DLD",
# "TuringTest_StTFIDF",
# "TuringTest_TFIDF",
# "TuringTest_TheKeyDummy",
# "TuringTest_Word2Vec",
]
def _check_include(fname):
for v in INCLUDE_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_count_feat(fname):
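    # NOT_COUNT_FEATS takes precedence: a name matching both lists (e.g. one
    # containing both "Count" and "Ratio") is treated as a non-count feature.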
for v in NOT_COUNT_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return False
for v in COUNT_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_lsa_matrix(fname):
pat = re.compile("^LSA")
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_mandatory(fname):
for v in MANDATORY_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_comment_out(fname):
for v in COMMENT_OUT_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
header_pattern = """
# -*- coding: utf-8 -*-
\"\"\"
@author: Chenglong Chen <[email protected]>
@brief: one feature conf
Generated by
python %s -d %d -o %s
Format:
FEATURE_NAME : (MANDATORY, TRANSFORM)
\"\"\"
import config
from feature_transformer import SimpleTransform, ColumnSelector
LSA_COLUMNS = range(%d)
feature_dict = {
"""
def _create_feature_conf(lsa_columns, outfile):
res = header_pattern%(__file__, int(lsa_columns), outfile, int(lsa_columns))
folders = [config.FEAT_DIR, config.FEAT_DIR+"/All"]
for folder in folders:
try:
for file in sorted(os.listdir(folder)):
if config.FEAT_FILE_SUFFIX in file:
fname = file.split(".")[0]
if _check_include(fname):
line = ""
mandatory = _check_mandatory(fname)
if not mandatory and _check_comment_out(fname):
continue
line += "# "
line += "'%s' : "%fname
if mandatory:
line += "(True, "
else:
line += "(False, "
if _check_lsa_matrix(fname):
if int(lsa_columns) > 0:
line += "ColumnSelector(LSA_COLUMNS)),\n"
else:
continue
elif _check_count_feat(fname):
line += "SimpleTransform(config.COUNT_TRANSFORM)),\n"
else:
line += "SimpleTransform()),\n"
res += line
        except OSError:
            # a feature folder may not exist yet; skip it
            pass
res += "}\n"
with open(os.path.join(config.FEAT_CONF_DIR, outfile), "w") as f:
f.write(res)
def parse_args(parser):
parser.add_option("-d", "--dim", default=1, type=int, dest="lsa_columns",
help="lsa_columns")
parser.add_option("-o", "--outfile", default="feature_conf_%s.py"%time_utils._timestamp(),
type="string", dest="outfile", help="outfile")
(options, args) = parser.parse_args()
return options, args
def main(options):
_create_feature_conf(lsa_columns=options.lsa_columns, outfile=options.outfile)
if __name__ == "__main__":
parser = OptionParser()
options, args = parse_args(parser)
main(options)
| mit | -3,370,424,729,518,298,600 | 23.569079 | 129 | 0.579863 | false |
gwax/nikola | nikola/plugins/misc/taxonomies_classifier.py | 1 | 18628 | # -*- coding: utf-8 -*-
# Copyright © 2012-2017 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Render the taxonomy overviews, classification pages and feeds."""
import blinker
import functools
import natsort
import os
import sys
from collections import defaultdict
from nikola.plugin_categories import SignalHandler
from nikola import utils, hierarchy_utils
class TaxonomiesClassifier(SignalHandler):
"""Classify posts and pages by taxonomies."""
name = "classify_taxonomies"
def _do_classification(self, site):
# Needed to avoid strange errors during tests
if site is not self.site:
return
# Get list of enabled taxonomy plugins and initialize data structures
taxonomies = site.taxonomy_plugins.values()
site.posts_per_classification = {}
for taxonomy in taxonomies:
site.posts_per_classification[taxonomy.classification_name] = {
lang: defaultdict(set) for lang in site.config['TRANSLATIONS'].keys()
}
# Classify posts
for post in site.timeline:
# Do classify pages, but don’t classify posts that are hidden
# (draft/private/future)
if post.is_post and not post.use_in_feeds:
continue
for taxonomy in taxonomies:
if taxonomy.apply_to_posts if post.is_post else taxonomy.apply_to_pages:
classifications = {}
for lang in site.config['TRANSLATIONS'].keys():
# Extract classifications for this language
classifications[lang] = taxonomy.classify(post, lang)
if not taxonomy.more_than_one_classifications_per_post and len(classifications[lang]) > 1:
raise ValueError("Too many {0} classifications for post {1}".format(taxonomy.classification_name, post.source_path))
# Add post to sets
for classification in classifications[lang]:
while True:
site.posts_per_classification[taxonomy.classification_name][lang][classification].add(post)
if not taxonomy.include_posts_from_subhierarchies or not taxonomy.has_hierarchy:
break
classification_path = taxonomy.extract_hierarchy(classification)
if len(classification_path) <= 1:
if len(classification_path) == 0 or not taxonomy.include_posts_into_hierarchy_root:
break
classification = taxonomy.recombine_classification_from_hierarchy(classification_path[:-1])
# Sort everything.
site.page_count_per_classification = {}
site.hierarchy_per_classification = {}
site.flat_hierarchy_per_classification = {}
site.hierarchy_lookup_per_classification = {}
for taxonomy in taxonomies:
site.page_count_per_classification[taxonomy.classification_name] = {}
# Sort post lists
for lang, posts_per_classification in site.posts_per_classification[taxonomy.classification_name].items():
# Ensure implicit classifications are inserted
for classification in taxonomy.get_implicit_classifications(lang):
if classification not in posts_per_classification:
posts_per_classification[classification] = []
site.page_count_per_classification[taxonomy.classification_name][lang] = {}
# Convert sets to lists and sort them
for classification in list(posts_per_classification.keys()):
posts = list(posts_per_classification[classification])
posts = self.site.sort_posts_chronologically(posts, lang)
taxonomy.sort_posts(posts, classification, lang)
posts_per_classification[classification] = posts
# Create hierarchy information
if taxonomy.has_hierarchy:
site.hierarchy_per_classification[taxonomy.classification_name] = {}
site.flat_hierarchy_per_classification[taxonomy.classification_name] = {}
site.hierarchy_lookup_per_classification[taxonomy.classification_name] = {}
for lang, posts_per_classification in site.posts_per_classification[taxonomy.classification_name].items():
# Compose hierarchy
hierarchy = {}
for classification in posts_per_classification.keys():
hier = taxonomy.extract_hierarchy(classification)
node = hierarchy
for he in hier:
if he not in node:
node[he] = {}
node = node[he]
hierarchy_lookup = {}
def create_hierarchy(hierarchy, parent=None, level=0):
"""Create hierarchy."""
result = {}
for name, children in hierarchy.items():
node = hierarchy_utils.TreeNode(name, parent)
node.children = create_hierarchy(children, node, level + 1)
node.classification_path = [pn.name for pn in node.get_path()]
node.classification_name = taxonomy.recombine_classification_from_hierarchy(node.classification_path)
hierarchy_lookup[node.classification_name] = node
result[node.name] = node
classifications = natsort.natsorted(result.keys(), alg=natsort.ns.F | natsort.ns.IC)
taxonomy.sort_classifications(classifications, lang, level=level)
return [result[classification] for classification in classifications]
root_list = create_hierarchy(hierarchy)
if '' in posts_per_classification:
node = hierarchy_utils.TreeNode('', parent=None)
node.children = root_list
node.classification_path = []
node.classification_name = ''
hierarchy_lookup[node.name] = node
root_list = [node]
flat_hierarchy = hierarchy_utils.flatten_tree_structure(root_list)
# Store result
site.hierarchy_per_classification[taxonomy.classification_name][lang] = root_list
site.flat_hierarchy_per_classification[taxonomy.classification_name][lang] = flat_hierarchy
site.hierarchy_lookup_per_classification[taxonomy.classification_name][lang] = hierarchy_lookup
taxonomy.postprocess_posts_per_classification(site.posts_per_classification[taxonomy.classification_name],
site.flat_hierarchy_per_classification[taxonomy.classification_name],
site.hierarchy_lookup_per_classification[taxonomy.classification_name])
else:
taxonomy.postprocess_posts_per_classification(site.posts_per_classification[taxonomy.classification_name])
# Check for valid paths and for collisions
taxonomy_outputs = {lang: dict() for lang in site.config['TRANSLATIONS'].keys()}
quit = False
for taxonomy in taxonomies:
# Check for collisions (per language)
for lang in site.config['TRANSLATIONS'].keys():
if not taxonomy.is_enabled(lang):
continue
for tlang in site.config['TRANSLATIONS'].keys():
if lang != tlang and not taxonomy.also_create_classifications_from_other_languages:
continue
for classification, posts in site.posts_per_classification[taxonomy.classification_name][tlang].items():
# Obtain path as tuple
path = site.path_handlers[taxonomy.classification_name](classification, lang)
# Check that path is OK
for path_element in path:
if len(path_element) == 0:
utils.LOGGER.error("{0} {1} yields invalid path '{2}'!".format(taxonomy.classification_name.title(), classification, '/'.join(path)))
quit = True
# Combine path
path = os.path.join(*[os.path.normpath(p) for p in path if p != '.'])
# Determine collisions
if path in taxonomy_outputs[lang]:
other_classification_name, other_classification, other_posts = taxonomy_outputs[lang][path]
if other_classification_name == taxonomy.classification_name and other_classification == classification:
taxonomy_outputs[lang][path][2].extend(posts)
else:
utils.LOGGER.error('You have classifications that are too similar: {0} "{1}" and {2} "{3}" both result in output path {4} for language {5}.'.format(
taxonomy.classification_name, classification, other_classification_name, other_classification, path, lang))
utils.LOGGER.error('{0} {1} is used in: {2}'.format(
taxonomy.classification_name.title(), classification, ', '.join(sorted([p.source_path for p in posts]))))
utils.LOGGER.error('{0} {1} is used in: {2}'.format(
other_classification_name.title(), other_classification, ', '.join(sorted([p.source_path for p in other_posts]))))
quit = True
else:
taxonomy_outputs[lang][path] = (taxonomy.classification_name, classification, list(posts))
if quit:
sys.exit(1)
blinker.signal('taxonomies_classified').send(site)
def _get_filtered_list(self, taxonomy, classification, lang):
"""Return the filtered list of posts for this classification and language."""
post_list = self.site.posts_per_classification[taxonomy.classification_name][lang].get(classification, [])
if self.site.config["SHOW_UNTRANSLATED_POSTS"]:
return post_list
else:
return [x for x in post_list if x.is_translation_available(lang)]
@staticmethod
def _compute_number_of_pages(filtered_posts, posts_count):
"""Given a list of posts and the maximal number of posts per page, computes the number of pages needed."""
        return max(1, (len(filtered_posts) + posts_count - 1) // posts_count)  # ceiling division; at least one page
def _postprocess_path(self, path, lang, append_index='auto', dest_type='page', page_info=None, alternative_path=False):
"""Postprocess a generated path.
Takes the path `path` for language `lang`, and postprocesses it.
It appends `site.config['INDEX_FILE']` depending on `append_index`
(which can have the values `'always'`, `'never'` and `'auto'`) and
`site.config['PRETTY_URLS']`.
        It also modifies/adds the extension of the last path element (or of
        `site.config['INDEX_FILE']`) depending on `dest_type`, which can be
        `'feed'`, `'rss'` or `'page'`.
If `dest_type` is `'page'`, `page_info` can be `None` or a tuple
of two integers: the page number and the number of pages. This will
be used to append the correct page number by calling
`utils.adjust_name_for_index_path_list` and
`utils.get_displayed_page_number`.
If `alternative_path` is set to `True`, `utils.adjust_name_for_index_path_list`
is called with `force_addition=True`, resulting in an alternative path for the
first page of an index or Atom feed by including the page number into the path.
"""
# Forcing extension for Atom feeds and RSS feeds
force_extension = None
if dest_type == 'feed':
force_extension = '.atom'
elif dest_type == 'rss':
force_extension = '.xml'
# Determine how to extend path
path = [_f for _f in path if _f]
if force_extension is not None:
if len(path) == 0 and dest_type == 'rss':
path = ['rss']
elif len(path) == 0 or append_index == 'always':
path = path + [os.path.splitext(self.site.config['INDEX_FILE'])[0]]
elif len(path) > 0 and append_index == 'never':
path[-1] = os.path.splitext(path[-1])[0]
path[-1] += force_extension
elif (self.site.config['PRETTY_URLS'] and append_index != 'never') or len(path) == 0 or append_index == 'always':
path = path + [self.site.config['INDEX_FILE']]
elif append_index != 'never':
path[-1] += '.html'
# Create path
result = [_f for _f in [self.site.config['TRANSLATIONS'][lang]] + path if _f]
if page_info is not None and dest_type in ('page', 'feed'):
result = utils.adjust_name_for_index_path_list(result,
page_info[0],
utils.get_displayed_page_number(page_info[0], page_info[1], self.site),
lang,
self.site, force_addition=alternative_path, extension=force_extension)
return result
@staticmethod
def _parse_path_result(result):
"""Interpret the return values of taxonomy.get_path() and taxonomy.get_overview_path() as if all three return values were given."""
if not isinstance(result[0], (list, tuple)):
# The result must be a list or tuple of strings. Wrap into a tuple
result = (result, )
path = result[0]
append_index = result[1] if len(result) > 1 else 'auto'
page_info = result[2] if len(result) > 2 else None
return path, append_index, page_info
def _taxonomy_index_path(self, name, lang, taxonomy):
"""Return path to the classification overview."""
result = taxonomy.get_overview_path(lang)
path, append_index, _ = self._parse_path_result(result)
return self._postprocess_path(path, lang, append_index=append_index, dest_type='list')
def _taxonomy_path(self, name, lang, taxonomy, dest_type='page', page=None, alternative_path=False):
"""Return path to a classification."""
if taxonomy.has_hierarchy:
result = taxonomy.get_path(taxonomy.extract_hierarchy(name), lang, dest_type=dest_type)
else:
result = taxonomy.get_path(name, lang, dest_type=dest_type)
path, append_index, page_ = self._parse_path_result(result)
if page is not None:
page = int(page)
else:
page = page_
page_info = None
if taxonomy.show_list_as_index and page is not None:
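            # Cache the page count per classification so later path lookups for
            # the same classification avoid re-filtering and recounting posts.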
number_of_pages = self.site.page_count_per_classification[taxonomy.classification_name][lang].get(name)
if number_of_pages is None:
number_of_pages = self._compute_number_of_pages(self._get_filtered_list(taxonomy, name, lang), self.site.config['INDEX_DISPLAY_POST_COUNT'])
self.site.page_count_per_classification[taxonomy.classification_name][lang][name] = number_of_pages
page_info = (page, number_of_pages)
return self._postprocess_path(path, lang, append_index=append_index, dest_type=dest_type, page_info=page_info)
def _taxonomy_atom_path(self, name, lang, taxonomy, page=None, alternative_path=False):
"""Return path to a classification Atom feed."""
return self._taxonomy_path(name, lang, taxonomy, dest_type='feed', page=page, alternative_path=alternative_path)
def _taxonomy_rss_path(self, name, lang, taxonomy):
"""Return path to a classification RSS feed."""
return self._taxonomy_path(name, lang, taxonomy, dest_type='rss')
def _register_path_handlers(self, taxonomy):
functions = (
('{0}_index', self._taxonomy_index_path),
('{0}', self._taxonomy_path),
('{0}_atom', self._taxonomy_atom_path),
('{0}_rss', self._taxonomy_rss_path),
)
for name, function in functions:
name = name.format(taxonomy.classification_name)
p = functools.partial(function, taxonomy=taxonomy)
doc = taxonomy.path_handler_docstrings[name]
if doc is not False:
p.__doc__ = doc
self.site.register_path_handler(name, p)
def set_site(self, site):
"""Set site, which is a Nikola instance."""
super(TaxonomiesClassifier, self).set_site(site)
# Add hook for after post scanning
blinker.signal("scanned").connect(self._do_classification)
# Register path handlers
for taxonomy in site.taxonomy_plugins.values():
self._register_path_handlers(taxonomy)
| mit | 169,274,457,148,989,400 | 55.268882 | 180 | 0.586899 | false |
tutorcruncher/morpheus | tests/test_user_display.py | 1 | 24349 | import hashlib
import hmac
import json
import re
import uuid
from arq.utils import to_unix_ms
from buildpg import MultipleValues, Values
from datetime import date, datetime, timedelta, timezone
from operator import itemgetter
from pytest_toolbox.comparison import RegexStr
from urllib.parse import urlencode
from morpheus.app.models import MessageStatus
from morpheus.app.worker import update_aggregation_view
def modify_url(url, settings, company='foobar'):
args = dict(company=company, expires=to_unix_ms(datetime(2032, 1, 1)))
body = '{company}:{expires}'.format(**args).encode()
args['signature'] = hmac.new(settings.user_auth_key, body, hashlib.sha256).hexdigest()
return str(url) + ('&' if '?' in str(url) else '?') + urlencode(args)
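# e.g. (illustrative; `expires` is the millisecond epoch for 2032-01-01):
#   modify_url('/x.json', settings)
#   -> '/x.json?company=foobar&expires=...&signature=<HMAC-SHA256 hexdigest of "company:expires">'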
async def test_user_list(cli, settings, send_email, db_conn):
expected_msg_ids = []
for i in range(4):
uid = str(uuid.uuid4())
await send_email(uid=uid, company_code='whoever', recipients=[{'address': f'{i}@t.com'}])
expected_msg_ids.append(f'{uid}-{i}tcom')
await send_email(uid=str(uuid.uuid4()), company_code='different1')
await send_email(uid=str(uuid.uuid4()), company_code='different2')
r = await cli.get(modify_url('/user/email-test/messages.json', settings, 'whoever'))
assert r.status == 200, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
data = await r.json()
# debug(data)
assert data['count'] == 4
msg_ids = [h['external_id'] for h in data['items']]
assert msg_ids == list(reversed(expected_msg_ids))
first_item = data['items'][0]
assert first_item == {
'id': await db_conn.fetchval('select id from messages where external_id=$1', expected_msg_ids[3]),
'external_id': expected_msg_ids[3],
'send_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
'update_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
'status': 'send',
'to_first_name': None,
'to_last_name': None,
'to_user_link': None,
'to_address': '[email protected]',
'company_id': await db_conn.fetchval('select id from companies where code=$1', 'whoever'),
'method': 'email-test',
'subject': 'test message',
'tags': [expected_msg_ids[3][:-6]],
'from_name': 'Sender Name',
'cost': None,
'extra': None,
}
r = await cli.get(modify_url('/user/email-test/messages.json', settings, '__all__'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 6
r = await cli.get(modify_url('/user/email-test/messages.html', settings, '__all__'))
assert r.status == 200, await r.text()
text = await r.text()
assert '<caption>Results: <b>6</b></caption>' in text
assert text.count('.com</a>') == 6
async def test_user_list_sms(cli, settings, send_sms, db_conn):
await send_sms(company_code='testing')
r = await cli.get(modify_url('/user/sms-test/messages.json', settings, 'testing'))
assert r.status == 200, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
data = await r.json()
assert data['count'] == 1
assert len(data['items']) == 1
assert data['items'][0]['body'] == 'this is a test apples'
async def test_user_search(cli, settings, send_email):
msgs = {}
for i, subject in enumerate(['apple', 'banana', 'cherry', 'durian']):
uid = str(uuid.uuid4())
await send_email(
uid=uid, company_code='whoever', recipients=[{'address': f'{i}@t.com'}], subject_template=subject
)
msgs[subject] = f'{uid}-{i}tcom'
await send_email(uid=str(uuid.uuid4()), company_code='different1', subject_template='eggplant')
r = await cli.get(modify_url('/user/email-test/messages.json?q=cherry', settings, 'whoever'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 1
item = data['items'][0]
# debug(item)
assert item['external_id'] == msgs['cherry']
assert item['subject'] == 'cherry'
r = await cli.get(modify_url('/user/email-test/messages.json?q=eggplant', settings, 'whoever'))
assert r.status == 200, await r.text()
data = await r.json()
# debug(data)
assert data['count'] == 0
async def test_user_search_space(cli, settings, send_email):
uid = str(uuid.uuid4())
await send_email(
uid=uid, company_code='testing', recipients=[{'address': '[email protected]'}], subject_template='foobar'
)
r = await cli.get(modify_url('/user/email-test/messages.json?q=foobar', settings, 'testing'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 1
r = await cli.get(modify_url('/user/email-test/messages.json?q=foo%20bar', settings, 'testing'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 0
async def test_user_list_lots_query_test(cli, settings, send_email):
for i in range(110):
await send_email(
uid=str(uuid.uuid4()),
company_code='testing',
recipients=[{'address': f'{i}@t.com'}],
subject_template='foobar',
)
for i in range(20):
await send_email(
uid=str(uuid.uuid4()),
company_code='testing',
recipients=[{'address': f'{i}@t.com'}],
subject_template='barfoo',
)
r = await cli.get(modify_url('/user/email-test/messages.html', settings, 'testing'))
assert r.status == 200, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
text = await r.text()
m = re.search(r'<caption>Results: <b>(\d+)</b></caption>', text)
results = int(m.groups()[0])
assert results == 130
assert '1 - 100' not in text
assert f'101 - {min(results, 150)}' in text
assert 'href="?from=100"' in text
url = modify_url('/user/email-test/messages.html', settings, 'testing')
r = await cli.get(url + '&q=foobar&from=100')
assert r.status == 200, await r.text()
text = await r.text()
m = re.search(r'<caption>Results: <b>(\d+)</b></caption>', text)
results = int(m.groups()[0])
assert results == 10
assert '1 - 100' in text
assert f'101 - {min(results, 150)}' not in text
assert 'href="?q=foobar&from=0"' in text
async def test_user_aggregate(cli, settings, send_email, db_conn):
for i in range(4):
await send_email(uid=str(uuid.uuid4()), company_code='user-aggs', recipients=[{'address': f'{i}@t.com'}])
msg_id = await send_email(uid=str(uuid.uuid4()), company_code='user-aggs', recipients=[{'address': f'{i}@t.com'}])
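    # simulate an 'open' event for the last message via the test webhook so the
    # aggregation picks up one opened message alongside the plain sends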
data = {'ts': int(2e10), 'event': 'open', '_id': msg_id, 'user_agent': 'testincalls'}
await cli.post('/webhook/test/', json=data)
await send_email(uid=str(uuid.uuid4()), company_code='different')
await update_aggregation_view({'pg': db_conn})
r = await cli.get(modify_url('/user/email-test/aggregation.json', settings, 'user-aggs'))
assert r.status == 200, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
data = await r.json()
histogram = data.pop('histogram')
assert data == {
'all_90_day': 5,
'open_90_day': 1,
'all_7_day': 5,
'open_7_day': 1,
'all_28_day': 5,
'open_28_day': 1,
}
assert sorted(histogram, key=itemgetter('count')) == [
{'count': 1, 'day': f'{date.today():%Y-%m-%d}', 'status': 'open'},
{'count': 4, 'day': f'{date.today():%Y-%m-%d}', 'status': 'send'},
]
r = await cli.get(modify_url('/user/email-test/aggregation.json', settings, '__all__'))
assert r.status == 200, await r.text()
data = await r.json()
assert sum(v['count'] for v in data['histogram']) == 6
async def test_user_aggregate_no_data(cli, settings, db_conn):
await db_conn.execute('insert into companies (code) values ($1)', 'testing')
r = await cli.get(modify_url('/user/email-test/aggregation.json', settings, 'testing'))
assert r.status == 200, await r.text()
data = await r.json()
assert data == {
'histogram': [],
'all_90_day': 0,
'open_90_day': 0,
'all_7_day': 0,
'open_7_day': 0,
'all_28_day': 0,
'open_28_day': 0,
}
async def test_user_tags(cli, settings, send_email):
uid1 = str(uuid.uuid4())
await send_email(
uid=uid1,
company_code='tagtest',
tags=['trigger:broadcast', 'broadcast:123'],
recipients=[
{'address': '[email protected]', 'tags': ['user:1', 'shoesize:10']},
{'address': '[email protected]', 'tags': ['user:2', 'shoesize:8']},
],
)
uid2 = str(uuid.uuid4())
await send_email(
uid=uid2,
company_code='tagtest',
tags=['trigger:other'],
recipients=[
{'address': '[email protected]', 'tags': ['user:3', 'shoesize:10']},
{'address': '[email protected]', 'tags': ['user:4', 'shoesize:8']},
],
)
await send_email(uid=str(uuid.uuid4()), company_code='different1')
await send_email(uid=str(uuid.uuid4()), company_code='different2')
url = cli.server.app.router['user-messages'].url_for(method='email-test').with_query([('tags', 'broadcast:123')])
r = await cli.get(modify_url(url, settings, 'tagtest'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 2, json.dumps(data, indent=2)
assert {h['external_id'] for h in data['items']} == {f'{uid1}-1tcom', f'{uid1}-2tcom'}
url = cli.server.app.router['user-messages'].url_for(method='email-test').with_query([('tags', 'user:2')])
r = await cli.get(modify_url(url, settings, 'tagtest'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 1, json.dumps(data, indent=2)
assert data['items'][0]['external_id'] == f'{uid1}-2tcom'
query = [('tags', 'trigger:other'), ('tags', 'shoesize:8')]
url = cli.server.app.router['user-messages'].url_for(method='email-test').with_query(query)
r = await cli.get(modify_url(url, settings, 'tagtest'))
assert r.status == 200, await r.text()
data = await r.json()
# debug(data)
assert data['count'] == 1
assert data['items'][0]['external_id'] == f'{uid2}-4tcom'
async def test_message_details(cli, settings, send_email, db_conn, worker):
msg_ext_id = await send_email(company_code='test-details')
data = {'ts': int(1e10), 'event': 'open', '_id': msg_ext_id, 'user_agent': 'testincalls'}
r = await cli.post('/webhook/test/', json=data)
assert r.status == 200, await r.text()
assert await worker.run_check() == 2
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
r = await cli.get(modify_url(f'/user/email-test/message/{message_id}.html', settings, 'test-details'))
text = await r.text()
assert r.status == 200, text
assert r.headers['Access-Control-Allow-Origin'] == '*'
spaceless = re.sub('\n +', '\n', text)
assert '<label>Subject:</label>\n<span>test message</span>' in spaceless
assert '<label>To:</label>\n<span><[email protected]></span>' in spaceless
assert 'Open •' in text
assert '"user_agent": "testincalls",' in text
assert text.count('<span class="datetime">') == 3
async def test_message_details_link(cli, settings, send_email, db_conn, worker):
msg_ext_id = await send_email(
company_code='test-details',
recipients=[
{
'first_name': 'Foo',
'last_name': 'Bar',
'user_link': '/whatever/123/',
'address': '[email protected]',
'pdf_attachments': [
{'name': 'testing.pdf', 'html': '<h1>testing</h1>', 'id': 123},
{'name': 'different.pdf', 'html': '<h1>different</h1>'},
],
}
],
)
data = {'ts': int(2e12), 'event': 'open', '_id': msg_ext_id, 'user_agent': 'testincalls'}
r = await cli.post('/webhook/test/', json=data)
assert r.status == 200, await r.text()
assert await worker.run_check() == 2
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
url = modify_url(f'/user/email-test/message/{message_id}.html', settings, 'test-details')
r = await cli.get(url)
assert r.status == 200, await r.text()
text = await r.text()
assert '<span><a href="/whatever/123/">Foo Bar <[email protected]></a></span>' in text
assert '<a href="/attachment-doc/123/">testing.pdf</a>' in text
assert '<a href="#">different.pdf</a>' in text
    d = re.search('Open • .+', text).group()
    assert 'Open • <span class="datetime">2033-05-18T03:33:20+00</span>' == d, text
assert 'extra values not shown' not in text
r = await cli.get(url + '&' + urlencode({'dttz': 'Europe/London'}))
assert r.status == 200, await r.text()
text = await r.text()
    d = re.search('Open • .+', text).group()
    assert 'Open • <span class="datetime">2033-05-18T04:33:20+01</span>' == d, text
r = await cli.get(url + '&' + urlencode({'dttz': 'snap'}))
assert r.status == 400, await r.text()
assert r.headers.get('Access-Control-Allow-Origin') == '*'
assert {'message': 'unknown timezone: "snap"'} == await r.json()
async def test_no_event_data(cli, settings, send_email, db_conn):
msg_ext_id = await send_email(
company_code='test-details', recipients=[{'first_name': 'Foo', 'address': '[email protected]'}]
)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
await db_conn.execute_b(
'insert into events (:values__names) values :values',
values=MultipleValues(
*[
Values(
ts=(datetime(2032, 6, 1) + timedelta(days=i, hours=i * 2)).replace(tzinfo=timezone.utc),
message_id=message_id,
status=MessageStatus.send,
)
for i in range(3)
]
),
)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
r = await cli.get(modify_url(f'/user/email-test/message/{message_id}.html', settings, 'test-details'))
assert '<div class="events" id="morpheus-accordion">\n' in await r.text()
async def test_single_item_events(cli, settings, send_email, db_conn):
msg_ext_id = await send_email(
company_code='test-details', recipients=[{'first_name': 'Foo', 'address': '[email protected]'}]
)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
await db_conn.execute_b(
'insert into events (:values__names) values :values',
values=MultipleValues(
*[
Values(
ts=(datetime(2032, 6, 1) + timedelta(days=i, hours=i * 2)).replace(tzinfo=timezone.utc),
message_id=message_id,
status=MessageStatus.send,
)
for i in range(3)
]
),
)
url = modify_url(f'/user/email-test/messages.json?message_id={message_id}', settings, 'test-details')
r = await cli.get(url)
assert r.status == 200, await r.text()
data = await r.json()
assert data['events'] == [
{'status': 'send', 'ts': '2032-06-01T00:00:00+00', 'extra': None},
{'status': 'send', 'ts': '2032-06-02T02:00:00+00', 'extra': None},
{'status': 'send', 'ts': '2032-06-03T04:00:00+00', 'extra': None},
]
async def test_invalid_message_id(cli, settings):
url = modify_url('/user/email-test/messages.json?message_id=foobar', settings, 'test-details')
r = await cli.get(url)
assert r.status == 400, await r.text()
data = await r.json()
assert data == {'message': "invalid get argument 'message_id': 'foobar'"}
async def test_many_events(cli, settings, send_email, db_conn):
msg_ext_id = await send_email(
company_code='test-details', recipients=[{'first_name': 'Foo', 'address': '[email protected]'}]
)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
await db_conn.execute_b(
'insert into events (:values__names) values :values',
values=MultipleValues(
*[
Values(
ts=(datetime(2032, 6, 1) + timedelta(days=i)).replace(tzinfo=timezone.utc),
message_id=message_id,
status=MessageStatus.send,
extra=json.dumps({'foo': 'bar', 'v': i}),
)
for i in range(55)
]
),
)
url = modify_url(f'/user/email-test/message/{message_id}.html', settings, 'test-details')
r = await cli.get(url)
assert r.status == 200, await r.text()
text = await r.text()
assert text.count('#morpheus-accordion') == 51
assert 'Send • <span class="datetime">2032-06-16T00:00:00+00</span>\n' in text, text
assert '5 more • ...' in text
async def test_message_details_missing(cli, settings):
r = await cli.get(modify_url('/user/email-test/message/123.html', settings, 'test-details'))
assert r.status == 404, await r.text()
assert {'message': 'message not found'} == await r.json()
async def test_message_preview(cli, settings, send_email, db_conn):
msg_ext_id = await send_email(company_code='preview')
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
r = await cli.get(modify_url(f'/user/email-test/{message_id}/preview/', settings, 'preview'))
assert r.status == 200, await r.text()
assert '<body>\nthis is a test\n</body>' == await r.text()
async def test_message_preview_disable_links(cli, send_email, settings, db_conn):
msg_ext_id = await send_email(
company_code='preview',
context={
'message__render': (
'Hi, <a href="https://lp.example.com/">\n<span class="class">Look at '
'this link that needs removed</span></a>'
),
'unsubscribe_link': 'http://example.org/unsub',
},
recipients=[{'address': '[email protected]'}],
)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
r = await cli.get(modify_url(f'/user/email-test/{message_id}/preview/', settings, 'preview'))
assert r.status == 200, await r.text()
msg = await r.text()
assert '<p>Hi, <a href="#"><br>\n<span class="class">Look at this link that needs removed</span></a></p>' in msg
async def test_message_preview_disable_links_md(send_email, settings, cli, db_conn):
msg_ext_id = await send_email(
company_code='preview',
main_template='testing {{{ foobar }}}',
context={
'message__render': 'test email {{ unsubscribe_link }}.\n',
'foobar__md': '[hello](www.example.org/hello)',
},
)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
r = await cli.get(modify_url(f'/user/email-test/{message_id}/preview/', settings, 'preview'))
assert r.status == 200, await r.text()
assert 'testing <p><a href="#">hello</a></p>\n' == await r.text()
async def test_user_sms(cli, settings, send_sms, db_conn):
await send_sms(company_code='snapcrap')
await send_sms(uid=str(uuid.uuid4()), company_code='flip')
r = await cli.get(modify_url('/user/sms-test/messages.json', settings, 'snapcrap'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 1
item = data['items'][0]
assert item['method'] == 'sms-test'
assert item['company_id'] == await db_conn.fetchval('select id from companies where code=$1', 'snapcrap')
assert item['status'] == 'send'
assert item['from_name'] == 'FooBar'
assert item['cost'] == 0.012
assert 'events' not in item
assert json.loads(item['extra']) == {'length': 21, 'parts': 1}
assert data['spend'] == 0.012
r = await cli.get(modify_url('/user/sms-test/messages.json', settings, '__all__'))
assert r.status == 200, await r.text()
data = await r.json()
assert data['count'] == 2
r = await cli.get(modify_url('/user/sms-test/messages.html', settings, 'snapcrap'))
assert r.status == 200, await r.text()
text = await r.text()
assert '<caption>Total spend this month: <b>£0.012</b><span id="extra-spend-info"></span></caption>' in text, text
async def test_user_sms_preview(cli, settings, send_sms, db_conn):
msg_ext_id = await send_sms(company_code='smspreview', main_template='this is a test {{ variable }} ' * 10)
message_id = await db_conn.fetchval('select id from messages where external_id=$1', msg_ext_id)
await send_sms(uid=str(uuid.uuid4()), company_code='flip')
r = await cli.get(modify_url(f'/user/sms-test/{message_id}/preview/', settings, 'smspreview'))
text = await r.text()
assert r.status == 200, text
assert '<span class="metadata">Length:</span>220' in text
assert '<span class="metadata">Multipart:</span>2 parts' in text
async def test_user_list_lots(cli, settings, send_email):
for i in range(110):
await send_email(uid=str(uuid.uuid4()), company_code='list-lots', recipients=[{'address': f'{i}@t.com'}])
r = await cli.get(modify_url('/user/email-test/messages.html', settings, '__all__'))
assert r.status == 200, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
text = await r.text()
m = re.search(r'<caption>Results: <b>(\d+)</b></caption>', text)
results = int(m.groups()[0])
assert results >= 110
assert '1 - 100' not in text
assert f'101 - {min(results, 150)}' in text
url = modify_url('/user/email-test/messages.html', settings, '__all__')
r = await cli.get(url + '&from=100')
assert r.status == 200, await r.text()
text = await r.text()
assert '1 - 100' in text
assert f'101 - {min(results, 150)}' not in text
async def test_valid_signature(cli, settings, db_conn):
await db_conn.execute('insert into companies (code) values ($1)', 'whatever')
args = dict(company='whatever', expires=to_unix_ms(datetime(2032, 1, 1)))
body = '{company}:{expires}'.format(**args).encode()
args['signature'] = hmac.new(settings.user_auth_key, body, hashlib.sha256).hexdigest()
r = await cli.get('/user/email-test/messages.json?' + urlencode(args))
assert r.status == 200, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
async def test_invalid_signature(cli, settings):
args = dict(company='whatever', expires=to_unix_ms(datetime(2032, 1, 1)))
body = '{company}:{expires}'.format(**args).encode()
args['signature'] = hmac.new(settings.user_auth_key, body, hashlib.sha256).hexdigest() + 'xxx'
r = await cli.get('/user/email-test/messages.json?' + urlencode(args))
assert r.status == 403, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
assert {'message': 'Invalid token'} == await r.json()
async def test_invalid_expiry(cli, settings):
args = dict(company='whatever', expires='xxx')
body = '{company}:{expires}'.format(**args).encode()
args['signature'] = hmac.new(settings.user_auth_key, body, hashlib.sha256).hexdigest()
r = await cli.get('/user/email-test/messages.json?' + urlencode(args))
assert r.status == 400, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
assert {
'message': 'Invalid Data',
'details': [{'loc': ['expires'], 'msg': 'invalid datetime format', 'type': 'value_error.datetime'}],
} == await r.json()
async def test_sig_expired(cli, settings):
args = dict(company='whatever', expires=to_unix_ms(datetime(2000, 1, 1)))
body = '{company}:{expires}'.format(**args).encode()
args['signature'] = hmac.new(settings.user_auth_key, body, hashlib.sha256).hexdigest()
r = await cli.get('/user/email-test/messages.json?' + urlencode(args))
assert r.status == 403, await r.text()
assert r.headers['Access-Control-Allow-Origin'] == '*'
assert {'message': 'token expired'} == await r.json()
| mit | -7,279,376,518,004,348,000 | 41.051813 | 118 | 0.602555 | false |
glennhickey/vg2sg | data/fetchRegion.py | 1 | 17655 | #!/usr/bin/env python2.7
#
# FROM https://raw.githubusercontent.com/adamnovak/sequence-graphs/master/scripts/fetchRegion.py
#
"""
fetchRegion.py: Fetch the sequence data, GRC alignments, and gene sets for a GRC
region (like "LRC_KIR" or "MHC") by name.
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools
import collections, urllib2, glob, doctest
import tsv
from Bio import AlignIO, SeqIO, Align, Entrez
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def parse_args(args):
"""
Takes in the command-line arguments list (args), and returns a nice argparse
result with fields for all the options.
Borrows heavily from the argparse documentation examples:
<http://docs.python.org/library/argparse.html>
"""
# Construct the parser (which is stored in parser)
# Module docstring lives in __doc__
# See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
# And a formatter class so our examples in the docstring look good. Isn't it
# convenient how we already wrapped it to 80 characters?
# See http://docs.python.org/library/argparse.html#formatter-class
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# General options
parser.add_argument("region",
help="name of the region to download, and the output directory")
parser.add_argument("--assembly_url",
default=("ftp://ftp.ncbi.nlm.nih.gov/genomes/all/"
"GCA_000001405.17_GRCh38.p2/"
"GCA_000001405.17_GRCh38.p2_assembly_structure"),
help="URL for the assembly, containing genomic_region_definitions.txt")
parser.add_argument("--email", default="[email protected]",
help="E-mail address to report to Entrez")
# The command line arguments start with the program name, which we don't
# want to treat as an argument for argparse. So we remove it.
args = args[1:]
return parser.parse_args(args)
def url_open_tsv(url):
"""
Open a TSV URL and loop through the lines as lists.
"""
try:
reader = tsv.TsvReader(urllib2.urlopen(url))
except urllib2.URLError as err:
print("Could not open " + url)
raise err
return reader
def get_region_info(region_name, assembly_root):
"""
Go download the genomic_region_definitions.txt from the specified assembly,
and return the (contig, start, end) of the named region.
"""
# Open the region definitions
for parts in url_open_tsv(assembly_root + "/genomic_regions_definitions.txt"):
# For every region in the list
if parts[0] == region_name:
# We found it. Parse out contig, start, end.
# Contig is like "CM000663.2" and not all that useful...
return (parts[1], int(parts[2]), int(parts[3]))
# If we get here, there's no region by that name.
raise RuntimeError("No region named " + region_name)
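# Illustrative call (actual values depend on genomic_regions_definitions.txt):
#   contig, start, end = get_region_info("MHC", assembly_root)
#   # e.g. contig == "CM000668.2" (chr6), with 1-based start/end coordinates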
def get_region_sequences(region_name, assembly_root):
"""
Given the name of a region and the root URL for the assembly, yield all the
alt locus sequence names (Genbank IDs) and assembly unit names
(ALT_REF_LOCI_###) that are in the region.
"""
# Open the alt locus placement file
for parts in url_open_tsv(assembly_root + "/all_alt_scaffold_placement.txt"):
# For every alt locus...
if parts[7] == region_name:
# We found one in the correct region (which happens to be column 7)
# Give its sequence ID/accession with version (column 3) and the
# assembly unit name
yield parts[3], parts[0]
def get_record_by_grc(grc_id):
"""
Given a GRC ID, return the Entrez nucleotide DocSum record.
"""
# First just search the ID as a search term.
search_results = Entrez.read(Entrez.esearch("nucleotide", term=grc_id))
if len(search_results["IdList"]) > 1:
# We should only get one result. If we have many, we might be looking at
# the wrong one.
print(search_results)
raise RuntimeError("Too many results!")
# Grab the handle thingy for the first search result
first_handle = Entrez.read(Entrez.epost("nucleotide",
id=search_results["IdList"][0]))
# Actually download that record
record = Entrez.read(Entrez.esummary(db="nucleotide",
webenv=first_handle["WebEnv"], query_key=first_handle["QueryKey"]))[0]
# Return it
return record
def get_ucsc_name(grc_id, alt_parent_grc_id=None):
"""
Given a GRC-style (genbank) ID with version, like "CM000663.2" or
"GL383549.1" or "KI270832.1", get the UCSC name for that sequence, like
"chr6_GL000252v2_alt".
If the sequence is an alt, the GRC id of its parent chromosome must be
specified.
"""
if alt_parent_grc_id is None:
# Simple case; it's a primary chromosome.
# Fetch the record
record = get_record_by_grc(grc_id)
# Parse out all the "extra" fields
extra_parts = record["Extra"].split("|")
# Find the "gnl" key
gnl_index = extra_parts.index("gnl")
# The chromosome number/letter is two fields later.
chromosome_character = extra_parts[gnl_index + 2]
# Make it chrThat
ucsc_name = "chr{}".format(chromosome_character)
else:
# We do have a parent. Get its UCSC name.
parent_name = get_ucsc_name(alt_parent_grc_id)
# Convert from .2 or whatever to v2 or whatever
name_middle = grc_id.replace(".", "v")
# Put them in the name pattern template to generate the name
ucsc_name = "{}_{}_alt".format(parent_name, name_middle)
# Report the result
print("{} is {} at UCSC".format(grc_id, ucsc_name))
return ucsc_name
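# Illustrative mappings following the UCSC naming convention (not fetched live):
#   get_ucsc_name("CM000663.2")               -> "chr1"
#   get_ucsc_name("GL000252.2", "CM000668.2") -> "chr6_GL000252v2_alt"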
def get_gi_number(grc_id):
"""
Given a GRC-style (genbank) ID with version, like "CM000663.2" or
"GL383549.1" or "KI270832.1", get the GI number associated with that ID from
Entrez.
"""
# Go fetch the record
record = get_record_by_grc(grc_id)
print("{} = {}".format(grc_id, record["Gi"]))
# Return the GI number. TODO: should this be the ID instead because of how
# we use it next? Are they ever different?
return record["Gi"]
def get_length(gi_id):
"""
Get the length of a sequence given its numerical GI number.
"""
# Grab the handle thingy for the record with this ID
handle = Entrez.read(Entrez.epost("nucleotide", id=str(gi_id)))
# Actually download that record
record = Entrez.read(Entrez.esummary(db="nucleotide",
webenv=handle["WebEnv"], query_key=handle["QueryKey"]))[0]
# Return the length of the sequence
return record["Length"]
def get_sequence(gi_id, start=None, end=None):
"""
Get a sequence by numerical GI number, optionally with start and end
parameters (in 1-based coordinates from the left). If start is
specified, end must also be specified.
"""
if start is None:
# Go fetch the whole record. We need to make the ID a str or the API
# client freaks out.
fetch_handle = Entrez.efetch(db="nucleotide", id=str(gi_id),
rettype="fasta")
else:
# Just fetch part of it
fetch_handle = Entrez.efetch(db="nucleotide", id=str(gi_id),
rettype="fasta", seq_start=start, seq_end=end)
# Load up the FASTA record
record = SeqIO.read(fetch_handle, "fasta")
# Change the record FASTA ID to just GIwhatever
record.id = "GI{}".format(gi_id)
# Return the fixed-up record
return record
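# Sketch (hypothetical GI number): fetch the first kilobase as a Biopython SeqRecord
#   record = get_sequence(123456789, start=1, end=1000)
#   # record.id is rewritten to "GI123456789"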
def download_gff3(ref_acc, alt_acc, alt_unit, assembly_root, out_filename):
"""
Download the GFF3 alignment between the given reference accession and the
given alt accession (in the given assembly unit), from the given assembly
root URL, and save it to the given output filename.
"""
# Figure out what we want to download
gff3_url = "{}/{}/alt_scaffolds/alignments/{}_{}.gff".format(assembly_root,
alt_unit, alt_acc, ref_acc)
# Open the URL to read
in_stream = urllib2.urlopen(gff3_url)
with open(out_filename, "w") as out_stream:
# Copy everything to the output file as in
# <http://stackoverflow.com/a/5397438/402891>
shutil.copyfileobj(in_stream, out_stream)
def get_genes(grc_id, out_name, start=1, end=None, alt_parent_grc_id=None,
db="hg38"):
"""
Given a GRC ID (like "CM000663.2"), the name of the contig on which to
report the genes, optional start and end coordinates (1-based) and the GRC
ID of the parent chromosome if it is an alt, yield BED lines for all the
genes in the specified region.
If start is specified, coordinates will be given relative to that position.
Assumes "hgsql" is installed and configured and available on the PATH.
Uses the hg38 database unless told otherwise.
All inputs must be trusted and not permitted to contain SQL injection.
"""
# Convert to 0-based not-end-inclusive coordinates.
start -= 1
# Get the name to look up in the database.
query_contig = get_ucsc_name(grc_id, alt_parent_grc_id)
# Spec out the query. TODO: Can I not say the database name constantly?
query_parts = ["SELECT \"", out_name, "\", ", db, ".knownGene.txStart - ",
start, ", ", db, ".knownGene.txEnd - ", start, ", ", db,
".kgXref.geneSymbol, 0, ", db, ".knownGene.strand FROM ", db,
".knownGene LEFT OUTER JOIN ", db, ".kgXref ON ", db,
".knownGene.name = ", db, ".kgXref.kgID WHERE ", db,
".knownGene.txStart != ", db, ".knownGene.cdsStart AND ", db,
".knownGene.chrom = \"", query_contig, "\" AND ", db,
".knownGene.txStart >= ", start]
if end is not None:
# Require the end criterion to be met too.
query_parts += [" AND ", db, ".knownGene.txEnd < ", end]
# Finish off the query.
query_parts.append(";")
# Put together the whole query.
query = "".join([str(part) for part in query_parts])
# Build the hgsql command line
args = ["hgsql", "-e", query]
# Open the process
process = subprocess.Popen(args, stdout=subprocess.PIPE)
for line in itertools.islice(process.stdout, 1, None):
# For all lines except the first, yield them because they are BED lines.
yield line
if process.wait() != 0:
raise RuntimeError("hgsql")
# We are done with this process.
process.stdout.close()
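# Illustrative use: emit BED lines for genes on an alt, named after its GI
#   for bed_line in get_genes("GL000252.2", "GI123", alt_parent_grc_id="CM000668.2"):
#       sys.stdout.write(bed_line)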
def open_gene_bed(region, sequence_id):
"""
Given the region name and the sequence ID ("ref" or "GI<whatever>") for a
sequence, give back an output file object to which a BED of the genes in
that sequence may be written.
"""
# Each bed goes in a folder named after its sequence, so hal2assemblyHub can
# use it.
bed_dir = "{}/genes/{}".format(region, sequence_id)
if not os.path.exists(bed_dir):
# Make sure we have a place to put the genes
os.makedirs(bed_dir)
# Open a bed file in there for writing and return it.
return open(bed_dir + "/genes.bed", "w")
def main(args):
"""
Parses command line arguments and do the work of the program.
"args" specifies the program arguments, with args[0] being the executable
name. The return value should be used as the program's exit code.
"""
if len(args) == 2 and args[1] == "--test":
# Run the tests
return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
options = parse_args(args) # This holds the nicely-parsed options object
# Set Entrez e-mail
Entrez.email = options.email
# Go get the region of the reference we're talking about. Starts and ends
# are 1-based.
ref_acc, ref_start, ref_end = get_region_info(options.region,
options.assembly_url)
# Make our output directory
if not os.path.exists(options.region):
os.makedirs(options.region)
# We're going to write a chrom.sizes file with accessions (not the GI
# numbers) for the gff3->psl conversion step
acc_chrom_sizes = tsv.TsvWriter(open(options.region + "/acc.chrom.sizes",
"w"))
# Get the reference's GI
ref_gi = get_gi_number(ref_acc)
print("Reference for {} is GI{}:{}-{} 1-based".format(options.region,
ref_gi, ref_start, ref_end))
# Grab the reference sequence
ref_seq = get_sequence(ref_gi, ref_start, ref_end)
print("Got {}bp for a {}bp reference".format(len(ref_seq),
ref_end - ref_start + 1))
if len(ref_seq) > ref_end - ref_start + 1:
# Clip it down if it's too long. Assuming we have the correct sort of
# coordinates, and that we got served the data starting at the correct
# offset.
ref_seq = ref_seq[0:ref_end - ref_start + 1]
    elif len(ref_seq) < ref_end - ref_start + 1:
        raise RuntimeError("Didn't get enough sequence from the API!")
# Change it to be just called "ref"
ref_seq.id = "ref"
# Write it to <region>/ref.fa
SeqIO.write([ref_seq], open("{}/ref.fa".format(options.region), "w"),
"fasta")
# Write a chromosome size entry for the reference by its accession
acc_chrom_sizes.line(ref_acc, get_length(ref_gi))
print("Writing genes for ref")
# Make a BED to put reference genes in
ref_bed = open_gene_bed(options.region, "ref")
for line in get_genes(ref_acc, "ref", ref_start, ref_end):
# Write all the BED lines for the appropriate region of the reference to
# that file.
ref_bed.write(line)
ref_bed.close()
for alt_acc, alt_unit in get_region_sequences(options.region,
options.assembly_url):
# For every alt in the region
# Get its GI number
alt_gi = get_gi_number(alt_acc)
print("Downloading alt GI{}".format(alt_gi))
# Grab the sequence data
alt_seq = get_sequence(alt_gi)
# Write it to <region>/GI<number>.fa
SeqIO.write([alt_seq], open("{}/GI{}.fa".format(options.region, alt_gi),
"w"), "fasta")
# Add this alt to the chromosome-sizes-by-accession file
acc_chrom_sizes.line(alt_acc, get_length(alt_gi))
# Sneak into the TSV writer and flush, so the sizes file can now be
# read.
acc_chrom_sizes.stream.flush()
# Where should we put the GFF alignment for this alt to the reference?
alt_gff3 = "{}/GI{}.gff3".format(options.region, alt_gi)
print("Downloading alignment")
# Go download it
download_gff3(ref_acc, alt_acc, alt_unit, options.assembly_url,
alt_gff3)
# And we need to convert that to PSL
alt_psl = "{}/GI{}.psl".format(options.region, alt_gi)
print("Converting to PSL")
# Run the conversion with the bit of the sizes file we have so far. We
# need to pass the chrom.sizes file twice now because gff3ToPsl has
# changed its interface.
subprocess.check_call(["gff3ToPsl", options.region + "/acc.chrom.sizes",
options.region + "/acc.chrom.sizes", alt_gff3, alt_psl])
# Edit the output to point to the GI instead of the accession
subprocess.check_call(["sed", "-i", "s/{}/GI{}/g".format(alt_acc,
alt_gi), alt_psl])
print("Writing genes for GI{}".format(alt_gi))
# Make a BED to put alt genes in
alt_bed = open_gene_bed(options.region, "GI{}".format(alt_gi))
for line in get_genes(alt_acc, "GI{}".format(alt_gi),
alt_parent_grc_id=ref_acc):
# Write all the BED lines for the alt to the file
alt_bed.write(line)
alt_bed.close()
# Now we need to do psl2maf, complete with globbing.
print("Creating GRC MAF")
# Find the psl2maf.py script
psl2maf = (os.path.dirname(os.path.realpath(__file__)) +
"/../mhc/psl2maf.py")
# Go call psl2maf, moving the reference stuff over to "ref" and shifting it
# back so that the first base we clipped out of the reference is 0,
# splitting apart mismatches, and making sure to use all the PSLs and MAFs
# in our output directory. We make sure to add 1 to the reference start in
# the offset, because some basedness-conversion needs to happen. TODO: Make
# this a function or make this use an import or somehow de-uglify it.
args = ([psl2maf, "--maf",
options.region + "/GRCAlignment.maf", "--referenceOffset",
str(-ref_start + 1), "--referenceSequence", "ref", "--noMismatch",
"--psls"] + glob.glob(options.region + "/*.psl") + ["--fastas"] +
glob.glob(options.region + "/*.fa"))
print("Calling: {}".format(" ".join(args)))
subprocess.check_call(args)
if __name__ == "__main__" :
sys.exit(main(sys.argv))
| mit | 1,954,067,280,967,600,400 | 34.31 | 96 | 0.608666 | false |
gbanegas/HappyClient | migration/env.py | 1 | 2112 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
#fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from happy import model
target_metadata = model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, version_table='migrate_version')
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
version_table='migrate_version'
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| apache-2.0 | 6,247,591,407,888,438,000 | 27.931507 | 69 | 0.684659 | false |
exaile/exaile | xlgui/preferences/widgets.py | 1 | 33912 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import hashlib
import logging
import os
from typing import Any, Callable, Optional
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Pango
from xl.nls import gettext as _
from xl import event, main, settings
from xlgui import guiutil
from xlgui.widgets import dialogs
from xlgui.guiutil import GtkTemplate
logger = logging.getLogger(__name__)
class Preference:
"""
    Represents a Gtk.Entry preferences item
"""
default: Any = ''
done: Callable[[], bool]
label_widget: Optional[Gtk.Widget]
name: str
restart_required = False
def __init__(self, preferences, widget):
"""
Initializes the preferences item
expects the name of the widget in the designer file, the default for
this setting, an optional function to be called when the value is
changed, and an optional function to be called when this setting
is applied
"""
self.widget = widget
self.preferences = preferences
if self.restart_required:
self.message = dialogs.MessageBar(
parent=preferences.builder.get_object('preferences_box'),
type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.CLOSE,
text=_('Restart Exaile?'),
)
self.message.set_secondary_text(
_('A restart is required for this change to take effect.')
)
button = self.message.add_button(_('Restart'), Gtk.ResponseType.ACCEPT)
button.set_image(
Gtk.Image.new_from_icon_name('view-refresh', Gtk.IconSize.BUTTON)
)
self.message.connect('response', self.on_message_response)
self._set_value()
self._setup_change()
def change(self, *args):
self.apply()
def _setup_change(self):
"""
Sets up the function to be called when this preference is changed
"""
self.widget.connect(
'focus-out-event', self.change, self.name, self._get_value()
)
try:
self.widget.connect(
'activate',
lambda *e: self.change(self.widget, None, self.name, self._get_value()),
)
except TypeError:
pass
def _get_value(self):
"""
Value to be stored into the settings file
"""
return self.widget.get_text()
def _set_value(self):
"""
Sets the GUI widget up for this preference
"""
if not self.widget:
logger.error("Widget not found: %s", self.name)
return
self.widget.set_text(str(settings.get_option(self.name, self.default)))
def apply(self, value=None):
"""
Applies this setting
"""
if hasattr(self, 'done') and not self.done():
return False
oldvalue = settings.get_option(self.name, self.default)
if value is None:
value = self._get_value()
if value != oldvalue:
settings.set_option(self.name, value)
if self.restart_required:
self.message.show()
return True
def on_message_response(self, widget, response):
"""
Restarts Exaile if requested
"""
widget.hide()
if response == Gtk.ResponseType.ACCEPT:
GLib.idle_add(main.exaile().quit, True)
def hide_widget(self):
'''Hides the widget and optionally its associated label'''
self.widget.hide()
if hasattr(self, 'label_widget'):
self.label_widget.hide()
def show_widget(self):
'''Shows the widget and optionally its associated label'''
self.widget.show_all()
if hasattr(self, 'label_widget'):
self.label_widget.show_all()
def set_widget_sensitive(self, value):
'''Sets sensitivity of widget and optionally its associated label'''
self.widget.set_sensitive(value)
if hasattr(self, 'label_widget'):
self.label_widget.set_sensitive(value)
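# Minimal usage sketch (hypothetical setting name and widget): a subclass only
# declares the setting's name and default; the preferences dialog wires it up
# to the Gtk widget looked up from the designer file.
#
#     class MainEntryPreference(Preference):
#         default = 'hello'
#         name = 'general/main_entry'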
class Conditional:
"""
Allows for reactions on changes
of other preference items
"""
condition_preference_name = ''
condition_widget = None
def __init__(self):
event.add_ui_callback(self.on_option_set, 'option_set')
GLib.idle_add(
self.on_option_set, 'option_set', settings, self.condition_preference_name
)
def get_condition_value(self):
"""
:returns: The currently selected value in the condition widget,
presumes it is a combo box
"""
i = self.condition_widget.get_active_iter()
return self.condition_widget.get_model().get_value(i, 0)
def on_check_condition(self):
"""
Specifies the condition to meet
:returns: Whether the condition is met or not
:rtype: bool
"""
pass
def on_condition_met(self):
"""
Called as soon as the
specified condition is met
"""
self.widget.set_sensitive(True)
def on_condition_failed(self):
"""
Called as soon as the specified
condition is not met anymore
"""
self.widget.set_sensitive(False)
def on_option_set(self, event, settings, option):
"""
Called as soon as options change
"""
if option == self.condition_preference_name:
if self.on_check_condition():
self.on_condition_met()
else:
self.on_condition_failed()
class CheckConditional(Conditional):
"""
True if the conditional widget is active
"""
def get_condition_value(self):
return self.condition_widget.get_active()
def on_check_condition(self):
"""
Specifies the condition to meet
:returns: Whether the condition is met or not
:rtype: bool
"""
return self.get_condition_value()
class MultiConditional:
"""
Allows for reactions on changes of multiple preference items
"""
condition_preference_names = []
condition_widgets = {}
def __init__(self):
event.add_ui_callback(self.on_option_set, 'option_set')
GLib.idle_add(
self.on_option_set,
'option_set',
settings,
self.condition_preference_names[0],
)
def get_condition_value(self, name):
"""
:returns: The currently selected value in the condition widget,
presumes it is a combo box
"""
widget = self.condition_widgets[name]
return widget.get_model().get_value(widget.get_active_iter(), 0)
def on_check_condition(self):
"""
Specifies the condition to meet
:returns: Whether the condition is met or not
:rtype: bool
"""
pass
def on_condition_met(self):
"""
Called as soon as the
specified condition is met
"""
self.widget.set_sensitive(True)
def on_condition_failed(self):
"""
Called as soon as the specified
condition is not met anymore
"""
self.widget.set_sensitive(False)
def on_option_set(self, event, settings, option):
"""
Called as soon as options change
"""
if option in self.condition_preference_names:
if self.on_check_condition():
self.on_condition_met()
else:
self.on_condition_failed()
class Button(Preference):
"""
Represents a button for custom usage
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
widget.connect('clicked', self.on_clicked)
def _setup_change(self, *e):
pass
def _get_value(self):
return None
def _set_value(self):
pass
def apply(self, *e):
return False
def on_clicked(self, button):
"""Override"""
pass
class HashedPreference(Preference):
"""
Represents a text entry with automated hashing
Options:
* type (Which hashfunction to use, default: md5)
"""
type = 'md5'
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
self.widget.set_visibility(True)
self._delete_text_id = self.widget.connect('delete-text', self.on_delete_text)
self._insert_text_id = self.widget.connect('insert-text', self.on_insert_text)
def _setup_change(self):
"""
Sets up the function to be called when this preference is changed
"""
self.widget.connect('focus-out-event', lambda *e: self.apply())
def done(self):
"""
Determines if changes are to be expected
"""
if self._delete_text_id is None and self._insert_text_id is None:
return True
return False
def apply(self, value=None):
"""
Applies this setting
"""
if not self.done():
return False
if value is None:
value = self._get_value()
if value is None:
return True
if value != '':
hashfunc = hashlib.new(self.type)
hashfunc.update(value.encode('utf-8'))
value = hashfunc.hexdigest()
oldvalue = settings.get_option(self.name, self.default)
if value != oldvalue:
settings.set_option(self.name, value)
self.widget.set_text(value)
self.widget.set_visibility(True)
self._delete_text_id = self.widget.connect('delete-text', self.on_delete_text)
self._insert_text_id = self.widget.connect('insert-text', self.on_insert_text)
return True
def on_delete_text(self, widget, start, end):
"""
Clears the text entry and makes following input invisible
"""
self.widget.disconnect(self._delete_text_id)
self.widget.disconnect(self._insert_text_id)
self._delete_text_id = self._insert_text_id = None
self.widget.set_visibility(False)
self.widget.set_text('')
def on_insert_text(self, widget, text, length, position):
"""
Clears the text entry and makes following input invisible
"""
self.widget.disconnect(self._delete_text_id)
self.widget.disconnect(self._insert_text_id)
self._delete_text_id = self._insert_text_id = None
self.widget.set_visibility(False)
# Defer to after returning from this method
GLib.idle_add(self.widget.set_text, text)
GLib.idle_add(self.widget.set_position, length)
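# Sketch: a SHA-256-hashed secret field (hypothetical setting name); the entered
# text is replaced by its hex digest when focus leaves the entry.
#
#     class PasswordPreference(HashedPreference):
#         name = 'plugin/password'
#         type = 'sha256'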
class CheckPreference(Preference):
"""
A class to represent check boxes in the preferences window
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _setup_change(self):
self.widget.connect('toggled', self.change)
def _set_value(self):
self.widget.set_active(settings.get_option(self.name, self.default))
def _get_value(self):
return self.widget.get_active()
class DirPreference(Preference):
"""
Directory chooser button
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _setup_change(self):
self.widget.connect('current-folder-changed', self.change)
def _set_value(self):
"""
Sets the current directory
"""
directory = os.path.expanduser(settings.get_option(self.name, self.default))
if not os.path.exists(directory):
os.makedirs(directory)
self.widget.set_current_folder(directory)
def _get_value(self):
return self.widget.get_filename()
class OrderListPreference(Preference):
"""
A list box with reorderable items
"""
def __init__(self, preferences, widget):
self.model = Gtk.ListStore(str)
Preference.__init__(self, preferences, widget)
widget.set_headers_visible(False)
widget.set_reorderable(True)
text = Gtk.CellRendererText()
col = Gtk.TreeViewColumn("Item", text, text=0)
self.widget.append_column(col)
self.widget.set_model(self.model)
def _setup_change(self):
self.widget.connect('drag-end', self.change)
def _set_value(self):
"""
Sets the preferences for this widget
"""
items = settings.get_option(self.name, self.default)
self.model.clear()
for item in items:
self.model.append([item])
def _get_value(self):
"""
Value to be stored into the settings file
"""
items = []
for row in self.model:
items += [row[0]]
return items
class SelectionListPreference(Preference):
"""
A list allowing for enabling/disabling
as well as reordering of items
Options:
* items: list of :class:`SelectionListPreference.Item` objects
* default: list of item ids
"""
class Item:
"""
Convenience class for preference item description
"""
def __init__(self, id, title, description=None, fixed=False):
"""
:param id: the unique identifier
:type id: string
:param title: the readable title
:type title: string
:param description: optional description of the item
:type description: string
:param fixed: whether the item should be removable
:type fixed: bool
"""
self.__id = id
self.__title = title
self.__description = description
self.__fixed = fixed
id = property(lambda self: self.__id)
title = property(lambda self: self.__title)
description = property(lambda self: self.__description)
fixed = property(lambda self: self.__fixed)
@GtkTemplate('ui', 'preferences', 'widgets', 'selection_list_preference.ui')
class InternalWidget(Gtk.ScrolledWindow):
"""
Internal class for making GtkTemplate work with subclassing
"""
__gtype_name__ = 'InternalWidget'
(
model,
tree,
toggle_renderer,
text_renderer,
enabled_column,
title_column,
) = GtkTemplate.Child.widgets(6)
selectionlp = None
def __init__(self, preference):
Gtk.ScrolledWindow.__init__(self)
self.init_template()
self.selectionlp = preference
self.tree.enable_model_drag_source(
Gdk.ModifierType.BUTTON1_MASK,
[('GTK_TREE_MODEL_ROW', Gtk.TargetFlags.SAME_WIDGET, 0)],
Gdk.DragAction.MOVE,
)
self.tree.enable_model_drag_dest(
[('GTK_TREE_MODEL_ROW', Gtk.TargetFlags.SAME_WIDGET, 0)],
Gdk.DragAction.MOVE,
)
self.tree.connect('drag-end', self.selectionlp.change)
self.enabled_column.set_cell_data_func(
self.toggle_renderer, self.enabled_data_function
)
self.title_column.set_cell_data_func(
self.text_renderer, self.title_data_function
)
@GtkTemplate.Callback
def on_row_activated(self, tree, path, column):
"""
Updates the enabled column
"""
if self.model[path][4]:
return
enabled = not self.model[path][3]
self.model[path][3] = enabled
def enabled_data_function(self, column, cell, model, iter, user_data):
"""
Prepares sensitivity
of the enabled column
"""
path = model.get_path(iter)
fixed = model[path][4]
cell.props.sensitive = not fixed
def title_data_function(self, column, cell, model, iter, user_data):
"""
Prepares the markup to be
used for the title column
"""
path = model.get_path(iter)
title, description = model[path][1], model[path][2]
markup = '<b>%s</b>' % title
if description is not None:
markup += '\n<span size="small">%s</span>' % description
cell.props.markup = markup
def iter_prev(self, iter, model):
"""
Returns the previous iter
Taken from PyGtk FAQ 13.51
"""
path = model.get_path(iter)
position = path[-1]
if position == 0:
return None
prev_path = list(path)[:-1]
prev_path.append(position - 1)
prev = model.get_iter(tuple(prev_path))
return prev
@GtkTemplate.Callback
def on_key_press_event(self, tree, event):
"""
Allows for reordering via keyboard (Alt+<direction>)
"""
if not event.get_state() & Gdk.ModifierType.MOD1_MASK:
return
if event.keyval not in (Gdk.KEY_Up, Gdk.KEY_Down):
return
model, selected_iter = tree.get_selection().get_selected()
if event.keyval == Gdk.KEY_Up:
previous_iter = self.iter_prev(selected_iter, model)
model.move_before(selected_iter, previous_iter)
elif event.keyval == Gdk.KEY_Down:
next_iter = model.iter_next(selected_iter)
model.move_after(selected_iter, next_iter)
tree.scroll_to_cell(model.get_path(selected_iter))
self.selectionlp.apply()
@GtkTemplate.Callback
def on_toggled(self, cell, path):
"""
Updates the enabled column
"""
if self.model[path][4]:
return
active = not cell.get_active()
cell.set_active(active)
self.model[path][3] = active
self.selectionlp.apply()
def __init__(self, preferences, widget):
internal_widget = self.InternalWidget(self)
self.model = internal_widget.model
for item in self.items:
row = [item.id, item.title, item.description, True, item.fixed]
self.model.append(row)
guiutil.gtk_widget_replace(widget, internal_widget)
Preference.__init__(self, preferences, internal_widget)
def _get_value(self):
"""
Value to be stored in the settings
"""
return [row[0] for row in self.model if row[3]]
def _set_value(self):
"""
Updates the internal representation
"""
selected_items = settings.get_option(self.name, self.default)
# Get list of available items
available_items = [row[0] for row in self.model]
if not available_items:
return
# Filter out invalid items
selected_items = [item for item in selected_items if item in available_items]
# Cut out unselected items
unselected_items = [
item for item in available_items if item not in selected_items
]
# Move unselected items to the end
items = selected_items + unselected_items
new_order = [available_items.index(item) for item in items]
self.model.reorder(new_order)
# Disable unselected items
for row in self.model:
if row[0] in unselected_items and not row[4]:
row[3] = False
else:
row[3] = True
class ShortcutListPreference(Preference):
"""
A list showing available items and allowing
to assign/edit/remove key accelerators
"""
def __init__(self, preferences, widget):
self.list = Gtk.ListStore(str, str)
Preference.__init__(self, preferences, widget)
self.widget.set_model(self.list)
title_renderer = Gtk.CellRendererText()
title_column = Gtk.TreeViewColumn(_('Action'), title_renderer, text=0)
title_column.set_expand(True)
title_column.set_cell_data_func(title_renderer, self.title_data_func)
accel_renderer = Gtk.CellRendererAccel()
accel_renderer.set_property('editable', True)
accel_renderer.set_property('style', Pango.Style.OBLIQUE)
accel_renderer.connect('accel-cleared', self.on_accel_cleared)
accel_renderer.connect('accel-edited', self.on_accel_edited)
accel_column = Gtk.TreeViewColumn(_('Shortcut'), accel_renderer, text=1)
accel_column.set_expand(True)
self.widget.append_column(title_column)
self.widget.append_column(accel_column)
def title_data_func(self, celllayout, cell, model, iter, user_data):
"""
Renders human readable titles instead of the actual keys
"""
key = model.get_value(iter, 0)
try:
cell.set_property('text', self.available_items[key])
except KeyError:
pass
def on_accel_cleared(self, cellrenderer, path):
"""
Clears accelerators in the list
"""
iter = self.list.get_iter(path)
self.list.set_value(iter, 1, '')
def on_accel_edited(self, cellrenderer, path, accel_key, accel_mods, keycode):
"""
Updates accelerators display in the list
"""
accel = Gtk.accelerator_name(accel_key, accel_mods)
iter = self.list.get_iter(path)
self.list.set_value(iter, 1, accel)
def _set_value(self):
"""
Sets the preferences for this widget
"""
items = settings.get_option(self.name, self.default)
self.update_list(items)
def _get_value(self):
"""
Value to be stored into the settings file
"""
option = {}
iter = self.list.get_iter_first()
while iter:
action = self.list.get_value(iter, 0)
accel = self.list.get_value(iter, 1)
if accel:
option[action] = accel
iter = self.list.iter_next(iter)
return option
def update_list(self, items):
"""
Updates the displayed items
"""
self.list.clear()
for action in self.available_items.keys():
try:
accel = items[action]
except KeyError:
accel = ''
self.list.append([action, accel])
class TextViewPreference(Preference):
"""
Represents a Gtk.TextView
"""
def __init__(self, preferences, widget):
"""
Initializes the object
"""
Preference.__init__(self, preferences, widget)
def _setup_change(self):
self.widget.connect('focus-out-event', self.change)
def get_all_text(self):
"""
Returns the value of the text buffer
"""
buf = self.widget.get_buffer()
start = buf.get_start_iter()
end = buf.get_end_iter()
return buf.get_text(start, end, True)
def _set_value(self):
"""
Sets the value of this widget
"""
self.widget.get_buffer().set_text(
str(settings.get_option(self.name, default=self.default))
)
def _get_value(self):
"""
Applies the setting
"""
return self.get_all_text()
class ListPreference(Preference):
"""
A class to represent a space separated list in the preferences window
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _set_value(self):
items = settings.get_option(self.name, default=self.default)
try:
items = " ".join(items)
except TypeError:
items = ""
self.widget.set_text(items)
def _get_value(self):
import shlex
return shlex.split(self.widget.get_text())
class SpinPreference(Preference):
"""
A class to represent a numeric entry box with stepping buttons
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _set_value(self):
value = settings.get_option(self.name, default=self.default)
self.widget.set_value(value)
def _setup_change(self):
self.widget.connect('value-changed', self.change)
def _get_value(self):
return self.widget.get_value()
class ScalePreference(SpinPreference):
"""
Representation of Gtk.Scale widgets
"""
def __init__(self, preferences, widget):
SpinPreference.__init__(self, preferences, widget)
class FloatPreference(Preference):
"""
A class to represent a floating point number in the preferences window
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _set_value(self):
self.widget.set_text(str(settings.get_option(self.name, default=self.default)))
def _get_value(self):
return float(self.widget.get_text())
class IntPreference(FloatPreference):
def _get_value(self):
return int(self.widget.get_text())
class RGBAButtonPreference(Preference):
"""
A class to represent the color button
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _setup_change(self):
self.widget.connect('color-set', self.change)
def _set_value(self):
value = settings.get_option(self.name, self.default)
rgba = Gdk.RGBA()
rgba.parse(value)
self.widget.set_rgba(rgba)
def _get_value(self):
return self.widget.get_rgba().to_string()
class FontButtonPreference(Preference):
"""
Font button
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _setup_change(self):
self.widget.connect('font-set', self.change)
def _set_value(self):
font = settings.get_option(self.name, self.default)
self.widget.set_font_name(font)
def _get_value(self):
font = self.widget.get_font_name()
return font
class FontResetButtonPreference(Button, Conditional):
"""
A button to reset a font button to a default font
"""
def __init__(self, preferences, widget):
Button.__init__(self, preferences, widget)
Conditional.__init__(self)
def on_check_condition(self):
if self.condition_widget.get_font_name() == self.default:
return False
return True
def on_clicked(self, button):
self.condition_widget.set_font_name(self.default)
self.condition_widget.emit('font-set')
class ComboPreference(Preference):
"""
A combo box. The value stored in the settings must be the
first column of the combo box model.
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
def _setup_change(self):
self.widget.connect('changed', self.change)
def _set_value(self):
"""
Sets the preferences for this widget
"""
item = settings.get_option(self.name, self.default)
model = self.widget.get_model()
for row in model:
if item == row[0]:
self.widget.set_active_iter(row.iter)
def _get_value(self):
"""
Value to be stored into the settings file
"""
model = self.widget.get_model()
iter = self.widget.get_active_iter()
return model.get_value(iter, 0)
class ComboEntryPreference(Preference):
"""
A combo box allowing for user defined
values, presets and auto completion
Options:
* completion_items (List of completion items or
dictionary of items and their titles)
* preset_items (List of preset items or
dictionary of items and their titles)
"""
def __init__(self, preferences, widget):
Preference.__init__(self, preferences, widget)
self.list = Gtk.ListStore(str)
try:
try:
preset_items = list(self.preset_items.items())
self.list = Gtk.ListStore(str, str)
text_renderer = self.widget.get_cells()[0]
text_renderer.set_property('weight', Pango.Weight.BOLD)
title_renderer = Gtk.CellRendererText()
self.widget.pack_start(title_renderer, False)
self.widget.add_attribute(title_renderer, 'text', 1)
except AttributeError:
preset_items = [[item] for item in self.preset_items]
for preset in preset_items:
self.list.append(preset)
except AttributeError:
pass
self.widget.set_model(self.list)
self.widget.set_entry_text_column(0)
try:
completion = Gtk.EntryCompletion()
try:
completion_items = list(self.completion_items.items())
self.completion_list = Gtk.ListStore(str, str)
title_renderer = Gtk.CellRendererText()
completion.pack_end(title_renderer, True)
completion.add_attribute(title_renderer, 'text', 1)
except AttributeError:
completion_items = [[item] for item in self.completion_items]
self.completion_list = Gtk.ListStore(str)
keyword_renderer = Gtk.CellRendererText()
keyword_renderer.set_property('weight', Pango.Weight.BOLD)
completion.pack_end(keyword_renderer, True)
completion.add_attribute(keyword_renderer, 'text', 0)
completion.set_match_func(self.on_matching)
completion.connect('match-selected', self.on_match_selected)
completion.set_popup_single_match(True)
completion.set_model(self.completion_list)
self.widget.get_child().set_completion(completion)
for item in completion_items:
self.completion_list.append(item)
except AttributeError:
pass
def _setup_change(self):
"""
Sets up the function to be called
when this preference is changed
"""
self.widget.connect('changed', self.change, self.name, self._get_value())
def _set_value(self):
"""
Sets the preferences for this widget
"""
value = settings.get_option(self.name, self.default)
self.widget.get_child().set_text(str(value))
def _get_value(self):
"""
Value to be stored into the settings file
"""
return self.widget.get_child().get_text()
def on_matching(self, completion, text, iter):
"""
Matches the content of this box to
the list of available completions
"""
cursor_pos = self.widget.get_child().get_position()
# Ignore the rest, allows for completions in the middle
text = text[:cursor_pos]
match = self.completion_list.get_value(iter, 0)
# Try to find match, from largest to smallest
for i in range(len(match), -1, -1):
# Find from the rear
match_pos = text.rfind(match[:i])
# Matched if the match is not empty
# and equal to the text from the matched position to the end
# and not equal to the match itself
# (the latter hides the match if it was already fully typed)
if match[:i] and match[:i] == text[match_pos:] and match[:i] != match:
return True
return False
def on_match_selected(self, completion, list, iter):
"""
Inserts the selected completion
"""
cursor_pos = self.widget.get_child().get_position()
text = self.widget.get_child().get_text()[:cursor_pos]
match = list.get_value(iter, 0)
for i in range(len(match), -1, -1):
match_pos = text.rfind(match[:i])
if match[:i] and match[:i] == text[match_pos:]:
# Delete halfway typed text
self.widget.get_child().delete_text(
match_pos, match_pos + len(match[:i])
)
# Insert match at matched position
self.widget.get_child().insert_text(match, match_pos)
# Update cursor position
self.widget.get_child().set_position(match_pos + len(match))
return True
# vim: et sts=4 sw=4
| gpl-2.0 | -1,620,276,805,065,975,300 | 28.695271 | 88 | 0.584188 | false |
vsiivola/vesamusictraining | lib/resource_simplehtml.py | 1 | 4458 | #!/usr/bin/env python
"""Creates the media files and database fixtures for Vesa's
Music Trainer."""
import logging
import os
import re
from typing import Dict, Optional
from resource_base import BuildTarget
LOGGER = logging.getLogger(__name__)
HTML_TEMPLATE = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0
Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html lang="en-US" xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml">
<head>
</head>
<body>
%s
</body>"""
class SimpleHtmlTarget(BuildTarget):
"""Create simple raw html pages for debugging."""
def __init__(self, htmlfile: Optional[str] = None, mediadir: Optional[str] = None) -> None:
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "simple_html"))
self.htmlfile = htmlfile if htmlfile else\
os.path.join(basedir, "index.html")
mediadir = mediadir if mediadir else\
os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "simple_html", "media"))
super(SimpleHtmlTarget, self).__init__(
mediadir, mediadir, sound_formats=set(["mp3", "ogg"]))
@staticmethod
def clean_fname(fname: str) -> str:
"""Fix the file names to be relative to the index.html"""
return re.sub('^.*simple_html/', '', fname)
def write(self, content_index: Dict) -> None:
"""Write the web pages and the corresponding media"""
def image_str(fname: str) -> str:
"""HTML string for showing the image"""
return '<img src="%s"></img>' % fname
def audio_str(oggname: str, mp3name: str) -> str:
"""HTML string for playing the audio"""
return """<audio controls="controls">
<source src="%s" type="audio/ogg" />
<source src="%s" type="audio/mpeg" />
<a href="%s">Play</a></audio>""" % (oggname, mp3name, mp3name)
pagestr = ""
pagestr += "<h2>Musical Notation Trainer</h2>\n"
pagestr += "<ul>\n"
for doc in content_index:
pagestr += "<li>%s (%d exercises)</li>\n" % (
doc["languages"]["en"]["Title"], len(doc["Exercises"]))
pagestr += "</ul>\n"
pagestr += "</p>\n\n"
for doc in [doc2 for doc2 in content_index if len(doc2["Exercises"]) > 0]:
pagestr += "<h2>%s</h2>\n" % doc["languages"]["en"]["Title"]
for exer in doc["Exercises"]:
pagestr += "<h3>%s</h3>\n" % exer["name"]["en"].capitalize()
pagestr += '<table cellpadding="10" border="1">\n'
pagestr += '<tr><td colspan="3" align="center">'
if exer["question_type"] == "audio":
pagestr += audio_str(exer["ogg"], exer["mp3"])
else:
pagestr += image_str(exer["png"])
pagestr += "</td></tr>\n"
alternatives = []
for alt in [exer] + exer["confusers"]:
text = alt["text"]["en"] \
if "text" in alt and alt["text"] else None
if exer["answer_type"] == "image":
alternatives.append(
(image_str(alt["png"]),
audio_str(alt["ogg"], alt["mp3"]), text))
elif exer["answer_type"] == "audio":
alternatives.append(
(audio_str(alt["ogg"], alt["mp3"]),
image_str(alt["png"]), text))
pagestr += "<tr>\n"
# FIXME: randomize order
pagestr += '<td align="center">' + '</td>\n<td align="center">'.join(
[atmp[0] for atmp in alternatives])+"</td>"
pagestr += "</tr>\n"
if any([atmp[2] for atmp in alternatives]):
pagestr += "<tr>\n"
pagestr += '<td align="center">' + \
'</td>\n<td align="center">'.join(
[atmp[2] for atmp in alternatives])+"</td>"
pagestr += "</tr>\n"
pagestr += "<tr>\n"
pagestr += '<td align="center">' + '</td>\n<td align="center">'.join(
[atmp[1] for atmp in alternatives])+"</td>"
pagestr += "</tr>\n"
pagestr += "</table></p>\n"
with open(self.htmlfile, "w") as ofh:
ofh.write(HTML_TEMPLATE % pagestr)
| bsd-3-clause | -1,509,600,440,775,579,400 | 40.277778 | 96 | 0.499103 | false |
BrentDorsey/pipeline | lib/models/setup.py | 1 | 1024 | # -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
import re
from setuptools import setup
#import sys
#if not sys.version_info[0] == 3:
# print("\n \
# sys.exit("\n \
# ****************************************************************\n \
# * The CLI has only been tested with Python 3+ at this time. *\n \
# * Report any issues with Python 2 by emailing [email protected] *\n \
# ****************************************************************\n")
version = re.search(
    r'^__version__\s*=\s*"(.*)"',
open('pipeline_models/__init__.py').read(),
re.M
).group(1)
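# e.g. if pipeline_models/__init__.py contains __version__ = "1.2.3",
# this yields version == "1.2.3"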
setup(
name = "pipeline-models",
packages = ["pipeline_models"],
version = version,
description = "PipelineAI Models",
long_description = "PipelineAI Models",
author = "Chris Fregly",
author_email = "[email protected]",
url = "https://github.com/fluxcapacitor/pipeline/lib/models",
install_requires=[
],
dependency_links=[
]
)
| apache-2.0 | 3,142,549,495,761,106,000 | 27.444444 | 83 | 0.495117 | false |
jonas-hagen/meteo-logger-server | meteo/logger.py | 1 | 9175 | #!/usr/bin/env python3
import argparse
import csv
import os
import pathlib
from collections import OrderedDict
from datetime import datetime, timedelta
from time import sleep
import yaml
import sqlalchemy as sqa
import serial
from serial.tools import list_ports
import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
FIELDS = OrderedDict([
('time', 'time'),
('air_temperature', 'Ta'),
('rel_humidity', 'Ua'),
('air_pressure', 'Pa'),
('wind_speed_avg', 'Sm'),
('wind_speed_min', 'Sn'),
('wind_speed_max', 'Sx'),
('wind_dir_avg', 'Dm'),
('wind_dir_min', 'Dn'),
('wind_dir_max', 'Dx'),
('rain_accumulation', 'Rc'),
('rain_duration', 'Rd'),
('rain_intensity', 'Ri'),
('rain_peak_intensity', 'Rp'),
# ('hail_accumulation', 'Hc'),
# ('hail_duration', 'Hd'),
# ('hail_intensity', 'Hi'),
# ('hail_peak_intensity', 'Hp'),
# ('heating_voltage', 'Vh'),
# ('ref_voltage', 'Vr'),
# ('supply_voltage', 'Vs'),
('heating_temperature', 'Th'),
# ('internal_temperature', 'Tp'),
# ('information', 'Id'),
])
reverse_FIELDS = {v: k for k, v in FIELDS.items()}
def append_csv_row(path, data, default='NaN'):
"""Append a row to a CSV file. If the file does not exist already, also write the header."""
new_file = not path.exists()
    # Use a context manager so the file handle is always closed.
    with path.open(mode='a', newline='') as f:
        writer = csv.DictWriter(f, FIELDS.keys(), restval=default)
        if new_file:
            logger.info('Created new file ' + str(path))
            writer.writeheader()
        writer.writerow(data)
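# Editor's sketch (not part of the original module): exercising
# append_csv_row with a partial record; columns missing from the data dict
# are filled with the 'NaN' restval. The path is illustrative.
def _append_csv_row_example():
    import tempfile
    path = pathlib.Path(tempfile.mkdtemp()) / 'meteo_example.csv'
    append_csv_row(path, {'time': '2020-01-01T00:00:00Z',
                          'air_temperature': 3.2})
    return path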
def delete_log_files_if_needed(log_dir, max_files):
path = pathlib.Path(log_dir)
files = sorted(list(path.glob('meteo_????-??-??.csv')))
if len(files) > max_files:
logger.info('Too many log files. Deleting oldest.')
old = files[0:len(files)-max_files]
for p in old:
p.unlink()
def convert_unit(key, value, unit, default=None):
"""Convert units to hPa, degC, mm and mm/h."""
def identity(v):
return v
dispatcher = dict()
# Speed
dispatcher['S'] = {
'M': identity, # m/s
'K': lambda v: 1000/3600 * v, # km/h
'S': lambda v: 0.44704 * v, # mph
'N': lambda v: 0.514444 * v, # knots
}
# Pressure
dispatcher['P'] = {
'H': identity, # hPa
'P': lambda v: v / 100, # Pa
'B': lambda v: v * 1000, # bar
'M': lambda v: v * 1.33322, # mmHg
'I': lambda v: v * 25.4 * 1.33322, # inHg
}
# Temperature
dispatcher['T'] = {
'C': identity, # Celsius
'F': lambda v: (v - 32) * 5/9
}
# Rain
dispatcher['R'] = {
'M': identity, # mm or mm/h
's': identity, # seconds
        'I': lambda v: 25.4 * v,  # in or in/h (1 in = 25.4 mm)
}
    if unit == '#':
        # '#' marks an invalid/unavailable reading: keep the default.
        return default
    else:
        try:
            numeric = float(value)
        except (TypeError, ValueError):
            # Non-numeric payloads (e.g. id strings) pass through unchanged.
            return value
        conversion_func = dispatcher.get(key[0], {unit: identity})[unit]
        return conversion_func(numeric)
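# Editor's sketch: spot checks for convert_unit with hypothetical readings;
# 36 km/h is 10 m/s, 1 bar is 1000 hPa, and '#' marks an invalid value.
def _convert_unit_examples():
    assert abs(convert_unit('Sm', 36.0, 'K') - 10.0) < 1e-9
    assert convert_unit('Pa', 1.0, 'B') == 1000
    assert convert_unit('Ta', 0.0, '#', default='NaN') == 'NaN'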
def parse_line(line):
"""Parse a data message from the meteo station."""
parts = line.split(',')
msg_type = parts.pop(0)
data = dict()
for p in parts:
key, payload = p.split('=')
value = payload[:-1]
unit = payload[-1]
data[key] = convert_unit(key, value, unit, default='NaN')
data_row = {reverse_FIELDS[k]: v for k, v in data.items() if k in reverse_FIELDS}
return msg_type, data_row
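# Editor's sketch: parsing a hypothetical composite message in the
# Vaisala-style ASCII format this parser expects.
def _parse_line_example():
    msg_type, row = parse_line('0R0,Ta=23.6C,Sm=1.0M,Pa=1013.2H')
    assert msg_type == '0R0'
    assert row == {'air_temperature': 23.6, 'wind_speed_avg': 1.0,
                   'air_pressure': 1013.2}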
def parse_settings(line):
"""Parse a data message from the meteo station."""
parts = line.split(',')
msg_type = parts.pop(0)
data = dict()
for p in parts:
key, value = p.split('=')
data[key] = value
return msg_type, data
class MeteoTerminal(serial.Serial):
"""Simple wraper around pyserial object to send and receive commands from meteo station."""
def __init__(self, name, *args, **kwargs):
default_kwargs = {'baudrate': 19200, 'timeout': 2}
default_kwargs.update(kwargs)
super().__init__(name, *args, **default_kwargs)
self.clear()
def ask(self, s):
self.send(s)
return self.receive()
def clear(self, loud=False):
"""Clear any previous incomplete input"""
line = self.receive()
while line:
if loud:
logger.warning('Unexpected response: ' + line)
line = self.receive()
self.send('?')
self.readline()
def send(self, s):
self.write((s + '\r\n').encode('utf-8'))
self.flush()
def receive(self):
bs = self.readline()
if bs:
return bs.decode('utf-8').strip()
else:
return ''
def setup(self, settings):
for line in settings:
cmd, expected = parse_settings(line)
cmd, current = parse_settings(self.ask(cmd))
current = {k: v for k, v in current.items() if k in expected}
if current != expected:
answer = self.ask(line)
logger.info('Setup "{}", answer "{}".'.format(line, answer))
self.clear(loud=True)
else:
logger.info('Setup "{}" already ok.'.format(line))
@staticmethod
def find_station():
ports = list_ports.comports()
found = None
for name, desc, hw in ports:
try:
logger.debug('Try ' + name)
with MeteoTerminal(name) as ser:
answer = ser.ask('0')
if answer == '0':
logger.debug('OK: '+name)
found = name
break
except Exception:
pass
logger.info('Found meteo station: {}'.format(found))
return found
def create_db_table(conn, table):
logger.info('Create table {} if not exists.'.format(table))
if conn.dialect.name == 'mysql':
columns = [sqa.Column('time', sqa.dialects.mysql.DATETIME(fsp=6), primary_key=True), ]
else:
columns = [sqa.Column('time', sqa.types.DateTime(), primary_key=True), ]
columns += [sqa.Column(name, sqa.types.Float()) for name in FIELDS if name != 'time']
meta = sqa.MetaData()
table = sqa.Table(table, meta, *columns)
table.create(conn, checkfirst=True)
return table
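# Editor's sketch: creating the table against a throwaway in-memory SQLite
# engine; the table name is illustrative.
def _create_db_table_example():
    engine = sqa.create_engine('sqlite://')
    with engine.connect() as conn:
        table = create_db_table(conn, 'meteo')
    assert 'air_temperature' in table.c
    return table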
def meteo_logger(config):
if config['serial'] == 'auto':
port = MeteoTerminal.find_station()
else:
port = config['serial']
if port is None:
logger.error('No meteo station found. Specify port in config file.')
exit(1)
db_engine = None
db_table = None
if config['database']['use_database']:
try:
db_engine = sqa.create_engine(config['database']['url'])
with db_engine.connect() as conn:
db_table = create_db_table(conn, config['database']['table'])
except Exception as e:
logger.error('While setting up database: ' + str(e))
exit(1)
output_dir = config['target']
interval = config['interval']
with MeteoTerminal(port, baudrate=config['baudrate']) as term:
term.setup(config['setup'])
logger.info('Will now take action every {} s.'.format(interval))
while True:
try:
now = datetime.utcnow()
# Poll and store measurement
msg = term.ask('0R0')
msg_type, data = parse_line(msg)
data['time'] = now.isoformat() + 'Z'
# Write csv
day = now.date()
path = pathlib.Path(output_dir) / ('meteo_' + str(day) + '.csv')
append_csv_row(path, data)
# Store to database
if db_engine is not None:
with db_engine.connect() as conn:
conn.execute(db_table.insert(), **data)
if (now + timedelta(seconds=interval)).day > now.day:
# Reset counters
# next measurement will be in next day, so we reset now
logger.info('Reset precipitation counters.')
term.ask('0XZRU') # Precipitation counter reset
term.ask('0XZRI') # Precipitation intensity reset
# Housekeeping
delete_log_files_if_needed(output_dir, config['max_files'])
# Time
if datetime.utcnow() - now >= timedelta(seconds=interval):
logger.warning('Loop took longer than interval. Working as fast as possible.')
                # Coarse sleep until ~2 s before the deadline; max() keeps
                # the bound non-negative (min() here would always busy-wait).
                while datetime.utcnow() - now < timedelta(seconds=max(interval - 2, 0)):
sleep(1)
while datetime.utcnow() - now < timedelta(seconds=interval):
pass # busy loop
except KeyboardInterrupt:
logger.info('Terminated by user.')
exit(0)
except Exception as e:
logger.warning('Exception in main loop: ' + str(e))
def main():
with open('/etc/meteo.yml', 'r') as f:
        config = yaml.safe_load(f)
meteo_logger(config)
if __name__ == '__main__':
main()
| bsd-2-clause | 5,202,571,777,053,866,000 | 29.081967 | 98 | 0.535586 | false |
JoseBlanca/ngs_crumbs | crumbs/settings.py | 1 | 4923 | # Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
_SUPPORTED_OUTPUT_FORMATS = ['fasta', 'fastq', 'fastq-illumina']
# number of sequences to process in a chunk. Length of the sequence list to
# hold in memory
_PACKET_SIZE = 1000
# number of sequences to analyze in the fastq version guessing of a seekable
# file
_SEQS_TO_GUESS_FASTQ_VERSION = 1000
# number of bytes to analyze in the fastq version guessing of a non-seekable
# file
_CHUNK_TO_GUESS_FASTQ_VERSION = 50000
# maximum length expected for an Illumina read
_LONGEST_EXPECTED_ILLUMINA_READ = 250
# 454 FLX mate pair linker
_FLX_LINKER = 'GTTGGAACCGAAAGGGTTTGAATTCAAACCCTTTCGGTTCCAAC'
# Titanium mate pair linker. It could be found forward or reverse
_TITANIUM_LINKER = 'TCGTATAACTTCGTATAATGTATGCTATACGAAGTTATTACG'
_TITANIUM_LINKER_REV = 'CGTAATAACTTCGTATAGCATACATTATACGAAGTTATACGA'
_FWD_454_LINKERS = [_FLX_LINKER, _TITANIUM_LINKER]
_ION_TORRENT_LINKER = 'CTGCTGTACCGTACATCCGCCTTGGCCGTACAGCAG'
_ION_TORRENT_LINKER_REV = 'CTGCTGTACGGCCAAGGCGGATGTACGGTACAGCAG'
_LINKERS = [_FLX_LINKER, _TITANIUM_LINKER, _ION_TORRENT_LINKER]
# # Use this to modify how get_binary path works
# if need to modify the binary's name
_USE_EXTERNAL_BIN_PREFIX = False
# prefix to add to the binary name
_EXTERNAL_BIN_PREFIX = 'ngs_crumbs'
# mark True if need the path or assumes that is on the path
_ADD_PATH_TO_EXT_BIN = True
_THIRD_PARTY_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'third_party')
_PICARD_JAR = os.path.join(_THIRD_PARTY_DIR, 'java', 'picard-tools',
'picard.jar')
_GATK_JAR = '/usr/local/biology/GenomeAnalysisTK/GenomeAnalysisTK.jar'
# how many reads can be hold in memory by default
_DEFAULT_SEQS_IN_MEM_LIMIT = 500000
# max width of a line of an ASCII plot
_MAX_WIDTH_ASCII_PLOT = 100
# default minimum number of bins in an histogram
_MIN_BINS = 20
# default maximum number of bins in an histogram
_MAX_BINS = 500
_MEAN_VALUES_IN_BIN = 10000
# default number of location to plot in a nucleotide frequency plot
_DEF_PLOT_FREQS_UP_TO_BASE = 40
# when 2 match parts are within this distance they are merged as just one matchpart
_DEFAULT_IGNORE_ELONGATION_SHORTER = 3
# default kmer size to do the kmer stats
_DEFAULT_KMER_SIZE = 20
# trimest polyannotator
_POLYA_ANNOTATOR_MIN_LEN = 5
_POLYA_ANNOTATOR_MISMATCHES = 1
# quality trim
_DEFAULT_QUALITY_TRIM_TRESHOLD = 25
_DEFAULT_QUALITY_TRIM_WINDOW = 5
# dust score parameters
_DUST_WINDOWSIZE = 64
_DUST_WINDOWSTEP = 32
_DEFATULT_DUST_THRESHOLD = 7
# dust snp filter parameters
_DEF_SNP_DUST_WINDOW = 11
_TEMP_DIR = None
# min_mapq to use as a filter for maped reads
_DEFAULT_MIN_MAPQ = 0
# buffer size and memory limit for match_pairs
_MAX_READS_IN_MEMORY = 1000000
_CHECK_ORDER_BUFFER_SIZE = 100000
# default parameters for chimera finding
_CHIMERAS_SETTINGS = {}
_CHIMERAS_SETTINGS['MAX_CLIPPING'] = 0.05
_CHIMERAS_SETTINGS['MAX_PE_LEN'] = 750
_CHIMERAS_SETTINGS['MATE_DISTANCE_VARIATION'] = 1000
_DEFAULT_N_BINS = 80
_DEFAULT_N_MOST_ABUNDANT_REFERENCES = 40
class _Settings(dict):
'''A class that stores the ngs_crumbs settings.'''
def __init__(self):
'It inits the class'
super(_Settings, self).__init__()
self.load_settings()
tempfile.tempdir = self.__getitem__('TEMP_DIR')
def load_settings(self):
'It loads the settings defined in this module'
for key, val in globals().viewitems():
if not key.isupper():
continue
key = key[1:] # strip the underscore
super(_Settings, self).__setitem__(key, val)
        # Are there any environment variables to update the settings?
        for key, value in os.environ.items():
            if key.startswith('SEQ_CRUMBS_'):
                key = key[11:]
                if key in self.viewkeys():
                    # Coerce the environment string to the type of the
                    # existing default value (type(key) was a no-op here,
                    # since key is always a string).
                    current = super(_Settings, self).__getitem__(key)
                    if current is not None:
                        value = type(current)(value)
                    super(_Settings, self).__setitem__(key, value)
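# Editor's note: with the override hook above, a setting such as PACKET_SIZE
# can be changed per process from the shell (values are illustrative):
#
#   SEQ_CRUMBS_PACKET_SIZE=5000 python my_script.py
#
# get_setting('PACKET_SIZE') then returns 5000 inside that process.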
_settings = _Settings()
def get_settings():
'It returns the settings'
# load the settings defined in this module
return _settings
def get_setting(key):
'It returns the value for one setting'
return _settings[key]
| gpl-3.0 | 7,303,150,130,454,446,000 | 31.176471 | 79 | 0.703839 | false |
marknca/cling | dependencies/scss/scss_meta.py | 1 | 2197 | #-*- coding: utf-8 -*-
"""
pyScss, a Scss compiler for Python
@author German M. Bravo (Kronuz) <[email protected]>
@version 1.2.0 alpha
@see https://github.com/Kronuz/pyScss
@copyright (c) 2012-2013 German M. Bravo (Kronuz)
@license MIT License
http://www.opensource.org/licenses/mit-license.php
pyScss compiles Scss, a superset of CSS that is more powerful, elegant and
easier to maintain than plain-vanilla CSS. The library acts as a CSS source code
preprocessor which allows you to use variables, nested rules, mixins, and have
inheritance of rules, all with a CSS-compatible syntax which the preprocessor
then compiles to standard CSS.
Scss, as an extension of CSS, helps keep large stylesheets well-organized. It
borrows concepts and functionality from projects such as OOCSS and other similar
frameworks such as Sass. It's built on top of the original PHP xCSS codebase
structure but it's been completely rewritten, many bugs have been fixed and it
has been extensively extended to support almost the full range of Sass' Scss
syntax and functionality.
Bits of code in pyScss come from various projects:
Compass:
(c) 2009 Christopher M. Eppstein
http://compass-style.org/
Sass:
(c) 2006-2009 Hampton Catlin and Nathan Weizenbaum
http://sass-lang.com/
xCSS:
(c) 2010 Anton Pawlik
http://xcss.antpaw.org/docs/
This file defines Meta data, according to PEP314
(http://www.python.org/dev/peps/pep-0314/) which is common to both pyScss
and setup.py distutils.
We create this here so this information can be compatible with BOTH
Python 2.x and Python 3.x so setup.py can use it when building pyScss
for both Py3.x and Py2.x
"""
from __future__ import unicode_literals
import sys
VERSION_INFO = (1, 3, 5)
DATE_INFO = (2016, 6, 8) # YEAR, MONTH, DAY
VERSION = '.'.join(str(i) for i in VERSION_INFO)
REVISION = '%04d%02d%02d' % DATE_INFO
BUILD_INFO = "pyScss v" + VERSION + " (" + REVISION + ")"
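# Editor's note: with the tuples above, VERSION == '1.3.5',
# REVISION == '20160608' and BUILD_INFO == 'pyScss v1.3.5 (20160608)'.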
AUTHOR = "German M. Bravo (Kronuz)"
AUTHOR_EMAIL = '[email protected]'
URL = 'http://github.com/Kronuz/pyScss'
DOWNLOAD_URL = 'http://github.com/Kronuz/pyScss/tarball/v' + VERSION
LICENSE = "MIT"
PROJECT = "pyScss"
| apache-2.0 | -1,225,172,330,657,079,000 | 36.237288 | 80 | 0.724169 | false |
chihongze/girlfriend | girlfriend/plugin/excel.py | 1 | 5490 | # coding: utf-8
import xlrd
import time
import random
import types
import xlsxwriter
from StringIO import StringIO
from girlfriend.util.lang import (
args2fields,
SequenceCollectionType
)
from girlfriend.data.table import AbstractTable
from girlfriend.plugin.data import AbstractDataReader
class ExcelReaderPlugin(object):
"""该插件读取Excel文件,并转换为需要的数据结构
"""
name = "read_excel"
def execute(self, context, filepath, *sheet_readers):
workbook = xlrd.open_workbook(filepath)
return [reader(context, workbook) for reader in sheet_readers]
class SheetR(AbstractDataReader):
"""读取Sheet"""
@args2fields()
def __init__(self, sheetname, record_handler=None, record_filter=None,
result_wrapper=None, skip_first_row=False, variable=None):
pass
def __call__(self, context, workbook):
worksheet = workbook.sheet_by_name(self._sheetname)
result = []
for row_index in xrange(0, worksheet.nrows):
if self._skip_first_row and row_index == 0:
continue
record = [None] * worksheet.ncols
for column_index in xrange(0, worksheet.ncols):
value = worksheet.cell(row_index, column_index).value
record[column_index] = value
self._handle_record(record, result.append)
return self._handle_result(context, result)
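# Editor's sketch (based solely on the signatures above, not on the wider
# girlfriend API): wiring a SheetR into the reader plugin. The file name,
# sheet name and `context` object are hypothetical.
def _read_example(context):
    plugin = ExcelReaderPlugin()
    return plugin.execute(
        context, u"report.xls", SheetR(u"data", skip_first_row=True))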
class ExcelWriterPlugin(object):
"""该插件输出xlsx格式文件
"""
name = "write_excel"
    def execute(self, context, filepath, sheets=None, workbook_handler=None):
        sheets = sheets or ()  # tolerate the default None when iterating
        if filepath is None:
            # If no file path is given, generate a random file name.
filepath = "/tmp/{}_{}.xlsx".format(
int(time.time()), random.randint(100, 999))
workbook = xlsxwriter.Workbook(filepath)
elif filepath.startswith("memory:"):
output = StringIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
context[filepath[len("memory:"):]] = output
else:
workbook = xlsxwriter.Workbook(filepath)
for sheet in sheets:
sheet(context, workbook)
if workbook_handler:
workbook_handler(workbook)
workbook.close()
return filepath
class SheetW(object):
"""写入Sheet"""
@args2fields()
def __init__(self, table=None, sheet_name=None,
style=None, sheet_handler=None):
pass
def __call__(self, context, workbook):
if isinstance(self._table, types.StringTypes):
self._table = context[self._table]
sheet = self._generate_sheet(workbook)
if self._table is not None:
if isinstance(self._table, AbstractTable):
self._handle_table(sheet, workbook)
else:
self._handle_sequence(sheet, workbook)
if self._sheet_handler:
self._sheet_handler(workbook, sheet)
def _handle_table(self, sheet, workbook):
        # Write the title row first.
for idx, title in enumerate(self._table.titles):
style = self._get_style(0, idx, workbook)
sheet.write(0, idx, title.title, style)
self._handle_sequence(sheet, workbook, start=1)
def _handle_sequence(self, sheet, workbook, start=0):
for row_index, row in enumerate(self._table, start=start):
for column_index, column in enumerate(row):
style = self._get_style(row_index, column_index, workbook)
sheet.write(row_index, column_index, column, style)
def _get_style(self, row_index, column_index, workbook):
if not self._style:
return None
style = {}
for s in self._style:
if s.match(row_index, column_index):
style.update(s.style_dict)
if style:
return workbook.add_format(style)
else:
return None
def _generate_sheet(self, workbook):
sheet_name = None
if self._sheet_name:
sheet_name = self._sheet_name
else:
sheet_name = self._table.name
if sheet_name is None:
return workbook.add_worksheet()
else:
return workbook.add_worksheet(sheet_name)
class CellStyle(object):
"""单元格样式"""
def __init__(self, selector, style_dict):
self._selector = selector
self._style_dict = style_dict
self._formatter = None
def match(self, row_index, column_index):
if isinstance(self._selector, types.FunctionType):
return self._selector(row_index, column_index)
elif isinstance(self._selector, int):
return row_index == self._selector
elif isinstance(self._selector, SequenceCollectionType):
row_condition, column_condition = self._selector
row_matched, column_matched = (
self._match(row_index, row_condition),
self._match(column_index, column_condition)
)
return row_matched and column_matched
def _match(self, index, condition):
if condition is None:
return True
if isinstance(condition, int):
return index == condition
elif isinstance(condition, SequenceCollectionType):
return condition[0] <= index <= condition[-1]
@property
def style_dict(self):
return self._style_dict
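# Editor's sketch of the three selector forms CellStyle.match understands:
# a callable, a row index, or a (row_condition, column_condition) pair
# (assuming SequenceCollectionType covers plain tuples).
def _cell_style_examples():
    header = CellStyle(0, {'bold': True})             # the whole first row
    block = CellStyle((None, (1, 3)), {'border': 1})  # columns 1..3, any row
    checker = CellStyle(lambda r, c: (r + c) % 2 == 0, {'bg_color': '#eee'})
    assert header.match(0, 5) and not header.match(1, 5)
    assert block.match(7, 2) and not block.match(7, 4)
    assert checker.match(2, 2)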
| mit | -2,902,981,250,176,907,300 | 29.942529 | 77 | 0.598068 | false |
davinellulinvega/SpheroMouse | mouse.py | 1 | 2772 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'davinellulinvega'
from sphero_driver import sphero_driver
import pyautogui
from time import sleep
# Define the size of the screen
width, height = pyautogui.size()
half_width = width / 2
half_height = height / 2
pyautogui.FAILSAFE = False
# Define a function for processing collision detection
def on_collision(data):
"""
Each time the robot detect a collision, triggers a click on the mouse
:param data: a dictionary containing information on the collision (irrelevant in our case)
:return: Nothing
"""
# Simply click on the present location
pyautogui.click()
# Define a function for processing IMU data
def on_imu(data):
"""
Process the IMU data to move the mouse around the screen.
:param data: a dictionary containing information from the IMU sensor
:return: Nothing
"""
# Declare some variables for ease of reading
    pitch = float(data['IMU_PITCH_FILTERED'])  # translates into a displacement along the Y axis
    roll = float(data['IMU_ROLL_FILTERED'])  # translates into a displacement along the X axis
x = half_width + (half_width * (roll / 45))
y = half_height + (half_height * (pitch / 90))
# Move the mouse on the screen
pyautogui.moveTo(x, y)
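# Editor's sketch: the pitch/roll -> pixel mapping used above, factored out
# so the arithmetic is easy to check; e.g. roll=45 gives x == width and
# pitch=-90 gives y == 0.
def _imu_to_screen(pitch, roll):
    return (half_width + half_width * (roll / 45.0),
            half_height + half_height * (pitch / 90.0))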
# Create an instance of the sphero class
sphero = sphero_driver.Sphero(target_addr="68:86:E7:06:30:CB")
# Connect to the robot
sphero.connect()
# Disable the stabilization
sphero.set_stablization(0x00, False)
# Set the heading to 0
sphero.set_heading(0x00, False)
# Put the robot into the 0 position
sphero.roll(0x00, 0x00, 0x00, False)
# Set the data streaming
sphero.set_data_strm(70, 1,
sphero_driver.STRM_MASK1['IMU_PITCH_FILTERED'] | sphero_driver.STRM_MASK1['IMU_YAW_FILTERED'] |
sphero_driver.STRM_MASK1['IMU_ROLL_FILTERED'], 0, 0, False)
# Configure the collision detection
sphero.config_collision_detect(0x01, 0x0C, 0x00, 0x07, 0x00, 10, False)
# Add the callbacks for processing imu and collision data/events
sphero.add_async_callback(sphero_driver.IDCODE['COLLISION'], on_collision)
sphero.add_async_callback(sphero_driver.IDCODE['DATA_STRM'], on_imu)
# Turn the back led on
sphero.set_back_led(0xff, False)
# Start the thread for data processing
sphero.start()
try: # Encapsulate into a try catch to somehow be able to stop this infinite loop
# Create an infinite loop to keep the program alive
while True:
# Yeah just sleep
sleep(60)
except KeyboardInterrupt:
print("The user asked us to stop")
# Switch the back led off
sphero.set_back_led(0x00, False)
# Disconnect from the robot
sphero.disconnect()
# Wait for all threads to stop
sphero.join()
print("Goodbye all you people")
| lgpl-3.0 | -5,150,611,249,706,489,000 | 29.130435 | 116 | 0.700577 | false |
thatneat/petl | setup.py | 1 | 1346 | from __future__ import print_function, absolute_import, division
from ast import literal_eval
from distutils.core import setup
def get_version(source='petl/__init__.py'):
with open(source) as f:
for line in f:
if line.startswith('__version__'):
return literal_eval(line.split('=')[-1].lstrip())
raise ValueError("__version__ not found")
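# Editor's note: for a line like __version__ = '1.0' the split/lstrip yields
# the token "'1.0'", and literal_eval turns it into the plain string '1.0'.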
setup(
name='petl',
version=get_version(),
author='Alistair Miles',
author_email='[email protected]',
package_dir={'': '.'},
packages=['petl', 'petl.io', 'petl.transform', 'petl.util',
'petl.test', 'petl.test.io', 'petl.test.transform',
'petl.test.util'],
scripts=['bin/petl'],
url='https://github.com/alimanfoo/petl',
license='MIT License',
description='A Python package for extracting, transforming and loading '
'tables of data.',
long_description=open('README.txt').read(),
classifiers=['Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| mit | 6,906,650,021,279,503,000 | 34.421053 | 79 | 0.579495 | false |
CiscoSystems/jujucharm-n1k | charms/precise/juju-gui/tests/test_utils.py | 1 | 50606 | # This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2012-2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Juju GUI utils tests."""
from contextlib import contextmanager
import json
import os
import shutil
from subprocess import CalledProcessError
import tempfile
import unittest
import charmhelpers
import mock
from shelltoolbox import environ
import tempita
import yaml
from utils import (
API_PORT,
JUJU_GUI_DIR,
JUJU_PEM,
WEB_PORT,
_get_by_attr,
cmd_log,
compute_build_dir,
download_release,
fetch_gui_release,
first_path_in_dir,
get_api_address,
get_launchpad_release,
get_npm_cache_archive_url,
get_release_file_path,
get_zookeeper_address,
install_builtin_server,
install_missing_packages,
legacy_juju,
log_hook,
parse_source,
remove_apache_setup,
remove_haproxy_setup,
render_to_file,
save_or_create_certificates,
setup_apache_config,
setup_haproxy_config,
start_agent,
start_builtin_server,
start_haproxy_apache,
start_improv,
stop_agent,
stop_builtin_server,
stop_haproxy_apache,
stop_improv,
write_builtin_server_startup,
write_gui_config,
)
# Import the whole utils package for monkey patching.
import utils
class AttrDict(dict):
"""A dict with the ability to access keys as attributes."""
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError
class TestAttrDict(unittest.TestCase):
def test_key_as_attribute(self):
# Ensure attributes can be used to retrieve dict values.
attr_dict = AttrDict(myattr='myvalue')
self.assertEqual('myvalue', attr_dict.myattr)
def test_attribute_not_found(self):
        # An AttributeError is raised if the dict does not contain a key
        # corresponding to the requested attribute.
with self.assertRaises(AttributeError):
AttrDict().myattr
@mock.patch('utils.run')
@mock.patch('utils.log')
@mock.patch('utils.cmd_log', mock.Mock())
class TestDownloadRelease(unittest.TestCase):
def test_download(self, mock_log, mock_run):
# A release is properly downloaded using curl.
url = 'http://download.example.com/release.tgz'
filename = 'local-release.tgz'
destination = download_release(url, filename)
expected_destination = os.path.join(os.getcwd(), 'releases', filename)
self.assertEqual(expected_destination, destination)
expected_log = 'Downloading release file: {} --> {}.'.format(
url, expected_destination)
mock_log.assert_called_once_with(expected_log)
mock_run.assert_called_once_with(
'curl', '-L', '-o', expected_destination, url)
@mock.patch('utils.log', mock.Mock())
class TestFetchGuiRelease(unittest.TestCase):
sources = tuple(
{'filename': 'release.' + extension,
'release_path': '/my/release.' + extension}
for extension in ('tgz', 'xz'))
@contextmanager
def patch_launchpad(self, origin, version, source):
"""Mock the functions used to download a release from Launchpad.
Ensure all the functions are called correctly.
"""
url = 'http://launchpad.example.com/' + source['filename'] + '/file'
patch_launchpad = mock.patch('utils.Launchpad')
patch_get_launchpad_release = mock.patch(
'utils.get_launchpad_release',
mock.Mock(return_value=(url, source['filename'])),
)
patch_download_release = mock.patch(
'utils.download_release',
mock.Mock(return_value=source['release_path']),
)
with patch_launchpad as mock_launchpad:
with patch_get_launchpad_release as mock_get_launchpad_release:
with patch_download_release as mock_download_release:
yield
login = mock_launchpad.login_anonymously
login.assert_called_once_with('Juju GUI charm', 'production')
mock_get_launchpad_release.assert_called_once_with(
login().projects['juju-gui'], origin, version)
mock_download_release.assert_called_once_with(url, source['filename'])
@mock.patch('utils.download_release')
def test_url(self, mock_download_release):
# The release is retrieved from an URL.
for source in self.sources:
mock_download_release.return_value = source['release_path']
url = 'http://download.example.com/' + source['filename']
path = fetch_gui_release('url', url)
self.assertEqual(source['release_path'], path)
mock_download_release.assert_called_once_with(
url, 'url-' + source['filename'])
mock_download_release.reset_mock()
@mock.patch('utils.get_release_file_path')
def test_local(self, mock_get_release_file_path):
# The last local release is requested.
for source in self.sources:
mock_get_release_file_path.return_value = source['release_path']
path = fetch_gui_release('local', None)
self.assertEqual(source['release_path'], path)
mock_get_release_file_path.assert_called_once_with()
mock_get_release_file_path.reset_mock()
@mock.patch('utils.get_release_file_path')
def test_version_found(self, mock_get_release_file_path):
# A release version is specified and found locally.
for source in self.sources:
mock_get_release_file_path.return_value = source['release_path']
path = fetch_gui_release('stable', '0.1.42')
self.assertEqual(source['release_path'], path)
mock_get_release_file_path.assert_called_once_with('0.1.42')
mock_get_release_file_path.reset_mock()
@mock.patch('utils.get_release_file_path')
def test_version_not_found(self, mock_get_release_file_path):
# A release version is specified but not found locally.
for source in self.sources:
mock_get_release_file_path.return_value = None
with self.patch_launchpad('stable', '0.1.42', source):
path = fetch_gui_release('stable', '0.1.42')
self.assertEqual(source['release_path'], path)
mock_get_release_file_path.assert_called_once_with('0.1.42')
mock_get_release_file_path.reset_mock()
@mock.patch('utils.get_release_file_path')
def test_stable(self, mock_get_release_file_path):
# The last stable release is requested.
for source in self.sources:
with self.patch_launchpad('stable', None, source):
path = fetch_gui_release('stable', None)
self.assertEqual(source['release_path'], path)
self.assertFalse(mock_get_release_file_path.called)
@mock.patch('utils.get_release_file_path')
def test_trunk(self, mock_get_release_file_path):
# The last development release is requested.
for source in self.sources:
with self.patch_launchpad('trunk', None, source):
path = fetch_gui_release('trunk', None)
self.assertEqual(source['release_path'], path)
self.assertFalse(mock_get_release_file_path.called)
class TestFirstPathInDir(unittest.TestCase):
def setUp(self):
self.directory = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.directory)
self.path = os.path.join(self.directory, 'file_or_dir')
def test_file_path(self):
# Ensure the full path of a file is correctly returned.
open(self.path, 'w').close()
self.assertEqual(self.path, first_path_in_dir(self.directory))
def test_directory_path(self):
# Ensure the full path of a directory is correctly returned.
os.mkdir(self.path)
self.assertEqual(self.path, first_path_in_dir(self.directory))
def test_empty_directory(self):
# An IndexError is raised if the directory is empty.
self.assertRaises(IndexError, first_path_in_dir, self.directory)
class TestGetApiAddress(unittest.TestCase):
env_address = 'env.example.com:17070'
agent_address = 'agent.example.com:17070'
@contextmanager
def agent_file(self, addresses=None):
"""Set up a directory structure similar to the one created by juju.
If addresses are provided, also create a machiner directory and an
agent file containing the addresses.
Remove the directory structure when exiting from the context manager.
"""
base_dir = tempfile.mkdtemp()
unit_dir = tempfile.mkdtemp(dir=base_dir)
machine_dir = os.path.join(base_dir, 'machine-1')
if addresses is not None:
os.mkdir(machine_dir)
with open(os.path.join(machine_dir, 'agent.conf'), 'w') as conf:
yaml.dump({'apiinfo': {'addrs': addresses}}, conf)
try:
yield unit_dir, machine_dir
finally:
shutil.rmtree(base_dir)
def test_retrieving_address_from_env(self):
# The API address is correctly retrieved from the environment.
with environ(JUJU_API_ADDRESSES=self.env_address):
self.assertEqual(self.env_address, get_api_address())
def test_multiple_addresses_in_env(self):
# If multiple API addresses are listed in the environment variable,
# the first one is returned.
addresses = '{} foo.example.com:42'.format(self.env_address)
with environ(JUJU_API_ADDRESSES=addresses):
self.assertEqual(self.env_address, get_api_address())
def test_both_env_and_agent_file(self):
# If the API address is included in both the environment and the
# agent.conf file, the environment variable takes precedence.
with environ(JUJU_API_ADDRESSES=self.env_address):
with self.agent_file([self.agent_address]) as (unit_dir, _):
self.assertEqual(self.env_address, get_api_address(unit_dir))
def test_retrieving_address_from_agent_file(self):
# The API address is correctly retrieved from the machiner agent file.
with self.agent_file([self.agent_address]) as (unit_dir, _):
self.assertEqual(self.agent_address, get_api_address(unit_dir))
def test_multiple_addresses_in_agent_file(self):
# If multiple API addresses are listed in the agent file, the first
# one is returned.
addresses = [self.agent_address, 'foo.example.com:42']
with self.agent_file(addresses) as (unit_dir, _):
self.assertEqual(self.agent_address, get_api_address(unit_dir))
def test_missing_env_and_agent_file(self):
# An IOError is raised if the agent configuration file is not found.
with self.agent_file() as (unit_dir, machine_dir):
os.mkdir(machine_dir)
self.assertRaises(IOError, get_api_address, unit_dir)
def test_missing_env_and_agent_directory(self):
# An IOError is raised if the machine directory is not found.
with self.agent_file() as (unit_dir, _):
self.assertRaises(IOError, get_api_address, unit_dir)
class TestGetReleaseFilePath(unittest.TestCase):
def setUp(self):
self.playground = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.playground)
def mock_releases_dir(self):
"""Mock the releases directory."""
return mock.patch('utils.RELEASES_DIR', self.playground)
def assert_path(self, filename, path):
"""Ensure the absolute path of filename equals the given path."""
expected = os.path.join(self.playground, filename)
self.assertEqual(expected, path)
@contextmanager
def assert_error(self):
"""Ensure the code executed in the context block raises a ValueError.
Also check the error message.
"""
with self.assertRaises(ValueError) as context_manager:
yield
error = str(context_manager.exception)
self.assertEqual('Error: no releases found in the charm.', error)
def add(self, filename):
"""Create a release file in the playground directory."""
path = os.path.join(self.playground, filename)
open(path, 'w').close()
def test_last_release(self):
# The last release is correctly retrieved.
self.add('juju-gui-0.12.1.tgz')
self.add('juju-gui-1.2.3.tgz')
self.add('juju-gui-2.0.0+build.42.tgz')
self.add('juju-gui-2.0.1.tgz')
with self.mock_releases_dir():
path = get_release_file_path()
self.assert_path('juju-gui-2.0.1.tgz', path)
def test_xz(self):
# The last release is correctly retrieved for xz files too.
self.add('juju-gui-0.12.1.tgz')
self.add('juju-gui-1.2.3.tgz')
self.add('juju-gui-2.0.0+build.42.tgz')
self.add('juju-gui-2.0.1.xz')
with self.mock_releases_dir():
path = get_release_file_path()
self.assert_path('juju-gui-2.0.1.xz', path)
def test_ordering(self):
# Release versions are correctly ordered.
self.add('juju-gui-0.12.1.tgz')
self.add('juju-gui-0.9.1.tgz')
with self.mock_releases_dir():
path = get_release_file_path()
self.assert_path('juju-gui-0.12.1.tgz', path)
def test_no_releases(self):
# A ValueError is raised if no releases are found.
with self.mock_releases_dir():
with self.assert_error():
get_release_file_path()
def test_no_releases_with_files(self):
# A ValueError is raised if no releases are found.
# Extraneous files are ignored while looking for releases.
self.add('jujugui-1.2.3.tgz') # Wrong prefix.
self.add('juju-gui-1.2.tgz') # Missing patch version number.
self.add('juju-gui-1.2.3.bz2') # Wrong file extension.
self.add('juju-gui-1.2.3.4.tgz') # Wrong version.
self.add('juju-gui-1.2.3.build.42.tgz') # Missing "+" separator.
self.add('juju-gui-1.2.3+built.42.tgz') # Typo.
self.add('juju-gui-1.2.3+build.42.47.tgz') # Invalid bzr revno.
self.add('juju-gui-1.2.3+build.42.bz2') # Wrong file extension again.
with self.mock_releases_dir():
with self.assert_error():
                get_release_file_path()
def test_stable_version(self):
# A specific stable version is correctly retrieved.
self.add('juju-gui-1.2.3.tgz')
self.add('juju-gui-2.0.1+build.42.tgz')
self.add('juju-gui-2.0.1.tgz')
self.add('juju-gui-3.2.1.tgz')
with self.mock_releases_dir():
path = get_release_file_path('2.0.1')
self.assert_path('juju-gui-2.0.1.tgz', path)
def test_development_version(self):
# A specific development version is correctly retrieved.
self.add('juju-gui-1.2.3+build.4247.tgz')
self.add('juju-gui-2.42.47+build.4247.tgz')
self.add('juju-gui-2.42.47.tgz')
self.add('juju-gui-3.42.47+build.4247.tgz')
with self.mock_releases_dir():
path = get_release_file_path('2.42.47+build.4247')
self.assert_path('juju-gui-2.42.47+build.4247.tgz', path)
def test_version_not_found(self):
# None is returned if the requested version is not found.
self.add('juju-gui-1.2.3.tgz')
self.add('juju-GUI-1.42.47.tgz') # This is not a valid release.
with self.mock_releases_dir():
path = get_release_file_path('1.42.47')
self.assertIsNone(path)
class TestLegacyJuju(unittest.TestCase):
def setUp(self):
self.base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.base_dir)
# Monkey patch utils.CURRENT_DIR.
self.original_current_dir = utils.CURRENT_DIR
utils.CURRENT_DIR = tempfile.mkdtemp(dir=self.base_dir)
def tearDown(self):
# Restore the original utils.CURRENT_DIR.
utils.CURRENT_DIR = self.original_current_dir
def test_jujucore(self):
# If the agent file is found this is a juju-core environment.
agent_path = os.path.join(self.base_dir, 'agent.conf')
open(agent_path, 'w').close()
self.assertFalse(legacy_juju())
def test_pyjuju(self):
# If the agent file does not exist this is a PyJuju environment.
self.assertTrue(legacy_juju())
def make_collection(attr, values):
"""Create a collection of objects having an attribute named *attr*.
The value of the *attr* attribute, for each instance, is taken from
the *values* sequence.
"""
return [AttrDict({attr: value}) for value in values]
class TestMakeCollection(unittest.TestCase):
def test_factory(self):
# Ensure the factory returns the expected object instances.
instances = make_collection('myattr', range(5))
self.assertEqual(5, len(instances))
for num, instance in enumerate(instances):
self.assertEqual(num, instance.myattr)
class TestGetByAttr(unittest.TestCase):
attr = 'myattr'
collection = make_collection(attr, range(5))
def test_item_found(self):
# Ensure an object instance is correctly returned if found in
# the collection.
item = _get_by_attr(self.collection, self.attr, 3)
self.assertEqual(3, item.myattr)
def test_value_not_found(self):
# None is returned if the collection does not contain the requested
# item.
item = _get_by_attr(self.collection, self.attr, '__does_not_exist__')
self.assertIsNone(item)
def test_attr_not_found(self):
# An AttributeError is raised if items in collection does not have the
# required attribute.
with self.assertRaises(AttributeError):
_get_by_attr(self.collection, 'another_attr', 0)
class FileStub(object):
"""Simulate a Launchpad hosted file returned by launchpadlib."""
def __init__(self, file_link):
self.file_link = file_link
def __str__(self):
return self.file_link
class TestGetLaunchpadRelease(unittest.TestCase):
project = AttrDict(
series=(
AttrDict(
name='stable',
releases=(
AttrDict(
version='0.1.1',
files=(
FileStub('http://example.com/0.1.1.dmg'),
FileStub('http://example.com/0.1.1.tgz'),
),
),
AttrDict(
version='0.1.0',
files=(
FileStub('http://example.com/0.1.0.dmg'),
FileStub('http://example.com/0.1.0.tgz'),
),
),
),
),
AttrDict(
name='trunk',
releases=(
AttrDict(
version='0.1.1+build.1',
files=(
FileStub('http://example.com/0.1.1+build.1.dmg'),
FileStub('http://example.com/0.1.1+build.1.tgz'),
),
),
AttrDict(
version='0.1.0+build.1',
files=(
FileStub('http://example.com/0.1.0+build.1.dmg'),
FileStub('http://example.com/0.1.0+build.1.tgz'),
),
),
),
),
),
)
def test_latest_stable_release(self):
# Ensure the correct URL is returned for the latest stable release.
url, name = get_launchpad_release(self.project, 'stable', None)
self.assertEqual('http://example.com/0.1.1.tgz', url)
self.assertEqual('0.1.1.tgz', name)
def test_latest_trunk_release(self):
# Ensure the correct URL is returned for the latest trunk release.
url, name = get_launchpad_release(self.project, 'trunk', None)
self.assertEqual('http://example.com/0.1.1+build.1.tgz', url)
self.assertEqual('0.1.1+build.1.tgz', name)
def test_specific_stable_release(self):
# Ensure the correct URL is returned for a specific version of the
# stable release.
url, name = get_launchpad_release(self.project, 'stable', '0.1.0')
self.assertEqual('http://example.com/0.1.0.tgz', url)
self.assertEqual('0.1.0.tgz', name)
def test_specific_trunk_release(self):
# Ensure the correct URL is returned for a specific version of the
# trunk release.
url, name = get_launchpad_release(
self.project, 'trunk', '0.1.0+build.1')
self.assertEqual('http://example.com/0.1.0+build.1.tgz', url)
self.assertEqual('0.1.0+build.1.tgz', name)
def test_series_not_found(self):
# A ValueError is raised if the series cannot be found.
with self.assertRaises(ValueError) as cm:
get_launchpad_release(self.project, 'unstable', None)
self.assertIn('series not found', str(cm.exception))
def test_no_releases(self):
# A ValueError is raised if the series does not contain releases.
project = AttrDict(series=[AttrDict(name='stable', releases=[])])
with self.assertRaises(ValueError) as cm:
get_launchpad_release(project, 'stable', None)
self.assertIn('series does not contain releases', str(cm.exception))
def test_release_not_found(self):
# A ValueError is raised if the release cannot be found.
with self.assertRaises(ValueError) as cm:
get_launchpad_release(self.project, 'stable', '2.0')
self.assertIn('release not found', str(cm.exception))
def test_file_not_found(self):
# A ValueError is raised if the hosted file cannot be found.
project = AttrDict(
series=[
AttrDict(
name='stable',
releases=[AttrDict(version='0.1.0', files=[])],
),
],
)
with self.assertRaises(ValueError) as cm:
get_launchpad_release(project, 'stable', None)
self.assertIn('file not found', str(cm.exception))
def test_file_not_found_in_latest_release(self):
# The URL of a file from a previous release is returned if the latest
# one does not contain tarballs.
project = AttrDict(
series=[
AttrDict(
name='stable',
releases=[
AttrDict(version='0.1.1', files=[]),
AttrDict(
version='0.1.0',
files=[FileStub('http://example.com/0.1.0.tgz')],
),
],
),
],
)
url, name = get_launchpad_release(project, 'stable', None)
self.assertEqual('http://example.com/0.1.0.tgz', url)
self.assertEqual('0.1.0.tgz', name)
def test_xz_files_are_found(self):
project = AttrDict(
series=[
AttrDict(
name='stable',
releases=[
AttrDict(
version='0.1.0',
files=[FileStub('http://example.com/0.1.0.xz')],
),
],
),
],
)
url, name = get_launchpad_release(project, 'stable', None)
self.assertEqual('http://example.com/0.1.0.xz', url)
self.assertEqual('0.1.0.xz', name)
class TestGetZookeeperAddress(unittest.TestCase):
def setUp(self):
self.zookeeper_address = 'example.com:2000'
contents = 'env JUJU_ZOOKEEPER="{}"\n'.format(self.zookeeper_address)
with tempfile.NamedTemporaryFile(delete=False) as agent_file:
agent_file.write(contents)
self.agent_file_path = agent_file.name
self.addCleanup(os.remove, self.agent_file_path)
def test_get_zookeeper_address(self):
        # Ensure the Zookeeper address is correctly retrieved.
address = get_zookeeper_address(self.agent_file_path)
self.assertEqual(self.zookeeper_address, address)
class TestLogHook(unittest.TestCase):
def setUp(self):
# Monkeypatch the charmhelpers log function.
self.output = []
self.original = utils.log
utils.log = self.output.append
def tearDown(self):
# Restore the original charmhelpers log function.
utils.log = self.original
def test_logging(self):
# The function emits log messages on entering and exiting the hook.
with log_hook():
self.output.append('executing hook')
self.assertEqual(3, len(self.output))
enter_message, executing_message, exit_message = self.output
self.assertIn('>>> Entering', enter_message)
self.assertEqual('executing hook', executing_message)
self.assertIn('<<< Exiting', exit_message)
def test_subprocess_error(self):
# If a CalledProcessError exception is raised, the command output is
# logged.
with self.assertRaises(CalledProcessError) as cm:
with log_hook():
raise CalledProcessError(2, 'command', 'output')
exception = cm.exception
self.assertIsInstance(exception, CalledProcessError)
self.assertEqual(2, exception.returncode)
self.assertEqual('output', self.output[-2])
def test_error(self):
# Possible errors are re-raised by the context manager.
with self.assertRaises(TypeError) as cm:
with log_hook():
raise TypeError
exception = cm.exception
self.assertIsInstance(exception, TypeError)
self.assertIn('<<< Exiting', self.output[-1])
class TestParseSource(unittest.TestCase):
def setUp(self):
# Monkey patch utils.CURRENT_DIR.
self.original_current_dir = utils.CURRENT_DIR
utils.CURRENT_DIR = '/current/dir'
def tearDown(self):
# Restore the original utils.CURRENT_DIR.
utils.CURRENT_DIR = self.original_current_dir
def test_latest_local_release(self):
# Ensure the latest local release is correctly parsed.
expected = ('local', None)
self.assertTupleEqual(expected, parse_source('local'))
def test_latest_stable_release(self):
# Ensure the latest stable release is correctly parsed.
expected = ('stable', None)
self.assertTupleEqual(expected, parse_source('stable'))
def test_latest_trunk_release(self):
# Ensure the latest trunk release is correctly parsed.
expected = ('trunk', None)
self.assertTupleEqual(expected, parse_source('trunk'))
def test_stable_release(self):
# Ensure a specific stable release is correctly parsed.
expected = ('stable', '0.1.0')
self.assertTupleEqual(expected, parse_source('0.1.0'))
def test_trunk_release(self):
# Ensure a specific trunk release is correctly parsed.
expected = ('trunk', '0.1.0+build.1')
self.assertTupleEqual(expected, parse_source('0.1.0+build.1'))
def test_bzr_branch(self):
# Ensure a Bazaar branch is correctly parsed.
sources = ('lp:example', 'http://bazaar.launchpad.net/example')
for source in sources:
expected = ('branch', (source, None))
self.assertEqual(expected, parse_source(source))
def test_bzr_branch_and_revision(self):
# A Bazaar branch is correctly parsed when including revision.
sources = ('lp:example:42', 'http://bazaar.launchpad.net/example:1')
for source in sources:
expected = ('branch', tuple(source.rsplit(':', 1)))
self.assertEqual(expected, parse_source(source))
def test_url(self):
expected = ('url', 'http://example.com/gui')
self.assertTupleEqual(
expected, parse_source('url:http://example.com/gui'))
def test_file_url(self):
expected = ('url', 'file:///foo/bar')
self.assertTupleEqual(expected, parse_source('url:/foo/bar'))
def test_relative_file_url(self):
expected = ('url', 'file:///current/dir/foo/bar')
self.assertTupleEqual(expected, parse_source('url:foo/bar'))
class TestRenderToFile(unittest.TestCase):
def setUp(self):
self.destination_file = tempfile.NamedTemporaryFile()
self.addCleanup(self.destination_file.close)
self.template = tempita.Template('{{foo}}, {{bar}}')
with tempfile.NamedTemporaryFile(delete=False) as template_file:
template_file.write(self.template.content)
self.template_path = template_file.name
self.addCleanup(os.remove, self.template_path)
def test_render_to_file(self):
# Ensure the template is correctly rendered using the given context.
context = {'foo': 'spam', 'bar': 'eggs'}
render_to_file(self.template_path, context, self.destination_file.name)
expected = self.template.substitute(context)
self.assertEqual(expected, self.destination_file.read())
class TestSaveOrCreateCertificates(unittest.TestCase):
def setUp(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
self.cert_path = os.path.join(base_dir, 'certificates')
self.cert_file = os.path.join(self.cert_path, 'juju.crt')
self.key_file = os.path.join(self.cert_path, 'juju.key')
def test_generation(self):
# Ensure certificates are correctly generated.
save_or_create_certificates(
self.cert_path, 'some ignored contents', None)
self.assertIn('CERTIFICATE', open(self.cert_file).read())
self.assertIn('PRIVATE KEY', open(self.key_file).read())
def test_provided_certificates(self):
# Ensure files are correctly saved if their contents are provided.
save_or_create_certificates(self.cert_path, 'mycert', 'mykey')
self.assertIn('mycert', open(self.cert_file).read())
self.assertIn('mykey', open(self.key_file).read())
def test_pem_file(self):
# Ensure the pem file is created concatenating the key and cert files.
save_or_create_certificates(self.cert_path, 'Certificate', 'Key')
pem_file = os.path.join(self.cert_path, JUJU_PEM)
self.assertEqual('KeyCertificate', open(pem_file).read())
class TestCmdLog(unittest.TestCase):
def setUp(self):
# Patch the charmhelpers 'command', which powers get_config. The
# result of this is the mock_config dictionary will be returned.
# The monkey patch is undone in the tearDown.
self.command = charmhelpers.command
fd, self.log_file_name = tempfile.mkstemp()
os.close(fd)
mock_config = {'command-log-file': self.log_file_name}
charmhelpers.command = lambda *args: lambda: json.dumps(mock_config)
def tearDown(self):
charmhelpers.command = self.command
def test_contents_logged(self):
cmd_log('foo')
line = open(self.log_file_name, 'r').read()
self.assertTrue(line.endswith(': juju-gui@INFO \nfoo\n'))
class TestStartImprovAgentGui(unittest.TestCase):
def setUp(self):
self.service_names = []
self.actions = []
self.svc_ctl_call_count = 0
self.run_call_count = 0
self.fake_zk_address = '192.168.5.26'
self.build_dir = 'juju-gui/build-'
self.charmworld_url = 'http://charmworld.example.com/'
self.ssl_cert_path = 'ssl/cert/path'
# Monkey patches.
def service_control_mock(service_name, action):
self.svc_ctl_call_count += 1
self.service_names.append(service_name)
self.actions.append(action)
def noop(*args):
pass
def run(*args):
self.run_call_count += 1
@contextmanager
def su(user):
yield None
def get_zookeeper_address_mock(fp):
return self.fake_zk_address
self.files = {}
orig_rtf = utils.render_to_file
def render_to_file(template, context, dest):
target = tempfile.NamedTemporaryFile()
orig_rtf(template, context, target.name)
with open(target.name, 'r') as fp:
self.files[os.path.basename(dest)] = fp.read()
self.utils_names = dict(
service_control=(utils.service_control, service_control_mock),
log=(utils.log, noop),
su=(utils.su, su),
run=(utils.run, run),
unit_get=(utils.unit_get, noop),
render_to_file=(utils.render_to_file, render_to_file),
get_zookeeper_address=(
utils.get_zookeeper_address, get_zookeeper_address_mock),
get_api_address=(utils.get_api_address, noop),
APACHE_PORTS=(utils.APACHE_PORTS, 'PORTS_NOT_THERE'),
APACHE_SITE=(utils.APACHE_SITE, 'SITE_NOT_THERE'),
)
# Apply the patches.
for fn, fcns in self.utils_names.items():
setattr(utils, fn, fcns[1])
self.shutil_copy = shutil.copy
shutil.copy = noop
def tearDown(self):
# Undo all of the monkey patching.
for fn, fcns in self.utils_names.items():
setattr(utils, fn, fcns[0])
shutil.copy = self.shutil_copy
def test_start_improv(self):
staging_env = 'large'
start_improv(staging_env, self.ssl_cert_path,)
conf = self.files['juju-api-improv.conf']
self.assertTrue('--port %s' % API_PORT in conf)
self.assertTrue(staging_env + '.json' in conf)
self.assertTrue(self.ssl_cert_path in conf)
self.assertEqual(self.svc_ctl_call_count, 1)
self.assertEqual(self.service_names, ['juju-api-improv'])
self.assertEqual(self.actions, [charmhelpers.START])
def test_stop_improv(self):
stop_improv()
self.assertEqual(self.svc_ctl_call_count, 1)
self.assertEqual(self.service_names, ['juju-api-improv'])
self.assertEqual(self.actions, [charmhelpers.STOP])
def test_start_agent(self):
start_agent(self.ssl_cert_path, 'config')
conf = self.files['juju-api-agent.conf']
self.assertTrue('--port %s' % API_PORT in conf)
self.assertTrue('JUJU_ZOOKEEPER=%s' % self.fake_zk_address in conf)
self.assertTrue(self.ssl_cert_path in conf)
self.assertEqual(self.svc_ctl_call_count, 1)
self.assertEqual(self.service_names, ['juju-api-agent'])
self.assertEqual(self.actions, [charmhelpers.START])
def test_stop_agent(self):
stop_agent()
self.assertEqual(self.svc_ctl_call_count, 1)
self.assertEqual(self.service_names, ['juju-api-agent'])
self.assertEqual(self.actions, [charmhelpers.STOP])
def test_compute_build_dir(self):
for (juju_gui_debug, serve_tests, result) in (
(False, False, 'build-prod'),
(True, False, 'build-debug'),
(False, True, 'build-prod'),
(True, True, 'build-prod'),
):
build_dir = compute_build_dir(juju_gui_debug, serve_tests)
self.assertIn(
result, build_dir, 'debug: {}, serve_tests: {}'.format(
juju_gui_debug, serve_tests))
def test_setup_haproxy_config(self):
setup_haproxy_config(self.ssl_cert_path)
haproxy_conf = self.files['haproxy.cfg']
self.assertIn('ca-base {}'.format(self.ssl_cert_path), haproxy_conf)
self.assertIn('crt-base {}'.format(self.ssl_cert_path), haproxy_conf)
self.assertIn('ws1 127.0.0.1:{}'.format(API_PORT), haproxy_conf)
self.assertIn('web1 127.0.0.1:{}'.format(WEB_PORT), haproxy_conf)
self.assertIn('ca-file {}'.format(JUJU_PEM), haproxy_conf)
self.assertIn('crt {}'.format(JUJU_PEM), haproxy_conf)
self.assertIn('redirect scheme https', haproxy_conf)
def test_remove_haproxy_setup(self):
remove_haproxy_setup()
self.assertEqual(self.run_call_count, 2)
def test_setup_apache_config(self):
setup_apache_config(self.build_dir, serve_tests=True)
apache_site_conf = self.files['SITE_NOT_THERE']
self.assertIn('juju-gui/build-', apache_site_conf)
self.assertIn('VirtualHost *:{}'.format(WEB_PORT), apache_site_conf)
self.assertIn(
'Alias /test {}/test/'.format(JUJU_GUI_DIR), apache_site_conf)
apache_ports_conf = self.files['PORTS_NOT_THERE']
self.assertIn('NameVirtualHost *:8000', apache_ports_conf)
self.assertIn('Listen 8000', apache_ports_conf)
def test_start_haproxy_apache(self):
start_haproxy_apache(JUJU_GUI_DIR, False, self.ssl_cert_path, True)
self.assertEqual(self.svc_ctl_call_count, 2)
self.assertEqual(self.service_names, ['apache2', 'haproxy'])
self.assertEqual(
self.actions, [charmhelpers.RESTART, charmhelpers.RESTART])
def test_stop_haproxy_apache(self):
stop_haproxy_apache()
self.assertEqual(self.svc_ctl_call_count, 2)
self.assertEqual(self.service_names, ['haproxy', 'apache2'])
self.assertEqual(self.actions, [charmhelpers.STOP, charmhelpers.STOP])
def test_install_builtin_server(self):
install_builtin_server()
# Two run calls are executed: one for the dependencies, one for the
# server itself.
self.assertEqual(2, self.run_call_count)
def test_write_builtin_server_startup(self):
write_builtin_server_startup(
JUJU_GUI_DIR, self.ssl_cert_path, serve_tests=True, insecure=True,
charmworld_url=self.charmworld_url)
guiserver_conf = self.files['guiserver.conf']
self.assertIn('description "GUIServer"', guiserver_conf)
self.assertIn('--logging="info"', guiserver_conf)
self.assertIn('--apiurl="wss://127.0.0.1:8080/ws"', guiserver_conf)
self.assertIn('--apiversion="python"', guiserver_conf)
self.assertIn(
'--testsroot="{}/test/"'.format(JUJU_GUI_DIR), guiserver_conf)
self.assertIn('--insecure', guiserver_conf)
self.assertNotIn('--sandbox', guiserver_conf)
self.assertIn('--charmworldurl="http://charmworld.example.com/"',
guiserver_conf)
def test_write_builtin_server_startup_sandbox_and_logging(self):
# The upstart configuration file for the GUI server is correctly
# generated when the GUI is in sandbox mode and when a customized log
# level is specified.
write_builtin_server_startup(
JUJU_GUI_DIR, self.ssl_cert_path, serve_tests=True, sandbox=True,
builtin_server_logging='debug')
guiserver_conf = self.files['guiserver.conf']
self.assertIn('description "GUIServer"', guiserver_conf)
self.assertIn('--logging="debug"', guiserver_conf)
self.assertIn('--sandbox', guiserver_conf)
self.assertNotIn('--apiurl', guiserver_conf)
self.assertNotIn('--apiversion', guiserver_conf)
def test_start_builtin_server(self):
start_builtin_server(
JUJU_GUI_DIR, self.ssl_cert_path, serve_tests=False, sandbox=False,
builtin_server_logging='info', insecure=False,
charmworld_url='http://charmworld.example.com/')
self.assertEqual(self.svc_ctl_call_count, 1)
self.assertEqual(self.service_names, ['guiserver'])
self.assertEqual(self.actions, [charmhelpers.RESTART])
def test_stop_builtin_server(self):
stop_builtin_server()
self.assertEqual(self.svc_ctl_call_count, 1)
self.assertEqual(self.service_names, ['guiserver'])
self.assertEqual(self.actions, [charmhelpers.STOP])
self.assertEqual(self.run_call_count, 1)
def test_write_gui_config(self):
write_gui_config(
False, 'This is login help.', True, True, self.charmworld_url,
self.build_dir, config_js_path='config',
ga_key='UA-123456')
js_conf = self.files['config']
self.assertIn('consoleEnabled: false', js_conf)
self.assertIn('user: "admin"', js_conf)
self.assertIn('password: "admin"', js_conf)
self.assertIn('login_help: "This is login help."', js_conf)
self.assertIn('readOnly: true', js_conf)
self.assertIn("socket_url: 'wss://", js_conf)
self.assertIn('socket_protocol: "wss"', js_conf)
self.assertIn('charmworldURL: "http://charmworld.example.com/"',
js_conf)
self.assertIn('GA_key: "UA-123456"', js_conf)
def test_write_gui_config_insecure(self):
write_gui_config(
False, 'This is login help.', True, True, self.charmworld_url,
self.build_dir, secure=False, config_js_path='config')
js_conf = self.files['config']
self.assertIn("socket_url: 'ws://", js_conf)
self.assertIn('socket_protocol: "ws"', js_conf)
@mock.patch('utils.legacy_juju')
def test_write_gui_config_default_python_password(self, mock_legacy_juju):
mock_legacy_juju.return_value = True
write_gui_config(
False, 'This is login help.', True, True, self.charmworld_url,
self.build_dir, config_js_path='config',
password='kumquat')
js_conf = self.files['config']
self.assertIn('user: "admin"', js_conf)
self.assertIn('password: "kumquat"', js_conf)
@mock.patch('utils.legacy_juju')
def test_write_gui_config_default_sandbox_backend(self, mock_legacy_juju):
mock_legacy_juju.return_value = True
write_gui_config(
False, 'This is login help.', True, True, self.charmworld_url,
self.build_dir, config_js_path='config',
password='kumquat', sandbox=True)
js_conf = self.files['config']
# Because this is sandbox, the apiBackend is always go, even though it
# is legacy_juju.
self.assertIn('apiBackend: "go"', js_conf)
@mock.patch('utils.legacy_juju')
def test_write_gui_config_default_go_password(self, mock_legacy_juju):
mock_legacy_juju.return_value = False
write_gui_config(
False, 'This is login help.', True, True, self.charmworld_url,
self.build_dir, config_js_path='config',
password='kumquat')
js_conf = self.files['config']
self.assertIn('user: "user-admin"', js_conf)
self.assertIn('password: "kumquat"', js_conf)
def test_setup_haproxy_config_insecure(self):
setup_haproxy_config(self.ssl_cert_path, secure=False)
# The insecure approach eliminates the https redirect.
self.assertNotIn('redirect scheme https', self.files['haproxy.cfg'])
def test_write_gui_config_sandbox(self):
write_gui_config(
False, 'This is login help.', False, False, self.charmworld_url,
self.build_dir, sandbox=True, config_js_path='config')
js_conf = self.files['config']
self.assertIn('sandbox: true', js_conf)
self.assertIn('user: "admin"', js_conf)
self.assertIn('password: "admin"', js_conf)
def test_write_gui_config_fullscreen(self):
write_gui_config(
False, 'This is login help.', False, False, self.charmworld_url,
self.build_dir, sandbox=True, default_viewmode='fullscreen',
config_js_path='config')
self.assertIn('defaultViewmode: "fullscreen"', self.files['config'])
def test_write_gui_config_with_button(self):
write_gui_config(
False, 'This is login help.', False, False, self.charmworld_url,
self.build_dir, sandbox=True, show_get_juju_button=True,
config_js_path='config')
self.assertIn('showGetJujuButton: true', self.files['config'])
@mock.patch('utils.run')
@mock.patch('utils.log')
@mock.patch('utils.cmd_log', mock.Mock())
@mock.patch('utils.su', mock.MagicMock())
class TestInstallBuiltinServer(unittest.TestCase):
def test_call(self, mock_log, mock_run):
        # The builtin server is correctly installed.
install_builtin_server()
charm_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
mock_log.assert_has_calls([
mock.call('Installing the builtin server dependencies.'),
mock.call('Installing the builtin server.'),
])
mock_run.assert_has_calls([
mock.call(
'pip', 'install', '--no-index', '--no-dependencies',
'--find-links', 'file:///{}/deps'.format(charm_dir),
'-r', os.path.join(charm_dir, 'server-requirements.pip')),
mock.call(
'/usr/bin/python',
os.path.join(charm_dir, 'server', 'setup.py'), 'install')
])
@mock.patch('utils.run')
@mock.patch('utils.cmd_log', mock.Mock())
@mock.patch('utils.log', mock.Mock())
@mock.patch('utils.su', mock.MagicMock())
class TestRemoveApacheSetup(unittest.TestCase):
def test_existing_configuration(self, mock_run):
# The Apache configuration is cleaned up if previously set up.
apache_site = tempfile.mkdtemp()
apache_ports = os.path.join(apache_site, 'ports.conf')
self.addCleanup(shutil.rmtree, apache_site)
with mock.patch('utils.APACHE_SITE', apache_site):
with mock.patch('utils.APACHE_PORTS', apache_ports):
remove_apache_setup()
self.assertEqual(4, mock_run.call_count)
expected_calls = [
mock.call('rm', '-f', apache_site),
mock.call('a2dismod', 'headers'),
mock.call('a2dissite', 'juju-gui'),
mock.call('a2ensite', 'default'),
]
mock_run.assert_has_calls(expected_calls)
def test_missing_configuration(self, mock_run):
# Nothing happens if the configuration does not already exist.
remove_apache_setup()
self.assertEqual(0, mock_run.call_count)
@mock.patch('utils.find_missing_packages')
@mock.patch('utils.install_extra_repositories')
@mock.patch('utils.apt_get_install')
@mock.patch('utils.log')
@mock.patch('utils.cmd_log', mock.Mock())
class TestInstallMissingPackages(unittest.TestCase):
packages = ('pkg1', 'pkg2', 'pkg3')
repository = 'ppa:my/repository'
def test_missing(
self, mock_log, mock_apt_get_install,
mock_install_extra_repositories, mock_find_missing_packages):
# The extra repository and packages are correctly installed.
repository = self.repository
mock_find_missing_packages.return_value = ['pkg1', 'pkg2']
install_missing_packages(self.packages, repository=repository)
mock_find_missing_packages.assert_called_once_with(*self.packages)
mock_install_extra_repositories.assert_called_once_with(repository)
mock_apt_get_install.assert_called_once_with('pkg1', 'pkg2')
mock_log.assert_has_calls([
mock.call('Adding the apt repository ppa:my/repository.'),
mock.call('Installing deb packages: pkg1, pkg2.')
])
def test_missing_no_repository(
self, mock_log, mock_apt_get_install,
mock_install_extra_repositories, mock_find_missing_packages):
# No repositories are installed if not passed.
mock_find_missing_packages.return_value = ['pkg1', 'pkg2']
install_missing_packages(self.packages)
mock_find_missing_packages.assert_called_once_with(*self.packages)
self.assertFalse(mock_install_extra_repositories.called)
mock_apt_get_install.assert_called_once_with('pkg1', 'pkg2')
mock_log.assert_called_once_with(
'Installing deb packages: pkg1, pkg2.')
def test_no_missing(
self, mock_log, mock_apt_get_install,
mock_install_extra_repositories, mock_find_missing_packages):
# Nothing is installed if no missing packages are found.
mock_find_missing_packages.return_value = []
install_missing_packages(self.packages, repository=self.repository)
mock_find_missing_packages.assert_called_once_with(*self.packages)
self.assertFalse(mock_install_extra_repositories.called)
self.assertFalse(mock_apt_get_install.called)
mock_log.assert_called_once_with('No missing deb packages.')
class TestNpmCache(unittest.TestCase):
"""To speed building from a branch we prepopulate the NPM cache."""
def test_retrieving_cache_url(self):
# The URL for the latest cache file can be retrieved from Launchpad.
class FauxLaunchpadFactory(object):
@staticmethod
def login_anonymously(agent, site):
# We download the cache from the production site.
self.assertEqual(site, 'production')
return FauxLaunchpad
class CacheFile(object):
file_link = 'http://launchpad.example/path/to/cache/file'
def __str__(self):
return 'cache-file-123.tgz'
class NpmRelease(object):
files = [CacheFile()]
class NpmSeries(object):
name = 'npm-cache'
releases = [NpmRelease]
class FauxProject(object):
series = [NpmSeries]
class FauxLaunchpad(object):
projects = {'juju-gui': FauxProject()}
url = get_npm_cache_archive_url(Launchpad=FauxLaunchpadFactory())
self.assertEqual(url, 'http://launchpad.example/path/to/cache/file')
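        # The faux objects above mirror the Launchpad API traversal that
        # get_npm_cache_archive_url presumably performs (assumed):
        #   lp.projects['juju-gui'].series -> releases[0].files[0].file_link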
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | -6,968,436,779,733,894,000 | 39.227345 | 79 | 0.617002 | false |
McDermott-Group/LabRAD | LabRAD/StartupScripts/ADR3.py | 1 | 1110 | # Copyright (C) 2015 Ivan Pechenezhskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This script can be used to start the ADR3 client with all necessary servers.
"""
import sys
import subprocess as sp
def main():
sp.Popen([sys.executable, 'electronics.py',
'--registry-start-program-key', 'Start ADR3 Program List',
'--registry-start-server-key', 'Start ADR3 Server List'],
cwd='.')
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main() | gpl-2.0 | 5,027,545,588,910,771,000 | 36.033333 | 73 | 0.718919 | false |
tensorflow/probability | tensorflow_probability/python/bijectors/tanh_test.py | 1 | 3449 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tanh Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class TanhBijectorTest(test_util.TestCase):
"""Tests correctness of the Y = g(X) = tanh(X) transformation."""
def testBijector(self):
self.assertStartsWith(tfb.Tanh().name, "tanh")
x = np.linspace(-3., 3., 100).reshape([2, 5, 10]).astype(np.float64)
y = np.tanh(x)
ildj = -np.log1p(-np.square(np.tanh(x)))
bijector = tfb.Tanh()
self.assertAllClose(
y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)
self.assertAllClose(
x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)
self.assertAllClose(
ildj,
self.evaluate(bijector.inverse_log_det_jacobian(
y, event_ndims=0)), atol=0., rtol=1e-6)
self.assertAllClose(
-ildj,
self.evaluate(bijector.forward_log_det_jacobian(
x, event_ndims=0)), atol=0., rtol=1e-4)
def testScalarCongruency(self):
bijector_test_util.assert_scalar_congruency(
tfb.Tanh(), lower_x=-7., upper_x=7., eval_func=self.evaluate,
n=int(10e4), rtol=.5)
def testBijectiveAndFinite(self):
x = np.linspace(-100., 100., 100).astype(np.float64)
eps = 1e-3
y = np.linspace(-1. + eps, 1. - eps, 100).astype(np.float64)
bijector_test_util.assert_bijective_and_finite(
tfb.Tanh(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,
rtol=1e-4)
def testMatchWithAffineTransform(self):
direct_bj = tfb.Tanh()
indirect_bj = tfb.Chain([
tfb.Shift(tf.cast(-1.0, dtype=tf.float64)),
tfb.Scale(tf.cast(2.0, dtype=tf.float64)),
tfb.Sigmoid(),
tfb.Scale(tf.cast(2.0, dtype=tf.float64))
])
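        # The chain above encodes the identity tanh(x) = 2 * sigmoid(2 * x) - 1.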
x = np.linspace(-3.0, 3.0, 100)
y = np.tanh(x)
self.assertAllClose(self.evaluate(direct_bj.forward(x)),
self.evaluate(indirect_bj.forward(x)))
self.assertAllClose(self.evaluate(direct_bj.inverse(y)),
self.evaluate(indirect_bj.inverse(y)))
self.assertAllClose(
self.evaluate(direct_bj.inverse_log_det_jacobian(y, event_ndims=0)),
self.evaluate(indirect_bj.inverse_log_det_jacobian(y, event_ndims=0)))
self.assertAllClose(
self.evaluate(direct_bj.forward_log_det_jacobian(x, event_ndims=0)),
self.evaluate(indirect_bj.forward_log_det_jacobian(x, event_ndims=0)))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 7,585,944,420,415,478,000 | 37.322222 | 78 | 0.656422 | false |
vcrini/cbr2djvu | cbr2djvu.py | 1 | 1136 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import shutil
import sys
from subprocess import call
import tempfile
import magic
env = os.environ.copy()
env['MAGICK_TMPDIR'] = tempfile.mkdtemp(dir=os.getcwd())
def convert(filename):
directory = tempfile.mkdtemp()
os.chdir(directory)
filename = os.path.join(old_directory, filename)
m = mime.from_file(filename)
if m == 'application/x-rar':
call(['rar', 'e', filename])
else:
call(['unzip', '-j', filename])
    # pdf2djvu (below) writes into the temporary working directory, so build
    # the output name from the archive's basename only.
    name = os.path.splitext(os.path.basename(filename))[0] + '.djvu'
call([
'convert',
'-limit',
'memory',
'1',
'-limit',
'map',
'1',
'*',
'foo.pdf',
], env=env)
call(['pdf2djvu', 'foo.pdf', '-o', name])
    call(['mv', name, old_directory])
    # Return to the original directory before removing the temporary one.
    os.chdir(old_directory)
    shutil.rmtree(directory)
if __name__ == '__main__':
mime = magic.Magic(mime=True)
old_directory = os.getcwd()
for fn in sys.argv[1:]:
convert(fn)
shutil.rmtree(env['MAGICK_TMPDIR'])
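# Example invocation (assumed): python cbr2djvu.py comic1.cbr comic2.cbz
# Both RAR- and ZIP-packed archives are handled via the MIME check above.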
| gpl-2.0 | 7,152,145,809,202,884,000 | 20.846154 | 63 | 0.566901 | false |
chrisxue815/leetcode_python | problems/test_0064.py | 1 | 1060 | import unittest
import utils
# O(mn) time. O(mn) space. DP.
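# Recurrence sketch: dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]), with
# the first row/column seeded by prefix sums. Worked example:
#   minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7 via the path 1-3-1-1-1.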
class Solution:
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m = len(grid)
n = len(grid[0])
# dp[i][j]: the minimal sum of path to reach (i, j)
dp = [[0] * n for _ in range(m)]
sum_ = 0
for i in range(m):
sum_ += grid[i][0]
dp[i][0] = sum_
sum_ = 0
for j in range(n):
sum_ += grid[0][j]
dp[0][j] = sum_
for i in range(1, m):
for j in range(1, n):
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1]) + grid[i][j]
return dp[-1][-1]
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().minPathSum(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| unlicense | 1,228,235,570,249,658,400 | 22.043478 | 71 | 0.460377 | false |
jeffmorais/estrutura-de-dados | soma_quadrados.py | 1 | 1910 | from collections import Counter
def soma_quadrados(n):
test=0
aux=n
res = {0:[0], 1:[1]}
if n>=0 and n<=1:
return res[n]
else:
quad=[]
test=2
while test<=aux:
n=test
for q in range(1,n+1):
if q**2<=n and q**2 not in quad:
quad.append(q**2)
if n>11:
quad.pop()
entr=n
hel=[]
k=n
while n not in res.keys() and n!=0:
if quad and n not in quad:
while k>=n and quad and k!=n-k:
k=quad.pop()
hel.append(k)
n=n-k
test=test+1
if n==0:
res[entr]=hel
else:
res[entr]=hel.__add__(res[n])
return res[entr]
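# A hypothetical, more conventional DP sketch (not used by the code above):
#   best = {0: []}
#   for k in range(1, n + 1):
#       best[k] = min((best[k - q * q] + [q * q]
#                      for q in range(1, int(k ** 0.5) + 1)), key=len)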
import unittest
class SomaQuadradosPerfeitosTestes(unittest.TestCase):
def teste_0(self):
self.assert_possui_mesmo_elementos([0], soma_quadrados(0))
def teste_1(self):
self.assert_possui_mesmo_elementos([1], soma_quadrados(1))
def teste_2(self):
self.assert_possui_mesmo_elementos([1, 1], soma_quadrados(2))
def teste_3(self):
self.assert_possui_mesmo_elementos([1, 1, 1], soma_quadrados(3))
def teste_4(self):
self.assert_possui_mesmo_elementos([4], soma_quadrados(4))
def teste_5(self):
self.assert_possui_mesmo_elementos([4, 1], soma_quadrados(5))
def teste_11(self):
self.assert_possui_mesmo_elementos([9, 1, 1], soma_quadrados(11))
def teste_12(self):
self.assert_possui_mesmo_elementos([4, 4, 4], soma_quadrados(12))
def assert_possui_mesmo_elementos(self, esperado, resultado):
self.assertEqual(Counter(esperado), Counter(resultado))
if __name__ == '__main__':
unittest.main()
| mit | 607,065,658,926,150,900 | 26.681159 | 73 | 0.52356 | false |
FilipMalczak/corpy | src/main/python/corpy/core.py | 1 | 1061 | import types
class CorpyRuntime:
def __init__(self, configuration):
self.configuration = configuration
def get_components(self):
return dict(self.configuration.__dict__)
def component(class_or_foo):
    if isinstance(class_or_foo, type):
        clazz = class_or_foo
    elif isinstance(class_or_foo, (types.MethodType, types.FunctionType, types.LambdaType)):
        foo = class_or_foo
    # The original body ends here; returning the argument keeps the decorator
    # usable as a pass-through.
    return class_or_foo
class Configuration(types.SimpleNamespace):
current_runtime = None
    def get_not_overloaded_main_msg(self):
        return "Overload main method of your configuration (" + str(type(self)) + ")!"
    def main(self, **kwargs):
        # Accept **kwargs so the sample arguments passed by run() don't crash
        # the default implementation.
        print(self.get_not_overloaded_main_msg())
def run(self):
last_runtime = Configuration.current_runtime
Configuration.current_runtime = CorpyRuntime(self)
try:
kwargs = {
"a": 3,
"b": 4
}
self.main(**kwargs)
finally:
Configuration.current_runtime = last_runtime
if __name__ == '__main__':
    # Demo run; main() receives the sample kwargs built in run().
    Configuration().run()
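# Hypothetical usage sketch (assumes the pass-through fixes above):
#   class MyConf(Configuration):
#       def main(self, **kwargs):
#           print("components:", CorpyRuntime(self).get_components())
#   MyConf(answer=42).run()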
| apache-2.0 | -609,070,897,693,372,400 | 26.921053 | 91 | 0.60509 | false |
diego-bernal/quantum | quantum/atom_cavity/__init__.py | 1 | 1750 | # -*- coding: utf-8 -*-
# This file is part of Quantum.
#
# Copyright (c) 2017, Diego Nicolás Bernal-García
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# -----------------------------------------------------------------------------
# Load modules
#
# core
from quantum.atom_cavity.states import *
from quantum.atom_cavity.operators import *
| bsd-2-clause | -5,203,070,641,484,842,000 | 46.243243 | 79 | 0.676201 | false |
syabro/django_mediatoolset | mediatoolset/management/commands/loaddata_with_media.py | 1 | 1876 | import os
import shutil
from django.conf import settings
from django.core import serializers
from django.core.management.commands.loaddata import Command as LoaddataCommand
from django.db.models import FileField
class Command(LoaddataCommand):
def handle(self, *fixture_labels, **options):
super(Command, self).handle(*fixture_labels, **options)
for fixture_label in fixture_labels:
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
objects = serializers.deserialize(ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore)
for obj in list(objects):
self.process_object_from_fixture(obj.object, fixture_dir)
def process_object_from_fixture(self, object_from_fixture, fixture_dir):
fields = [field for field in type(object_from_fixture)._meta.get_fields() if isinstance(field, FileField)]
for field in fields:
relative_file_path = getattr(object_from_fixture, field.name).name
if not relative_file_path:
continue
src_path = os.path.join(fixture_dir, 'media', relative_file_path)
if not os.path.exists(src_path):
print "Source files %s doesn't exist. Skipping." % src_path
continue
target_path = os.path.join(settings.MEDIA_ROOT, relative_file_path)
if not os.path.exists(os.path.dirname(target_path)):
os.makedirs(os.path.dirname(target_path))
if os.path.exists(target_path):
os.remove(target_path)
shutil.copy(src_path, target_path)
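# Usage sketch (assumed): keep fixture media beside the fixture file, e.g.
#   myapp/fixtures/data.json
#   myapp/fixtures/media/<relative FileField paths>
# then run: python manage.py loaddata_with_media data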
| mit | 2,237,305,081,493,083,400 | 45.9 | 116 | 0.64339 | false |
CityOfNewYork/NYCOpenRecords | openrecords.py | 1 | 10724 | """
`.` `.` ...` .... ...`
`;'', :##+ +##`#####, .######` +##.+####'
`'''', :#### +#########, .########` '#########,
;'';` `###' +###,``;###``###. `;##+ '###,``####
.'''` `### +##' ###,:##: ###`'##+ .###
.''' ###.+##, :##'###+++++###.'##, `###
.:::,,, ###.+##` ,##+###########.'##: `###
.### ###.+##. :##+###,.......`'##: `###
`###` `### +##; +##:;##, ...`'##: `###
+##+ ###+ +###. .###.`###. .### '##: `###
`#########` +#########' ;########. '##: `###
`#######` +##,#####' ,######. '##: `###
`,:,` +##, .:,` `.::. ```` ```
+##,
+##,
+##,
```
.........` ............` .,::,. ,,;:,` .......... `........ .,::,.
`'''''''''''. ''''''''''''. .''''''''` ,''''''''. ;'''''''''': .''''''''''. `'''''''',
`''''''''''''. ''''''''''''. ,''''''''''. ;''''''''''` ;'''''''''''' .''''''''''': `'''''''''',
`''':::::;'''' '''';;;;;;;;. .'''':.,:''''` :'''',.,:''''. ;'''::::,''''. .''';;;;''''', ;'''.``.''''`
`'''. ,'''` '''' ''''` `''': `'''' .'''' ;''' .''': .'''` ,''''` ''', `'''.
`'''. .'''. '''' ,'''. :''' :'''` ,'''. ;''' '''; .'''` ,''', ''', :,:.
`'''. .'''` '''' ;''' ```` '''; .''', ;''' ''', .'''` '''; '''';.
`'''. `'''' ''''''''''': '''; `''', ''': ;''' ,'''` .'''` ;''' .''''''':.
`'''''''''''' ''''''''''': ''': `'''. '''; ;'''''''''''. .'''` :''' .'''''''''.
`'''''''''''. '''''''''''; ''': `'''. '''; ;'''''''''': .'''` ;''' ,''''''''.
`''':::::''''. '''' '''' `...`''': ''': ;'''::::;'''' .'''` ;''' .,'''''
`'''. '''' '''' :'''` :''' '''' .'''. ;''' ,'''` .'''` ''':,::: ;'''`
`'''. ,''' '''; .''': '''' ,'''. ;'''` ;''' ''', .'''` :'''.:''' .'''`
`'''. .''' '''' '''', :'''. ''''. :''': ;''' ''', .'''. .;'''' .''': ;'''
`'''. `'''` '''''''''''', `''''';;''''' .''''';'''''' ;''' ''', .''''''''''''. ''''':::'''',
`'''. '''. ''''''''''''. .'''''''''' .''''''''''` ;''' ;''; .'''''''''''. '''''''''';
`'''. ''': '''''''''''', ,'''''', `:''''''. '''' ,''' .'''''''';. ,'''''''.
"""
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
dotenv_path = os.path.join(basedir, '.env')
load_dotenv(dotenv_path)
from datetime import datetime
from urllib.parse import unquote
import sys
import click
from flask import url_for
from flask.cli import main
from flask_migrate import Migrate, upgrade
from werkzeug.middleware.profiler import ProfilerMiddleware
from app import create_app, db
from app.models import (
Agencies,
AgencyUsers,
CustomRequestForms,
Determinations,
Emails,
EnvelopeTemplates,
Envelopes,
Events,
LetterTemplates,
Letters,
Reasons,
Requests,
Responses,
Roles,
UserRequests,
Users,
)
from app.report.utils import generate_request_closing_user_report, generate_monthly_metrics_report
from app.request.utils import generate_guid
from app.search.utils import recreate
from app.user.utils import make_user_admin
if os.getenv('FLASK_ENV') != 'production':
import pytest
app = create_app(os.getenv("FLASK_CONFIG") or "default")
migrate = Migrate(app, db)
COV = None
if os.environ.get("FLASK_COVERAGE"):
import coverage
COV = coverage.coverage(
branch=True, include="app/*", config_file=os.path.join(os.curdir, ".coveragerc")
)
COV.start()
@app.shell_context_processor
def make_shell_context():
return dict(
app=app,
db=db,
Users=Users,
Agencies=Agencies,
Determinations=Determinations,
Requests=Requests,
Responses=Responses,
Events=Events,
Reasons=Reasons,
Roles=Roles,
UserRequests=UserRequests,
AgencyUsers=AgencyUsers,
Emails=Emails,
Letters=Letters,
LetterTemplates=LetterTemplates,
Envelopes=Envelopes,
EnvelopeTemplates=EnvelopeTemplates,
CustomRequestForms=CustomRequestForms,
)
@app.cli.command()
@click.option("--first_name", prompt="First Name")
@click.option("--middle_initial", default="", prompt="Middle Initial")
@click.option("--last_name", prompt="Last Name")
@click.option("--email", prompt="Email Address")
@click.option("--agency_ein", prompt="Agency EIN (e.g. 0002)")
@click.option(
"--is_admin", default=False, prompt="Should user be made an agency administrator?"
)
@click.option(
"--is_active", default=False, prompt="Should user be activated immediately?"
)
def add_user(
first_name: str,
last_name: str,
email: str,
agency_ein: str,
middle_initial: str = None,
is_admin: bool = False,
is_active: bool = False,
):
"""
Add an agency user into the database.
"""
if not first_name:
raise click.UsageError("First name is required")
if not last_name:
raise click.UsageError("Last name is required")
if not email:
raise click.UsageError("Email Address is required")
if not agency_ein:
raise click.UsageError("Agency EIN is required")
user = Users(
guid=generate_guid(),
first_name=first_name,
middle_initial=middle_initial,
last_name=last_name,
email=email,
email_validated=False,
is_nyc_employee=True,
is_anonymous_requester=False,
)
db.session.add(user)
agency_user = AgencyUsers(
user_guid=user.guid,
agency_ein=agency_ein,
is_agency_active=is_active,
is_agency_admin=is_admin,
is_primary_agency=True,
)
db.session.add(agency_user)
db.session.commit()
if is_admin:
redis_key = "{current_user_guid}-{update_user_guid}-{agency_ein}-{timestamp}".format(
current_user_guid="openrecords_support",
update_user_guid=user.guid,
agency_ein=agency_ein,
timestamp=datetime.now(),
)
make_user_admin.apply_async(
args=(user.guid, "openrecords_support", agency_ein), task_id=redis_key
)
print(user)
@app.cli.command()
@click.option("--agency_ein", prompt="Agency EIN (e.g. 0002)")
@click.option("--date_from", prompt="Date From (e.g. 2000-01-01")
@click.option("--date_to", prompt="Date To (e.g. 2000-02-01)")
@click.option("--emails", prompt="Emails (e.g. [email protected],[email protected])")
def generate_closing_report(agency_ein: str, date_from: str, date_to: str, emails: str):
"""Generate request closing report.
"""
email_list = emails.split(',')
generate_request_closing_user_report(agency_ein, date_from, date_to, email_list)
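# Example invocation (assumed; Click may expose the name with dashes):
#   flask generate-closing-report --agency_ein 0002 \
#       --date_from 2019-01-01 --date_to 2019-02-01 --emails [email protected]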
@app.cli.command()
@click.option("--agency_ein", prompt="Agency EIN (e.g. 0002)")
@click.option("--date_from", prompt="Date From (e.g. 2000-01-01")
@click.option("--date_to", prompt="Date To (e.g. 2000-02-01)")
@click.option("--emails", prompt="Emails (e.g. [email protected],[email protected])")
def generate_monthly_report(agency_ein: str, date_from: str, date_to: str, emails: str):
"""Generate monthly metrics report.
CLI command to generate monthly metrics report.
Purposely leaving a full date range option instead of a monthly limit in order to provide more granularity for devs.
"""
email_list = emails.split(',')
generate_monthly_metrics_report(agency_ein, date_from, date_to, email_list)
@app.cli.command()
def es_recreate():
"""
Recreate elasticsearch index and request docs.
"""
recreate()
@app.cli.command()
def routes():
"""
Generate a list of HTTP routes for the application.
"""
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
if arg == "year":
options[arg] = "{}".format(datetime.now().year)
continue
options[arg] = "[{}]".format(arg)
methods = ",".join(rule.methods)
url = url_for(rule.endpoint, **options)
if str(datetime.now().year) in url:
url = url.replace(str(datetime.now().year), "[year]")
line = unquote("{:50} {:20} {}".format(rule.endpoint, methods, url))
output.append(line)
for line in sorted(output):
print(line)
@app.cli.command()
@click.option("--test-name", help="Specify tests (file, class, or specific test)")
@click.option(
"--coverage/--no-coverage", "use_coverage", default=False, help="Run tests under code coverage."
)
@click.option("--verbose", is_flag=True, default=False, help="Py.Test verbose mode")
def test(test_name: str = None, use_coverage: bool = False, verbose: bool = False):
"""Run the unit tests."""
if use_coverage and not os.environ.get("FLASK_COVERAGE"):
os.environ["FLASK_COVERAGE"] = "1"
os.execvp(sys.executable, [sys.executable] + sys.argv)
command = []
if verbose:
command.append("-v")
if test_name:
command.append("tests/{test_name}".format(test_name=test_name))
else:
command.append("tests/")
pytest.main(command)
if COV:
COV.stop()
COV.save()
print("Coverage Summary:")
COV.report()
COV.html_report()
COV.xml_report()
@app.cli.command()
@click.option(
"--length",
default=25,
help="Number of functions to include in the profiler report.",
)
@click.option(
"--profile-dir", default=None, help="Directory where profiler data files are saved."
)
def profile(length, profile_dir):
"""
Start the application under the code profiler.
"""
app.wsgi_app = ProfilerMiddleware(
app.wsgi_app, restrictions=[length], profile_dir=profile_dir
)
app.run()
@app.cli.command()
def deploy():
"""
Run deployment tasks.
"""
# migrate database to latest revision
upgrade()
# pre-populate
list(
map(
lambda x: x.populate(),
(
Roles,
Agencies,
Reasons,
Users,
LetterTemplates,
EnvelopeTemplates,
CustomRequestForms,
),
)
)
recreate()
if __name__ == "__main__":
main()
| apache-2.0 | -2,969,086,178,814,162,400 | 30.727811 | 120 | 0.471746 | false |
mjordan/pkppln | services/microservices/validate_bag.py | 1 | 1351 | import os
from services.PlnService import PlnService
import pkppln
import bagit
import zipfile
import shutil
class ValidateBag(PlnService):
"""Validate a bag in a deposit"""
def state_before(self):
return 'payloadVerified'
def state_after(self):
return 'bagValidated'
def execute(self, deposit):
filename = deposit['file_uuid']
filepath = pkppln.input_path('harvested', filename=filename)
self.output(1, 'Opening ' + filepath)
zfile = zipfile.ZipFile(filepath)
for name in zfile.namelist():
self.output(2, ' * ' + name)
if name.startswith('/') or '..' in name:
raise Exception(
'Suspicious file name %s in zipped bag' % (name)
)
expanded_path = pkppln.microservice_directory(
self.state_after(),
filename
)
if os.path.exists(expanded_path):
self.output(1, 'Removing old bag ' + expanded_path)
shutil.rmtree(expanded_path)
zfile.extractall(expanded_path)
bag_path = os.path.join(expanded_path, 'bag')
self.output(1, 'extracted to ' + bag_path)
bag = bagit.Bag(bag_path)
if not bag.is_valid():
raise Exception('Bag is not valid.')
self.output(2, 'bag is valid.')
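# Minimal standalone check (hypothetical paths), mirroring execute() above:
#   bag = bagit.Bag('/tmp/deposit/bag')
#   assert bag.is_valid()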
| gpl-3.0 | 5,493,576,950,560,890,000 | 27.744681 | 68 | 0.582531 | false |
aYukiSekiguchi/ACCESS-Chromium | chrome/test/functional/netflix.py | 1 | 6272 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import pyauto_functional
import pyauto
class NetflixTestHelper():
"""Helper functions for Netflix tests.
For sample usage, look at class NetflixTest.
"""
# Netflix player states.
IS_PLAYING = '4'
TITLE_HOMEPAGE = 'http://movies.netflix.com/WiHome'
SIGNOUT_PAGE = 'https://account.netflix.com/Logout'
# 30 Rock.
VIDEO_URL = 'http://movies.netflix.com/WiPlayer?' + \
'movieid=70136124&trkid=2361637&t=30+Rock'
_pyauto = None
def __init__(self, pyauto):
self._pyauto = pyauto
def _IsNetflixPluginEnabled(self):
"""Determine Netflix plugin availability and its state."""
return [x for x in self._pyauto.GetPluginsInfo().Plugins() \
if x['name'] == 'Netflix' and x['enabled']]
def _LoginToNetflix(self):
"""Login to Netflix."""
credentials = self._pyauto.GetPrivateInfo()['test_netflix_acct']
board_name = self._pyauto.ChromeOSBoard()
assert credentials.get(board_name), \
'No netflix credentials for %s.' % board_name
self._pyauto.NavigateToURL(credentials['login_url'])
login_js = """
document.getElementById('email').value='%s';
document.getElementById('password').value='%s';
window.domAutomationController.send('ok');
""" % (credentials[board_name], credentials['password'])
self._pyauto.assertEqual(self._pyauto.ExecuteJavascript(login_js), 'ok',
msg='Failed to set login credentials.')
self._pyauto.assertTrue(self._pyauto.SubmitForm('login-form'),
        msg='Login to Netflix failed. We think this is an authentication '
        'problem from the Netflix side. Sometimes we also see this while '
        'logging in manually.')
def _GetVideoDroppedFrames(self, tab_index=0, windex=0):
"""Returns total Netflix video dropped frames."""
js = """
var frames = nrdp.video.droppedFrames;
window.domAutomationController.send(frames + '');
"""
return int(self._pyauto.ExecuteJavascript(js, tab_index=tab_index,
windex=windex))
def _GetVideoFrames(self, tab_index=0, windex=0):
"""Returns Netflix video total frames."""
js = """
var frames = nrdp.video.totalFrames;
window.domAutomationController.send(frames + '');
"""
return int(self._pyauto.ExecuteJavascript(js, tab_index=tab_index,
windex=windex))
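  # A derived metric sketch (hypothetical, not part of the original helpers):
  #   dropped_ratio = self._GetVideoDroppedFrames() / float(max(1, self._GetVideoFrames()))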
def _HandleInfobars(self):
"""Manage infobars, come up during the test.
We expect password and Netflix infobars. Processing only Netflix infobar,
since to start a vidoe, pressing the OK button is a must. We can keep other
infobars open."""
self._pyauto.WaitForInfobarCount(2)
tab_info = self._pyauto.GetBrowserInfo()['windows'][0]['tabs'][0]
infobars = tab_info['infobars']
index = 0
netflix_infobar_status = False
for infobar in infobars:
if infobar['buttons'][0] == 'OK':
self._pyauto.PerformActionOnInfobar('accept', infobar_index=index)
netflix_infobar_status = True
index = index + 1
self._pyauto.assertTrue(netflix_infobar_status,
msg='Netflix infobar did not show up')
def _CurrentPlaybackTime(self):
"""Returns the current playback time in seconds."""
time = self._pyauto.ExecuteJavascript("""
time = nrdp.video.currentTime;
window.domAutomationController.send(time + '');
""")
return int(float(time))
def _SignOut(self):
"""Sing out from Netflix Login."""
self._pyauto.NavigateToURL(self.SIGNOUT_PAGE)
def _LoginAndStartPlaying(self):
"""Login and start playing the video."""
self._pyauto.assertTrue(self._pyauto._IsNetflixPluginEnabled(),
msg='Netflix plugin is disabled or not available.')
self._pyauto._LoginToNetflix()
self._pyauto.assertTrue(self._pyauto.WaitUntil(
lambda:self._pyauto.GetActiveTabURL().spec(),
expect_retval=self.TITLE_HOMEPAGE),
msg='Login to Netflix failed.')
self._pyauto.NavigateToURL(self.VIDEO_URL)
self._pyauto._HandleInfobars()
self._pyauto.assertTrue(self._pyauto.WaitUntil(
lambda: self._pyauto.ExecuteJavascript("""
player_status = nrdp.video.readyState;
window.domAutomationController.send(player_status + '');
"""), expect_retval=self.IS_PLAYING),
msg='Player did not start playing the title.')
class NetflixTest(pyauto.PyUITest, NetflixTestHelper):
"""Test case for Netflix player."""
def __init__(self, methodName='runTest', **kwargs):
pyauto.PyUITest.__init__(self, methodName, **kwargs)
NetflixTestHelper.__init__(self, self)
def tearDown(self):
self._SignOut()
pyauto.PyUITest.tearDown(self)
def testPlayerLoadsAndPlays(self):
"""Test that Netflix player loads and plays the title."""
self._LoginAndStartPlaying()
def testPlaying(self):
"""Test that title playing progresses."""
self._LoginAndStartPlaying()
title_length = self.ExecuteJavascript("""
time = nrdp.video.duration;
window.domAutomationController.send(time + '');
""")
title_length = int(float(title_length))
prev_time = 0
current_time = 0
count = 0
while current_time < title_length:
# We want to test playing only for ten seconds.
count = count + 1
if count == 10:
break
current_time = self._CurrentPlaybackTime()
self.assertTrue(prev_time <= current_time,
msg='Prev playing time %s is greater than current time %s.'
% (prev_time, current_time))
prev_time = current_time
# play video for some time
time.sleep(1)
# crosbug.com/22037
    # In case the player doesn't start playing at all, the above while loop may
    # still pass. So re-verify, assuming that the player did play something
    # during the last 10 seconds.
self.assertTrue(current_time > 0,
                    msg='Netflix player did not start playing.')
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause | 440,122,788,789,496,000 | 36.112426 | 79 | 0.646524 | false |
MadsJensen/malthe_alpha_project | fwd_inv.py | 1 | 3446 | """
Set up source spaces, forward solutions, covariance matrices, and inverse
operators for the subjects in this project.
@author: mje mads [] cnru.dk
"""
import socket
import mne
from mne.minimum_norm import make_inverse_operator
import os
# import subprocess
import glob
cmd = "/usr/local/common/meeg-cfin/configurations/bin/submit_to_isis"
# SETUP PATHS AND PREPARE RAW DATA
hostname = socket.gethostname()
if hostname == "wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
# CHANGE DIR TO SAVE FILES THE RIGTH PLACE
os.chdir(data_path)
subjects_dir = data_path + "fs_subjects_dir/"
save_folder = data_path + "filter_ica_data/"
maxfiltered_folder = data_path + "maxfiltered_data/"
epochs_folder = data_path + "epoched_data/"
tf_folder = data_path + "tf_data/"
mne_folder = data_path + "minimum_norm/"
subjects = ["0004", "0005", "0006", "0007", "0008", "0009", "0010", "0011",
"0012", "0013", "0014", "0015", "0016", "0017", "0020", "0021",
"0022", "0023", "0024", "0025"] # subject to run
os.chdir(mne_folder)
bem_list = glob.glob("*8192-8192*sol.fif")
bem_list.sort()
subjects = ["0006"]
# Setup source space and forward model
for j, subject in enumerate(subjects[1:11]):
print("Workingt on subject: %s" % subject)
raw_fname = save_folder + "%s_filtered_ica_mc_raw_tsss.fif" % subject
trans_fname = mne_folder + "%s-trans.fif" % subject
bem = bem_list[j]
cov = mne.read_cov(mne_folder + "%s-cov.fif" % subject)
src = mne.setup_source_space(subject,
mne_folder + "%s-all-src.fif" % subject,
spacing="all",
subjects_dir=subjects_dir,
n_jobs=2,
overwrite=True) # 1 for each hemispere
fwd = mne.make_forward_solution(raw_fname, trans=trans_fname,
src=src,
bem=bem,
meg=True,
eeg=True,
fname=mne_folder + "%s-fwd.fif" % subject,
overwrite=True)
# Calculate covariance matrix
best_fit = []
for subject in subjects[3:]:
epochs = mne.read_epochs(epochs_folder +
"%s_filtered_ica_mc_tsss-epo.fif" % subject)
cov = mne.compute_covariance(epochs, tmin=None, tmax=-0.01,
method="auto", return_estimators="all")
best_fit.append({"method": cov[0]["method"], "loglik": cov[0]["loglik"]})
cov = cov[0]
cov.save(mne_folder + "%s-cov.fif" % subject)
# Make inverse model
for subject in subjects:
fwd = mne.read_forward_solution(mne_folder + "%s-fwd.fif" % subject)
cov = mne.read_cov(mne_folder + "%s-cov.fif" % subject)
epochs = mne.read_epochs(epochs_folder +
"%s_filtered_ica_mc_tsss-epo.fif" % subject,
preload=False)
inv = make_inverse_operator(epochs.info, fwd, cov,
loose=0.2, depth=0.8)
mne.minimum_norm.write_inverse_operator(mne_folder +
"%s-inv.fif" % subject,
inv)
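# Hypothetical next step (standard MNE API), not executed here:
#   stc = mne.minimum_norm.apply_inverse(epochs.average(), inv,
#                                        lambda2=1. / 9., method="dSPM")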
os.chdir(mne_folder)
invs = glob.glob("*-inv.fif")
invs.sort()
for i in invs[:10]:
tmp = mne.minimum_norm.read_inverse_operator(i)
print(tmp)
| mit | 2,468,889,138,565,906,400 | 32.134615 | 78 | 0.548172 | false |
pombreda/https-gitorious.org-appstream-software-center | test/test_addons.py | 1 | 2305 | #!/usr/bin/python
import unittest
from testutils import setup_test_env
setup_test_env()
from softwarecenter.db.pkginfo import get_pkg_info
class TestSCAddons(unittest.TestCase):
""" tests the addons """
def setUp(self):
self.cache = get_pkg_info()
self.cache.open()
def test_get_addons_simple(self):
# 7zip
res = self.cache.get_addons("p7zip-full", ignore_installed=False)
self.assertEqual(res, ([], ["p7zip-rar"]))
# apt
(recommends, suggests) = self.cache.get_addons(
"apt", ignore_installed=False)
self.assertEqual(set(suggests), set(
['lzma', 'bzip2', 'apt-doc', 'wajig', 'aptitude', 'dpkg-dev',
'python-apt', 'synaptic']))
# synaptic: FIXME: use something that changes less often
#(recommends, suggests) = self.cache.get_addons(
# "synaptic", ignore_installed=False)
#self.assertEqual(set(recommends), set(
# ['libgtk2-perl', 'rarian-compat', 'software-properties-gtk']))
#self.assertEqual(set(suggests), set(
# ["apt-xapian-index", "dwww", "deborphan", "menu"]))
def test_enhances(self):
res = self.cache.get_addons("gwenview")
self.assertEqual(res, ([], ["svgpart", "kipi-plugins"]))
def test_enhances_with_virtual_pkgs(self):
res = self.cache.get_addons("bibletime")
self.assertTrue("sword-text-tr" in res[1])
self.assertTrue(len(res[1]) > 5)
def test_lonley_dependency(self):
# gets additional recommends via lonely dependency
# for arduino-core, there is a dependency on avrdude, nothing
# else depends on avrdude other than arduino-core, so
# we want to get the recommends/suggests/enhances for
# this package too
# FIXME: why only for "lonley" dependencies and not all?
res = self.cache.get_addons("arduino-core")
self.assertEqual(res, ([], ["avrdude-doc", "arduino-mk"]))
def test_addons_removal_included_depends(self):
res = self.cache.get_addons("amule-gnome-support")
self.assertEqual(res, (['amule-daemon'], []))
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| gpl-3.0 | -398,762,153,800,454,140 | 33.924242 | 79 | 0.607809 | false |
super13/tensorflow-speech-recognition-pai | src/preProcessData/wavtotfrecords.py | 1 | 1573 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
"""
transform wav files to tfrecords.
usage : python wavtotfrecords.py dirname tfrecordsFileName
e.g. : python wavtotfrecords.py train-dir train.tfrecords
"""
import os
import sys
import tensorflow as tf
import numpy as np
from features.utils.load_audio_to_mem import get_audio_and_transcript, \
pad_sequences
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
txt_files = []
for root, dirs, files in os.walk(sys.argv[1]):
for fname in files:
if fname.endswith('.txt'):
txt_files.append(os.path.join(root, fname))
chunks_list = chunks(txt_files, 160)
writer = tf.python_io.TFRecordWriter(sys.argv[2])
for txt_file in chunks_list:
wav_files = [x.replace('.txt', '.wav') for x in txt_file]
(source, _, target, _) = get_audio_and_transcript(
txt_file, wav_files, 26, 9)
source, source_lengths = pad_sequences(source)
for sa, la, ta in zip(source, source_lengths, target):
a_reshape = np.reshape(sa, -1)
print(ta)
print(np.shape(ta))
print(type(ta))
example = tf.train.Example(features=tf.train.Features(feature={
'source': tf.train.Feature(
float_list=tf.train.FloatList(value=a_reshape)),
'source_lengths': tf.train.Feature(
int64_list=tf.train.Int64List(value=[la])),
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=ta))
}))
        writer.write(example.SerializeToString())  # serialize to a string
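# Hypothetical read-back sketch (TF1-style API, matching the feature keys used
# when writing above):
#   features = tf.parse_single_example(serialized_example, {
#       'source': tf.VarLenFeature(tf.float32),
#       'source_lengths': tf.FixedLenFeature([1], tf.int64),
#       'label': tf.VarLenFeature(tf.int64)})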
| mit | 1,849,613,331,995,784,700 | 28.980769 | 78 | 0.627325 | false |
phievo/phievo | Examples/Methyl.py | 1 | 6136 | ## Create a methylation interaction
from phievo import __silent__,__verbose__
if __verbose__:
print("Execute Methyl (Interaction Template)")
from phievo.Networks import mutation
from phievo.Networks import deriv2
from phievo.Networks import classes_eds2
import copy
## Define the function that assigns new parameters to a new methylable species
mutation.species_types["Methylable"] = lambda random_generator:[
["Methylable"],
['Diffusible',mutation.sample_dictionary_ranges('Species.diffusion',random_generator)]
]
classes_eds2.Species.Tags_Species["Methylable"] = []
## Define the default dictionary_range
mutation.dictionary_ranges['Methyl.methyl'] = 0.0/(mutation.C*mutation.T)
mutation.dictionary_ranges['Methyl.demethyl'] = 0.0/mutation.T
class Methyl(classes_eds2.Interaction):
"""
Methylation interaction
Args:
Methyl.methyl(float): binding rate of a methyl group
Methyl.demethyl(float): unbinding rate of a methyl group
label(str): Methylation
input(list): Type of the inputs
output(list): Type of the outputs
"""
def __init__(self,methyl=0,demethyl=0):
classes_eds2.Node.__init__(self)
self.methyl=methyl
self.demethyl=demethyl
self.label='Methylation'
self.input=['Methylable']
self.output=['Species']
def __str__(self):
"""
Used by the print function to display the interaction.
"""
return "{0.id} Methylation: methyl. = {0.methyl:.2f}, demethyl = {0.demethyl:.2f}".format(self)
def outputs_to_delete(self,net):
"""
Returns the methylated form of the species to delete when the reaction is deleted.
"""
return net.graph.list_successors(self)
#################################################
#### Functions to add to the Mutable_Network ####
#################################################
def number_Methyl(self):
"""
Returns the number of possible methylation in the current network.
Note: this function is optional, it is used to check the consistency of
the random_Methyl function.
"""
n = self.number_nodes('Methylable')
n_Methyl = self.number_nodes('Methyl')
return n-n_Methyl
def new_Methyl(self,S,methyl,demethyl,parameters):
"""
        Creates a new :class:`Networks.Methyl.Methyl` interaction and the corresponding methylated species in the network.
Args:
S: species to methylate
methyl(float): binding rate of a methyl group
demethyl(float): unbinding rate of a methyl group
parameters(list): Parameters of the methylated species
Returns:
[methyl_inter,S_methyl]: returns a Methyl interaction and a methylated species.
"""
S_methyl = classes_eds2.Species(parameters)
meth_inter = Methyl(methyl,demethyl)
assert meth_inter.check_grammar([S],[S_methyl]),"Error in grammar, new Methylation"
self.add_Node(S_methyl)
self.add_Node(meth_inter)
self.graph.add_edge(S,meth_inter)
self.graph.add_edge(meth_inter,S_methyl)
return [meth_inter,S_methyl]
def new_random_Methyl(self,S):
"""
Creates a methylation with random parameters.
Args:
S: Species to methylate
Returns:
[methyl_inter,S_methyl]:returns a Methyl interaction and a methylated species.
"""
parameters = {}
if S.isinstance("TF"):
parameters['TF'] = self.Random.random()*2
for tt in S.types:
if tt not in ["TF","Methylable","Input","Output"]:
parameters[tt] = [mutation.sample_dictionary_ranges('Species.{}'.format(attr),self.Random) for attr in S.Tags_Species[tt]]
# Transform to fit phievo list structure
parameters = [[kk]+val if val else [kk] for kk,val in parameters.items()]
methyl = mutation.sample_dictionary_ranges('Methyl.methyl',self.Random)
demethyl = mutation.sample_dictionary_ranges('Methyl.demethyl',self.Random)
return self.new_Methyl(S,methyl,demethyl,parameters)
def random_Methyl(self):
"""
        Evaluates the species that can be methylated, picks one, and creates a random
        methylation. The random mutation is made using :func:`new_random_Methyl <phievo.Networks.classes_eds2.new_random_Methyl>`.
Returns:
[methyl_inter,S_methyl]: returns a Methyl interaction and a methylated species.
"""
try:
list_methylable=self.dict_types["Methylable"]
except KeyError:
print("\tThe network contain no Methylacble species.")
raise
list_possible_methylable = []
for S in list_methylable:
if not self.check_existing_binary([S],"Methyl"):
list_possible_methylable.append(S)
n_possible = len(list_possible_methylable)
assert n_possible==self.number_Methyl(),"The number of possible new methylation does not match its theoretical value."
if n_possible==0:
if __verbose__:
print("No more possible methylation.")
return None
else:
S = list_possible_methylable[int(self.Random.random()*n_possible)]
return self.new_random_Methyl(S)
def Methyl_deriv_inC(net):
"""
    Function called to generate the C string corresponding to a methylation.
"""
func_str = "\n/************** Methylations *****************/\n"
methylations = net.dict_types.get("Methyl",[])
for methyl_inter in methylations:
S = net.graph.list_predecessors(methyl_inter)[0]
S_meth = net.graph.list_successors(methyl_inter)[0]
f_rate = "{M.methyl}*{S.id}".format(M=methyl_inter,S=S)
b_rate = "{M.demethyl}*{S_m.id}".format(M=methyl_inter,S_m=S_meth)
func_str += deriv2.compute_leap([S.id],[S_meth.id],f_rate)
func_str += deriv2.compute_leap([S_meth.id],[S.id],b_rate)
return func_str
## Add the current the new functions to the network.
setattr(classes_eds2.Network,"number_Methyl",number_Methyl)
setattr(classes_eds2.Network,"new_Methyl",new_Methyl)
setattr(classes_eds2.Network,"new_random_Methyl",new_random_Methyl)
setattr(classes_eds2.Network,"random_Methyl",random_Methyl)
deriv2.interactions_deriv_inC["Methyl"] = Methyl_deriv_inC
| lgpl-3.0 | 8,465,405,386,646,926,000 | 36.876543 | 134 | 0.658409 | false |
Athemis/PyDSF | ui/mplwidget.py | 1 | 2733 | from PyQt5 import QtWidgets
from PyQt5.QtCore import QCoreApplication
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as
FigureCanvas)
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as
NavigationToolbar)
from matplotlib.figure import Figure
_translate = QCoreApplication.translate
class MplCanvas(FigureCanvas):
def __init__(self, parent=None, width=4, height=5, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
    # Override mouseMoveEvent with a non-functional dummy.
    # This prevents the GUI thread from hanging while moving the mouse
    # when a large number of plots is shown simultaneously.
def mouseMoveEvent(self, event):
pass
def clear(self):
self.ax.clear()
self.fig.clear()
def save(self, filename):
try:
self.fig.savefig(filename, dpi=300)
except IOError:
QtWidgets.QMessageBox.critical(
self, _translate("MainWindow", "Error"),
_translate("MainWindow", "Error saving figure! Please check "
"permissions/free space of target path!"),
QtWidgets.QMessageBox.Close, QtWidgets.QMessageBox.Close)
class CustomNavigationToolbar(NavigationToolbar):
toolitems = (
(_translate("CustomNavigationToolbar", "Save"),
_translate("CustomNavigationToolbar",
"Save the figure"), "filesave",
"save_figure"),
(_translate("CustomNavigationToolbar", "Subplots"),
_translate("CustomNavigationToolbar",
"Configure subplots"), "subplots",
"configure_subplots"),
(None, None, None, None), )
def __init__(self, canvas, parent, coordinates=True):
NavigationToolbar.__init__(self, canvas, parent,
coordinates=coordinates)
class MplWidget(QtWidgets.QGraphicsView):
def __init__(self, parent=None):
QtWidgets.QGraphicsView.__init__(self, parent)
self.canvas = MplCanvas()
self.ntb = CustomNavigationToolbar(self.canvas, self,
coordinates=False)
self.vbl = QtWidgets.QVBoxLayout()
self.vbl.addWidget(self.ntb)
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
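# Usage sketch (assumed): embed the widget in any Qt layout and draw through
# its canvas, e.g.
#   w = MplWidget(); w.canvas.ax.plot([0, 1], [0, 1]); w.canvas.draw()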
| mit | 8,196,216,573,821,732,000 | 37.492958 | 77 | 0.612148 | false |
EscapeLife/common-python-scripts | scripts/check_memory.py | 1 | 1468 | #!/usr/bin/env python
# encoding: utf-8
#=========================================================================================
# 如果提示告警信息,可以根据提示看是否为libgmp版本低的问题,如果是的话,直接升级就可以了
# 使用源码安装或者yum直接安装
#=========================================================================================
import argparse
import paramiko
cmd = 'free -m'
def show_remote_meminfo(hostname, port, user, keyfilename):
sh = paramiko.SSHClient()
sh.load_system_host_keys()
pkey = keyfilename
key = paramiko.RSAKey.from_private_key_file(pkey)
sh.connect(hostname, port, user, pkey=key, timeout=10)
stdin, stdout, stderr = sh.exec_command(cmd)
result = stdout.read()
if result:
        # Iterate over lines rather than individual characters of the output.
        for line in result.splitlines():
            print(line)
sh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--host', action='store', dest='host', default='192.168.31.173')
parser.add_argument('--port', action='store', dest='port', default=22, type=int)
parser.add_argument('--user', action='store', dest='user', default='escape')
parser.add_argument('--keyfilename', action='store', dest='keyfile', default='/root/.ssh/id_rsa')
args = parser.parse_args()
hostname, port, user, keyfilename = args.host, args.port, args.user, args.keyfilename
show_remote_meminfo(hostname, port, user, keyfilename)
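    # Example (assumed): python check_memory.py --host 192.168.31.173 \
    #     --user escape --keyfilename /root/.ssh/id_rsa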
| apache-2.0 | 4,852,110,352,853,459,000 | 33.974359 | 101 | 0.581378 | false |
kyukyukyu/dash | tests/test_models.py | 1 | 7874 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from dash.user.models import User, Role
from dash.catalog.models import (
Campus,
Department,
Subject,
Course,
GenEduCategory,
CourseClass,
)
from .factories import (
UserFactory,
CampusFactory,
DepartmentFactory,
SubjectFactory,
GenEduCategoryFactory,
GeneralCourseFactory,
CourseFactory,
CourseClassFactory,
)
@pytest.mark.usefixtures('db')
class TestUser:
def test_get_by_id(self):
user = User('foo', '[email protected]')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
user = User(username='foo', email='[email protected]')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
user = User(username='foo', email='[email protected]')
user.save()
assert user.password is None
def test_factory(self, db):
user = UserFactory(password="myprecious")
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
user = User.create(username="foo", email="[email protected]",
password="foobarbaz123")
assert user.check_password('foobarbaz123') is True
assert user.check_password("barfoobaz") is False
def test_full_name(self):
user = UserFactory(first_name="Foo", last_name="Bar")
assert user.full_name == "Foo Bar"
def test_roles(self):
role = Role(name='admin')
role.save()
u = UserFactory()
u.roles.append(role)
u.save()
assert role in u.roles
@pytest.mark.usefixtures('db')
class TestCatalog:
def test_get_by_id(self):
campus = Campus(code='HYSEOUL', scraper='scrapers.hyseoul',
name='HYU Seoul')
campus.save()
retrieved_campus = Campus.get_by_id(campus.id)
assert retrieved_campus == campus
department = Department(code='H3HADD',
name='Division of Computer Science and '
'Engineering')
department.campus = campus
department.save()
retrieved_department = Department.get_by_id(department.id)
assert retrieved_department == department
subject = Subject(code='CSE4006', name='Software Engineering')
subject.save()
retrieved_subject = Subject.get_by_id(subject.id)
assert retrieved_subject == subject
gen_edu_category = GenEduCategory(code='B4',
name='Business and Leadership')
gen_edu_category.save()
retrieved_category = GenEduCategory.get_by_id(gen_edu_category.id)
assert retrieved_category == gen_edu_category
course = Course(code='10020', subject=subject, credit=3.0,
departments=[department], major=False)
course.gen_edu_category = gen_edu_category
course.save()
retrieved_course = Course.get_by_id(course.id)
assert retrieved_course == course
assert retrieved_course.general is True
course2 = Course(code='10020', subject=subject,
departments=[department], credit=3.0,
target_grade=3, major=True)
course2.save()
retrieved_course = Course.get_by_id(course2.id)
assert retrieved_course == course2
assert retrieved_course.general is False
course_class = CourseClass(day_of_week=2, start_period=14,
end_period=17, course=course)
course_class.save()
retrieved_course_class = CourseClass.get_by_id(course_class.id)
assert retrieved_course_class == course_class
def test_course_class_conflicts_with(self):
c_c1 = CourseClassFactory(day_of_week=1, start_period=2, end_period=5)
c_c2 = CourseClassFactory(day_of_week=1, start_period=4, end_period=10)
c_c3 = CourseClassFactory(day_of_week=1, start_period=6, end_period=9)
c_c4 = CourseClassFactory(day_of_week=2, start_period=6, end_period=9)
assert c_c1.conflicts_with(c_c2) and c_c2.conflicts_with(c_c1)
assert not c_c1.conflicts_with(c_c3) and not c_c3.conflicts_with(c_c1)
assert c_c2.conflicts_with(c_c3) and c_c3.conflicts_with(c_c2)
assert all((
not c_c1.conflicts_with(c_c4) and not c_c4.conflicts_with(c_c1),
not c_c2.conflicts_with(c_c4) and not c_c4.conflicts_with(c_c2),
not c_c3.conflicts_with(c_c4) and not c_c4.conflicts_with(c_c3),
))
def test_factory(self, db):
campus = CampusFactory()
db.session.commit()
assert bool(campus.name)
assert bool(campus.code)
assert bool(campus.scraper)
assert bool(campus.created_at)
department = DepartmentFactory()
db.session.commit()
assert bool(department.name)
assert bool(department.code)
assert bool(department.created_at)
assert bool(department.campus)
assert isinstance(department.campus, Campus)
assert bool(department.campus_id)
assert department in department.campus.departments
subject = SubjectFactory()
db.session.commit()
assert bool(subject.name)
assert bool(subject.code)
assert bool(subject.created_at)
gen_edu_category = GenEduCategoryFactory()
db.session.commit()
assert bool(gen_edu_category.name)
assert bool(gen_edu_category.code)
assert bool(gen_edu_category.created_at)
def assert_course(course):
assert bool(course.code)
assert bool(course.created_at)
assert bool(course.instructor)
assert bool(course.credit)
assert bool(course.subject)
assert course in course.subject.courses
assert bool(course.subject_id)
assert bool(course.name)
assert course.name == course.subject.name
assert bool(course.departments)
for d in course.departments:
assert course in d.courses
general_course = GeneralCourseFactory()
db.session.commit()
assert_course(general_course)
assert bool(general_course.gen_edu_category)
assert general_course in general_course.gen_edu_category.courses
assert bool(general_course.gen_edu_category_id)
assert general_course.target_grade is None
assert general_course.major is False
assert general_course.general is True
major_course = CourseFactory()
db.session.commit()
assert_course(major_course)
assert 1 <= major_course.target_grade <= 5
assert major_course.major is True
assert major_course.general is False
# If a course is not major, then it should be associated with a
# general education category.
with pytest.raises(AssertionError):
invalid_course = CourseFactory(major=False)
db.session.commit()
db.session.rollback()
course_class = CourseClassFactory()
db.session.commit()
assert bool(course_class.created_at)
assert 0 <= course_class.day_of_week < 7
assert course_class.start_period >= 0
assert course_class.end_period >= 0
assert bool(course_class.course)
assert course_class in course_class.course.classes
assert bool(course_class.course_id)
| bsd-3-clause | 8,446,594,913,177,446,000 | 33.687225 | 79 | 0.61811 | false |
hicinformatic/DJANGO-APP_Authenta | models.py | 1 | 19435 | from django.db import models
from django.utils.translation import ugettext as _
from django.core.validators import MinValueValidator, MaxValueValidator, MinLengthValidator, MaxLengthValidator,RegexValidator
from django.contrib.auth.models import User, Group, Permission
import random
import string
from .settings import conf
choices_method = []
m = -1
if conf["LDAP"] :
m = m+1
from .methods.ldap import methodLDAP
choices_method.insert(m, ('LDAP', _("LDAP")))
if conf["INVI"] :
m = m+1
from .methods.invitation import methodINVI
choices_method.insert(m, ('INVI', _("Invitation")))
def key():
return ''.join(random.choice("#{}[]"+string.hexdigits) for x in range(32))
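# key() builds a random 32-character token from hex digits plus "#{}[]"; it is
# used as the default value for the API keys and invitation codes defined below.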
##########################################################################################################
### Class containing each authentication type
##########################################################################################################
class Authenta(models.Model):
# Configuration global
######################################################################################################
method = models.CharField(
max_length=4,
verbose_name=_("Method"),
choices=choices_method,
default="LDAP",
help_text=_("Authentication type"))
name = models.CharField(
help_text=_("Naming your authentication"),
max_length=254,
verbose_name=_("Firstname"),
)
status = models.BooleanField(
default=True,
help_text=_("Authentication enable or disable"),
verbose_name=_("Activated"),
)
order = models.PositiveSmallIntegerField(
default=1,
help_text=_("Position in the loop authentication"),
validators=[
MinValueValidator(1),
],
verbose_name=_("Authentication order"),
)
# Action on Failure
######################################################################################################
choices_failaction = (
('DIS', _("Disable")),
('DEL', _("Delete")),
)
failaction = models.CharField(
blank=True,
choices=choices_failaction,
max_length=3,
null=True,
help_text=_("Choice an action for failure ( ---- No action)"),
verbose_name=_("Action on failure"),
)
failnumber = models.PositiveIntegerField(
default=0,
help_text=_("Authorized number failure (0 - Unlimited)"),
validators=[
MinValueValidator(0),
MaxValueValidator(50)],
verbose_name=_("Number of failure"),
)
# Groups & Permissions
######################################################################################################
isactive = models.BooleanField(
default=True,
verbose_name=_("Active status"),
)
isstaff = models.BooleanField(
default=False,
verbose_name=_("Staff status"),
)
issuperuser = models.BooleanField(
default=False,
verbose_name=_("Superuser status"),
)
groups = models.ManyToManyField(
Group,
blank=True,
verbose_name=_("Groups associated"),
)
permissions = models.ManyToManyField(
Permission,
blank=True,
verbose_name=_("Permissions associated"),
)
# Logs modifications
######################################################################################################
updateby = models.ForeignKey(
User,
blank=True,
editable=False,
null=True,
verbose_name=_("Last update by"),
)
datecreate = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_("Creation date"),
)
dateupdate = models.DateTimeField(
auto_now=True,
editable=False,
verbose_name=_("Last modification date"),
)
errorstr = models.TextField(
blank=True,
editable=False,
null=True,
verbose_name=_("Error encountered"),
)
errorstatus = models.BooleanField(
default=True,
editable=False,
verbose_name=_("Not in error"),
)
usednumber = models.PositiveIntegerField(
default=0,
editable=False,
verbose_name=_("Number user created"),
)
# Fields mapping
######################################################################################################
help_text=_("Automatically filled field with key map (Keep null if not used)")
firstname = models.CharField(
blank=True,
help_text=help_text,
max_length=254,
null=True,
verbose_name=_("Firstname"),
)
lastname = models.CharField(
blank=True,
help_text=help_text,
max_length=254,
null=True,
verbose_name=_("Lastname"),
)
email = models.EmailField(
blank=True,
help_text=help_text,
max_length=254,
null=True,
verbose_name=_("Email"),
)
# LDAP
#######################################################################################################
ldap_host = models.CharField(
blank=True,
help_text=_("Use hostname or IP address"),
max_length=254,
null=True,
verbose_name=_("Hostname/IP"),
)
ldap_port = models.PositiveIntegerField(
blank=True,
default=389,
help_text=_("Keep 389 to use default port"),
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(65535)],
verbose_name=_("Port"),
)
ldap_tls = models.BooleanField(
default=False,
help_text=_("Use option TLS"),
verbose_name=_("Option TLS"),
)
ldap_define = models.CharField(
blank=True,
help_text=_("Base DN ex: dc=domain,dc=com"),
max_length=254,
null=True,
verbose_name=_("Base DN"),
)
choices_ldap_scope = (
("SCOPE_BASE", "SCOPE_BASE" ),
("SCOPE_ONELEVEL", "SCOPE_ONELEVEL"),
("SCOPE_SUBTREE", "SCOPE_SUBTREE" ),
)
ldap_scope = models.CharField(
max_length=14,
verbose_name=_("Scope"),
choices=choices_ldap_scope,
default="SCOPE_BASE",
help_text=_("Choice a scope"),
)
choices_ldap_version = (
("VERSION2", "VERSION2"),
("VERSION3", "VERSION3"),
)
ldap_version = models.CharField(
max_length=8,
verbose_name=_("Version"),
choices=choices_ldap_version,
default="VERSION2",
help_text=_("Choice a version")
)
ldap_bind = models.CharField(
blank=True,
help_text=_("Bind for override user permission, ex: cn=manager,dc=domain,dc=com (Keep null if not used)"),
max_length=254,
null=True,
verbose_name=_("Bind DN"),
)
ldap_password = models.CharField(
blank=True,
help_text=_("Password used by the bind"),
max_length=254,
null=True,
verbose_name=_("Password"),
)
ldap_user = models.TextField(
blank=True,
help_text=_("user DN ex : uid={{username}},ou=users,dc=domain,dc=com | Tags: {{username}}"),
null=True,
verbose_name=_("User DN"),
)
ldap_search = models.TextField(
verbose_name=_("Search DN"),
help_text=_("search DN (filter) ex : uid={{username}} | Tags: {{username}}"),
blank=True,
null=True,
)
# Invitation
#######################################################################################################
invit_goddefault = models.ForeignKey(
User,
blank=True,
null=True,
related_name="INVI_godfather",
verbose_name=_("Default GodFather"),
)
invit_godfather = models.BooleanField(
default=True,
help_text=_("Use GodFather enable or disable"),
verbose_name=_("Use GodFather"),
)
invit_generatenumber = models.PositiveSmallIntegerField(
blank=True,
default=100,
help_text=_("Number of unallocated invitations"),
null=True,
verbose_name=_("Number of invitation"),
)
invit_generatebyuser = models.PositiveSmallIntegerField(
blank=True,
help_text=_("Number of unallocated invitations by user"),
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(10)
],
verbose_name=_("Number generating"),
)
invit_isactive = models.BooleanField(
default=True,
verbose_name=_("Active status"),
)
invit_isstaff = models.BooleanField(
default=False,
verbose_name=_("Just for staff"),
)
invit_issuperuser = models.BooleanField(
default=False,
verbose_name=_("Just for SuperUser"),
)
invit_groups = models.ManyToManyField(
Group,
blank=True,
related_name="INVI_group",
verbose_name=_("Users with these permissions"),
)
invit_permissions = models.ManyToManyField(
Permission,
related_name="INVI_persmission",
blank=True,
verbose_name=_("Users with these permissions"),
)
def invit_count(self, **kwargs):
return AuthentaInvitation.objects.filter(authenta=self, nephew__isnull=True).count()
def invit_generator(self, user):
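        # Generates unallocated invitation codes. With a per-user quota set,
        # each eligible user already holding pending codes is topped up to
        # `invit_generatebyuser`; otherwise a global pool of
        # `invit_generatenumber` codes is filled, assigning each code to a
        # random eligible godfather. Returns the number of codes created.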
groups = self.invit_groups.all()
permissions = self.invit_permissions.all()
kwargs={}
kwargs["is_active"] = self.invit_isactive
if self.invit_isstaff: kwargs["is_staff"] = True
if self.invit_issuperuser: kwargs["is_superuser"] = True
if groups: kwargs["groups__in"] = groups
if permissions: kwargs["user_permissions__in"] = permissions
i = 0
if self.invit_generatebyuser is not None:
users = User.objects.filter(**kwargs)
if users:
for u in users:
number = AuthentaInvitation.objects.filter(authenta=self, godfather=u, nephew__isnull=True).count()
if number > 0:
for inv in range(0, self.invit_generatebyuser - number):
i = i+1
newinvitation = AuthentaInvitation(authenta=self, updateby=user)
newinvitation.godfather = u
newinvitation.save()
else:
self.success()
else:
self.failed(_("No user available"))
else:
number = self.invit_generatenumber - AuthentaInvitation.objects.filter(authenta=self, nephew__isnull=True).count()
if number > 0 :
for inv in range(0, number):
i = i+1
godfather = User.objects.filter(**kwargs).order_by('?').first()
if godfather:
newinvitation = AuthentaInvitation(authenta=self, updateby=user)
newinvitation.godfather = godfather
newinvitation.save()
else:
self.failed(_("No user available"))
break
else:
self.success()
return i
# Meta & Return Django
#######################################################################################################
class Meta:
verbose_name = _("Authentication")
verbose_name_plural = _("Authentications")
ordering = ["order"]
def __str__(self):
return self.name
# Methods
#######################################################################################################
def get(self):
if self.method == "LDAP": return methodLDAP(self)
if self.method == "INVI": return methodINVI(self)
return False
def failed(self, error):
self.errorstr = str(error)
self.errorstatus = False
self.save()
return False
def success(self):
self.errorstr = None
self.errorstatus = True
self.save()
return True
def create(self, username, password, data=None):
try:
user = {}
user["username"] = username
user["password"] = password
user["is_staff"] = self.isstaff
user["is_superuser"] = self.issuperuser
user["is_active"] = self.isactive
user = User.objects.create_user(**user)
for g in self.groups.all():
user.groups.add(g)
for p in self.permissions.all():
user.user_permissions.add(p)
user.save()
self.usednumber = self.usednumber+1
self.save()
log = AuthentaLogs(authenta=self, user=user)
if data is not None:
log.data = data
log.save()
self.success()
return user
except Exception as error:
return self.failed(error)
#########################################################################################################
### Class managing the API that handles HTTP requests via cURL
#########################################################################################################
class AuthentaAPI(models.Model):
status = models.BooleanField(
default=True,
help_text=_("Authentication with API enable or disable"),
verbose_name=_("Activated"),
)
user = models.OneToOneField(
User,
default=1,
verbose_name=_("User"),
)
key = models.CharField(
default=key,
max_length=32,
unique=True,
validators=[
MaxLengthValidator(32),
MinLengthValidator(10)
],
verbose_name=_("Authentication key"),
)
updateby = models.ForeignKey(
User,
blank=True,
editable=False,
null=True,
related_name="API_updatebyuser",
verbose_name=_("Last update by"),
)
datecreate = models.DateTimeField(
auto_now_add=True,
verbose_name=_("Creation date"),
)
dateupdate = models.DateTimeField(
auto_now=True,
verbose_name=_("Last modification date"),
)
usednumber = models.BigIntegerField(
default=0,
verbose_name=_("Number of use"),
)
# Meta & Return Django
#######################################################################################################
class Meta:
verbose_name = _("Key management")
verbose_name_plural = _("Keys management")
def increment(self):
self.usednumber = self.usednumber+1
self.save()
#########################################################################################################
### Class managing invitation codes for the INVI type
#########################################################################################################
class AuthentaInvitation(models.Model):
authenta = models.ForeignKey(
Authenta,
editable=False,
verbose_name=_("Method associated"),
)
code = models.CharField(
default=key,
max_length=32,
unique=True,
validators=[
MaxLengthValidator(32),
MinLengthValidator(10)
],
verbose_name=_("Invitation code"),
)
godfather = models.ForeignKey(
User,
default=1,
related_name="godfather",
verbose_name=_("GodFather"),
)
nephew = models.OneToOneField(
User,
blank=True,
null=True,
related_name="nephew",
verbose_name=_("Nephew"),
)
updateby = models.ForeignKey(
User,
blank=True,
editable=False,
null=True,
related_name="Invitation_updatebyuser",
verbose_name=_("Last update by"),
)
datecreate = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_("Creation date"),
)
dateupdate = models.DateTimeField(
auto_now=True,
editable=False,
verbose_name=_("Last modification date"),
)
# Meta & Return Django
#######################################################################################################
class Meta:
verbose_name = _("Invitation code")
verbose_name_plural = _("Invitation codes")
def __str__(self):
return self.code
#########################################################################################################
### Class managing authentication logs
#########################################################################################################
class AuthentaLogs(models.Model):
authenta = models.ForeignKey(
Authenta,
editable=False,
)
user = models.OneToOneField(
User,
default=1,
editable=False,
verbose_name=_("User"),
)
failnumber = models.PositiveIntegerField(
default=0,
help_text=_("Number of login failed"),
validators=[
MinValueValidator(0),
MaxValueValidator(50)],
verbose_name=_("Number of failure"),
)
errorstr = models.TextField(
blank=True,
editable=False,
null=True,
verbose_name=_("Error encountered"),
)
data = models.TextField(
blank=True,
editable=False,
null=True,
verbose_name=_("Data returned"),
)
datecreate = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name=_("Creation date"),
)
dateupdate = models.DateTimeField(
auto_now=True,
editable=False,
verbose_name=_("Last modification date"),
)
# Meta & Return Django
#######################################################################################################
class Meta:
verbose_name = _("#Log")
verbose_name_plural = _("#Logs")
def __str__(self):
return self.user.username
# Methods
#######################################################################################################
def failed(self):
self.errorstr = self.authenta.errorstr
self.failnumber = self.failnumber+1
self.save()
self.actions()
return False
def success(self):
self.errorstr = None
self.failnumber = 0
self.save()
return True
def update(self, password):
try:
self.user.set_password(password)
self.user.save()
print("password: "+password)
except Exception as error:
return self.failed(error)
return self.success()
def actions(self):
if self.authenta.failnumber > 0:
if self.authenta.failaction is not None:
if self.failnumber >= self.authenta.failnumber:
if self.authenta.failaction == "DEL": return self.delete()
if self.authenta.failaction == "DIS": return self.disable()
def delete(self):
self.user.delete()
def disable(self):
self.user.is_active = False
self.user.save() | gpl-3.0 | 1,347,190,716,806,569,500 | 30.64658 | 126 | 0.493567 | false |
Smoothieware/Smoothieware | fast-stream.py | 1 | 2988 | #!/usr/bin/env python
"""\
Stream g-code to Smoothie USB serial connection
Based on GRBL stream.py, but completely different
"""
from __future__ import print_function
import sys
import argparse
import serial
import threading
import time
import signal
errorflg = False
intrflg = False
def signal_term_handler(signal, frame):
global intrflg
print('got SIGTERM...')
intrflg = True
signal.signal(signal.SIGTERM, signal_term_handler)
# Define command line argument interface
parser = argparse.ArgumentParser(description='Stream g-code file to Smoothie over a serial connection.')
parser.add_argument('gcode_file', type=argparse.FileType('r'), help='g-code filename to be streamed')
parser.add_argument('device', help='Smoothie Serial Device')
parser.add_argument('-q', '--quiet', action='store_true', default=False, help='suppress output text')
args = parser.parse_args()
f = args.gcode_file
verbose = not args.quiet
# Stream g-code to Smoothie
dev = args.device
# Open port
s = serial.Serial(dev, 115200)
s.flushInput() # Flush startup text in serial input
print("Streaming " + args.gcode_file.name + " to " + args.device)
okcnt = 0
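# Flow control: the firmware replies "ok" for every line it consumes, so the
# reader thread counts acknowledgements (okcnt) while the main loop counts
# sent lines (linecnt); streaming is complete once okcnt catches up to linecnt.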
def read_thread():
"""thread worker function"""
global okcnt, errorflg
flag = 1
while flag:
rep = s.readline().decode('latin1')
n = rep.count("ok")
if n == 0:
print("Incoming: " + rep)
if "error" in rep or "!!" in rep or "ALARM" in rep or "ERROR" in rep:
errorflg = True
break
else:
okcnt += n
print("Read thread exited")
return
# start read thread
t = threading.Thread(target=read_thread)
t.daemon = True
t.start()
linecnt = 0
try:
for line in f:
if errorflg:
break
# strip comments
if line.startswith(';'):
continue
l = line.strip()
o = "{}\n".format(l).encode('latin1')
n = s.write(o)
if n != len(o):
print("Not entire line was sent: {} - {}".format(n, len(o)))
linecnt += 1
if verbose:
print("SND " + str(linecnt) + ": " + line.strip() + " - " + str(okcnt))
except KeyboardInterrupt:
print("Interrupted...")
intrflg = True
if intrflg:
# We need to consume oks otherwise smoothie will deadlock on a full tx buffer
print("Sending Abort - this may take a while...")
s.write(b'\x18') # send halt
while(s.inWaiting()):
s.read(s.inWaiting())
linecnt = 0
if errorflg:
print("Target halted due to errors")
else:
print("Waiting for complete...")
while okcnt < linecnt:
if verbose:
print(str(linecnt) + " - " + str(okcnt))
if errorflg:
        s.read(s.inWaiting())  # read all remaining characters
break
time.sleep(1)
# Wait here until finished to close serial port and file.
print(" Press <Enter> to exit")
input()
# Close file and serial port
f.close()
s.close()
| gpl-3.0 | 954,539,669,062,893,400 | 22.904 | 101 | 0.614123 | false |
FedericoCeratto/lightyears | code/ui.py | 1 | 17342 | #
# 20,000 Light Years Into Space
# This game is licensed under GPL v2, and copyright (C) Jack Whitham 2006-07.
#
# Do you believe in the users?
import pygame , random
from pygame.locals import *
import stats , menu , draw_obj , mail , particle , tutor
import resource
from map_items import *
from primitives import *
class Gauge(object):
"""Round steampunk gauge"""
def __init__(self, x, y, d):
d = d * Get_Grid_Size() # diameter
self.back_img = resource.Load_Image("gauge.png", scale_to=(d, d))
self.hand_img = resource.Load_Image("gauge_hand.png", scale_to=(d, d))
self.glass_img = resource.Load_Image("gauge_glass.png", scale_to=(d, d))
self._pos = GVector(x, y).in_pixels
self._animated_pressure = 0
self._speed = .2
self._vibration = random.randint(0, 200)
def rotate_hand(self, bar=None):
"""Rotate pressure hand"""
if bar is None:
bar = 0
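        # Map pressure linearly onto the dial: 0 bar points at 199 degrees,
        # and the assumed full-scale reading of 27 bar points at 29 degrees.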
angle = 199 - bar / 27.0 * 170
center = self.hand_img.get_rect().center
hand = pygame.transform.rotate(self.hand_img, angle)
newrect = hand.get_rect()
newrect.center = center
return hand, newrect
def draw(self, output, bar=None):
"""Draw gauge and hand"""
# the pressure displayed by the gauge will reach "bar" eventually
delta = (bar - self._animated_pressure) * self._speed
self._animated_pressure += delta
bar = self._animated_pressure
# animate vibration
v = self._vibration
if v > 200:
self._vibration = 0
if v % 2 == 0:
if v < 40:
bar += v / 100.0
elif v < 80:
bar += (80 - v) / 100.0
self._vibration += 1
hand, hand_rect = self.rotate_hand(bar=bar)
output.blit(self.back_img, self._pos)
output.blit(hand, self._pos + hand_rect)
output.blit(self.glass_img, self._pos)
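# Hypothetical usage sketch (names are illustrative): a Gauge is created once
# and redrawn every frame, e.g.
#   gauge = Gauge(0, 0, 4)
#   gauge.draw(screen, bar=net.hub.Get_Pressure() * .4)
# The * .4 scaling mirrors how Draw_Controls feeds pressures further below.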
class Valve(object):
"""Big valve"""
def __init__(self):
self._pos = PVector(9.5 * Get_Grid_Size(), 0)
h = 5 * Get_Grid_Size() # height
d = Get_Grid_Size() # handle diameter
self._back_img = resource.Load_Image("valve_back.png", scale_to=(None, h))
self._handle_img = resource.Load_Image("valve_handle.png", scale_to=(d, d))
self._anim_rotation = self._gen_animate_rotation()
self._anim_rotation.next()
def _gen_animate_rotation(self):
"""Generate handle rotation animation"""
angle = 0
is_open = True
while True:
if angle < 30 and not is_open:
angle += 4
elif angle > 0 and is_open:
angle -= 4
is_open = (yield angle)
def rotate_handle(self, is_open):
"""Rotate handle"""
if is_open is None:
angle = 0 # no pipe selected
else:
angle = self._anim_rotation.send(is_open)
center = self._handle_img.get_rect().center
handle = pygame.transform.rotate(self._handle_img, angle)
newrect = handle.get_rect()
newrect.center = GVector(-.55, 1.38).in_pixels + PVector(center)
return handle, newrect
def draw(self, output, is_open=None):
output.blit(self._back_img, self._pos)
if is_open is not None:
handle, handle_rect = self.rotate_handle(is_open=is_open)
output.blit(handle, self._pos + handle_rect)
class User_Interface:
def __init__(self, net, (width, height)):
self.net = net
self.control_menu = None
self.Reset()
self.blink = 0xff
# Although there is only one base image, it is scaled and
# cropped on startup to create different backdrops.
# (Note: These don't get saved, as they're part of the UI. That's bad.)
img = resource.Load_Image("moon_surface.jpg")
zoom = 1 + random.random() # zoom in between 1 and 2
scaled = pygame.transform.smoothscale(img,
(int(width * zoom), int(height * zoom))
)
# get random coordinates to extract a background surface
x = randint(0, scaled.get_width() - width)
y = randint(0, scaled.get_height() - height)
self.background = pygame.Surface((width, height),flags=pygame.SRCALPHA)
self.background.blit(scaled, (0,0),(x, y, x + width, y + height))
self.steam_effect = particle.Make_Particle_Effect(particle.Steam_Particle)
self.steam_effect_frame = 0
self.gauges = dict(
city_pressure = Gauge(0, 0, 4),
selected_pressure = Gauge(4.5, 0, 4)
)
self.valve = Valve()
self.vehicle_list = []
#self.vehicle_list.extend(
# Transport(network=self.net) for x in xrange(2)
#)
#self.vehicle_list.extend(
# [Tank(network=self.net, vehicles=self.vehicle_list) for x in xrange(10)]
#)
def Update_Area(self, area):
if ( area != None ):
self.partial_update = True
# pygame.Rect is rather good.
if ( len(self.update_area_list) == 0 ):
self.update_area_list = [area]
else:
ci = area.collidelist(self.update_area_list)
if ( ci < 0 ):
# New area!
self.update_area_list.append(area)
else:
# Area overlaps an existing area, which gets expanded.
self.update_area_list[ ci ].union_ip(area)
def Draw_Game(self, output, season_fx):
blink = self.blink
if ( season_fx.Is_Shaking() and not self.Is_Menu_Open() ):
# Earthquake effect
m = 6
r = output.get_rect()
r.left += random.randint(-m, m)
r.top += random.randint(-m, m)
r = output.get_rect().clip(r)
output = output.subsurface(r)
if ( self.net.dirty ):
self.net.dirty = False
output.blit(self.background,(0,0))
self.__Update_Reset()
for w in self.net.well_list:
w.Draw(output)
self.Add_Steam_Effect(output, w.pos)
if ( self.selection != None ):
# highlight selection
r = self.selection.Draw_Selected(output, (blink, blink, 0))
self.Update_Area(r)
for p in self.net.pipe_list:
p.Draw(output)
for n in self.net.node_list:
n.Draw(output)
if ( n.emits_steam ):
self.Add_Steam_Effect(output, n.pos)
for r in self.net.rock_list:
r.Draw(output)
for v in self.vehicle_list:
v.draw(output)
season_fx.Draw(output, self.Update_Area)
gpos = self.mouse_pos
if ( gpos != None ):
if ( self.mode == BUILD_NODE ):
# could put a node here.
r = Grid_To_Scr_Rect(gpos)
self.Update_Area(r)
color = (255, 255, 0, 200)
draw_ellipse(output, Point(r.topleft), 1, color, 1)
# draw excavation shadow
draw_ellipse(output, Point(r.topleft),
INITIAL_NODE_EXCAVATION_DISTANCE, (0, 0, 0, 10), 1,
filled=True)
# draw excavation limit
draw_ellipse(output, Point(r.topleft),
INITIAL_NODE_EXCAVATION_DISTANCE , (0, 0, 0, 30), 1)
elif (( self.mode == BUILD_PIPE )
and ( self.selection != None )
and ( isinstance(self.selection, Node) )):
# pipe route illustrated
sp = Grid_To_Scr(self.selection.pos)
ep = Grid_To_Scr(gpos)
colour = (80,80,50)
if ( not self.net.Pipe_Possible(self.selection.pos, gpos) ):
colour = (100,0,0)
r = Rect(sp,(2,2)).union(Rect(ep,(2,2)))
self.Update_Area(r)
pygame.draw.line(output, colour, sp, ep, 2)
for item in self.net.popups:
r = item.Draw_Popup(output)
self.Update_Area(r)
mail.Draw_Mail(output)
if ( not self.Is_Menu_Open () ):
self.blink = 0x80 | ( 0xff & ( self.blink + 0x10 ))
self.steam_effect_frame = (
self.steam_effect_frame + 1 ) % len(self.steam_effect)
if ( DEBUG_GRID ):
self.Debug_Grid(output)
def Draw_Selection(self, output):
output.fill((20,0,0))
if ( self.selection != None ):
r = output.get_rect()
r.center = Grid_To_Scr(self.selection.pos)
for p in self.net.pipe_list:
p.Draw_Mini(output, r.topleft)
for n in self.net.node_list:
n.Draw_Mini(output, r.topleft)
def Draw_Stats(self, output, default_stats):
if ( self.selection == None ):
l = default_stats
else:
l = self.selection.Get_Information()
if ( not self.net.Is_Connected(self.selection) ):
l += [ ((255,0,0), 15, "Not connected to network") ]
h = hash(str(l))
if ( h != self.stats_hash ):
# Stats have changed.
output.fill((0,0,0))
stats.Draw_Stats_Window(output, l)
self.stats_hash = h
def Draw_Controls(self, output):
if ( self.control_menu == None ):
self.__Make_Control_Menu(output.get_rect().width)
# draw city pressure gauge
self.gauges['city_pressure'].draw(
output,
bar=self.net.hub.Get_Pressure() * .4,
)
# draw selected item gauge
if isinstance(self.selection, Node):
bar = self.selection.steam.Get_Pressure() * .4
elif isinstance(self.selection, Pipe):
bar = self.selection.current_n1_to_n2
bar = abs(bar)
else:
bar = 0
self.gauges['selected_pressure'].draw(
output,
bar=bar,
)
if isinstance(self.selection, Pipe):
self.valve.draw(output, is_open=self.selection.valve_open)
else:
self.valve.draw(output)
self.control_menu.Draw(output, top=5*Get_Grid_Size())
def Control_Mouse_Move(self, spos):
if ( self.control_menu != None ):
self.control_menu.Mouse_Move(spos)
def Control_Mouse_Down(self, spos):
if ( self.control_menu != None ):
self.control_menu.Mouse_Down(spos)
self.mode = self.control_menu.Get_Command()
if ( self.selection != None ):
if ( self.mode == DESTROY ):
self.net.Destroy(self.selection)
self.__Clear_Control_Selection()
self.selection = None
elif ( self.mode == UPGRADE ):
self.selection.Begin_Upgrade()
self.__Clear_Control_Selection()
def Key_Press(self, k):
if ( self.control_menu != None ):
self.control_menu.Key_Press(k)
self.mode = self.control_menu.Get_Command()
def Right_Mouse_Down(self):
self.selection = None
self.mouse_pos = None
self.__Clear_Control_Selection()
def __Clear_Control_Selection(self):
self.mode = NEUTRAL
if ( self.control_menu != None ):
self.control_menu.Select(NEUTRAL)
def Reset(self):
self.selection = None
self.mouse_pos = None
self.__Clear_Control_Selection()
self.stats_hash = 0
self.__Update_Reset()
def __Update_Reset(self):
self.partial_update = False
self.update_area_list = []
def Is_Menu_Open(self):
return ( self.mode == OPEN_MENU )
def Game_Mouse_Down(self, spos):
gpos = Scr_To_Grid(spos)
if (( self.selection != None )
and ( self.selection.Is_Destroyed() )):
self.selection = None
if ( DEBUG ):
print 'Selection:',self.selection
for (i,n) in enumerate(self.net.node_list):
if ( n == self.selection ):
print 'Found: node',i
for (i,p) in enumerate(self.net.pipe_list):
if ( p == self.selection ):
print 'Found: pipe',i
print 'End'
if ( not self.net.ground_grid.has_key(gpos) ):
self.selection = self.net.Get_Pipe(gpos)
# empty (may contain pipes)
if ( self.mode == BUILD_NODE ):
# create new node!
if self.net.use_metal('node'):
n = Node(gpos, rocks=self.net.rock_list)
n.Sound_Effect()
self.selection = None
if ( self.net.Add_Grid_Item(n) ):
self.selection = n
tutor.Notify_Add_Node(n)
elif ( self.mode == DESTROY ):
# I presume you are referring to a pipe?
pipe = self.selection
if ( pipe != None ):
self.net.Destroy(pipe)
self.__Clear_Control_Selection()
self.selection = None
elif ( self.mode == UPGRADE ):
if ( self.selection != None ):
if self.net.use_metal('up_node'):
self.selection.Begin_Upgrade()
self.__Clear_Control_Selection()
elif ( self.selection != None ):
self.selection.Sound_Effect()
elif ( isinstance(self.net.ground_grid[ gpos ], Node)):
# Contains node
n = self.net.ground_grid[ gpos ]
if ( self.mode == BUILD_PIPE ):
if (( self.selection == None )
or ( isinstance(self.selection, Pipe))):
# start a new pipe here
self.selection = n
n.Sound_Effect()
elif (( isinstance(n, Node) )
and ( isinstance(self.selection, Node) )
and ( n != self.selection )):
# end pipe here
if ( self.net.Add_Pipe(self.selection, n) ):
tutor.Notify_Add_Pipe()
self.selection = None
elif ( self.mode == DESTROY ):
self.net.Destroy(n)
self.selection = None
self.__Clear_Control_Selection()
elif ( self.mode == UPGRADE ):
if self.net.use_metal('up_node'):
n.Begin_Upgrade()
self.selection = n
self.__Clear_Control_Selection()
else:
self.selection = n
n.Sound_Effect()
elif ( isinstance(self.net.ground_grid[ gpos ], Well)):
# Contains well (unimproved)
w = self.net.ground_grid[ gpos ]
if ( self.mode == BUILD_NODE ):
# A node is planned on top of the well.
if self.net.use_metal('well'):
self.selection = None
n = Well_Node(gpos)
if ( self.net.Add_Grid_Item(n) ):
self.selection = n
self.selection.Sound_Effect()
## Select a rock
#for rock in self.net.rock_list:
# if rock.pos == gpos:
# self.selection = rock
# rock.Sound_Effect()
# continue
self.net.Popup(self.selection)
tutor.Notify_Select(self.selection)
def Game_Mouse_Move(self, spos):
self.mouse_pos = Scr_To_Grid(spos)
if ( self.control_menu != None ):
self.control_menu.Mouse_Move(None)
def Debug_Grid(self, output):
(mx, my) = GRID_SIZE
for y in xrange(my):
for x in xrange(mx):
if ( self.net.pipe_grid.has_key( (x,y) ) ):
r = Grid_To_Scr_Rect((x,y))
pygame.draw.rect(output, (55,55,55), r, 1)
r.width = len(self.net.pipe_grid[ (x,y) ]) + 1
pygame.draw.rect(output, (255,0,0), r)
def Add_Steam_Effect(self, output, pos):
sfx = self.steam_effect[ self.steam_effect_frame ]
r = sfx.get_rect()
r.midbottom = Grid_To_Scr(pos)
output.blit(sfx, r.topleft)
self.Update_Area(r)
def __Make_Control_Menu(self, width):
pictures = dict()
pictures[ BUILD_NODE ] = "bricks.png"
pictures[ BUILD_PIPE ] = "bricks2.png"
pictures[ DESTROY ] = "destroy.png"
pictures[ UPGRADE ] = "upgrade.png"
pictures[ OPEN_MENU ] = "menuicon.png"
self.control_menu = menu.Enhanced_Menu([
(BUILD_NODE, "Build &Node", [ K_n ]),
(BUILD_PIPE, "Build &Pipe", [ K_p ]),
(DESTROY, "&Destroy", [ K_d , K_BACKSPACE ]),
(UPGRADE, "&Upgrade", [ K_u ]),
(None, None, None),
(OPEN_MENU, "Menu", [ K_ESCAPE ])],
pictures, width)
def Frame_Advance(self, frame_time):
for p in self.net.pipe_list:
p.Frame_Advance(frame_time)
| gpl-2.0 | -6,321,136,411,997,031,000 | 32.871094 | 85 | 0.509918 | false |
matthiasplappert/lego-elevator | elevator.py | 1 | 2512 | #!/usr/bin/python
import serial
from Queue import Queue, Empty
import time
import threading
from dotstar import Adafruit_DotStar
BAUDRATE = 115200
DEVICE = '/dev/ttyACM0'
DATAPIN = 20
CLOCKPIN = 21
NUMPIXELS = 59
BLUE = 0x0000FF
GREEN = 0xFF0000
RED = 0x00FF00
OFF = 0x000000
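# NOTE: the hex constants above assume the strip expects 0xGGRRBB
# (green/red/blue) channel order, common for DotStar/APA102 LEDs -- hence
# GREEN being 0xFF0000 and RED 0x00FF00.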
ELEVATOR_DOWN = b'd'
ELEVATOR_UP = b'u'
TIME_UP_S = 8
TIME_DOWN_S = 8
TIME_TO_LEAVE_ELEVATOR_S = 0 # disabled since the lock already guarantees that
class Elevator(threading.Thread):
def __init__(self, kill_event, loop_time=1.0 / 60.0):
self.status = "INIT"
self.q = Queue()
self.kill_event = kill_event
self.timeout = loop_time
self.baudrate = BAUDRATE
self.dev = DEVICE
self.strip = Adafruit_DotStar(NUMPIXELS, DATAPIN, CLOCKPIN)
self.strip.begin()
self.strip.setBrightness(255)
self.serial = serial.Serial(self.dev)
self.serial.baudrate = 115200
# Initial state
self.send_elevator_command(ELEVATOR_DOWN)
self.status = "DOWN"
self.set_lights(OFF)
super(Elevator, self).__init__()
def onThread(self, function, *args, **kwargs):
self.q.put((function, args, kwargs))
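        # Queue the call so it runs on this thread's run() loop, keeping all
        # serial and LED access on a single thread. Hypothetical caller:
        #     elevator.onThread(elevator.up)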
def run(self):
self.down()
while True:
if self.kill_event.is_set():
self.close()
return
try:
function, args, kwargs = self.q.get(timeout=self.timeout)
function(*args, **kwargs)
except Empty:
pass
def send_elevator_command(self, command):
self.serial.flushInput() # avoids that the input buffer overfloats
self.serial.write(command)
self.serial.flush()
def up(self):
self.status = "GOING_UP"
self.send_elevator_command(ELEVATOR_UP)
time.sleep(TIME_UP_S)
self.status = "UP"
self.set_lights(GREEN)
def down(self):
self.status = "GOING_DOWN"
self.send_elevator_command(ELEVATOR_DOWN)
self.set_lights(OFF)
time.sleep(TIME_DOWN_S)
self.status = "DOWN"
time.sleep(TIME_TO_LEAVE_ELEVATOR_S)
self.status = "FREE"
def close(self):
self.serial.close()
def set_lights(self, color):
for i in range(NUMPIXELS):
self.strip.setPixelColor(i, color)
if color == OFF:
self.strip.setBrightness(0)
else:
self.strip.setBrightness(255)
self.strip.show()
| gpl-3.0 | 4,372,351,016,660,468,000 | 24.896907 | 79 | 0.586385 | false |
276562578/baidustory | baidustory.py | 1 | 2234 | #!/usr/bin/python
#coding:utf-8
print 'This is a crawler for Baidu Tieba novels: it scrapes the featured ("good") threads of a novel tieba and merges them together'+'\n'
import urllib2
import re
#url=raw_input('Enter the URL of the featured serial thread list')
url='http://tieba.baidu.com/f/good?kw=%E5%A4%A7%E4%B8%BB%E5%AE%B0&ie=utf-8&cid=2'
# Find the address of each chapter thread and store them in the page_address list
def get_page_address():
    # These calls could also be chained together,
    # e.g. html = urllib2.urlopen('www.com').read()
    html=urllib2.urlopen(url).read().decode("utf-8")
    print "Requesting page..."
    # Use a regex to find each thread's address
find_page_string=re.compile(r'<a href="/p/.*?" title=.*?\" target="_blank"')
page_string=find_page_string.findall(html)
print "正在分析网页......"
all_page_string=""
for i in page_string:
all_page_string=all_page_string+i
find_page_number=re.compile(r'\d{10}')
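    # NOTE: this assumes Tieba thread IDs are exactly 10 digits long.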
page_number=find_page_number.findall(all_page_string)
page_address=[]
for p in range(len(page_number)):
page_address.append("http://tieba.baidu.com/p/"+str(page_number[p]))
print "已得到网页列表"
return(page_address)
def get_article(page_address):
article_html=[]
crude_article=[]
article=[]
for p in range(len(page_address)):
article_html.append(urllib2.urlopen(page_address[p]).read().decode("utf-8"))
print "正在添加第"+str(p+1)+"篇文章"
find_crude_article=re.compile(r'd_post_content j_d_post_content.*?share_thread share_thread_wrapper')
crude_article.append(find_crude_article.findall(article_html[p]))
article_begin_dropped=re.compile(r'd_post_content j_d_post_content ">')
crude_article[p]=article_begin_dropped.sub(r'',crude_article[p][0])
article_end_dropped=re.compile(r'</div>.*share_thread_wrapper')
crude_article[p]=article_end_dropped.sub(r'',crude_article[p])
article_br_replace=re.compile(r'<br>.*?<br>')
article.append(article_br_replace.sub(r'\n',crude_article[p]))
print article[p]
get_article(get_page_address())
| gpl-3.0 | -149,551,047,161,510,900 | 38.92 | 117 | 0.620741 | false |
septicmk/MEHI | test/test_utils.py | 1 | 1542 | ################################
# Author : septicmk
# Date : 2015/07/24 19:41:26
# FileName : test_utils.py
################################
import shutil
import tempfile
import unittest
from numpy import vstack
from pyspark import SparkContext
class PySparkTestCase(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
self.sc = SparkContext('local', class_name)
self.sc._jvm.System.setProperty("spark.ui.showConsoleProgress", "false")
log4j = self.sc._jvm.org.apache.log4j
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def tearDown(self):
self.sc.stop()
# To avoid Akka rebinding to the same port, since it doesn't unbind
# immediately on shutdown
self.sc._jvm.System.clearProperty("spark.driver.port")
class PySparkTestCaseWithOutputDir(PySparkTestCase):
def setUp(self):
super(PySparkTestCaseWithOutputDir, self).setUp()
self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(PySparkTestCaseWithOutputDir, self).tearDown()
shutil.rmtree(self.outputdir)
class LocalTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
class LocalTestCaseWithOutputDir(LocalTestCase):
def setUp(self):
super(LocalTestCaseWithOutputDir, self).setUp()
        self.outputdir = tempfile.mkdtemp()
def tearDown(self):
super(LocalTestCaseWithOutputDir, self).tearDown()
shutil.rmtree(self.outputdir)
| bsd-3-clause | -8,656,020,026,376,493,000 | 27.036364 | 80 | 0.654994 | false |
omise/omise-python | omise/test/test_dispute.py | 1 | 10703 | import mock
import unittest
import tempfile
from .helper import _ResourceMixin
class DisputeTest(_ResourceMixin, unittest.TestCase):
def _getTargetClass(self):
from .. import Dispute
return Dispute
def _getCollectionClass(self):
from .. import Collection
return Collection
def _getLazyCollectionClass(self):
from .. import LazyCollection
return LazyCollection
def _makeOne(self):
return self._getTargetClass().from_data({
'object': 'dispute',
'id': 'dspt_test',
'livemode': False,
'location': '/disputes/dspt_test',
'amount': 100000,
'currency': 'thb',
'status': 'open',
'message': None,
'charge': 'chrg_test',
'created': '2015-03-23T05:24:39Z'
})
@mock.patch('requests.get')
def test_retrieve(self, api_call):
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt",
"amount": 100000,
"currency": "thb",
"status": "pending",
"message": null,
"charge": "chrg_test",
"created": "2015-03-23T05:24:39Z"
}""")
dispute = class_.retrieve('dspt_test')
self.assertTrue(isinstance(dispute, class_))
self.assertEqual(dispute.id, 'dspt_test')
self.assertEqual(dispute.amount, 100000)
self.assertEqual(dispute.currency, 'thb')
self.assertEqual(dispute.status, 'pending')
self.assertEqual(dispute.charge, 'chrg_test')
self.assertEqual(dispute.message, None)
self.assertRequest(
api_call,
'https://api.omise.co/disputes/dspt_test')
self.mockResponse(api_call, """{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt_test",
"amount": 100000,
"currency": "thb",
"status": "pending",
"message": "Foobar Baz",
"charge": "chrg_test",
"created": "2015-03-23T05:24:39Z"
}""")
dispute.reload()
self.assertEqual(dispute.message, 'Foobar Baz')
@mock.patch('requests.get')
def test_retrieve_no_args(self, api_call):
class_ = self._getTargetClass()
collection_class_ = self._getCollectionClass()
self.mockResponse(api_call, """{
"object": "list",
"from": "1970-01-01T07:00:00+07:00",
"to": "2015-03-23T05:24:39+07:00",
"offset": 0,
"limit": 20,
"total": 1,
"data": [
{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt_test",
"amount": 100000,
"currency": "thb",
"status": "pending",
"message": "Foobar Baz",
"charge": "chrg_test",
"created": "2015-03-23T05:24:39Z"
}
]
}""")
disputes = class_.retrieve()
self.assertTrue(isinstance(disputes, collection_class_))
self.assertTrue(isinstance(disputes[0], class_))
        self.assertEqual(disputes[0].id, 'dspt_test')
        self.assertEqual(disputes[0].amount, 100000)
self.assertRequest(api_call, 'https://api.omise.co/disputes')
@mock.patch('requests.get')
def test_retrieve_kwargs(self, api_call):
class_ = self._getTargetClass()
collection_class_ = self._getCollectionClass()
self.mockResponse(api_call, """{
"object": "list",
"from": "1970-01-01T07:00:00+07:00",
"to": "2015-03-23T05:24:39+07:00",
"offset": 0,
"limit": 20,
"total": 1,
"data": [
{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt_test",
"amount": 100000,
"currency": "thb",
"status": "closed",
"message": "Foobar Baz",
"charge": "chrg_test",
"created": "2015-03-23T05:24:39Z"
}
]
}""")
disputes = class_.retrieve('closed')
self.assertTrue(isinstance(disputes, collection_class_))
self.assertTrue(isinstance(disputes[0], class_))
        self.assertEqual(disputes[0].id, 'dspt_test')
        self.assertEqual(disputes[0].status, 'closed')
self.assertRequest(api_call, 'https://api.omise.co/disputes/closed')
@mock.patch('requests.get')
def test_list(self, api_call):
class_ = self._getTargetClass()
lazy_collection_class_ = self._getLazyCollectionClass()
self.mockResponse(api_call, """{
"object": "list",
"from": "1970-01-01T07:00:00+07:00",
"to": "2015-03-23T05:24:39+07:00",
"offset": 0,
"limit": 20,
"total": 1,
"data": [
{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt_test",
"amount": 100000,
"currency": "thb",
"status": "pending",
"message": "Foobar Baz",
"charge": "chrg_test",
"created": "2015-03-23T05:24:39Z"
}
]
}""")
disputes = class_.list()
self.assertTrue(isinstance(disputes, lazy_collection_class_))
disputes = list(disputes)
self.assertTrue(isinstance(disputes[0], class_))
        self.assertEqual(disputes[0].id, 'dspt_test')
        self.assertEqual(disputes[0].amount, 100000)
@mock.patch('requests.patch')
def test_update(self, api_call):
dispute = self._makeOne()
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt_test",
"amount": 100000,
"currency": "thb",
"status": "pending",
"message": "Foobar Baz",
"charge": "chrg_test",
"created": "2015-03-23T05:24:39Z"
}""")
self.assertTrue(isinstance(dispute, class_))
self.assertEqual(dispute.message, None)
dispute.message = 'Foobar Baz'
dispute.update()
self.assertEqual(dispute.message, 'Foobar Baz')
self.assertRequest(
api_call,
'https://api.omise.co/disputes/dspt_test',
{'message': 'Foobar Baz'}
)
@mock.patch('requests.patch')
def test_accept(self, api_call):
dispute = self._makeOne()
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"status": "lost"
}""")
self.assertTrue(isinstance(dispute, class_))
self.assertEqual(dispute.status, 'open')
dispute.accept()
self.assertEqual(dispute.status, 'lost')
self.assertRequest(
api_call,
'https://api.omise.co/disputes/dspt_test/accept'
)
@mock.patch('requests.get')
@mock.patch('requests.post')
def test_upload_document(self, api_call, reload_call):
dispute = self._makeOne()
class_ = self._getTargetClass()
self.mockResponse(api_call, """{
"object": "document",
"livemode": false,
"id": "docu_test",
"deleted": false,
"filename": "evidence.png",
"location": "/disputes/dspt_test/documents/docu_test",
"download_uri": null,
"created_at": "2021-02-05T10:40:32Z"
}""")
self.mockResponse(reload_call, """{
"object": "dispute",
"id": "dspt_test",
"livemode": false,
"location": "/disputes/dspt_test",
"currency": "THB",
"amount": 1101000,
"funding_amount": 1101000,
"funding_currency": "THB",
"metadata": {
},
"charge": "chrg_test_5m7wj8yi1pa9vlk9bq8",
"documents": {
"object": "list",
"data": [
{
"object": "document",
"livemode": false,
"id": "docu_test",
"deleted": false,
"filename": "evidence.png",
"location": "/disputes/dspt_test/documents/docu_test",
"download_uri": null,
"created_at": "2021-02-05T10:40:32Z"
}
],
"limit": 20,
"offset": 0,
"total": 1,
"location": "/disputes/dspt_test/documents",
"order": "chronological",
"from": "1970-01-01T00:00:00Z",
"to": "2021-02-05T10:42:02Z"
},
"transactions": [
{
"object": "transaction",
"id": "trxn_test",
"livemode": false,
"currency": "THB",
"amount": 1101000,
"location": "/transactions/trxn_test",
"direction": "debit",
"key": "dispute.started.debit",
"origin": "dspt_test",
"transferable_at": "2021-02-04T12:08:04Z",
"created_at": "2021-02-04T12:08:04Z"
}
],
"admin_message": null,
"message": null,
"reason_code": "goods_or_services_not_provided",
"reason_message": "Services not provided or Merchandise not received",
"status": "open",
"closed_at": null,
"created_at": "2021-02-04T12:08:04Z"
}""")
self.assertTrue(isinstance(dispute, class_))
files = tempfile.TemporaryFile()
document = dispute.upload_document(files)
files.close()
self.assertEqual(dispute.id, 'dspt_test')
self.assertEqual(document.filename, 'evidence.png')
self.assertUpload(api_call, 'https://api.omise.co/disputes/dspt_test/documents', files)
| mit | 3,960,135,462,406,913,000 | 32.977778 | 95 | 0.48435 | false |
colour-science/colour-science.org | tasks.py | 1 | 1699 | # -*- coding: utf-8 -*-
"""
Invoke - Tasks
==============
"""
from __future__ import unicode_literals
from invoke import task
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['formatting', 'quality', 'build']
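# These tasks are meant to be run through Invoke's CLI, e.g. (assuming a
# standard Invoke setup): `invoke formatting`, `invoke quality` or
# `invoke build`; the `@task(formatting, quality)` decorator below makes
# `build` run the other two tasks first.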
@task
def formatting(ctx, yapf=True):
"""
Formats the codebase with *Yapf*.
Parameters
----------
ctx : invoke.context.Context
Context.
yapf : bool, optional
Whether to format the codebase with *Yapf*.
Returns
-------
bool
Task success.
"""
if yapf:
print('Formatting codebase with "Yapf"...')
ctx.run('yapf -p -i -r --exclude \'.git\' --exclude \'cache\' .')
@task
def quality(ctx, flake8=True):
"""
    Checks the codebase with *Flake8*.
Parameters
----------
ctx : invoke.context.Context
Context.
flake8 : bool, optional
Whether to check the codebase with *Flake8*.
Returns
-------
bool
Task success.
"""
if flake8:
print('Checking codebase with "Flake8"...')
ctx.run('flake8 --max-line-length=120 --exclude \'cache\'')
@task(formatting, quality)
def build(ctx):
"""
Builds the project.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
print('Building...')
ctx.run('nikola build')
| bsd-3-clause | 5,180,346,486,369,861,000 | 18.988235 | 78 | 0.570924 | false |
jeromekelleher/msprime | tests/__init__.py | 1 | 1304 | #
# Copyright (C) 2015-2018 University of Oxford
#
# This file is part of msprime.
#
# msprime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# msprime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with msprime. If not, see <http://www.gnu.org/licenses/>.
#
"""
Common code for the msprime test cases.
"""
import unittest
import numpy as np
class SequenceEqualityMixin:
"""
Overwrites unittest.TestCase.assertEqual to work with numpy arrays.
Note: unittest.TestCase.assertSequenceEqual also fails to work with
numpy arrays, and assertEqual works with ordinary lists/tuples anyway.
"""
def assertEqual(self, it1, it2, msg=None):
if isinstance(it1, np.ndarray):
it1 = list(it1)
if isinstance(it2, np.ndarray):
it2 = list(it2)
unittest.TestCase.assertEqual(self, it1, it2, msg=msg)
| gpl-3.0 | 6,612,686,942,591,749,000 | 31.6 | 74 | 0.718558 | false |
OpenAcademy-OpenStack/nova-scheduler | nova/tests/api/openstack/compute/plugins/v3/test_simple_tenant_usage.py | 1 | 14065 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
import webob
from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def get_fake_db_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, FAKE_INST_TYPE))
# NOTE(mriedem): We use fakes.stub_instance since it sets the fields
# needed on the db instance for converting it to an object, but we still
# need to override system_metadata to use our fake flavor.
inst = fakes.stub_instance(
id=instance_id,
uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
flavor_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state)
inst['system_metadata'] = sys_meta
return inst
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host):
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
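# The stub above fabricates TENANTS * SERVERS instances, assigning each
# consecutive block of SERVERS instances to one tenant via integer division,
# so every tenant accrues identical per-server usage totals in the tests.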
@mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageTest, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Simple_tenant_usage'])
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
'/v3/os-simple-tenant-usage?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_index_with_invalid_time_format(self):
req = webob.Request.blank(
'/v3/os-simple-tenant-usage?start=%s&end=%s' %
('aa', 'bb'))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 400)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def test_verify_show_with_invalid_time_format(self):
tenant_id = 0
req = webob.Request.blank(
'/v3/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, 'aa', 'bb'))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.user_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 400)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
'/v3/os-simple-tenant-usage?'
'detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.admin_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
'/v3/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.user_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
(x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
for j in xrange(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)
self.assertEqual(int(servers[j]['hours']), HOURS)
self.assertIn(servers[j]['instance_id'], uuids)
def test_verify_show_cant_view_other_tenant(self):
req = webob.Request.blank(
'/v3/os-simple-tenant-usage/'
'faketenant_0?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
rules = {
"compute_extension:simple_tenant_usage:show":
common_policy.parse_rule([
["role:admin"], ["project_id:%(project_id)s"]
])
}
common_policy.set_rules(common_policy.Rules(rules))
try:
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.alt_user_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 403)
finally:
policy.reset()
def test_get_tenants_usage_with_bad_start_date(self):
future = NOW + datetime.timedelta(hours=HOURS)
tenant_id = 0
req = webob.Request.blank(
'/v3/os-simple-tenant-usage/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, future.isoformat(), NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app_v3(
fake_auth_context=self.user_context,
init_only=('os-simple-tenant-usage',
'servers')))
self.assertEqual(res.status_int, 400)
class SimpleTenantUsageControllerTest(test.TestCase):
def setUp(self):
super(SimpleTenantUsageControllerTest, self).setUp()
self.controller = simple_tenant_usage.SimpleTenantUsageController()
self.context = context.RequestContext('fakeuser', 'fake-project')
self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
tenant_id=self.context.project_id,
vm_state=vm_states.DELETED)
# convert the fake instance dict to an object
self.inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), self.baseinst)
def test_get_flavor_from_sys_meta(self):
# Non-deleted instances get their type information from their
# system_metadata
with mock.patch.object(db, 'instance_get_by_uuid',
return_value=self.baseinst):
flavor = self.controller._get_flavor(self.context,
self.inst_obj, {})
self.assertEqual(flavor_obj.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_non_deleted_with_id_fails(self):
# If an instance is not deleted and missing type information from
# system_metadata, then that's a bug
self.inst_obj.system_metadata = {}
self.assertRaises(KeyError,
self.controller._get_flavor, self.context,
self.inst_obj, {})
def test_get_flavor_from_deleted_with_id(self):
# Deleted instances may not have type info in system_metadata,
# so verify that they get their type from a lookup of their
# instance_type_id
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertEqual(flavor_obj.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_deleted_with_id_of_deleted(self):
# Verify the legacy behavior of instance_type_id pointing to a
# missing type being non-fatal
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
self.inst_obj.instance_type_id = 99
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertIsNone(flavor)
| apache-2.0 | -3,819,851,166,597,155,300 | 40.612426 | 79 | 0.549591 | false |
mohamedhagag/community-addons | openeducat_erp/op_health/__init__.py | 1 | 1089 | # -*- coding: utf-8 -*-
###############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2009-TODAY Tech-Receptives(<http://www.techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import op_health
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -163,294,289,551,092,320 | 42.56 | 79 | 0.61708 | false |
SatoshiNakamotoGeoscripting/SatoshiNakamotoGeoscripting | Tweets project/sentimentAnalyzerVader.py | 1 | 4995 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 16:23:07 2017
@author: user
"""
## FOUND HERE http://www.nltk.org/howto/sentiment.html
## Source code http://www.nltk.org/_modules/nltk/sentiment/vader.html
## http://www.nltk.org/api/nltk.sentiment.html
## Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model
##for Sentiment Analysis of Social Media Text. Eighth International Conference on
## Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
## http://www.postgresqltutorial.com/postgresql-python
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import psycopg2
import sys
reload(sys) #Prevents errors with utf-8 encoding not working properly
sys.setdefaultencoding('utf8')
def getTweetsFromDB(dbname, user, password, table_name):
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect("dbname={} user={} password={}".format(dbname, user, password))
cursor = conn.cursor()
# Retrieve data from database and extract coordinates
cursor.execute("SELECT * FROM {} WHERE lang = 'en' or lang = 'en-GB'".format(table_name))
records = cursor.fetchall()
list_of_tweets = []
for i, record in enumerate(records):
tweet_text = record[11].encode('utf-8')
tweet_id = record[0]
list_of_tweets.append([tweet_id,tweet_text])
return list_of_tweets
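# Note (general psycopg2 guidance, not from the original author): table names
# cannot be bound as query parameters, which is why str.format is used above.
# For untrusted input, psycopg2's sql module is the safe alternative, e.g.:
#   from psycopg2 import sql
#   cursor.execute(sql.SQL("SELECT * FROM {}").format(sql.Identifier(table_name)))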
def SentimentAnalyzer(tweets):
sid = SentimentIntensityAnalyzer() #need to nltk.download() to use all the packages
sentiment_tweets = []
#for i in range(10):
for tweet in tweets:
        tweet_id = int(tweet[0])
ss = sid.polarity_scores(tweet[1])
if ss['compound'] <= -0.293:
label = 'negative'
elif ss['compound'] >= 0.293:
label = 'positive'
else:
label = 'neutral'
sentiment = ss['compound']
sentiment_tweets.append((tweet_id,sentiment,label))
return sentiment_tweets
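# Note on the +/-0.293 cutoffs above (hedged; the script gives no source):
# VADER's 'compound' score is normalised to [-1, 1], and the reference
# implementation suggests +/-0.05 as the neutral band, so this wider band
# labels only strongly polarised tweets, e.g.:
#   sid = SentimentIntensityAnalyzer()
#   sid.polarity_scores("I love this!")['compound']     # well above 0.293 -> 'positive'
#   sid.polarity_scores("Meeting at 10am")['compound']  # near 0.0 -> 'neutral'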
## Should we connect with the file already named createTable?
def createTable(db_name, user, password, table_name, overwrite = False):
try:
con = psycopg2.connect("dbname={} user={} password={}".format(db_name, user, password))
cur = con.cursor()
except:
print "oops error"
if overwrite == True:
del_table_query = """DROP TABLE IF EXISTS {table_name};""".format(table_name = table_name)
cur.execute(del_table_query)
insert_query = """CREATE TABLE IF NOT EXISTS {table_name} (
id bigint,
label varchar(15),
sentiment numeric);
""".format(table_name = table_name)
cur.execute(insert_query)
con.commit()
cur.close()
con.close()
def insertSentiments(db_name, user, password, table_name, sentiment_tweets):
try:
con = psycopg2.connect("dbname={} user={} password={}".format(db_name, user, password))
cur = con.cursor()
except:
print "oops error"
for tweet in sentiment_tweets:
        insert_query = r"""INSERT INTO {table_name} VALUES (%s,%s,%s)""".format(table_name=table_name)
data = (tweet[0],tweet[2],tweet[1])
cur.execute(insert_query, data)
con.commit()
def updateColumns(db_name, user, password,target_table,source_table, list_columns, list_type):
try:
con = psycopg2.connect("dbname={} user={} password={}".format(db_name, user, password))
cur = con.cursor()
except:
print "oops error"
for i in range(len(list_columns)):
drop_column = """
ALTER TABLE {target_table} DROP COLUMN IF EXISTS {column_name};
""".format(target_table = target_table, column_name = list_columns[i])
cur.execute(drop_column)
add_column = """
ALTER TABLE {target_table} ADD COLUMN {column_name} {columntype};
""".format(target_table = target_table, column_name=list_columns[i], columntype =list_type[i])
cur.execute(add_column)
update = """
UPDATE {target_table} t2
SET {column_name} = t1.{column_name}
FROM {source_table} t1
WHERE t2.id = t1.id
""".format(target_table=target_table,column_name=list_columns[i],source_table=source_table)
cur.execute(update)
#AND t2.val2 IS DISTINCT FROM t1.val1 -- optional, to avoid empty updates
con.commit()
tweets = getTweetsFromDB("tweets","user","user","trumptweets2")
sentiment_tweets = SentimentAnalyzer(tweets)
createTable(db_name="tweets", user="user", password="user", table_name = "sentiment_tweets", overwrite = True)
insertSentiments("tweets","user","user","sentiment_tweets",sentiment_tweets)
updateColumns("tweets", "user", "user","trumptweets2","sentiment_tweets",["label","sentiment"],["varchar(15)","numeric"])
| mit | -4,219,276,119,546,220,000 | 38.96 | 121 | 0.624825 | false |
reissmann/PyTgen | configs/xp1.py | 1 | 5767 | '''
Copyright (c) 2012 Sven Reissmann <[email protected]>
This file is part of the PyTgen traffic generator.
PyTgen is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyTgen is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyTgen. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
#
# This is a default configuration for the SecMonet test network
#
class Conf(object):
# maximum number of worker threads that can be used to execute the jobs.
# the program will start using 3 threads and spawn new ones if needed.
# this setting depends on the number of jobs that have to be executed
# simultaneously (not the number of jobs given in the config file).
maxthreads = 15
# set to "logging.INFO" or "logging.DEBUG"
loglevel = logging.DEBUG
# ssh commands that will be randomly executed by the ssh traffic generator
ssh_commands = ['ls', 'cd', 'cd /etc', 'ps ax', 'date', 'mount', 'free', 'vmstat',
'touch /tmp/tmpfile', 'rm /tmp/tmpfile', 'ls /tmp/tmpfile',
'tail /etc/hosts', 'tail /etc/passwd', 'tail /etc/fstab',
'cat /var/log/messages', 'cat /etc/group', 'cat /etc/mtab']
# urls the http generator will randomly fetch from
http_extern = ['http://web.extern.ndsec']
http_intern = ['http://web.intern.ndsec']
# a number of files that will randomly be used for ftp upload
ftp_put = ['S:/share/files/file%s' % i for i in xrange(0, 9)]
# a number of files that will randomly be used for ftp download
ftp_get = ['~/files/file%s' % i for i in xrange(0, 9)]
# array of source-destination tuples for sftp upload
sftp_put = [('S:/share/files/file%s' % i, '/tmp/file%s' % i) for i in xrange(0, 9)]
# array of source-destination tuples for sftp download
sftp_get = [('/media/share/files/file%s' % i, 'S:/share/files/tmp/file%s' % i) for i in xrange(0, 9)]
# significant part of the shell prompt to be able to recognize
# the end of a telnet data transmission
telnet_prompt = "$ "
# job configuration (see config.example.py)
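    # Reading of each entry, inferred from the samples below (hedged; the
    # authoritative description lives in config.example.py):
    #   (generator, [(start_h, start_m), (stop_h, stop_m), (every_min, every_s)], args)
    # e.g. ('ping_gen', [(12, 0), (18, 0), (240, 0)], ['ssh.intern.ndsec', 4])
    # would ping ssh.intern.ndsec 4 times every 240 minutes between 12:00 and 18:00.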
jobdef = [
# ping
# ('ping_gen', [(12, 0), (18, 0), (240, 0)], ['ssh.intern.ndsec', 4]),
# ('ping_gen', [(16, 0), (16, 30), (5, 0)], ['127.0.0.1', 4]),
#
# http (intern)
# ('http_gen', [(8, 0), (16, 30), (60, 0)], [http_intern, 12, 30]),
# ('http_gen', [(8, 55), (9, 30), (5, 0)], [http_intern, 5, 20]),
# ('http_gen', [(12, 0), (12, 30), (2, 0)], [http_intern, 6, 10]),
('http_gen', [(10, 50), (12, 0), (10, 0)], [http_intern, 2, 10]),
('http_gen', [(15, 0), (17, 30), (30, 0)], [http_intern, 8, 20]),
#
# http (extern)
('http_gen', [(12, 0), (12, 30), (5, 0)], [http_extern, 10, 20]),
('http_gen', [(8, 0), (17, 0), (30, 0)], [http_extern, 5, 30]),
('http_gen', [(8, 0), (12, 0), (60, 0)], [http_extern, 30, 30]),
('http_gen', [(8, 0), (17, 0), (90, 0)], [http_extern, 10, 30]),
# ('http_gen', [(12, 0), (12, 10), (5, 0)], [http_extern, 15, 20]),
#
# smtp
('smtp_gen', [(9, 0), (18, 0), (120, 0)], ['mail.extern.ndsec', 'mail1', 'mail', '[email protected]', '[email protected]']),
#
# ftp
# ('ftp_gen', [(9, 0), (11, 0), (15, 0)], ['ftp.intern.ndsec', 'ndsec', 'ndsec', ftp_put, ftp_get, 10, False, 5]),
('ftp_gen', [(10, 0), (18, 0), (135, 0)], ['ftp.intern.ndsec', 'ndsec', 'ndsec', ftp_put, [], 2, False]),
#
# nfs / smb
('copy_gen', [(9, 0), (12, 0), (90, 0)], [None, 'Z:/tmp/dummyfile.txt', 30]),
('copy_gen', [(10, 0), (16, 0), (120, 0)], [None, 'Z:/tmp/dummyfile.txt', 80]),
# ('copy_gen', [(9, 0), (18, 0), (0, 10)], ['file1', 'file2']),
#
# telnet
# ('telnet_gen', [(9, 0), (18, 0), (60, 0)], ['127.0.0.1', None, 'user', 'pass', 5, ssh_commands, telnet_prompt, 10]),
('telnet_gen', [(9, 0), (18, 0), (100, 0)], ['telnet.intern.ndsec', 23, 'ndsec', 'ndsec', 2, [], telnet_prompt]),
#
# ssh
# ('ssh_gen', [(9, 0), (18, 0), (120, 0)], ['ssh.intern.ndsec', 22, 'ndsec', 'ndsec', 5, ssh_commands]),
('ssh_gen', [(9, 0), (18, 0), (240, 0)], ['ssh.intern.ndsec', 22, 'ndsec', 'ndsec', 30, [], 20]),
# ('ssh_gen', [(9, 0), (18, 0), (120, 0)], ['192.168.10.50', 22, 'dummy1', 'dummy1', 5, ssh_commands]),
# ('ssh_gen', [(12, 0), (14, 0), (120, 0)], ['ssh.intern.ndsec', 22, 'dummy1', 'wrongpass', 5, ssh_commands]),
#
# sftp
# ('sftp_gen', [(17, 0), (18, 0), (60, 0)], ['127.0.0.1', 22, 'user', 'pass', sftp_put, sftp_get, 5, 1]),
#
# xmpp
('xmpp_gen', [(9, 0), (15, 0), (200, 0)], ['xmpp.intern.ndsec', 5222, '[email protected]', 'xmpp', 'xmpp12', 120, ['xmpp%[email protected]' % i for i in xrange(1, 15)]]),
#
# reboot
# ('reboot_gen', [(7, 50), (8, 0), (10, 0)], []),
# ('reboot_gen', [(13, 0), (13, 5), (5, 0)], [])
]
| gpl-3.0 | 3,112,552,111,622,368,000 | 50.035398 | 193 | 0.509277 | false |
mikeboers/C3Linearize | setup.py | 1 | 1166 |
from distutils.core import setup
setup(
name='C3Linearize',
version='0.1.0',
description='Python implementation of the C3 linearization algorithm.',
url='http://github.com/mikeboers/C3Linearize',
py_modules=['c3linearize'],
author='Mike Boers',
author_email='[email protected]',
license='BSD-3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bsd-3-clause | -6,231,679,453,400,698,000 | 34.333333 | 75 | 0.59777 | false |
mdbartos/RIPS | temperature/census_year_compat.py | 1 | 1057 | import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import tools
census_old = '/home/kircheis/data/shp/census/census_tracts_all/census_tracts_1990.shp'
census_new = '/home/kircheis/data/shp/census/census_tracts_all/census_tracts_2014.shp'
df_90 = gpd.read_file(census_old)
df_14 = gpd.read_file(census_new)
df_14_c = df_14.copy()
df_14_c['geometry'] = df_14_c.centroid
j = tools.sjoin(df_90, df_14_c, op='contains')
#### FORMAT CENSUS TRACT NAMES
#### NONDECIMAL ENTRIES
j['TRACT_NAME'][~j['TRACT_NAME'].str.contains('\.')] = (j['TRACT_NAME'][~j['TRACT_NAME'].str.contains('\.')] + '00').str.pad(6, side='left', fillchar='0')
#### DECIMAL ENTRIES
j['TRACT_NAME'][j['TRACT_NAME'].str.contains('\.')] = j['TRACT_NAME'][j['TRACT_NAME'].str.contains('\.')].str.replace('.', '').str.pad(6, side='left', fillchar='0')
#### CREATE FIPS
j['GEOID_1990'] = j['ST'].astype(str).str.cat(j['CO'].astype(str)).str.cat(j['TRACT_NAME'])
j_cross = j.rename(columns={'GEOID':'GEOID_2014'})[['GEOID_1990', 'GEOID_2014']].sort('GEOID_1990')
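# Illustrative note (standard FIPS convention, not stated in the original
# file): a tract GEOID is 11 digits = 2-digit state FIPS + 3-digit county
# FIPS + 6-digit tract, e.g. '04' + '013' + '010101' -> '04013010101';
# the padding above rebuilds the 6-digit tract part from TRACT_NAME.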
| mit | 3,268,306,680,844,698,600 | 35.448276 | 164 | 0.658467 | false |
itsrachelfish/gstreamer | preview.py | 1 | 5408 | import os
import time
import signal
import subprocess
from datetime import datetime
from select import select
camera = dict()
command = dict()
currentVideo = dict()
commandTimeout = 5
def startVideo(camera, resolution="width=1280,height=720"):
directory = "camera/" + str(camera) + "/" + datetime.now().strftime("%Y-%m-%d") + "/" + datetime.now().strftime("%H") + "/"
filename = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '.mp4'
    audio = camera + 1  # presumably the matching audio device index (unused here)
    currentVideo['filename'] = filename
currentVideo['time'] = datetime.now()
if not os.path.exists(directory):
os.makedirs(directory)
return subprocess.Popen(['gst-launch',
'-e', 'v4l2src', 'device=/dev/video' + str(camera), '!', 'image/jpeg,' + resolution, '!', 'jpegdec', '!',
'autovideosink'])
#gst-launch -e v4l2src ! 'image/jpeg,width=1280,height=720' ! jpegdec ! tee name=t ! queue ! ffenc_mpeg4 bitrate=3200000 ! mp4mux name=mux ! filesink location=example.mp4 autoaudiosrc ! lamemp3enc ! mux. t. ! queue ! colorspace ! autovideosink
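# A hedged sketch (added for illustration, never called by the menu loop
# below): launching the recording pipeline from the comment above via
# subprocess, teeing the camera into an MP4 file and a preview window.
# Element names follow the gst-launch 0.10 syntax already used here; treat
# the whole function as an assumption, not the original author's code.
def startRecording(camera, filename, resolution="width=1280,height=720"):
    return subprocess.Popen(['gst-launch', '-e',
        'v4l2src', 'device=/dev/video' + str(camera), '!',
        'image/jpeg,' + resolution, '!', 'jpegdec', '!',
        'tee', 'name=t', '!', 'queue', '!',
        'ffenc_mpeg4', 'bitrate=3200000', '!',
        'mp4mux', 'name=mux', '!', 'filesink', 'location=' + filename,
        'autoaudiosrc', '!', 'lamemp3enc', '!', 'mux.',
        't.', '!', 'queue', '!', 'colorspace', '!', 'autovideosink'])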
def startCam(camera, cameraNumber, resolution = "big"):
if(resolution == 'big'):
resolution = "width=1280,height=720"
elif(resolution == 'medium' or resolution == 'med'):
resolution = "width=640,height=480"
cameraNumber = cameraNumber - 1
camera[cameraNumber] = startVideo(cameraNumber, resolution)
return camera
def stopCam(camera, cameraNumber):
    cameraNumber = cameraNumber - 1
    # stop this camera's preview by interrupting its gst-launch process,
    # mirroring the SIGINT shutdown used in stopCams()
    camera[cameraNumber].send_signal(signal.SIGINT)
    return camera
def startCams(camera):
camera[0] = startVideo(0)
camera[1] = startVideo(1)
camera[2] = startVideo(2)
camera[3] = startVideo(3)
# camera[4] = startVideo(4)
# camera[5] = startVideo(5)
return camera
def stopCams(camera):
# camera[0].send_signal(signal.SIGINT);
# camera[1].send_signal(signal.SIGINT);
# camera[2].send_signal(signal.SIGINT);
subprocess.Popen(['killall', '-INT', 'gst-launch-0.10'])
time.sleep(1)
return camera
def restartCams(camera):
camera = stopCams(camera)
camera = startCams(camera)
return camera
camera = startCams(camera)
#camera1 = startVideo(0)
#camera2 = startVideo(1)
#camera3 = startVideo(2)
while 1:
    command['input'] = raw_input("> ")
if(command['input']):
command['list'] = command['input'].lower()
command['list'] = command['list'].split()
if len(command['list']) <= 2:
if len(command['list']) <= 1:
command['list'].append(0)
command['list'].append(0)
if command['list'][0] == 'quit':
print "Closing cameras..."
camera = stopCams(camera)
print "Goodbye!"
break
elif command['list'][0] == 'restart':
print "Restarting cameras..."
camera = restartCams(camera)
elif command['list'][0] == 'start':
print type(command['list'][1])
if(command['list'][1]):
if(command['list'][1] == 'all'):
                    # startCams() takes only the camera dict
                    camera = startCams(camera)
else:
cameraNumber = int(command['list'][1])
if len(command['list']) > 2 and command['list'][2]:
camera = startCam(camera, cameraNumber, command['list'][2])
else:
camera = startCam(camera, cameraNumber)
else:
cameraNumber = raw_input("What number? \n> ")
cameraNumber = int(cameraNumber)
camera = startCam(camera, cameraNumber)
elif command['list'][0] == 'stop':
if(command['list'][1]):
if(command['list'][1] == 'all'):
camera = stopCams(camera)
else:
cameraNumber = int(command['list'][1])
                    # stopCam() takes no resolution argument, so any extra
                    # token on the command line is ignored here
                    camera = stopCam(camera, cameraNumber)
else:
cameraNumber = raw_input("What number? \n> ")
if(cameraNumber == 'all'):
camera = stopCams(camera)
else:
cameraNumber = int(cameraNumber)
camera = stopCam(camera, cameraNumber)
# camera = stopCams(camera)
elif command['list'][0] == 'kill':
subprocess.Popen(['killall', 'gst-launch-0.10'])
elif command['list'][0] == 'time':
print "The time is... " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print "The video started at... " + currentVideo['time'].strftime("%Y-%m-%d %H:%M:%S")
            timeElapsed = datetime.now() - currentVideo['time']
            print "Time elapsed... " + str(timeElapsed.seconds)
elif command['input']:
# print "Saving annotation: " + command + " at " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open("annotations.txt", "a") as myfile:
myfile.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " - " + command['input'] + "\n")
| gpl-2.0 | -1,269,651,193,475,749,600 | 33.44586 | 248 | 0.527922 | false |
Mausy5043/ubundiagd | daemon15.py | 1 | 4996 | #!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015] (deprecated)
# daemon15.py counts the number of lines in selected logfiles.
# These are all counters, therefore no averaging is needed.
import syslog, traceback
import os, sys, time, math, commands
from subprocess import check_output
from libdaemon import Daemon
import ConfigParser
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
leaf = os.path.realpath(__file__).split('/')[-2]
os.nice(10)
class MyDaemon(Daemon):
def run(self):
iniconf = ConfigParser.ConfigParser()
inisection = "15"
home = os.path.expanduser('~')
s = iniconf.read(home + '/' + leaf + '/config.ini')
if DEBUG: print "config file : ", s
if DEBUG: print iniconf.items(inisection)
reportTime = iniconf.getint(inisection, "reporttime")
cycles = iniconf.getint(inisection, "cycles")
samplesperCycle = iniconf.getint(inisection, "samplespercycle")
flock = iniconf.get(inisection, "lockfile")
fdata = iniconf.get(inisection, "resultfile")
samples = samplesperCycle * cycles # total number of samples averaged
sampleTime = reportTime/samplesperCycle # time [s] between samples
cycleTime = samples * sampleTime # time [s] per cycle
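    # Worked example (illustrative; the real values come from config.ini):
    # reporttime=60, samplespercycle=1 and cycles=3 give samples=3,
    # sampleTime=60 s and cycleTime=180 s of history per window.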
data = [] # array for holding sampledata
while True:
try:
startTime = time.time()
result = do_work().split(',')
data = map(int, result)
if (startTime % reportTime < sampleTime):
do_report(data, flock, fdata)
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(waitTime)
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print "Unexpected error:"
print e.message
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def do_work():
# 3 datapoints gathered here
kernlog=messlog=syslog=0
if IS_SYSTEMD:
# -p, --priority=
# Filter output by message priorities or priority ranges. Takes either a single numeric or textual log level (i.e.
# between 0/"emerg" and 7/"debug"), or a range of numeric/text log levels in the form FROM..TO. The log levels are the
# usual syslog log levels as documented in syslog(3), i.e. "emerg" (0), "alert" (1), "crit" (2), "err" (3),
# "warning" (4), "notice" (5), "info" (6), "debug" (7). If a single log level is specified, all messages with this log
# level or a lower (hence more important) log level are shown. If a range is specified, all messages within the range
# are shown, including both the start and the end value of the range. This will add "PRIORITY=" matches for the
# specified priorities.
critlog = commands.getoutput("journalctl --since=00:00:00 --no-pager -p 0..3 |wc -l").split()[0]
warnlog = commands.getoutput("journalctl --since=00:00:00 --no-pager -p 4 |wc -l").split()[0]
syslog = commands.getoutput("journalctl --since=00:00:00 --no-pager |wc -l").split()[0]
else:
critlog = wc("/var/log/kern.log")
warnlog = wc("/var/log/smartd.log")
syslog = wc("/var/log/syslog")
return '{0}, {1}, {2}'.format(critlog, warnlog, syslog)
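# Example return value (illustrative): '12, 3, 8751' means 12 crit/err
# entries (priority 0..3), 3 warnings (priority 4) and 8751 total log
# lines counted since midnight.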
def wc(filename):
return int(check_output(["wc", "-l", filename]).split()[0])
def do_report(result, flock, fdata):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
result = ', '.join(map(str, result))
lock(flock)
with open(fdata, 'a') as f:
f.write('{0}, {1}\n'.format(outDate, result) )
unlock(flock)
def lock(fname):
open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
def syslog_trace(trace):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line:
syslog.syslog(syslog.LOG_ALERT,line)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + leaf + '/15.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: {0!s} start|stop|restart|foreground".format(sys.argv[0])
sys.exit(2)
| mit | -2,956,924,764,112,557,600 | 35.735294 | 128 | 0.630705 | false |
ixc/django-reversion | docs/conf.py | 1 | 7971 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# django-reversion documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 29 09:17:37 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-reversion'
copyright = '2013, Dave Hall'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.10'
# The full version, including alpha/beta/rc tags.
release = '1.10.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-reversiondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-reversion.tex', 'django-reversion Documentation',
'Dave Hall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-reversion', 'django-reversion Documentation',
['Dave Hall'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-reversion', 'django-reversion Documentation',
'Dave Hall', 'django-reversion', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-3-clause | 114,505,575,416,278,370 | 31.271255 | 80 | 0.70631 | false |
Hugovdberg/QgsTIM | resources_rc.py | 1 | 5340 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: wo 8. jul 13:55:36 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x04\x0a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x02\x15\
\x16\x11\x2c\x9d\x48\x83\xbb\x00\x00\x03\x8a\x49\x44\x41\x54\x48\
\xc7\xad\x95\x4b\x68\x5c\x55\x18\xc7\x7f\xe7\xdc\x7b\x67\xe6\xce\
\x4c\x66\x26\x49\xd3\x24\x26\xa6\xc6\xf8\x40\x21\xa5\x04\xb3\x28\
\xda\x98\x20\xa5\x0b\xad\x55\xa8\x2b\xc5\x50\x1f\xa0\x6e\x34\x2b\
\x45\x30\x14\x02\xba\x52\x69\x15\x17\x66\x63\x45\x97\x95\xa0\xad\
\x0b\xfb\xc0\x06\x25\xb6\x71\x61\x12\x41\x50\xdb\x2a\x21\xd1\xe2\
\x24\xf3\x9e\xc9\xcc\xbd\xe7\x1c\x17\x35\x43\x1e\x33\x21\xb6\xfd\
\x56\x87\xf3\x9d\xfb\xfb\x1e\xf7\xff\x9d\x23\x8c\x31\x43\x95\xf4\
\x85\x1e\x3f\x3b\x35\xac\xfd\xcc\x43\xdc\xa4\x49\x3b\xfe\x9d\x1d\
\xdb\x7b\x22\x90\x78\xf8\xb2\x28\xa7\xbe\x7d\xc1\x4b\x9d\x79\xdf\
\x18\x15\xe5\x16\x99\x10\x56\xde\x69\xdc\x3f\x22\xfd\xec\xd4\xf0\
\xad\x04\x03\x18\xa3\xa2\x7e\x76\x6a\x58\xde\x68\x2b\xb4\x36\xf8\
\xbe\xc6\x18\x53\xdb\xef\xe7\xfa\xec\xed\x67\x63\x10\x42\x00\xf0\
\xfb\xd5\x65\x2a\x15\x45\xc7\x6d\x0d\x00\xc4\xa2\xc1\xaa\x6f\x0d\
\x3e\x6c\xab\xc2\x1c\x56\xa4\x77\x4b\xb0\xf2\x35\x15\x5f\x21\x85\
\xe0\xc8\x6b\x5f\x92\x2d\x37\x33\x39\xf9\x03\x27\x8e\x1f\xa2\xf7\
\xbe\x9d\x04\x1c\x0b\x37\xe4\xac\xff\xa6\x30\x87\xbd\xba\x00\x6a\
\x06\x79\xe5\xf5\xaf\x89\xd9\x92\xc5\xcc\x0a\xd9\x7c\x19\xcf\xe9\
\xe2\xe4\xa9\x2f\x78\x7c\xff\x01\x72\x85\x0a\x2b\x65\x1f\xa5\x4c\
\xb5\xb2\x55\x16\x80\xbd\x31\xda\xda\x20\x1f\x7d\x3e\xcd\xc2\xfd\
\x59\xa6\x93\x39\x92\xd1\x22\xea\x9b\x16\xce\x9d\x3f\xce\xe0\x83\
\x03\x24\x82\x59\x3a\xdb\x7b\x88\xc7\x82\x68\x63\x58\xc9\xcc\x62\
\x8c\x21\x18\xb0\x6a\xc3\x37\x06\x49\x16\xff\x24\x6b\xa5\x49\xbb\
\x25\xbc\xa2\xa6\x21\xbb\x40\x7f\xdf\x00\x83\xbd\x01\x8e\x3c\xd5\
\x45\xd7\x8e\x6b\x9c\x9c\x98\x25\x1a\xb6\xe8\xbe\x3d\xc2\xdd\x77\
\x44\x48\xc4\x1c\x22\xe1\xeb\x58\x59\xaf\xcf\xd3\x33\x29\x2e\x34\
\x2d\x91\x93\x3e\xbe\x34\x78\x01\xc5\xe2\x61\xc5\xae\x72\x8e\x70\
\xc8\xc2\x0d\x5a\xbc\xf5\xee\x2f\x9c\xfa\x3e\x86\x69\x7a\x8e\xcf\
\x26\xe6\xf9\x63\xa1\x44\xa1\xa4\xd0\xda\x6c\x0d\x2f\x15\x7c\xb4\
\x67\x28\x59\x0a\xcf\xd6\x54\xe2\x06\x13\x87\x2b\x6f\x68\xa6\x27\
\xaf\x31\x32\x36\xc7\xb2\x7f\x17\xef\x7d\x7c\x8c\x33\x67\xcf\x12\
\x70\x24\x4a\x69\xd6\x6a\x46\xd6\xd3\x70\x72\xa9\x82\x67\x34\x45\
\xad\x28\xdb\x1a\x15\x34\x98\xff\x46\xed\xef\x37\x0d\x99\xbf\x4a\
\x3c\x30\x38\xc0\xc8\x4b\xaf\x92\x5a\x9c\xe2\xe0\x23\x6d\x74\xb4\
\xba\x84\x5d\x0b\x29\x45\x7d\xb8\x94\x82\x96\xb6\x10\xf3\xc5\x12\
\x2a\xef\x53\x11\x1a\x63\xad\x3f\x93\x19\x85\xf1\xb1\x77\x58\x5a\
\xf8\x99\x97\x9f\xe9\xa6\x75\x47\x90\xc6\xb8\x43\xd8\xb5\xb6\xce\
\xfc\xfa\xfd\x00\xfb\x3e\xf4\xc8\x05\x35\xba\x5e\xeb\x46\x21\xf9\
\xcf\x0a\xa9\x8c\x87\xe3\x48\xdc\x90\xb5\x6e\x98\x6a\xaa\x65\xf2\
\x52\x92\x43\x2f\x5e\xc2\x8c\x02\x1a\x10\xf5\x07\xac\xc3\x75\x70\
\x83\x92\x80\xb3\xf9\xd0\x26\xf8\x8f\xb3\x29\xc6\x3e\xb8\x8c\x19\
\x35\x75\x6b\x7b\x7e\x3c\xca\x45\x0c\x7e\x49\x31\xf4\x58\x3b\xf7\
\xf6\x34\x90\x88\x39\x04\x1c\x59\x1f\xfe\xdb\xd5\x3c\x5f\x9d\x4b\
\x32\xfd\x44\xb2\xba\xd7\xfa\xb6\x60\xcf\xde\x16\xdc\x90\x45\x4c\
\x4a\x2a\x9e\x62\xfe\x4e\xc5\xc8\xc1\x4e\xda\x76\x86\xe8\xe9\x0a\
\xe3\xd8\x92\x58\xd4\xc6\xb2\x44\x6d\x78\x2a\x53\xe1\xca\x7c\x99\
\x63\x5d\xbf\x56\x9d\xbd\x9f\x44\x18\x7a\xba\x95\x27\x0f\xb4\xd3\
\xdc\x18\xc0\xf3\x0d\x52\x40\xd8\xb5\xb0\xa4\x20\x14\xb2\x70\x6c\
\x81\x63\xcb\xaa\x42\xd6\xfd\xb7\xf4\xec\xa3\x06\xa0\x50\x52\xd8\
\x4e\x1b\x7e\x4a\xd3\x31\xf9\x29\xcf\xfe\xd4\x49\x7f\x5f\x13\xfb\
\xfa\x9b\x71\x43\x92\x58\xd4\x21\x18\x90\xac\xde\xb0\x42\x50\x13\
\x58\x33\xf3\x88\x6b\xa1\xfd\x65\x96\xf2\x79\xc6\x43\x7b\xd8\x75\
\x38\xcc\x3d\xdd\xd1\xaa\xcf\x71\xe4\xff\x7f\x91\x56\x33\xaf\xea\
\x37\xe7\xa1\x94\x21\x16\xb5\xd1\x06\x2c\x29\x36\xf5\x72\x9b\x96\
\x95\xc0\xc4\xda\x9d\x78\x83\x43\x53\x22\x80\x65\x09\x1c\xfb\x86\
\xc1\x00\xe7\x25\x70\x14\x48\x6f\x1e\x22\x51\xe3\x75\xd9\xb6\xa5\
\x81\xa3\x32\xb1\xfb\xf4\x0c\x30\xb8\xb1\x82\x9b\xb0\x09\x60\x30\
\xb1\xfb\xf4\xcc\xbf\xa0\xe9\x6e\xae\x5a\xdf\x4b\x81\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x06\
\x05\x7e\x88\xdd\
\x00\x51\
\x00\x67\x00\x73\x00\x54\x00\x49\x00\x4d\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| gpl-2.0 | -5,011,464,189,356,588,000 | 47.545455 | 96 | 0.722097 | false |
google/skia | tools/android/measure_fps.py | 2 | 1394 | #!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import optparse
import re
import subprocess
import time
def query_surfaceflinger_frame_count():
parcel = subprocess.Popen("adb shell service call SurfaceFlinger 1013",
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True).communicate()[0]
if not parcel:
raise Exception("FAILED: adb shell service call SurfaceFlinger 1013")
framecount = re.search("Result: Parcel\(([a-f0-9]+) ", parcel)
if not framecount:
raise Exception("Unexpected result from SurfaceFlinger: " + parcel)
return int(framecount.group(1), 16)
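# Worked example (illustrative reply; real output comes from the device):
# "Result: Parcel(000af3c1  '....')" -> framecount.group(1) == "000af3c1"
# and int("000af3c1", 16) == 717761 frames composited so far.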
def main(interval):
startframe = query_surfaceflinger_frame_count()
starttime = time.time()
while True:
time.sleep(interval)
endframe = query_surfaceflinger_frame_count()
endtime = time.time()
fps = (endframe - startframe) / (endtime - starttime)
print("%.2f" % fps)
startframe = endframe
starttime = endtime
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-i", "--interval", type="int", default="2",
help="Number of seconds to count frames.")
options, args = parser.parse_args()
main(options.interval)
| bsd-3-clause | -8,101,786,456,625,272,000 | 25.807692 | 75 | 0.66571 | false |
samuelsinayoko/kaggle-housing-prices | test_samlib.py | 1 | 1545 | import pandas as pd
import samlib
import pytest
@pytest.fixture
def raw_train():
return pd.read_csv('data/train_prepared_light.csv')
@pytest.fixture
def raw_test():
return pd.read_csv('data/test_prepared_light.csv')
@pytest.fixture
def ds(raw_train, raw_test):
return samlib.DataSet(raw_train, raw_test)
def test_split_merge(ds, raw_train, raw_test):
"""Check the merge and split functions"""
df1, df2 = ds.split(ds.df)
assert all(df1 == raw_train)
assert all(df2 == raw_test)
assert all(ds.merge(df1, df2) == ds.df)
def test_synchronization(ds):
"""Check that if we update df then the train and test sets are
updated accordingly
"""
ds.df = 2 * ds.df
assert all(ds.train == 2 * ds.raw_train)
assert all(ds.test == 2 * ds.raw_test)
def test_copy(ds):
ds1 = ds
ds2 = ds1.copy()
assert not (ds1 is ds2)
assert all(ds1.df == ds2.df)
assert all(ds1.raw_train == ds2.raw_train)
assert all(ds1.train == ds2.train)
assert all(ds1.test == ds2.test)
def test_apply(ds):
ds2 = ds.apply(lambda x: x * 2)
assert not (ds is ds2)
assert all(ds.df == ds2.df * 2)
def test_apply_inplace(ds):
ds_init = ds.copy()
ds2 = ds.apply(lambda x: x * 2, inplace=True)
assert (ds is ds2)
assert all(ds2.df == ds_init.df * 2)
assert all(ds2.raw_train == ds_init.raw_train)
def test_getattr(ds):
"""Get an attribute of the underlying dataframe if possible"""
assert all(ds.columns == ds.df.columns)
assert ds.shape == ds.df.shape
| mit | 2,747,487,203,714,297,000 | 23.52381 | 66 | 0.639482 | false |
openstack/oslo.context | releasenotes/source/conf.py | 1 | 8643 | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2016, oslo.context Developers'
# Release notes do not need a version in the title, they span
# multiple versions.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'oslo.contextReleaseNotesDoc'
# -- Options for LaTeX output ---------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'oslo.contextReleaseNotes.tex',
u'oslo.context Release Notes Documentation',
u'oslo.context Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'oslo.contextReleaseNotes',
u'oslo.context Release Notes Documentation',
[u'oslo.context Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'oslo.contextReleaseNotes',
u'oslo.context Release Notes Documentation',
u'oslo.context Developers', 'oslo.contextReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/oslo.context'
openstackdocs_bug_project = 'oslo.context'
openstackdocs_bug_tag = ''
| apache-2.0 | 6,351,396,031,582,769,000 | 32.370656 | 79 | 0.708435 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/tests/test_sparsifiers.py | 4 | 4043 | """Unit tests for the sparsifier computation functions."""
import pytest
import networkx as nx
from networkx.utils import py_random_state
_seed = 2
def _test_spanner(G, spanner, stretch, weight=None):
"""Test whether a spanner is valid.
This function tests whether the given spanner is a subgraph of the
given graph G with the same node set. It also tests for all shortest
paths whether they adhere to the given stretch.
Parameters
----------
G : NetworkX graph
The original graph for which the spanner was constructed.
spanner : NetworkX graph
The spanner to be tested.
stretch : float
The proclaimed stretch of the spanner.
weight : object
The edge attribute to use as distance.
"""
# check node set
assert set(G.nodes()) == set(spanner.nodes())
# check edge set and weights
for u, v in spanner.edges():
assert G.has_edge(u, v)
if weight:
assert spanner[u][v][weight] == G[u][v][weight]
# check connectivity and stretch
original_length = dict(nx.shortest_path_length(G, weight=weight))
spanner_length = dict(nx.shortest_path_length(spanner, weight=weight))
for u in G.nodes():
for v in G.nodes():
if u in original_length and v in original_length[u]:
assert spanner_length[u][v] <= stretch * original_length[u][v]
@py_random_state(1)
def _assign_random_weights(G, seed=None):
"""Assigns random weights to the edges of a graph.
Parameters
----------
G : NetworkX graph
The original graph for which the spanner was constructed.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
"""
for u, v in G.edges():
G[u][v]["weight"] = seed.random()
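# Illustrative use of the API under test (mirrors the cases below):
#   G = nx.gnp_random_graph(20, 0.4, seed=_seed)
#   H = nx.spanner(G, 4)   # 4-spanner: same node set, subset of G's edges
#   # afterwards, for every pair u, v: dist_H(u, v) <= 4 * dist_G(u, v)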
def test_spanner_trivial():
"""Test a trivial spanner with stretch 1."""
G = nx.complete_graph(20)
spanner = nx.spanner(G, 1, seed=_seed)
for u, v in G.edges:
assert spanner.has_edge(u, v)
def test_spanner_unweighted_complete_graph():
"""Test spanner construction on a complete unweighted graph."""
G = nx.complete_graph(20)
spanner = nx.spanner(G, 4, seed=_seed)
_test_spanner(G, spanner, 4)
spanner = nx.spanner(G, 10, seed=_seed)
_test_spanner(G, spanner, 10)
def test_spanner_weighted_complete_graph():
"""Test spanner construction on a complete weighted graph."""
G = nx.complete_graph(20)
_assign_random_weights(G, seed=_seed)
spanner = nx.spanner(G, 4, weight="weight", seed=_seed)
_test_spanner(G, spanner, 4, weight="weight")
spanner = nx.spanner(G, 10, weight="weight", seed=_seed)
_test_spanner(G, spanner, 10, weight="weight")
def test_spanner_unweighted_gnp_graph():
"""Test spanner construction on an unweighted gnp graph."""
G = nx.gnp_random_graph(20, 0.4, seed=_seed)
spanner = nx.spanner(G, 4, seed=_seed)
_test_spanner(G, spanner, 4)
spanner = nx.spanner(G, 10, seed=_seed)
_test_spanner(G, spanner, 10)
def test_spanner_weighted_gnp_graph():
"""Test spanner construction on an weighted gnp graph."""
G = nx.gnp_random_graph(20, 0.4, seed=_seed)
_assign_random_weights(G, seed=_seed)
spanner = nx.spanner(G, 4, weight="weight", seed=_seed)
_test_spanner(G, spanner, 4, weight="weight")
spanner = nx.spanner(G, 10, weight="weight", seed=_seed)
_test_spanner(G, spanner, 10, weight="weight")
def test_spanner_unweighted_disconnected_graph():
"""Test spanner construction on a disconnected graph."""
G = nx.disjoint_union(nx.complete_graph(10), nx.complete_graph(10))
spanner = nx.spanner(G, 4, seed=_seed)
_test_spanner(G, spanner, 4)
spanner = nx.spanner(G, 10, seed=_seed)
_test_spanner(G, spanner, 10)
def test_spanner_invalid_stretch():
"""Check whether an invalid stretch is caught."""
with pytest.raises(ValueError):
G = nx.empty_graph()
nx.spanner(G, 0)
| gpl-3.0 | -8,946,419,108,409,216,000 | 28.510949 | 78 | 0.644571 | false |
i19870503/i19870503 | Python/kegg_map_color.py | 1 | 1316 | from urllib.parse import urlparse
import urllib.request
import re
import requests as req
from PIL import Image
from io import BytesIO
import pandas as pd
kegg_out = pd.read_table('/home/zluna/Work/kegg/2.xls')
headers = {'User-Agent' : 'Mozilla/5.0 (Linux;Android 6.0;Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML,like Gecko) Chrome/58.0.3029.96 Mobile Safari/537.36'}
url = "https://www.kegg.jp/pathway/map00361+K00462%09red+K01061%09red+K01563%09red+K03119%09red+K06912%09red+K10621%09red+K14583%09red+K14751%09red+K16049%09red"
#list(kegg_out.Pathway)[0].split(':',1)[1].strip(']').lstrip('ko')
#str(kegg_out.KOID[0]).replace(', ', '%09purple+') + '%09purple'
for i in range(len(kegg_out)):
path_name = list(kegg_out.Pathway)[i].split(':',1)[1].strip(']').lstrip('ko')
path_gene = str(kegg_out.KOID[i]).replace(', ', '%09purple+') + '%09purple'
url = "https://www.kegg.jp/pathway/map" + path_name + '+' + path_gene
response = req.get(url, headers=headers)
content = response.text
img_url = re.search(r'<img src=.*pathwayimage.*>', str(content)).group()
img_url.split('"', 3)[1]
img_url = "https://www.kegg.jp" + img_url.split('"', 3)[1]
img = req.get(img_url)
#print(img)
img = Image.open(BytesIO(img.content))
img.save(fp = "Path:" + path_name + '.png')
| gpl-2.0 | -3,536,817,236,411,083,300 | 40.125 | 161 | 0.661094 | false |
ntj/fraktionstool | django/fraktionstool/migrations/0008_auto__add_hilfe.py | 1 | 7704 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Hilfe'
db.create_table(u'fraktionstool_hilfe', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'fraktionstool', ['Hilfe'])
def backwards(self, orm):
# Deleting model 'Hilfe'
db.delete_table(u'fraktionstool_hilfe')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fraktionstool.gremium': {
'Meta': {'ordering': "['name']", 'object_name': 'Gremium'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['fraktionstool.GremiumUser']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'typ': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.GremiumTyp']"})
},
u'fraktionstool.gremiumtyp': {
'Meta': {'object_name': 'GremiumTyp'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'fraktionstool.gremiumuser': {
'Meta': {'object_name': 'GremiumUser'},
'gremium': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.Gremium']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'fraktionstool.gremiumvorhaben': {
'Meta': {'object_name': 'GremiumVorhaben'},
'gremium': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.Gremium']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'vorhaben': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.Vorhaben']"})
},
u'fraktionstool.hilfe': {
'Meta': {'object_name': 'Hilfe'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'fraktionstool.nachricht': {
'Meta': {'object_name': 'Nachricht'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'gremium': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.Gremium']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'vorhaben': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.Vorhaben']"})
},
u'fraktionstool.vorhaben': {
'Meta': {'ordering': "['nummer', 'name']", 'object_name': 'Vorhaben'},
'abstimmung': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'beobachten': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date': ('django.db.models.fields.DateField', [], {}),
'geschlossen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gremien': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['fraktionstool.Gremium']", 'through': u"orm['fraktionstool.GremiumVorhaben']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nummer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'typ': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fraktionstool.VorhabenTyp']"})
},
u'fraktionstool.vorhabentyp': {
'Meta': {'object_name': 'VorhabenTyp'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['fraktionstool'] | gpl-3.0 | -5,301,308,383,767,980,000 | 64.29661 | 196 | 0.553738 | false |
mgp/coding-in-the-real-world-code-samples | code-samples/ch-bittorrent-client-case-study/bencode.py | 1 | 8213 | # Copyright (C) 2001-2002 Bram Cohen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software.
# Written by Petru Paler
def decode_int(x, f):
f += 1
newf = x.index('e', f)
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != 'e':
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
lastkey = None
while x[f] != 'e':
k, f = decode_string(x, f)
if lastkey >= k:
raise ValueError
lastkey = k
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
def bdecode(x):
# Decode a bencoded string to Python objects.
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
def test_bdecode():
try:
bdecode('0:0:')
assert 0
except ValueError:
pass
try:
bdecode('ie')
assert 0
except ValueError:
pass
try:
bdecode('i341foo382e')
assert 0
except ValueError:
pass
assert bdecode('i4e') == 4L
assert bdecode('i0e') == 0L
assert bdecode('i123456789e') == 123456789L
assert bdecode('i-10e') == -10L
try:
bdecode('i-0e')
assert 0
except ValueError:
pass
try:
bdecode('i123')
assert 0
except ValueError:
pass
try:
bdecode('')
assert 0
except ValueError:
pass
try:
bdecode('i6easd')
assert 0
except ValueError:
pass
try:
bdecode('35208734823ljdahflajhdf')
assert 0
except ValueError:
pass
try:
bdecode('2:abfdjslhfld')
assert 0
except ValueError:
pass
assert bdecode('0:') == ''
assert bdecode('3:abc') == 'abc'
assert bdecode('10:1234567890') == '1234567890'
try:
bdecode('02:xy')
assert 0
except ValueError:
pass
try:
bdecode('l')
assert 0
except ValueError:
pass
assert bdecode('le') == []
try:
bdecode('leanfdldjfh')
assert 0
except ValueError:
pass
assert bdecode('l0:0:0:e') == ['', '', '']
try:
bdecode('relwjhrlewjh')
assert 0
except ValueError:
pass
assert bdecode('li1ei2ei3ee') == [1, 2, 3]
assert bdecode('l3:asd2:xye') == ['asd', 'xy']
assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
try:
bdecode('d')
assert 0
except ValueError:
pass
try:
bdecode('defoobar')
assert 0
except ValueError:
pass
assert bdecode('de') == {}
assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}}
try:
bdecode('d3:fooe')
assert 0
except ValueError:
pass
try:
bdecode('di1e0:e')
assert 0
except ValueError:
pass
try:
bdecode('d1:b0:1:a0:e')
assert 0
except ValueError:
pass
try:
bdecode('d1:a0:1:a0:e')
assert 0
except ValueError:
pass
try:
bdecode('i03e')
assert 0
except ValueError:
pass
try:
bdecode('l01:ae')
assert 0
except ValueError:
pass
try:
bdecode('9999:x')
assert 0
except ValueError:
pass
try:
bdecode('l0:')
assert 0
except ValueError:
pass
try:
bdecode('d0:0:')
assert 0
except ValueError:
pass
try:
bdecode('d0:')
assert 0
except ValueError:
pass
try:
bdecode('00:')
assert 0
except ValueError:
pass
try:
bdecode('l-3:e')
assert 0
except ValueError:
pass
try:
bdecode('i-03e')
assert 0
except ValueError:
pass
bdecode('d0:i3ee')
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
__slots__ = ['bencoded']
def __init__(self, s):
self.bencoded = s
def encode_bencached(x,r):
r.append(x.bencoded)
def encode_int(x, r):
r.extend(('i', str(x), 'e'))
def encode_string(x, r):
r.extend((str(len(x)), ':', x))
def encode_list(x, r):
r.append('l')
for i in x:
encode_func[type(i)](i, r)
r.append('e')
def encode_dict(x,r):
r.append('d')
ilist = x.items()
ilist.sort()
for k, v in ilist:
r.extend((str(len(k)), ':', k))
encode_func[type(v)](v, r)
r.append('e')
encode_func = {}
encode_func[type(Bencached(0))] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
try:
from types import BooleanType
encode_func[BooleanType] = encode_int
except ImportError:
pass
def bencode(x):
# Encode a Python object as a bencoded string.
r = []
encode_func[type(x)](x, r)
return ''.join(r)
def test_bencode():
assert bencode(4) == 'i4e'
assert bencode(0) == 'i0e'
assert bencode(-10) == 'i-10e'
assert bencode(12345678901234567890L) == 'i12345678901234567890e'
assert bencode('') == '0:'
assert bencode('abc') == '3:abc'
assert bencode('1234567890') == '10:1234567890'
assert bencode([]) == 'le'
assert bencode([1, 2, 3]) == 'li1ei2ei3ee'
assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee'
assert bencode({}) == 'de'
assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee'
assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'
assert bencode(Bencached(bencode(3))) == 'i3e'
try:
bencode({1: 'foo'})
except TypeError:
return
assert 0
try:
import psyco
psyco.bind(bdecode)
psyco.bind(bencode)
except ImportError:
pass
| apache-2.0 | 809,009,702,031,387,800 | 23.887879 | 122 | 0.57689 | false |
jkthompson/nupic | py/regions/SPRegion.py | 1 | 36420 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import sys
import os
from nupic.bindings.math import GetNTAReal
from nupic.research.FDRCSpatial2 import FDRCSpatial2
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
from nupic.research.spatial_pooler import SpatialPooler as PYSpatialPooler
import nupic.research.fdrutilities as fdru
from nupic.support import getArgumentDescriptions
from PyRegion import PyRegion
gDefaultSpatialImp = 'oldpy'
##############################################################################
def getDefaultSPImp():
return gDefaultSpatialImp
##############################################################################
def getSPClass(spatialImp):
""" Return the class corresponding to the given spatialImp string
"""
if spatialImp == 'py':
return PYSpatialPooler
elif spatialImp == 'cpp':
return CPPSpatialPooler
elif spatialImp == 'oldpy':
return FDRCSpatial2
else:
raise RuntimeError("Invalid spatialImp '%s'. Legal values are: 'py', "
"'cpp', 'oldpy'" % (spatialImp))
##############################################################################
def _buildArgs(f, self=None, kwargs={}):
"""
Get the default arguments from the function and assign as instance vars.
Return a list of 3-tuples with (name, description, defaultValue) for each
argument to the function.
Assigns all arguments to the function as instance variables of SPRegion.
If the argument was not provided, uses the default value.
Pops any values from kwargs that go to the function.
"""
# Get the name, description, and default value for each argument
argTuples = getArgumentDescriptions(f)
argTuples = argTuples[1:] # Remove 'self'
# Get the names of the parameters to our own constructor and remove them
# Check for _originial_init first, because if LockAttributesMixin is used,
# __init__'s signature will be just (self, *args, **kw), but
# _original_init is created with the original signature
#init = getattr(self, '_original_init', self.__init__)
init = SPRegion.__init__
ourArgNames = [t[0] for t in getArgumentDescriptions(init)]
# Also remove a few other names that aren't in our constructor but are
# computed automatically (e.g. numberOfCols for the TP)
# TODO: where does numberOfCols come into SPRegion?
ourArgNames += [
'numberOfCols',
]
for argTuple in argTuples[:]:
if argTuple[0] in ourArgNames:
argTuples.remove(argTuple)
# Build the dictionary of arguments
if self:
for argTuple in argTuples:
argName = argTuple[0]
if argName in kwargs:
# Argument was provided
argValue = kwargs.pop(argName)
else:
# Argument was not provided; use the default value if there is one, and
# raise an exception otherwise
if len(argTuple) == 2:
# No default value
raise TypeError("Must provide value for '%s'" % argName)
argValue = argTuple[2]
# Set as an instance variable if 'self' was passed in
setattr(self, argName, argValue)
# Translate some parameters for backward compatibility
if kwargs.has_key('numActivePerInhArea'):
setattr(self, 'numActiveColumnsPerInhArea', kwargs['numActivePerInhArea'])
kwargs.pop('numActivePerInhArea')
if kwargs.has_key('coincInputPoolPct'):
setattr(self, 'potentialPct', kwargs['coincInputPoolPct'])
kwargs.pop('coincInputPoolPct')
return argTuples
def _getAdditionalSpecs(spatialImp, kwargs={}):
"""Build the additional specs in three groups (for the inspector)
Use the type of the default argument to set the Spec type, defaulting
to 'Byte' for None and complex types
Determines the spatial parameters based on the selected implementation.
It defaults to FDRCSpatial.
"""
typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool', tuple: 'tuple'}
def getArgType(arg):
t = typeNames.get(type(arg), 'Byte')
count = 0 if t == 'Byte' else 1
if t == 'tuple':
t = typeNames.get(type(arg[0]), 'Byte')
count = len(arg)
if t == 'bool':
t = 'UInt32'
return (t, count)
def getConstraints(arg):
t = typeNames.get(type(arg), 'Byte')
if t == 'Byte':
return 'multiple'
elif t == 'bool':
return 'bool'
else:
return ''
# Get arguments from spatial pooler constructors, figure out types of
# variables and populate spatialSpec. The old FDRCSpatialPooler and
# the new SpatialPooler classes have slightly different constructor argument
# names, so include them all as possible arguments.
spatialSpec = {}
FDRSpatialClass = getSPClass(spatialImp)
sArgTuples = _buildArgs(FDRSpatialClass.__init__)
argTuplesNew = _buildArgs(CPPSpatialPooler.__init__)
sArgTuples.extend(argTuplesNew)
for argTuple in sArgTuples:
d = dict(
description=argTuple[1],
accessMode='ReadWrite',
dataType=getArgType(argTuple[2])[0],
count=getArgType(argTuple[2])[1],
constraints=getConstraints(argTuple[2]))
spatialSpec[argTuple[0]] = d
# Add special parameters that weren't handled automatically
# Spatial parameters only!
spatialSpec.update(dict(
columnCount=dict(
description='Total number of columns (coincidences).',
accessMode='Read',
dataType='UInt32',
count=1,
constraints=''),
inputWidth=dict(
description='Size of inputs to the SP.',
accessMode='Read',
dataType='UInt32',
count=1,
constraints=''),
spInputNonZeros=dict(
description='The indices of the non-zero inputs to the spatial pooler',
accessMode='Read',
dataType='UInt32',
count=0,
constraints=''),
spOutputNonZeros=dict(
description='The indices of the non-zero outputs from the spatial pooler',
accessMode='Read',
dataType='UInt32',
count=0,
constraints=''),
spOverlapDistribution=dict(
description="""The overlaps between the active output coincidences
and the input. The overlap amounts for each coincidence are sorted
from highest to lowest. """,
accessMode='Read',
dataType='Real32',
count=0,
constraints=''),
sparseCoincidenceMatrix=dict(
description='The coincidences, as a SparseMatrix',
accessMode='Read',
dataType='Byte',
count=0,
constraints=''),
denseOutput=dict(
description='Score for each coincidence.',
accessMode='Read',
dataType='Real32',
count=0,
constraints=''),
spLearningStatsStr=dict(
description="""String representation of dictionary containing a number
of statistics related to learning.""",
accessMode='Read',
dataType='Byte',
count=0,
constraints='handle'),
spatialImp=dict(
description="""Which spatial pooler implementation to use. Set to either
'py', or 'cpp'. The 'cpp' implementation is optimized for
speed in C++.""",
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints='enum: py, cpp, oldpy'),
))
# The last group is for parameters that aren't specific to spatial pooler
otherSpec = dict(
learningMode=dict(
description='1 if the node is learning (default 1).',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
inferenceMode=dict(
description='1 if the node is inferring (default 0).',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
anomalyMode=dict(
description='1 if an anomaly score is being computed',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
topDownMode=dict(
description='1 if the node should do top down compute on the next call '
'to compute into topDownOut (default 0).',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
activeOutputCount=dict(
description='Number of active elements in bottomUpOut output.',
accessMode='Read',
dataType='UInt32',
count=1,
constraints=''),
logPathInput=dict(
description='Optional name of input log file. If set, every input vector'
' will be logged to this file.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
logPathOutput=dict(
description='Optional name of output log file. If set, every output vector'
' will be logged to this file.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
logPathOutputDense=dict(
description='Optional name of output log file. If set, every output vector'
' will be logged to this file as a dense vector.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
)
return spatialSpec, otherSpec
class SPRegion(PyRegion):
"""
SPRegion is designed to implement the spatial pooler compute for a given
HTM level.
Uses the FDRCSpatial2 class to do most of the work. This node has just one
FDRCSpatial instance for the enitire level and does *not* support the concept
of "baby nodes" within it.
Automatic parameter handling:
Parameter names, default values, and descriptions are retrieved automatically
from FDRCSpatial2. Thus, there are only a few hardcoded arguments in __init__,
and the rest are passed to the appropriate underlying class. The NodeSpec is
mostly built automatically from these parameters, too.
If you add a parameter to FDRCSpatial2, it will be exposed through SPRegion
automatically as if it were in SPRegion.__init__, with the right default
value. Add an entry in the __init__ docstring for it too, and that will be
brought into the NodeSpec. SPRegion will maintain the parameter as its own
instance variable and also pass it to FDRCSpatial2. If the parameter is
changed, SPRegion will propagate the change.
If you want to do something different with the parameter, add it as an
argument into SPRegion.__init__, which will override all the default handling.
"""
def __init__(self,
columnCount, # Number of columns in the SP, a required parameter
inputWidth, # Size of inputs to the SP, a required parameter
spatialImp=gDefaultSpatialImp, #'py', 'cpp', or 'oldpy'
**kwargs):
if columnCount <= 0 or inputWidth <=0:
raise TypeError("Parameters columnCount and inputWidth must be > 0")
# Pull out the spatial arguments automatically
# These calls whittle down kwargs and create instance variables of SPRegion
self._FDRCSpatialClass = getSPClass(spatialImp)
sArgTuples = _buildArgs(self._FDRCSpatialClass.__init__, self, kwargs)
# Make a list of automatic spatial arg names for later use
self._spatialArgNames = [t[0] for t in sArgTuples]
# Learning and SP parameters.
# By default we start out in stage learn with inference disabled
self.learningMode = True
self.inferenceMode = False
self.anomalyMode = False
self.topDownMode = False
self.columnCount = columnCount
self.inputWidth = inputWidth
PyRegion.__init__(self, **kwargs)
# Initialize all non-persistent base members, as well as give
# derived class an opportunity to do the same.
self._loaded = False
self._initializeEphemeralMembers()
# Debugging support, used in _conditionalBreak
self.breakPdb = False
self.breakKomodo = False
# Defaults for all other parameters
self.logPathInput = ''
self.logPathOutput = ''
self.logPathOutputDense = ''
self._fpLogSPInput = None
self._fpLogSP = None
self._fpLogSPDense = None
#
# Variables set up in initInNetwork()
#
# FDRCSpatial instance
self._sfdr = None
# Spatial pooler's bottom-up output value: hang on to this output for
# top-down inference and for debugging
self._spatialPoolerOutput = None
# Spatial pooler's bottom-up input: hang on to this for supporting the
# spInputNonZeros parameter
self._spatialPoolerInput = None
#############################################################################
#
# Initialization code
#
#############################################################################
def _initializeEphemeralMembers(self):
"""
Initialize all ephemeral data members, and give the derived class the
opportunity to do the same by invoking the virtual member _initEphemerals(),
which is intended to be overridden.
NOTE: this is used by both __init__ and __setstate__ code paths.
"""
for attrName in self._getEphemeralMembersBase():
if attrName != "_loaded":
if hasattr(self, attrName):
if self._loaded:
# print self.__class__.__name__, "contains base class member '%s' " \
# "after loading." % attrName
# TODO: Re-enable warning or turn into error in a future release.
pass
else:
print self.__class__.__name__, "contains base class member '%s'" % \
attrName
if not self._loaded:
for attrName in self._getEphemeralMembersBase():
if attrName != "_loaded":
# if hasattr(self, attrName):
# import pdb; pdb.set_trace()
assert not hasattr(self, attrName)
else:
assert hasattr(self, attrName)
# Profiling information
self._profileObj = None
self._iterations = 0
# Let derived class initialize ephemerals
self._initEphemerals()
self._checkEphemeralMembers()
#############################################################################
def initialize(self, dims, splitterMaps):
""""""
# Zero out the spatial output in case it is requested
self._spatialPoolerOutput = numpy.zeros(self.columnCount,
dtype=GetNTAReal())
# Zero out the rfInput in case it is requested
self._spatialPoolerInput = numpy.zeros((1,self.inputWidth), dtype=GetNTAReal())
# Allocate the spatial pooler
self._allocateSpatialFDR(None)
#############################################################################
def _allocateSpatialFDR(self, rfInput):
"""Allocate the spatial pooler instance."""
if self._sfdr:
return
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = dict((name, getattr(self, name))
for name in self._spatialArgNames)
# Instantiate the spatial pooler class.
if ( (self._FDRCSpatialClass == CPPSpatialPooler) or
(self._FDRCSpatialClass == PYSpatialPooler) ):
autoArgs['columnDimensions'] = [self.columnCount]
autoArgs['inputDimensions'] = [self.inputWidth]
autoArgs['potentialRadius'] = self.inputWidth
self._sfdr = self._FDRCSpatialClass(
**autoArgs
)
else:
# Backward compatibility
autoArgs.pop('coincidencesShape')
autoArgs.pop('inputShape')
autoArgs.pop('inputBorder')
autoArgs.pop('coincInputRadius')
autoArgs.pop('cloneMap')
autoArgs.pop('numCloneMasters')
coincidencesShape = (self.columnCount, 1)
inputShape = (1, self.inputWidth)
inputBorder = inputShape[1]/2
if inputBorder*2 >= inputShape[1]:
inputBorder -= 1
coincInputRadius = inputShape[1]/2
cloneMap, numCloneMasters = fdru.makeCloneMap(
columnsShape=coincidencesShape,
outputCloningWidth=coincidencesShape[1],
outputCloningHeight=coincidencesShape[0]
)
self._sfdr = self._FDRCSpatialClass(
# These parameters are standard defaults for SPRegion
# They can be overridden by explicit calls to
# getParameter
cloneMap=cloneMap,
numCloneMasters=numCloneMasters,
coincidencesShape=coincidencesShape,
inputShape=inputShape,
inputBorder=inputBorder,
coincInputRadius = coincInputRadius,
**autoArgs)
#############################################################################
#
# Core compute methods: learning, inference, and prediction
#
#############################################################################
#############################################################################
def compute(self, inputs, outputs):
"""
Run one iteration of SPRegion's compute, profiling it if requested.
The guts of the compute are contained in the _compute() call so that
we can profile it if requested.
"""
# Uncomment this to find out who is generating divide by 0, or other numpy warnings
# numpy.seterr(divide='raise', invalid='raise', over='raise')
# Modify this line to turn on profiling for a given node. The results file
# ('hotshot.stats') will be sensed and printed out by the vision framework's
# RunInference.py script at the end of inference.
# Also uncomment the hotshot import at the top of this file.
if False and self.learningMode \
and self._iterations > 0 and self._iterations <= 10:
import hotshot
if self._iterations == 10:
print "\n Collecting and sorting internal node profiling stats generated by hotshot..."
stats = hotshot.stats.load("hotshot.stats")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats()
if self._profileObj is None:
print "\n Preparing to capture profile using hotshot..."
if os.path.exists('hotshot.stats'):
# There is an old hotshot stats profile left over, remove it.
os.remove('hotshot.stats')
self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
# filename, lineevents, linetimings
self._profileObj.runcall(self._compute, *[inputs, outputs])
else:
self._compute(inputs, outputs)
def _compute(self, inputs, outputs):
"""
Run one iteration of SPRegion's compute
"""
#if self.topDownMode and (not 'topDownIn' in inputs):
# raise RuntimeError("The input topDownIn must be linked in if "
# "topDownMode is True")
if self._sfdr is None:
raise RuntimeError("Spatial pooler has not been initialized")
if not self.topDownMode:
#
# BOTTOM-UP compute
#
self._iterations += 1
# Get our inputs into numpy arrays
buInputVector = inputs['bottomUpIn']
resetSignal = False
if 'resetIn' in inputs:
assert len(inputs['resetIn']) == 1
resetSignal = inputs['resetIn'][0] != 0
# Perform inference and/or learning
rfOutput = self._doBottomUpCompute(
rfInput = buInputVector.reshape((1,buInputVector.size)),
resetSignal = resetSignal
)
outputs['bottomUpOut'][:] = rfOutput.flat
else:
#
# TOP-DOWN inference
#
topDownIn = inputs.get('topDownIn',None)
spatialTopDownOut, temporalTopDownOut = self._doTopDownInfer(topDownIn)
outputs['spatialTopDownOut'][:] = spatialTopDownOut
if temporalTopDownOut is not None:
outputs['temporalTopDownOut'][:] = temporalTopDownOut
# OBSOLETE
outputs['anomalyScore'][:] = 0
# Write the bottom up out to our node outputs only if we are doing inference
#print "SPRegion input: ", buInputVector.nonzero()[0]
#print "SPRegion output: ", rfOutput.nonzero()[0]
#############################################################################
def _doBottomUpCompute(self, rfInput, resetSignal):
"""
Do one iteration of inference and/or learning and return the result
Parameters:
--------------------------------------------
rfInput: Input vector. Shape is: (1, inputVectorLen).
resetSignal: True if reset is asserted
"""
# Conditional compute break
self._conditionalBreak()
# Save the rfInput for the spInputNonZeros parameter
self._spatialPoolerInput = rfInput.reshape(-1)
assert(rfInput.shape[0] == 1)
# Run inference using the spatial pooler. We learn on the coincidences only
# if we are in learning mode and trainingStep is set appropriately.
# Run SFDR bottom-up compute and cache output in self._spatialPoolerOutput
if (self._FDRCSpatialClass == FDRCSpatial2):
# Backwards compatibility
self._spatialPoolerOutput = self._sfdr.compute(rfInput[0],
learn=self.learningMode,
infer=self.inferenceMode,
computeAnomaly=self.anomalyMode)
else:
inputVector = numpy.array(rfInput[0]).astype('uint32')
outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32')
self._sfdr.compute(inputVector, self.learningMode, outputVector)
self._spatialPoolerOutput[:] = outputVector[:]
# Direct logging of SP outputs if requested
if self._fpLogSP:
output = self._spatialPoolerOutput.reshape(-1)
outputNZ = output.nonzero()[0]
outStr = " ".join(["%d" % int(token) for token in outputNZ])
print >>self._fpLogSP, output.size, outStr
# Direct logging of SP inputs
if self._fpLogSPInput:
output = rfInput.reshape(-1)
outputNZ = output.nonzero()[0]
outStr = " ".join(["%d" % int(token) for token in outputNZ])
print >>self._fpLogSPInput, output.size, outStr
return self._spatialPoolerOutput
#############################################################################
def _doTopDownInfer(self, topDownInput = None):
"""
Do one iteration of top-down inference.
Parameters:
--------------------------------------------
tdInput: Top-down input
retval: (spatialTopDownOut, temporalTopDownOut)
spatialTopDownOut is the top down output computed only from the SP,
using it's current bottom-up output.
temporalTopDownOut is the top down output computed from the topDown in
of the level above us.
"""
return None, None
#############################################################################
#
# Region API support methods: getSpec, getParameter, and setParameter
#
#############################################################################
#############################################################################
@classmethod
def getBaseSpec(cls):
"""Return the base Spec for SPRegion.
Doesn't include the spatial, temporal and other parameters
"""
spec = dict(
description=SPRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
bottomUpIn=dict(
description="""The input vector.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
resetIn=dict(
description="""A boolean flag that indicates whether
or not the input vector received in this compute cycle
represents the start of a new temporal sequence.""",
dataType='Real32',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
topDownIn=dict(
description="""The top-down input signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
required = False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
bottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=True),
topDownOut=dict(
description="""The top-down output signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
spatialTopDownOut = dict(
description="""The top-down output, generated only from the current
SP output. This can be used to evaluate how well the
SP is representing the inputs independent of the TP.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
temporalTopDownOut = dict(
description="""The top-down output, generated only from the current
TP output feedback down through the SP.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
anomalyScore = dict(
description="""The score for how 'anomalous' (i.e. rare) this spatial
input pattern is. Higher values are increasingly rare""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
),
parameters=dict(
breakPdb=dict(
description='Set to 1 to stop in the pdb debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
breakKomodo=dict(
description='Set to 1 to stop in the Komodo debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
),
)
return spec
@classmethod
def getSpec(cls):
"""Return the Spec for SPRegion.
The parameters collection is constructed based on the parameters specified
by the variosu components (spatialSpec, temporalSpec and otherSpec)
"""
spec = cls.getBaseSpec()
s, o = _getAdditionalSpecs(spatialImp=gDefaultSpatialImp)
spec['parameters'].update(s)
spec['parameters'].update(o)
return spec
#############################################################################
def getParameter(self, parameterName, index=-1):
"""
Get the value of a NodeSpec parameter. Most parameters are handled
automatically by PyRegion's parameter get mechanism. The ones that need
special treatment are explicitly handled here.
"""
if parameterName == 'activeOutputCount':
return self.columnCount
elif parameterName == 'spatialPoolerInput':
return list(self._spatialPoolerInput.reshape(-1))
elif parameterName == 'spatialPoolerOutput':
return list(self._spatialPoolerOutput)
elif parameterName == 'spNumActiveOutputs':
return len(self._spatialPoolerOutput.nonzero()[0])
elif parameterName == 'spOutputNonZeros':
return [len(self._spatialPoolerOutput)] + \
list(self._spatialPoolerOutput.nonzero()[0])
elif parameterName == 'spInputNonZeros':
import pdb; pdb.set_trace()
return [len(self._spatialPoolerInput)] + \
list(self._spatialPoolerInput.nonzero()[0])
elif parameterName == 'spLearningStatsStr':
try:
return str(self._sfdr.getLearningStats())
except:
return str(dict())
else:
return PyRegion.getParameter(self, parameterName, index)
#############################################################################
def setParameter(self, parameterName, index, parameterValue):
"""
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
"""
if parameterName in self._spatialArgNames:
setattr(self._sfdr, parameterName, parameterValue)
elif parameterName == "logPathInput":
self.logPathInput = parameterValue
# Close any existing log file
if self._fpLogSPInput:
self._fpLogSPInput.close()
self._fpLogSPInput = None
# Open a new log file
if parameterValue:
if self.disableSpatial:
raise RuntimeError ("Spatial pooler is disabled for this level, "
"can not turn on logging of SP inputs.")
self._fpLogSPInput = open(self.logPathSPInput, 'w')
elif parameterName == "logPathOutput":
self.logPathOutput = parameterValue
# Close any existing log file
if self._fpLogSP:
self._fpLogSP.close()
self._fpLogSP = None
# Open a new log file
if parameterValue:
if self.disableSpatial:
raise RuntimeError ("Spatial pooler is disabled for this level, "
"can not turn on logging of SP outputs.")
self._fpLogSP = open(self.logPathSP, 'w')
elif parameterName == "logPathOutputDense":
self.logPathOutputDense = parameterValue
# Close any existing log file
if self._fpLogSPDense:
self._fpLogSPDense.close()
self._fpLogSPDense = None
# Open a new log file
if parameterValue:
self._fpLogSPDense = open(self.logPathSPDense, 'w')
elif hasattr(self, parameterName):
setattr(self, parameterName, parameterValue)
else:
raise Exception('Unknown parameter: ' + parameterName)
#############################################################################
#
# Methods to support serialization
#
#############################################################################
#############################################################################
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# We only want to serialize a single spatial/temporal FDR if they're cloned
for ephemeralMemberName in self._getEphemeralMembersAll():
state.pop(ephemeralMemberName, None)
return state
#############################################################################
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
self.__dict__.update(state)
self._loaded = True
# Backwards compatibility
if not hasattr(self, "_FDRCSpatialClass"):
self._FDRCSpatialClass = self._sfdr.__class__
# Initialize all non-persistent base members, as well as give
# derived class an opportunity to do the same.
self._initializeEphemeralMembers()
self._allocateSpatialFDR(None)
#############################################################################
def _initEphemerals(self):
"""
Initialize all ephemerals used by derived classes.
"""
if hasattr(self, '_sfdr') and self._sfdr:
self._spatialPoolerOutput = numpy.zeros(self.columnCount,
dtype=GetNTAReal())
else:
self._spatialPoolerOutput = None # Will be filled in initInNetwork
# Direct logging support (faster than node watch)
self._fpLogSPInput = None
self._fpLogSP = None
self._fpLogSPDense = None
self.logPathInput = ""
self.logPathOutput = ""
self.logPathOutputDense = ""
#############################################################################
def _getEphemeralMembers(self):
"""
Callback that returns a list of all "ephemeral" members (i.e., data members
that should not and/or cannot be pickled.)
"""
return ['_spatialPoolerOutput', '_fpLogSP', '_fpLogSPDense',
'logPathInput', 'logPathOutput', 'logPathOutputDense'
]
#############################################################################
def _getEphemeralMembersBase(self):
"""
Returns list of all ephemeral members.
"""
return [
'_loaded',
'_profileObj',
'_iterations',
]
def _getEphemeralMembersAll(self):
"""
Returns a concatenated list of both the standard base class
ephemeral members, as well as any additional ephemeral members
(e.g., file handles, etc.).
"""
return self._getEphemeralMembersBase() + self._getEphemeralMembers()
#############################################################################
def _checkEphemeralMembers(self):
for attrName in self._getEphemeralMembersBase():
if not hasattr(self, attrName):
print "Missing base class member:", attrName
for attrName in self._getEphemeralMembers():
if not hasattr(self, attrName):
print "Missing derived class member:", attrName
for attrName in self._getEphemeralMembersBase():
assert hasattr(self, attrName)
for attrName in self._getEphemeralMembers():
assert hasattr(self, attrName), "Node missing attr '%s'." % attrName
#############################################################################
#
# Misc. code
#
#############################################################################
#########################################################################################
def _conditionalBreak(self):
if self.breakKomodo:
import dbgp.client; dbgp.client.brk()
if self.breakPdb:
import pdb; pdb.set_trace()
#############################################################################
#
# NuPIC 2 Support
# These methods are required by NuPIC 2
#
#############################################################################
def getOutputElementCount(self, name):
if name == 'bottomUpOut':
return self.columnCount
elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut' or \
name == 'topDownOut':
return self.inputWidth
else:
raise Exception("Invalid output name specified")
# TODO: as a temporary hack, getParameterArrayCount checks to see if there's a
# variable, private or not, with that name. If so, it attempts to return the
# length of that variable.
def getParameterArrayCount(self, name, index):
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
return len(p)
# TODO: as a temporary hack, getParameterArray checks to see if there's a
# variable, private or not, with that name. If so, it returns the value of the
# variable.
def getParameterArray(self, name, index, a):
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
if len(p) > 0:
a[:] = p[:]
| gpl-3.0 | 6,989,874,241,734,602,000 | 33.488636 | 99 | 0.596513 | false |
swift-nav/peregrine | peregrine/iqgen/bits/tcxo_factory.py | 2 | 2021 | # Copyright (C) 2016 Swift Navigation Inc.
# Contact: Valeri Atamaniouk <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`peregrine.iqgen.bits.amplitude_factory` module contains classes and
functions related to object factory for amplitude objects.
"""
from peregrine.iqgen.bits.tcxo_poly import TCXOPoly as PolyTcxo
from peregrine.iqgen.bits.tcxo_sine import TCXOSine as SineTcxo
class ObjectFactory(object):
'''
Object factory for amplitude objects.
'''
def __init__(self):
super(ObjectFactory, self).__init__()
def toMapForm(self, obj):
t = type(obj)
if t is PolyTcxo:
return self.__PolyTcxo_ToMap(obj)
elif t is SineTcxo:
return self.__SineTcxo_ToMap(obj)
else:
raise ValueError("Invalid object type")
def fromMapForm(self, data):
t = data['type']
if t == 'PolyTcxo':
return self.__MapTo_PolyTcxo(data)
elif t == 'SineTcxo':
return self.__MapTo_SineTcxo(data)
else:
raise ValueError("Invalid object type")
def __PolyTcxo_ToMap(self, obj):
data = {'type': 'PolyTcxo', 'coeffs': obj.coeffs}
return data
def __SineTcxo_ToMap(self, obj):
data = {'type': 'SineTcxo',
'initial_ppm': obj.initial_ppm,
'amplitude_ppm': obj.amplitude_ppm,
'period_s': obj.period_s}
return data
def __MapTo_PolyTcxo(self, data):
coeffs = data['coeffs']
return PolyTcxo(coeffs)
def __MapTo_SineTcxo(self, data):
initial_ppm = data['initial_ppm']
amplitude_ppm = data['amplitude_ppm']
period_s = data['period_s']
return SineTcxo(initial_ppm, amplitude_ppm, period_s)
factoryObject = ObjectFactory()
| gpl-3.0 | -1,870,101,731,011,039,500 | 28.720588 | 78 | 0.680851 | false |
flv0/qutebrowser | qutebrowser/browser/hints.py | 1 | 46816 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A HintManager to draw hints over links."""
import collections
import functools
import math
import re
from string import ascii_lowercase
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QEvent, Qt, QUrl,
QTimer)
from PyQt5.QtGui import QMouseEvent
from PyQt5.QtWebKit import QWebElement
from PyQt5.QtWebKitWidgets import QWebPage
from qutebrowser.config import config
from qutebrowser.keyinput import modeman, modeparsers
from qutebrowser.browser import webelem
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.utils import usertypes, log, qtutils, message, objreg, utils
ElemTuple = collections.namedtuple('ElemTuple', ['elem', 'label'])
Target = usertypes.enum('Target', ['normal', 'current', 'tab', 'tab_fg',
'tab_bg', 'window', 'yank', 'yank_primary',
'run', 'fill', 'hover', 'download',
'userscript', 'spawn'])
class WordHintingError(Exception):
"""Exception raised on errors during word hinting."""
@pyqtSlot(usertypes.KeyMode)
def on_mode_entered(mode, win_id):
"""Stop hinting when insert mode was entered."""
if mode == usertypes.KeyMode.insert:
modeman.maybe_leave(win_id, usertypes.KeyMode.hint, 'insert mode')
class HintContext:
"""Context namespace used for hinting.
Attributes:
frames: The QWebFrames to use.
destroyed_frames: id()'s of QWebFrames which have been destroyed.
(Workaround for https://github.com/The-Compiler/qutebrowser/issues/152)
all_elems: A list of all (elem, label) namedtuples ever created.
elems: A mapping from key strings to (elem, label) namedtuples.
May contain less elements than `all_elems` due to filtering.
baseurl: The URL of the current page.
target: What to do with the opened links.
normal/current/tab/tab_fg/tab_bg/window: Get passed to
BrowserTab.
yank/yank_primary: Yank to clipboard/primary selection.
run: Run a command.
fill: Fill commandline with link.
download: Download the link.
userscript: Call a custom userscript.
spawn: Spawn a simple command.
to_follow: The link to follow when enter is pressed.
args: Custom arguments for userscript/spawn
rapid: Whether to do rapid hinting.
mainframe: The main QWebFrame where we started hinting in.
group: The group of web elements to hint.
"""
def __init__(self):
self.all_elems = []
self.elems = {}
self.target = None
self.baseurl = None
self.to_follow = None
self.rapid = False
self.frames = []
self.destroyed_frames = []
self.args = []
self.mainframe = None
self.group = None
def get_args(self, urlstr):
"""Get the arguments, with {hint-url} replaced by the given URL."""
args = []
for arg in self.args:
arg = arg.replace('{hint-url}', urlstr)
args.append(arg)
return args
class HintManager(QObject):
"""Manage drawing hints over links or other elements.
Class attributes:
        HINT_TEXTS: Text displayed for the different hint targets.
Attributes:
_context: The HintContext for the current invocation.
_win_id: The window ID this HintManager is associated with.
_tab_id: The tab ID this HintManager is associated with.
_filterstr: Used to save the filter string for restoring in rapid mode.
Signals:
mouse_event: Mouse event to be posted in the web view.
arg: A QMouseEvent
start_hinting: Emitted when hinting starts, before a link is clicked.
arg: The ClickTarget to use.
stop_hinting: Emitted after a link was clicked.
"""
HINT_TEXTS = {
Target.normal: "Follow hint",
Target.current: "Follow hint in current tab",
Target.tab: "Follow hint in new tab",
Target.tab_fg: "Follow hint in foreground tab",
Target.tab_bg: "Follow hint in background tab",
Target.window: "Follow hint in new window",
Target.yank: "Yank hint to clipboard",
Target.yank_primary: "Yank hint to primary selection",
Target.run: "Run a command on a hint",
Target.fill: "Set hint in commandline",
Target.hover: "Hover over a hint",
Target.download: "Download hint",
Target.userscript: "Call userscript via hint",
Target.spawn: "Spawn command via hint",
}
mouse_event = pyqtSignal('QMouseEvent')
start_hinting = pyqtSignal(usertypes.ClickTarget)
stop_hinting = pyqtSignal()
def __init__(self, win_id, tab_id, parent=None):
"""Constructor."""
super().__init__(parent)
self._win_id = win_id
self._tab_id = tab_id
self._context = None
self._filterstr = None
self._word_hinter = WordHinter()
mode_manager = objreg.get('mode-manager', scope='window',
window=win_id)
mode_manager.left.connect(self.on_mode_left)
def _get_text(self):
"""Get a hint text based on the current context."""
text = self.HINT_TEXTS[self._context.target]
if self._context.rapid:
text += ' (rapid mode)'
text += '...'
return text
def _cleanup(self):
"""Clean up after hinting."""
for elem in self._context.all_elems:
try:
elem.label.removeFromDocument()
except webelem.IsNullError:
pass
for f in self._context.frames:
log.hints.debug("Disconnecting frame {}".format(f))
if id(f) in self._context.destroyed_frames:
# WORKAROUND for
# https://github.com/The-Compiler/qutebrowser/issues/152
log.hints.debug("Frame has been destroyed, ignoring.")
continue
try:
f.contentsSizeChanged.disconnect(self.on_contents_size_changed)
except TypeError:
# It seems we can get this here:
# TypeError: disconnect() failed between
# 'contentsSizeChanged' and 'on_contents_size_changed'
                # See https://github.com/The-Compiler/qutebrowser/issues/263
pass
log.hints.debug("Disconnected.")
text = self._get_text()
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
message_bridge.maybe_reset_text(text)
self._context = None
def _hint_strings(self, elems):
"""Calculate the hint strings for elems.
Inspired by Vimium.
Args:
elems: The elements to get hint strings for.
Return:
A list of hint strings, in the same order as the elements.
"""
hint_mode = config.get('hints', 'mode')
if hint_mode == 'word':
try:
return self._word_hinter.hint(elems)
except WordHintingError as e:
message.error(self._win_id, str(e), immediately=True)
                # fall back to the regular letter/number hints below
if hint_mode == 'number':
chars = '0123456789'
else:
chars = config.get('hints', 'chars')
min_chars = config.get('hints', 'min-chars')
if config.get('hints', 'scatter') and hint_mode != 'number':
return self._hint_scattered(min_chars, chars, elems)
else:
return self._hint_linear(min_chars, chars, elems)
def _hint_scattered(self, min_chars, chars, elems):
"""Produce scattered hint labels with variable length (like Vimium).
Args:
min_chars: The minimum length of labels.
chars: The alphabet to use for labels.
elems: The elements to generate labels for.
"""
# Determine how many digits the link hints will require in the worst
        # case. Usually we do not need all of these digits for every single
        # link hint, so we can show shorter hints for a few of the links.
needed = max(min_chars, math.ceil(math.log(len(elems), len(chars))))
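        # E.g. for 100 elements, 26 chars and min_chars=1 this gives
        # needed = max(1, ceil(log(100, 26))) = 2.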
# Short hints are the number of hints we can possibly show which are
# (needed - 1) digits in length.
if needed > min_chars:
short_count = math.floor((len(chars) ** needed - len(elems)) /
len(chars))
else:
short_count = 0
long_count = len(elems) - short_count
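        # Continuing the example above: short_count = floor((26**2 - 100) / 26)
        # = 22 one-char hints, leaving long_count = 78 two-char hints.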
strings = []
if needed > 1:
for i in range(short_count):
strings.append(self._number_to_hint_str(i, chars, needed - 1))
start = short_count * len(chars)
for i in range(start, start + long_count):
strings.append(self._number_to_hint_str(i, chars, needed))
return self._shuffle_hints(strings, len(chars))
def _hint_linear(self, min_chars, chars, elems):
"""Produce linear hint labels with constant length (like dwb).
Args:
min_chars: The minimum length of labels.
chars: The alphabet to use for labels.
elems: The elements to generate labels for.
"""
strings = []
needed = max(min_chars, math.ceil(math.log(len(elems), len(chars))))
for i in range(len(elems)):
strings.append(self._number_to_hint_str(i, chars, needed))
return strings
def _shuffle_hints(self, hints, length):
"""Shuffle the given set of hints so that they're scattered.
Hints starting with the same character will be spread evenly throughout
the array.
Inspired by Vimium.
Args:
hints: A list of hint strings.
length: Length of the available charset.
Return:
A list of shuffled hint strings.
"""
        buckets = [[] for _ in range(length)]
for i, hint in enumerate(hints):
buckets[i % len(buckets)].append(hint)
result = []
for bucket in buckets:
result += bucket
return result
def _number_to_hint_str(self, number, chars, digits=0):
"""Convert a number like "8" into a hint string like "JK".
This is used to sequentially generate all of the hint text.
The hint string will be "padded with zeroes" to ensure its length is >=
digits.
Inspired by Vimium.
Args:
number: The hint number.
chars: The charset to use.
digits: The minimum output length.
Return:
A hint string.
"""
base = len(chars)
hintstr = []
remainder = 0
while True:
remainder = number % base
hintstr.insert(0, chars[remainder])
number -= remainder
number //= base
if number <= 0:
break
# Pad the hint string we're returning so that it matches digits.
for _ in range(0, digits - len(hintstr)):
hintstr.insert(0, chars[0])
return ''.join(hintstr)
def _is_hidden(self, elem):
"""Check if the element is hidden via display=none."""
display = elem.styleProperty('display', QWebElement.InlineStyle)
return display == 'none'
def _show_elem(self, elem):
"""Show a given element."""
elem.setStyleProperty('display', 'inline !important')
def _hide_elem(self, elem):
"""Hide a given element."""
elem.setStyleProperty('display', 'none !important')
def _set_style_properties(self, elem, label):
"""Set the hint CSS on the element given.
Args:
elem: The QWebElement to set the style attributes for.
label: The label QWebElement.
"""
attrs = [
('display', 'inline !important'),
('z-index', '{} !important'.format(int(2 ** 32 / 2 - 1))),
('pointer-events', 'none !important'),
('position', 'fixed !important'),
('color', config.get('colors', 'hints.fg') + ' !important'),
('background', config.get('colors', 'hints.bg') + ' !important'),
('font', config.get('fonts', 'hints') + ' !important'),
('border', config.get('hints', 'border') + ' !important'),
('opacity', str(config.get('hints', 'opacity')) + ' !important'),
]
# Make text uppercase if set in config
if (config.get('hints', 'uppercase') and
config.get('hints', 'mode') == 'letter'):
attrs.append(('text-transform', 'uppercase !important'))
else:
attrs.append(('text-transform', 'none !important'))
for k, v in attrs:
label.setStyleProperty(k, v)
self._set_style_position(elem, label)
def _set_style_position(self, elem, label):
"""Set the CSS position of the label element.
Args:
elem: The QWebElement to set the style attributes for.
label: The label QWebElement.
"""
rect = elem.rect_on_view(adjust_zoom=False)
left = rect.x()
top = rect.y()
log.hints.vdebug("Drawing label '{!r}' at {}/{} for element '{!r}'"
.format(label, left, top, elem))
label.setStyleProperty('left', '{}px !important'.format(left))
label.setStyleProperty('top', '{}px !important'.format(top))
def _draw_label(self, elem, string):
"""Draw a hint label over an element.
Args:
elem: The QWebElement to use.
string: The hint string to print.
Return:
The newly created label element
"""
doc = elem.webFrame().documentElement()
# It seems impossible to create an empty QWebElement for which isNull()
# is false so we can work with it.
# As a workaround, we use appendInside() with markup as argument, and
# then use lastChild() to get a reference to it.
# See: http://stackoverflow.com/q/7364852/2085149
body = doc.findFirst('body')
if not body.isNull():
parent = body
else:
parent = doc
parent.appendInside('<span></span>')
label = webelem.WebElementWrapper(parent.lastChild())
label['class'] = 'qutehint'
self._set_style_properties(elem, label)
label.setPlainText(string)
return label
def _show_url_error(self):
"""Show an error because no link was found."""
message.error(self._win_id, "No suitable link found for this element.",
immediately=True)
def _click(self, elem, context):
"""Click an element.
Args:
elem: The QWebElement to click.
context: The HintContext to use.
"""
target_mapping = {
Target.normal: usertypes.ClickTarget.normal,
Target.current: usertypes.ClickTarget.normal,
Target.tab_fg: usertypes.ClickTarget.tab,
Target.tab_bg: usertypes.ClickTarget.tab_bg,
Target.window: usertypes.ClickTarget.window,
Target.hover: usertypes.ClickTarget.normal,
}
if config.get('tabs', 'background-tabs'):
target_mapping[Target.tab] = usertypes.ClickTarget.tab_bg
else:
target_mapping[Target.tab] = usertypes.ClickTarget.tab
# Click the center of the largest square fitting into the top/left
# corner of the rectangle, this will help if part of the <a> element
# is hidden behind other elements
# https://github.com/The-Compiler/qutebrowser/issues/1005
rect = elem.rect_on_view()
if rect.width() > rect.height():
rect.setWidth(rect.height())
else:
rect.setHeight(rect.width())
pos = rect.center()
action = "Hovering" if context.target == Target.hover else "Clicking"
log.hints.debug("{} on '{}' at position {}".format(
action, elem.debug_text(), pos))
self.start_hinting.emit(target_mapping[context.target])
if context.target in [Target.tab, Target.tab_fg, Target.tab_bg,
Target.window]:
modifiers = Qt.ControlModifier
else:
modifiers = Qt.NoModifier
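        # Synthesize a mouse move followed (unless hovering) by a press and
        # release; presumably the Ctrl modifier is what makes the page open
        # the link in a new tab/window instead of the current one.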
events = [
QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton,
Qt.NoModifier),
]
if context.target != Target.hover:
events += [
QMouseEvent(QEvent.MouseButtonPress, pos, Qt.LeftButton,
Qt.LeftButton, modifiers),
QMouseEvent(QEvent.MouseButtonRelease, pos, Qt.LeftButton,
Qt.NoButton, modifiers),
]
if context.target in [Target.normal, Target.current]:
# Set the pre-jump mark ', so we can jump back here after following
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
tabbed_browser.set_mark("'")
if context.target == Target.current:
elem.remove_blank_target()
for evt in events:
self.mouse_event.emit(evt)
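        # For editable text inputs, move the cursor to the end afterwards;
        # this is deferred via a 0ms timer so it runs only after the click
        # events above have been processed.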
if elem.is_text_input() and elem.is_editable():
QTimer.singleShot(0, functools.partial(
elem.webFrame().page().triggerAction,
QWebPage.MoveToEndOfDocument))
QTimer.singleShot(0, self.stop_hinting.emit)
def _yank(self, url, context):
"""Yank an element to the clipboard or primary selection.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
sel = (context.target == Target.yank_primary and
utils.supports_selection())
urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword)
utils.set_clipboard(urlstr, selection=sel)
msg = "Yanked URL to {}: {}".format(
"primary selection" if sel else "clipboard",
urlstr)
message.info(self._win_id, msg)
def _run_cmd(self, url, context):
"""Run the command based on a hint URL.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
urlstr = url.toString(QUrl.FullyEncoded)
args = context.get_args(urlstr)
commandrunner = runners.CommandRunner(self._win_id)
commandrunner.run_safely(' '.join(args))
def _preset_cmd_text(self, url, context):
"""Preset a commandline text based on a hint URL.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
urlstr = url.toDisplayString(QUrl.FullyEncoded)
args = context.get_args(urlstr)
text = ' '.join(args)
        if not text or text[0] not in modeparsers.STARTCHARS:
message.error(self._win_id,
"Invalid command text '{}'.".format(text),
immediately=True)
else:
message.set_cmd_text(self._win_id, text)
def _download(self, elem, context):
"""Download a hint URL.
Args:
elem: The QWebElement to download.
            context: The HintContext to use.
"""
url = self._resolve_url(elem, context.baseurl)
if url is None:
self._show_url_error()
return
if context.rapid:
prompt = False
else:
prompt = None
download_manager = objreg.get('download-manager', scope='window',
window=self._win_id)
download_manager.get(url, page=elem.webFrame().page(),
prompt_download_directory=prompt)
def _call_userscript(self, elem, context):
"""Call a userscript from a hint.
Args:
elem: The QWebElement to use in the userscript.
context: The HintContext to use.
"""
cmd = context.args[0]
args = context.args[1:]
frame = context.mainframe
env = {
'QUTE_MODE': 'hints',
'QUTE_SELECTED_TEXT': str(elem),
'QUTE_SELECTED_HTML': elem.toOuterXml(),
}
url = self._resolve_url(elem, context.baseurl)
if url is not None:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
env.update(userscripts.store_source(frame))
userscripts.run(cmd, *args, win_id=self._win_id, env=env)
def _spawn(self, url, context):
"""Spawn a simple command from a hint.
Args:
url: The URL to open as a QUrl.
context: The HintContext to use.
"""
urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword)
args = context.get_args(urlstr)
commandrunner = runners.CommandRunner(self._win_id)
commandrunner.run_safely('spawn ' + ' '.join(args))
def _resolve_url(self, elem, baseurl):
"""Resolve a URL and check if we want to keep it.
Args:
elem: The QWebElement to get the URL of.
baseurl: The baseurl of the current tab.
Return:
A QUrl with the absolute URL, or None.
"""
for attr in ('href', 'src'):
if attr in elem:
text = elem[attr].strip()
break
else:
return None
url = QUrl(text)
if not url.isValid():
return None
if url.isRelative():
url = baseurl.resolved(url)
qtutils.ensure_valid(url)
return url
def _find_prevnext(self, frame, prev=False):
"""Find a prev/next element in frame."""
# First check for <link rel="prev(ious)|next">
elems = frame.findAllElements(webelem.SELECTORS[webelem.Group.links])
        rel_values = ('prev', 'previous') if prev else ('next',)
for e in elems:
e = webelem.WebElementWrapper(e)
try:
rel_attr = e['rel']
except KeyError:
continue
if rel_attr in rel_values:
log.hints.debug("Found '{}' with rel={}".format(
e.debug_text(), rel_attr))
return e
# Then check for regular links/buttons.
elems = frame.findAllElements(
webelem.SELECTORS[webelem.Group.prevnext])
elems = [webelem.WebElementWrapper(e) for e in elems]
filterfunc = webelem.FILTERS[webelem.Group.prevnext]
elems = [e for e in elems if filterfunc(e)]
option = 'prev-regexes' if prev else 'next-regexes'
if not elems:
return None
for regex in config.get('hints', option):
log.hints.vdebug("== Checking regex '{}'.".format(regex.pattern))
for e in elems:
text = str(e)
if not text:
continue
if regex.search(text):
log.hints.debug("Regex '{}' matched on '{}'.".format(
regex.pattern, text))
return e
else:
log.hints.vdebug("No match on '{}'!".format(text))
return None
def _connect_frame_signals(self):
"""Connect the contentsSizeChanged signals to all frames."""
for f in self._context.frames:
log.hints.debug("Connecting frame {}".format(f))
f.contentsSizeChanged.connect(self.on_contents_size_changed)
def _check_args(self, target, *args):
"""Check the arguments passed to start() and raise if they're wrong.
Args:
target: A Target enum member.
            args: Arguments for userscript/spawn/run/fill.
"""
if not isinstance(target, Target):
raise TypeError("Target {} is no Target member!".format(target))
if target in (Target.userscript, Target.spawn, Target.run,
Target.fill):
if not args:
raise cmdexc.CommandError(
"'args' is required with target userscript/spawn/run/"
"fill.")
else:
if args:
                raise cmdexc.CommandError(
                    "'args' is only allowed with target userscript/spawn/"
                    "run/fill.")
def _init_elements(self):
"""Initialize the elements and labels based on the context set."""
elems = []
for f in self._context.frames:
elems += f.findAllElements(webelem.SELECTORS[self._context.group])
elems = [e for e in elems
if webelem.is_visible(e, self._context.mainframe)]
# We wrap the elements late for performance reasons, as wrapping 1000s
# of elements (with ~50 methods each) just takes too much time...
elems = [webelem.WebElementWrapper(e) for e in elems]
filterfunc = webelem.FILTERS.get(self._context.group, lambda e: True)
elems = [e for e in elems if filterfunc(e)]
if not elems:
raise cmdexc.CommandError("No elements found.")
strings = self._hint_strings(elems)
log.hints.debug("hints: {}".format(', '.join(strings)))
for e, string in zip(elems, strings):
label = self._draw_label(e, string)
elem = ElemTuple(e, label)
self._context.all_elems.append(elem)
self._context.elems[string] = elem
keyparsers = objreg.get('keyparsers', scope='window',
window=self._win_id)
keyparser = keyparsers[usertypes.KeyMode.hint]
keyparser.update_bindings(strings)
def _filter_matches(self, filterstr, elemstr):
"""Return True if `filterstr` matches `elemstr`."""
# Empty string and None always match
if not filterstr:
return True
filterstr = filterstr.casefold()
elemstr = elemstr.casefold()
# Do multi-word matching
return all(word in elemstr for word in filterstr.split())
def follow_prevnext(self, frame, baseurl, prev=False, tab=False,
background=False, window=False):
"""Click a "previous"/"next" element on the page.
Args:
frame: The frame where the element is in.
baseurl: The base URL of the current tab.
prev: True to open a "previous" link, False to open a "next" link.
tab: True to open in a new tab, False for the current tab.
background: True to open in a background tab.
window: True to open in a new window, False for the current one.
"""
from qutebrowser.mainwindow import mainwindow
elem = self._find_prevnext(frame, prev)
if elem is None:
raise cmdexc.CommandError("No {} links found!".format(
"prev" if prev else "forward"))
url = self._resolve_url(elem, baseurl)
if url is None:
raise cmdexc.CommandError("No {} links found!".format(
"prev" if prev else "forward"))
qtutils.ensure_valid(url)
if window:
new_window = mainwindow.MainWindow()
new_window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=new_window.win_id)
tabbed_browser.tabopen(url, background=False)
elif tab:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
tabbed_browser.tabopen(url, background=background)
else:
webview = objreg.get('webview', scope='tab', window=self._win_id,
tab=self._tab_id)
webview.openurl(url)
@cmdutils.register(instance='hintmanager', scope='tab', name='hint',
star_args_optional=True, maxsplit=2)
@cmdutils.argument('win_id', win_id=True)
def start(self, rapid=False, group=webelem.Group.all, target=Target.normal,
*args, win_id):
"""Start hinting.
Args:
            rapid: Whether to do rapid hinting. This is only possible with
                   targets `tab` (with background-tabs=true), `tab-bg`,
                   `window`, `run`, `hover`, `userscript`, `spawn`,
                   `download`, `normal` and `current`.
group: The hinting mode to use.
- `all`: All clickable elements.
- `links`: Only links.
- `images`: Only images.
target: What to do with the selected element.
- `normal`: Open the link.
- `current`: Open the link in the current tab.
- `tab`: Open the link in a new tab (honoring the
background-tabs setting).
- `tab-fg`: Open the link in a new foreground tab.
- `tab-bg`: Open the link in a new background tab.
- `window`: Open the link in a new window.
- `hover` : Hover over the link.
- `yank`: Yank the link to the clipboard.
- `yank-primary`: Yank the link to the primary selection.
- `run`: Run the argument as command.
- `fill`: Fill the commandline with the command given as
argument.
- `download`: Download the link.
- `userscript`: Call a userscript with `$QUTE_URL` set to the
link.
- `spawn`: Spawn a command.
*args: Arguments for spawn/userscript/run/fill.
- With `spawn`: The executable and arguments to spawn.
`{hint-url}` will get replaced by the selected
URL.
- With `userscript`: The userscript to execute. Either store
the userscript in
`~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`), or use an absolute
path.
- With `fill`: The command to fill the statusbar with.
`{hint-url}` will get replaced by the selected
URL.
- With `run`: Same as `fill`.
"""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
widget = tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
mainframe = widget.page().mainFrame()
if mainframe is None:
raise cmdexc.CommandError("No frame focused!")
mode_manager = objreg.get('mode-manager', scope='window',
window=self._win_id)
if mode_manager.mode == usertypes.KeyMode.hint:
modeman.leave(win_id, usertypes.KeyMode.hint, 're-hinting')
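        # Rapid hinting keeps hint mode active after each follow, so reject
        # targets for which repeating makes no sense (e.g. opening a
        # foreground tab, or filling the commandline).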
if rapid:
if target in [Target.tab_bg, Target.window, Target.run,
Target.hover, Target.userscript, Target.spawn,
Target.download, Target.normal, Target.current]:
pass
elif (target == Target.tab and
config.get('tabs', 'background-tabs')):
pass
else:
name = target.name.replace('_', '-')
raise cmdexc.CommandError("Rapid hinting makes no sense with "
"target {}!".format(name))
self._check_args(target, *args)
self._context = HintContext()
self._context.target = target
self._context.rapid = rapid
try:
self._context.baseurl = tabbed_browser.current_url()
except qtutils.QtValueError:
raise cmdexc.CommandError("No URL set for this page yet!")
self._context.frames = webelem.get_child_frames(mainframe)
for frame in self._context.frames:
# WORKAROUND for
# https://github.com/The-Compiler/qutebrowser/issues/152
frame.destroyed.connect(functools.partial(
self._context.destroyed_frames.append, id(frame)))
self._context.args = args
self._context.mainframe = mainframe
self._context.group = group
self._init_elements()
message_bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
message_bridge.set_text(self._get_text())
self._connect_frame_signals()
modeman.enter(self._win_id, usertypes.KeyMode.hint,
'HintManager.start')
def handle_partial_key(self, keystr):
"""Handle a new partial keypress."""
log.hints.debug("Handling new keystring: '{}'".format(keystr))
for string, elem in self._context.elems.items():
try:
if string.startswith(keystr):
matched = string[:len(keystr)]
rest = string[len(keystr):]
match_color = config.get('colors', 'hints.fg.match')
elem.label.setInnerXml(
'<font color="{}">{}</font>{}'.format(
match_color, matched, rest))
if self._is_hidden(elem.label):
# hidden element which matches again -> show it
self._show_elem(elem.label)
else:
# element doesn't match anymore -> hide it
self._hide_elem(elem.label)
except webelem.IsNullError:
pass
def _filter_number_hints(self):
"""Apply filters for numbered hints and renumber them.
Return:
Elements which are still visible
"""
# renumber filtered hints
elems = []
for e in self._context.all_elems:
try:
if not self._is_hidden(e.label):
elems.append(e)
except webelem.IsNullError:
pass
if not elems:
# Whoops, filtered all hints
modeman.leave(self._win_id, usertypes.KeyMode.hint,
'all filtered')
return {}
strings = self._hint_strings(elems)
self._context.elems = {}
for elem, string in zip(elems, strings):
elem.label.setInnerXml(string)
self._context.elems[string] = elem
keyparsers = objreg.get('keyparsers', scope='window',
window=self._win_id)
keyparser = keyparsers[usertypes.KeyMode.hint]
keyparser.update_bindings(strings, preserve_filter=True)
return self._context.elems
def _filter_non_number_hints(self):
"""Apply filters for letter/word hints.
Return:
Elements which are still visible
"""
visible = {}
for string, elem in self._context.elems.items():
try:
if not self._is_hidden(elem.label):
visible[string] = elem
except webelem.IsNullError:
pass
if not visible:
# Whoops, filtered all hints
modeman.leave(self._win_id, usertypes.KeyMode.hint,
'all filtered')
return visible
def filter_hints(self, filterstr):
"""Filter displayed hints according to a text.
Args:
filterstr: The string to filter with, or None to use the filter
from previous call (saved in `self._filterstr`). If
`filterstr` is an empty string or if both `filterstr`
and `self._filterstr` are None, all hints are shown.
"""
if filterstr is None:
filterstr = self._filterstr
else:
self._filterstr = filterstr
for elem in self._context.all_elems:
try:
if self._filter_matches(filterstr, str(elem.elem)):
if self._is_hidden(elem.label):
# hidden element which matches again -> show it
self._show_elem(elem.label)
else:
# element doesn't match anymore -> hide it
self._hide_elem(elem.label)
except webelem.IsNullError:
pass
if config.get('hints', 'mode') == 'number':
visible = self._filter_number_hints()
else:
visible = self._filter_non_number_hints()
if (len(visible) == 1 and
config.get('hints', 'auto-follow') and
filterstr is not None):
# apply auto-follow-timeout
timeout = config.get('hints', 'auto-follow-timeout')
keyparsers = objreg.get('keyparsers', scope='window',
window=self._win_id)
normal_parser = keyparsers[usertypes.KeyMode.normal]
normal_parser.set_inhibited_timeout(timeout)
# unpacking gets us the first (and only) key in the dict.
self.fire(*visible)
def fire(self, keystr, force=False):
"""Fire a completed hint.
Args:
keystr: The keychain string to follow.
force: When True, follow even when auto-follow is false.
"""
if not (force or config.get('hints', 'auto-follow')):
self.handle_partial_key(keystr)
self._context.to_follow = keystr
return
# Handlers which take a QWebElement
elem_handlers = {
Target.normal: self._click,
Target.current: self._click,
Target.tab: self._click,
Target.tab_fg: self._click,
Target.tab_bg: self._click,
Target.window: self._click,
Target.hover: self._click,
# _download needs a QWebElement to get the frame.
Target.download: self._download,
Target.userscript: self._call_userscript,
}
# Handlers which take a QUrl
url_handlers = {
Target.yank: self._yank,
Target.yank_primary: self._yank,
Target.run: self._run_cmd,
Target.fill: self._preset_cmd_text,
Target.spawn: self._spawn,
}
elem = self._context.elems[keystr].elem
if elem.webFrame() is None:
message.error(self._win_id,
"This element has no webframe.",
immediately=True)
return
if self._context.target in elem_handlers:
handler = functools.partial(elem_handlers[self._context.target],
elem, self._context)
elif self._context.target in url_handlers:
url = self._resolve_url(elem, self._context.baseurl)
if url is None:
self._show_url_error()
return
handler = functools.partial(url_handlers[self._context.target],
url, self._context)
else:
raise ValueError("No suitable handler found!")
if not self._context.rapid:
modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint,
'followed')
else:
# Reset filtering
self.filter_hints(None)
# Undo keystring highlighting
for string, elem in self._context.elems.items():
elem.label.setInnerXml(string)
handler()
@cmdutils.register(instance='hintmanager', scope='tab', hide=True,
modes=[usertypes.KeyMode.hint])
def follow_hint(self, keystring=None):
"""Follow a hint.
Args:
keystring: The hint to follow, or None.
"""
if keystring is None:
if self._context.to_follow is None:
raise cmdexc.CommandError("No hint to follow")
else:
keystring = self._context.to_follow
elif keystring not in self._context.elems:
raise cmdexc.CommandError("No hint {}!".format(keystring))
self.fire(keystring, force=True)
@pyqtSlot('QSize')
def on_contents_size_changed(self, _size):
"""Reposition hints if contents size changed."""
log.hints.debug("Contents size changed...!")
for e in self._context.all_elems:
try:
if e.elem.webFrame() is None:
# This sometimes happens for some reason...
e.label.removeFromDocument()
continue
self._set_style_position(e.elem, e.label)
except webelem.IsNullError:
pass
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Stop hinting when hinting mode was left."""
if mode != usertypes.KeyMode.hint or self._context is None:
# We have one HintManager per tab, so when this gets called,
# self._context might be None, because the current tab is not
# hinting.
return
self._cleanup()
class WordHinter:
"""Generator for word hints.
Attributes:
words: A set of words to be used when no "smart hint" can be
derived from the hinted element.
"""
def __init__(self):
# will be initialized on first use.
self.words = set()
self.dictionary = None
def ensure_initialized(self):
"""Generate the used words if yet uninitialized."""
dictionary = config.get("hints", "dictionary")
if not self.words or self.dictionary != dictionary:
self.words.clear()
self.dictionary = dictionary
try:
with open(dictionary, encoding="UTF-8") as wordfile:
alphabet = set(ascii_lowercase)
hints = set()
lines = (line.rstrip().lower() for line in wordfile)
for word in lines:
if set(word) - alphabet:
                            # contains non-alphabetic chars
continue
if len(word) > 4:
# we don't need words longer than 4
continue
for i in range(len(word)):
# remove all prefixes of this word
hints.discard(word[:i + 1])
hints.add(word)
self.words.update(hints)
except IOError as e:
error = "Word hints requires reading the file at {}: {}"
raise WordHintingError(error.format(dictionary, str(e)))
def extract_tag_words(self, elem):
"""Extract tag words form the given element."""
attr_extractors = {
"alt": lambda elem: elem["alt"],
"name": lambda elem: elem["name"],
"title": lambda elem: elem["title"],
"src": lambda elem: elem["src"].split('/')[-1],
"href": lambda elem: elem["href"].split('/')[-1],
"text": str,
}
extractable_attrs = collections.defaultdict(list, {
"IMG": ["alt", "title", "src"],
"A": ["title", "href", "text"],
"INPUT": ["name"]
})
return (attr_extractors[attr](elem)
for attr in extractable_attrs[elem.tagName()]
if attr in elem or attr == "text")
def tag_words_to_hints(self, words):
"""Take words and transform them to proper hints if possible."""
for candidate in words:
if not candidate:
continue
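            # Look for a run of at least three letters, but only use the
            # candidate as a hint if the run is 5-7 characters long.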
match = re.search('[A-Za-z]{3,}', candidate)
if not match:
continue
if 4 < match.end() - match.start() < 8:
yield candidate[match.start():match.end()].lower()
def any_prefix(self, hint, existing):
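        """Check if hint is a prefix of an existing hint, or vice versa."""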
return any(hint.startswith(e) or e.startswith(hint) for e in existing)
def filter_prefixes(self, hints, existing):
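        """Drop hints which prefix-conflict with an existing hint."""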
return (h for h in hints if not self.any_prefix(h, existing))
def new_hint_for(self, elem, existing, fallback):
"""Return a hint for elem, not conflicting with the existing."""
new = self.tag_words_to_hints(self.extract_tag_words(elem))
new_no_prefixes = self.filter_prefixes(new, existing)
fallback_no_prefixes = self.filter_prefixes(fallback, existing)
# either the first good, or None
return (next(new_no_prefixes, None) or
next(fallback_no_prefixes, None))
    def hint(self, elems):
        """Produce hint labels based on the html tags.
        Produce hint words based on the link text, with random words from
        the configured dictionary as a fallback.
        Args:
            elems: The elements to get hint strings for.
        Return:
            A list of hint strings, in the same order as the elements.
        """
self.ensure_initialized()
hints = []
used_hints = set()
words = iter(self.words)
for elem in elems:
hint = self.new_hint_for(elem, used_hints, words)
if not hint:
raise WordHintingError("Not enough words in the dictionary.")
used_hints.add(hint)
hints.append(hint)
return hints
| gpl-3.0 | -4,491,975,690,415,150,600 | 38.341176 | 97 | 0.552354 | false |
MobProgramming/MobTimer.Python | Infrastructure/CountdownManager.py | 1 | 1350 | import datetime
import time
class CountdownManager(object):
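    """Push periodic countdown updates to subscribers via the Tk event loop.
    Illustrative usage sketch (the Tk root and the print callback are
    hypothetical, not part of this module):
        root = tkinter.Tk()
        manager = CountdownManager(root)
        manager.subscribe_to_time_changes(
            lambda days, minutes, seconds: print(minutes, seconds))
        manager.set_countdown_duration(minutes=10, seconds=0)
        root.mainloop()
    """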
def __init__(self, root_tk_app):
self.start_time = time.time()
self.minutes = 0
self.seconds = 0
self.time_change_callbacks = []
self.count_down_total = datetime.timedelta(days=-1, minutes=0, seconds=0)
self.root_tk_app = root_tk_app
self.refresh_timer()
def set_countdown_duration(self, minutes, seconds):
self.start_time = time.time()
self.minutes = minutes
self.seconds = seconds
self.count_down_total = datetime.timedelta(minutes=minutes, seconds=seconds)
self.fire_time_change_callbacks()
def subscribe_to_time_changes(self, time_change_callback):
self.time_change_callbacks.append(time_change_callback)
def fire_time_change_callbacks(self):
end_time = time.time()
up_time = end_time - self.start_time
remaining_time = self.count_down_total - datetime.timedelta(seconds=(int(up_time)))
for callback in self.time_change_callbacks:
if callback:
callback(remaining_time.days, (remaining_time.seconds // 60) % 60, remaining_time.seconds % 60)
def refresh_timer(self):
self.fire_time_change_callbacks()
if self.root_tk_app:
            self.root_tk_app.after(500, self.refresh_timer)
| mit | -2,718,077,430,360,165,400 | 35.513514 | 111 | 0.640741 | false |
nbessi/l10n-switzerland | __unported__/l10n_ch_dta/payment.py | 1 | 1449 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Donors: Hasa Sàrl, Open Net Sàrl and Prisme Solutions Informatique SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class PaymentOrder(orm.Model):
_inherit = 'payment.order'
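    # Generated DTA files are stored as ir.attachment records pointing back
    # at the payment order; the domain below selects only those attachments.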
_columns = {'dta_ids': fields.one2many('ir.attachment',
'res_id',
domain=[('res_model', '=', 'payment.order'),
('name', 'like', 'DTA')])}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,097,814,641,466,798,000 | 44.21875 | 87 | 0.565308 | false |