repo_name stringlengths (5–100) | path stringlengths (4–299) | copies stringclasses (990 values) | size stringlengths (4–7) | content stringlengths (666–1.03M) | license stringclasses (15 values) | hash int64 (-9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean float64 (3.17–100) | line_max int64 (7–1k) | alpha_frac float64 (0.25–0.98) | autogenerated bool (1 class) |
---|---|---|---|---|---|---|---|---|---|---|
2013Commons/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/test/test_ER.py | 37 | 9414 | #._cv_part guppy.heapy.test.test_ER
# Tests of equivalence relations.
# These are also tested by test_Classifiers.
# These are some more tests, tailored esp. to the user view.
# (test_Classifiers was so slow already, so I start over)
# o Intended to be exhaustive wrt all ER's defined
#
# o Intersection of ER's
from guppy.heapy.test import support
class TestCase(support.TestCase):
pass
class FirstCase(TestCase):
if 1:
def test_1(self):
hp = self.heapy.Use
hp.reprefix = 'hp.'
a = hp.iso(1,'', 'asdf', 3.4, 3.7, 2)
ts = (hp.Type & hp.Size)
k = ts[a]
# print 'k', k, 'ts', ts
# From Sep 1-2 2005
# (h&dict).by(hp.Id.dictof&hp.Size)._get_partition()
# (h&dict).by((hp.Type&hp.Size).dictof&hp.Size)
# These require with_referrers of refby/via classifier
# after gc collect referrers graph will be empty
# (h).by(hp.Module.refby.dictof)
# (h).by(hp.Via.dictof)
# How to construct RCS / refby
#self.aseq(hp.Type.refby(int, list) , hp.Type.refby(list, int)
class C:
pass
di = hp.iso(C.__dict__, [])
import types
db = di.by('Rcs')
for i in (0, 1):
rk = repr(db[i].kind)
# print rk
ek = eval(rk)
self.aseq( ek, db[i].kind )
# print db & ek
self.aseq( db & ek , db[i] )
def test_2(self):
' Systematically test all kind constructors: '
# wrt repr and evaluation of repr
hp = self.heapy.Use
hp.reprefix = 'hp.'
class C:
pass
class T(object):
pass
c = C()
t = T()
import sys
for s in (
'hp.Class(C)',
'hp.Class(C).dictof',
'hp.Clodo(dictof=C)',
'hp.Clodo(dictof=T)',
'hp.Clodo(dictof=())',
'hp.Clodo(C)',
'hp.Clodo(T)',
'hp.Id(id(c))',
'hp.Module("sys")',
'hp.Rcs(hp.Clodo.sokind(int)(dictof=C))',
'hp.Size(hp.iso(c).indisize)',
'hp.Size(hp.iso(C).indisize).dictof',
'hp.Type(T)',
'hp.Type(int)',
'hp.Unity()',
'hp.Via()',
# Via is also specially tested below
):
x = eval(s)
rx = repr(x)
self.aseq(eval(rx), x)
for i, s in enumerate((
# Test Via construction.
# One test for each relation kind defined in Path except IDENTITY and RELSRC.
# In code order.
"hp.Via('_.x')",
"hp.Via('_[0]')",
"hp.Via('_.keys()[0]')",
"hp.Via('_->abc')",
"hp.Via('_.__dict__.keys()[0]')",
"hp.Via('_.f_locals[\"abc\"]')",
"hp.Via('_.f_locals [\"abc\"]')",
"hp.Via('_->f_valuestack[0]')",
)):
code = i + 1
x = eval(s)
rel = list(x.arg)[0]
self.aseq(rel.kind, code)
rx = repr(x)
self.aseq(eval(rx), x)
def test_3(self):
' Test of dictof '
# Test of dictof on something that requires memoization, i.e. Size, & (and)
hp = self.heapy.Use
class C:
pass
class T(object):
# The test works only if sizes of objects of class C and T differ.
# At first test, T() was 4 bytes smaller than C().
# This might be brittle with different systems.
# This is to make sure this diff gets significantly bigger:
__slots__ = '__dict__', 'a','b','c','d','e','f','g','h'
c = C()
t = T()
dn = {}
isod = hp.iso(c.__dict__, t.__dict__, dn)
for x in (
t, c):
X = x.__class__
for k in (
hp.Clodo(dictof=X),
hp.Class(X).dictof,
hp.Size(hp.iso(x).indisize).dictof,
hp.iso(x).bysize.kind.dictof,
(hp.iso(x).bysize.kind & hp.Class(X)).dictof,
hp.iso(x.__dict__).kind,
):
self.aseq(isod & k, hp.iso(x.__dict__))
# Test no-owner selection
for k in (
hp.Nothing.dictof,
):
self.aseq(isod & k, hp.iso(dn))
def test_4(self):
' Test of via '
# Esp. representation, construction
class C:
pass
c = C()
hp = self.heapy.Use
isod = hp.iso(c.__dict__)
x = isod.by('Via').kind
self.aseq(repr(x), "hpy().Via('.__dict__')")
#print repr(x)
def test_5(self):
' Non-systematic tests that came up around Sep 14 2005 '
class C:
pass
c = C()
d = {}
cref = [c]
cref.append(cref)
c.cref = cref
hp = self.heapy.Use
hp.reprefix = 'hp.'
# I thought these should be the same
a = hp.iso(C.__dict__, C, c, c.__dict__, d)&hp.Class.sokind(C).refdby
b = hp.iso(C.__dict__, C, c, c.__dict__, d)&hp.Clodo.sokind(C).refdby
self.aseq(a, b)
# This is a kind of nested refdby that has been a concern lately
# -- how to represent it
s = hp.iso(C.__dict__, C, c, c.__dict__, d).by(hp.Clodo.refdby.refdby)
# print s
for i in range(len(s)):
a = s[i].kind
ra = repr(a)
# print ra
era = eval(ra)
self.aseq(a, era)
self.aseq(s&era,
s[i])
import sys
p = sys.path
del sys
s = hp.iso(p)
x = s.by(hp.Module.dictof.refdby)
self.aseq(s&eval(repr(x.kind)), s)
def test_6(self):
' Test of .refdby on all others '
class C:
pass
c = C()
d = {}
cref = [c]
cref.append(cref)
c.cref = cref
hp = self.heapy.Use
hp.reprefix = 'hp.'
import sys
s = hp.iso(C.__dict__, C, c, c.__dict__, d, sys)
for pre in (
'Unity',
'Class',
'Clodo',
'Id',
'Module',
('Rcs', 0),
'Size',
'Type',
'Via')[:]:
if isinstance(pre, tuple):
pre, level = pre[:2]
else:
level = 1
er = getattr(hp, pre)
self.er_test(er, s, level)
def er_test(self, er, set, level=1):
# Tests what any eqv. rel. er should do
hp = self.heapy.Use
rer = repr(er)
# print rer
self.aseq(eval(rer), er)
for s in (set,):
sby = s.by(er)
sk = sby.kind
rsk = repr(sk)
# print rsk
ske = eval(rsk)
self.aseq(ske, sk)
self.aseq(s & sk, s)
self.aseq(s & ske, s)
# That it can do .refdby
er_refdby = er.refdby
# That it can do .dictof
er_dictof = er.dictof
if level > 0:
self.er_test(er_refdby, set, level - 1)
self.er_test(er_dictof, set, level - 1)
def test_7(self):
' Test of alternative sets w. biper '
hp = self.heapy.Use
class C:
pass
class D(C):
pass
class E(D):
pass
class T(object):
pass
class U(T):
pass
class V(U):
pass
c = C()
d = D()
e = E()
t = T()
u = U()
v = V()
s = hp.iso([], {}, c, d, e, t, u, v, d.__dict__)
for k in (
hp.Size(32),
hp.Class(D),
hp.Type(U),
hp.Class.sokind(D).refdby,
):
lt = k.alt('<')
le = k.alt('<=')
ge = k.alt('>=')
gt = k.alt('>')
ne = k.alt('!=')
assert (s & le) & (s & ge) == s & k
for a in ( lt, le, ge, gt, ne, le & ~k ):
s & a
# print s.by(a.biper)
# print s.by(a.biper)[0].kind
# print s.by(a.biper)[1].kind
#print s & a
# A bug specific for refdby
# occurred after gc when using biper
# noted Sep 21 2005
k=hp.Class.sokind(D).refdby
import gc
gc.collect()
a = s.by(k.alt('>=').biper)
b = s.by(k.alt('>=').biper)
# print a
self.assert_( hp.iso(d.__dict__) <= a[1] )
self.assert_( a == b )
gc.collect()
a = s.by(k.alt('<=').biper)
b = s.by(k.alt('<=').biper)
# print a
self.assert_( hp.iso(d.__dict__) <= a[0])
self.assert_( a == b )
def test_8(self):
' Test of findex and biper '
# added Nov 3 2005
hp = self.heapy.Use
class C:
pass
c=C()
li = []
di = {}
s = hp.iso(li, di, c, 1)
for k, i in (
(hp.Class(C), 1),
(hp.Type(dict), 0),
(hp.iso(c), 1),
(hp.iso(c, li), 1),
(hp.Type(dict) | hp.Class(C), 0)
):
p = s.by(k.biper)
# print p
self.aseq(p[i].kind.fam.classifier.kinds[0], k)
def test_9(self):
' Test the subrelation relation '
from guppy import hpy
ernames = ['Class', 'Clodo', 'Id', 'Idset',
'Module', 'Rcs', 'Size', 'Type',
'Unity']
hp=hpy()
ers = [(name, getattr(hp, name)) for name in ernames]
ers.append(('Size&Type', hp.Size&hp.Type))
from StringIO import StringIO
f = StringIO()
print >>f, ''.ljust(10),
for b in ers:
print >>f, b[0].ljust(7),
print >>f
for a in ers:
print >>f, a[0].ljust(10),
for b in ers:
print >>f, str((a[1] < b[1]))[:1].ljust(7),
print >>f
self.aseq( f.getvalue(), """\
Class Clodo Id Idset Module Rcs Size Type Unity Size&Type
Class F F F F F F F T T F
Clodo T F F F F F F T T F
Id F F F F F F F F T F
Idset F F F F F F F F T F
Module F F F F F F F F T F
Rcs F F F F F F F F T F
Size F F F F F F F F T F
Type F F F F F F F F T F
Unity F F F F F F F F F F
Size&Type F F F F F F T T T F
""")
def test_main(debug = 0):
support.run_unittest(FirstCase, debug)
if __name__ == "__main__":
from guppy.heapy.Remote import off
off()
test_main()
| apache-2.0 | 7,436,025,212,766,090,000 | 19.92 | 92 | 0.488103 | false |
gautam1858/tensorflow | tensorflow/contrib/tpu/python/tpu/tpu_sharding.py | 42 | 9554 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Helper library for sharding during TPU compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import tensor_shape
_DEFAULT_NUMBER_OF_SHARDS = 1
_DEFAULT_SHARD_DIMENSION = 0
# TODO(b/36777903) change other parts of tpu.py to use this class.
class ShardingPolicy(object):
"""An object use to hold the sharding policy for a Tensor.
"""
def __init__(self):
self._number_of_shards = None
self._shard_dimension = None
self._frozen = False
def __str__(self):
if self.number_of_shards is None or self.shard_dimension is None:
return "ShardingPolicy(unset)"
else:
return ("ShardingPolicy(%d shards dimension %d)" %
(self.number_of_shards, self.shard_dimension))
def _fill_default_values(self):
if self._number_of_shards is None:
self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
if self._shard_dimension is None:
self._shard_dimension = tensor_shape.as_dimension(
_DEFAULT_SHARD_DIMENSION)
def freeze(self):
"""Prevents further modification to the sharding policy.
Any values that have not been set when freeze is called are set to
defaults. If the ShardingPolicy is already frozen, this is a NoOp.
"""
if not self._frozen:
self._fill_default_values()
self._frozen = True
@property
def number_of_shards(self):
"""Returns the number of shards in the policy or None if unspecified."""
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards for the current policy.
If the policy has been frozen then number_of_shards must match the
existing setting.
Args:
number_of_shards: The number of shards to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_shards
differs from the frozen value; or number_of_shards <= 0.
"""
if self._frozen:
if self._number_of_shards != number_of_shards:
raise ValueError(
"Can't set sharding policy to use %d shards since it has been "
"frozen to use %d." % (number_of_shards, self._number_of_shards))
else:
if number_of_shards > 0:
self._number_of_shards = number_of_shards
else:
raise ValueError(
"Can't set sharding policy to use %s shards; value must be >0",
str(number_of_shards))
@property
def shard_dimension(self):
"""Returns the shard dimension of the policy or None if unspecified."""
return self._shard_dimension
def set_shard_dimension(self, shard_dimension):
"""Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
"""
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError(
"Can't set shard dimension to %d since it has been frozen to "
"use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
def merge(self, other):
"""Merges the policy of another policy into the current policy.
Args:
other: The policy to merge into this one.
Raises:
ValueError: If this policy has been frozen and the merge conflicts with
the frozen policy.
"""
if other.number_of_shards is not None:
self.set_number_of_shards(other.number_of_shards)
if other.shard_dimension is not None:
self.set_shard_dimension(other.shard_dimension)
def get_sharded_shape(self, shape, shard_index=None):
"""Returns the shape of a shard of a full Tensor.
When given the shape of a 'full-size' Tensor, returns the shape of
the sub-Tensor after it has been sharded. Freezes the policy if it
has not yet been frozen.
Args:
shape: The shape of the full-size Tensor to be sharded.
shard_index: The index of the shard whose shape should be returned.
shard_index can be None for sharding policies that use the same
shape for every shard.
Returns:
The shape of the sharded version of the Tensor.
Raises:
ValueError: If shard_index is None when shards are of different
shapes; or shard_index is not None and
!(0<=shard_index<number_of_shards); or shape does not have at
least self.shard_dimension+1 dimensions; or the value of
shape's shard dimension is not a multiple of
self.number_of_shards
"""
if self._shard_dimension is None or self._number_of_shards is None:
# Don't raise an error if the config is unset.
return None
if shard_index is not None:
if shard_index < 0 or shard_index >= self.number_of_shards:
raise ValueError("shard_index %d, but must be in [0,%d)." %
(shard_index, self._number_of_shards))
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
if dims[self._shard_dimension] is None:
raise ValueError("shape %s must have a fixed size for dimension %d "
"that is known at graph construction time." %
(shape.as_list(), self._shard_dimension))
if (dims[self._shard_dimension] % self._number_of_shards) != 0:
raise ValueError("shape %s cannot be sharded %d ways along dimension %d" %
(shape.as_list(), self._number_of_shards,
self._shard_dimension))
dims[self._shard_dimension] /= self._number_of_shards
return tensor_shape.as_shape(dims)
def _unshard_shape(self, shape):
"""Return the unsharded shape that would generate a given sharded shape.
Args:
shape: the sharded shape to unshard
Returns:
The unsharded shape.
Raises:
ValueError: if shape is unknown or does not contain
self.shard_dimension
TypeError: if shape is not convertible to a TensorShape
"""
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError("shape must be a specified shape not Unknown")
if ndims <= self._shard_dimension:
raise ValueError("shape %s does not contain shard_dimension %d" %
(shape.as_list(), self._shard_dimension))
dims = shape.as_list()
dims[self._shard_dimension] *= self._number_of_shards
return tensor_shape.as_shape(dims)
def get_unsharded_shape(self, shapes):
"""Returns the shape of an unsharded Tensor given a list of shards.
When given a list of shapes of shards, returns the shape of the
unsharded Tensor that would generate the shards. Sets defaults for the
policy if number_of_shards or shard_dimension is None.
Args:
shapes: The shapes of the Tensor shards to be combined.
Returns:
The shape of the unsharded version of the Tensor.
Raises:
ValueError: if shapes is not a list of length
self.number_of_shards; or any element of shapes is not a valid
shape consistent with the sharding policy; or the list of
shapes is not a valid sharding of a full shape.
TypeError: if an element of shapes is not convertible to a
TensorShape
"""
self._fill_default_values()
if len(shapes) != self.number_of_shards:
raise ValueError(
"shapes is %s but must be a list of length number_of_shards=%d" % (
str(shapes), self.number_of_shards))
unsharded_shapes = [self._unshard_shape(s) for s in shapes]
for i in xrange(self.number_of_shards - 1):
if not unsharded_shapes[i].is_compatible_with(
unsharded_shapes[self.number_of_shards - 1]):
raise ValueError(
"sharded shapes %s are not consistent shards of a full shape "
"sharded %d ways along dimension %d" % (
str(shapes), self.number_of_shards, self.shard_dimension))
return unsharded_shapes[0]
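# Worked example (editorial sketch, not part of the original module; the names
# below are illustrative only), tracing the shape arithmetic above for a policy
# of 2 shards along dimension 0:
#
#   policy = ShardingPolicy()
#   policy.set_number_of_shards(2)
#   policy.set_shard_dimension(0)
#   policy.get_sharded_shape([4, 3])                # -> TensorShape([2, 3])
#   policy.get_unsharded_shape([[2, 3], [2, 3]])    # -> TensorShape([4, 3])
#
# get_sharded_shape divides the shard dimension by number_of_shards (raising
# ValueError when it does not divide evenly); get_unsharded_shape multiplies it
# back and checks that all shard shapes are compatible with each other.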
| apache-2.0 | 5,061,569,037,642,996,000 | 36.762846 | 80 | 0.657735 | false |
Kussie/HTPC-Manager | libs/sqlobject/constraints.py | 10 | 1868 | """
Constraints
"""
class BadValue(ValueError):
def __init__(self, desc, obj, col, value, *args):
self.desc = desc
self.col = col
# I want these objects to be garbage-collectable, so
# I just keep their repr:
self.obj = repr(obj)
self.value = repr(value)
fullDesc = "%s.%s %s (you gave: %s)" \
% (obj, col.name, desc, value)
ValueError.__init__(self, fullDesc, *args)
def isString(obj, col, value):
if not isinstance(value, str):
raise BadValue("only allows strings", obj, col, value)
def notNull(obj, col, value):
if value is None:
raise BadValue("is defined NOT NULL", obj, col, value)
def isInt(obj, col, value):
if not isinstance(value, (int, long)):
raise BadValue("only allows integers", obj, col, value)
def isFloat(obj, col, value):
if not isinstance(value, (int, long, float)):
raise BadValue("only allows floating point numbers", obj, col, value)
def isBool(obj, col, value):
if not isinstance(value, bool):
raise BadValue("only allows booleans", obj, col, value)
class InList:
def __init__(self, l):
self.list = l
def __call__(self, obj, col, value):
if value not in self.list:
raise BadValue("accepts only values in %s" % repr(self.list),
obj, col, value)
class MaxLength:
def __init__(self, length):
self.length = length
def __call__(self, obj, col, value):
try:
length = len(value)
except TypeError:
raise BadValue("object does not have a length",
obj, col, value)
if length > self.length:
raise BadValue("must be shorter in length than %s"
% self.length,
obj, col, value)
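# Usage sketch (editorial addition, not part of the original module): each
# constraint is a plain callable that SQLObject invokes as f(obj, col, value)
# when a column is assigned, raising BadValue on violation. `person` and
# `name_col` below are hypothetical; only col.name is used when building the
# BadValue message.
#
#   notNull(person, name_col, None)               # raises BadValue("is defined NOT NULL", ...)
#   MaxLength(10)(person, name_col, "x" * 20)     # raises BadValue("must be shorter in length than 10", ...)
#   InList(["a", "b"])(person, name_col, "a")     # passes silently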
| mit | -98,339,858,472,114,750 | 28.650794 | 77 | 0.558351 | false |
jsirois/commons | src/python/twitter/common/metrics/rate.py | 16 | 2824 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import time
from twitter.common.quantity import Amount, Time
from .gauge import NamedGauge, gaugelike, namablegauge
class Rate(NamedGauge):
"""
Gauge that computes a windowed rate.
"""
@staticmethod
def of(gauge, name = None, window = None, clock = None):
kw = {}
if window: kw.update(window = window)
if clock: kw.update(clock = clock)
if name:
if not gaugelike(gauge):
raise TypeError('Rate.of must take a Gauge-like object! Got %s' % type(gauge))
return Rate(name, gauge, **kw)
else:
if not namablegauge(gauge):
raise TypeError('Rate.of must take a namable Gauge-like object if no name specified!')
return Rate(gauge.name(), gauge, **kw)
def __init__(self, name, gauge, window = Amount(1, Time.SECONDS), clock = time):
"""
Create a gauge using name as a base for a <name>_per_<window> sampling gauge.
name: The base name of the gauge.
gauge: The gauge to sample
window: The window over which the samples should be measured (default 1 second.)
"""
self._clock = clock
self._gauge = gauge
self._samples = []
self._window = window
NamedGauge.__init__(self, '%s_per_%s%s' % (name, window.amount(), window.unit()))
def filter(self, newer_than=None):
"""
Filter the samples to only contain elements in the window.
"""
if newer_than is None:
newer_than = self._clock.time() - self._window.as_(Time.SECONDS)
self._samples = [sample for sample in self._samples if sample[0] >= newer_than]
def read(self):
now = self._clock.time()
self.filter(now - self._window.as_(Time.SECONDS))
new_sample = self._gauge.read()
self._samples.insert(0, (now, new_sample))
if len(self._samples) == 1:
return 0
last_sample = self._samples[-1]
dy = new_sample - last_sample[1]
dt = now - last_sample[0]
return 0 if dt == 0 else dy / dt
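# Usage sketch (editorial addition, not part of the original module; assumes
# twitter.common.metrics provides AtomicGauge, which exposes name() and add()):
#
#   queries = AtomicGauge('queries')
#   qps = Rate.of(queries, window=Amount(1, Time.SECONDS))
#   queries.add(10)
#   qps.read()   # 0 on the first sample; later reads return d(value)/d(time)
#
# read() keeps a sliding list of (timestamp, sample) pairs no older than the
# window and reports the slope between the newest and oldest retained sample.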
| apache-2.0 | -3,644,793,311,744,688,000 | 38.222222 | 100 | 0.590652 | false |
hxddh/youtube-dl | youtube_dl/extractor/mofosex.py | 87 | 1686 | from __future__ import unicode_literals
import os
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
compat_urllib_request,
)
class MofosexIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<id>[0-9]+)/.*?\.html)'
_TEST = {
'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
'md5': '1b2eb47ac33cc75d4a80e3026b613c5a',
'info_dict': {
'id': '5018',
'ext': 'mp4',
'title': 'Japanese Teen Music Video',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = 'http://www.' + mobj.group('url')
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, 'title')
video_url = compat_urllib_parse_unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
format = "-".join(format)
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'title': video_title,
'url': video_url,
'ext': extension,
'format': format,
'format_id': format,
'age_limit': age_limit,
}
| unlicense | 2,566,813,040,023,743,500 | 30.811321 | 131 | 0.551008 | false |
714168586/Test | ops-docs/python/dbuser.py | 1 | 2974 | #!/usr/bin/env python
#coding=utf-8
import json
import yaml
from aliyunsdkcore import client
from aliyunsdkrds.request.v20140815 import CreateAccountRequest,GrantAccountPrivilegeRequest,DeleteAccountRequest,DescribeAccountsRequest
# Add a user account
def AddUser(DBInstanceId,username,passwd):
accessKeyId, accessKeySecret = "", ""
clt = client.AcsClient(accessKeyId,accessKeySecret,"cn-hangzhou")
request=CreateAccountRequest.CreateAccountRequest()
request.set_DBInstanceId(DBInstanceId)
request.set_AccountName(username)
request.set_AccountPassword(passwd)
result = clt.do_action_with_exception(request)
print result
def add(name,info):
passwd = info['passwd']
for ID in info['DB']:
instanceid=ID['ID']
print '==========create user with AddUser========='
print name, passwd, instanceid
AddUser(instanceid,name,passwd)
# Grant privileges
def Grant(DBInstanceId,dbname,username,privilege):
accessKeyId, accessKeySecret = "LTAIMplK007fM3iy", "Yy6SoWHIlNXrDg9JsGRNiXD2omX4yc"
clt = client.AcsClient(accessKeyId,accessKeySecret,"cn-hangzhou")
request=GrantAccountPrivilegeRequest.GrantAccountPrivilegeRequest()
request.set_DBName(dbname)
request.set_DBInstanceId(DBInstanceId)
request.set_AccountName(username)
request.set_AccountPrivilege(privilege)
result = clt.do_action(request)
print result
# Account privilege: ReadOnly (read-only) or ReadWrite (read-write)
def grant(name,info):
passwd = info['passwd']
for ID in info['DB']:
instanceid = ID['ID']
for db in ID['DBNAME']:
print '==========grant with grant right========='
print name, passwd, instanceid, db
Grant(instanceid,db,name,'ReadOnly')
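# Editorial note: the db.yaml file consumed by add()/grant() is not included in
# this snapshot. Judging only from how `info` is indexed above, each per-user
# entry would need roughly this shape (example values are made up):
#
#   fengshang:
#     passwd: "secret"
#     DB:
#       - ID: "rm-xxxxxxxx"
#         DBNAME: ["db1", "db2"]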
# List user accounts
def ListUser(DBInstanceId):
accessKeyId, accessKeySecret = "", ""
clt = client.AcsClient(accessKeyId, accessKeySecret, "cn-hangzhou")
request = DescribeAccountsRequest.DescribeAccountsRequest()
request.set_accept_format('json')
request.set_DBInstanceId(DBInstanceId)
result = clt.do_action(request)
res = json.loads(result)
users = res['Accounts']['DBInstanceAccount']
for user in users:
username = user['AccountName']
print username
# Delete a user account
def DelUser(DBInstanceId,username):
accessKeyId, accessKeySecret = "", ""
clt = client.AcsClient(accessKeyId,accessKeySecret,"cn-hangzhou")
request=DeleteAccountRequest.DeleteAccountRequest()
request.set_DBInstanceId(DBInstanceId)
request.set_AccountName(username)
result = clt.do_action(request)
print result
if __name__ == '__main__':
# Databases on the instance
list = ['rds72y31682d1mq79tmy']
username = 'fengshang'
for i in list:
ListUser(i)
#DelUser(i,username)
# Add users
# s = yaml.load(file('db.yaml'))
# for name,info in s.items():
# # Add user
# add(name,info)
# # Grant privileges
# grant(name, info)
| lgpl-2.1 | -5,614,617,662,496,829,000 | 30.758242 | 137 | 0.686505 | false |
bkahlert/seqan-research | raw/workshop11/seqan-trunk/misc/trac_plugins/DocLinks/doc_links.py | 2 | 4362 | """Seqan Doc Links for Trac.
Version 0.1.
Copyright (C) 2010 Manuel Holtgrewe
Install by copying this file into the plugins directory of your trac
work directory. In your trac.ini, you can use something like this
(the following also shows the defaults).
[seqan_doc_links]
prefix = seqan
base_url = http://www.seqan.de/dddoc/html/
Use something like this to test the plugin:
* {{{[seqan:Page.Sequences]}}} [seqan:Page.Sequences]
* {{{seqan:Class.Finder}}} seqan:Class.Finder
* {{{seqan:"Concept.Simple Type"}}} seqan:"Concept.Simple Type"
* {{{seqan:"Spec.Chunk Pool Allocator}}} seqan:"Spec.Chunk Pool Allocator"
"""
import urllib
import sys
import trac.wiki
import genshi.builder as gb
# Map categories to better names.
CATEGORY_NAME_MAP = {
'Concept': 'concept',
'Class' : 'class',
'Spec' : 'specialization',
'Shortcut': 'shortcut',
'Function': 'function',
'Metafunction': 'metafunction',
'Tag': 'tag',
'Adaption': 'adaption',
}
def getFilename(cat, item):
"""Get the filename that dddoc would create.
Args:
cat String, category of the link.
item String, name of the item.
Returns:
File name of the categorized item.
"""
return cat.upper() + escapeFiles(item) + ".html"
def escapeFiles(text):
"""Escape the file name as dddoc would do it.
Args:
text String with the text to escape.
Returns:
Escaped text.
"""
text = text.replace("_", "__")
ret = ""
for i in range(len(text)):
if (text[i] >= 'A') and (text[i] <= 'Z'):
ret += "_"
ret += text[i]
ret = ret.replace("\t", "_09")
ret = ret.replace("\n", "_0a")
ret = ret.replace("!", "_21")
ret = ret.replace("\"", "_22")
ret = ret.replace("#", "_23")
ret = ret.replace("$", "_24")
ret = ret.replace("%", "_25")
ret = ret.replace("&", "_26")
ret = ret.replace("'", "_27")
ret = ret.replace("(", "_28")
ret = ret.replace(")", "_29")
ret = ret.replace("*", "_2a")
ret = ret.replace("+", "_2b")
ret = ret.replace("/", "_2f")
ret = ret.replace(":", "_3a")
ret = ret.replace(",", "_2c")
ret = ret.replace("<", "_3c")
ret = ret.replace(">", "_3e")
ret = ret.replace("?", "_3f")
ret = ret.replace("\\", "_5c")
ret = ret.replace("|", "_7c")
ret = ret.replace(" ", "+")
if (len(ret) == 0) or (ret[0] == '_'): return ret
else: return '.'+ret
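# Worked examples (editorial addition, derived by tracing the two helpers above;
# they are not part of the original plugin):
#
#   getFilename("Class", "Finder")          == "CLASS_Finder.html"
#   getFilename("Concept", "Simple Type")   == "CONCEPT_Simple+_Type.html"
#   getFilename("Function", "iso")          == "FUNCTION.iso.html"
#
# i.e. the category is upper-cased, upper-case letters in the item get an "_"
# prefix, spaces become "+", and items that do not start with an escape
# sequence are prefixed with ".".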
class SeqanDocsSyntaxProvider(trac.core.Component):
"""Expands seqan:<Category>.<EntryName> links."""
trac.core.implements(trac.wiki.IWikiSyntaxProvider)
SECTION_NAME = 'seqan_doc_links'
DEFAULT_PREFIX = 'seqan'
DEFAULT_BASE_URL = 'http://www.seqan.de/dddoc/html/'
def __init__(self):
# Set defaults.
self.prefix = self.DEFAULT_PREFIX
self.base_url = self.DEFAULT_BASE_URL
# Parse configuration from trac.ini config file.
for option in self.config.options(self.SECTION_NAME):
if option[0] == 'prefix':
self.prefix = option[1]
if option[0] == 'base_url':
self.base_url = option[1]
def get_wiki_syntax(self):
"""Method from IWikiSyntaxProvider.
Returns empty list, we do not implement any."""
return []
def get_link_resolvers(self):
"""Method from IWikiSyntaxProvider.
Returns iterable (list) of (prefix, function) pairs.
"""
return [(self.prefix, self.format_doc_link)]
def format_doc_link(self, formatter, ns, target, label):
"""Function to perform formatting for seqan:XYZ links.
This roughly follows [1].
[1] http://trac.edgewall.org/wiki/TracDev/IWikiSyntaxProviderExample
"""
# The following is a heuristic for "no alternative label".
if ns in label and target in label:
if '.' in target:
category, item = tuple(target.split('.', 1))
label = item
else:
label = target
# Now, use dddoc's logic to generate the appropriate file name for the target.
file_name = getFilename(*target.split('.', 1))
span = [gb.tag.span(' ', class_='icon'), label]
title = ' "%s" in SeqAn documentation.' % target
return gb.tag.a(span, class_='ext-link',
href=self.base_url + file_name, title=title)
| mit | 6,089,659,766,521,040,000 | 28.08 | 76 | 0.578863 | false |
paolodedios/tensorflow | tensorflow/python/distribute/cross_device_ops_test.py | 8 | 49240 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
CollectiveReplicaLauncher = cross_device_utils.CollectiveReplicaLauncher
CommunicationImplementation = collective_util.CommunicationImplementation
ReduceOp = reduce_util.ReduceOp
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
IndexedSlices = indexed_slices.IndexedSlices
def make_per_replica_value(value, devices):
"""Creates a `PerReplica` object whose values reside in `devices`.
Args:
value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable
that takes one argument (`device_idx`) and should return the value that is
going to be created on devices[device_idx].
devices: a list of device strings to create `PerReplica` values on.
Returns:
A `PerReplica` object.
"""
values = []
for device_idx, device in enumerate(devices):
if callable(value):
v = value(device_idx)
elif isinstance(value, list):
v = value[device_idx]
else:
v = value
if isinstance(v, IndexedSlicesValue):
with ops.device(device):
values.append(
IndexedSlices(
values=array_ops.identity(v.values),
indices=array_ops.identity(v.indices),
dense_shape=array_ops.identity(v.dense_shape)))
else:
with ops.device(device):
values.append(array_ops.identity(v))
return value_lib.PerReplica(values)
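# Illustrative call (editorial comment, not part of the original test): with
# devices = ["/gpu:0", "/gpu:1"], make_per_replica_value(lambda idx: float(idx),
# devices) yields a PerReplica whose component on devices[i] is the tensor i.0,
# while passing a plain list places list[i] on devices[i].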
def enable_collective_ops():
"""Enable collectives in the current process."""
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
context.context().configure_collective_ops(
collective_leader="'/job:worker/replica:0/task:0'")
config_proto = config_pb2.ConfigProto()
config_proto.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_resolver.cluster_spec().as_cluster_def(),
default_session_config=config_proto,
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer)
context.context().enable_collective_ops(server_def)
# Recover default flag values.
CollectiveReplicaLauncher._prefer_unique_instance_key = True
CollectiveReplicaLauncher._prefer_ordering_token = False
class MultiProcessPoolRunner():
def __init__(self, num_processes):
cluster_spec_dict = multi_worker_test_base.create_cluster_spec(
num_workers=num_processes)
self.runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec_dict)
# Global MultiProcessPoolRunners that can be shared by test cases to avoid
# expensive initialization cost of TensorFlow in new processes.
#
# Note that they have to be globals and can't be owned by test classes because
# fn usually captures the test class instance, and a test class
# instance can't be pickled if it has mpr as a member (it is not allowed to
# pickle Process objects).
# TODO(crccw): Use `num_workers` combination once it is ready.
global_mpr_2p = MultiProcessPoolRunner(num_processes=2)
global_mpr_1p = MultiProcessPoolRunner(num_processes=1)
def get_global_mpr(num_processes):
if num_processes == 1:
return global_mpr_1p.runner
elif num_processes == 2:
return global_mpr_2p.runner
else:
raise ValueError("get_global_mpr: num_processes must be 1 or 2, got %d" %
num_processes)
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
# Enabling collectives can be done in "setUpClass", but requires using
# different collective_keys in different tests as collectives are reused
# across tests. Always resetting collective ops before each test offers
# better test isolation.
global_mpr_1p.runner.run(enable_collective_ops)
global_mpr_2p.runner.run(enable_collective_ops)
def make_collective(self, num_processes, gpu_per_process):
"""Returns collectives and other info to be used in tests.
Args:
num_processes: an integer indicating the number of processes that
participate in the collective.
gpu_per_process: number of GPUs (0 if no GPUs) used by each process.
Returns:
A tuple of (collective, devices, pid) where collective is a instance
of `CollectiveAllReduce`, devices are a list of local devices (str)
attached to the current process, and pid is the id of this process among
all participant processes.
"""
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
devices = [
"/job:worker/replica:0/task:%d/device:CPU:0" % cluster_resolver.task_id
]
if gpu_per_process > 0:
devices = [
"/job:worker/replica:0/task:%d/device:GPU:%d" %
(cluster_resolver.task_id, i) for i in range(gpu_per_process)
]
group_size = num_processes * len(devices)
collective = cross_device_ops_lib.CollectiveAllReduce(
devices=devices, group_size=group_size)
return collective, devices, cluster_resolver.task_id
def as_list(self, value):
"""An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.
The reason it exists is to provide a uniformed view of returned value of
"reduce" calls, especially across tf.function boundaries. Returning
`Mirrored` from a tf.function will only evaluate the primary value, which
makes collective ops of non-primary device being pruned, and will eventually
cause hanging.
Args:
value: the value to convert, can be one of `Mirrored`, `Tensor` and
`IndexedSlices`.
Returns:
A list of `Tensor` or `IndexedSlices`.
"""
if isinstance(value, ops.Tensor):
return [value]
elif isinstance(value, IndexedSlices):
return [value]
elif isinstance(value, value_lib.Mirrored):
return value.values
else:
raise ValueError("unwrap: unsupported input type: %s" % type(value))
RunOptions = collections.namedtuple( # pylint: disable=invalid-name
"RunOptions",
[
"mode", # A list of str from ["eager", "func_graph"]
"num_processes",
"gpus_per_process",
"reduce_op",
"communication_options",
"prefer_unique_instance_key",
])
RunOptions.__new__.__defaults__ = (["eager",
"func_graph"], 2, 0, ReduceOp.SUM,
collective_util.Options(), True)
def reduce_and_verify(self, inputs, expect, options):
"""Reduce the given `inputs` and verify the output matches `expect`.
Args:
inputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be
fed to i-th replica.
expect: a `Tensor` or `IndexedSlices`. This should be the expected value
for one replica.
options: a `RunOptions` instance.
"""
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
options.prefer_unique_instance_key)
collective, devices, pid = self.make_collective(options.num_processes,
options.gpus_per_process)
def reduce_fn():
value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]
per_replica_value = make_per_replica_value(value_fn, devices)
reduced_values = collective.reduce(options.reduce_op, per_replica_value,
per_replica_value,
options.communication_options)
if options.gpus_per_process > 1:
self.assertIsInstance(reduced_values, value_lib.Mirrored)
reduced_values = self.as_list(reduced_values)
self.assertAllEqual(devices, [v.device for v in reduced_values])
return [ops.convert_to_tensor(v) for v in reduced_values]
per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)
if "eager" in options.mode:
got = reduce_fn()
self.assertAllClose(got, per_replica_expect)
if "func_graph" in options.mode:
got = def_function.function(reduce_fn)()
self.assertAllClose(got, per_replica_expect)
get_global_mpr(options.num_processes).run(replica_fn)
def batch_reduce_and_verify(self, inputs, expect, options):
"""Batch reduce the given `inputs` and verify the output matches `expect`.
Args:
inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th
value will be fed to i-th replica.
expect: a list of `Tensor` or `IndexedSlices`. This should be the expected
value for one replica.
options: a `RunOptions` instance.
"""
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
options.prefer_unique_instance_key)
collective, devices, pid = self.make_collective(options.num_processes,
options.gpus_per_process)
def batch_reduce_fn():
batch_size = len(inputs[0])
value_dst_pairs = []
for i in range(batch_size):
def value_fn(device_idx, idx=i):
return inputs[pid * len(devices) + device_idx][idx]
per_replica_value = make_per_replica_value(value_fn, devices)
value_dst_pairs.append((per_replica_value, per_replica_value))
reduced_values = collective.batch_reduce(options.reduce_op,
value_dst_pairs,
options.communication_options)
if options.gpus_per_process > 1:
for v in reduced_values:
self.assertIsInstance(v, value_lib.Mirrored)
reduced_values = [self.as_list(v) for v in reduced_values]
for v in reduced_values:
self.assertAllEqual(devices, [t.device for t in v])
return nest.map_structure(ops.convert_to_tensor, reduced_values)
per_replica_expect = nest.map_structure(
lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)
if "eager" in options.mode:
got = batch_reduce_fn()
self.assertAllClose(got, per_replica_expect)
if "func_graph" in options.mode:
got = def_function.function(batch_reduce_fn)()
self.assertAllClose(got, per_replica_expect)
get_global_mpr(options.num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [1.0, 2.0, 3.0, 4.0]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = 1.0
if group_size == 2:
expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
elif group_size == 4:
expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5
self.reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[5.], [6.]], indices=[7, 8], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[3, 2], dense_shape=[10, 1]),
]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1])
elif group_size == 2:
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1])
elif group_size == 4:
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.]],
indices=[0, 1, 1, 2, 7, 8, 3, 2],
dense_shape=[10, 1])
self.reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(prefer_unique_instance_key=[True, False]))
def testReduceSparseVariableLength(self, prefer_unique_instance_key):
# One device per process, 2 processes, 2 replicas in total.
inputs = [
IndexedSlicesValue(values=[[1.]], indices=[0], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[2.], [3.], [4.]], indices=[0, 1, 2], dense_shape=[10, 1]),
]
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.]],
indices=[0, 0, 1, 2],
dense_shape=[10, 1])
self.reduce_and_verify(
inputs,
expect,
self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=2,
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=prefer_unique_instance_key))
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testBatchReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [1.0, 2.0]
if group_size == 2:
expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]
elif group_size == 4:
expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]
self.batch_reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testBatchReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = ([
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[9.], [10.]], indices=[3, 4], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[11.], [12.]], indices=[3, 4], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[13.], [14.]], indices=[8, 9], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[15.], [16.]], indices=[3, 4], dense_shape=[5, 1])
])
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [
IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
]
if group_size == 2:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 0, 1],
dense_shape=[5, 1])
]
elif group_size == 4:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.], [9.], [10.], [13.], [14.]],
indices=[0, 1, 1, 2, 3, 4, 8, 9],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.], [11.], [12.], [15.], [16.]],
indices=[1, 2, 0, 1, 3, 4, 3, 4],
dense_shape=[5, 2])
]
self.batch_reduce_and_verify(inputs, expect, options)
def testBatchReduceMixedDenseAndSparse(self):
options = self.RunOptions(
num_processes=2,
gpus_per_process=0,
reduce_op=ReduceOp.SUM,
mode=["func_graph"])
inputs_data = [
[
1.0, 2.0,
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
],
[
3.0, 4.0,
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
],
]
expect = [
4.0, 6.0,
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 0, 1],
dense_shape=[5, 1])
]
self.batch_reduce_and_verify(inputs_data, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
))
def testAllReduceDense(self, num_processes, required_gpus, implementation,
reduce_op):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = constant_op.constant(1.0)
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [1.0 * group_size] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [1.0] * len(devices)
self.assertAllClose(got, expect)
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (constant_op.constant(1.0), constant_op.constant(2.0))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [(1.0 * group_size, 2.0 * group_size)] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [(1.0, 2.0)] * len(devices)
self.assertAllClose(got, expect)
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
))
def testAllReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = IndexedSlices(
values=array_ops.identity([[1.]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [IndexedSlices([[1. * group_size]], [0], [5, 1])
] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [IndexedSlices([[1.]], [0], [5, 1])] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (IndexedSlices(
array_ops.identity([[1.]]), array_ops.identity([0]),
array_ops.identity([5, 1])),
IndexedSlices(
array_ops.identity([[3.]]), array_ops.identity([2]),
array_ops.identity([5, 1])))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [(IndexedSlices([[1. * group_size]], [0], [5, 1]),
IndexedSlices([[3. * group_size]], [2], [5, 1]))
] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [(IndexedSlices([[1.]], [0], [5, 1]),
IndexedSlices([[3.]], [2], [5, 1]))] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=0,
implementation=CommunicationImplementation.AUTO,
reduce_op=ReduceOp.SUM))
def testAllReduceMixedDenseAndSparse(self, num_processes, required_gpus,
implementation, reduce_op):
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (IndexedSlices(
array_ops.identity([[1.]]), array_ops.identity([0]),
array_ops.identity([5, 1])), array_ops.identity(1.0),
IndexedSlices(
array_ops.identity([[3.]]), array_ops.identity([2]),
array_ops.identity([5, 1])), array_ops.identity(2.0))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
expect = [
(IndexedSlices([[1. * group_size]], [0], [5, 1]), 1.0 * group_size,
IndexedSlices([[3. * group_size]], [2], [5, 1]), 2.0 * group_size)
] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
axis=[0, 1, 2],
func_mode=["eager", "func_graph"],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testAllGatherSameShape(self, num_processes, required_gpus, implementation,
func_mode, axis, prefer_unique_instance_key):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
value = constant_op.constant([[[1, 2], [1, 2]]], dtype=dtypes.float32)
def gather_fn():
per_replica_value = make_per_replica_value(value, devices)
gathered_values = collective._gather(
per_replica_value, per_replica_value, axis=axis, options=options)
gathered_values = self.as_list(gathered_values)
# Skip checking devices in eager. In eager the device attribute doesn't
# reflect the actual device of the tensor.
if not context.executing_eagerly():
self.assertAllEqual(devices, [v.device for v in gathered_values])
return [ops.convert_to_tensor(v) for v in gathered_values]
group_size = num_processes * (required_gpus or 1)
expect = array_ops.concat([value] * group_size, axis=axis)
per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)
if func_mode == "eager":
result = gather_fn()
self.assertAllClose(result, per_replica_expect)
if func_mode == "func_graph":
result = def_function.function(gather_fn)()
self.assertAllClose(result, per_replica_expect)
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[CommunicationImplementation.RING]))
def testCollectiveV2ControlFlow(self, num_processes, required_gpus,
implementation):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = True
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
value = make_per_replica_value(constant_op.constant([1.]), devices)
@def_function.function
def reduce_fn():
def cond_body():
reduced = collective.reduce(reduce_util.ReduceOp.SUM, value, value,
options)
return math_ops.add_n(self.as_list(reduced)) / len(devices)
return control_flow_ops.cond(
array_ops.identity(False), cond_body, cond_body)
num_replicas = num_processes * len(devices)
self.assertAllEqual(reduce_fn(), [1. * num_replicas])
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=1,
required_gpus=2,
implementation=[
CommunicationImplementation.NCCL, CommunicationImplementation.RING
],
prefer_unique_instance_key=[True, False]))
def testMultiThreadedCollectiveLaunchNoInterleave(self, num_processes,
required_gpus,
implementation,
prefer_unique_instance_key):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
# We would like to simulate the following sequence:
# thread-0 device0 device1
# thread-1 device0 device1
# If the kernel launch sequence is as-is the program will deadlock since
# NCCL requires the launch order to be same on each device.
v0 = make_per_replica_value(1.0, devices)
v1 = make_per_replica_value(2.0, devices)
      # Add a delay to collective_ops.all_reduce according to the input
      # tensor's index in `sequence`.
sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]
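      # Concretely: the spawned thread reduces v0 and the main thread reduces
      # v1, so device0 sees the spawned thread's kernel first (0s vs 1s delay)
      # while device1 sees the main thread's kernel first (2s vs 3s delay),
      # i.e. opposite launch orders on the two devices.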
all_reduce = collective_ops.all_reduce
def delayed_all_reduce(input_tensor, *args, **kwargs):
for idx, v in enumerate(sequence):
if input_tensor is v:
time.sleep(idx)
break
return all_reduce(input_tensor, *args, **kwargs)
with test.mock.patch.object(collective_ops, "all_reduce",
delayed_all_reduce):
# We only use NCCL for batch reduce with two or more values, so we use
# two values here.
def thread_fn():
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v0, v0), (v0, v0)], options)
self.assertAllEqual(reduced[0].values, [2.0, 2.0])
self.assertAllEqual(reduced[1].values, [2.0, 2.0])
t = threading.Thread(target=thread_fn)
t.start()
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),
(v1, v1)],
options)
self.assertAllEqual(reduced[0].values, [4.0, 4.0])
self.assertAllEqual(reduced[1].values, [4.0, 4.0])
t.join()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=1,
required_gpus=2,
implementation=[
CommunicationImplementation.NCCL, CommunicationImplementation.RING
],
prefer_unique_instance_key=[True, False]))
def testInputsAreFunctionArgs(self, num_processes, required_gpus,
implementation, prefer_unique_instance_key):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
@def_function.function
def reduce_fn(v):
# Function inputs don't have device placement.
self.assertEqual(v.values[0].device, "")
self.assertEqual(v.values[1].device, "")
# We only use NCCL for batch reduce with two or more values, so we use
# two values here.
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v),
(v, v)],
options)
self.assertEqual(reduced[0].values[0].device, devices[0])
self.assertEqual(reduced[0].values[1].device, devices[1])
self.assertEqual(reduced[1].values[0].device, devices[0])
self.assertEqual(reduced[1].values[1].device, devices[1])
# Returning Mirrored only evaluates the primary value, which causes
        # hanging, so return the per-replica values as a list instead.
return [reduced[0].values, reduced[1].values]
v = make_per_replica_value(1.0, devices)
reduced = reduce_fn(v)
self.assertAllClose(reduced, [[2.0, 2.0], [2.0, 2.0]])
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutReduceDense(self, num_processes, implementation, required_gpus,
prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(1.0, devices)
options = collective_util.Options(
timeout_seconds=1, implementation=implementation)
@def_function.function
def reduce_dense():
return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)
# The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_dense()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutBatchReduceDense(self, num_processes, implementation,
required_gpus, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(1.0, devices)
options = collective_util.Options(
timeout_seconds=1, implementation=implementation)
@def_function.function
def batch_reduce_dense():
return collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v, v), (v, v)], options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_dense()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutReduceSparse(self, num_processes, implementation,
required_gpus, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(
IndexedSlicesValue(
values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
options = collective_util.Options(
timeout_seconds=1, implementation=implementation)
@def_function.function
def reduce_sparse():
return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_sparse()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutBatchReduceSparse(self, num_processes, required_gpus,
implementation, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(
IndexedSlicesValue(
values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
options = collective_util.Options(
timeout_seconds=1, implementation=implementation)
@def_function.function
def batch_reduce_sparse():
return collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v, v), (v, v)], options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_sparse()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(combinations.combine(num_processes=1, required_gpus=2))
def testNcclOrdering(self, num_processes, required_gpus):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = True
CollectiveReplicaLauncher._prefer_ordering_token = True
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(
implementation=CommunicationImplementation.NCCL)
v_dense = make_per_replica_value([1.0, 1.0], devices)
v_sparse = make_per_replica_value([
IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
], devices)
@def_function.function
def nested_dense():
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
@def_function.function
def nested_sparse():
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
      # All collectives, function calls, if clauses and while loops should be
# chained by control dependencies, so that the execution order is
# deterministic.
@def_function.function
def f():
# pylint: disable=pointless-statement
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# reducing dense value.
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
# reducing sparse value.
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# reduce dense value in nested tf.function.
nested_dense()
# reduce sparse value in nested tf.function.
nested_sparse()
# reduce dense value in tf.cond.
if array_ops.identity(1.0) > array_ops.identity(2.0):
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
else:
v_dense
# reduce sparse value in tf.cond.
if array_ops.identity(1.0) > array_ops.identity(2.0):
v_sparse
else:
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
options)
# reduce dense value in tf.while_loop.
i = array_ops.identity(1)
while i < 3:
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
i += 1
# reduce sparse value in tf.while_loop.
i = array_ops.identity(1)
while i < 3:
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
options)
i += 1
# reducing dense and sparse value again.
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# pylint: enable=pointless-statement
graph = f.get_concrete_function().graph
should_be_ordered = set([
"CollectiveReduceV2", "CollectiveGatherV2", "If", "While",
"StatefulPartitionedCall"
])
nodes_by_device = {}
for op in graph.get_operations():
if op.type in should_be_ordered:
if op.device not in nodes_by_device:
nodes_by_device[op.device] = []
nodes_by_device[op.device].append(op)
order = test_util.topological_sort_operations(graph.get_operations())
for device in devices:
device = device_util.canonicalize(device)
# Those function ops don't have device annotations, but they contain
# collectives for both devices so we always include them.
operations = nodes_by_device[device] + nodes_by_device[""]
# Verify that we get all types of nodes we want.
self.assertEqual(set(op.type for op in operations), should_be_ordered)
test_util.assert_sequential_execution(order, operations)
get_global_mpr(num_processes).run(replica_fn)
if __name__ == "__main__":
# Set default inter op thread pool size to one to ensure we don't exhaust the
# thread pool with the additional executors to run collectives in eager.
os.environ["TF_NUM_INTEROP_THREADS"] = "1"
  # TODO(b/172304955): figure out why logical devices don't work.
test_util.main(config_logical_devices=False)
| apache-2.0 | 6,714,008,746,494,707,000 | 39.294599 | 81 | 0.610845 | false |
mlq/python-taiga | tests/test_tasks.py | 2 | 2039 | from taiga.requestmaker import RequestMaker
from taiga.models import Task, Tasks
import unittest
from mock import patch
import six
if six.PY2:
import_open = '__builtin__.open'
else:
import_open = 'builtins.open'
class TestTasks(unittest.TestCase):
@patch('taiga.requestmaker.RequestMaker.get')
def test_list_attachments(self, mock_requestmaker_get):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
Task(rm, id=1).list_attachments()
mock_requestmaker_get.assert_called_with(
'tasks/attachments',
query={"object_id": 1},
)
@patch(import_open)
@patch('taiga.models.base.ListResource._new_resource')
def test_file_attach(self, mock_new_resource, mock_open):
fd = open('tests/resources/tasks_list_success.json')
mock_open.return_value = fd
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
task = Task(rm, id=1, project=1)
task.attach('tests/resources/tasks_list_success.json')
mock_new_resource.assert_called_with(
files={'attached_file': fd},
payload={'project': 1, 'object_id': 1}
)
@patch('taiga.requestmaker.RequestMaker.post')
def test_import_task(self, mock_requestmaker_post):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
task = Tasks(rm).import_(1, 'Subject', 'New')
mock_requestmaker_post.assert_called_with(
'/{endpoint}/{id}/{type}', endpoint='importer', payload={'project': 1,
'subject': 'Subject',
'status': 'New'},
id=1, type='task'
)
@patch('taiga.models.base.InstanceResource.update')
def test_add_comment(self, mock_update):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
task = Task(rm, id=1)
task.add_comment('hola')
mock_update.assert_called_with(
comment='hola'
)
| mit | -6,348,101,550,097,243,000 | 36.759259 | 90 | 0.576753 | false |
pigshell/nhnick | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py | 123 | 7521 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import re
from webkitpy.layout_tests.models import test_expectations
_log = logging.getLogger(__name__)
class LayoutTestFinder(object):
def __init__(self, port, options):
self._port = port
self._options = options
self._filesystem = self._port.host.filesystem
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
def find_tests(self, options, args):
paths = self._strip_test_dir_prefixes(args)
if options.test_list:
paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
test_files = self._port.tests(paths)
return (paths, test_files)
def _strip_test_dir_prefixes(self, paths):
return [self._strip_test_dir_prefix(path) for path in paths if path]
def _strip_test_dir_prefix(self, path):
# Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
# the filesystem uses '\\' as a directory separator.
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
return path
def _read_test_names_from_file(self, filenames, test_path_separator):
fs = self._filesystem
tests = []
for filename in filenames:
try:
if test_path_separator != fs.sep:
filename = filename.replace(test_path_separator, fs.sep)
file_contents = fs.read_text_file(filename).split('\n')
for line in file_contents:
line = self._strip_comments(line)
if line:
tests.append(line)
except IOError, e:
if e.errno == errno.ENOENT:
_log.critical('')
                    _log.critical('--test-list file "%s" not found' % filename)
raise
return tests
@staticmethod
def _strip_comments(line):
commentIndex = line.find('//')
        if commentIndex == -1:
commentIndex = len(line)
line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
if line == '':
return None
else:
return line
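    # For example:
    #   _strip_comments('foo/bar.html  // flaky') returns 'foo/bar.html'
    #   _strip_comments('// whole-line comment') returns None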
def skip_tests(self, paths, all_tests_list, expectations, http_tests):
all_tests = set(all_tests_list)
tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
if self._options.skip_failing_tests:
tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
if self._options.skipped == 'only':
tests_to_skip = all_tests - tests_to_skip
elif self._options.skipped == 'ignore':
tests_to_skip = set()
elif self._options.skipped != 'always':
# make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
tests_to_skip -= set(paths)
# unless of course we don't want to run the HTTP tests :)
if not self._options.http:
tests_to_skip.update(set(http_tests))
return tests_to_skip
def split_into_chunks(self, test_names):
"""split into a list to run and a set to skip, based on --run-chunk and --run-part."""
if not self._options.run_chunk and not self._options.run_part:
return test_names, set()
# If the user specifies they just want to run a subset of the tests,
# just grab a subset of the non-skipped tests.
chunk_value = self._options.run_chunk or self._options.run_part
try:
(chunk_num, chunk_len) = chunk_value.split(":")
chunk_num = int(chunk_num)
assert(chunk_num >= 0)
test_size = int(chunk_len)
assert(test_size > 0)
except AssertionError:
_log.critical("invalid chunk '%s'" % chunk_value)
return (None, None)
# Get the number of tests
num_tests = len(test_names)
# Get the start offset of the slice.
if self._options.run_chunk:
chunk_len = test_size
# In this case chunk_num can be really large. We need
# to make the slave fit in the current number of tests.
slice_start = (chunk_num * chunk_len) % num_tests
else:
# Validate the data.
assert(test_size <= num_tests)
assert(chunk_num <= test_size)
# To count the chunk_len, and make sure we don't skip
# some tests, we round to the next value that fits exactly
# all the parts.
rounded_tests = num_tests
if rounded_tests % test_size != 0:
rounded_tests = (num_tests + test_size - (num_tests % test_size))
chunk_len = rounded_tests / test_size
slice_start = chunk_len * (chunk_num - 1)
# It does not mind if we go over test_size.
# Get the end offset of the slice.
slice_end = min(num_tests, slice_start + chunk_len)
tests_to_run = test_names[slice_start:slice_end]
_log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
# If we reached the end and we don't have enough tests, we run some
# from the beginning.
if slice_end - slice_start < chunk_len:
extra = chunk_len - (slice_end - slice_start)
_log.debug(' last chunk is partial, appending [0:%d]' % extra)
tests_to_run.extend(test_names[0:extra])
return (tests_to_run, set(test_names) - set(tests_to_run))
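    # Worked example: with 10 tests, --run-part=2:4 rounds up to 12, giving
    # chunk_len=3 and slice_start=3, so tests [3:6] run. With --run-chunk=2:4
    # the slice starts at (2*4) % 10 = 8, and since [8:10] is short of the
    # chunk length, tests [0:2] are appended to wrap around.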
| bsd-3-clause | 584,586,672,344,144,300 | 41.977143 | 134 | 0.626778 | false |
thp44/delphin_6_automation | data_process/2d_1d/simon/example_sim.py | 1 | 7655 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import json
import pandas as pd
import xmltodict
import shutil
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_2d_1d as auth_dict
from delphin_6_automation.sampling import inputs
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.delphin_setup import delphin_permutations
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
def create_2d_designs(folder):
bricks = pd.read_excel(os.path.join(folder, 'Brick.xlsx'))
plasters = pd.read_excel(os.path.join(folder, 'Plaster.xlsx'))
ref_folder = os.path.join(folder, 'delphin')
for file in os.listdir(ref_folder):
delphin_dict = delphin_parser.dp6_to_dict(os.path.join(ref_folder, file))
for p_index, p_id in enumerate(plasters['Material ID']):
new_material = material_interactions.get_material_info(p_id)
plaster_delphin = delphin_permutations.change_layer_material(delphin_dict,
'Lime cement mortar [717]',
new_material)
for index, mat_id in enumerate(bricks['Material ID']):
new_material = material_interactions.get_material_info(mat_id)
new_delphin = delphin_permutations.change_layer_material(plaster_delphin,
'Old Building Brick Dresden ZP [504]',
new_material)
file_name = f'{file.split(".")[0]}_{plasters.iloc[p_index, 1]}_{bricks.iloc[index, 1]}.d6p'
xmltodict.unparse(new_delphin,
output=open(os.path.join(folder, 'design', file_name),
'w'), pretty=True)
def create_1d_designs(folder):
bricks = pd.read_excel(os.path.join(folder, 'Brick.xlsx'))
plasters = pd.read_excel(os.path.join(folder, 'Plaster.xlsx'))
ref_folder = os.path.join(folder, 'delphin')
temp_folder = os.path.join(folder, 'temp')
thickness = [0.228, 0.348, 0.468]
for file in os.listdir(ref_folder):
for thick in thickness:
delphin_dict = delphin_parser.dp6_to_dict(os.path.join(ref_folder, file))
thick_delphin = delphin_permutations.change_layer_width(delphin_dict,
'Old Building Brick Dresden ZP [504]',
thick)
thick_delphin = delphin_permutations.update_output_locations(thick_delphin)
for p_index, p_id in enumerate(plasters['Material ID']):
new_material = material_interactions.get_material_info(p_id)
new_delphin = delphin_permutations.change_layer_material(thick_delphin,
'Lime cement mortar [717]',
new_material)
file_name = '_'.join(file.split('_')[:2]) + f'_{int((thick+0.012)*100)}cm_1D_{plasters.iloc[p_index, 1]}.d6p'
xmltodict.unparse(new_delphin,
output=open(os.path.join(temp_folder, file_name),
'w'), pretty=True)
for file in os.listdir(temp_folder):
delphin_dict = delphin_parser.dp6_to_dict(os.path.join(temp_folder, file))
for index, mat_id in enumerate(bricks['Material ID']):
new_material = material_interactions.get_material_info(mat_id)
new_delphin = delphin_permutations.change_layer_material(delphin_dict,
'Old Building Brick Dresden ZP [504]',
new_material)
file_name = f'{file.split(".")[0]}_{bricks.iloc[index, 1]}.d6p'
xmltodict.unparse(new_delphin,
output=open(os.path.join(folder, 'design', file_name),
'w'), pretty=True)
def create_sampling_strategy(path: str, design_option: list) -> dict:
"""
    Create a sampling strategy for WP6 Delphin Automation. The sampling strategy will be named
    'sampling_strategy.json' and will be located in the given folder.
"""
design = [design_.split('.')[0] for design_ in design_option]
scenario = {'generic_scenario': None}
distributions = {'exterior_climate':
{'type': 'discrete', 'range': ['Weimar', 'Bremen', 'MuenchenAirp']},
'exterior_heat_transfer_coefficient_slope':
{'type': 'uniform', 'range': [1, 4], },
'exterior_moisture_transfer_coefficient':
{'type': 'discrete', 'range': [7.7*10**-9]},
'solar_absorption':
{'type': 'uniform', 'range': [0.4, 0.8], },
'rain_scale_factor':
{'type': 'uniform', 'range': [0, 2], },
'interior_climate':
{'type': 'discrete', 'range': ['a', 'b'], },
'wall_orientation':
{'type': 'uniform', 'range': [0, 360], },
'start_year':
{'type': 'discrete', 'range': [i for i in range(2020, 2046)], },
}
sampling_settings = {'initial samples per set': 1,
'add samples per run': 1,
'max samples': 500,
'sequence': 10,
'standard error threshold': 0.1,
'raw sample size': 2 ** 9}
combined_dict = {'design': design, 'scenario': scenario,
'distributions': distributions, 'settings': sampling_settings}
with open(os.path.join(path, 'sampling_strategy.json'), 'w') as file:
json.dump(combined_dict, file)
return combined_dict
def copy_designs(folder):
folder_1d = os.path.join(folder, '1D', 'design')
folder_2d = os.path.join(folder, '2D', 'design')
dst_folder = os.path.join(folder, 'designs')
print('Copy 1D')
for file1d in os.listdir(folder_1d):
shutil.copyfile(os.path.join(folder_1d, file1d), os.path.join(dst_folder, file1d))
print('Copy 2D')
for file2d in os.listdir(folder_2d):
shutil.copyfile(os.path.join(folder_2d, file2d), os.path.join(dst_folder, file2d))
folder_ = r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU'
folder_1d = os.path.join(folder_, '1D')
folder_2d = os.path.join(folder_, '2D')
folder_strategy = os.path.join(folder_, 'sampling_strategy')
folder_design = os.path.join(folder_, 'designs')
#create_1d_designs(folder_1d)
create_2d_designs(folder_2d)
copy_designs(folder_)
design_options = os.listdir(folder_design)
create_sampling_strategy(folder_strategy, design_options)
mongo_setup.global_end_ssh(server)
| mit | -3,079,666,736,781,081,000 | 43.505814 | 125 | 0.523841 | false |
axinging/chromium-crosswalk | ppapi/generators/idl_lint.py | 180 | 4046 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Lint for IDL """
import os
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_visitor import IDLVisitor
Option('wcomment', 'Disable warning for missing comment.')
Option('wenum', 'Disable warning for missing enum value.')
Option('winline', 'Disable warning for inline blocks.')
Option('wname', 'Disable warning for inconsistent interface name.')
Option('wnone', 'Disable all warnings.')
Option('wparam', 'Disable warning for missing [in|out|inout] on param.')
Option('wpass', 'Disable warning for mixed passByValue and returnByValue.')
#
# IDLLinter
#
# Once the AST is build, we need to resolve the namespace and version
# information.
#
class IDLLinter(IDLVisitor):
def VisitFilter(self, node, data):
__pychecker__ = 'unusednames=node,data'
return not node.IsA('Comment', 'Copyright')
def Arrive(self, node, errors):
__pychecker__ = 'unusednames=node,errors'
warnings = 0
if node.IsA('Interface', 'Member', 'Struct', 'Enum', 'EnumItem', 'Typedef'):
comments = node.GetListOf('Comment')
if not comments and not node.GetProperty('wcomment'):
node.Warning('Expecting a comment.')
warnings += 1
if node.IsA('File'):
labels = node.GetListOf('Label')
interfaces = node.GetListOf('Interface')
if interfaces and not labels:
node.Warning('Expecting a label in a file containing interfaces.')
if node.IsA('Struct', 'Typedef') and not node.GetProperty('wpass'):
if node.GetProperty('passByValue'):
pbv = 'is'
else:
pbv = 'is not'
if node.GetProperty('returnByValue'):
ret = 'is'
else:
ret = 'is not'
if pbv != ret:
node.Warning('%s passByValue but %s returnByValue.' % (pbv, ret))
warnings += 1
if node.IsA('EnumItem'):
if not node.GetProperty('VALUE') and not node.GetProperty('wenum'):
node.Warning('Expecting value for enumeration.')
warnings += 1
if node.IsA('Interface'):
macro = node.GetProperty('macro')
if macro and not node.GetProperty('wname'):
node.Warning('Interface name inconsistent: %s' % macro)
warnings += 1
if node.IsA('Inline') and not node.GetProperty('winline'):
inline_type = node.GetProperty('NAME')
node.parent.Warning('Requires an inline %s block.' % inline_type)
warnings += 1
if node.IsA('Callspec') and not node.GetProperty('wparam'):
out = False
for arg in node.GetListOf('Param'):
if arg.GetProperty('out'):
out = True
if arg.GetProperty('in') and out:
arg.Warning('[in] parameter after [out] parameter')
warnings += 1
if node.IsA('Param') and not node.GetProperty('wparam'):
      found = False
for form in ['in', 'inout', 'out']:
if node.GetProperty(form): found = True
if not found:
node.Warning('Missing argument type: [in|out|inout]')
warnings += 1
return warnings
def Depart(self, node, warnings, childdata):
__pychecker__ = 'unusednames=node'
for child in childdata:
warnings += child
return warnings
def Lint(ast):
options = ['wcomment', 'wenum', 'winline', 'wparam', 'wpass', 'wname']
wnone = GetOption('wnone')
for opt in options:
if wnone or GetOption(opt): ast.SetProperty(opt, True)
skipList = []
for filenode in ast.GetListOf('File'):
name = filenode.GetProperty('NAME')
if filenode.GetProperty('ERRORS') > 0:
ErrOut.Log('%s : Skipped due to errors.' % name)
skipList.append(filenode)
continue
warnings = IDLLinter().Visit(filenode, 0)
if warnings:
WarnOut.Log('%s warning(s) for %s\n' % (warnings, name))
return skipList
| bsd-3-clause | -1,390,467,772,255,754,800 | 32.163934 | 80 | 0.649778 | false |
AQuadroTeam/CellsCycle | firstLaunchAWS.py | 1 | 5427 | # manually build and launch your instances
# remember that the ip field deals with a private ip
def _get_parameter(node_id, private_ip, min_key, max_key):
p = {"id": node_id, "ip": private_ip, "min_key": min_key, "max_key": max_key}
return p
def create_instances_parameters():
"""
first = _get_parameter(node_id="1", private_ip="172.31.20.1", min_key="0", max_key="19")
# parameter["master_of_master"] = first
second = _get_parameter(node_id="2", private_ip="172.31.20.2", min_key="20", max_key="39")
# parameter["master"] = second
third = _get_parameter(node_id="3", private_ip="172.31.20.3", min_key="40", max_key="59")
# parameter["myself"] = third
fourth = _get_parameter(node_id="4", private_ip="172.31.20.4", min_key="60", max_key="79")
# parameter["slave"] = fourth
fifth = _get_parameter(node_id="5", private_ip="172.31.20.5", min_key="80", max_key="99")
# parameter["slave_of_slave"] = fifth
"""
n = 5
key_int = (2**32-1)/n
first = _get_parameter(node_id="1", private_ip="172.31.20.1", min_key="0", max_key=str(key_int-1))
# parameter["master_of_master"] = first
second = _get_parameter(node_id="2", private_ip="172.31.20.2", min_key=str(key_int), max_key=str(2*key_int-1))
# parameter["master"] = second
third = _get_parameter(node_id="3", private_ip="172.31.20.3", min_key=str(2*key_int), max_key=str(3*key_int-1))
# parameter["myself"] = third
fourth = _get_parameter(node_id="4", private_ip="172.31.20.4", min_key=str(3*key_int), max_key=str(4*key_int-1))
# parameter["slave"] = fourth
fifth = _get_parameter(node_id="5", private_ip="172.31.20.5", min_key=str(4*key_int), max_key=str(5*key_int-1))
list_parameters = [first, second, third, fourth, fifth]
list_len = len(list_parameters)
result = []
for l in xrange(list_len):
parameter = {"master_of_master": list_parameters[l % list_len],
"master": list_parameters[(l + 1) % list_len],
"myself": list_parameters[(l + 2) % list_len],
"slave": list_parameters[(l + 3) % list_len],
"slave_of_slave": list_parameters[(l + 4) % list_len]}
# print '-------------------'
# print list_parameters[l % list_len]['id']
# print list_parameters[(l+1) % list_len]['id']
# print list_parameters[(l+2) % list_len]['id']
# print list_parameters[(l+3) % list_len]['id']
# print list_parameters[(l+4) % list_len]['id']
# print '-------------------'
# print '-------------------'
# for k, v in parameter.iteritems():
# print "{}, {}".format(k, v)
# print '-------------------'
result.append(parameter)
return result
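# The five entries describe a ring: for the entry whose "myself" is node 3,
# node 2 is the master, node 1 the master_of_master, node 4 the slave and
# node 5 the slave_of_slave; the indices wrap around modulo the list length.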
def create_specific_instance_parameters(specific_nodes):
list_parameters = []
for k in specific_nodes:
list_parameters.append(_get_parameter(node_id=k.id, private_ip=k.ip, min_key=k.min_key,
max_key=k.max_key))
parameter = {"master_of_master": list_parameters[0],
"master": list_parameters[1],
"myself": list_parameters[2],
"slave": list_parameters[3],
"slave_of_slave": list_parameters[4]}
# print '-------------------'
# print list_parameters[l % list_len]['id']
# print list_parameters[(l+1) % list_len]['id']
# print list_parameters[(l+2) % list_len]['id']
# print list_parameters[(l+3) % list_len]['id']
# print list_parameters[(l+4) % list_len]['id']
# print '-------------------'
# print '-------------------'
# for k, v in parameter.iteritems():
# print "{}, {}".format(k, v)
# print '-------------------'
return parameter
def launchApplicationAWS(settings):
from CellCycle.AWS.AWSlib import startInstanceAWS
from start import loadLogger
# necessary to launch aws instances
logger = loadLogger(settings)
# every instance has an element
params_list = create_instances_parameters()
    # the default vpc (virtual private cloud) uses the 172.31.0.0/16 block
    # so we can create private ips from 172.31.0.1 to 172.31.255.254
    # 172.31.1.0/24 is reserved
    # I suggest using (just for the initial nodes) the 172.31.20.0/24 range
# for example, create 3 nodes:
# 172.31.20.1
# 172.31.20.2
# 172.31.20.3
# only debug
# from CellCycle.ChainModule.Generator import Generator
# from json import dumps,loads
# generator = Generator(logger=logger, settings=settings, json_arg=loads(dumps(params_list)))
# generator.create_process_environment()
# for ins in params_list:
# print "######## NEW NODE #######"
# for k, v in ins.iteritems():
# print "{}, {}".format(k, v)
# print "#########################"
# launch
for ins in params_list:
startInstanceAWS(settings, logger, ins, ins["myself"]["ip"])
if __name__ == "__main__":
import sys
from start import loadSettings
if len(sys.argv) == 1:
settings = loadSettings(currentProfile='default')
else:
currentProfile = {}
currentProfile["profile_name"] = sys.argv[1]
currentProfile["key_pair"] = sys.argv[2]
currentProfile["branch"] = sys.argv[3]
settings = loadSettings(currentProfile)
launchApplicationAWS(settings)
| mit | -7,016,561,542,101,582,000 | 34.703947 | 116 | 0.566243 | false |
yuyuyu101/VirtualBox-NetBSD | src/libs/libxml2-2.6.31/python/tests/validate.py | 87 | 1710 | #!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
ctxt = libxml2.createFileParserCtxt("valid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
if doc.name != "valid.xml":
print "doc.name failed"
sys.exit(1)
root = doc.children
if root.name != "doc":
print "root.name failed"
sys.exit(1)
if valid != 1:
    print "validity check failed"
sys.exit(1)
doc.freeDoc()
i = 1000
while i > 0:
ctxt = libxml2.createFileParserCtxt("valid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
doc.freeDoc()
if valid != 1:
print "validity check failed"
sys.exit(1)
i = i - 1
# deactivate error messages from the validation
def noerr(ctx, str):
pass
libxml2.registerErrorHandler(noerr, None)
ctxt = libxml2.createFileParserCtxt("invalid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
if doc.name != "invalid.xml":
print "doc.name failed"
sys.exit(1)
root = doc.children
if root.name != "doc":
print "root.name failed"
sys.exit(1)
if valid != 0:
    print "validity check failed"
sys.exit(1)
doc.freeDoc()
i = 1000
while i > 0:
ctxt = libxml2.createFileParserCtxt("invalid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
doc.freeDoc()
if valid != 0:
print "validity check failed"
sys.exit(1)
i = i - 1
del ctxt
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| gpl-2.0 | 3,839,277,314,929,236,000 | 19.853659 | 59 | 0.651462 | false |
ahharu/plugin-manager | plugin_manager/core/mixins/tables.py | 1 | 6129 | from __future__ import absolute_import, unicode_literals
from django.core.paginator import Paginator
from django.core import urlresolvers
from django.utils.html import mark_safe, escape
import django_tables2 as tables
from django_tables2.tables import Table
from django_tables2.utils import Accessor as A, AttributeDict
class ActionsColumn(tables.Column):
"""
This column allows you to pass in a list
of links that will form an Action Column
"""
empty_values = ()
links = None
delimiter = None
def __init__(self, links=None, delimiter=' | ', **kwargs):
super(ActionsColumn, self).__init__(**kwargs)
self.orderable = False
self.delimiter = delimiter
if links is not None:
self.links = links
def render(self, value, record, bound_column):
if not self.links:
raise NotImplementedError('Links not assigned.')
if not isinstance(self.links, (list, tuple, dict)):
raise NotImplementedError('Links must be an iterable.')
links = []
for link in self.links:
title = link['title']
url = link['url']
attrs = link['attrs'] if 'attrs' in link else None
if 'args' in link:
args = [a.resolve(record) if isinstance(a, A) else a for a in link['args']]
else:
args = None
attrs = AttributeDict(
attrs if attrs is not None else self.attrs.get('a', {}))
try:
attrs['href'] = urlresolvers.reverse(url, args=args)
except urlresolvers.NoReverseMatch:
attrs['href'] = url
links.append('<a {attrs}>{text}</a>'.format(
attrs=attrs.as_html(),
text=mark_safe(title)
))
return mark_safe(self.delimiter.join(links))
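# Usage sketch (the table and URL names below are only illustrative):
#
# class ItemTable(tables.Table):
#     name = tables.Column()
#     actions = ActionsColumn([
#         {'title': 'Edit', 'url': 'item_edit', 'args': [A('pk')]},
#         {'title': 'Delete', 'url': 'item_delete', 'args': [A('pk')]},
#     ])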
class ActionsColumn2(tables.Column):
"""
This column allows you to pass in a list
of links that will form an Action Column
"""
empty_values = ()
links = None
delimiter = None
def __init__(self, links=None, delimiter=' | ', **kwargs):
super(ActionsColumn2, self).__init__(**kwargs)
self.orderable = False
self.delimiter = delimiter
if links is not None:
self.links = links
def render(self, value, record, bound_column):
if not self.links:
raise NotImplementedError('Links not assigned.')
if not isinstance(self.links, (list, tuple, dict)):
raise NotImplementedError('Links must be an iterable.')
links = []
for link in self.links:
title = link['title']
url = link['url']
urlme = None
attrs = link['attrs'] if 'attrs' in link else None
if 'args' in link:
args = [a.resolve(record) if isinstance(a, A) else a for a in link['args']]
else:
args = None
            if 'url' in link and not isinstance(url, str):
urlme = [a.resolve(record) if isinstance(a, A) else a for a in link['url']][0]
else:
urlme = None
attrs = AttributeDict(
attrs if attrs is not None else self.attrs.get('a', {}))
try:
                if isinstance(urlme, str):
attrs['href'] = urlresolvers.reverse(urlme, args=args)
else:
attrs['href'] = urlresolvers.reverse(url, args=args)
except urlresolvers.NoReverseMatch:
                if isinstance(urlme, str):
attrs['href'] = urlme
else:
attrs['href'] = url
links.append('<a {attrs}>{text}</a>'.format(
attrs=attrs.as_html(),
text=mark_safe(title)
))
return mark_safe(self.delimiter.join(links))
class PaginateTable(Table):
"""Generic table class that makes use of
Django's built in paginate functionality"""
def __init__(self, *args, **kwargs):
super(PaginateTable, self).__init__(*args, **kwargs)
self.template = kwargs.get('template', 'fancy_paged_tables/table.html')
def paginate(self, klass=Paginator,
per_page=None, page=1, *args, **kwargs):
"""
Paginates the table using a paginator and creates a ``page`` property
containing information for the current page.
:type klass: Paginator class
:param klass: a paginator class to paginate the results
:type per_page: `int`
:param per_page: how many records are displayed on each page
:type page: `int`
:param page: which page should be displayed.
Extra arguments are passed to the paginator.
Pagination exceptions (`~django.core.paginator.EmptyPage` and
`~django.core.paginator.PageNotAnInteger`) may be raised from this
method and should be handled by the caller.
"""
self.per_page_options = [20, 50, 100, 200] # This should probably be a passed in option
self.per_page = per_page = per_page or self._meta.per_page
self.paginator = klass(self.rows, per_page, *args, **kwargs)
self.page = self.paginator.page(page)
# Calc variables for use in displaying
# first, adjacent, and last page links
adjacent_pages = 1 # This should probably be a passed in option
# Starting page (first page between the ellipsis)
start_page = max(self.page.number - adjacent_pages, 1)
if start_page <= 3:
start_page = 1
# Ending page (last page between the ellipsis)
end_page = self.page.number + adjacent_pages + 1
if end_page >= self.paginator.num_pages - 1:
end_page = self.paginator.num_pages + 1
# Paging vars used in template
self.page_numbers = [n for n in range(start_page, end_page) if 0 < n <= self.paginator.num_pages]
self.show_first = 1 not in self.page_numbers
self.show_last = self.paginator.num_pages not in self.page_numbers
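# Usage sketch (view and model names are only illustrative): a view builds the
# table and paginates it before rendering, e.g.
#   table = ItemTable(Item.objects.all())
#   table.paginate(page=request.GET.get('page', 1), per_page=50)
# after which table.page, table.page_numbers, table.show_first and
# table.show_last drive the pagination controls in the template.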
| mit | 2,728,300,248,846,607,000 | 34.633721 | 105 | 0.577745 | false |
macks22/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause | 4,311,436,500,876,449,300 | 38.225806 | 79 | 0.617599 | false |
dobbymoodge/origin | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | 24 | 47731 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
from charms.leadership import leader_get, leader_set
from shutil import move
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.layer import tls_client
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def set_upgrade_needed(forced=False):
set_state('kubernetes-master.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
hookenv.log('set upgrade needed')
if previous_channel is None or not require_manual or forced:
hookenv.log('forcing upgrade')
set_state('kubernetes-master.upgrade-specified')
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def check_for_upgrade_needed():
'''An upgrade charm event was triggered by Juju, react to that here.'''
hookenv.status_set('maintenance', 'Checking resources')
migrate_from_pre_snaps()
add_rbac_roles()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
changed = snap_resources_changed()
if changed == 'yes':
set_upgrade_needed()
elif changed == 'unknown':
# We are here on an upgrade from non-rolling master
# Since this upgrade might also include resource updates eg
# juju upgrade-charm kubernetes-master --resource kube-any=my.snap
# we take no risk and forcibly upgrade the snaps.
# Forcibly means we do not prompt the user to call the upgrade action.
set_upgrade_needed(forced=True)
# Set the auto storage backend to etcd2.
auto_storage_backend = leader_get('auto_storage_backend')
is_leader = is_state('leadership.is_leader')
if not auto_storage_backend and is_leader:
leader_set(auto_storage_backend='etcd2')
def snap_resources_changed():
'''
    Check if the snapped resources have changed. The first time this method is
    called it will report "unknown".
    Returns: "yes" in case a snap resource file has changed,
             "no" in case the snap resources are the same as last call,
"unknown" if it is the first time this method is called
'''
db = unitdata.kv()
resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
'kube-scheduler', 'cdk-addons']
paths = [hookenv.resource_get(resource) for resource in resources]
if db.get('snap.resources.fingerprint.initialised'):
result = 'yes' if any_file_changed(paths) else 'no'
return result
else:
db.set('snap.resources.fingerprint.initialised', True)
any_file_changed(paths)
return 'unknown'
def add_rbac_roles():
'''Update the known_tokens file with proper groups.'''
tokens_fname = '/root/cdk/known_tokens.csv'
tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
move(tokens_fname, tokens_backup_fname)
with open(tokens_fname, 'w') as ftokens:
with open(tokens_backup_fname, 'r') as stream:
for line in stream:
record = line.strip().split(',')
# token, username, user, groups
if record[2] == 'admin' and len(record) == 3:
towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
record[1],
record[2],
'system:masters')
ftokens.write(towrite)
continue
if record[2] == 'kube_proxy':
towrite = '{0},{1},{2}\n'.format(record[0],
'system:kube-proxy',
'kube-proxy')
ftokens.write(towrite)
continue
if record[2] == 'kubelet' and record[1] == 'kubelet':
continue
ftokens.write('{}'.format(line))
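# For illustration, add_rbac_roles() rewrites known_tokens.csv rows like this
# (token values are made up):
#   'abc1,admin,admin'           -> 'abc1,admin,admin,"system:masters"'
#   'abc2,kube_proxy,kube_proxy' -> 'abc2,system:kube-proxy,kube-proxy'
#   'abc3,kubelet,kubelet'       -> dropped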
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('kubernetes-master.upgrade-needed')
@when_not('kubernetes-master.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-master.upgrade-specified')
def do_upgrade():
install_snaps()
remove_state('kubernetes-master.upgrade-needed')
remove_state('kubernetes-master.upgrade-specified')
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
snap_resources_changed()
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin", "system:masters")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('config.changed.storage-backend')
def storage_backend_changed():
remove_state('kubernetes-master.components.started')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')
if not os.path.isfile(known_tokens):
touch(known_tokens)
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all key were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
fp.write('\n')
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
@when_not('kubernetes-master.upgrade-needed')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when('leadership.set.auto_storage_backend')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
# etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
# TODO: Make sure below relation is handled on change
# https://github.com/kubernetes/kubernetes/issues/43461
handle_etcd_relation(etcd)
# Add CLI options to all components
configure_apiserver(etcd.get_connection_string(), getStorageBackend())
configure_controller_manager()
configure_scheduler()
set_state('kubernetes-master.components.started')
hookenv.open_port(6443)
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
# We are the leader and the auto_storage_backend is not set meaning
# this is the first time we connect to etcd.
auto_storage_backend = leader_get('auto_storage_backend')
is_leader = is_state('leadership.is_leader')
if is_leader and not auto_storage_backend:
if etcd.get_version().startswith('3.'):
leader_set(auto_storage_backend='etcd3')
else:
leader_set(auto_storage_backend='etcd2')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
enableKubeDNS = hookenv.config('enable-kube-dns')
dnsDomain = hookenv.config('dns_domain')
dns_ip = None
if enableKubeDNS:
try:
dns_ip = get_dns_ip()
except CalledProcessError:
hookenv.log("kubedns not ready yet")
return
kube_control.set_dns(53, dnsDomain, dns_ip, enableKubeDNS)
@when('kube-control.connected')
@when('snap.installed.kubectl')
@when('leadership.is_leader')
def create_service_configs(kube_control):
"""Create the users for kubelet"""
should_restart = False
# generate the username/pass for the requesting unit
proxy_token = get_token('system:kube-proxy')
if not proxy_token:
setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
proxy_token = get_token('system:kube-proxy')
should_restart = True
client_token = get_token('admin')
if not client_token:
setup_tokens(None, 'admin', 'admin', "system:masters")
client_token = get_token('admin')
should_restart = True
requests = kube_control.auth_user()
for request in requests:
username = request[1]['user']
group = request[1]['group']
kubelet_token = get_token(username)
if not kubelet_token and username and group:
# Usernames have to be in the form of system:node:<nodeName>
userid = "kubelet-{}".format(request[0].split('/')[1])
setup_tokens(None, username, userid, group)
kubelet_token = get_token(username)
kube_control.sign_auth_request(request[0], username,
kubelet_token, proxy_token,
client_token)
should_restart = True
if should_restart:
host.service_restart('snap.kube-apiserver.daemon')
remove_state('authentication.setup')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-api-endpoint.available')
def send_data(tls, kube_api_endpoint):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
# Get ingress address
ingress_ip = get_ingress_address(kube_api_endpoint)
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# maybe they have extra names they want as SANs
extra_sans = hookenv.config('extra_sans')
if extra_sans and not extra_sans == "":
sans.extend(extra_sans.split())
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('config.changed.extra_sans', 'certificates.available',
'kube-api-endpoint.available')
def update_certificate(tls, kube_api_endpoint):
# Using the config.changed.extra_sans flag to catch changes.
# IP changes will take ~5 minutes or so to propagate, but
# it will update.
send_data(tls, kube_api_endpoint)
@when('certificates.server.cert.available',
'kubernetes-master.components.started',
'tls_client.server.certificate.written')
def kick_api_server(tls):
# need to be idempotent and don't want to kick the api server
# without need
if data_changed('cert', tls.get_server_cert()):
# certificate changed, so restart the api server
hookenv.log("Certificate information changed, restarting api server")
restart_apiserver()
tls_client.reset_certificate_write_flag('server')
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
dnsEnabled = str(hookenv.config('enable-kube-dns')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_deprecated_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled,
'enable-kube-dns=' + dnsEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
    Test if the addons got installed.
    Returns: True if the addons got applied.
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the ceph-relation-changed event. If the relationship
    data has changed, we should re-render any configs, keys, and/or
    service pre-reqs. '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except: # NOQA
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('config.changed.authorization-mode',
'kubernetes-master.components.started')
def switch_auth_mode():
config = hookenv.config()
mode = config.get('authorization-mode')
if data_changed('auth-mode', mode):
remove_state('kubernetes-master.components.started')
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged').lower()
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('config.changed.api-extra-args')
@when('kubernetes-master.components.started')
@when('leadership.set.auto_storage_backend')
@when('etcd.available')
def on_config_api_extra_args_change(etcd):
configure_apiserver(etcd.get_connection_string(),
getStorageBackend())
@when('config.changed.controller-manager-extra-args')
@when('kubernetes-master.components.started')
def on_config_controller_manager_extra_args_change():
configure_controller_manager()
@when('config.changed.scheduler-extra-args')
@when('kubernetes-master.components.started')
def on_config_scheduler_extra_args_change():
configure_scheduler()
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'].lower() == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def restart_apiserver():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-apiserver')
host.service_restart('snap.kube-apiserver.daemon')
hookenv.status_set(prev_state, prev_msg)
def restart_controller_manager():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
host.service_restart('snap.kube-controller-manager.daemon')
hookenv.status_set(prev_state, prev_msg)
def restart_scheduler():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-scheduler')
host.service_restart('snap.kube-scheduler.daemon')
hookenv.status_set(prev_state, prev_msg)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
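# Rough sketch of the command sequence emitted above for a password-based admin
# user (server address, CA path and password are placeholder values, not taken
# from a real deployment):
#   kubectl config --kubeconfig=/home/ubuntu/config set-cluster juju-cluster \
#       --server=https://10.0.0.1:6443 --certificate-authority=/root/cdk/ca.crt --embed-certs=true
#   kubectl config --kubeconfig=/home/ubuntu/config unset users
#   kubectl config --kubeconfig=/home/ubuntu/config set-credentials admin --username=admin --password=<password>
#   kubectl config --kubeconfig=/home/ubuntu/config set-context juju-context --cluster=juju-cluster --user=admin
#   kubectl config --kubeconfig=/home/ubuntu/config use-context juju-context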
def get_dns_ip():
cmd = "kubectl get service --namespace kube-system kube-dns --output json"
output = check_output(cmd, shell=True).decode()
svc = json.loads(output)
return svc['spec']['clusterIP']
def get_deprecated_dns_ip():
'''We previously hardcoded the dns ip. This function returns the old
hardcoded value for use with older versions of cdk_addons.'''
interface = ipaddress.IPv4Interface(service_cidr())
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
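# Worked example for the two helpers above, assuming a hypothetical
# service_cidr() of '10.152.183.0/24' (the real value comes from charm config):
#   get_kubernetes_service_ip() -> '10.152.183.1'
#   get_deprecated_dns_ip()     -> '10.152.183.10'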
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
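# Illustrative sketch (hypothetical config value, not from a real model): with
# 'api-extra-args' set to "runtime-config=batch/v2alpha1=true profiling",
# parse_extra_args('api-extra-args') returns
# {'runtime-config': 'batch/v2alpha1=true', 'profiling': 'true'}.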
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-master.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
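# The resulting call looks roughly like (argument values are illustrative only):
#   snap set kube-apiserver stale-flag=null v=4 allow-privileged=true ...
# where the 'null' entries clear options that were set on a previous invocation
# but are no longer wanted.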
def configure_apiserver(etcd_connection_string, leader_etcd_version):
api_opts = {}
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts['allow-privileged'] = 'true'
set_state('kubernetes-master.privileged')
else:
api_opts['allow-privileged'] = 'false'
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts['service-cluster-ip-range'] = service_cidr()
api_opts['min-request-timeout'] = '300'
api_opts['v'] = '4'
api_opts['tls-cert-file'] = server_cert_path
api_opts['tls-private-key-file'] = server_key_path
api_opts['kubelet-certificate-authority'] = ca_cert_path
api_opts['kubelet-client-certificate'] = client_cert_path
api_opts['kubelet-client-key'] = client_key_path
api_opts['logtostderr'] = 'true'
api_opts['insecure-bind-address'] = '127.0.0.1'
api_opts['insecure-port'] = '8080'
api_opts['storage-backend'] = leader_etcd_version
api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
api_opts['kubelet-preferred-address-types'] = \
'[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'
etcd_dir = '/root/cdk/etcd'
etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
etcd_key = os.path.join(etcd_dir, 'client-key.pem')
etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')
api_opts['etcd-cafile'] = etcd_ca
api_opts['etcd-keyfile'] = etcd_key
api_opts['etcd-certfile'] = etcd_cert
api_opts['etcd-servers'] = etcd_connection_string
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
auth_mode = hookenv.config('authorization-mode')
if 'Node' in auth_mode:
admission_control.append('NodeRestriction')
api_opts['authorization-mode'] = auth_mode
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts['admission-control'] = ','.join(admission_control)
configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
restart_apiserver()
def configure_controller_manager():
controller_opts = {}
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
# Default to 3 minute resync. TODO: Make this configurable?
controller_opts['min-resync-period'] = '3m'
controller_opts['v'] = '2'
controller_opts['root-ca-file'] = ca_cert_path
controller_opts['logtostderr'] = 'true'
controller_opts['master'] = 'http://127.0.0.1:8080'
controller_opts['service-account-private-key-file'] = \
'/root/cdk/serviceaccount.key'
configure_kubernetes_service('kube-controller-manager', controller_opts,
'controller-manager-extra-args')
restart_controller_manager()
def configure_scheduler():
scheduler_opts = {}
scheduler_opts['v'] = '2'
scheduler_opts['logtostderr'] = 'true'
scheduler_opts['master'] = 'http://127.0.0.1:8080'
configure_kubernetes_service('kube-scheduler', scheduler_opts,
'scheduler-extra-args')
restart_scheduler()
def setup_basic_auth(password=None, username='admin', uid='admin',
groups=None):
    '''Create the htaccess file and the tokens.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
if groups:
stream.write('{0},{1},{2},"{3}"'.format(password,
username, uid, groups))
else:
stream.write('{0},{1},{2}'.format(password, username, uid))
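# The rendered basic_auth.csv holds a single record of the form (the password
# is generated at runtime; the value below is only a placeholder):
#   <password>,admin,admin,"system:masters"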
def setup_tokens(token, username, user, groups=None):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
if groups:
stream.write('{0},{1},{2},"{3}"\n'.format(token,
username,
user,
groups))
else:
stream.write('{0},{1},{2}\n'.format(token, username, user))
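# known_tokens.csv accumulates one record per call, for example (token values,
# node name and group are placeholders):
#   <token>,admin,admin,"system:masters"
#   <token>,system:kube-proxy,kube-proxy
#   <token>,system:node:juju-worker-0,kubelet-0,"system:nodes"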
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
# Evicted nodes should re-spawn
if status != 'Running' and \
pod['status'].get('reason', '') != 'Evicted':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
def touch(fname):
try:
os.utime(fname, None)
except OSError:
open(fname, 'a').close()
def getStorageBackend():
storage_backend = hookenv.config('storage-backend')
if storage_backend == 'auto':
storage_backend = leader_get('auto_storage_backend')
return storage_backend
| apache-2.0 | -5,692,883,639,161,156,000 | 35.352628 | 79 | 0.654836 | false |
harayz/raspberry_pwn | src/pentest/sqlmap/lib/core/session.py | 7 | 1913 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.common import hashDBWrite
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import OS
from lib.core.settings import SUPPORTED_DBMS
def setDbms(dbms):
"""
@param dbms: database management system to be set into the knowledge
base as fingerprint.
@type dbms: C{str}
"""
hashDBWrite(HASHDB_KEYS.DBMS, dbms)
_ = "(%s)" % ("|".join([alias for alias in SUPPORTED_DBMS]))
_ = re.search("^%s" % _, dbms, re.I)
if _:
dbms = _.group(1)
Backend.setDbms(dbms)
logger.info("the back-end DBMS is %s" % Backend.getDbms())
def setOs():
"""
Example of kb.bannerFp dictionary:
{
'sp': set(['Service Pack 4']),
'dbmsVersion': '8.00.194',
'dbmsServicePack': '0',
'distrib': set(['2000']),
'dbmsRelease': '2000',
'type': set(['Windows'])
}
"""
infoMsg = ""
if not kb.bannerFp:
return
if "type" in kb.bannerFp:
Backend.setOs(Format.humanize(kb.bannerFp["type"]))
infoMsg = "the back-end DBMS operating system is %s" % Backend.getOs()
if "distrib" in kb.bannerFp:
kb.osVersion = Format.humanize(kb.bannerFp["distrib"])
infoMsg += " %s" % kb.osVersion
if "sp" in kb.bannerFp:
kb.osSP = int(Format.humanize(kb.bannerFp["sp"]).replace("Service Pack ", ""))
elif "sp" not in kb.bannerFp and Backend.isOs(OS.WINDOWS):
kb.osSP = 0
if Backend.getOs() and kb.osVersion and kb.osSP:
infoMsg += " Service Pack %d" % kb.osSP
if infoMsg:
logger.info(infoMsg)
hashDBWrite(HASHDB_KEYS.OS, Backend.getOs())
| gpl-3.0 | -698,861,386,747,189,600 | 23.844156 | 86 | 0.620491 | false |
rysson/filmkodi | plugin.video.mrknow/lib/utils/pyDes.py | 2 | 32243 | #############################################################################
# Documentation #
#############################################################################
# Author: Todd Whiteman
# Date: 16th March, 2009
# Version: 2.0.0
# License: Public Domain - free to do as you wish
# Homepage: http://twhiteman.netfirms.com/des.html
#
# This is a pure python implementation of the DES encryption algorithm.
# It's pure python to avoid portability issues, since most DES
# implementations are programmed in C (for performance reasons).
#
# Triple DES class is also implemented, utilising the DES base. Triple DES
# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key.
#
# See the README.txt that should come with this python module for the
# implementation methods used.
#
# Thanks to:
# * David Broadwell for ideas, comments and suggestions.
# * Mario Wolff for pointing out and debugging some triple des CBC errors.
# * Santiago Palladino for providing the PKCS5 padding technique.
# * Shaya for correcting the PAD_PKCS5 triple des CBC errors.
#
"""A pure python implementation of the DES and TRIPLE DES encryption algorithms.
Class initialization
--------------------
pyDes.des(key, [mode], [IV], [pad], [padmode])
pyDes.triple_des(key, [mode], [IV], [pad], [padmode])
key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes
for Triple DES
mode -> Optional argument for encryption type, can be either
pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Length must be 8 bytes.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use during
    all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5)
    to use during all encrypt/decrypt operations done with this instance.
I recommend using PAD_PKCS5 padding, as then you never need to worry about any
padding issues, as the padding can be removed unambiguously upon decrypting
data that was encrypted using PAD_PKCS5 padmode.
Common methods
--------------
encrypt(data, [pad], [padmode])
decrypt(data, [pad], [padmode])
data -> Bytes to be encrypted/decrypted
pad -> Optional argument. Only when using padmode of PAD_NORMAL. For
encryption, adds this characters to the end of the data block when
data is not a multiple of 8 bytes. For decryption, will remove the
trailing characters that match this pad character from the last 8
bytes of the unencrypted data block.
padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL
or PAD_PKCS5). Defaults to PAD_NORMAL.
Example
-------
from pyDes import *
data = "Please encrypt my data"
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
print "Encrypted: %r" % d
print "Decrypted: %r" % k.decrypt(d)
assert k.decrypt(d, padmode=PAD_PKCS5) == data
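A similar sketch for Triple DES (the 16 byte key below is an illustrative value
only, which selects DES-EDE2; a 24 byte key selects DES-EDE3):
k3 = triple_des("0123456789ABCDEF", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d3 = k3.encrypt(data)
assert k3.decrypt(d3) == data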
See the module source (pyDes.py) for more examples of use.
You can also run the pyDes.py file without any arguments to see a simple test.
Note: This code was not written for high-end systems needing a fast
implementation, but rather a handy portable solution with small usage.
"""
import sys
# _pythonMajorVersion is used to handle Python2 and Python3 differences.
_pythonMajorVersion = sys.version_info[0]
# Modes of crypting / cyphering
ECB = 0
CBC = 1
# Modes of padding
PAD_NORMAL = 1
PAD_PKCS5 = 2
# PAD_PKCS5: is a method that will unambiguously remove all padding
# characters after decryption, when originally encrypted with
# this padding mode.
# For a good description of the PKCS5 padding technique, see:
# http://www.faqs.org/rfcs/rfc1423.html
# The base class shared by des and triple des.
class _baseDes(object):
def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
if IV:
IV = self._guardAgainstUnicode(IV)
if pad:
pad = self._guardAgainstUnicode(pad)
self.block_size = 8
# Sanity checking of arguments.
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if IV and len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
# Set the passed in variables
self._mode = mode
self._iv = IV
self._padding = pad
self._padmode = padmode
def getKey(self):
"""getKey() -> bytes"""
return self.__key
def setKey(self, key):
"""Will set the crypting key for this object."""
key = self._guardAgainstUnicode(key)
self.__key = key
def getMode(self):
"""getMode() -> pyDes.ECB or pyDes.CBC"""
return self._mode
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
self._mode = mode
def getPadding(self):
"""getPadding() -> bytes of length 1. Padding character."""
return self._padding
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
def getPadMode(self):
"""getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
return self._padmode
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
self._padmode = mode
def getIV(self):
"""getIV() -> bytes"""
return self._iv
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
if not IV or len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
IV = self._guardAgainstUnicode(IV)
self._iv = IV
def _padData(self, data, pad, padmode):
# Pad data depending on the mode
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode == PAD_NORMAL:
if len(data) % self.block_size == 0:
# No padding required.
return data
if not pad:
# Get the default padding.
pad = self.getPadding()
if not pad:
raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.")
data += (self.block_size - (len(data) % self.block_size)) * pad
elif padmode == PAD_PKCS5:
pad_len = 8 - (len(data) % self.block_size)
if _pythonMajorVersion < 3:
data += pad_len * chr(pad_len)
else:
data += bytes([pad_len] * pad_len)
return data
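    # For illustration: with PAD_PKCS5, b"12345" (5 bytes) becomes
    # b"12345\x03\x03\x03", and data that is already an exact multiple of
    # 8 bytes gains a full block of eight b"\x08" bytes, so the padding can
    # always be removed unambiguously.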
def _unpadData(self, data, pad, padmode):
# Unpad data depending on the mode.
if not data:
return data
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if padmode == PAD_NORMAL:
if not pad:
# Get the default padding.
pad = self.getPadding()
if pad:
data = data[:-self.block_size] + \
data[-self.block_size:].rstrip(pad)
elif padmode == PAD_PKCS5:
if _pythonMajorVersion < 3:
pad_len = ord(data[-1])
else:
pad_len = data[-1]
data = data[:-pad_len]
return data
def _guardAgainstUnicode(self, data):
# Only accept byte strings or ascii unicode values, otherwise
# there is no way to correctly decode the data into bytes.
if _pythonMajorVersion < 3:
if isinstance(data, unicode):
raise ValueError("pyDes can only work with bytes, not Unicode strings.")
else:
if isinstance(data, str):
# Only accept ascii unicode values.
try:
return data.encode('ascii')
except UnicodeEncodeError:
pass
raise ValueError("pyDes can only work with encoded strings, not Unicode.")
return data
#############################################################################
# DES #
#############################################################################
class des(_baseDes):
"""DES encryption/decrytpion class
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key,[mode], [IV])
key -> Bytes containing the encryption key, must be exactly 8 bytes
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
        during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
        PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
# Permutation and translation tables for DES
__pc1 = [56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
    # number of left rotations of pc1
__left_rotations = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# permuted choice key (table 2)
__pc2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# initial permutation IP
__ip = [57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table for turning 32 bit blocks into 48 bits
__expansion_table = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# The (in)famous S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# 32-bit permutation function P used on the output of the S-boxes
__p = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
23,13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# final permutation IP^-1
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Type of crypting being done
ENCRYPT = 0x00
DECRYPT = 0x01
# Initialisation
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
# Sanity checking of arguments.
if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
_baseDes.__init__(self, mode, IV, pad, padmode)
self.key_size = 8
self.L = []
self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
self.final = []
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Must be 8 bytes."""
_baseDes.setKey(self, key)
self.__create_sub_keys()
def __String_to_BitList(self, data):
"""Turn the string data, into a list of bits (1, 0)'s"""
if _pythonMajorVersion < 3:
# Turn the strings into integers. Python 3 uses a bytes
# class, which already has this behaviour.
data = [ord(c) for c in data]
l = len(data) * 8
result = [0] * l
pos = 0
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result[pos] = 1
else:
result[pos] = 0
pos += 1
i -= 1
return result
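    # For illustration (hypothetical input): __String_to_BitList("\x01\x80")
    # returns [0,0,0,0,0,0,0,1, 1,0,0,0,0,0,0,0].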
def __BitList_to_String(self, data):
"""Turn the list of bits -> data, into a string"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
if _pythonMajorVersion < 3:
return ''.join([ chr(c) for c in result ])
else:
return bytes(result)
def __permutate(self, table, block):
"""Permutate this block with the specified table"""
return list(map(lambda x: block[x], table))
# Transform the secret key, so that it is ready for data processing
# Create the 16 subkeys, K[1] - K[16]
def __create_sub_keys(self):
"""Create the 16 subkeys K[1] to K[16] from the given key"""
key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey()))
i = 0
# Split into Left and Right sections
self.L = key[:28]
self.R = key[28:]
while i < 16:
j = 0
# Perform circular left shifts
while j < des.__left_rotations[i]:
self.L.append(self.L[0])
del self.L[0]
self.R.append(self.R[0])
del self.R[0]
j += 1
# Create one of the 16 subkeys through pc2 permutation
self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R)
i += 1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
            # Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
if not self.getPadding():
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
        # Split the data into blocks, crypting each one separately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
"""Triple DES encryption/decrytpion class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
        during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
        PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadMode(mode)
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
_baseDes.setIV(self, IV)
for key in (self.__key1, self.__key2, self.__key3):
key.setIV(IV)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure the padded data is a multiple of 8 bytes.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
# Pad the data accordingly.
data = self._padData(data, pad, padmode)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
block = self.__key1.crypt(data[i:i+8], ENCRYPT)
block = self.__key2.crypt(block, DECRYPT)
block = self.__key3.crypt(block, ENCRYPT)
self.__key1.setIV(block)
self.__key2.setIV(block)
self.__key3.setIV(block)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
else:
data = self.__key1.crypt(data, ENCRYPT)
data = self.__key2.crypt(data, DECRYPT)
return self.__key3.crypt(data, ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting, no pad character is required for PAD_PKCS5.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
iv = data[i:i+8]
block = self.__key3.crypt(iv, DECRYPT)
block = self.__key2.crypt(block, ENCRYPT)
block = self.__key1.crypt(block, DECRYPT)
self.__key1.setIV(iv)
self.__key2.setIV(iv)
self.__key3.setIV(iv)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
data = ''.join(result)
else:
data = bytes.fromhex('').join(result)
else:
data = self.__key3.crypt(data, DECRYPT)
data = self.__key2.crypt(data, ENCRYPT)
data = self.__key1.crypt(data, DECRYPT)
return self._unpadData(data, pad, padmode)
| apache-2.0 | 5,527,490,423,028,265,000 | 36.799531 | 164 | 0.526719 | false |
Talanor/findmyhash | findmyhash/services/MD5Decryption.py | 1 | 1145 | from bs4 import BeautifulSoup
import requests
from findmyhash.algo import Algo
from findmyhash.errors import *
from .Service import Service
class MD5DECRYPTION(Service):
NAME = "md5decryption"
HOST = "http://md5decryption.com"
ALGO_SUPPORTED = [Algo.MD5]
@classmethod
def algo_supported(cls, algo: Algo) -> bool:
return algo in cls.ALGO_SUPPORTED
@classmethod
def crack_MD5(cls, hash: str) -> str:
r = requests.post(
cls.HOST,
data={
"hash": hash,
"submit": "Decrypt+It!"
},
headers={
"Content-Type": "application/x-www-form-urlencoded"
}
)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'html.parser')
found = soup.find_all("font")
return found[5].parent.parent.contents[1]
raise HashNotFound
@classmethod
def crack(cls, hash: str, algo: Algo) -> str:
res = ""
if algo == Algo.MD5:
res = cls.crack_MD5(hash)
else:
raise NotImplementedError
return res | gpl-3.0 | -4,836,506,783,230,636,000 | 25.045455 | 67 | 0.551965 | false |
sajeeshcs/nested_quota_latest | nova/api/openstack/compute/contrib/flavor_rxtx.py | 79 | 2175 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Rxtx API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
authorize = extensions.soft_extension_authorizer('compute', 'flavor_rxtx')
class FlavorRxtxController(wsgi.Controller):
def _extend_flavors(self, req, flavors):
for flavor in flavors:
db_flavor = req.get_db_flavor(flavor['id'])
key = 'rxtx_factor'
flavor[key] = db_flavor['rxtx_factor'] or ""
def _show(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
if 'flavor' in resp_obj.obj:
self._extend_flavors(req, [resp_obj.obj['flavor']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends(action='create')
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not authorize(req.environ['nova.context']):
return
self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavor_rxtx(extensions.ExtensionDescriptor):
"""Support to show the rxtx status of a flavor."""
name = "FlavorRxtx"
alias = "os-flavor-rxtx"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_rxtx/api/v1.1")
updated = "2012-08-29T00:00:00Z"
def get_controller_extensions(self):
controller = FlavorRxtxController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
| apache-2.0 | -1,045,542,845,519,476,400 | 32.984375 | 79 | 0.65977 | false |
andmos/ansible | lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py | 46 | 6822 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: "Zacharie Eakin (@Zeekin)"
options:
state:
description:
- register or deregister the policy
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when policy is triggered
required: false
cooldown:
description:
- The minimum period of time between which autoscaling actions can take place
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
'''
try:
import boto.ec2.autoscale
import boto.exception
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def create_scaling_policy(connection, module):
sp_name = module.params.get('name')
adjustment_type = module.params.get('adjustment_type')
asg_name = module.params.get('asg_name')
scaling_adjustment = module.params.get('scaling_adjustment')
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
name=sp_name,
adjustment_type=adjustment_type,
as_name=asg_name,
scaling_adjustment=scaling_adjustment,
min_adjustment_step=min_adjustment_step,
cooldown=cooldown)
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
policy = scalingPolicies[0]
changed = False
# min_adjustment_step attribute is only relevant if the adjustment_type
# is set to percentage change in capacity, so it is a special case
if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
changed = True
# set the min adjustment step in case the user decided to change their
# adjustment type to percentage
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
if scalingPolicies:
try:
connection.delete_policy(sp_name, asg_name)
module.exit_json(changed=True)
except BotoServerError as e:
module.exit_json(changed=False, msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
asg_name=dict(required=True, type='str'),
scaling_adjustment=dict(type='int'),
min_adjustment_step=dict(type='int'),
cooldown=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
state = module.params.get('state')
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
if state == 'present':
create_scaling_policy(connection, module)
elif state == 'absent':
delete_scaling_policy(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,513,407,901,141,586,000 | 34.905263 | 156 | 0.658898 | false |
smasala/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/filter_unittest.py | 124 | 9830 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for filter.py."""
import unittest2 as unittest
from filter import _CategoryFilter as CategoryFilter
from filter import validate_filter_rules
from filter import FilterConfiguration
# On Testing __eq__() and __ne__():
#
# In the tests below, we deliberately do not use assertEqual() or
# assertNotEquals() to test __eq__() or __ne__(). We do this to be
# very explicit about what we are testing, especially in the case
# of assertNotEquals().
#
# Part of the reason is that it is not immediately clear what
# expression the unittest module uses to assert "not equals" -- the
# negation of __eq__() or __ne__(), which are not necessarily
# equivalent expressions in Python. For example, from Python's "Data
# Model" documentation--
#
# "There are no implied relationships among the comparison
# operators. The truth of x==y does not imply that x!=y is
# false. Accordingly, when defining __eq__(), one should
# also define __ne__() so that the operators will behave as
# expected."
#
# (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
class ValidateFilterRulesTest(unittest.TestCase):
"""Tests validate_filter_rules() function."""
def test_validate_filter_rules(self):
all_categories = ["tabs", "whitespace", "build/include"]
bad_rules = [
"tabs",
"*tabs",
" tabs",
" +tabs",
"+whitespace/newline",
"+xxx",
]
good_rules = [
"+tabs",
"-tabs",
"+build"
]
for rule in bad_rules:
self.assertRaises(ValueError, validate_filter_rules,
[rule], all_categories)
for rule in good_rules:
# This works: no error.
validate_filter_rules([rule], all_categories)
class CategoryFilterTest(unittest.TestCase):
"""Tests CategoryFilter class."""
def test_init(self):
"""Test __init__ method."""
# Test that the attributes are getting set correctly.
filter = CategoryFilter(["+"])
self.assertEqual(["+"], filter._filter_rules)
def test_init_default_arguments(self):
"""Test __init__ method default arguments."""
filter = CategoryFilter()
self.assertEqual([], filter._filter_rules)
def test_str(self):
"""Test __str__ "to string" operator."""
filter = CategoryFilter(["+a", "-b"])
self.assertEqual(str(filter), "+a,-b")
def test_eq(self):
"""Test __eq__ equality function."""
filter1 = CategoryFilter(["+a", "+b"])
filter2 = CategoryFilter(["+a", "+b"])
filter3 = CategoryFilter(["+b", "+a"])
# See the notes at the top of this module about testing
# __eq__() and __ne__().
self.assertTrue(filter1.__eq__(filter2))
self.assertFalse(filter1.__eq__(filter3))
def test_ne(self):
"""Test __ne__ inequality function."""
# By default, __ne__ always returns true on different objects.
# Thus, just check the distinguishing case to verify that the
# code defines __ne__.
#
# Also, see the notes at the top of this module about testing
# __eq__() and __ne__().
self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))
def test_should_check(self):
"""Test should_check() method."""
filter = CategoryFilter()
self.assertTrue(filter.should_check("everything"))
# Check a second time to exercise cache.
self.assertTrue(filter.should_check("everything"))
filter = CategoryFilter(["-"])
self.assertFalse(filter.should_check("anything"))
# Check a second time to exercise cache.
self.assertFalse(filter.should_check("anything"))
filter = CategoryFilter(["-", "+ab"])
self.assertTrue(filter.should_check("abc"))
self.assertFalse(filter.should_check("a"))
filter = CategoryFilter(["+", "-ab"])
self.assertFalse(filter.should_check("abc"))
self.assertTrue(filter.should_check("a"))
class FilterConfigurationTest(unittest.TestCase):
"""Tests FilterConfiguration class."""
def _config(self, base_rules, path_specific, user_rules):
"""Return a FilterConfiguration instance."""
return FilterConfiguration(base_rules=base_rules,
path_specific=path_specific,
user_rules=user_rules)
def test_init(self):
"""Test __init__ method."""
# Test that the attributes are getting set correctly.
# We use parameter values that are different from the defaults.
base_rules = ["-"]
path_specific = [(["path"], ["+a"])]
user_rules = ["+"]
config = self._config(base_rules, path_specific, user_rules)
self.assertEqual(base_rules, config._base_rules)
self.assertEqual(path_specific, config._path_specific)
self.assertEqual(user_rules, config._user_rules)
def test_default_arguments(self):
# Test that the attributes are getting set correctly to the defaults.
config = FilterConfiguration()
self.assertEqual([], config._base_rules)
self.assertEqual([], config._path_specific)
self.assertEqual([], config._user_rules)
def test_eq(self):
"""Test __eq__ method."""
# See the notes at the top of this module about testing
# __eq__() and __ne__().
self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration()))
# Verify that a difference in any argument causes equality to fail.
config = FilterConfiguration()
# These parameter values are different from the defaults.
base_rules = ["-"]
path_specific = [(["path"], ["+a"])]
user_rules = ["+"]
self.assertFalse(config.__eq__(FilterConfiguration(
base_rules=base_rules)))
self.assertFalse(config.__eq__(FilterConfiguration(
path_specific=path_specific)))
self.assertFalse(config.__eq__(FilterConfiguration(
user_rules=user_rules)))
def test_ne(self):
"""Test __ne__ method."""
# By default, __ne__ always returns true on different objects.
# Thus, just check the distinguishing case to verify that the
# code defines __ne__.
#
# Also, see the notes at the top of this module about testing
# __eq__() and __ne__().
self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration()))
def test_base_rules(self):
"""Test effect of base_rules on should_check()."""
base_rules = ["-b"]
path_specific = []
user_rules = []
config = self._config(base_rules, path_specific, user_rules)
self.assertTrue(config.should_check("a", "path"))
self.assertFalse(config.should_check("b", "path"))
def test_path_specific(self):
"""Test effect of path_rules_specifier on should_check()."""
base_rules = ["-"]
path_specific = [(["path1"], ["+b"]),
(["path2"], ["+c"])]
user_rules = []
config = self._config(base_rules, path_specific, user_rules)
self.assertFalse(config.should_check("c", "path1"))
self.assertTrue(config.should_check("c", "path2"))
# Test that first match takes precedence.
self.assertFalse(config.should_check("c", "path2/path1"))
def test_path_with_different_case(self):
"""Test a path that differs only in case."""
base_rules = ["-"]
path_specific = [(["Foo/"], ["+whitespace"])]
user_rules = []
config = self._config(base_rules, path_specific, user_rules)
self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt"))
self.assertTrue(config.should_check("whitespace", "Foo/bar.txt"))
# Test different case.
self.assertTrue(config.should_check("whitespace", "FOO/bar.txt"))
def test_user_rules(self):
"""Test effect of user_rules on should_check()."""
base_rules = ["-"]
path_specific = []
user_rules = ["+b"]
config = self._config(base_rules, path_specific, user_rules)
self.assertFalse(config.should_check("a", "path"))
self.assertTrue(config.should_check("b", "path"))
| bsd-3-clause | 5,322,118,931,330,562,000 | 37.398438 | 80 | 0.612411 | false |
jolth/websiteDevMicrosystem | web/wsgiserver/ssl_builtin.py | 79 | 2589 | """A library for integrating Python's builtin ``ssl`` library with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
``BuiltinSSLAdapter``.
"""
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating Python's builtin ssl module with CherryPy."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
def __init__(self, certificate, private_key, certificate_chain=None):
if ssl is None:
raise ImportError("You must install the ssl module to use HTTPS.")
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
try:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True, certfile=self.certificate,
keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
except ssl.SSLError, e:
if e.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return None, {}
elif e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
# The client is speaking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError
raise
return s, self.get_environ(s)
# TODO: fill this out more with mod ssl env
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ
def makefile(self, sock, mode='r', bufsize=-1):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
| apache-2.0 | -7,540,007,263,591,561,000 | 34.958333 | 78 | 0.599846 | false |
GheRivero/ansible | lib/ansible/modules/cloud/cloudstack/cs_instance_facts.py | 37 | 8632 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instance_facts
short_description: Gathering facts from the API of instances from Apache CloudStack based clouds.
description:
- Gathering facts from the API of an instance.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name or display name of the instance.
required: true
domain:
description:
- Domain the instance is related to.
account:
description:
- Account the instance is related to.
project:
description:
- Project the instance is related to.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: gather instance facts
cs_instance_facts:
name: web-vm-1
delegate_to: localhost
register: vm
- debug:
var: cloudstack_instance
- debug:
var: vm
'''
RETURN = '''
---
id:
description: UUID of the instance.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the instance.
returned: success
type: string
sample: web-01
display_name:
description: Display name of the instance.
returned: success
type: string
sample: web-01
group:
description: Group name of the instance is related.
returned: success
type: string
sample: web
created:
description: Date of the instance was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
password_enabled:
description: True if password setting is enabled.
returned: success
type: boolean
sample: true
password:
description: The password of the instance if exists.
returned: success
type: string
sample: Ge2oe7Do
ssh_key:
description: Name of SSH key deployed to instance.
returned: success
type: string
sample: key@work
domain:
description: Domain the instance is related to.
returned: success
type: string
sample: example domain
account:
description: Account the instance is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the instance is related to.
returned: success
type: string
sample: Production
default_ip:
description: Default IP address of the instance.
returned: success
type: string
sample: 10.23.37.42
public_ip:
  description: Public IP address associated with the instance via static NAT rule.
returned: success
type: string
sample: 1.2.3.4
iso:
description: Name of ISO the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
template:
description: Name of template the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
service_offering:
description: Name of the service offering the instance has.
returned: success
type: string
sample: 2cpu_2gb
zone:
description: Name of zone the instance is in.
returned: success
type: string
sample: ch-gva-2
state:
description: State of the instance.
returned: success
type: string
sample: Running
security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
tags:
description: List of resource tags associated with the instance.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
hypervisor:
description: Hypervisor related to this instance.
returned: success
type: string
sample: KVM
host:
description: Host the instance is running on.
returned: success and instance is running
type: string
sample: host01.example.com
version_added: '2.6'
instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: string
sample: i-44-3992-VM
volumes:
description: List of dictionaries of the volumes attached to the instance.
returned: success
type: list
  sample: '[ { name: "ROOT-1369", type: "ROOT", size: 10737418240 }, { name: "data01", type: "DATADISK", size: 10737418240 } ]'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec
class AnsibleCloudStackInstanceFacts(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstanceFacts, self).__init__(module)
self.instance = None
self.returns = {
'group': 'group',
'hypervisor': 'hypervisor',
'instancename': 'instance_name',
'publicip': 'public_ip',
'passwordenabled': 'password_enabled',
'password': 'password',
'serviceofferingname': 'service_offering',
'isoname': 'iso',
'templatename': 'template',
'keypair': 'ssh_key',
'hostname': 'host',
}
self.facts = {
'cloudstack_instance': None,
}
def get_instance(self):
instance = self.instance
if not instance:
instance_name = self.module.params.get('name')
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'fetch_list': True,
}
# Do not pass zoneid, as the instance name must be unique across zones.
instances = self.query_api('listVirtualMachines', **args)
if instances:
for v in instances:
if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.instance = v
break
return self.instance
def get_volumes(self, instance):
volume_details = []
if instance:
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'virtualmachineid': instance['id'],
'fetch_list': True,
}
volumes = self.query_api('listVolumes', **args)
if volumes:
for vol in volumes:
volume_details.append({'size': vol['size'], 'type': vol['type'], 'name': vol['name']})
return volume_details
def run(self):
instance = self.get_instance()
if not instance:
self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name'))
return instance
def get_result(self, instance):
super(AnsibleCloudStackInstanceFacts, self).get_result(instance)
if instance:
if 'securitygroup' in instance:
security_groups = []
for securitygroup in instance['securitygroup']:
security_groups.append(securitygroup['name'])
self.result['security_groups'] = security_groups
if 'affinitygroup' in instance:
affinity_groups = []
for affinitygroup in instance['affinitygroup']:
affinity_groups.append(affinitygroup['name'])
self.result['affinity_groups'] = affinity_groups
if 'nic' in instance:
for nic in instance['nic']:
if nic['isdefault'] and 'ipaddress' in nic:
self.result['default_ip'] = nic['ipaddress']
volumes = self.get_volumes(instance)
if volumes:
self.result['volumes'] = volumes
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
domain=dict(),
account=dict(),
project=dict(),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
acs_instance_facts = AnsibleCloudStackInstanceFacts(module=module)
cs_instance_facts = acs_instance_facts.get_result_and_facts(
facts_name='cloudstack_instance',
resource=acs_instance_facts.run()
)
module.exit_json(**cs_instance_facts)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,068,647,328,051,567,000 | 27.862876 | 126 | 0.629896 | false |
ebar0n/django | django/urls/conf.py | 90 | 2946 | """Functions for use in URLsconfs."""
from functools import partial
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from .resolvers import (
LocalePrefixPattern, RegexPattern, RoutePattern, URLPattern, URLResolver,
)
def include(arg, namespace=None):
app_name = None
if isinstance(arg, tuple):
# Callable returning a namespace hint.
try:
urlconf_module, app_name = arg
except ValueError:
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that '
'provides a namespace.'
)
raise ImproperlyConfigured(
'Passing a %d-tuple to include() is not supported. Pass a '
'2-tuple containing the list of patterns and app_name, and '
'provide the namespace argument to include() instead.' % len(arg)
)
else:
# No namespace hint - use manually provided namespace.
urlconf_module = arg
if isinstance(urlconf_module, str):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
app_name = getattr(urlconf_module, 'app_name', app_name)
if namespace and not app_name:
raise ImproperlyConfigured(
'Specifying a namespace in include() without providing an app_name '
'is not supported. Set the app_name attribute in the included '
'module, or pass a 2-tuple containing the list of patterns and '
'app_name instead.',
)
namespace = namespace or app_name
# Make sure the patterns can be iterated through (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
pattern = getattr(url_pattern, 'pattern', None)
if isinstance(pattern, LocalePrefixPattern):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.'
)
return (urlconf_module, app_name, namespace)
def _path(route, view, kwargs=None, name=None, Pattern=None):
if isinstance(view, (list, tuple)):
# For include(...) processing.
pattern = Pattern(route, is_endpoint=False)
urlconf_module, app_name, namespace = view
return URLResolver(
pattern,
urlconf_module,
kwargs,
app_name=app_name,
namespace=namespace,
)
elif callable(view):
pattern = Pattern(route, name=name, is_endpoint=True)
return URLPattern(pattern, view, kwargs, name)
else:
raise TypeError('view must be a callable or a list/tuple in the case of include().')
path = partial(_path, Pattern=RoutePattern)
re_path = partial(_path, Pattern=RegexPattern)
| bsd-3-clause | -2,431,681,168,064,921,600 | 37.25974 | 92 | 0.624915 | false |
poulpito/Flexget | flexget/api/plugins/tvmaze_lookup.py | 4 | 6090 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flask import jsonify
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.api.app import NotFoundError, BadRequest, etag
from flexget.plugins.internal.api_tvmaze import APITVMaze as tvm
tvmaze_api = api.namespace('tvmaze', description='TVMaze Shows')
class ObjectsContainer(object):
actor_object = {
'type': 'object',
'properties': {
"last_update": {'type': 'string', 'format': 'date-time'},
"medium_image": {'type': 'string'},
"name": {'type': 'string'},
"original_image": {'type': 'string'},
"tvmaze_id": {'type': 'integer'},
"url": {'type': 'string'}
}
}
schedule_object = {
'type': 'object',
'properties': {
"days": {'type': 'array', 'items': {'type': 'string'}},
"time": {'type': 'string'}
}
}
tvmaze_series_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'status': {'type': 'string'},
'rating': {'type': 'number'},
'genres': {'type': 'array', 'items': {'type': 'string'}},
'weight': {'type': 'integer'},
'updated': {'type': 'string', 'format': 'date-time'},
'name': {'type': 'string'},
'language': {'type': 'string'},
'schedule': schedule_object,
'url': {'type': 'string', 'format': 'url'},
'original_image': {'type': 'string'},
'medium_image': {'type': 'string'},
'tvdb_id': {'type': 'integer'},
'tvrage_id': {'type': 'integer'},
'premiered': {'type': 'string', 'format': 'date-time'},
'year': {'type': 'integer'},
'summary': {'type': 'string'},
'webchannel': {'type': ['string', 'null']},
'runtime': {'type': 'integer'},
'show_type': {'type': 'string'},
'network': {'type': ['string', 'null']},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'status', 'rating', 'genres', 'weight', 'updated', 'name', 'language',
'schedule', 'url', 'original_image', 'medium_image', 'tvdb_id', 'tvrage_id', 'premiered', 'year',
'summary', 'webchannel', 'runtime', 'show_type', 'network', 'last_update'],
'additionalProperties': False
}
tvmaze_episode_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'series_id': {'type': 'integer'},
'number': {'type': 'integer'},
'season_number': {'type': 'integer'},
'title': {'type': 'string'},
'airdate': {'type': 'string', 'format': 'date-time'},
'url': {'type': 'string'},
'original_image': {'type': ['string', 'null']},
'medium_image': {'type': ['string', 'null']},
'airstamp': {'type': 'string', 'format': 'date-time'},
'runtime': {'type': 'integer'},
'summary': {'type': 'string'},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'series_id', 'number', 'season_number', 'title', 'airdate', 'url', 'original_image',
'medium_image', 'airstamp', 'runtime', 'summary', 'last_update'],
'additionalProperties': False
}
tvmaze_series_schema = api.schema('tvmaze_series_schema', ObjectsContainer.tvmaze_series_object)
tvmaze_episode_schema = api.schema('tvmaze_episode_schema', ObjectsContainer.tvmaze_episode_object)
@tvmaze_api.route('/series/<string:title>/')
@api.doc(params={'title': 'TV Show name or TVMaze ID'})
class TVDBSeriesSearchApi(APIResource):
@etag
@api.response(200, 'Successfully found show', model=tvmaze_series_schema)
@api.response(NotFoundError)
def get(self, title, session=None):
"""TVMaze series lookup"""
try:
tvmaze_id = int(title)
except ValueError:
tvmaze_id = None
try:
if tvmaze_id:
series = tvm.series_lookup(tvmaze_id=tvmaze_id, session=session)
else:
series = tvm.series_lookup(series_name=title, session=session)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(series.to_dict())
episode_parser = api.parser()
episode_parser.add_argument('season_num', type=int, help='Season number')
episode_parser.add_argument('ep_num', type=int, help='Episode number')
episode_parser.add_argument('air_date', type=inputs.date_from_iso8601, help="Air date in the format of '2012-01-01'")
@tvmaze_api.route('/episode/<int:tvmaze_id>/')
@api.doc(params={'tvmaze_id': 'TVMaze ID of show'})
@api.doc(parser=episode_parser)
class TVDBEpisodeSearchAPI(APIResource):
@etag
@api.response(200, 'Successfully found episode', tvmaze_episode_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
def get(self, tvmaze_id, session=None):
"""TVMaze episode lookup"""
args = episode_parser.parse_args()
air_date = args.get('air_date')
season_num = args.get('season_num')
ep_num = args.get('ep_num')
kwargs = {'tvmaze_id': tvmaze_id,
'session': session}
if air_date:
kwargs['series_id_type'] = 'date'
kwargs['series_date'] = air_date
elif season_num and ep_num:
kwargs['series_id_type'] = 'ep'
kwargs['series_season'] = season_num
kwargs['series_episode'] = ep_num
else:
raise BadRequest('not enough parameters sent for lookup')
try:
episode = tvm.episode_lookup(**kwargs)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(episode.to_dict())
| mit | 208,084,740,349,788,400 | 39.065789 | 118 | 0.546798 | false |
Poles/Poles | platforms/windows/JsonCpp/scons-local-2.3.0/SCons/Options/__init__.py | 11 | 2679 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/__init__.py 2013/03/03 09:48:35 garyo"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
from BoolOption import BoolOption # okay
from EnumOption import EnumOption # okay
from ListOption import ListOption # naja
from PackageOption import PackageOption # naja
from PathOption import PathOption # okay
warned = False
class Options(SCons.Variables.Variables):
def __init__(self, *args, **kw):
global warned
if not warned:
msg = "The Options class is deprecated; use the Variables class instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
SCons.Variables.Variables.__init__(self, *args, **kw)
def AddOptions(self, *args, **kw):
return SCons.Variables.Variables.AddVariables(self, *args, **kw)
def UnknownOptions(self, *args, **kw):
return SCons.Variables.Variables.UnknownVariables(self, *args, **kw)
def FormatOptionHelpText(self, *args, **kw):
return SCons.Variables.Variables.FormatVariableHelpText(self, *args,
**kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | 8,733,869,453,094,611,000 | 38.985075 | 113 | 0.729003 | false |
TelekomCloud/aptexport | aptexport/__init__.py | 1 | 2923 | # -*- coding: utf-8 -*-
# Copyright 2013 Thomas Bechtold <[email protected]>
# Copyright 2013 Deutsche Telekom AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import apt
import hashlib
class Package(object):
def __init__(self, pkg):
#handle only packages of type 'apt.package.Package'
if not isinstance(pkg, apt.package.Package):
raise Exception("pkg type not 'apt.package.Package'")
self.__pkg = pkg
#get the version to get information from
if hasattr(self.__pkg, "installed") and self.__pkg.installed:
self.__pkg_version = pkg.installed
elif hasattr(self.__pkg, "candidate") and self.__pkg.candidate:
self.__pkg_version = pkg.candidate
elif hasattr(self.__pkg, "versions") and self.__pkg.versions and \
len(self.__pkg.versions) > 0:
self.__pkg_version = self.__pkg.versions[0]
else:
raise Exception("Can not get a version for pkg '{0}'".format(
pkg.fullname))
def __repr__(self):
return "<%(name)s, %(version)s>" % self.as_dict()
def as_dict(self):
"""get package information as dict"""
p = dict()
p["name"] = self.__pkg.name
p["uri"] = self.__pkg_version.uri
p["version"] = self.__pkg_version.version
p["summary"] = self.__pkg_version.summary
p["sha256"] = self.__pkg_version.sha256
# fake a checksum value, since we can't find a real one
if p["sha256"] is None:
p["sha256"] = hashlib.sha256( p["name"] + "-" + p["version"] ).hexdigest()
p["provider"] = "apt"
p["architecture"] = self.__pkg_version.architecture
return p
class PackageListApt(object):
"""create list with all/only installed deb packages"""
def __init__(self, rootdir="/", cache_update=False):
self.__cache_update = cache_update
self.__cache = apt.Cache(rootdir=rootdir, memonly=False)
#update the apt-cache before using it? Need to be root todo this
if cache_update:
self.__cache.update()
self.__cache.open()
def package_list_apt(self, only_installed):
"""iterate over the packages"""
for pkg in self.__cache:
if only_installed and not pkg.is_installed:
continue
yield (Package(pkg).as_dict())
| apache-2.0 | 4,680,683,016,800,820,000 | 38.5 | 86 | 0.6182 | false |
henryfjordan/django | django/contrib/postgres/forms/hstore.py | 313 | 1484 | import json
from django import forms
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(forms.CharField):
"""A field for HStore data which accepts JSON input."""
widget = forms.Textarea
default_error_messages = {
'invalid_json': _('Could not load JSON data.'),
}
def prepare_value(self, value):
if isinstance(value, dict):
return json.dumps(value)
return value
def to_python(self, value):
if not value:
return {}
if not isinstance(value, dict):
try:
value = json.loads(value)
except ValueError:
raise ValidationError(
self.error_messages['invalid_json'],
code='invalid_json',
)
# Cast everything to strings for ease.
for key, val in value.items():
value[key] = six.text_type(val)
return value
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty dict, if the data or initial value we get
# is None, replace it w/ {}.
initial_value = self.to_python(initial)
return super(HStoreField, self).has_changed(initial_value, data)
| bsd-3-clause | -6,580,583,502,507,640,000 | 30.574468 | 72 | 0.593666 | false |
ingenieroariel/geonode | geonode/base/management/commands/updategeoip.py | 3 | 2478 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from __future__ import print_function
import os
import logging
import gzip
import urllib
from six import StringIO
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_noop as _
log = logging.getLogger(__name__)
URL = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz'
class Command(BaseCommand):
"""
Update GeoIP database
"""
def add_arguments(self, parser):
parser.add_argument('-f', '--file', dest='file', default=settings.GEOIP_PATH,
help=_("Write result to file, default GEOIP_PATH: {}".format(settings.GEOIP_PATH)))
parser.add_argument('-u', '--url', dest='url', default=URL,
help=_("Fetch database from specific url. If nothing provided, default {} will be used"))
parser.add_argument('-o', '--overwrite', dest='overwrite', action='store_true', default=False,
help=_("Overwrite file if exists"))
def handle(self, *args, **options):
fname = options['file']
fbase = '.'.join(os.path.basename(options['url']).split('.')[:-1])
if not options['overwrite'] and os.path.exists(fname):
log.warning("File exists, won't overwrite %s", fname)
return
log.info("Requesting %s", options['url'])
r = urllib.urlopen(options['url'])
data = StringIO(r.read())
with gzip.GzipFile(fileobj=data) as zfile:
log.info("Writing to %s", fname)
with open(fname, 'wb') as tofile:
tofile.write(zfile.read())
| gpl-3.0 | 9,002,786,486,742,212,000 | 38.967742 | 117 | 0.613801 | false |
xin3liang/platform_external_chromium_org | chrome/test/ispy/common/cloud_bucket.py | 124 | 2185 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Abstract injector class for GS requests."""
class FileNotFoundError(Exception):
"""Thrown by a subclass of CloudBucket when a file is not found."""
pass
class BaseCloudBucket(object):
"""An abstract base class for working with GS."""
def UploadFile(self, path, contents, content_type):
"""Uploads a file to GS.
Args:
path: where in GS to upload the file.
contents: the contents of the file to be uploaded.
content_type: the MIME Content-Type of the file.
"""
raise NotImplementedError
def DownloadFile(self, path):
"""Downsloads a file from GS.
Args:
path: the location in GS to download the file from.
Returns:
String contents of the file downloaded.
Raises:
bucket_injector.NotFoundException: if the file is not found.
"""
raise NotImplementedError
def UpdateFile(self, path, contents):
"""Uploads a file to GS.
Args:
path: location of the file in GS to update.
contents: the contents of the file to be updated.
"""
raise NotImplementedError
def RemoveFile(self, path):
"""Removes a file from GS.
Args:
      path: the location in GS of the file to remove.
"""
raise NotImplementedError
def FileExists(self, path):
"""Checks if a file exists in GS.
Args:
path: the location in GS of the file.
Returns:
boolean representing whether the file exists in GS.
"""
raise NotImplementedError
def GetImageURL(self, path):
"""Gets a URL to an item in GS from its path.
Args:
path: the location in GS of a file.
Returns:
an url to a file in GS.
Raises:
bucket_injector.NotFoundException: if the file is not found.
"""
raise NotImplementedError
def GetAllPaths(self, prefix):
"""Gets paths to files in GS that start with a prefix.
Args:
prefix: the prefix to filter files in GS.
Returns:
a generator of paths to files in GS.
"""
raise NotImplementedError
| bsd-3-clause | 6,522,325,679,659,914,000 | 23.010989 | 72 | 0.663158 | false |
goshow-jp/Kraken | tests/ComponentTests/tentacle_build.py | 2 | 1650 |
from kraken import plugins
from kraken.core.maths import Vec3
from kraken_components.generic.tentacle_component import TentacleComponentGuide, TentacleComponentRig
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
Profiler.getInstance().push("tentacle_build")
tentacleGuide = TentacleComponentGuide("tentacle")
tentacleGuide.loadData({
"name": "tentacle",
"location": "L",
"numJoints": 12,
"jointPositions": [Vec3(0.9811, 12, -1.237),
Vec3(5.4488, 11, -1.237),
Vec3(4.0, 10, -1.237),
Vec3(6.841, 9, -1.237),
Vec3(9.841, 8, -1.237),
Vec3(9.841, 7, -1.237),
Vec3(9.841, 6, -1.237),
Vec3(9.841, 5, -1.237),
Vec3(9.841, 4, -1.237),
Vec3(9.841, 3, -1.237),
Vec3(9.841, 2, -1.237),
Vec3(9.841, 1, -1.237)]
})
# Save the tentacle guide data for persistence.
saveData = tentacleGuide.saveData()
tentacleGuideData = tentacleGuide.getRigBuildData()
tentacle = TentacleComponentRig()
tentacle.loadData(tentacleGuideData)
builder = plugins.getBuilder()
builder.build(tentacle)
Profiler.getInstance().pop()
if __name__ == "__main__":
print Profiler.getInstance().generateReport()
else:
for each in tentacle.getItems().values():
# Only log hierarchy for Layer objects as Layers in this test are added to
# the component since there is no rig object.
if each.isTypeOf('Layer'):
logHierarchy(each)
| bsd-3-clause | 6,119,032,252,521,689,000 | 31.352941 | 101 | 0.594545 | false |
PoundPay/wtforms | docs/conf.py | 4 | 6200 | # -*- coding: utf-8 -*-
#
# WTForms documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 01 15:29:36 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
def _fix_import_path():
"""
Don't want to pollute the config globals, so do path munging
here in this function
"""
import sys, os
try:
import wtforms
except ImportError:
parent_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
build_lib = os.path.join(parent_dir, 'build', 'lib')
if os.path.isdir(build_lib):
sys.path.insert(0, build_lib)
else:
sys.path.insert(0, parent_dir)
_fix_import_path()
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'WTForms'
copyright = '2010 by Thomas Johansson, James Crasta'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.0.2'
# The full version, including alpha/beta/rc tags.
release = '1.0.2dev'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'WTFormsdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'WTForms.tex', 'WTForms Documentation',
'Thomas Johansson, James Crasta', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause | -4,541,721,910,906,658,000 | 30.291667 | 100 | 0.684355 | false |
wri/gfw-api | lib/oauth2client/locked_file.py | 144 | 11379 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavailable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
    print 'Acquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = '[email protected] (David T McWherter)'
import errno
import logging
import os
import time
from oauth2client import util
logger = logging.getLogger(__name__)
class CredentialsFileSymbolicLinkError(Exception):
"""Credentials files must not be symbolic links."""
class AlreadyLockedException(Exception):
"""Trying to lock a file that has already been locked by the LockedFile."""
pass
def validate_file(filename):
if os.path.islink(filename):
raise CredentialsFileSymbolicLinkError(
'File: %s is a symbolic link.' % filename)
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
"""Lock files using Posix advisory lock files."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Tries to create a .lock file next to the file we're trying to open.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
self._locked = False
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
lock_filename = self._posix_lockfile(self._filename)
start_time = time.time()
while True:
try:
self._lock_fd = os.open(lock_filename,
os.O_CREAT|os.O_EXCL|os.O_RDWR)
self._locked = True
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= timeout:
logger.warn('Could not acquire lock %s in %s seconds' % (
lock_filename, timeout))
# Close the file and open in fallback_mode.
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Unlock a file by removing the .lock file, and close the handle."""
if self._locked:
lock_filename = self._posix_lockfile(self._filename)
os.close(self._lock_fd)
os.unlink(lock_filename)
self._locked = False
self._lock_fd = None
if self._fh:
self._fh.close()
def _posix_lockfile(self, filename):
"""The name of the lock file to use for posix locking."""
return '%s.lock' % filename
try:
import fcntl
class _FcntlOpener(_Opener):
"""Open, lock, and unlock a file using fcntl.lockf."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError, e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise e
if e.errno != errno.EACCES:
raise e
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the fcntl.lockf primitive."""
if self._locked:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_FcntlOpener = None
try:
import pywintypes
import win32con
import win32file
class _Win32Opener(_Opener):
"""Open, lock, and unlock a file using windows primitives."""
# Error #33:
# 'The process cannot access the file because another process'
FILE_IN_USE_ERROR = 33
# Error #158:
# 'The segment is already unlocked.'
FILE_ALREADY_UNLOCKED_ERROR = 158
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.LockFileEx(
hfile,
(win32con.LOCKFILE_FAIL_IMMEDIATELY|
win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
pywintypes.OVERLAPPED())
self._locked = True
return
except pywintypes.error, e:
if timeout == 0:
raise e
# If the error is not that the file is already in use, raise.
if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the win32 primitive."""
if self._locked:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
except pywintypes.error, e:
if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
raise
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_Win32Opener = None
class LockedFile(object):
"""Represent a file that has exclusive access."""
@util.positional(4)
def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
"""Construct a LockedFile.
Args:
filename: string, The path of the file to open.
mode: string, The mode to try to open the file with.
fallback_mode: string, The mode to use if locking fails.
use_native_locking: bool, Whether or not fcntl/win32 locking is used.
"""
opener = None
if not opener and use_native_locking:
if _Win32Opener:
opener = _Win32Opener(filename, mode, fallback_mode)
if _FcntlOpener:
opener = _FcntlOpener(filename, mode, fallback_mode)
if not opener:
opener = _PosixOpener(filename, mode, fallback_mode)
self._opener = opener
def filename(self):
"""Return the filename we were constructed with."""
return self._opener._filename
def file_handle(self):
"""Return the file_handle to the opened file."""
return self._opener.file_handle()
def is_locked(self):
"""Return whether we successfully locked the file."""
return self._opener.is_locked()
def open_and_lock(self, timeout=0, delay=0.05):
"""Open the file, trying to lock it.
Args:
timeout: float, The number of seconds to try to acquire the lock.
delay: float, The number of seconds to wait between retry attempts.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
self._opener.open_and_lock(timeout, delay)
def unlock_and_close(self):
"""Unlock and close a file."""
self._opener.unlock_and_close()
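if __name__ == '__main__':
    # Minimal usage sketch, not part of the library: it mirrors the module
    # docstring above. '/tmp/example.dat' is a placeholder path and must
    # already exist for the preferred 'r+b' mode to succeed.
    f = LockedFile('/tmp/example.dat', 'r+b', 'rb')
    f.open_and_lock()
    try:
        if f.is_locked():
            print 'Acquired /tmp/example.dat with r+b mode'
        else:
            print 'Acquired /tmp/example.dat with rb mode'
    finally:
        f.unlock_and_close()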
| gpl-2.0 | -4,348,401,927,123,820,000 | 29.506702 | 77 | 0.615696 | false |
Frodox/buildbot | master/buildbot/test/unit/test_data_forceschedulers.py | 1 | 7282 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import forceschedulers
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.test.util import endpoint
expected_default = {
'all_fields': [{'columns': 1,
'default': '',
'fields': [{'default': '',
'fullName': 'username',
'hide': False,
'label': 'Your name:',
'maxsize': None,
'multiple': False,
'name': 'username',
'need_email': True,
'regex': None,
'required': False,
'size': 30,
'tablabel': 'Your name:',
'type': 'username'},
{'default': 'force build',
'fullName': 'reason',
'hide': False,
'label': 'reason',
'maxsize': None,
'multiple': False,
'name': 'reason',
'regex': None,
'required': False,
'size': 20,
'tablabel': 'reason',
'type': 'text'}],
'fullName': None,
'hide': False,
'label': '',
'layout': 'vertical',
'maxsize': None,
'multiple': False,
'name': '',
'regex': None,
'required': False,
'tablabel': '',
'type': 'nested'},
{'columns': 2,
'default': '',
'fields': [{'default': '',
'fullName': 'branch',
'hide': False,
'label': 'Branch:',
'multiple': False,
'maxsize': None,
'name': 'branch',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Branch:',
'type': 'text'},
{'default': '',
'fullName': 'project',
'hide': False,
'label': 'Project:',
'maxsize': None,
'multiple': False,
'name': 'project',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Project:',
'type': 'text'},
{'default': '',
'fullName': 'repository',
'hide': False,
'label': 'Repository:',
'maxsize': None,
'multiple': False,
'name': 'repository',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Repository:',
'type': 'text'},
{'default': '',
'fullName': 'revision',
'hide': False,
'label': 'Revision:',
'maxsize': None,
'multiple': False,
'name': 'revision',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Revision:',
'type': 'text'}],
'fullName': None,
'hide': False,
'label': '',
'layout': 'vertical',
'maxsize': None,
'multiple': False,
'name': '',
'regex': None,
'required': False,
'tablabel': '',
'type': 'nested'}],
'builder_names': [u'builder'],
'button_name': u'defaultforce',
'label': u'defaultforce',
'name': u'defaultforce',
'enabled': True}
class ForceschedulerEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = forceschedulers.ForceSchedulerEndpoint
resourceTypeClass = forceschedulers.ForceScheduler
maxDiff = None
def setUp(self):
self.setUpEndpoint()
scheds = [ForceScheduler(
name="defaultforce",
builderNames=["builder"])]
self.master.allSchedulers = lambda: scheds
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_existing(self):
res = yield self.callGet(('forceschedulers', "defaultforce"))
self.validateData(res)
self.assertEqual(res, expected_default)
@defer.inlineCallbacks
def test_get_missing(self):
res = yield self.callGet(('forceschedulers', 'foo'))
self.assertEqual(res, None)
class ForceSchedulersEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = forceschedulers.ForceSchedulersEndpoint
resourceTypeClass = forceschedulers.ForceScheduler
maxDiff = None
def setUp(self):
self.setUpEndpoint()
scheds = [ForceScheduler(
name="defaultforce",
builderNames=["builder"])]
self.master.allSchedulers = lambda: scheds
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_existing(self):
res = yield self.callGet(('forceschedulers', ))
self.assertEqual(res, [expected_default])
| gpl-2.0 | -8,218,860,624,813,654,000 | 39.455556 | 79 | 0.40813 | false |
msopentechcn/open-hackathon | open-hackathon-client/src/client/log.py | 2 | 2041 | # -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import logging
from os.path import realpath, dirname, join
from logging import config, DEBUG, INFO
__all__ = ["log"]
class Log(object):
"""Wrapper of Python logging module for easier usage
:Example:
from hackathon.log import log
log.info("message of INFO level ")
    log.error(exception)  # where exception is of type Exception or one of its subclasses
    .. notes:: make sure directory '/var/log/open-hackathon/' exists and is accessible
"""
def debug(self, debug):
"""write message into log file with DEBUG level
:type debug: str|unicode
:param debug: message to write
"""
if self.logger.isEnabledFor(DEBUG):
self.logger.debug(debug)
def info(self, info):
"""write message into log file with INFO level
:type info: str|unicode
:param info: message to write
"""
if self.logger.isEnabledFor(INFO):
self.logger.info(info)
def warn(self, warn):
"""write message into log file with WARN level
:type warn: str|unicode
:param warn: message to write
"""
self.logger.warn(warn)
def error(self, exception):
"""write exception message and stack trace into log file with ERROR level
:type exception: Exception
:param exception: exception to write
"""
self.logger.error(str(exception), exc_info=1)
def critical(self, critical):
"""write message into log file with FATAL level
:type critical: str|unicode
:param critical: message to write
"""
self.logger.critical(critical)
def __init__(self):
"""initialize the log wrapper through 'logging.conf' file which should be in the same dir of this file"""
logging.config.fileConfig(join(dirname(realpath(__file__)), "logging.conf"))
self.logger = logging.getLogger("myLogger")
log = Log()
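# Sketch of the logging.conf layout this wrapper expects; only the "myLogger"
# qualname is actually required by the code above, everything else here is
# illustrative:
#
#   [loggers]
#   keys=root,myLogger
#   [handlers]
#   keys=consoleHandler
#   [formatters]
#   keys=
#   [logger_root]
#   level=INFO
#   handlers=consoleHandler
#   [logger_myLogger]
#   level=DEBUG
#   handlers=consoleHandler
#   qualname=myLogger
#   [handler_consoleHandler]
#   class=StreamHandler
#   args=(sys.stdout,)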
| mit | 1,412,838,218,007,144,700 | 26.958904 | 113 | 0.624204 | false |
trnewman/VT-USRP-daughterboard-drivers | gnuradio-core/src/lib/filter/generate_gr_freq_xlating_fir_filter_XXX.py | 17 | 1498 | #!/bin/env python
# -*- python -*-
#
# Copyright 2003,2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import re
from generate_utils import *
# files to generate
fx_signatures = [ 'scf', 'scc', 'fcf', 'fcc', 'ccf', 'ccc' ]
roots = ['gr_freq_xlating_fir_filter_XXX']
def expand_h_cc_i (root, code3):
d = init_dict (root, code3)
expand_template (d, root + '.h.t')
expand_template (d, root + '.cc.t')
expand_template (d, root + '.i.t')
def init_dict (root, code3):
name = re.sub ('X+', code3, root)
d = standard_dict (name, code3)
d['FIR_TYPE'] = 'gr_fir_' + i_code (code3) + 'cc'
return d
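# For example, code3 = 'ccf' turns roots[0] into 'gr_freq_xlating_fir_filter_ccf';
# assuming i_code() yields the input-type letter, FIR_TYPE becomes 'gr_fir_ccc'.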
def generate ():
for r in roots:
for s in fx_signatures:
expand_h_cc_i (r, s)
if __name__ == '__main__':
generate ()
| gpl-3.0 | 6,673,921,086,226,694,000 | 27.264151 | 70 | 0.658211 | false |
shacknetisp/vepybot | plugins/core/list.py | 1 | 1340 | # -*- coding: utf-8 -*-
import bot
class M_List(bot.Module):
index = "list"
def register(self):
self.addcommand(
self.list,
"list",
"List modules or, if <module> is specified, commands in a module, "
"<module> can be * for all commands.",
["[module]..."])
def list(self, context, args):
args.default("module", "")
m = args.getstr("module")
if m:
if m == "*":
x = []
for m in self.server.modules:
x += [(("%s %s" % (m, x))
if len(self.server.numcommands[x]) > 1 else x)
for x in self.server.modules[m].commands]
return "%s" % (', '.join(sorted(x)))
if m not in self.server.modules:
return "'%s' is not a loaded module." % m
return ("%s" % (', '.join(
self.server.modules[m].commands)) or
"This module has no commands.")
else:
mkeys = sorted(list(self.server.modules.keys()))
n = []
for m in mkeys:
m = self.server.modules[m]
if not m.hidden:
n.append(m.index)
return "%s" % (', '.join(n))
bot.register.module(M_List)
| mit | -2,722,776,577,327,463,400 | 30.162791 | 79 | 0.436567 | false |
jupierce/openshift-tools | scripts/monitoring/cron-send-elb-status.py | 11 | 4395 | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Collect information about node within ELB
'''
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Disabling invalid-name because pylint doesn't like the naming conention we have.
# pylint: disable=invalid-name
# Disabling the import-errors
# pylint: disable=import-error
# Disabling line too long for one character
# pylint: disable=line-too-long
# pylint: disable=pointless-string-statement
# pylint: disable=deprecated-lambda
# pylint: disable=bad-builtin
# pylint: disable=bare-except
from ConfigParser import SafeConfigParser
from openshift_tools.monitoring.metric_sender import MetricSender
import argparse
import urllib2
import yaml
import boto.ec2.elb
import boto.utils
def parse_args():
''' parse the args from the cli '''
parser = argparse.ArgumentParser(description='ELB status checker')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
return parser.parse_args()
def get_aws_creds(creds_file):
''' Get AWS authentication details from .aws/credentials file '''
parser = SafeConfigParser()
parser.read(creds_file)
aws_access_key = parser.get('ops_monitoring', 'aws_access_key_id')
aws_secret_key = parser.get('ops_monitoring', 'aws_secret_access_key')
return [aws_access_key, aws_secret_key]
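# The credentials file is expected to look roughly like this (key values are
# placeholders):
#
#   [ops_monitoring]
#   aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
#   aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx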
def get_instance_id():
''' Get this instance AWS ID '''
instance_id = urllib2.urlopen('http://instance-data/latest/meta-data/instance-id').read()
return instance_id
def get_instance_region():
''' Get instances region '''
instance_zone = urllib2.urlopen('http://instance-data/latest/meta-data/placement/availability-zone').read()
instance_region = instance_zone[:-1]
return instance_region
def get_instance_name(zagg_client_file):
''' Get this instance name '''
with open(zagg_client_file, 'r') as f:
config = yaml.load(f)
host_name = config["host"]["name"]
return host_name
def main():
''' Gather and examine details about this node within ELBs '''
args = parse_args()
aws_access, aws_secret = get_aws_creds('/root/.aws/credentials')
instance_region = get_instance_region()
elb = boto.ec2.elb.connect_to_region(instance_region, aws_access_key_id=aws_access,
aws_secret_access_key=aws_secret)
instance_name = get_instance_name('/etc/openshift_tools/metric_sender.yaml')
''' Define what instance type this node is, only master/infra are in ELBs '''
if "master" in instance_name:
instance_type = "master"
if args.verbose:
print "Instance %s type is master." % instance_name
elif "infra" in instance_name:
instance_type = "infra"
if args.verbose:
print "Instance %s type is infra." % instance_name
else:
print "%s is not an infra or master node. Nothing to do."
exit()
''' Fetch the load balancers and make sure this instance is within them '''
try:
elbs = elb.get_all_load_balancers()
except:
print "Rate limit reached, skipping."
exit()
instance_id = get_instance_id()
instance_missing = 0
for i in elbs:
if instance_type in i.name:
if not filter(lambda x: x.id == instance_id, i.instances):
instance_missing = 1
if args.verbose:
print "Instance %s is missing from ELB %s!" % (instance_id, i.name)
''' Now that we know if this instance is missing, feed zabbix '''
mts = MetricSender(verbose=args.verbose, debug=args.debug)
mts.add_metric({'openshift.aws.elb.status' : instance_missing})
mts.send_metrics()
if __name__ == '__main__':
main()
| apache-2.0 | -786,151,733,788,392,800 | 32.549618 | 111 | 0.67008 | false |
jhaux/tensorflow | tensorflow/python/training/saver_test_utils.py | 25 | 3105 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility classes for testing checkpointing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.training import saver as saver_module
class CheckpointedOp(object):
"""Op with a custom checkpointing implementation.
Defined as part of the test because the MutableHashTable Python code is
currently in contrib.
"""
# pylint: disable=protected-access
def __init__(self, name, table_ref=None):
if table_ref is None:
self.table_ref = gen_lookup_ops._mutable_hash_table_v2(
key_dtype=dtypes.string, value_dtype=dtypes.float32, name=name)
else:
self.table_ref = table_ref
self._name = name
self._saveable = CheckpointedOp.CustomSaveable(self, name)
ops_lib.add_to_collection(ops_lib.GraphKeys.SAVEABLE_OBJECTS,
self._saveable)
@property
def name(self):
return self._name
@property
def saveable(self):
return self._saveable
def insert(self, keys, values):
return gen_lookup_ops._lookup_table_insert_v2(self.table_ref, keys, values)
def lookup(self, keys, default):
return gen_lookup_ops._lookup_table_find_v2(self.table_ref, keys, default)
def keys(self):
return self._export()[0]
def values(self):
return self._export()[1]
def _export(self):
return gen_lookup_ops._lookup_table_export_v2(self.table_ref, dtypes.string,
dtypes.float32)
class CustomSaveable(saver_module.BaseSaverBuilder.SaveableObject):
"""A custom saveable for CheckpointedOp."""
def __init__(self, table, name):
tensors = table._export()
specs = [
saver_module.BaseSaverBuilder.SaveSpec(tensors[0], "",
name + "-keys"),
saver_module.BaseSaverBuilder.SaveSpec(tensors[1], "",
name + "-values")
]
super(CheckpointedOp.CustomSaveable, self).__init__(table, specs, name)
def restore(self, restore_tensors, shapes):
return gen_lookup_ops._lookup_table_import_v2(
self.op.table_ref, restore_tensors[0], restore_tensors[1])
# pylint: enable=protected-access
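# Usage sketch (test-only; the table name, session `sess` and `checkpoint_prefix`
# below are placeholders, not defined in this module):
#
#   table = CheckpointedOp(name="checkpointed_table")
#   insert_op = table.insert(["k1", "k2"], [1.0, 2.0])
#   saver = saver_module.Saver()  # picks up the SaveableObject registered above
#   sess.run(insert_op)
#   saver.save(sess, checkpoint_prefix)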
| apache-2.0 | -2,012,425,366,591,154,200 | 35.104651 | 80 | 0.656683 | false |
HenryHu/pybbs | PostEntry.py | 1 | 5083 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import struct
import Config
import time
from Util import Util
from cstruct import CStruct
# PostEntry.accessed[0]
FILE_SIGN = 0x1 #/* In article mode, Sign , Bigman 2000.8.12 ,in accessed[0] */
# not used
FILE_OWND = 0x2 #/* accessed array */
# combined into big post
FILE_TOTAL = 0x2 #// aqua 2008.11.4
FILE_VISIT = 0x4
FILE_MARKED = 0x8
FILE_DIGEST = 0x10 #/* Digest Mode*/ /*For SmallPig Digest Mode */
FILE_REPLIED = 0x20 #/* in mail ,added by alex, 96.9.7 */
FILE_FORWARDED = 0x40 #/* in mail ,added by alex, 96.9.7 */
FILE_IMPORTED = 0x80 #/* Leeward 98.04.15 */
# not used:
# /* roy 2003.07.21 */
FILE_WWW_POST = 0x1 #/* post by www */
FILE_ON_TOP = 0x2 #/* on top mode */
FILE_VOTE = 0x4 #/* article with votes */
# PostEntry.accessed[1]
#ifdef FILTER # not def
FILE_CENSOR = 0x20 #/* for accessed[1], flyriver, 2002.9.29 */
BADWORD_IMG_FILE = "etc/badwordv3.img"
#endif
FILE_READ = 0x1 #/* Ownership flags used in fileheader structure in accessed[1] */
FILE_DEL = 0x2 #/* In article mode, Sign , Bigman 2000.8.12 ,in accessed[1] */
FILE_MAILBACK = 0x4 #/* reply articles mail to owner's mailbox, accessed[1] */
#ifdef COMMEND_ARTICLE # not def
FILE_COMMEND = 0x8 #/* recommended article, stiger, in accessed[1] */
#endif
FILE_ROOTANON = 0x10 #/* if the root article was posted anonymously, accessed[1] */
class PostEntry(CStruct):
parser = struct.Struct('%dsIII44sH2s%ds%ds%dsIIII%dsI12s' % (Config.FILENAME_LEN, Config.OWNER_LEN, Config.OWNER_LEN, (34 - Config.OWNER_LEN), Config.STRLEN))
_fields = [['filename', 1], 'id', 'groupid','reid', 'unsued1',
'attachflag', 'innflag', ['owner',1, Config.OWNER_LEN], ['realowner', 1, Config.OWNER_LEN],
'unsued2', 'rootcrc', 'eff_size', 'posttime', 'attachment',
['title',1, Config.ARTICLE_TITLE_LEN], 'level', ['accessed', 2, '=12B']]
size = parser.size
def CheckFlag(self, pos, flag):
return bool(self.accessed[pos] & flag)
def SetFlag(self, pos, flag, val):
if (val):
self.accessed[pos] |= flag
else:
self.accessed[pos] &= ~flag
def IsRootPostAnonymous(self):
return self.CheckFlag(1, FILE_ROOTANON)
def SetRootPostAnonymous(self, rootanon):
return self.SetFlag(1, FILE_ROOTANON, rootanon)
def NeedMailBack(self):
return self.CheckFlag(1, FILE_MAILBACK)
def SetMailBack(self, need):
return self.SetFlag(1, FILE_MAILBACK, need)
def IsMarked(self):
return self.CheckFlag(0, FILE_MARKED)
def Mark(self, mark):
return self.SetFlag(0, FILE_MARKED, mark)
def CannotReply(self):
return self.CheckFlag(1, FILE_READ)
def SetCannotReply(self, val):
return self.SetFlag(1, FILE_READ, val)
def IsReplied(self):
return self.CheckFlag(0, FILE_REPLIED)
def IsForwarded(self):
return self.CheckFlag(0, FILE_FORWARDED)
def InDigest(self):
return self.CheckFlag(0, FILE_DIGEST)
def IsRead(self):
return self.CheckFlag(0, FILE_READ)
def SetRead(self, val):
return self.SetFlag(0, FILE_READ, val)
def UpdateDeleteTime(self):
self.accessed[-1] = int(time.time()) / (3600 * 24) % 100;
def GetPostTime(self):
if '.' in self.filename:
return int(self.filename.split('.')[1])
else:
return 0
def CanBeDeleted(self, user, board):
return user.IsOwner(self) or user.IsSysop() or board.IsMyBM(user)
def CanBeEdit(self, user, board):
return user.IsOwner(self) or user.IsSysop() or board.IsMyBM(user)
def GetInfo(self, mode = 'post'):
post = {'title': Util.gbkDec(self.title)}
post['attachflag'] = self.attachflag
post['attachment'] = self.attachment
post['owner'] = Util.gbkDec(self.owner)
try:
post['posttime'] = self.GetPostTime()
except:
post['posttime'] = 0
flags = []
if (self.IsMarked()):
flags += ['marked']
if (mode == 'post'):
post['xid'] = self.id
post['thread'] = self.groupid
post['reply_to'] = self.reid
post['size'] = self.eff_size
if (self.CannotReply()):
flags += ['noreply']
if (self.InDigest()):
flags += ['g']
if (mode == 'mail'):
if (self.IsReplied()):
flags += ['replied']
if (self.IsForwarded()):
flags += ['forwarded']
if (self.IsRead()):
post['read'] = True
else:
post['read'] = False
post['flags'] = flags
return post
def GetInfoExtended(self, user, board, mode = 'post'):
info = self.GetInfo(mode)
if self.CanBeDeleted(user, board):
info['flags'] += ['deletable']
return info
def is_anony(self):
return self.owner != self.realowner
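if __name__ == '__main__':
    # Quick illustration of the accessed[] bit-flag arithmetic used by
    # CheckFlag/SetFlag above; plain ints, so no PostEntry instance is needed.
    flags = 0
    flags |= FILE_MARKED | FILE_DIGEST
    assert flags & FILE_MARKED and flags & FILE_DIGEST
    flags &= ~FILE_MARKED
    assert not flags & FILE_MARKED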
| bsd-2-clause | -3,583,318,180,514,345,000 | 31.741935 | 162 | 0.581478 | false |
SorX14/playbulbcandle | playbulbcandle/playbulbcandle.py | 1 | 4940 | # MIT License
# Copyright (c) 2016 Steve Parker
import subprocess
import shlex
from subprocess import call
class PlayBulbCandle:
commands = {
'setName': '0x001C', # writeReq needed get/set
'setEffect': '0x0014', # get/set
'setColor': '0x0016', # get/set
'getType': '0x0023',
'getFamily': '0x0025',
'getFirmwareVersion': '0x0027',
'getAppVersion': '0x0029',
'getManufacturer': '0x002b',
'getBatteryLevel': '0x001f',
}
def __init__(self, address, getName = True):
self.address = address
self.name = None
if getName:
self.getName()
def rawComms(self, args):
output,error = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
# Retry logic
if 'busy' in error or 'not implement' in error or 'timed out' in error:
output,error = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
ans = output.replace('Characteristic value/descriptor: ','')
ans = ans.replace('\n','').strip()
# Determine read or write
commType = 'write'
if args[3] == '--char-read':
commType = 'read'
# Determine address
commAddress = args[5]
result = {
"device": {
"address": self.address,
"name": self.name
},
"result": "success",
"msg": ans,
"type": commType,
"address": commAddress
}
# Remove message if its a write
if commType == 'write':
result['cmd'] = args[7]
else:
result['parsed'] = self.hexToAscii(ans)
if len(error) > 0:
result['result'] = 'error'
result['msg'] = error
return result
return result
def writeReq(self, address, value):
try:
cmd = 'gatttool -b ' + self.address + ' --char-write-req -a ' + address + ' -n "' + value + '"'
args = shlex.split(cmd)
return self.rawComms(args)
except:
pass
def write(self, address, value):
try:
cmd = 'gatttool -b ' + self.address + ' --char-write -a ' + address + ' -n "' + value + '"'
args = shlex.split(cmd)
return self.rawComms(args)
except:
pass
def read(self, address):
try:
cmd = 'gatttool -b ' + self.address + ' --char-read -a ' + address
args = shlex.split(cmd)
return self.rawComms(args)
except:
pass
def hexToAscii(self, val):
args = shlex.split(val)
result = ''
for arg in args:
result = result + arg.decode('hex')
return result
def asciiToHex(self, val):
args = list(val)
result = ''
for arg in args:
result = result + arg.encode('hex')
return result
def constrainArg(self, val, min = 0, max = 255):
if (val < min):
return min
if (val > max):
return max
return val
def setName(self, value):
self.name = value
return self.writeReq(self.commands['setName'], self.asciiToHex(value))
def getName(self):
result = self.read(self.commands['setName'])
self.name = result['parsed']
return result
def setEffect(self, white, red, green, blue, mode, speed):
validModes = {'off': 'FF', 'fade': '01', 'jumpRgb': '02', 'fadeRgb': '03', 'candle': '04'}
if mode not in validModes:
raise AttributeError('Invalid mode')
if mode == 'candle':
speed = 0
value = "%0.2X%0.2X%0.2X%0.2X%s00%0.2X00" % (self.constrainArg(white), self.constrainArg(red), self.constrainArg(green), self.constrainArg(blue), validModes[mode], self.constrainArg(speed))
return self.write(self.commands['setEffect'], value)
def getEffect(self):
modes = {1: 'fade', 2: 'jumpRgb', 3: 'fadeRgb', 4: 'candle', 255: 'off'}
result = self.read(self.commands['setEffect'])
args = shlex.split(result['msg'])
result['parsed'] = {
'white': int(args[0], 16),
'red': int(args[1], 16),
'green': int(args[2], 16),
'blue': int(args[3], 16),
'mode': modes[int(args[4], 16)],
'speed': int(args[6], 16)
}
return result
def setColor(self, white, red, green, blue):
value = "%0.2X%0.2X%0.2X%0.2X" % (self.constrainArg(white), self.constrainArg(red), self.constrainArg(green), self.constrainArg(blue))
self.setEffect(0, 0, 0, 0, 'off', 0) # Sometimes blowing out the candle causes the effect to show next
return self.write(self.commands['setColor'], value)
def getColor(self):
result = self.read(self.commands['setColor'])
args = shlex.split(result['msg'])
result['parsed'] = {
'white': int(args[0], 16),
'red': int(args[1], 16),
'green': int(args[2], 16),
'blue': int(args[3], 16)
}
return result
def getType(self):
return self.read(self.commands['getType'])
def getFamily(self):
return self.read(self.commands['getFamily'])
def getFirmwareVersion(self):
return self.read(self.commands['getFirmwareVersion'])
def getAppVersion(self):
return self.read(self.commands['getAppVersion'])
def getManufacturer(self):
return self.read(self.commands['getManufacturer'])
def getBatteryLevel(self):
result = self.read(self.commands['getBatteryLevel']);
result['parsed'] = int(result['msg'], 16)
return result
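  # Usage sketch (the Bluetooth address below is a placeholder; gatttool must be
  # installed and the bulb reachable):
  #   candle = PlayBulbCandle('AA:BB:CC:DD:EE:FF')
  #   candle.setColor(0, 255, 0, 0)                 # white, red, green, blue
  #   candle.setEffect(0, 255, 0, 0, 'candle', 0)   # flickering red candle
  #   candle.off()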
def off(self):
self.setColor(0, 0, 0, 0) | mit | 2,557,605,885,988,404,000 | 24.86911 | 191 | 0.64332 | false |
openstack/tempest | tempest/api/volume/admin/test_volume_services_negative.py | 1 | 3274 | # Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class VolumeServicesNegativeTest(base.BaseVolumeAdminTest):
"""Negative tests of volume services"""
@classmethod
def resource_setup(cls):
super(VolumeServicesNegativeTest, cls).resource_setup()
services = cls.admin_volume_services_client.list_services()['services']
cls.host = services[0]['host']
cls.binary = services[0]['binary']
@decorators.attr(type='negative')
@decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
def test_enable_service_with_invalid_host(self):
"""Test enabling volume service with invalid host should fail"""
self.assertRaises(lib_exc.NotFound,
self.admin_volume_services_client.enable_service,
host='invalid_host', binary=self.binary)
@decorators.attr(type='negative')
@decorators.idempotent_id('c571f179-c6e6-4c50-a0ab-368b628a8ac1')
def test_disable_service_with_invalid_binary(self):
"""Test disabling volume service with invalid binary should fail"""
self.assertRaises(lib_exc.NotFound,
self.admin_volume_services_client.disable_service,
host=self.host, binary='invalid_binary')
@decorators.attr(type='negative')
@decorators.idempotent_id('77767b36-5e8f-4c68-a0b5-2308cc21ec64')
def test_disable_log_reason_with_no_reason(self):
"""Test disabling volume service with none reason should fail"""
self.assertRaises(lib_exc.BadRequest,
self.admin_volume_services_client.disable_log_reason,
host=self.host, binary=self.binary,
disabled_reason=None)
@decorators.attr(type='negative')
@decorators.idempotent_id('712bfab8-1f44-4eb5-a632-fa70bf78f05e')
def test_freeze_host_with_invalid_host(self):
"""Test freezing volume service with invalid host should fail"""
self.assertRaises(lib_exc.BadRequest,
self.admin_volume_services_client.freeze_host,
host='invalid_host')
@decorators.attr(type='negative')
@decorators.idempotent_id('7c6287c9-d655-47e1-9a11-76f6657a6dce')
def test_thaw_host_with_invalid_host(self):
"""Test thawing volume service with invalid host should fail"""
self.assertRaises(lib_exc.BadRequest,
self.admin_volume_services_client.thaw_host,
host='invalid_host')
| apache-2.0 | 7,304,404,735,367,229,000 | 45.771429 | 79 | 0.668601 | false |
mtdewulf/incubator-airflow | airflow/contrib/operators/mysql_to_gcs.py | 44 | 9126 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from collections import OrderedDict
from datetime import date, datetime
from decimal import Decimal
from MySQLdb.constants import FIELD_TYPE
from tempfile import NamedTemporaryFile
class MySqlToGoogleCloudStorageOperator(BaseOperator):
"""
Copy data from MySQL to Google cloud storage in JSON format.
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
mysql_conn_id='mysql_default',
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
"""
:param sql: The SQL to execute on the MySQL table.
:type sql: string
:param bucket: The bucket to upload to.
:type bucket: string
:param filename: The filename to use as the object name when uploading
to Google cloud storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: string
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from MySQL.
:type schema_filename: string
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filenamed param docs above). Google cloud storage allows for files
to be a maximum of 4GB. This param allows developers to specify the
file size of the splits.
:type approx_max_file_size_bytes: long
:param mysql_conn_id: Reference to a specific MySQL hook.
:type mysql_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
"""
super(MySqlToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.mysql_conn_id = mysql_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
cursor = self._query_mysql()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = map(lambda schema_tuple: schema_tuple[0], cursor.description)
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats
row = map(self.convert_types, row)
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
json.dump(row_dict, tmp_file_handle)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write('\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
field_mode = 'NULLABLE' if field[6] or field_type == 'TIMESTAMP' else 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
logging.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
json.dump(schema, tmp_schema_file_handle)
return {self.schema_filename: tmp_schema_file_handle}
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
"""
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
for object, tmp_file_handle in files_to_upload.items():
hook.upload(self.bucket, object, tmp_file_handle.name, 'application/json')
@classmethod
def convert_types(cls, value):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats.
"""
if type(value) in (datetime, date):
return time.mktime(value.timetuple())
elif isinstance(value, Decimal):
return float(value)
else:
return value
@classmethod
def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d[mysql_type] if mysql_type in d else 'STRING'
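# DAG usage sketch (task id, SQL, bucket and object names are placeholders, and
# `dag` is assumed to be an existing DAG object):
#
#   extract = MySqlToGoogleCloudStorageOperator(
#       task_id='mysql_to_gcs_example',
#       sql='SELECT * FROM some_table',
#       bucket='some-gcs-bucket',
#       filename='exports/some_table/part-{}.json',
#       schema_filename='exports/some_table/schema.json',
#       mysql_conn_id='mysql_default',
#       google_cloud_storage_conn_id='google_cloud_storage_default',
#       dag=dag)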
| apache-2.0 | -262,116,739,518,235,070 | 39.741071 | 101 | 0.61922 | false |
ecologylab/BigSemanticsService | Scripts/main/bs_service_tester.py | 1 | 2051 | #!/usr/bin/python
import urllib2
import urllib
from simple_config import load_config
tester_config = load_config("tester.conf")
class ServiceTester:
def __init__(self, config=None):
if config is None:
config = tester_config
self.config = config
self.timeout_seconds = config["timeout_seconds"]
self.tests = config["tests"]
def test_service(self):
code = -1
fatal = ""
non_fatal = ""
for t in self.tests:
mmd = t.get("mmd")
doc = t.get("doc")
path = t.get("path")
include = t.get("include")
url = self.get_test_url(mmd, doc, path)
(code, content, error) = self.access_and_download(url)
if code < 0:
error_msg = "(no error message available)"
if error is not None:
error = str(error)
fatal = "Access error when trying {}:\n{}".format(url, error)
elif code != 200:
fatal = "Service error when trying {}, HTTP code: {}".format(url, code)
else:
if content.find(include) < 0:
non_fatal +=\
"Expected content not found for {}: {}\n".format(url, include)
return (code, fatal, non_fatal)
def get_test_url(self, mmd, doc, path):
base_url = "http://" + self.config["service_host"]
if mmd is not None:
return base_url + "/BigSemanticsService/mmd.xml?name=" + mmd
elif doc is not None:
params = urllib.urlencode({"reload": "true", "url": doc})
return base_url + "/BigSemanticsService/metadata.xml?" + params
elif path is not None:
return base_url + path
else:
return None
def access_and_download(self, url):
f = None
code = -1
content = None
error = None
try:
f = urllib2.urlopen(url, None, self.timeout_seconds)
code = f.getcode()
content = u'\n'.join(f.readlines())
except Exception as e:
code = -1
error = e
finally:
if f is not None:
f.close()
return (code, content, error)
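# Shape of the tester.conf data this class consumes (the format produced by
# simple_config is not shown here, so treat this as a sketch; host, names and
# URLs are examples, and each test entry carries one of mmd/doc/path):
#
#   {"service_host": "localhost:8080",
#    "timeout_seconds": 30,
#    "tests": [{"mmd": "some_wrapper", "include": "<meta_metadata"},
#              {"doc": "http://example.com/page", "include": "<metadata"},
#              {"path": "/BigSemanticsService/mmd.xml?name=some_wrapper",
#               "include": "some_wrapper"}]}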
| apache-2.0 | -2,235,816,616,534,470,400 | 26.09589 | 79 | 0.56509 | false |
ubgarbage/gae-blog | django/contrib/sessions/backends/cache.py | 55 | 1979 | from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import cache
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = cache
super(SessionStore, self).__init__(session_key)
def load(self):
session_data = self._cache.get(KEY_PREFIX + self.session_key)
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self.session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError("Unable to create a new session key.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(KEY_PREFIX + self.session_key, self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
if self._cache.has_key(KEY_PREFIX + session_key):
return True
return False
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._session_key
self._cache.delete(KEY_PREFIX + session_key)
| bsd-3-clause | 8,399,615,209,777,623,000 | 33.12069 | 92 | 0.593734 | false |
ooici/marine-integrations | mi/instrument/harvard/massp/rga/test/test_driver.py | 1 | 29215 | """
@package mi.instrument.harvard.massp.rga.test.test_driver
@file marine-integrations/mi/instrument/harvard/massp/rga/driver.py
@author Peter Cable
@brief Test cases for rga driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Peter Cable'
__license__ = 'Apache 2.0'
import json
import struct
import time
import unittest
import ntplib
from nose.plugins.attrib import attr
from mock import Mock, call
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.core.exceptions import InstrumentStateException
from mi.core.exceptions import InstrumentCommandException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.instrument.data_particle import RawDataParticle, CommonDataParticleType
from mi.core.instrument.instrument_driver import DriverConfigKey, ResourceAgentState
from mi.core.instrument.port_agent_client import PortAgentPacket
from mi.instrument.harvard.massp.rga.driver import InstrumentDriver
from mi.instrument.harvard.massp.rga.driver import RGAStatusParticleKey
from mi.instrument.harvard.massp.rga.driver import RGASampleParticleKey
from mi.instrument.harvard.massp.rga.driver import ParameterConstraints
from mi.instrument.harvard.massp.rga.driver import DataParticleType
from mi.instrument.harvard.massp.rga.driver import InstrumentCommand
from mi.instrument.harvard.massp.rga.driver import ProtocolState
from mi.instrument.harvard.massp.rga.driver import ProtocolEvent
from mi.instrument.harvard.massp.rga.driver import Capability
from mi.instrument.harvard.massp.rga.driver import Parameter
from mi.instrument.harvard.massp.rga.driver import Protocol
from mi.instrument.harvard.massp.rga.driver import Prompt
from mi.instrument.harvard.massp.rga.driver import NEWLINE
from mi.core.log import get_logger
log = get_logger()
rga_startup_config = {
DriverConfigKey.PARAMETERS: {
Parameter.EE: 70,
Parameter.IE: 1,
Parameter.VF: 90,
Parameter.NF: 3,
Parameter.SA: 10,
Parameter.MI: 1,
Parameter.MF: 100,
Parameter.FL: 1.0,
Parameter.HV: 0,
}
}
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.harvard.massp.rga.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='YAQ3KY',
instrument_agent_name='harvard_massp_rga',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config=rga_startup_config
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(DriverTestMixin):
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
def assert_sample_data_particle(self, data_particle):
"""
        Verify a particle is a known particle to this driver and verify the particle is
correct
@param data_particle: Data particle of unknown type produced by the driver
"""
if isinstance(data_particle, RawDataParticle):
self.assert_particle_raw(data_particle)
else:
self.fail("Unknown Particle Detected: %s" % data_particle)
responses = {
'IN0': 0,
'EE70': 0,
'EE?': 70,
'IE1': 0,
'IE?': 1,
'VF90': 0,
'VF?': 90,
'NF?': 3,
'SA?': 10,
'MI?': 1,
'MF?': 100,
'FL1.0': 0,
'FL?': 0.9976,
'AP?': 251,
'ID?': 'FAKEID',
'SC1': '\xba\xdd\xca\xfe' * 252,
'ER?': 0,
'HV?': 0,
'HV0': 0,
}
sample_data = list(struct.unpack('<252i', responses['SC1']))
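    # responses['SC1'] above is the 4-byte pattern '\xba\xdd\xca\xfe' repeated 252
    # times (1008 bytes), which '<252i' unpacks into 252 little-endian signed ints.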
_sample_parameters = {
RGASampleParticleKey.SCAN_DATA: {TYPE: list, VALUE: sample_data, REQUIRED: True},
}
_status_parameters = {
RGAStatusParticleKey.ID: {TYPE: unicode, VALUE: responses['ID?'], REQUIRED: True},
RGAStatusParticleKey.EE: {TYPE: int, VALUE: responses['EE?'], REQUIRED: True},
RGAStatusParticleKey.IE: {TYPE: int, VALUE: responses['IE?'], REQUIRED: True},
RGAStatusParticleKey.VF: {TYPE: int, VALUE: responses['VF?'], REQUIRED: True},
RGAStatusParticleKey.NF: {TYPE: int, VALUE: responses['NF?'], REQUIRED: True},
RGAStatusParticleKey.ER: {TYPE: int, VALUE: responses['ER?'], REQUIRED: True},
RGAStatusParticleKey.SA: {TYPE: int, VALUE: responses['SA?'], REQUIRED: True},
RGAStatusParticleKey.MI: {TYPE: int, VALUE: responses['MI?'], REQUIRED: True},
RGAStatusParticleKey.MF: {TYPE: int, VALUE: responses['MF?'], REQUIRED: True},
RGAStatusParticleKey.AP: {TYPE: int, VALUE: responses['AP?'], REQUIRED: True},
RGAStatusParticleKey.HV: {TYPE: int, VALUE: responses['HV?'], REQUIRED: True},
RGAStatusParticleKey.FL: {TYPE: float, VALUE: 1.0, REQUIRED: True},
RGAStatusParticleKey.FL_ACTUAL: {TYPE: float, VALUE: responses['FL?'], REQUIRED: True},
}
_driver_parameters = {
# Parameters defined in the IOS
Parameter.ID: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.AP: {TYPE: int, READONLY: True, DA: False, STARTUP: False},
Parameter.ER: {TYPE: int, READONLY: True, DA: False, STARTUP: False},
Parameter.EE: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.IE: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.VF: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.NF: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.SA: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.MI: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.MF: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.HV: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.FL: {TYPE: float, READONLY: False, DA: False, STARTUP: True},
Parameter.FL_ACTUAL: {TYPE: float, READONLY: True, DA: False, STARTUP: True},
Parameter.ERROR_REASON: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
}
_driver_capabilities = {
# capabilities defined in the IOS
Capability.START_SCAN: {STATES: [ProtocolState.COMMAND]},
Capability.STOP_SCAN: {STATES: [ProtocolState.SCAN]},
Capability.CLEAR: {STATES: [ProtocolState.ERROR]},
}
_capabilities = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'PROTOCOL_EVENT_START_SCAN'],
ProtocolState.SCAN: ['PROTOCOL_EVENT_TAKE_SCAN',
'PROTOCOL_EVENT_STOP_SCAN',
'PROTOCOL_EVENT_TIMEOUT',
'PROTOCOL_EVENT_ERROR'],
ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT'],
ProtocolState.ERROR: ['PROTOCOL_EVENT_CLEAR', 'DRIVER_EVENT_GET']
}
def _send_port_agent_packet(self, driver, data):
"""
Send the supplied data to the driver in a port agent packet
@param driver: instrument driver instance
@param data: data to be sent
"""
ts = ntplib.system_to_ntp_time(time.time())
port_agent_packet = PortAgentPacket()
port_agent_packet.attach_data(data)
port_agent_packet.attach_timestamp(ts)
port_agent_packet.pack_header()
# Push the response into the driver
driver._protocol.got_data(port_agent_packet)
def my_send(self, driver):
"""
Side effect function generator - will send responses based on input
@param driver Instrument driver instance
@returns side effect function
"""
def inner(data):
"""
Inner function for side effect generator
@param data Data to send
@returns length of response
"""
data = data.replace(NEWLINE, '')
log.trace('my_send data: %r', data)
my_response = str(self.responses.get(data))
if my_response is not None:
log.trace("my_send: data: %r, my_response: %r", data, my_response)
# scans repeat over and over, sleep between them to prevent overloading cpu
if data == 'SC1':
time.sleep(0.9)
self._send_port_agent_packet(driver, my_response + '\n' + NEWLINE)
return len(my_response)
return inner
def assert_rga_sample_particle(self, particle, verify_values=False):
log.debug('assert_rga_sample_particle: %r', particle)
self.assert_data_particle_keys(RGASampleParticleKey, self._sample_parameters)
self.assert_data_particle_header(particle, DataParticleType.RGA_SAMPLE)
self.assert_data_particle_parameters(particle, self._sample_parameters, verify_values)
def assert_rga_status_particle(self, particle, verify_values=False):
log.debug('assert_rga_status_particle: %r', particle)
self.assert_data_particle_keys(RGAStatusParticleKey, self._status_parameters)
self.assert_data_particle_header(particle, DataParticleType.RGA_STATUS)
self.assert_data_particle_parameters(particle, self._status_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
# noinspection PyProtectedMember
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, DriverTestMixinSub):
def setUp(self):
InstrumentDriverUnitTestCase.setUp(self)
def test_connect(self, initial_protocol_state=ProtocolState.COMMAND):
"""
Verify driver can transition to the COMMAND state
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver, initial_protocol_state)
driver._connection.send.side_effect = self.my_send(driver)
driver._protocol.set_init_params(rga_startup_config)
driver._protocol._init_params()
return driver
def test_driver_enums(self):
"""
        Verify that all driver enumerations have no duplicate values that might cause confusion. Also
do a little extra validation for the Capabilities
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
# Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self._capabilities)
def test_start_scan(self):
"""
Send a start scan event to the driver.
Use the side_effect on send from the mock port agent to simulate instrument responses.
This checks the chunker and particle generation, since the chunker and particles are
dynamic based on instrument parameters.
"""
driver = self.test_connect()
self.clear_data_particle_queue()
driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.SCAN)
particles = []
# loop, because the monkey patched time doesn't reliably sleep long enough...
now = time.time()
while time.time() < (now+5):
time.sleep(1)
for p in self._data_particle_received:
particle_dict = json.loads(p)
stream_type = particle_dict.get('stream_name')
self.assertIsNotNone(stream_type)
if stream_type != CommonDataParticleType.RAW:
particles.append((p, stream_type))
log.debug("Non raw particles: %s ", particles)
self.assertGreaterEqual(len(particles), 1)
for p, stream_name in particles:
if stream_name == DataParticleType.RGA_STATUS:
self.assert_rga_status_particle(p, True)
else:
self.assert_rga_sample_particle(p, True)
def test_sample_missing_data(self):
"""
Send a start scan event to the driver, but don't return enough data. Verify that no
sample particle is produced but the driver starts another scan.
"""
orig_scan = self.responses['SC1']
self.responses['SC1'] = 'this is a bad scan, man!'
driver = self.test_connect()
# side effect for our Mocked on_event
def my_on_event(event):
log.debug('my_on_event: event: %r', event)
driver._protocol._protocol_fsm.on_event_actual(event)
# swap out on_event with a Mock object now
on_event_mock = Mock()
on_event_mock.side_effect = my_on_event
driver._protocol._protocol_fsm.on_event_actual = driver._protocol._protocol_fsm.on_event
driver._protocol._protocol_fsm.on_event = on_event_mock
driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.SCAN)
# clear the particle queue to remove the status particle
self.clear_data_particle_queue()
# sleep a bit
time.sleep(15)
# check for the correct calls
on_event_mock.assert_has_calls([call(Capability.START_SCAN),
call(Capability.TAKE_SCAN),
call(ProtocolEvent.TIMEOUT)])
self.responses['SC1'] = orig_scan
# check there are no particles
self.assertEqual(len(self._data_particle_received), 0)
def test_error_byte(self):
"""
Respond with an error and verify the FSM transitions to an error state.
"""
driver = self.test_connect()
# set up responses to return an error when the filament is enabled
self.responses['FL1.0'] = 1
try:
driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
self.assertTrue(False, msg='Failed to raise an exception when the error byte was set')
except InstrumentStateException:
# we threw an exception as expected.
pass
finally:
# restore responses so other tests don't fail!
self.responses['FL1.0'] = 0
# make sure we moved to the ERROR state
time.sleep(.1)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.ERROR)
# clear the error, assert we moved back to COMMAND
driver._protocol._protocol_fsm.on_event(Capability.CLEAR)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, NEWLINE, mock_callback)
driver_capabilities = Capability.list()
test_capabilities = Capability.list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY" was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction           #
# by making direct calls via zeromq.                                          #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, DriverTestMixinSub):
def setUp(self):
InstrumentDriverIntegrationTestCase.setUp(self)
def test_take_scan(self):
"""
Start a scan and verify status and sample particles are generated.
"""
self.assert_initialize_driver()
self.assert_driver_command(Capability.START_SCAN)
self.assert_state_change(ProtocolState.SCAN, 5)
self.assert_async_particle_generation(DataParticleType.RGA_STATUS, self.assert_rga_status_particle)
self.assert_async_particle_generation(DataParticleType.RGA_SAMPLE, self.assert_rga_sample_particle, 2, 600)
self.assert_driver_command(Capability.STOP_SCAN)
@unittest.skip("This takes a very long time... Don't run it unless you mean it!")
def test_scan_parameters(self):
"""
Step through a sequence of configuration parameters to test scan timing. Data is in confluence.
"""
self.assert_initialize_driver()
self.assert_set(Parameter.MI, 5, no_get=True)
for mf in range(10, 100, 5):
self.assert_set(Parameter.MF, mf, no_get=True)
for nf in range(1, 8):
self.clear_events()
self.assert_set(Parameter.NF, nf, no_get=True)
self.assert_driver_command(Capability.START_SCAN)
self.assert_state_change(ProtocolState.SCAN, 5)
self.assert_async_particle_generation(DataParticleType.RGA_STATUS, Mock())
self.assert_async_particle_generation(DataParticleType.RGA_SAMPLE, Mock(), 2, 900)
self.assert_driver_command(Capability.STOP_SCAN)
self.assert_state_change(ProtocolState.COMMAND, 5)
# while this is an integration test, it can be run without access to the instrument
def test_get_parameters(self):
"""
Verify we can get all parameters
"""
self.assert_initialize_driver()
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
for key, value in startup_params.items():
self.assert_get(key, value)
# while this is an integration test, it can be run without access to the instrument
def test_set_parameters(self):
"""
Verify we can set all parameters
"""
self.assert_initialize_driver()
constraints = ParameterConstraints.dict()
parameters = Parameter.reverse_dict()
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
for key, value in startup_params.iteritems():
if key in parameters and parameters[key] in constraints:
_, minimum, maximum = constraints[parameters[key]]
self.assert_set(key, maximum-1)
else:
self.assert_set(key, value + 1)
self.assert_set_bulk(startup_params)
# while this is an integration test, it can be run without access to the instrument
def test_out_of_range(self):
"""
Verify out of range values raise exceptions
"""
self.assert_initialize_driver()
constraints = ParameterConstraints.dict()
parameters = Parameter.dict()
log.debug(constraints)
for key in constraints:
_, minimum, maximum = constraints[key]
parameter = parameters[key]
self.assert_set_exception(parameter, minimum - 1)
self.assert_set_exception(parameter, maximum + 1)
self.assert_set_exception(parameter, "strings aren't valid here!")
def test_set_bogus_parameter(self):
"""
Verify bogus parameters raise exceptions
"""
self.assert_initialize_driver()
self.assert_set_exception('BOGUS', 'CHEESE')
def test_state_transitions(self):
"""
Verify state transitions
"""
self.assert_initialize_driver()
self.assert_driver_command(Capability.START_SCAN)
self.assert_state_change(ProtocolState.SCAN, 5)
self.assert_driver_command(Capability.STOP_SCAN)
self.assert_state_change(ProtocolState.COMMAND, 5)
# verify the filament is off
self.assert_get(Parameter.FL_ACTUAL, 0.0)
def test_bad_command(self):
"""
Verify bad commands raise exceptions
"""
self.assert_initialize_driver()
self.assert_driver_command_exception('BAD_COMMAND', exception_class=InstrumentCommandException)
def test_incomplete_config(self):
"""
Break our startup config, then verify the driver raises an exception
"""
# grab the old config
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
old_value = startup_params[Parameter.EE]
try:
# delete a required parameter
del (startup_params[Parameter.EE])
# re-init to take our broken config
self.init_driver_process_client()
self.assert_initialize_driver()
# request start scan
self.assert_driver_command(Capability.START_SCAN)
self.assertTrue(False, msg='Failed to raise exception on missing parameter')
except Exception as e:
self.assertTrue(self._driver_exception_match(e, InstrumentProtocolException))
finally:
startup_params[Parameter.EE] = old_value
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, DriverTestMixinSub):
def setUp(self):
InstrumentDriverQualificationTestCase.setUp(self)
def test_direct_access_telnet_mode(self):
"""
This test manually tests that the Instrument Driver properly supports
direct access to the physical instrument. (telnet mode)
"""
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
self.tcp_client.send_data(InstrumentCommand.ID + '?' + NEWLINE)
self.assertTrue(self.tcp_client.expect('SRSRGA200'))
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 5)
def test_poll(self):
"""
A scan is the closest thing we have to a poll here...
"""
self.assert_enter_command_mode()
self.assert_particle_polled(Capability.START_SCAN,
self.assert_rga_status_particle,
DataParticleType.RGA_STATUS,
timeout=30)
self.assert_particle_async(DataParticleType.RGA_SAMPLE, self.assert_rga_sample_particle, timeout=100)
self.assert_execute_resource(Capability.STOP_SCAN)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 5)
def test_get_set_parameters(self):
"""
        Verify that all parameters can be read and set properly; this includes
        ensuring that read-only parameters fail on set.
"""
self.assert_enter_command_mode()
constraints = ParameterConstraints.dict()
parameters = Parameter.reverse_dict()
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
for key, value in startup_params.items():
self.assert_get_parameter(key, value)
if key in parameters and parameters[key] in constraints:
_, minimum, maximum = constraints[parameters[key]]
self.assert_set_parameter(key, maximum-1)
else:
self.assert_set_parameter(key, value + 1)
def test_reset(self):
"""
Verify the agent can be reset
Overridden, driver does not have autosample
"""
self.assert_enter_command_mode()
self.assert_reset()
def test_discover(self):
"""
Overridden, driver does not have autosample
"""
# Verify the agent is in command mode
self.assert_enter_command_mode()
# Now reset and try to discover. This will stop the driver which holds the current
# instrument state.
self.assert_reset()
self.assert_discover(ResourceAgentState.COMMAND)
| bsd-2-clause | -1,572,916,429,620,256,500 | 43.603053 | 115 | 0.604313 | false |
ddong8/ihasy | lib/sendmail.py | 1 | 1171 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2017 ihasy.com
# Do have a faith in what you're doing.
# Make your life a story worth telling.
import smtplib
import sys
import email
from email.mime.text import MIMEText
send_mail_host = 'smtp_host'
send_mail_user = 'smtp_user'
send_mail_user_name = u'send_mail_user_name'
send_mail_pswd = 'send_mail_password'
send_mail_postfix = 'send_mail_postfix'
get_mail_user = 'get_mail_user'
charset = 'utf-8'
get_mail_postfix = 'get_mail_postfix'
get_mail_host = 'get_mail_host'
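# NOTE: the values above are placeholders; replace them with a real SMTP host,
# account, password and recipient before calling send().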
def send(sub, content, reciver = get_mail_user + get_mail_postfix):
send_mail_address = send_mail_user_name + '<' + send_mail_user + '@' + send_mail_postfix + '>'
msg = email.mime.text.MIMEText(content,'html',charset)
msg['Subject'] = email.Header.Header(sub,charset)
msg['From'] = send_mail_address
msg['to'] = to_adress = reciver
try:
stp = smtplib.SMTP()
stp.connect(send_mail_host)
stp.login(send_mail_user,send_mail_pswd)
stp.sendmail(send_mail_address,to_adress,msg.as_string())
stp.close()
return True
except Exception,e:
print(e)
return False
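# Minimal usage sketch: the third argument is optional and defaults to
# get_mail_user + get_mail_postfix; the body is sent as HTML.
# send(u'test subject', u'<p>hello</p>', 'someone@example.com')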
| bsd-3-clause | 4,690,865,384,309,086,000 | 28.275 | 98 | 0.663535 | false |
NMGRL/pychron | pychron/processing/analyses/analysis_group.py | 1 | 34170 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import math
from numpy import array, nan, average
# ============= enthought library imports =======================
from traits.api import List, Property, cached_property, Str, Bool, Int, Event, Float, Any, Enum, on_trait_change
from uncertainties import ufloat, nominal_value, std_dev
from pychron.core.stats import calculate_mswd_probability
from pychron.core.stats.core import calculate_mswd, calculate_weighted_mean, validate_mswd
from pychron.core.utils import alphas
from pychron.experiment.utilities.runid import make_aliquot
from pychron.processing.analyses.analysis import IdeogramPlotable
from pychron.processing.analyses.preferred import Preferred
from pychron.processing.arar_age import ArArAge
from pychron.processing.argon_calculations import calculate_plateau_age, age_equation, calculate_isochron
from pychron.pychron_constants import MSEM, SD, SUBGROUPING_ATTRS, ERROR_TYPES, WEIGHTED_MEAN, \
DEFAULT_INTEGRATED, SUBGROUPINGS, ARITHMETIC_MEAN, PLATEAU_ELSE_WEIGHTED_MEAN, WEIGHTINGS, FLECK, NULL_STR, \
ISOCHRON, MSE, SE
def AGProperty(*depends):
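    # Trait Property factory: the returned property is recomputed whenever the
    # group is marked dirty, an analysis' temp_status changes, or any extra
    # dependency named in *depends changes.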
d = 'dirty,analyses:[temp_status]'
if depends:
d = '{},{}'.format(','.join(depends), d)
return Property(depends_on=d)
def MetaDataProperty(*depends):
d = 'metadata_refresh_needed'
if depends:
d = '{},{}'.format(','.join(depends), d)
return Property(depends_on=d)
class AnalysisGroup(IdeogramPlotable):
attribute = Str('uage')
analyses = List
nanalyses = AGProperty()
age_span = AGProperty()
weighted_age = AGProperty()
arith_age = AGProperty()
integrated_age = AGProperty()
isochron_age = AGProperty()
j_err = AGProperty()
j = AGProperty()
total_n = AGProperty()
weighted_mean_f = AGProperty()
integrated_age_weighting = Enum(WEIGHTINGS)
include_j_error_in_integrated = Bool(False)
age_error_kind = Enum(*ERROR_TYPES)
kca_error_kind = Enum(*ERROR_TYPES)
kcl_error_kind = Enum(*ERROR_TYPES)
rad40_error_kind = Enum(*ERROR_TYPES)
moles_k39_error_kind = Enum(*ERROR_TYPES)
signal_k39_error_kind = Enum(*ERROR_TYPES)
mswd = Property
isochron_age_error_kind = Str(SE)
isochron_method = Str('York')
identifier = Any
aliquot = Any
repository_identifier = Any
irradiation = Any
irradiation_level = Any
irradiation_position = Any
sample = Any
project = Any
material = Any
igsn = Any
lithology = Any
lithology_type = Any
lithology_group = Any
lithology_class = Any
latitude = Any
longitude = Any
reference = Any
rlocation = Any
arar_constants = Any
production_ratios = Any
monitor_info = Any
monitor_age = Any
monitor_reference = Any
age_units = Any
grainsize = Any
unit = Str
location = Str
# age_scalar = Property
# age_units = AGProperty()
# external errors
include_j_error_in_mean = Bool(True)
include_j_position_error = Bool(False)
include_decay_error_mean = Bool(False)
# percent_39Ar = AGProperty()
dirty = Event
isochron_3640 = None
isochron_regressor = None
exclude_non_plateau = Bool(False)
omit_by_tag = Bool(True)
def __init__(self, *args, **kw):
super(AnalysisGroup, self).__init__(make_arar_constants=False, *args, **kw)
def _analyses_changed(self, new):
if new:
a = new[0]
for attr in ('identifier',
'aliquot',
'repository_identifier',
'igsn',
'sample',
'material',
'grainsize',
'project',
'irradiation',
'irradiation_position',
'irradiation_level',
'irradiation_label',
'unit',
'lithology',
'lithology_type',
'lithology_group',
'lithology_class',
'latitude',
'longitude',
'reference',
'rlocation',
'production_ratios',
'arar_constants',
'monitor_age',
'monitor_reference'):
try:
setattr(self, attr, getattr(a, attr))
except AttributeError:
pass
try:
self.monitor_info = a.monitor_age, a.monitor_reference
except AttributeError:
pass
self.age_units = self.arar_constants.age_units
def attr_stats(self, attr):
w, sd, sem, (vs, es) = self._calculate_weighted_mean(attr, error_kind='both')
mi, ma, total_dev, mswd, valid_mswd = 0, 0, 0, 0, False
if len(vs):
mswd = calculate_mswd(vs, es, wm=w)
valid_mswd = validate_mswd(mswd, self.nanalyses)
mi = min(vs)
ma = max(vs)
total_dev = (ma - mi) / ma * 100
else:
print('atafdsa', attr)
return {'mean': w,
'sd': sd,
'sem': sem,
'mswd': mswd,
'valid_mswd': valid_mswd,
'min': mi, 'max': ma, 'total_dev': total_dev}
def get_mswd_tuple(self):
mswd = self.mswd
valid_mswd = validate_mswd(mswd, self.nanalyses)
return mswd, valid_mswd, self.nanalyses, calculate_mswd_probability(mswd, self.nanalyses - 1)
def set_external_error(self, individual, mean, decay, dirty=False):
self.include_j_position_error = individual
self.include_j_error_in_mean = mean
self.include_decay_error_mean = decay
if dirty:
self.dirty = True
def get_arithmetic_mean(self, *args, **kw):
return self._calculate_arithmetic_mean(*args, **kw)
def get_weighted_mean(self, *args, **kw):
return self._get_weighted_mean(*args, **kw)
def plateau_analyses(self):
return
def _is_omitted(self, ai, **kw):
return ai.is_omitted(omit_by_tag=self.omit_by_tag, **kw)
def get_omitted_by_tag(self, ans, tags=None):
return [i for i, ai in enumerate(ans) if self._is_omitted(ai, tags=tags)]
def clean_analyses(self):
return (ai for ai in self.analyses if not self._is_omitted(ai))
def do_omit_non_plateau(self):
self.calculate_plateau()
ans = [a for a in self.analyses if isinstance(a, ArArAge) and not self._is_omitted(a)]
for a in ans:
if not self.get_is_plateau_step(a):
a.temp_status = 'omit'
def get_isochron_data(self, exclude_non_plateau=False):
ans = [a for a in self.analyses if isinstance(a, ArArAge)]
if (exclude_non_plateau or self.exclude_non_plateau) and hasattr(self, 'get_is_plateau_step'):
def test(ai):
a = self._is_omitted(ai)
b = not self.get_is_plateau_step(ai)
return a or b
else:
def test(ai):
return self._is_omitted(ai)
exclude = [i for i, x in enumerate(ans) if test(x)]
if ans:
return calculate_isochron(ans, self.isochron_age_error_kind, reg=self.isochron_method, exclude=exclude)
def calculate_isochron_age(self, exclude_non_plateau=False):
try:
args = self.get_isochron_data(exclude_non_plateau)
except BaseException:
return
if args:
age = args[0]
self.isochron_3640 = args[1]
reg = args[2]
self.isochron_regressor = reg
v, e = nominal_value(age), std_dev(age)
e = self._modify_error(v, e, self.isochron_age_error_kind, mswd=reg.mswd)
return ufloat(v, e)
def isochron_mswd(self):
if not self.isochron_3640:
self.calculate_isochron_age()
mswd, v, n, p = 0, '', 0, 0
reg = self.isochron_regressor
if reg:
mswd, v, n, p = reg.mswd, reg.valid_mswd, reg.n, reg.mswd_pvalue
return mswd, v, n, p
@property
def featuregroup_id(self):
if self.analyses:
return getattr(self.analyses[0], 'featuregroup_id')
# properties
@property
def flatlon(self):
r = NULL_STR
if self.latitude is not None and self.longitude is not None:
try:
r = '{:0.3f},{:0.3f}'.format(self.latitude, self.longitude)
except ValueError:
r = '{},{}'.format(self.latitude, self.longitude)
return r
@property
def isochron_4036(self):
if self.isochron_3640:
v = 1 / self.isochron_3640
else:
v = ufloat(0, 0)
return v
@property
def nratio(self):
return '{}/{}'.format(self.nanalyses, len(self.analyses))
@property
def labnumber(self):
return self.identifier
@property
def age_attr(self):
return 'uage_w_position_err' if self.include_j_position_error else 'uage'
def _get_mswd(self):
attr = self.attribute
if attr.startswith('uage'):
attr = 'uage'
if self.include_j_position_error:
attr = 'uage_w_position_err'
return self._calculate_mswd(attr)
@cached_property
def _get_age_span(self):
ans = self.clean_analyses()
ages = [nominal_value(a.age) for a in ans]
ret = 0
if ages:
ret = max(ages) - min(ages)
return ret
@cached_property
def _get_j_err(self):
j = self.j
try:
e = (std_dev(j) / nominal_value(j)) if j is not None else 0
except ZeroDivisionError:
e = nan
return e
@cached_property
def _get_j(self):
j = ufloat(0, 0)
if self.analyses:
j = self.analyses[0].j
return j
@cached_property
def _get_isochron_age(self):
try:
a = self.calculate_isochron_age()
except BaseException:
a = None
if a is None:
a = ufloat(0, 0)
return a
@cached_property
def _get_arith_age(self):
v, e = self._calculate_arithmetic_mean(self.age_attr)
e = self._modify_error(v, e, self.age_error_kind)
aa = ufloat(v, e)
return self._apply_external_err(aa)
@cached_property
def _get_weighted_age(self):
attr = self.attribute
if attr.startswith('uage'):
attr = self.age_attr
v, e = self._calculate_weighted_mean(attr, self.age_error_kind)
me = self._modify_error(v, e, self.age_error_kind)
try:
wa = ufloat(v, max(0, me))
return self._apply_external_err(wa)
except AttributeError:
return ufloat(0, 0)
@cached_property
def _get_weighted_mean_f(self):
v, e = self._calculate_weighted_mean('uF', self.age_error_kind)
me = self._modify_error(v, e, self.age_error_kind)
try:
wa = ufloat(v, max(0, me))
return wa
# return self._apply_j_err(wa)
except AttributeError:
return ufloat(0, 0)
@cached_property
def _get_total_n(self):
return len(self.analyses)
@cached_property
def _get_nanalyses(self):
return len(list(self.clean_analyses()))
# private functions
def _calculate_mswd(self, attr, values=None):
m = 0
if values is None:
values = self._get_values(attr)
if values:
vs, es = values
m = calculate_mswd(vs, es)
return m
def _apply_external_err(self, wa, force=False):
def func(aa):
v, e = nominal_value(aa), std_dev(aa)
v = abs(v)
try:
pa = e / v
except ZeroDivisionError:
pa = 0
return v, e, pa
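        # External (J and decay-constant) uncertainties are folded in by adding
        # relative errors in quadrature: e_total / v = sqrt((e / v) ** 2 + e_ext ** 2).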
if self.include_j_error_in_mean:
v, e, pa = func(wa)
ne = (pa ** 2 + self.j_err ** 2) ** 0.5
wa = ufloat(v, ne * v)
if self.include_decay_error_mean:
v, e, pa = func(wa)
k = self.arar_constants.lambda_k
de = 0
try:
de = std_dev(k) / nominal_value(k)
except ZeroDivisionError:
pass
ne = (pa ** 2 + de ** 2) ** 0.5
wa = ufloat(v, ne * v)
return wa
def _modify_error(self, v, e, kind, mswd=None):
if mswd is None:
mswd = self.mswd
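        # MSE/MSEM error kinds inflate the analytical error by sqrt(MSWD) when
        # the observed scatter exceeds expectation (MSWD > 1).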
if kind in (MSE, MSEM):
e *= mswd ** 0.5 if mswd > 1 else 1
return e
def _get_weighted_mean(self, attr, kind=None):
if attr == 'age':
return self.weighted_age
if kind is None:
kind = getattr(self, '{}_error_kind'.format(attr), SD)
v, e = self._calculate_weighted_mean(attr, error_kind=kind)
mswd = self._calculate_mswd(attr)
e = self._modify_error(v, e, kind, mswd)
return ufloat(v, e)
def _get_values(self, attr):
vs = (ai.get_value(attr) for ai in self.clean_analyses())
ans = [vi for vi in vs if vi is not None]
if ans:
vs = array([nominal_value(v) for v in ans])
es = array([std_dev(v) for v in ans])
if attr not in ('lab_temperature', 'peak_center', 'lab_humidity', 'lab_airpressure'):
idx = es.astype(bool)
vs = vs[idx]
es = es[idx]
return vs, es
def _calculate_mean(self, attr, use_weights=True, error_kind=None):
def sd(a, v, e):
n = len(v)
if n == 1:
we = e[0]
else:
we = (sum((a - v) ** 2) / (n - 1)) ** 0.5
return we
args = self._get_values(attr)
sem = 0
if args:
vs, es = args
if use_weights and any(es):
av, werr = calculate_weighted_mean(vs, es)
if error_kind == 'both':
sem = werr
werr = sd(av, vs, es)
elif error_kind == SD:
werr = sd(av, vs, es)
else:
av = vs.mean()
werr = vs.std(ddof=1)
sem = werr / len(vs) ** 0.5
else:
av, werr = 0, 0
if error_kind == 'both':
return av, werr, sem, args
else:
return av, werr
def _calculate_integrated_mean_error(self, weighting, ks, rs):
sks = ks.sum()
weights = None
fs = rs / ks
errors = array([std_dev(f) for f in fs])
values = array([nominal_value(f) for f in fs])
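        # 'Volume' weights each step by (gas fraction * error) ** 2, 'Variance'
        # uses inverse-variance weights (1 / error ** 2), and with no weighting
        # the result is simply rs.sum() / ks.sum().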
if weighting == 'Volume':
vpercent = ks / sks
weights = [nominal_value(wi) for wi in (vpercent * errors) ** 2]
elif weighting == 'Variance':
weights = 1 / errors ** 2
if weights is not None:
wmean, sum_weights = average(values, weights=weights, returned=True)
if weighting == 'Volume':
werr = sum_weights ** 0.5
else:
werr = sum_weights ** -0.5
f = ufloat(wmean, werr)
else:
f = rs.sum() / sks
return f
def _calculate_integrated(self, attr, kind='total', weighting=None):
uv = ufloat(0, 0)
if kind == 'total':
ans = self.analyses
elif kind == 'valid':
ans = list(self.clean_analyses())
elif kind == 'plateau':
ans = list(self.plateau_analyses())
ans = [a for a in ans if not isinstance(a, InterpretedAgeGroup)]
if ans:
prs = ans[0].production_ratios
def apply_pr(r, k):
pr = 1
if prs:
pr = prs.get(k, 1)
if not pr:
pr = 1.0
# pr = 1 / pr
v = r * pr
return v
if attr in ('kca', 'kcl', 'signal_k39'):
ks = array([ai.k39 for ai in ans])
if attr == 'kca':
cas = array([ai.get_non_ar_isotope('ca37') for ai in ans])
f = self._calculate_integrated_mean_error(weighting, ks, cas)
try:
uv = 1 / apply_pr(f, 'Ca_K')
except ZeroDivisionError:
uv = 0
elif attr == 'kcl':
cls = array([ai.get_non_ar_isotope('cl38') for ai in ans])
f = self._calculate_integrated_mean_error(weighting, ks, cls)
try:
uv = 1 / apply_pr(f, 'Cl_K')
except ZeroDivisionError:
uv = 0
elif attr == 'signal_k39':
uv = ks.sum()
elif attr == 'radiogenic_yield':
ns = [ai.rad40 for ai in ans]
ds = [ai.total40 for ai in ans]
uv = sum(ns) / sum(ds) * 100
elif attr == 'moles_k39':
uv = sum([ai.moles_k39 for ai in ans])
elif attr == 'age':
uv = self._calculate_integrated_age(ans, weighting)
return uv
def _calculate_arithmetic_mean(self, attr):
if attr == 'age':
return self.arith_age
return self._calculate_mean(attr, use_weights=False)
def _calculate_weighted_mean(self, attr, error_kind=None):
return self._calculate_mean(attr, use_weights=True, error_kind=error_kind)
def _calculate_integrated_age(self, ans, weighting=None):
ret = ufloat(0, 0)
if ans and all((not isinstance(a, InterpretedAgeGroup) for a in ans)):
if weighting is None:
weighting = self.integrated_age_weighting
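            # Integrated (total fusion) age: pool radiogenic 40Ar and 39ArK over
            # all steps into a single F = 40Ar* / 39ArK, then apply the age
            # equation with J.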
rs = array([a.get_computed_value('rad40') for a in ans])
ks = array([a.k39 for a in ans])
f = self._calculate_integrated_mean_error(weighting, ks, rs)
j = self.j
if not self.include_j_error_in_integrated:
j = nominal_value(j)
try:
ret = age_equation(f, j, arar_constants=self.arar_constants)
except ZeroDivisionError:
pass
return ret
class StepHeatAnalysisGroup(AnalysisGroup):
plateau_age = AGProperty()
integrated_age = AGProperty()
integrated_include_omitted = Bool(True)
include_j_error_in_plateau = Bool(True)
plateau_steps_str = Str
plateau_steps = None
nsteps = Int
fixed_step_low = Str
fixed_step_high = Str
plateau_age_error_kind = Str
plateau_nsteps = Int(3)
plateau_gas_fraction = Float(50)
plateau_overlap_sigma = Int(2)
plateau_mswd = Float
plateau_mswd_valid = Bool
plateau_method = Str(FLECK)
total_ar39 = AGProperty()
total_k2o = AGProperty()
def set_isochron_trapped(self, state, include_error=None):
v = None
if state:
self.calculate_isochron_age()
v = self.isochron_4036
if not include_error:
v = ufloat(nominal_value(v), std_dev=0)
for a in self.analyses:
a.arar_constants.trapped_atm4036 = v
a.recalculate_age(force=True)
@property
def integrated_enabled(self):
"""
see issue 1565.
Total integrated age only appropriate for single-aliquot groups or subgroups
:return:
"""
return self.nanalyses > 1 and len({a.aliquot for a in self.analyses}) == 1
def plateau_analyses(self):
return [a for a in self.clean_analyses() if self.get_is_plateau_step(a)]
@cached_property
def _get_total_k2o(self):
total = sum([a.total_k2o if isinstance(a, StepHeatAnalysisGroup) else a.k2o for a in self.analyses])
return nominal_value(total)
@cached_property
def _get_total_ar39(self):
total = sum([a.k39 for a in self.analyses])
return nominal_value(total)
def plateau_total_ar39(self):
ptotal = sum([a.k39 for a in self.plateau_analyses()])
return nominal_value(ptotal / self.total_ar39 * 100)
def valid_total_ar39(self):
cleantotal = sum([a.k39 for a in self.clean_analyses()])
return nominal_value(cleantotal / self.total_ar39 * 100)
def cumulative_ar39(self, idx):
cum = 0
for i, a in enumerate(self.analyses):
if i > idx:
break
cum += a.k39
return nominal_value(cum / self.total_ar39 * 100)
def get_plateau_mswd_tuple(self):
return self.plateau_mswd, self.plateau_mswd_valid, \
self.nsteps, calculate_mswd_probability(self.plateau_mswd, self.nsteps - 1)
def calculate_plateau(self):
return self.plateau_age
def get_is_plateau_step(self, an):
if isinstance(an, int):
idx = an
an = self.analyses[idx]
else:
idx = self.analyses.index(an)
plateau_step = False
if self.plateau_steps:
if not self._is_omitted(an):
ps, pe = self.plateau_steps
plateau_step = ps <= idx <= pe
return plateau_step
@cached_property
def _get_integrated_age(self):
if self.integrated_include_omitted:
ans = self.analyses
else:
ans = list(self.clean_analyses())
return self._calculate_integrated_age(ans)
@property
def fixed_steps(self):
l, h = '', ''
if self.fixed_step_low:
l = self.fixed_step_low
if self.fixed_step_high:
h = self.fixed_step_high
if not (l is None and h is None):
return l, h
@cached_property
def _get_plateau_age(self):
ans = self.analyses
v, e = 0, 0
self.plateau_steps = None
self.plateau_steps_str = ''
self.nsteps = 0
self.plateau_mswd = 0
self.plateau_mswd_valid = False
if all((not isinstance(ai, InterpretedAgeGroup) for ai in ans)):
if ans:
ages = [ai.age for ai in ans]
errors = [ai.age_err for ai in ans]
k39 = [nominal_value(ai.k39) for ai in ans]
options = {'nsteps': self.plateau_nsteps,
'gas_fraction': self.plateau_gas_fraction,
'overlap_sigma': self.plateau_overlap_sigma,
'fixed_steps': self.fixed_steps}
excludes = [i for i, ai in enumerate(ans) if self._is_omitted(ai)]
args = calculate_plateau_age(ages, errors, k39, method=self.plateau_method,
options=options, excludes=excludes)
if args:
v, e, pidx = args
if pidx[0] == pidx[1]:
return
self.plateau_steps = pidx
self.plateau_steps_str = '{}-{}'.format(alphas(pidx[0]),
alphas(pidx[1]))
step_idxs = [i for i in range(pidx[0], pidx[1] + 1) if not self._is_omitted(ans[i])]
self.nsteps = len(step_idxs)
pages = array([ages[i] for i in step_idxs])
perrs = array([errors[i] for i in step_idxs])
mswd = calculate_mswd(pages, perrs)
self.plateau_mswd_valid = validate_mswd(mswd, self.nsteps)
self.plateau_mswd = mswd
if self.plateau_age_error_kind == SD:
e = array(pages).std()
else:
e = self._modify_error(v, e,
self.plateau_age_error_kind,
mswd=mswd)
if math.isnan(e):
e = 0
a = ufloat(v, max(0, e))
self._apply_external_err(a, force=self.include_j_error_in_mean or self.include_j_error_in_plateau)
return a
class InterpretedAgeGroup(StepHeatAnalysisGroup, Preferred):
uuid = Str
all_analyses = List
subgroup_id = Int
# preferred_values = List
name = Str
use = Bool
lithology_classes = List
lithology_groups = List
lithology_types = List
lithologies = List
comments = Str
preferred_age = Property
# modeled_j = ''
# modeled_j_err = ''
# F = ''
# F_err = ''
# rundate = ''
def __init__(self, *args, **kw):
super(InterpretedAgeGroup, self).__init__(*args, **kw)
super(Preferred, self).__init__()
if self.analyses:
self.has_subgroups(self.analyses)
def set_preferred_age(self, pk, ek):
pv = self._get_pv('age')
pv.error_kind = ek
pv.kind = pk
pv.dirty = True
def ages(self, asfloat=True):
vs = {k: getattr(self, k) for k in ('weighted_age', 'plateau_age', 'isochron_age', 'integrated_age')}
if asfloat:
es = {}
for k, v in vs.items():
vs[k] = nominal_value(v)
es['{}_err'.format(k)] = std_dev(v)
vs.update(es)
return vs
@property
def age(self):
return self.preferred_age
@property
def uage(self):
return self.age
@property
def uage_w_j_err(self):
return self.age
@property
def uage_w_position_err(self):
return self.age
@property
def kca(self):
pv = self._get_pv('kca')
return pv.uvalue
@property
def kcl(self):
pv = self._get_pv('kcl')
return pv.uvalue
@property
def radiogenic_yield(self):
pv = self._get_pv('radiogenic_yield')
return pv.uvalue
@property
def moles_k39(self):
pv = self._get_pv('moles_k39')
return pv.uvalue
@property
def k39(self):
return self.signal_k39
@property
def signal_k39(self):
pv = self._get_pv('signal_k39')
return pv.uvalue
def get_value(self, attr):
if hasattr(self, attr):
ret = getattr(self, attr)
else:
ret = ufloat(0, 0)
return ret
@on_trait_change('analyses')
def has_subgroups(self, new):
hs = any((isinstance(a, InterpretedAgeGroup) for a in new))
for pv in self.preferred_values:
if pv.attr == 'age':
continue
if hs:
if pv.attr in ('kca', 'kcl', 'moles_k39', 'signal_k39'):
pv.kind = ARITHMETIC_MEAN
else:
pv.kind = WEIGHTED_MEAN
pv.kinds = [WEIGHTED_MEAN, ARITHMETIC_MEAN]
else:
pv.kinds = SUBGROUPINGS
@on_trait_change('preferred_values:[kind, error_kind, dirty, weighting]')
def handle_preferred_change(self, obj, name, old, new):
if obj.attr == 'age':
if 'Plateau' in obj.kind:
self.plateau_age_error_kind = obj.error_kind
if obj.kind != 'Plateau':
self.age_error_kind = obj.error_kind
else:
self.age_error_kind = obj.error_kind
self.dirty = True
v = self._get_preferred_age()
obj.value = nominal_value(v)
obj.error = std_dev(v)
self.dirty = True
else:
v, k = self._get_preferred_(obj.attr, obj.kind, obj.error_kind, obj.weighting)
obj.value = nominal_value(v)
obj.error = std_dev(v)
obj.computed_kind = k
def preferred_values_to_dict(self):
return [pv.to_dict() for pv in self.preferred_values]
def get_preferred_age(self):
return self._get_preferred_age()
def get_ma_scaled_age(self):
a = self._get_preferred_age()
return self.arar_constants.scale_age(a, 'Ma')
def scaled_age(self, a, units='Ma'):
return self.arar_constants.scale_age(a, units)
def get_preferred_mswd(self):
pv = self._get_pv('age')
if pv.computed_kind.lower() == 'plateau':
return self.plateau_mswd
else:
return self.mswd
def get_preferred_mswd_tuple(self):
pv = self._get_pv('age')
k = pv.computed_kind.lower()
t = self.get_mswd_tuple()
if k == 'plateau':
t = self.get_plateau_mswd_tuple()
return t
def set_preferred_kinds(self, sg=None):
naliquots = len({a.aliquot for a in self.analyses})
default_ek = MSEM if naliquots > 1 else SD
default_vk = WEIGHTED_MEAN if naliquots > 1 else DEFAULT_INTEGRATED
for k in SUBGROUPING_ATTRS:
if sg is None:
if k == 'age':
# if only 1 aliquot in group assume step heat
if naliquots > 1:
vk, ek = WEIGHTED_MEAN, MSEM
else:
vk, ek = PLATEAU_ELSE_WEIGHTED_MEAN, MSEM
else:
vk = default_vk
ek = default_ek
else:
vk = sg.get('{}_kind'.format(k), default_vk)
ek = sg.get('{}_error_kind'.format(k), default_ek)
self.set_preferred_kind(k, vk, ek)
def set_preferred_kind(self, attr, k, ek):
pv = self._get_pv(attr)
pv.error_kind = ek
pv.kind = k
pv.dirty = True
def get_preferred_kind(self, attr):
pv = self.get_preferred_obj(attr)
return pv.kind
def get_preferred_obj(self, attr):
if attr == 'age':
# force preferred age
_ = self.preferred_age
pv = self._get_pv(attr)
return pv
# get preferred objects
def _get_preferred_age(self):
pa = ufloat(0, 0)
pv = self._get_pv('age')
pak = pv.kind.lower().replace(' ', '_')
pv.computed_kind = pv.kind
if pak in ('weighted_mean', 'wt._mean'):
pa = self.weighted_age
elif pak == 'arithmetic_mean':
pa = self.arith_age
elif pak == 'isochron':
pa = self.isochron_age
elif pak == 'isochron_of_plateau_steps':
self.calculate_plateau()
if not self.plateau_steps:
pa = self.isochron_age
pv.computed_kind = ISOCHRON
else:
pa = self.calculate_isochron_age(exclude_non_plateau=True)
elif pak == 'integrated':
pa = self._calculate_integrated('age', 'valid', pv.weighting)
elif pak == 'plateau':
pa = self.plateau_age
elif pak == 'valid_integrated':
pa = self._calculate_integrated('age', 'valid', pv.weighting)
elif pak == 'total_integrated':
pa = self._calculate_integrated('age', 'total', pv.weighting)
elif pak == 'plateau_integrated':
pa = self._calculate_integrated('age', 'plateau', pv.weighting)
elif pak == 'plateau_else_weighted_mean':
pa = self.plateau_age
pv.computed_kind = 'Plateau'
if not self.plateau_steps:
pa = self.weighted_age
pv.computed_kind = WEIGHTED_MEAN
return pa
def _get_preferred_(self, attr, kind, error_kind, weighting):
setattr(self, '{}_error_kind'.format(attr), error_kind)
self.dirty = True
pk = kind.lower().replace(' ', '_')
if pk == 'weighted_mean':
pa = self._get_weighted_mean(attr)
elif pk == 'valid_integrated':
pa = self._calculate_integrated(attr, 'valid', weighting)
elif pk == 'total_integrated':
pa = self._calculate_integrated(attr, 'total', weighting)
elif pk == 'plateau_integrated':
pa = self._calculate_integrated(attr, 'plateau', weighting)
elif pk == 'plateau_else_valid_integrated':
if self.plateau_age:
kind = 'Plateau'
pa = self._calculate_integrated(attr, 'plateau', weighting)
else:
kind = 'Valid'
pa = self._calculate_integrated(attr, 'valid', weighting)
else:
pa = self._calculate_arithmetic_mean(attr)
if isinstance(pa, tuple):
pa = ufloat(*pa)
return pa, kind
def _name_default(self):
name = ''
if self.analyses:
name = make_aliquot(self.aliquot)
return name
def _get_nanalyses(self):
pv = self._get_pv('age')
k = pv.computed_kind.lower()
if k == 'plateau':
n = self.nsteps
else:
n = super(InterpretedAgeGroup, self)._get_nanalyses()
return n
def _value_string(self, t):
try:
v = getattr(self, t)
a, e = nominal_value(v), std_dev(v)
except AttributeError:
a, e = NULL_STR, NULL_STR
return a, e
def __getattr__(self, item):
return ''
# ============= EOF =============================================
| apache-2.0 | -6,850,001,143,041,851,000 | 29.923077 | 115 | 0.523354 | false |
code-sauce/tensorflow | tensorflow/python/ops/gradients_impl.py | 1 | 37128 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
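# Registering this converter lets ops that only accept dense Tensors consume
# the IndexedSlices produced by the gradients of ops such as gather, at the
# cost of the densification warned about above.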
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
grad_ys[i] = array_ops.fill(
array_ops.shape(y), constant_op.constant(
1, dtype=y.dtype))
continue
if y.dtype.is_floating or y.dtype.is_integer:
if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
raise TypeError("Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
else:
raise TypeError("Tensor %s with type %s must be numeric "
"to obtain a default gradient" %
(y, dtypes.as_dtype(y.dtype).name))
return grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
    op: Operation for which the gradients were generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is None:
continue
if grad.dtype.is_floating:
if not inp.dtype.is_floating:
raise TypeError("Gradient type %s generated for real-valued op %s "
"with type %s must be real" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
elif grad.dtype.is_complex:
if not inp.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued op %s"
" with type %s must be complex" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
else:
raise TypeError("Gradient type %s generated for op %s "
"with type %s must be either real or complex" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# pylint: disable=protected-access
in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
# pylint: enable=protected-access
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
return grad_fn() # Exit early
if not xla_compile:
return grad_fn() # Exit early
attrs = {"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(
s=("%s_grad_%s" % (xla_scope, scope)).encode())}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
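# Usage sketch (hypothetical tensor names; assumes y depends on w in the
# default graph):
#   dw, = gradients(ys=y, xs=[w])           # d(sum(y))/dw
#   dw, = gradients(y, [w], grad_ys=[g])    # supply a custom upstream gradient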
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
`Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
      for each operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
"""
ys = _AsList(ys)
xs = _AsList(xs)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(name, "gradients", ys + xs + grad_ys) as grad_scope:
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
else x
for x in xs]
xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
from_ops,
colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# pylint: enable=protected-access
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
# The set of 'from_ops'.
stop_ops = _StopOps(from_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
func_call = None
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
func_call = ops.get_default_graph()._get_function(op.type)
grad_fn = func_call.python_grad_func
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and
not out_grad) and _IsTrainable(op.outputs[i]):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(
grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(
grad_scope, op, func_call, lambda: _SymGrad(op, out_grads))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for t_in, in_grad in zip(op.inputs, in_grads):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
in_grad.set_shape(t_in.get_shape())
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in op.inputs:
# pylint: disable=protected-access
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
# pylint: enable=protected-access
if ready:
if control_flow_ops.IsLoopExit(x.op):
        # If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_real_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_real_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_real_grad:
# For an unused exit, if it has floating-point outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_ops.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values,
array_ops.gather(grad.indices, g.indices),
g.dense_shape)
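# A hedged illustration of what the recursion above does: for a nested value
# like IndexedSlices(values=IndexedSlices(v, i2, d), indices=i1, dense_shape=d)
# it returns IndexedSlices(v, gather(i1, i2), d) -- the outer indices are
# composed with the inner ones, leaving a single level of indirection.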
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
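# A minimal usage sketch, assuming the public `tf.gradients` wrapper around the
# `gradients` function in this module (graph mode; tensor names are illustrative):
#
#   x = tf.constant([1.0, 2.0, 3.0])
#   y = tf.reduce_sum(x * x)
#   g = tf.gradients(y, [x],
#                    aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
#
# The experimental methods trade some speed for a smaller peak memory footprint
# when many gradient terms are summed into the same tensor.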
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of gradients, one per each output of `op`. If the gradients
for a particular output is a list, this function aggregates it
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError("Invalid aggregation_method specified %s." %
aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_ops.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s",
len(out_grad), tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list(
[g for g in out_grad if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat([x.values for x in out_grad], 0),
array_ops.concat([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
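# A hedged usage sketch of this private helper (tensor names are illustrative):
#
#   x = tf.Variable([1.0, 2.0])
#   y = tf.reduce_sum(x * x * x)
#   v = [tf.constant([1.0, 0.0])]
#   hvp = _hessian_vector_product(y, [x], v)  # one tensor, same shape as x
#
# The result equals the Hessian of y w.r.t. x multiplied by v, without ever
# materializing the Hessian itself.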
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
gate_gradients=False, aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(y)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
ValueError: if the arguments are invalid or not supported. Currently,
this function only supports one-dimensional `x` in `xs`.
"""
xs = _AsList(xs)
kwargs = {
'colocate_gradients_with_ops': colocate_gradients_with_ops,
'gate_gradients': gate_gradients,
'aggregation_method': aggregation_method
}
# Compute a hessian matrix for each x in xs
hessians = []
for i, x in enumerate(xs):
# Check dimensions
ndims = x.get_shape().ndims
if ndims is None:
raise ValueError('Cannot compute Hessian because the dimensionality of '
'element number %d of `xs` cannot be determined' % i)
elif ndims != 1:
raise ValueError('Computing hessians is currently only supported for '
'one-dimensional tensors. Element number %d of `xs` has '
'%d dimensions.' % (i, ndims))
with ops.name_scope(name + '_first_derivative'):
# Compute the partial derivatives of the input with respect to all
# elements of `x`
_gradients = gradients(ys, x, **kwargs)[0]
# Unpack the gradients into a list so we can take derivatives with
# respect to each element
_gradients = array_ops.unstack(_gradients)
with ops.name_scope(name + '_second_derivative'):
# Compute the partial derivatives with respect to each element of the list
_hess = [gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
# Pack the list into a matrix and add to the list of hessians
hessians.append(array_ops.stack(_hess, name=name))
return hessians
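# A hedged usage sketch (graph mode; values are illustrative):
#
#   x = tf.Variable(tf.ones([3]))
#   y = tf.reduce_sum(x * x)
#   h = hessians(y, x)[0]  # a 3x3 matrix, here 2 * identity
#
# Only rank-1 `x` is supported, as enforced by the ndims check above.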
| apache-2.0 | -1,326,089,417,103,896,800 | 37.675 | 86 | 0.647732 | false |
gena701/Seleniun_php4dvd_lesson2_Rovinsky | php4dvd/php4dvd_negative.py | 1 | 2371 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class Untitled(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost/"
self.verificationErrors = []
self.accept_next_alert = True
def test_untitled(self):
driver = self.driver
driver.get(self.base_url + "/php4dvd/")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("admin")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("submit").click()
driver.find_element_by_css_selector("img[alt=\"Add movie\"]").click()
driver.find_element_by_name("name").clear()
driver.find_element_by_name("name").send_keys() #send_keys("aaaaaaaaaaaaaaaaa")
driver.find_element_by_name("year").clear()
driver.find_element_by_name("year").send_keys("1977")
driver.find_element_by_css_selector("img[alt=\"Save\"]").click()
driver.find_element_by_id("submit").click()
driver.find_element_by_css_selector("img[alt=\"Own\"]").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -3,795,401,780,178,088,000 | 37.868852 | 87 | 0.639814 | false |
ntddk/pemu | roms/seabios/scripts/checkrom.py | 17 | 3347 | #!/usr/bin/env python
# Script to check a bios image and report info on it.
#
# Copyright (C) 2008 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, struct
import layoutrom, buildrom
from python23compat import as_bytes
def subst(data, offset, new):
return data[:offset] + new + data[offset + len(new):]
def checksum(data, start, size, csum):
sumbyte = buildrom.checksum(data[start:start+size])
return subst(data, start+csum, sumbyte)
def main():
# Get args
objinfo, finalsize, rawfile, outfile = sys.argv[1:]
# Read in symbols
objinfofile = open(objinfo, 'r')
symbols = layoutrom.parseObjDump(objinfofile, 'in')[1]
# Read in raw file
f = open(rawfile, 'rb')
rawdata = f.read()
f.close()
datasize = len(rawdata)
finalsize = int(finalsize) * 1024
if finalsize == 0:
finalsize = 64*1024
if datasize > 64*1024:
finalsize = 128*1024
if datasize > 128*1024:
finalsize = 256*1024
if datasize > finalsize:
print("Error! ROM doesn't fit (%d > %d)" % (datasize, finalsize))
print(" You have to either increate the size (CONFIG_ROM_SIZE)")
print(" or turn off some features (such as hardware support not")
print(" needed) to make it fit. Trying a more recent gcc version")
print(" might work too.")
sys.exit(1)
# Sanity checks
start = symbols['code32flat_start'].offset
end = symbols['code32flat_end'].offset
expend = layoutrom.BUILD_BIOS_ADDR + layoutrom.BUILD_BIOS_SIZE
if end != expend:
print("Error! Code does not end at 0x%x (got 0x%x)" % (
expend, end))
sys.exit(1)
if datasize > finalsize:
print("Error! Code is too big (0x%x vs 0x%x)" % (
datasize, finalsize))
sys.exit(1)
expdatasize = end - start
if datasize != expdatasize:
print("Error! Unknown extra data (0x%x vs 0x%x)" % (
datasize, expdatasize))
sys.exit(1)
# Fix up CSM Compatibility16 table
if 'csm_compat_table' in symbols and 'entry_csm' in symbols:
# Field offsets within EFI_COMPATIBILITY16_TABLE
ENTRY_FIELD_OFS = 14 # Compatibility16CallOffset (UINT16)
SIZE_FIELD_OFS = 5 # TableLength (UINT8)
CSUM_FIELD_OFS = 4 # TableChecksum (UINT8)
tableofs = symbols['csm_compat_table'].offset - symbols['code32flat_start'].offset
entry_addr = symbols['entry_csm'].offset - layoutrom.BUILD_BIOS_ADDR
entry_addr = struct.pack('<H', entry_addr)
rawdata = subst(rawdata, tableofs+ENTRY_FIELD_OFS, entry_addr)
tsfield = tableofs+SIZE_FIELD_OFS
tablesize = ord(rawdata[tsfield:tsfield+1])
rawdata = checksum(rawdata, tableofs, tablesize, CSUM_FIELD_OFS)
# Print statistics
runtimesize = end - symbols['code32init_end'].offset
print("Total size: %d Fixed: %d Free: %d (used %.1f%% of %dKiB rom)" % (
datasize, runtimesize, finalsize - datasize
, (datasize / float(finalsize)) * 100.0
, int(finalsize / 1024)))
# Write final file
f = open(outfile, 'wb')
f.write((as_bytes("\0") * (finalsize - datasize)) + rawdata)
f.close()
if __name__ == '__main__':
main()
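# Typical invocation, normally done by the build system (file names are
# illustrative): python checkrom.py rom16.o.objdump 256 rom.o.raw bios.bin
# i.e. <objdump symbol info> <rom size in KiB> <raw rom image> <output rom>.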
| gpl-2.0 | 3,845,735,010,519,052,000 | 34.231579 | 90 | 0.617867 | false |
dorapanda/qemu-2.0.0-with-fm3 | roms/seabios/scripts/buildrom.py | 16 | 1215 | #!/usr/bin/env python
# Fill in checksum/size of an option rom, and pad it to proper length.
#
# Copyright (C) 2009 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys
def alignpos(pos, alignbytes):
mask = alignbytes - 1
return (pos + mask) & ~mask
def checksum(data):
ords = map(ord, data)
return sum(ords)
def main():
inname = sys.argv[1]
outname = sys.argv[2]
# Read data in
f = open(inname, 'rb')
data = f.read()
f.close()
count = len(data)
# Pad to a 512 byte boundary
data += "\0" * (alignpos(count, 512) - count)
count = len(data)
# Check if a pci header is present
pcidata = ord(data[24:25]) + (ord(data[25:26]) << 8)
if pcidata != 0:
data = data[:pcidata + 16] + chr(count/512) + chr(0) + data[pcidata + 18:]
# Fill in size field; clear checksum field
data = data[:2] + chr(count/512) + data[3:6] + "\0" + data[7:]
# Checksum rom
newsum = (256 - checksum(data)) & 0xff
data = data[:6] + chr(newsum) + data[7:]
# Write new rom
f = open(outname, 'wb')
f.write(data)
f.close()
if __name__ == '__main__':
main()
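# Typical invocation (file names are illustrative):
#   python buildrom.py vgabios.raw vgabios.bin
# The script pads the image to a 512-byte multiple, stores the size (in
# 512-byte units) in the size field and writes the 8-bit checksum at offset 6.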
| gpl-2.0 | -7,472,691,905,652,102,000 | 23.3 | 82 | 0.583539 | false |
v-iam/azure-sdk-for-python | azure-mgmt-authorization/azure/mgmt/authorization/models/resource_type.py | 2 | 1278 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceType(Model):
"""Resource Type.
:param name: The resource type name.
:type name: str
:param display_name: The resource type display name.
:type display_name: str
:param operations: The resource type operations.
:type operations: list of :class:`ProviderOperation
<azure.mgmt.authorization.models.ProviderOperation>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'operations': {'key': 'operations', 'type': '[ProviderOperation]'},
}
def __init__(self, name=None, display_name=None, operations=None):
self.name = name
self.display_name = display_name
self.operations = operations
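    # A hedged construction example (the field values are illustrative only):
    #
    #   rt = ResourceType(
    #       name='roleAssignments',
    #       display_name='Role assignment',
    #       operations=[])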
| mit | 991,610,272,054,192,900 | 34.5 | 76 | 0.591549 | false |
irvingprog/pilas | pilas/camara.py | 6 | 3475 | # -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
import pilas
class Camara(object):
"""Representa el punto de vista de la ventana.
Los atributos ``x`` e ``y`` indican cual debe ser el
punto central de la pantalla. Por defecto estos
valores con (0, 0)."""
def __init__(self):
"""Inicializa la cámara.
"""
pass
@pilas.utils.interpolable
def _set_x(self, x):
"""Define la posición horizontal de la cámara.
:param x: Posición horizontal.
"""
pilas.escena_actual().mueve_camara.emitir(x=x, y=self.y, dx=x-self.x, dy=0)
pilas.mundo.motor.definir_centro_de_la_camara(x, self.y)
def _get_x(self):
"""Retorna la posición horizontal."""
x, y = pilas.mundo.motor.obtener_centro_de_la_camara()
return x
@pilas.utils.interpolable
def _set_y(self, y):
"""Define la posición vertical de la cámara.
:param y: Posición vertical.
"""
pilas.escena_actual().mueve_camara.emitir(x=self.x, y=y, dx=0, dy=y-self.y)
pilas.mundo.motor.definir_centro_de_la_camara(self.x, y)
def _get_y(self):
"""Retorna la posición vertical."""
x, y = pilas.mundo.motor.obtener_centro_de_la_camara()
return y
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
def obtener_area_visible(self):
"""Retorna el area del escenario que está visible por la cámara.
Por ejemplo, si la cámara está en posición inicial, esta
función podría retornar:
>>> pilas.escena_actual().camara.obtener_area_visible()
(0, 640, 240, -240)
y si movemos la cámara un poco para la derecha:
>>> pilas.escena_actual().camara.x = 100
>>> pilas.escena_actual().camara.obtener_area_visible()
(100, 740, 240, -240)
Es decir, la tupla representa un rectángulo de la forma::
(izquierda, derecha, arriba, abajo)
En nuestro caso, el último ejemplo muestra que cuando
la cámara se mueve a ``x = 100`` el area de pantalla
visible es ``(izquierda=100, derecha=740, arriba=240, abajo=-240)``.
¡ ha quedado invisible todo lo que está a la izquierda de ``x=100`` !
Esta función es útil para ``despetar`` actores o simplemente
Si quieres saber si un actor está fuera de la pantalla hay un
atajo, existe un método llamado ``esta_fuera_de_la_pantalla`` en
los propios actores:
>>> mi_actor = pilas.actores.Mono(x=0, y=0)
>>> mi_actor.esta_fuera_de_la_pantalla()
False
>>> pilas.escena_actual().camara.x == 900
>>> mi_actor.esta_fuera_de_la_pantalla()
True
"""
ancho, alto = pilas.mundo.obtener_area()
return (self.x - ancho/2, self.x + ancho/2, self.y + alto/2, self.y - alto/2)
def desplazar(self, posicion):
"""Mueve la cámara hacia una posición en particular.
:param posicion: La posición destino, a donde enfocar.
"""
x, y = posicion
return (x - self.x, y - self.y)
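    # Example: if the camera is at (0, 0), ``desplazar((100, 50))`` returns the
    # offset ``(100, 50)``; the caller is expected to apply that displacement.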
def reiniciar(self):
"""Mueve la cámara a la posicion inicial (0,0). """
pilas.mundo.motor.definir_centro_de_la_camara(0, 0)
| lgpl-3.0 | 5,689,999,541,757,785,000 | 31.5 | 85 | 0.594194 | false |
barnsnake351/nova | nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py | 81 | 1198 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
"""Remove the instance_group_metadata table."""
meta = MetaData(bind=migrate_engine)
if migrate_engine.has_table('instance_group_metadata'):
group_metadata = Table('instance_group_metadata', meta, autoload=True)
group_metadata.drop()
if migrate_engine.has_table('shadow_instance_group_metadata'):
shadow_group_metadata = Table('shadow_instance_group_metadata', meta,
autoload=True)
shadow_group_metadata.drop()
| apache-2.0 | -422,589,223,119,459,840 | 37.645161 | 78 | 0.693656 | false |
ybrs/terminator-forked | build/lib.linux-x86_64-2.7/terminatorlib/titlebar.py | 3 | 10522 | #!/usr/bin/python
# Terminator by Chris Jones <[email protected]>
# GPL v2 only
"""titlebar.py - classes necessary to provide a terminal title bar"""
import gtk
import gobject
from version import APP_NAME
from util import dbg
from terminator import Terminator
from editablelabel import EditableLabel
# pylint: disable-msg=R0904
# pylint: disable-msg=W0613
class Titlebar(gtk.EventBox):
"""Class implementing the Titlebar widget"""
terminator = None
terminal = None
config = None
oldtitle = None
termtext = None
sizetext = None
label = None
ebox = None
groupicon = None
grouplabel = None
groupentry = None
bellicon = None
__gsignals__ = {
'clicked': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
'edit-done': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
'create-group': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING,)),
}
def __init__(self, terminal):
"""Class initialiser"""
gtk.EventBox.__init__(self)
self.__gobject_init__()
self.terminator = Terminator()
self.terminal = terminal
self.config = self.terminal.config
self.label = EditableLabel()
self.label.connect('edit-done', self.on_edit_done)
self.ebox = gtk.EventBox()
grouphbox = gtk.HBox()
self.grouplabel = gtk.Label()
self.groupicon = gtk.Image()
self.bellicon = gtk.Image()
self.bellicon.set_no_show_all(True)
self.groupentry = gtk.Entry()
self.groupentry.set_no_show_all(True)
self.groupentry.connect('focus-out-event', self.groupentry_cancel)
self.groupentry.connect('activate', self.groupentry_activate)
self.groupentry.connect('key-press-event', self.groupentry_keypress)
groupsend_type = self.terminator.groupsend_type
if self.terminator.groupsend == groupsend_type['all']:
icon_name = 'all'
elif self.terminator.groupsend == groupsend_type['group']:
icon_name = 'group'
elif self.terminator.groupsend == groupsend_type['off']:
icon_name = 'off'
self.set_from_icon_name('_active_broadcast_%s' % icon_name,
gtk.ICON_SIZE_MENU)
grouphbox.pack_start(self.groupicon, False, True, 2)
grouphbox.pack_start(self.grouplabel, False, True, 2)
grouphbox.pack_start(self.groupentry, False, True, 2)
self.ebox.add(grouphbox)
self.ebox.show_all()
self.bellicon.set_from_icon_name('terminal-bell', gtk.ICON_SIZE_MENU)
hbox = gtk.HBox()
hbox.pack_start(self.ebox, False, True, 0)
hbox.pack_start(gtk.VSeparator(), False, True, 0)
hbox.pack_start(self.label, True, True)
hbox.pack_end(self.bellicon, False, False, 2)
self.add(hbox)
hbox.show_all()
self.set_no_show_all(True)
self.show()
self.connect('button-press-event', self.on_clicked)
def connect_icon(self, func):
"""Connect the supplied function to clicking on the group icon"""
self.ebox.connect('button-release-event', func)
def update(self, other=None):
"""Update our contents"""
default_bg = False
if self.config['title_hide_sizetext']:
self.label.set_text("%s" % self.termtext)
else:
self.label.set_text("%s %s" % (self.termtext, self.sizetext))
if other:
term = self.terminal
terminator = self.terminator
if other == 'window-focus-out':
title_fg = self.config['title_inactive_fg_color']
title_bg = self.config['title_inactive_bg_color']
icon = '_receive_off'
default_bg = True
group_fg = self.config['title_inactive_fg_color']
group_bg = self.config['title_inactive_bg_color']
elif term != other and term.group and term.group == other.group:
if terminator.groupsend == terminator.groupsend_type['off']:
title_fg = self.config['title_inactive_fg_color']
title_bg = self.config['title_inactive_bg_color']
icon = '_receive_off'
default_bg = True
else:
title_fg = self.config['title_receive_fg_color']
title_bg = self.config['title_receive_bg_color']
icon = '_receive_on'
group_fg = self.config['title_receive_fg_color']
group_bg = self.config['title_receive_bg_color']
elif term != other and not term.group or term.group != other.group:
if terminator.groupsend == terminator.groupsend_type['all']:
title_fg = self.config['title_receive_fg_color']
title_bg = self.config['title_receive_bg_color']
icon = '_receive_on'
else:
title_fg = self.config['title_inactive_fg_color']
title_bg = self.config['title_inactive_bg_color']
icon = '_receive_off'
default_bg = True
group_fg = self.config['title_inactive_fg_color']
group_bg = self.config['title_inactive_bg_color']
else:
# We're the active terminal
title_fg = self.config['title_transmit_fg_color']
title_bg = self.config['title_transmit_bg_color']
if terminator.groupsend == terminator.groupsend_type['all']:
icon = '_active_broadcast_all'
elif terminator.groupsend == terminator.groupsend_type['group']:
icon = '_active_broadcast_group'
else:
icon = '_active_broadcast_off'
group_fg = self.config['title_transmit_fg_color']
group_bg = self.config['title_transmit_bg_color']
self.label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(title_fg))
self.grouplabel.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(group_fg))
self.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(title_bg))
if not self.get_desired_visibility():
if default_bg == True:
color = term.get_style().bg[gtk.STATE_NORMAL]
else:
color = gtk.gdk.color_parse(title_bg)
self.update_visibility()
self.ebox.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(group_bg))
self.set_from_icon_name(icon, gtk.ICON_SIZE_MENU)
def update_visibility(self):
"""Make the titlebar be visible or not"""
if not self.get_desired_visibility():
dbg('hiding titlebar')
self.hide()
self.label.hide()
else:
dbg('showing titlebar')
self.show()
self.label.show()
def get_desired_visibility(self):
"""Returns True if the titlebar is supposed to be visible. False if
not"""
if self.editing() == True or self.terminal.group:
dbg('implicit desired visibility')
return(True)
else:
dbg('configured visibility: %s' % self.config['show_titlebar'])
return(self.config['show_titlebar'])
def set_from_icon_name(self, name, size = gtk.ICON_SIZE_MENU):
"""Set an icon for the group label"""
if not name:
self.groupicon.hide()
return
self.groupicon.set_from_icon_name(APP_NAME + name, size)
self.groupicon.show()
def update_terminal_size(self, width, height):
"""Update the displayed terminal size"""
self.sizetext = "%sx%s" % (width, height)
self.update()
def set_terminal_title(self, widget, title):
"""Update the terminal title"""
self.termtext = title
self.update()
# Return False so we don't interrupt any chains of signal handling
return False
def set_group_label(self, name):
"""Set the name of the group"""
if name:
self.grouplabel.set_text(name)
self.grouplabel.show()
else:
self.grouplabel.hide()
self.update_visibility()
def on_clicked(self, widget, event):
"""Handle a click on the label"""
self.show()
self.label.show()
self.emit('clicked')
def on_edit_done(self, widget):
"""Re-emit an edit-done signal from an EditableLabel"""
self.emit('edit-done')
def editing(self):
"""Determine if we're currently editing a group name or title"""
return(self.groupentry.get_property('visible') or self.label.editing())
def create_group(self):
"""Create a new group"""
self.groupentry.show()
self.groupentry.grab_focus()
self.update_visibility()
def groupentry_cancel(self, widget, event):
"""Hide the group name entry"""
self.groupentry.set_text('')
self.groupentry.hide()
self.get_parent().grab_focus()
def groupentry_activate(self, widget):
"""Actually cause a group to be created"""
groupname = self.groupentry.get_text()
dbg('Titlebar::groupentry_activate: creating group: %s' % groupname)
self.groupentry_cancel(None, None)
self.emit('create-group', groupname)
def groupentry_keypress(self, widget, event):
"""Handle keypresses on the entry widget"""
key = gtk.gdk.keyval_name(event.keyval)
if key == 'Escape':
self.groupentry_cancel(None, None)
def icon_bell(self):
"""A bell signal requires we display our bell icon"""
self.bellicon.show()
gobject.timeout_add(1000, self.icon_bell_hide)
def icon_bell_hide(self):
"""Handle a timeout which means we now hide the bell icon"""
self.bellicon.hide()
return(False)
def get_custom_string(self):
"""If we have a custom string set, return it, otherwise None"""
if self.label.is_custom():
return(self.label.get_text())
else:
return(None)
def set_custom_string(self, string):
"""Set a custom string"""
self.label.set_text(string)
self.label.set_custom()
gobject.type_register(Titlebar)
| gpl-2.0 | -3,598,565,055,309,124,600 | 36.44484 | 80 | 0.573465 | false |
kernelmilowill/PDMQBACKTEST | vn.ksgold/pyscript/generate_data_type.py | 36 | 2369 | # encoding: UTF-8
__author__ = 'CHENXY'
# Mapping dictionary from C++ types to Python types
type_dict = {
'int': 'int',
'char': 'string',
'double': 'float',
'short': 'int',
'unsigned': 'string'
}
def process_line(line):
"""处理每行"""
if '///' in line: # 注释
py_line = process_comment(line)
elif 'typedef' in line: # 类型申明
py_line = process_typedef(line)
elif '#define' in line: # 定义常量
py_line = process_define(line)
elif line == '\n': # 空行
py_line = line
else:
py_line = ''
return py_line
def process_comment(line):
"""处理注释"""
# if line[3] == '/':
# py_line = ''
# else:
# py_line = '#' + line[3:]
py_line = '#' + line[3:]
return py_line
def process_typedef(line):
"""处理类型申明"""
content = line.split(' ')
type_ = type_dict[content[1]]
if content[1] != 'unsigned':
keyword = content[-1]
else:
keyword = content[-1]
keyword = keyword.replace(';\n', '')
print content, keyword
if '[' in keyword:
i = keyword.index('[')
keyword = keyword[:i]
else:
        keyword = keyword.replace(';\n', '') # strip the trailing semicolon
py_line = 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
return py_line
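# Illustrative example (a made-up header line, not taken from the real API
# header): process_typedef('typedef char TKSTraderIDType[16];\n')
# returns 'typedefDict["TKSTraderIDType"] = "string"\n'.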
def process_define(line):
"""处理定义常量"""
content = line.split(' ')
constant = content[1]
if len(content)>2:
value = content[-1]
py_line = 'defineDict["%s"] = %s' % (constant, value)
else:
py_line = ''
return py_line
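# Illustrative example (a made-up constant): process_define('#define KS_MAX_NAME_LEN 31\n')
# returns 'defineDict["KS_MAX_NAME_LEN"] = 31\n'.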
def main():
"""主函数"""
try:
fcpp = open('KSUserApiDataTypeEx.h','r')
fpy = open('ksgold_data_type.py', 'w')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('defineDict = {}\n')
fpy.write('typedefDict = {}\n')
fpy.write('\n')
for n, line in enumerate(fcpp):
py_line = process_line(line)
if py_line:
fpy.write(py_line.decode('gbk').encode('utf-8'))
print n
fcpp.close()
fpy.close()
        print u'data_type.py generation finished'
except Exception, e:
        print u'data_type.py generation failed'
print e
if __name__ == '__main__':
main()
| mit | -1,662,868,652,153,844,700 | 20 | 64 | 0.488652 | false |
wolverineav/neutron | neutron/tests/unit/plugins/ml2/drivers/macvtap/test_macvtap_common.py | 9 | 2462 | # Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import mock
from neutron.plugins.ml2.drivers.macvtap import macvtap_common as m_common
from neutron.tests import base
MOCKED_HASH = "MOCKEDHASH"
class MockSHA(object):
def hexdigest(self):
return MOCKED_HASH
class MacvtapCommonTestCase(base.BaseTestCase):
@mock.patch.object(hashlib, 'sha1', return_value=MockSHA())
def test_get_vlan_device_name(self, mocked_hash):
# only the first six chars of the hash are being used in the algorithm
hash_used = MOCKED_HASH[0:6]
self.assertEqual('10charrrrr.1',
m_common.get_vlan_device_name('10charrrrr', "1"))
self.assertEqual('11ch' + hash_used + '.1',
m_common.get_vlan_device_name('11charrrrrr', "1"))
self.assertEqual('14ch' + hash_used + '.1',
m_common.get_vlan_device_name('14charrrrrrrrr', "1"))
self.assertEqual('14ch' + hash_used + '.1111',
m_common.get_vlan_device_name('14charrrrrrrrr',
"1111"))
def test_get_vlan_subinterface_name_advanced(self):
"""Ensure the same hash is used for long interface names.
If the generated vlan device name would be too long, make sure that
everything before the '.' is equal. This might be helpful when
debugging problems.
"""
max_device_name = "15charrrrrrrrrr"
vlan_dev_name1 = m_common.get_vlan_device_name(max_device_name,
"1")
vlan_dev_name2 = m_common.get_vlan_device_name(max_device_name,
"1111")
self.assertEqual(vlan_dev_name1.partition(".")[0],
vlan_dev_name2.partition(".")[0])
| apache-2.0 | 7,914,396,332,615,137,000 | 40.033333 | 78 | 0.60723 | false |
gqwest-erp/server | openerp/addons/report_webkit/report_helper.py | 42 | 3183 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp import pooler
class WebKitHelper(object):
"""Set of usefull report helper"""
def __init__(self, cursor, uid, report_id, context):
"constructor"
self.cursor = cursor
self.uid = uid
self.pool = pooler.get_pool(self.cursor.dbname)
self.report_id = report_id
def embed_image(self, type, img, width=0, height=0) :
"Transform a DB image into an embedded HTML image"
if width :
width = 'width="%spx"'%(width)
else :
width = ' '
if height :
height = 'height="%spx"'%(height)
else :
height = ' '
toreturn = '<img %s %s src="data:image/%s;base64,%s" />'%(
width,
height,
type,
str(img))
return toreturn
def get_logo_by_name(self, name):
"""Return logo by name"""
header_obj = self.pool.get('ir.header_img')
header_img_id = header_obj.search(
self.cursor,
self.uid,
[('name','=',name)]
)
if not header_img_id :
return u''
if isinstance(header_img_id, list):
header_img_id = header_img_id[0]
head = header_obj.browse(self.cursor, self.uid, header_img_id)
return (head.img, head.type)
def embed_logo_by_name(self, name, width=0, height=0):
"""Return HTML embedded logo by name"""
img, type = self.get_logo_by_name(name)
return self.embed_image(type, img, width, height)
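    # A hedged usage sketch: webkit report templates receive this object as
    # ``helper``, so a header image named 'logo' (illustrative) could be
    # embedded with something like ${helper.embed_logo_by_name('logo', width=150)}.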
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,188,245,272,848,504,000 | 36.892857 | 80 | 0.570217 | false |
cchurch/ansible | test/units/modules/storage/netapp/test_na_ontap_command.py | 17 | 5689 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test for ONTAP Command Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_command \
import NetAppONTAPCommand as my_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, parm1=None):
''' save arguments '''
self.type = kind
self.parm1 = parm1
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
# print(xml.to_string())
if self.type == 'version':
priv = xml.get_child_content('priv')
xml = self.build_version(priv)
self.xml_out = xml
return xml
@staticmethod
def build_version(priv):
''' build xml data for version '''
prefix = 'NetApp Release'
if priv == 'advanced':
prefix = '\n' + prefix
xml = netapp_utils.zapi.NaElement('results')
xml.add_new_child('cli-output', prefix)
# print(xml.to_string())
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection(kind='version')
# whether to use a mock or a simulator
self.use_vsim = False
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
my_module()
print('Info: %s' % exc.value.args[0]['msg'])
@staticmethod
def set_default_args(vsim=False):
''' populate hostname/username/password '''
if vsim:
hostname = '10.193.78.219'
username = 'admin'
password = 'netapp1!'
else:
hostname = 'hostname'
username = 'username'
password = 'password'
return dict({
'hostname': hostname,
'username': username,
'password': password,
'https': True,
'validate_certs': False
})
def call_command(self, module_args, vsim=False):
''' utility function to call apply '''
module_args.update(self.set_default_args(vsim=vsim))
set_module_args(module_args)
my_obj = my_module()
my_obj.asup_log_for_cserver = Mock(return_value=None)
if not vsim:
# mock the connection
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
msg = exc.value.args[0]['msg']
return msg
def test_default_priv(self):
''' make sure privilege is not required '''
module_args = {
'command': 'version',
}
msg = self.call_command(module_args, vsim=self.use_vsim)
needle = b'<cli-output>NetApp Release'
assert needle in msg
print('Version (raw): %s' % msg)
def test_admin_priv(self):
''' make sure admin is accepted '''
module_args = {
'command': 'version',
'privilege': 'admin',
}
msg = self.call_command(module_args, vsim=self.use_vsim)
needle = b'<cli-output>NetApp Release'
assert needle in msg
print('Version (raw): %s' % msg)
def test_advanced_priv(self):
''' make sure advanced is not required '''
module_args = {
'command': 'version',
'privilege': 'advanced',
}
msg = self.call_command(module_args, vsim=self.use_vsim)
# Interestingly, the ZAPI returns a slightly different response
needle = b'<cli-output>\nNetApp Release'
assert needle in msg
print('Version (raw): %s' % msg)
| gpl-3.0 | 2,356,692,688,492,965,400 | 32.662722 | 92 | 0.602039 | false |
BdEINSALyon/resa | bookings/views/bookings.py | 1 | 6910 | import datetime as dt
import logging
import dateutil.parser
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.paginator import Paginator, EmptyPage
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, DeleteView, DetailView, UpdateView
from django.views.generic.base import ContextMixin
from bookings.adhesion import AdhesionAPI
from bookings.forms import BookingFormForm
from bookings.models import ResourceCategory, Resource, Booking, BookingOccurrence
log = logging.getLogger(__name__)
class BaseBookingView(ContextMixin):
booking = None
def get_context_data(self, **kwargs):
context = super(BaseBookingView, self).get_context_data(**kwargs)
page = self.request.GET.get('occ_page', 1)
occurrences = self.booking.get_occurrences().order_by('start')
paginator = Paginator(occurrences, 10)
try:
occurrences = paginator.page(page)
except EmptyPage:
occurrences = paginator.page(paginator.num_pages)
context['occurrences'] = occurrences
return context
class BookingCreateView(CreateView):
model = Booking
fields = ['contact_first_name', 'contact_last_name', 'contact_email', 'contact_phone', 'contact_asso',
'reason', 'details']
template_name = 'bookings/booking/booking_new.html'
decorators = [login_required, permission_required('bookings.add_booking')]
start = None
end = None
booking = None
object = None
resource = None
def get_form(self, form_class=None):
form = super(BookingCreateView, self).get_form(form_class=form_class)
type = self.resource.category.type
if type == ResourceCategory.ASSO:
form.fields['contact_asso'].required = True
elif type == ResourceCategory.STUDENT:
del form.fields['contact_asso']
return form
def get_success_url(self):
return reverse('bookings:occurrence-new', kwargs={'booking_pk': self.booking.pk}) \
+ '?start=' + str(self.start.isoformat()) \
+ '&end=' + str(self.end.isoformat()) \
+ '&resource=' + str(self.resource.id)
def dispatch(self, request, *args, **kwargs):
self.resource = get_object_or_404(Resource, pk=self.request.GET.get('resource'))
start = self.request.GET.get('start')
end = self.request.GET.get('end')
if start is not None:
self.start = dateutil.parser.parse(start)
else:
self.start = dt.datetime.now()
if end is not None:
self.end = dateutil.parser.parse(end)
else:
self.end = self.start
return super(BookingCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BookingCreateView, self).get_context_data(**kwargs)
context['start'] = self.start
context['end'] = self.end
context['resource_id'] = self.resource.id
return context
@method_decorator(decorators)
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
self.booking = form.save()
messages.success(request, _('Réservation créée avec succès'))
return self.form_valid(form)
else:
return self.form_invalid(form)
@method_decorator(decorators)
def get(self, request, *args, **kwargs):
return super(BookingCreateView, self).get(request, *args, **kwargs)
class BookingDetailView(DetailView, BaseBookingView):
model = Booking
template_name = 'bookings/booking/booking_detail.html'
booking = None
def dispatch(self, request, *args, **kwargs):
self.booking = get_object_or_404(Booking, pk=self.kwargs['pk'])
return super(BookingDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BookingDetailView, self).get_context_data(**kwargs)
resource_requires_form = Resource.objects.filter(category__booking_form=True)
form_needed = BookingOccurrence \
.objects \
.filter(booking=self.booking) \
.filter(resources__in=resource_requires_form) \
.distinct() \
.exists()
if form_needed:
context['booking_form'] = BookingFormForm(booking=self.booking)
return context
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
return super(BookingDetailView, self).get(request, *args, **kwargs)
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
return redirect(to='bookings:booking-form', pk=request.POST.get('occurrence'))
class BookingUpdateView(UpdateView, BaseBookingView):
model = Booking
template_name = 'bookings/booking/booking_edit.html'
fields = ['contact_first_name', 'contact_last_name', 'contact_email', 'contact_phone', 'contact_asso',
'reason', 'details']
decorators = [login_required, permission_required('bookings.change_booking')]
booking = None
def dispatch(self, request, *args, **kwargs):
self.booking = get_object_or_404(Booking, pk=self.kwargs['pk'])
return super(BookingUpdateView, self).dispatch(request, *args, **kwargs)
@method_decorator(decorators)
def get(self, request, *args, **kwargs):
return super(BookingUpdateView, self).get(request, *args, **kwargs)
@method_decorator(decorators)
def post(self, request, *args, **kwargs):
return super(BookingUpdateView, self).post(request, *args, **kwargs)
class BookingDeleteView(DeleteView, BaseBookingView):
model = Booking
decorators = [login_required, permission_required('bookings.delete_booking')]
booking = None
template_name = 'bookings/booking/booking_delete.html'
def dispatch(self, request, *args, **kwargs):
self.booking = self.get_object()
return super(BookingDeleteView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BookingDeleteView, self).get_context_data(**kwargs)
context['booking'] = self.booking
return context
def get_success_url(self):
return reverse_lazy('bookings:home')
@method_decorator(decorators)
def get(self, request, *args, **kwargs):
return super(BookingDeleteView, self).get(request, *args, **kwargs)
@method_decorator(decorators)
def delete(self, request, *args, **kwargs):
return super(BookingDeleteView, self).delete(request, *args, **kwargs)
| gpl-3.0 | 8,385,060,456,491,568,000 | 34.96875 | 106 | 0.66044 | false |
kchodorow/tensorflow | tensorflow/tools/graph_transforms/python/transform_graph_test.py | 170 | 3295 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.tools.graph_transforms import TransformGraph
class TransformGraphTest(test.TestCase):
# This test constructs a graph with a relu op that's not used by the normal
# inference path, and then tests that the strip_unused transform removes it as
# expected.
def testTransformGraph(self):
input_graph_def = graph_pb2.GraphDef()
const_op1 = input_graph_def.node.add()
const_op1.op = "Const"
const_op1.name = "const_op1"
const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op1.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[1, 2], dtypes.float32, [1, 2])))
const_op2 = input_graph_def.node.add()
const_op2.op = "Const"
const_op2.name = "const_op2"
const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op2.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[3, 4], dtypes.float32, [1, 2])))
# Create an add that has two constants as inputs.
add_op = input_graph_def.node.add()
add_op.op = "Add"
add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
add_op.name = "add_op"
add_op.input.extend(["const_op1", "const_op2"])
# Create a relu that reads from the add.
relu_op = input_graph_def.node.add()
relu_op.op = "Relu"
relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
relu_op.name = "relu_op"
relu_op.input.extend(["add_op"])
# We're specifying that add_op is the final output, and so the relu isn't
# needed.
input_names = []
output_names = ["add_op"]
transforms = ["strip_unused_nodes"]
transformed_graph_def = TransformGraph(input_graph_def, input_names,
output_names, transforms)
# We expect that the relu is no longer present after running the transform.
for node in transformed_graph_def.node:
self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,465,957,494,656,100,000 | 37.764706 | 80 | 0.676176 | false |
gchaimovitz/CouchPotatoServer | couchpotato/core/plugins/base.py | 11 | 15483 | import threading
from urllib import quote, getproxies
from urlparse import urlparse
import os.path
import time
import traceback
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \
randomString
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template
log = CPLog(__name__)
class Plugin(object):
_class_name = None
_database = None
plugin_path = None
enabled_option = 'enabled'
_needs_shutdown = False
_running = None
_locks = {}
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0'
http_last_use = {}
http_last_use_queue = {}
http_time_between_calls = 0
http_failed_request = {}
http_failed_disabled = {}
def __new__(cls, *args, **kwargs):
new_plugin = super(Plugin, cls).__new__(cls)
new_plugin.registerPlugin()
return new_plugin
def registerPlugin(self):
addEvent('app.do_shutdown', self.doShutdown)
addEvent('plugin.running', self.isRunning)
self._running = []
# Setup database
if self._database:
addEvent('database.setup', self.databaseSetup)
def databaseSetup(self):
for index_name in self._database:
klass = self._database[index_name]
fireEvent('database.setup_index', index_name, klass)
def conf(self, attr, value = None, default = None, section = None):
class_name = self.getName().lower().split(':')[0].lower()
return Env.setting(attr, section = section if section else class_name, value = value, default = default)
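    # A hedged example of how a subclass typically reads or writes its own
    # settings ('api_key' is an illustrative option name):
    #
    #   api_key = self.conf('api_key', default = '')
    #   self.conf('api_key', value = 'new_key')  # store a new value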
def deleteConf(self, attr):
return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())
def getName(self):
return self._class_name or self.__class__.__name__
def setName(self, name):
self._class_name = name
def renderTemplate(self, parent_file, templ, **params):
t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read())
return t.generate(**params)
def createFile(self, path, content, binary = False):
path = sp(path)
self.makeDir(os.path.dirname(path))
if os.path.exists(path):
log.debug('%s already exists, overwriting file with new version', path)
write_type = 'w+' if not binary else 'w+b'
# Stream file using response object
if isinstance(content, requests.models.Response):
# Write file to temp
with open('%s.tmp' % path, write_type) as f:
for chunk in content.iter_content(chunk_size = 1048576):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
# Rename to destination
os.rename('%s.tmp' % path, path)
else:
try:
f = open(path, write_type)
f.write(content)
f.close()
try:
os.chmod(path, Env.getPermission('file'))
except:
log.error('Failed writing permission to file "%s": %s', (path, traceback.format_exc()))
except:
log.error('Unable to write file "%s": %s', (path, traceback.format_exc()))
if os.path.isfile(path):
os.remove(path)
def makeDir(self, path):
path = sp(path)
try:
if not os.path.isdir(path):
os.makedirs(path, Env.getPermission('folder'))
os.chmod(path, Env.getPermission('folder'))
return True
except Exception as e:
log.error('Unable to create folder "%s": %s', (path, e))
return False
def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
folder = sp(folder)
for item in os.listdir(folder):
full_folder = sp(os.path.join(folder, item))
if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
for subfolder, dirs, files in os.walk(full_folder, topdown = False):
try:
os.rmdir(subfolder)
except:
if show_error:
log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc()))
try:
os.rmdir(folder)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
# http request
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False):
url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not data: data = {}
# Fill in some headers
parsed_url = urlparse(url)
host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))
headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
headers['Host'] = headers.get('Host', None)
headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
headers['Connection'] = headers.get('Connection', 'keep-alive')
headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
use_proxy = Env.setting('use_proxy')
proxy_url = None
if use_proxy:
proxy_server = Env.setting('proxy_server')
proxy_username = Env.setting('proxy_username')
proxy_password = Env.setting('proxy_password')
if proxy_server:
loc = "{0}:{1}@{2}".format(proxy_username, proxy_password, proxy_server) if proxy_username else proxy_server
proxy_url = {
"http": "http://"+loc,
"https": "https://"+loc,
}
else:
proxy_url = getproxies()
r = Env.get('http_opener')
# Don't try for failed requests
if self.http_failed_disabled.get(host, 0) > 0:
if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2('Disabled calls to %s for 15 minutes because of too many failed requests.', host)
                if not show_error:
                    raise Exception('Disabled calls to %s for 15 minutes because of too many failed requests' % host)
else:
return ''
else:
del self.http_failed_request[host]
del self.http_failed_disabled[host]
self.wait(host, url)
status_code = None
try:
kwargs = {
'headers': headers,
'data': data if len(data) > 0 else None,
'timeout': timeout,
'files': files,
'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
'stream': stream,
'proxies': proxy_url,
}
method = 'post' if len(data) > 0 or files else 'get'
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, **kwargs)
status_code = response.status_code
if response.status_code == requests.codes.ok:
data = response if stream else response.content
else:
response.raise_for_status()
self.http_failed_request[host] = 0
except (IOError, MaxRetryError, Timeout):
if show_error:
log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))
# Save failed requests by hosts
try:
                # Too many requests
if status_code in [429]:
self.http_failed_request[host] = 1
self.http_failed_disabled[host] = time.time()
if not self.http_failed_request.get(host):
self.http_failed_request[host] = 1
else:
self.http_failed_request[host] += 1
# Disable temporarily
if self.http_failed_request[host] > 5 and not isLocalIP(host):
self.http_failed_disabled[host] = time.time()
except:
log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))
raise
self.http_last_use[host] = time.time()
return data
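    # A typical call to urlopen() above from a plugin subclass (URL and header
    # are illustrative only, not part of any real API):
    #   data = self.urlopen('https://example.com/api', headers = {'X-Key': 'abc'})
    # Failed hosts are temporarily disabled, and calls are throttled via wait() below.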
def wait(self, host = '', url = ''):
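        # Per-host throttle: requests for the same host are queued, and each call
        # sleeps until at least `http_time_between_calls` seconds have passed
        # since the previous request to that host.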
if self.http_time_between_calls == 0:
return
try:
if host not in self.http_last_use_queue:
self.http_last_use_queue[host] = []
self.http_last_use_queue[host].append(url)
            while not self.shuttingDown():
wait = (self.http_last_use.get(host, 0) - time.time()) + self.http_time_between_calls
if self.http_last_use_queue[host][0] != url:
time.sleep(.1)
continue
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
time.sleep(min(wait, 30))
else:
self.http_last_use_queue[host] = self.http_last_use_queue[host][1:]
self.http_last_use[host] = time.time()
break
except:
log.error('Failed handling waiting call: %s', traceback.format_exc())
time.sleep(self.http_time_between_calls)
def beforeCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__))
def afterCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)
def doShutdown(self, *args, **kwargs):
self.shuttingDown(True)
return True
def shuttingDown(self, value = None):
if value is None:
return self._needs_shutdown
self._needs_shutdown = value
def isRunning(self, value = None, boolean = True):
if value is None:
return self._running
if boolean:
self._running.append(value)
else:
try:
self._running.remove(value)
except:
log.error("Something went wrong when finishing the plugin function. Could not find the 'is_running' key")
def getCache(self, cache_key, url = None, **kwargs):
use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')
if use_cache:
cache_key_md5 = md5(cache_key)
cache = Env.get('cache').get(cache_key_md5)
if cache:
if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
return cache
if url:
try:
cache_timeout = 300
if 'cache_timeout' in kwargs:
cache_timeout = kwargs.get('cache_timeout')
del kwargs['cache_timeout']
data = self.urlopen(url, **kwargs)
if data and cache_timeout > 0 and use_cache:
self.setCache(cache_key, data, timeout = cache_timeout)
return data
except:
if not kwargs.get('show_error', True):
raise
log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
return ''
def setCache(self, cache_key, value, timeout = 300):
cache_key_md5 = md5(cache_key)
log.debug('Setting cache %s', cache_key)
Env.get('cache').set(cache_key_md5, value, timeout)
return value
def createNzbName(self, data, media, unique_tag = False):
release_name = data.get('name')
tag = self.cpTag(media, unique_tag = unique_tag)
# Check if password is filename
name_password = scanForPassword(data.get('name'))
if name_password:
release_name, password = name_password
tag += '{{%s}}' % password
elif data.get('password'):
tag += '{{%s}}' % data.get('password')
max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames
return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)
def createFileName(self, data, filedata, media, unique_tag = False):
name = self.createNzbName(data, media, unique_tag = unique_tag)
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol'))
def cpTag(self, media, unique_tag = False):
tag = ''
if Env.setting('enabled', 'renamer') or unique_tag:
identifier = getIdentifier(media) or ''
unique_tag = ', ' + randomString() if unique_tag else ''
tag = '.cp('
tag += identifier
tag += ', ' if unique_tag and identifier else ''
tag += randomString() if unique_tag else ''
tag += ')'
return tag if len(tag) > 7 else ''
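    # Illustrative cpTag() results (identifier is hypothetical, not real data):
    #   with the renamer enabled and getIdentifier(media) == 'tt0123456':
    #     cpTag(media)                  -> '.cp(tt0123456)'
    #     cpTag(media, unique_tag=True) -> '.cp(tt0123456, <random string>)'
    #   tags of 7 characters or less (no identifier, no unique tag) return ''.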
def checkFilesChanged(self, files, unchanged_for = 60):
now = time.time()
file_too_new = False
file_time = []
for cur_file in files:
# File got removed while checking
if not os.path.isfile(cur_file):
file_too_new = now
break
# File has changed in last 60 seconds
file_time = self.getFileTimes(cur_file)
for t in file_time:
if t > now - unchanged_for:
file_too_new = tryInt(time.time() - t)
break
if file_too_new:
break
if file_too_new:
try:
time_string = time.ctime(file_time[0])
except:
try:
time_string = time.ctime(file_time[1])
except:
time_string = 'unknown'
return file_too_new, time_string
return False, None
def getFileTimes(self, file_path):
return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0]
def isDisabled(self):
return not self.isEnabled()
def isEnabled(self):
return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None
def acquireLock(self, key):
lock = self._locks.get(key)
if not lock:
self._locks[key] = threading.RLock()
log.debug('Acquiring lock: %s', key)
self._locks.get(key).acquire()
def releaseLock(self, key):
lock = self._locks.get(key)
if lock:
log.debug('Releasing lock: %s', key)
self._locks.get(key).release()
| gpl-3.0 | 7,182,638,869,135,260,000 | 33.560268 | 136 | 0.542337 | false |
jeremysanders/veusz | veusz/utils/feedback.py | 3 | 6020 | # Copyright (C) 2018 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division, absolute_import, print_function
from collections import defaultdict
import datetime
import sys
import atexit
import platform
import sip
import numpy as N
from .. import qtall as qt
from .utilfuncs import rrepr
from .version import version
from ..compat import citems, curlrequest, curlencode
"""Feedback module for providing information about usage.
Note: careful not to send a unique identifier and to reset counts to
ensure lack of traceability.
"""
# patch this to disable any feedback
disableFeedback=False
# for QSettings
_org='veusz.org'
_app='veusz-feedback'
_url='https://barmag.net/veusz-feedback/'
# min send interval in days
_mininterval = 7
# min interval to try sending in days
_minattemptinterval = 1
class Feedback:
"""Keep track of number of activities."""
def __init__(self):
# counts of widget creation
self.widgetcts = defaultdict(int)
# counts of data import
self.importcts = defaultdict(int)
# counts of data export
self.exportcts = defaultdict(int)
# singleton
feedback = Feedback()
@atexit.register
def updatects():
"""Add saved counts with values from app."""
#print("running updates")
setn = qt.QSettings(_org, _app)
# get statistics and reset in config file
widgetcts = eval(setn.value('counts/widget', '{}'))
importcts = eval(setn.value('counts/import', '{}'))
exportcts = eval(setn.value('counts/export', '{}'))
# add existing counts
for k, v in citems(feedback.widgetcts):
widgetcts[k] = widgetcts.get(k, 0) + v
for k, v in citems(feedback.importcts):
importcts[k] = importcts.get(k, 0) + v
for k, v in citems(feedback.exportcts):
exportcts[k] = exportcts.get(k, 0) + v
setn.setValue('counts/widget', rrepr(widgetcts))
setn.setValue('counts/import', rrepr(importcts))
setn.setValue('counts/export', rrepr(exportcts))
class FeedbackCheckThread(qt.QThread):
"""Async thread to send feedback."""
def run(self):
from ..setting import settingdb
# exit if disabled
if (settingdb['feedback_disabled'] or
disableFeedback or
not settingdb['feedback_asked_user']):
#print('disabled')
return
setn = qt.QSettings(_org, _app)
# keep track of when we successfully sent the data (lastsent)
# and when we last tried (lastattempt), so we don't send too
# often
today = datetime.date.today()
today_tpl = (today.year, today.month, today.day)
# don't try to send too often
lastattempt = setn.value('last-attempt', '(2000,1,1)')
lastattempt = datetime.date(*eval(lastattempt))
delta_attempt = (today-lastattempt).days
if delta_attempt<_minattemptinterval:
#print("too soon 1")
return
lastsent = setn.value('last-sent')
if not lastsent:
delta_sent = -1
else:
lastsent = datetime.date(*eval(lastsent))
delta_sent = (today-lastsent).days
# are we within the send period
if delta_sent<_mininterval:
#print("too soon 2")
return
# avoid accessing url too often by updating date first
setn.setValue('last-attempt', repr(today_tpl))
# get statistics and reset in config file
widgetcts = setn.value('counts/widget', '{}')
importcts = setn.value('counts/import', '{}')
exportcts = setn.value('counts/export', '{}')
try:
winver = str(sys.getwindowsversion())
except Exception:
winver = 'N/A'
# construct post message - these are the data sent to the
# remote server
args = {
'interval': str(delta_sent),
'veusz-version': version(),
'python-version': sys.version,
'python-version_info': repr(tuple(sys.version_info)),
'python-platform': sys.platform,
'platform-machine': platform.machine(),
'windows-version': winver,
'numpy-version': N.__version__,
'qt-version': qt.qVersion(),
'pyqt-version': qt.PYQT_VERSION_STR,
'sip-version': sip.SIP_VERSION_STR,
'locale': qt.QLocale().name(),
'widgetcts': widgetcts,
'importcts': importcts,
'exportcts': exportcts,
}
postdata = curlencode(args).encode('utf8')
# now post the data
try:
f = curlrequest.urlopen(_url, postdata)
retn = f.readline().decode('utf8').strip()
f.close()
if retn == 'ok':
#print("success")
# reset in stats file and set date last done
setn.setValue('counts/widget', '{}')
setn.setValue('counts/import', '{}')
setn.setValue('counts/export', '{}')
setn.setValue('last-sent', repr(today_tpl))
except Exception as e:
#print("failure",e)
pass
| gpl-2.0 | 8,184,528,813,620,718,000 | 32.076923 | 79 | 0.604983 | false |
waltharius/NewsBlur | vendor/tweepy/utils.py | 32 | 1355 | # Tweepy
# Copyright 2010 Joshua Roesslein
# See LICENSE for details.
from datetime import datetime
import time
import htmlentitydefs
import re
import locale
from urllib import quote
from email.utils import parsedate
def parse_datetime(string):
return datetime(*(parsedate(string)[:6]))
def parse_html_value(html):
return html[html.find('>')+1:html.rfind('<')]
def parse_a_href(atag):
start = atag.find('"') + 1
end = atag.find('"', start)
return atag[start:end]
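# Illustrative examples for the two helpers above (inputs are hypothetical):
#   parse_html_value('<b>Hello</b>') -> 'Hello' (text between the first '>' and the last '<')
#   parse_a_href('<a href="http://example.com">x</a>') -> 'http://example.com'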
def convert_to_utf8_str(arg):
# written by Michael Norton (http://docondev.blogspot.com/)
if isinstance(arg, unicode):
arg = arg.encode('utf-8')
elif not isinstance(arg, str):
arg = str(arg)
return arg
def import_simplejson():
try:
import simplejson as json
except ImportError:
try:
import json # Python 2.6+
except ImportError:
try:
from django.utils import simplejson as json # Google App Engine
except ImportError:
raise ImportError, "Can't load a json library"
return json
def list_to_csv(item_list):
if item_list:
return ','.join([str(i) for i in item_list])
def urlencode_noplus(query):
return '&'.join(['%s=%s' % (quote(str(k), ''), quote(str(v), '')) \
for k, v in query.iteritems()])
| mit | 8,848,843,691,041,695,000 | 21.583333 | 80 | 0.620664 | false |
damdam-s/OpenUpgrade | addons/l10n_ve/__openerp__.py | 260 | 2960 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
##############################################################################
# Module programed and financed by:
# Vauxoo, C.A. (<http://vauxoo.com>).
# Our Community team mantain this module:
# https://launchpad.net/~openerp-venezuela
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Venezuela - Accounting',
'version': '1.0',
'author': ['OpenERP SA', 'Vauxoo'],
'category': 'Localization/Account Charts',
'description':
"""
Chart of Account for Venezuela.
===============================
Venezuela doesn't have a chart of accounts mandated by law, but the default
proposed in OpenERP should comply with generally accepted best practices in
Venezuela; this chart follows those practices.
This module has been tested as a base for more than 1000 companies, because
it is based on a mixture of the most common accounting software in the
Venezuelan market, which should make accountants' first steps with OpenERP
more comfortable.
This module doesn't pretend to be the complete localization for Venezuela,
but it will help you to start really quickly with OpenERP in this country.
This module gives you:
-----------------------
- Basic taxes for Venezuela.
- Basic data to run tests with the community localization.
- Start a company from scratch if your needs are basic from an accounting point of view.
We recommend installing account_anglo_saxon if you want your stock valued
as Venezuela does, without invoices.
If you install this module and select a custom chart, a basic chart will be
proposed, but you will need to set the account defaults for taxes manually.
""",
'depends': ['account',
'base_vat',
'account_chart'
],
'demo': [],
'data': ['data/account_tax_code.xml',
'data/account_user_types.xml',
'data/account_chart.xml',
'data/account_tax.xml',
'data/l10n_chart_ve_wizard.xml'
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,216,028,413,622,194,000 | 37.947368 | 84 | 0.631081 | false |
ennoborg/gramps | gramps/gui/glade/catalog/grampswidgets.py | 11 | 1186 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# glade/catalog/grampswidgets.py
from gi.repository import Gtk
class ValidatableMaskedEntry(Gtk.Entry):
__gtype_name__ = 'ValidatableMaskedEntry'
class UndoableEntry(Gtk.Entry):
__gtype_name__ = 'UndoableEntry'
class StyledTextEditor(Gtk.TextView):
__gtype_name__ = 'StyledTextEditor'
class UndoableBuffer(Gtk.TextBuffer):
__gtype_name__ = 'UndoableBuffer'
| gpl-2.0 | 8,875,095,909,280,215,000 | 32.885714 | 79 | 0.751265 | false |
Kismuz/btgym | btgym/research/model_based/model/bivariate.py | 1 | 29118 | import numpy as np
from collections import namedtuple
from btgym.research.model_based.model.rec import Zscore, ZscoreState, Covariance, CovarianceState
from btgym.research.model_based.model.rec import OUEstimatorState
from btgym.research.model_based.model.univariate import OUProcess, TimeSeriesModel
BivariateTSModelState = namedtuple('BivariateTSModelState', ['p', 's', 'stat', 'ps_stat'])
class BivariateTSModel:
"""
Two-factor bivariate time-series model.
Motivating papers:
Eduardo Schwartz, James E. Smith, "Short-Term Variations and Long-Term Dynamics in Commodity Prices",
in "Management Science", Vol. 46, No. 7, July 2000 pp. 893–911
Harris, D., "Principal components analysis of cointegrated time series," in "Econometric Theory", Vol. 13, 1997
"""
    # TODO: trajectory generator uses a simplified algorithm: the entire trajectory is generated from a single model state
# TODO: proper state-space model approach
# TODO: should be: sample [randomized?] trajectory of states -> sample realisation trajectory of same length
# Decomposition matrix:
u_decomp = np.asarray([[.5, .5], [.5, -.5]])
# Reconstruction (inverse u_decomp):
u_recon = np.asarray([[1., 1.], [1., -1.]])
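    # Minimal arithmetic sketch of the decomposition (values illustrative only):
    # with zero mean and unit variance, x = [x1, x2] maps to
    #   p = .5 * (x1 + x2)   (average, max. variance component)
    #   s = .5 * (x1 - x2)   (difference, max. stationarity component)
    # and u_recon inverts it exactly: x1 = p + s, x2 = p - s.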
def __init__(
self,
max_length,
analyzer_window,
p_analyzer_grouping=None,
s_analyzer_grouping=None,
alpha=None,
filter_alpha=None,
stat_alpha=None,
ps_alpha=None,
):
"""
Args:
max_length: uint, maximum time-series trajectory length to keep;
analyzer_window: uint, SSA embedding window (shared for P and S analyzers);
p_analyzer_grouping: P process SSA decomposition triples grouping,
iterable of pairs convertible to python slices, i.e.:
grouping=[[0,1], [1,2], [2, None]];
s_analyzer_grouping: P process SSA decomposition triples grouping, se above;
alpha: float in [0, 1], SSA and processes estimators decaying factor;
filter_alpha: float in [0, 1], processes smoothing decaying factor;
stat_alpha: float in [0, 1], time-series statistics tracking decaying factor;
ps_alpha: float in [0, 1], P|S processes covariance tracking decaying factor;
"""
max_length = np.atleast_1d(max_length)
analyzer_window = np.atleast_1d(analyzer_window)
alpha = np.atleast_1d(alpha)
filter_alpha = np.atleast_1d(filter_alpha)
# Max. variance factor component (average):
self.p = TimeSeriesModel(
max_length[0],
analyzer_window[0],
p_analyzer_grouping,
alpha[0],
filter_alpha[0]
)
# Max. stationarity factor component (difference):
self.s = TimeSeriesModel(
max_length[-1],
analyzer_window[-1],
s_analyzer_grouping,
alpha[-1],
filter_alpha[-1]
)
# Statistics of original data:
self.stat = Zscore(2, stat_alpha)
# Stochastic processes covariance:
self.ps_stat = Covariance(2, ps_alpha)
def ready(self):
return self.s.ready() and self.p.ready()
def get_state(self):
return BivariateTSModelState(
p=self.p.get_state(),
s=self.s.get_state(),
stat=self.stat.get_state(),
ps_stat=self.ps_stat.get_state()
)
@staticmethod
def get_random_state(p_params, s_params, mean=(100, 100), variance=(1, 1), ps_corrcoef=(-1, 1)):
"""
Samples random uniform model state w.r.t. parameters intervals given.
Args:
p_params: dict, P stochastic process parameters, see kwargs at: OUProcess.get_random_state
s_params: dict, S stochastic process parameters, see kwargs at: OUProcess.get_random_state
mean: iterable of floats as [lower_bound, upper_bound], time-series means sampling interval.
variance: iterable of floats as [lower_bound, upper_bound], time-series variances sampling interval.
ps_corrcoef: iterable of floats as [lower_bound, upper_bound], correlation coefficient
for P and S process innovations, -1 <= ps_corrcoef <= 1
Returns:
instance of BivariateTSModelState
Note:
negative means are allowed.
"""
sample = dict()
for name, param, low_threshold in zip(
['mean', 'variance', 'ps_corrcoef'], [mean, variance, ps_corrcoef], [-np.inf, 1e-8, -1.0]):
interval = np.asarray(param)
assert interval.ndim == 1 and interval[0] <= interval[-1], \
' Expected param `{}` as iterable of ordered values as: [lower_bound, upper_bound], got: {}'.format(
name, interval
)
assert interval[0] >= low_threshold, \
'Expected param `{}` lower bound be no less than {}, got: {}'.format(name, low_threshold, interval[0])
sample[name] = np.random.uniform(low=interval[0], high=interval[-1], size=2)
# Correlation matrix instead of covariance - it is ok as it gets normalized when sampling anyway:
rho = np.eye(2)
rho[0, 1] = rho[1, 0] = sample['ps_corrcoef'][0]
# TODO: log-uniform sampling for s, p params
return BivariateTSModelState(
p=TimeSeriesModel.get_random_state(**p_params),
s=TimeSeriesModel.get_random_state(**s_params),
stat=ZscoreState(
mean=sample['mean'],
variance=sample['variance']
),
ps_stat=CovarianceState(
mean=np.zeros(2),
variance=np.ones(2),
covariance=rho,
),
)
@staticmethod
def _decompose(trajectory, mean, variance, u):
"""
Returns orthonormal decomposition of pair [X1, X2].
        Static method, can be used as a stand-alone function.
Args:
trajectory: time-series data of shape [2, num_points]
mean: data mean of size [2]
variance: data variance of size [2]
u: [2, 2] decomposition matrix
Returns:
            data projection of size [2, num_points], where the first (P) component is the average and the second (S)
            is the difference of the original time-series.
"""
assert len(trajectory.shape) == 2 and trajectory.shape[0] == 2, \
'Expected data as array of size [2, num_points], got: {}'.format(trajectory.shape)
assert mean.shape == (2,) and variance.shape == (2,), \
'Expected mean and variance as vectors of size [2], got: {}, {}'.format(mean.shape, variance.shape)
assert u.shape == (2, 2), 'Expected U as 2x2 matrix, got: {}'.format(u.shape)
# Z-score data:
# Mind swapped STD!
norm_data = (trajectory - mean[:, None]) / np.clip(variance[:, None], 1e-8, None) ** .5
ps_decomposition = np.matmul(u, norm_data)
return ps_decomposition
@staticmethod
def _reconstruct(ps_decomposition, mean, variance, u):
"""
Returns original data [X1, X2] given orthonormal P|S decomposition .
        Static method, can be used as a stand-alone function.
Args:
ps_decomposition: data ps-decomposition of size [2, num_points]
mean: original data mean of size [2]
variance: original data variance of size [2]
u: [2, 2] reconstruction matrix
Returns:
            reconstructed data of size [2, num_points]
"""
assert len(ps_decomposition.shape) == 2 and ps_decomposition.shape[0] == 2, \
'Expected data as array of size [2, num_points], got: {}'.format(ps_decomposition.shape)
assert mean.shape == (2,) and variance.shape == (2,), \
'Expected mean and variance as vectors of size [2], got: {}, {}'.format(mean.shape, variance.shape)
assert u.shape == (2, 2), 'Expected U as 2x2 matrix, got: {}'.format(u.shape)
return np.matmul(u, ps_decomposition) * variance[:, None] ** .5 + mean[:, None]
def decompose(self, trajectory):
"""
Returns orthonormal decomposition of pair [X1, X2] w.r.t current statistics.
Args:
trajectory: time-series data of shape [2, num_points]
Returns:
            tuple (P, S), where the first (P) component is the average and the second (S) is the difference
            of the original time-series, of size [num_points] each
"""
ps_decomp = self._decompose(trajectory, self.stat.mean, self.stat.variance, self.u_decomp)
return ps_decomp[0, :], ps_decomp[1, :]
def reconstruct(self, p, s, mean=None, variance=None):
"""
Returns original data [X1, X2] given orthonormal P|S decomposition.
Args:
p: data p-component of shape [num_points]
s: data s-component of shape [num_points]
mean: original data mean of size [2] or None
variance: original data variance of size [2] or None
Returns:
            reconstructed data of size [2, num_points]
Notes:
if either mean or variance arg is not given - stored mean and variance are used.
"""
assert p.shape == s.shape, ' Expected components be same size but got: {} and {}'.format(p.shape, s.shape)
if mean is None or variance is None:
mean = self.stat.mean
variance = self.stat.variance
ps = np.stack([p, s], axis=0)
return self._reconstruct(ps, mean, variance, self.u_recon)
def reset(self, init_trajectory):
"""
Resets model parameters and trajectories given initial data.
Args:
init_trajectory: initial time-series observations of size [2, num_points]
"""
_ = self.stat.reset(init_trajectory)
p_data, s_data = self.decompose(init_trajectory)
self.p.reset(p_data)
self.s.reset(s_data)
residuals = np.stack(
[self.p.process.estimator.residuals, self.s.process.estimator.residuals],
axis=0
)
_ = self.ps_stat.reset(residuals)
def update(self, trajectory, disjoint=False):
"""
Updates model parameters and trajectories given new data.
Args:
trajectory: time-series update observations of size [2, num_points], where:
num_points <= min{p_params[max_length], s_params[max_length]} is necessary
to keep model trajectory continuous
disjoint: bool, indicates whether update given is continuous or disjoint w.r.t. previous one
"""
_ = self.stat.update(trajectory) # todo: this stat.estimator does not respect `disjoint` arg.; ?!!
p_data, s_data = self.decompose(trajectory)
self.p.update(p_data, disjoint)
self.s.update(s_data, disjoint)
residuals = np.stack(
[self.p.process.estimator.residuals, self.s.process.estimator.residuals],
axis=0
)
_ = self.ps_stat.update(residuals)
def transform(self, trajectory=None, state=None, size=None):
"""
Returns per-component analyzer data decomposition.
Args:
trajectory: bivariate data to decompose of size [2, num_points] or None
state: instance of BivariateTSModelState or None
size: uint, size of decomposition to get, or None
Returns:
array of [size or num_points], array of [size or num_points], ZscoreState(2)
- SSA transformations of P, S components of given trajectory w.r.t. given state
- bivariate trajectory statistics (means and variances)
Notes:
if no `trajectory` is given - returns stored data decomposition
if no `state` arg. is given - uses stored analyzer state.
if no 'size` arg is given - decomposes full [stored or given] trajectory
"""
if state is not None:
assert isinstance(state, BivariateTSModelState),\
'Expected `state as instance of BivariateTSModelState but got: {}`'.format(type(state))
s_state = state.s
p_state = state.p
stat = state.stat
else:
assert trajectory is None, 'When `trajectory` arg. is given, `state` is required'
p_state = None
s_state = None
stat = self.stat.get_state()
if trajectory is not None:
ps_data = self._decompose(trajectory, stat.mean, stat.variance, self.u_decomp)
p_data = ps_data[0, :]
s_data = ps_data[1, :]
else:
p_data = None
s_data = None
p_transformed = self.p.transform(p_data, p_state, size)
s_transformed = self.s.transform(s_data, s_state, size)
return p_transformed, s_transformed, stat
def get_trajectory(self, size=None, reconstruct=True):
"""
Returns stored decomposition fragment and [optionally] time-series reconstruction.
        TODO: reconstruction is approximate because only the last stored statistic is used
Args:
size: uint, fragment length to get in [1, ..., max_length] or None
reconstruct: bool, if True - also return data reconstruction
Returns:
array of [size ... max_length], array of [size ... max_length], array of size [2, size ... max_length]
or
array of [size ... max_length], array of [size ... max_length], None
P,C [, and 2D trajectory] series as [ x[-size], x[-size+1], ... x[-1] ], up to length [size];
if no `size` arg. is given - returns entire stored trajectory, up to length [max_length].
"""
p_data = self.p.get_trajectory(size)
s_data = self.s.get_trajectory(size)
if reconstruct:
trajectory = self.reconstruct(p_data, s_data)
else:
trajectory = None
return p_data, s_data, trajectory
@staticmethod
def generate_trajectory_fn(batch_size, size, state, reconstruct=False, u_recon=None):
"""
Generates batch of time-series realisations given model state.
        Static method, can be used as a stand-alone function.
Args:
batch_size: uint, number of trajectories to generates
size: uint, trajectory length to generate
state: instance of BivariateTSModelState;
reconstruct: bool, if True - return time-series along with P, S trajectories, return None otherwise
u_recon: reconstruction matrix of size [2, 2] or None; required if reconstruct=True;
Returns:
generated P and S processes realisations of size [batch_size, 2, size];
generated time-series reconstructions of size [batch_size, 2, size] or None;
"""
assert isinstance(state, BivariateTSModelState), \
'Expected `state` as instance of BivariateTSModelState, got: {}'.format(type(state))
if reconstruct:
assert u_recon is not None, 'reconstruct=True but reconstruction matrix is not provided.'
# Unpack:
p_state = state.p.process
s_state = state.s.process
# Get all samples for single batch (faster):
p_params = OUProcess.sample_naive_unbiased(p_state, batch_size)
s_params = OUProcess.sample_naive_unbiased(s_state, batch_size)
# Concatenate batch-wise:
parameters = OUEstimatorState(
mu=np.concatenate([p_params.mu, s_params.mu]),
log_theta=np.concatenate([p_params.log_theta, s_params.log_theta]),
log_sigma=np.concatenate([p_params.log_sigma, s_params.log_sigma]),
)
driver_df = np.concatenate(
[
np.tile(p_state.driver_df, batch_size),
np.tile(s_state.driver_df, batch_size),
]
)
# Access multivariate generator_fn directly to get batch of bivariate OU:
batch_2x = OUProcess.generate_trajectory_fn(2 * batch_size, size, parameters, driver_df)
batch_2x = np.reshape(batch_2x, [2, batch_size, -1])
batch_2x = np.swapaxes(batch_2x, 0, 1)
if reconstruct:
x = np.matmul(u_recon, batch_2x) * state.stat.variance[None, :, None] ** .5 \
+ state.stat.mean[None, :, None]
else:
x = None
return batch_2x, x
@staticmethod
def generate_bivariate_trajectory_fn(batch_size, size, state, reconstruct=False, u_recon=None):
"""
Generates batch of time-series realisations given model state.
        Static method, can be used as a stand-alone function.
Args:
batch_size: uint, number of trajectories to generates
size: uint, trajectory length to generate
state: instance of BivariateTSModelState;
reconstruct: bool, if True - return time-series along with P, S trajectories, return None otherwise
u_recon: reconstruction matrix of size [2, 2] or None; required if reconstruct=True;
Returns:
generated P and S processes realisations of size [batch_size, 2, size];
generated time-series reconstructions of size [batch_size, 2, size] or None;
"""
assert isinstance(state, BivariateTSModelState), \
'Expected `state` as instance of BivariateTSModelState, got: {}'.format(type(state))
if reconstruct:
assert u_recon is not None, 'reconstruct=True but reconstruction matrix is not provided.'
# Unpack:
p_state = state.p.process
s_state = state.s.process
# Get all samples for single batch (faster):
p_params = OUProcess.sample_naive_unbiased(p_state, 1)
s_params = OUProcess.sample_naive_unbiased(s_state, 1)
# Concatenate batch-wise:
parameters = OUEstimatorState(
mu=np.concatenate([p_params.mu, s_params.mu]),
log_theta=np.concatenate([p_params.log_theta, s_params.log_theta]),
log_sigma=np.concatenate([p_params.log_sigma, s_params.log_sigma]),
)
driver_df = np.asarray([p_state.driver_df, s_state.driver_df])
# Access multivariate generator_fn directly to get batch of 2d correlated OU's:
batch_2d = OUProcess.generate_multivariate_trajectory_fn(
batch_size=batch_size,
size=size,
parameters=parameters,
t_df=driver_df,
covariance=state.ps_stat.covariance
)
batch_2d = np.swapaxes(batch_2d, 1, 2)
if reconstruct:
x = np.matmul(u_recon, batch_2d) * state.stat.variance[None, :, None] ** .5 \
+ state.stat.mean[None, :, None]
else:
x = None
return batch_2d, x
def generate(self, batch_size, size, state=None, reconstruct=True):
"""
Generates batch of time-series realisations given model state.
Args:
batch_size: uint, number of trajectories to generates
size: uint, trajectory length to generate
state: instance of BivariateTSModelState or None;
if no state provided - current state is used.
reconstruct: bool, if True - return time-series along with P, S trajectories, return None otherwise
Returns:
generated P and S processes realisations of size [batch_size, 2, size];
generated time-series reconstructions of size [batch_size, 2, size] or None;
"""
if state is None:
# Fit student-t df:
_ = self.p.process.driver_estimator.fit()
_ = self.s.process.driver_estimator.fit()
state = self.get_state()
# return self.generate_trajectory_fn(batch_size, size, state, reconstruct, self.u_recon)
return self.generate_bivariate_trajectory_fn(batch_size, size, state, reconstruct, self.u_recon)
class BivariatePriceModel(BivariateTSModel):
"""
Wrapper class for positive-valued time-series.
Internally works with log-transformed data.
"""
def reset(self, init_trajectory):
"""
Resets model parameters and trajectories given initial data.
Args:
init_trajectory: initial time-series observations of size [2, num_points]
"""
return super().reset(np.log(init_trajectory))
def update(self, trajectory, disjoint=False):
"""
Updates model parameters and trajectories given new data.
Args:
trajectory: time-series update observations of size [2, num_points], where:
num_points <= min{p_params[max_length], s_params[max_length]} is necessary
to keep model trajectory continuous
disjoint: bool, indicates whether update given is continuous or disjoint w.r.t. previous one
"""
return super().update(np.log(trajectory), disjoint)
def transform(self, trajectory=None, state=None, size=None):
"""
Returns per-component analyzer data decomposition.
Args:
trajectory: data to decompose of size [2, num_points] or None
state: instance of BivariateTSModelState or None
size: uint, size of decomposition to get, or None
Returns:
array of [size or num_points], array of [size or num_points], ZscoreState(2)
- SSA transformations of P, S components of given trajectory w.r.t. given state
- bivariate trajectory statistics (means and variances)
Notes:
if no `trajectory` is given - returns stored data decomposition
if no `state` arg. is given - uses stored analyzer state.
if no 'size` arg is given - decomposes full [stored or given] trajectory
"""
if trajectory is not None:
trajectory = np.log(trajectory)
return super().transform(trajectory, state, size)
def get_trajectory(self, size=None, reconstruct=True):
"""
Returns stored decomposition fragment and [optionally] time-series reconstruction.
        TODO: reconstruction is approximate because only the last stored statistic is used
Args:
size: uint, fragment length to get in [1, ..., max_length] or None
reconstruct: bool, if True - also return data reconstruction
Returns:
array of [size ... max_length], array of [size ... max_length], array of size [2, size ... max_length]
or
array of [size ... max_length], array of [size ... max_length], None
P,C [, and 2D trajectory] series as [ x[-size], x[-size+1], ... x[-1] ], up to length [size];
if no `size` arg. is given - returns entire stored trajectory, up to length [max_length].
"""
p_data, s_data, trajectory = super().get_trajectory(size, reconstruct)
if reconstruct:
trajectory = np.exp(trajectory)
return p_data, s_data, trajectory
@staticmethod
def get_random_state(p_params, s_params, mean=(100, 100), variance=(1, 1), ps_corrcoef=(-1, 1)):
"""
Samples random uniform model state w.r.t. intervals given.
Args:
p_params: dict, P stochastic process parameters, see kwargs at: OUProcess.get_random_state
s_params: dict, S stochastic process parameters, see kwargs at: OUProcess.get_random_state
mean: iterable of floats as [0 < lower_bound, upper_bound], time-series means sampling interval.
variance: iterable of floats as [0 < lower_bound, upper_bound], time-series variances sampling interval.
ps_corrcoef: iterable of floats as [-1 <= lower_bound, upper_bound <= 1], correlation coefficient
for P and S process innovations.
Returns:
instance of BivariateTSModelState
Note:
negative means are rejected;
P and S processes fitted over log_transformed data;
"""
sample = dict()
for name, param, low_threshold in zip(
['mean', 'variance', 'ps_corrcoef'], [mean, variance, ps_corrcoef], [1e-8, 1e-8, -1.0]):
interval = np.asarray(param)
assert interval.ndim == 1 and interval[0] <= interval[-1], \
' Expected param `{}` as iterable of ordered values as: [lower_bound, upper_bound], got: {}'.format(
name, interval
)
assert interval[0] >= low_threshold, \
'Expected param `{}` lower bound be no less than {}, got: {}'.format(name, low_threshold, interval[0])
sample[name] = np.random.uniform(low=interval[0], high=interval[-1], size=2)
# Correlation matrix instead of covariance - it is ok as it gets normalized when sampling anyway:
rho = np.eye(2)
rho[0, 1] = rho[1, 0] = sample['ps_corrcoef'][0]
# Log_transform mean and variance (those is biased estimates but ok for rnd. samples):
log_variance = np.log(sample['variance'] / sample['mean'] ** 2 + 1)
log_mean = np.log(sample['mean']) - .5 * log_variance
# Inverse transform memo:
# mean = exp(log_mean + 0.5 * log_var)
# var = mean**2 * (exp(log_var) -1)
return BivariateTSModelState(
p=TimeSeriesModel.get_random_state(**p_params),
s=TimeSeriesModel.get_random_state(**s_params),
stat=ZscoreState(
mean=log_mean,
variance=log_variance
),
ps_stat=CovarianceState(
mean=np.zeros(2),
variance=np.ones(2),
covariance=rho,
),
)
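    # Numeric sanity check for the log-transform used above (numbers are
    # illustrative only): mean = 100, variance = 25 gives
    #   log_variance = log(25 / 100**2 + 1) = log(1.0025)
    #   log_mean = log(100) - .5 * log(1.0025)
    # so exp(log_mean + .5 * log_variance) recovers 100 and
    # 100**2 * (exp(log_variance) - 1) recovers 25.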
@staticmethod
def generate_trajectory_fn(batch_size, size, state, reconstruct=False, u_recon=None):
"""
Generates batch of time-series realisations given model state.
        Static method, can be used as a stand-alone function.
Args:
batch_size: uint, number of trajectories to generates
size: uint, trajectory length to generate
state: instance of BivariateTSModelState;
reconstruct: bool, if True - return time-series along with P, S trajectories, return None otherwise
u_recon: reconstruction matrix of size [2, 2] or None; required if reconstruct=True;
Returns:
generated P and S processes realisations of size [batch_size, 2, size];
generated time-series reconstructions of size [batch_size, 2, size] or None;
"""
batch_2x, x = BivariateTSModel.generate_trajectory_fn(batch_size, size, state, reconstruct, u_recon)
if reconstruct:
x = np.exp(x)
return batch_2x, x
@staticmethod
def generate_bivariate_trajectory_fn(batch_size, size, state, reconstruct=False, u_recon=None):
"""
Generates batch of time-series realisations given model state.
        Static method, can be used as a stand-alone function.
Args:
batch_size: uint, number of trajectories to generates
size: uint, trajectory length to generate
state: instance of BivariateTSModelState;
reconstruct: bool, if True - return time-series along with P, S trajectories, return None otherwise
u_recon: reconstruction matrix of size [2, 2] or None; required if reconstruct=True;
Returns:
generated P and S processes realisations of size [batch_size, 2, size];
generated time-series reconstructions of size [batch_size, 2, size] or None;
"""
batch_2d, x = BivariateTSModel.generate_bivariate_trajectory_fn(batch_size, size, state, reconstruct, u_recon)
if reconstruct:
x = np.exp(x)
return batch_2d, x
class BPM(BivariatePriceModel):
"""
Wrapper class with de-facto disabled analyzer
in favor to state lightness an computation speed.
"""
def __init__(
self,
*args,
analyzer_window=None,
p_analyzer_grouping=None,
s_analyzer_grouping=None,
**kwargs
):
super().__init__(
*args,
analyzer_window=[2, 2],
p_analyzer_grouping=None,
s_analyzer_grouping=None,
**kwargs
)
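# Rough usage sketch for this module (all argument values are illustrative, not defaults):
#   model = BivariatePriceModel(max_length=1000, analyzer_window=10, alpha=0.001,
#                               filter_alpha=0.05, stat_alpha=0.001, ps_alpha=0.001)
#   model.reset(prices)        # prices: positive-valued array of shape [2, num_points]
#   model.update(new_prices)   # updates should keep the trajectory continuous
#   p, s, x = model.get_trajectory(size=100)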
| lgpl-3.0 | 8,580,367,377,898,408,000 | 40.182461 | 122 | 0.58813 | false |
donjordano/skype4py | unittests/skypetest.py | 17 | 26067 | import unittest
import skype4pytest
from Skype4Py.skype import *
class SkypeTest(skype4pytest.TestCase):
def setUpObject(self):
self.obj = self.skype
# Methods
# =======
def testApiSecurityContextEnabled(self):
# Returned type: bool
def test():
self.obj.ApiSecurityContextEnabled('spam')
self.failUnlessRaises(SkypeAPIError, test)
def testApplication(self):
# Returned type: Application
t = self.obj.Application('spam')
self.assertInstance(t, Application)
def testAsyncSearchUsers(self):
# Returned type: int
self.api.enqueue('SEARCH USERS spam',
'USERS eggs, sausage, bacon')
t = self.obj.AsyncSearchUsers('spam')
self.assertInstance(t, int)
self.assertEqual(t, 0)
self.failUnless(self.api.is_empty())
def testAttach(self):
self.api.set_attachment_status(apiAttachUnknown)
self.obj.Attach()
self.assertEqual(self.obj.AttachmentStatus, apiAttachSuccess)
def testCall(self):
# Returned type: Call
self.api.enqueue('GET CALL 345 STATUS',
'CALL 345 STATUS spam')
t = self.obj.Call(345)
self.assertInstance(t, Call)
self.assertEqual(t.Id, 345)
self.assertEqual(t.Status, 'spam')
self.failUnless(self.api.is_empty())
def testCalls(self):
# Returned type: CallCollection
self.api.enqueue('SEARCH CALLS spam',
'CALLS 123, 456, 789')
t = self.obj.Calls('spam')
self.assertInstance(t, CallCollection)
self.assertEqual(len(t), 3)
self.assertEqual(t[1].Id, 456)
self.failUnless(self.api.is_empty())
def testChangeUserStatus(self):
self.api.enqueue('GET USERSTATUS',
'USERSTATUS spam')
self.api.enqueue('SET USERSTATUS eggs',
'USERSTATUS eggs')
self.obj.ChangeUserStatus('eggs')
self.assertEqual(self.obj.CurrentUserStatus, 'eggs')
self.failUnless(self.api.is_empty())
def testChat(self):
# Returned type: chat.Chat
self.api.enqueue('GET CHAT spam STATUS',
'CHAT spam STATUS eggs')
t = self.obj.Chat('spam')
self.assertInstance(t, Chat)
self.assertEqual(t.Name, 'spam')
self.assertEqual(t.Status, 'eggs')
self.failUnless(self.api.is_empty())
def testClearCallHistory(self):
self.api.enqueue('CLEAR CALLHISTORY ALL spam')
self.obj.ClearCallHistory('spam')
self.failUnless(self.api.is_empty())
def testClearChatHistory(self):
self.api.enqueue('CLEAR CHATHISTORY')
self.obj.ClearChatHistory()
self.failUnless(self.api.is_empty())
def testClearVoicemailHistory(self):
self.api.enqueue('CLEAR VOICEMAILHISTORY')
self.obj.ClearVoicemailHistory()
self.failUnless(self.api.is_empty())
def testCommand(self):
# Returned type: Command
t = self.obj.Command('SPAM')
self.assertInstance(t, Command)
self.assertEqual(t.Command, 'SPAM')
def testConference(self):
# Returned type: Conference
self.api.enqueue('SEARCH CALLS ',
'CALLS 123, 456')
self.api.enqueue('GET CALL 123 CONF_ID',
'CALL 123 CONF_ID 789')
self.api.enqueue('GET CALL 456 CONF_ID',
'CALL 456 CONF_ID 789')
t = self.obj.Conference(789)
self.assertInstance(t, Conference)
self.assertEqual(t.Id, 789)
self.failUnless(self.api.is_empty())
def testCreateChatUsingBlob(self):
# Returned type: chat.Chat
self.api.enqueue('CHAT CREATEUSINGBLOB spam',
'CHAT eggs NAME eggs')
t = self.obj.CreateChatUsingBlob('spam')
self.assertInstance(t, Chat)
self.assertEqual(t.Name, 'eggs')
self.failUnless(self.api.is_empty())
def testCreateChatWith(self):
# Returned type: Chat
self.api.enqueue('CHAT CREATE spam, eggs',
'CHAT sausage STATUS bacon')
t = self.obj.CreateChatWith('spam', 'eggs')
self.assertInstance(t, Chat)
self.failUnless(self.api.is_empty())
def testCreateGroup(self):
# Returned type: Group
self.api.enqueue('SEARCH GROUPS CUSTOM',
'GROUPS 123, 789')
self.api.enqueue('CREATE GROUP spam')
self.api.enqueue('SEARCH GROUPS CUSTOM',
'GROUPS 123, 456, 789')
self.api.enqueue('GET GROUP 456 DISPLAYNAME',
'GROUP 456 DISPLAYNAME spam')
t = self.obj.CreateGroup('spam')
self.assertInstance(t, Group)
self.assertEqual(t.Id, 456)
self.assertEqual(t.DisplayName, 'spam')
self.failUnless(self.api.is_empty())
def testCreateSms(self):
# Returned type: SmsMessage
self.api.enqueue('CREATE SMS OUTGOING +1234567890',
'SMS 123 TYPE OUTGOING')
t = self.obj.CreateSms(smsMessageTypeOutgoing, '+1234567890')
self.assertInstance(t, SmsMessage)
self.assertEqual(t.Id, 123)
self.failUnless(self.api.is_empty())
def testDeleteGroup(self):
self.api.enqueue('DELETE GROUP 789')
self.obj.DeleteGroup(789)
self.failUnless(self.api.is_empty())
def testEnableApiSecurityContext(self):
def test():
self.obj.EnableApiSecurityContext('spam')
self.failUnlessRaises(SkypeAPIError, test)
def testFindChatUsingBlob(self):
# Returned type: chat.Chat
self.api.enqueue('CHAT FINDUSINGBLOB spam',
'CHAT eggs STATUS MULTI_SUBSCRIBED')
t = self.obj.FindChatUsingBlob('spam')
self.assertInstance(t, Chat)
self.assertEqual(t.Name, 'eggs')
self.failUnless(self.api.is_empty())
def testGreeting(self):
# Returned type: Voicemail
self.api.enqueue('SEARCH VOICEMAILS',
'VOICEMAILS 123, 456')
self.api.enqueue('GET VOICEMAIL 123 PARTNER_HANDLE',
'VOICEMAIL 123 PARTNER_HANDLE spam')
self.api.enqueue('GET VOICEMAIL 123 TYPE',
'VOICEMAIL 123 TYPE DEFAULT_GREETING')
t = self.obj.Greeting('spam')
self.assertInstance(t, Voicemail)
self.assertEqual(t.Id, 123)
self.failUnless(self.api.is_empty())
def testMessage(self):
# Returned type: ChatMessage
self.api.enqueue('GET CHATMESSAGE 123 STATUS',
'CHATMESSAGE 123 STATUS RECEIVED')
t = self.obj.Message(123)
self.assertInstance(t, ChatMessage)
self.assertEqual(t.Id, 123)
self.assertEqual(t.Status, cmsReceived)
self.failUnless(self.api.is_empty())
def testMessages(self):
# Returned type: ChatMessageCollection
self.api.enqueue('SEARCH CHATMESSAGES spam',
'CHATMESSAGES 123, 456')
t = self.obj.Messages('spam')
self.assertInstance(t, ChatMessageCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testPlaceCall(self):
# Returned type: Call
self.api.enqueue('SEARCH ACTIVECALLS',
'ACTIVECALLS ')
self.api.enqueue('CALL spam',
'CALL 123 STATUS UNPLACED')
t = self.obj.PlaceCall('spam')
self.assertInstance(t, Call)
self.assertEqual(t.Id, 123)
self.failUnless(self.api.is_empty())
def testPrivilege(self):
# Returned type: bool
self.api.enqueue('GET PRIVILEGE SPAM',
'PRIVILEGE SPAM TRUE')
t = self.obj.Privilege('spam')
self.assertInstance(t, bool)
self.assertEqual(t, True)
self.failUnless(self.api.is_empty())
def testProfile(self):
# Returned type: unicode or None
self.api.enqueue('GET PROFILE FULLNAME',
'PROFILE FULLNAME spam eggs')
t = self.obj.Profile('FULLNAME')
self.assertInstance(t, unicode)
self.assertEqual(t, 'spam eggs')
self.failUnless(self.api.is_empty())
def testProperty(self):
# Returned type: unicode or None
self.api.enqueue('GET CHAT spam STATUS',
'CHAT spam STATUS DIALOG')
t = self.obj.Property('CHAT', 'spam', 'STATUS')
self.assertInstance(t, unicode)
self.assertEqual(t, chsDialog)
self.failUnless(self.api.is_empty())
def testRegisterEventHandler(self):
# Returned type: bool
from threading import Event
event = Event()
def handler(user, mood_text):
self.assertEqual(user.Handle, 'spam')
self.assertEqual(mood_text, 'eggs')
event.set()
t = self.obj.RegisterEventHandler('UserMood', handler)
self.assertInstance(t, bool)
self.assertEqual(t, True)
self.api.schedule(0, 'USER spam MOOD_TEXT eggs')
event.wait(1)
self.assertEqual(event.isSet(), True)
t = self.obj.UnregisterEventHandler('UserMood', handler)
self.assertInstance(t, bool)
self.assertEqual(t, True)
t = self.obj.UnregisterEventHandler('UserMood', handler)
self.assertEqual(t, False)
def testResetCache(self):
self.obj._CacheDict['SPAM'] = 'EGGS'
self.obj.ResetCache()
self.assertEqual(len(self.obj._CacheDict), 0)
def testSearchForUsers(self):
# Returned type: UserCollection
self.api.enqueue('SEARCH USERS spam',
'USERS eggs, sausage')
t = self.obj.SearchForUsers('spam')
self.assertInstance(t, UserCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testSendCommand(self):
self.api.enqueue('SPAM',
'EGGS')
command = self.obj.Command('SPAM')
self.obj.SendCommand(command)
self.assertEqual(command.Reply, 'EGGS')
self.failUnless(self.api.is_empty())
def testSendMessage(self):
# Returned type: ChatMessage
self.api.enqueue('CHAT CREATE spam',
'CHAT eggs STATUS DIALOG')
self.api.enqueue('CHATMESSAGE eggs sausage',
'CHATMESSAGE 123 STATUS SENDING')
t = self.obj.SendMessage('spam', 'sausage')
self.assertInstance(t, ChatMessage)
self.assertEqual(t.Id, 123)
self.failUnless(self.api.is_empty())
def testSendSms(self):
# Returned type: SmsMessage
self.api.enqueue('CREATE SMS OUTGOING spam',
'SMS 123 TYPE OUTGOING')
self.api.enqueue('SET SMS 123 BODY eggs',
'SMS 123 BODY eggs')
self.api.enqueue('ALTER SMS 123 SEND')
t = self.obj.SendSms('spam', Body='eggs')
self.assertInstance(t, SmsMessage)
self.assertEqual(t.Id, 123)
self.failUnless(self.api.is_empty())
def testSendVoicemail(self):
# Returned type: Voicemail
self.api.enqueue('CALLVOICEMAIL spam',
'CALL 123 STATUS ROUTING')
self.api.protocol = 6
t = self.obj.SendVoicemail('spam')
# TODO: As of now the method does not yet return the Voicemail object.
#self.assertInstance(t, Voicemail)
#self.assertEqual(t.Id, 345)
self.failUnless(self.api.is_empty())
def testUser(self):
# Returned type: User
self.api.enqueue('GET CURRENTUSERHANDLE',
'CURRENTUSERHANDLE spam')
self.api.enqueue('GET USER spam ONLINESTATUS',
'USER spam ONLINESTATUS OFFLINE')
t = self.obj.User()
self.assertInstance(t, User)
self.assertEqual(t.Handle, 'spam')
self.assertEqual(t.OnlineStatus, olsOffline)
self.failUnless(self.api.is_empty())
def testVariable(self):
# Returned type: unicode or None
self.api.enqueue('GET SPAM',
'SPAM eggs')
t = self.obj.Variable('SPAM')
self.assertInstance(t, unicode)
self.assertEqual(t, 'eggs')
self.failUnless(self.api.is_empty())
def testVoicemail(self):
# Returned type: Voicemail
self.api.enqueue('GET VOICEMAIL 345 TYPE',
'VOICEMAIL 345 TYPE OUTGOING')
t = self.obj.Voicemail(345)
self.assertInstance(t, Voicemail)
self.assertEqual(t.Id, 345)
self.assertEqual(t.Type, vmtOutgoing)
self.failUnless(self.api.is_empty())
# Properties
# ==========
def testActiveCalls(self):
# Readable, Type: CallCollection
self.api.enqueue('SEARCH ACTIVECALLS',
'ACTIVECALLS 123, 456')
t = self.obj.ActiveCalls
self.assertInstance(t, CallCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testActiveChats(self):
# Readable, Type: ChatCollection
self.api.enqueue('SEARCH ACTIVECHATS',
'ACTIVECHATS spam, eggs, sausage, ham')
t = self.obj.ActiveChats
self.assertInstance(t, ChatCollection)
self.assertEqual(len(t), 4)
self.failUnless(self.api.is_empty())
def _testActiveFileTransfers(self):
# Readable, Type: FileTransferCollection
self.api.enqueue('SEARCH ACTIVEFILETRANSFERS',
'ACTIVEFILETRANSFERS 123, 456, 789')
t = self.obj.ActiveFileTransfers
self.assertInstance(t, FileTransferCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testApiWrapperVersion(self):
# Readable, Type: str
t = self.obj.ApiWrapperVersion
self.assertInstance(t, str)
import pkg_resources
v = pkg_resources.get_distribution("Skype4Py").version
self.assertEqual(t, v)
def testAttachmentStatus(self):
# Readable, Type: int
t = self.obj.AttachmentStatus
self.assertInstance(t, int)
# API emulator is always attached.
self.assertEqual(t, apiAttachSuccess)
def testBookmarkedChats(self):
# Readable, Type: ChatCollection
self.api.enqueue('SEARCH BOOKMARKEDCHATS',
'BOOKMARKEDCHATS spam, eggs, ham')
t = self.obj.BookmarkedChats
self.assertInstance(t, ChatCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testCache(self):
# Readable, Writable, Type: bool
t = self.obj.Cache
self.assertInstance(t, bool)
self.assertEqual(t, True)
self.obj.Cache = False
t = self.obj.Cache
self.assertEqual(t, False)
def testChats(self):
# Readable, Type: ChatCollection
self.api.enqueue('SEARCH CHATS',
'CHATS spam, eggs')
t = self.obj.Chats
self.assertInstance(t, ChatCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testClient(self):
# Readable, Type: Client
t = self.obj.Client
self.assertInstance(t, Client)
def testCommandId(self):
# Readable, Writable, Type: bool
t = self.obj.CommandId
self.assertInstance(t, bool)
self.assertEqual(t, True)
def test():
self.obj.CommandId = False
self.failUnlessRaises(SkypeError, test)
def testConferences(self):
# Readable, Type: ConferenceCollection
self.api.enqueue('SEARCH CALLS ',
'CALLS 123, 456')
self.api.enqueue('GET CALL 123 CONF_ID',
'CALL 123 CONF_ID 789')
self.api.enqueue('GET CALL 456 CONF_ID',
'CALL 456 CONF_ID 789')
t = self.obj.Conferences
self.assertInstance(t, ConferenceCollection)
self.assertEqual(len(t), 1)
self.assertEqual(t[0].Id, 789)
self.failUnless(self.api.is_empty())
def testConnectionStatus(self):
# Readable, Type: str
self.api.enqueue('GET CONNSTATUS',
'CONNSTATUS CONNECTING')
t = self.obj.ConnectionStatus
self.assertInstance(t, str)
self.assertEqual(t, conConnecting)
self.failUnless(self.api.is_empty())
def testConvert(self):
# Readable, Type: Conversion
t = self.obj.Convert
self.assertInstance(t, Conversion)
def testCurrentUser(self):
# Readable, Type: User
self.api.enqueue('GET CURRENTUSERHANDLE',
'CURRENTUSERHANDLE spam')
t = self.obj.CurrentUser
self.assertInstance(t, User)
self.assertEqual(t.Handle, 'spam')
self.failUnless(self.api.is_empty())
def testCurrentUserHandle(self):
# Readable, Type: str
self.api.enqueue('GET CURRENTUSERHANDLE',
'CURRENTUSERHANDLE spam')
t = self.obj.CurrentUserHandle
self.assertInstance(t, str)
self.assertEqual(t, 'spam')
self.failUnless(self.api.is_empty())
def testCurrentUserProfile(self):
# Readable, Type: Profile
t = self.obj.CurrentUserProfile
self.assertInstance(t, Profile)
def testCurrentUserStatus(self):
# Readable, Writable, Type: str
self.api.enqueue('GET USERSTATUS',
'USERSTATUS NA')
t = self.obj.CurrentUserStatus
self.assertInstance(t, str)
self.assertEqual(t, cusNotAvailable)
self.failUnless(self.api.is_empty())
self.api.enqueue('SET USERSTATUS AWAY',
'USERSTATUS AWAY')
self.obj.CurrentUserStatus = cusAway
self.failUnless(self.api.is_empty())
def testCustomGroups(self):
# Readable, Type: GroupCollection
self.api.enqueue('SEARCH GROUPS CUSTOM',
'GROUPS 123, 456, 789')
t = self.obj.CustomGroups
self.assertInstance(t, GroupCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testFileTransfers(self):
# Readable, Type: FileTransferCollection
self.api.enqueue('SEARCH FILETRANSFERS',
'FILETRANSFERS 123, 456')
t = self.obj.FileTransfers
self.assertInstance(t, FileTransferCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testFocusedContacts(self):
# Readable, Type: UserCollection
self.api.enqueue('GET CONTACTS_FOCUSED',
'CONTACTS FOCUSED spam, eggs')
t = self.obj.FocusedContacts
self.assertInstance(t, UserCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testFriendlyName(self):
# Readable, Writable, Type: unicode
self.obj.FriendlyName = 'spam'
t = self.obj.FriendlyName
self.assertInstance(t, unicode)
self.assertEqual(t, 'spam')
def testFriends(self):
# Readable, Type: UserCollection
self.api.enqueue('SEARCH FRIENDS',
'FRIENDS spam, eggs, sausage')
t = self.obj.Friends
self.assertInstance(t, UserCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testGroups(self):
# Readable, Type: GroupCollection
self.api.enqueue('SEARCH GROUPS ALL',
'GROUPS 123, 456')
t = self.obj.Groups
self.assertInstance(t, GroupCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testHardwiredGroups(self):
# Readable, Type: GroupCollection
self.api.enqueue('SEARCH GROUPS HARDWIRED',
'GROUPS 123, 456, 789')
t = self.obj.HardwiredGroups
self.assertInstance(t, GroupCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testMissedCalls(self):
# Readable, Type: CallCollection
self.api.enqueue('SEARCH MISSEDCALLS',
'MISSEDCALLS 123, 456')
t = self.obj.MissedCalls
self.assertInstance(t, CallCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testMissedChats(self):
# Readable, Type: ChatCollection
self.api.enqueue('SEARCH MISSEDCHATS',
'MISSEDCHATS spam, eggs, ham')
t = self.obj.MissedChats
self.assertInstance(t, ChatCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testMissedMessages(self):
# Readable, Type: ChatMessageCollection
self.api.enqueue('SEARCH MISSEDCHATMESSAGES',
'MISSEDCHATMESSAGES 123, 456, 789')
t = self.obj.MissedMessages
self.assertInstance(t, ChatMessageCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testMissedSmss(self):
# Readable, Type: SmsMessageCollection
self.api.enqueue('SEARCH MISSEDSMSS',
'MISSEDSMSS 123, 456')
t = self.obj.MissedSmss
self.assertInstance(t, SmsMessageCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testMissedVoicemails(self):
# Readable, Type: VoicemailCollection
self.api.enqueue('SEARCH MISSEDVOICEMAILS',
'MISSEDVOICEMAILS 123, 456, 7, 8, 9')
t = self.obj.MissedVoicemails
self.assertInstance(t, VoicemailCollection)
self.assertEqual(len(t), 5)
self.failUnless(self.api.is_empty())
def testMute(self):
# Readable, Writable, Type: bool
self.api.enqueue('GET MUTE',
'MUTE ON')
t = self.obj.Mute
self.assertInstance(t, bool)
self.assertEqual(t, True)
self.failUnless(self.api.is_empty())
self.api.enqueue('SET MUTE OFF',
'MUTE OFF')
self.obj.Mute = False
self.failUnless(self.api.is_empty())
def testPredictiveDialerCountry(self):
# Readable, Type: str
self.api.enqueue('GET PREDICTIVE_DIALER_COUNTRY',
'PREDICTIVE_DIALER_COUNTRY de')
t = self.obj.PredictiveDialerCountry
self.assertInstance(t, str)
self.assertEqual(t, 'de')
self.failUnless(self.api.is_empty())
def testProtocol(self):
# Readable, Writable, Type: int
t = self.obj.Protocol
self.assertInstance(t, int)
from Skype4Py.api import DEFAULT_PROTOCOL
self.assertEqual(t, DEFAULT_PROTOCOL)
self.api.enqueue('PROTOCOL 10')
self.obj.Protocol = 10
t = self.obj.Protocol
self.assertEqual(t, 10)
self.failUnless(self.api.is_empty())
def testRecentChats(self):
# Readable, Type: ChatCollection
self.api.enqueue('SEARCH RECENTCHATS',
'RECENTCHATS spam, eggs')
t = self.obj.RecentChats
self.assertInstance(t, ChatCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testSettings(self):
# Readable, Type: Settings
t = self.obj.Settings
self.assertInstance(t, Settings)
def testSilentMode(self):
# Readable, Writable, Type: bool
self.api.enqueue('GET SILENT_MODE',
'SILENT_MODE ON')
t = self.obj.SilentMode
self.assertInstance(t, bool)
self.assertEqual(t, True)
self.failUnless(self.api.is_empty())
self.api.enqueue('SET SILENT_MODE OFF',
'SILENT_MODE OFF')
self.obj.SilentMode = False
self.failUnless(self.api.is_empty())
def testSmss(self):
# Readable, Type: SmsMessageCollection
self.api.enqueue('SEARCH SMSS',
'SMSS 123, 456, 789')
t = self.obj.Smss
self.assertInstance(t, SmsMessageCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testTimeout(self):
# Readable, Writable, Type: float, int or long
t = self.obj.Timeout
self.assertInstance(t, int)
from Skype4Py.api import DEFAULT_TIMEOUT
self.assertEqual(t, DEFAULT_TIMEOUT)
self.obj.Timeout = 23.4
t = self.obj.Timeout
self.assertEqual(t, 23.4)
def testUsersWaitingAuthorization(self):
# Readable, Type: UserCollection
self.api.enqueue('SEARCH USERSWAITINGMYAUTHORIZATION',
'USERSWAITINGMYAUTHORIZATION spam, eggs, ham')
t = self.obj.UsersWaitingAuthorization
self.assertInstance(t, UserCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def testVersion(self):
# Readable, Type: str
self.api.enqueue('GET SKYPEVERSION',
'SKYPEVERSION spam.eggs')
t = self.obj.Version
self.assertInstance(t, str)
self.assertEqual(t, 'spam.eggs')
self.failUnless(self.api.is_empty())
def testVoicemails(self):
# Readable, Type: VoicemailCollection
self.api.enqueue('SEARCH VOICEMAILS',
'VOICEMAILS 123, 456, 789')
t = self.obj.Voicemails
self.assertInstance(t, VoicemailCollection)
self.assertEqual(len(t), 3)
self.failUnless(self.api.is_empty())
def suite():
return unittest.TestSuite([
unittest.defaultTestLoader.loadTestsFromTestCase(SkypeTest),
])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,160,989,439,882,215,400 | 35.204167 | 78 | 0.593279 | false |
jhutar/spacewalk | client/rhel/rhn-client-tools/src/up2date_client/gui.py | 4 | 13579 | #
# GUI for Update Agent
# Copyright (c) 1999--2016 Red Hat, Inc. Distributed under GPLv2.
#
# Authors:
# Preston Brown <[email protected]>
# Adrian Likins <[email protected]>
# Daniel Benamy <[email protected]>
import os
import sys
import gtk
import gtk.glade
gtk.glade.bindtextdomain("rhn-client-tools", "/usr/share/locale")
# We have to import gnome.ui before using glade for our GnomeUi widgets.
# ie the druid. Get rid of these widgets, and we won't need this import.
# see http://www.async.com.br/faq/pygtk/index.py?req=show&file=faq22.005.htp
import gnome.ui
import signal
try: # python2
import xmlrpclib
except ImportError: # python3
import xmlrpc.client as xmlrpclib
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
# Python 3 translations don't have a ugettext method
if not hasattr(t, 'ugettext'):
t.ugettext = t.gettext
_ = t.ugettext
from up2date_client import up2dateErrors
from up2date_client import config
from up2date_client import rhnreg
from up2date_client import messageWindow
from up2date_client import rhnregGui
class Gui(rhnregGui.StartPage, rhnregGui.ChooseServerPage, rhnregGui.LoginPage,
rhnregGui.ReviewSubscriptionPage, rhnregGui.CreateProfilePage,
rhnregGui.ProvideCertificatePage, rhnregGui.FinishPage,
rhnregGui.ChooseChannelPage):
def __init__(self):
self.cfg = config.initUp2dateConfig()
gladeFile = "/usr/share/rhn/up2date_client/gui.glade"
self.xml = gtk.glade.XML(gladeFile, "mainWin", domain="rhn-client-tools")
self.xml.signal_autoconnect (
{ "onDruidCancel" : self.onDruidCancel,
"onStartPagePrepare" : self.onStartPagePrepare,
"onStartPageNext" : self.onStartPageNext,
"onChooseServerPagePrepare" : self.onChooseServerPagePrepare,
"onChooseServerPageNext" : self.onChooseServerPageNext,
"onLoginPagePrepare" : self.onLoginPagePrepare,
"onLoginPageNext" : self.onLoginPageNext,
"onChooseChannelPageNext" : self.onChooseChannelPageNext,
"onChooseChannelPageBack" : self.onChooseChannelPageBack,
"onChooseChannelPagePrepare" : self.onChooseChannelPagePrepare,
"onCreateProfilePagePrepare" : self.onCreateProfilePagePrepare,
"onCreateProfilePageNext" : self.onCreateProfilePageNext,
"onCreateProfilePageBack" : self.onCreateProfilePageBack,
"onReviewSubscriptionPagePrepare" : self.onReviewSubscriptionPagePrepare,
"onReviewSubscriptionPageNext" : self.onReviewSubscriptionPageNext,
"onProvideCertificatePageBack" : self.onProvideCertificatePageBack,
"onProvideCertificatePageNext" : self.onProvideCertificatePageNext,
"onFinishPagePrepare" : self.onFinishPagePrepare,
"onFinishPageFinish" : self.onFinishPageFinish,
} )
rhnregGui.StartPage.__init__(self)
rhnregGui.ChooseServerPage.__init__(self)
rhnregGui.LoginPage.__init__(self)
rhnregGui.ChooseChannelPage.__init__(self)
rhnregGui.CreateProfilePage.__init__(self)
rhnregGui.ReviewSubscriptionPage.__init__(self)
rhnregGui.ProvideCertificatePage.__init__(self)
rhnregGui.FinishPage.__init__(self)
# Pack all the pages into the empty druid screens
contents = self.startPageVbox()
container = self.xml.get_widget("startPageVbox")
container.pack_start(contents, True)
contents = self.chooseServerPageVbox()
container = self.xml.get_widget("chooseServerPageVbox")
container.pack_start(contents, True)
contents = self.loginPageVbox()
container = self.xml.get_widget("loginPageVbox")
container.pack_start(contents, True)
contents = self.chooseChannelPageVbox()
container = self.xml.get_widget("chooseChannelPageVbox")
container.pack_start(contents, True)
contents = self.createProfilePageVbox()
container = self.xml.get_widget("createProfilePageVbox")
container.pack_start(contents, True)
contents = self.reviewSubscriptionPageVbox()
container = self.xml.get_widget("reviewSubscriptionPageVbox")
container.pack_start(contents, True)
contents = self.provideCertificatePageVbox()
container = self.xml.get_widget("provideCertificatePageVbox")
container.pack_start(contents, True)
contents = self.finishPageVbox()
container = self.xml.get_widget("finishPageVbox")
container.pack_start(contents, True)
self.initProfile = False
self.oemInfo = {}
self.productInfo = {}
self.already_registered_already_shown = False
self.rhsm_already_registered_already_shown = False
self.druid = self.xml.get_widget("druid")
self.mainWin = self.xml.get_widget("mainWin")
self.mainWin.connect("delete-event", gtk.main_quit)
self.mainWin.connect("hide", gtk.main_quit)
# It's better to get widgets in advance so bugs don't hide in get_widget
# calls that only get executed periodically.
self.startPage = self.xml.get_widget("startPage")
self.chooseServerPage = self.xml.get_widget("chooseServerPage")
self.provideCertificatePage = self.xml.get_widget("provideCertificatePage")
self.loginPage = self.xml.get_widget("loginPage")
self.chooseChannelPage = self.xml.get_widget("chooseChannelPage")
self.createProfilePage = self.xml.get_widget("createProfilePage")
self.reviewSubscriptionPage = \
self.xml.get_widget("reviewSubscriptionPage")
self.finishPage = self.xml.get_widget("finishPage")
# Set up cursor changing functions. Overriding functions that aren't in
# classes like this could be called a hack, but I think it's the best
# we can do with the current overall setup and isn't too bad.
def mySetBusyCursor():
cursor = gtk.gdk.Cursor(gtk.gdk.WATCH)
self.mainWin.window.set_cursor(cursor)
while gtk.events_pending():
gtk.main_iteration(False)
def mySetArrowCursor():
cursor = gtk.gdk.Cursor(gtk.gdk.LEFT_PTR)
self.mainWin.window.set_cursor(cursor)
while gtk.events_pending():
gtk.main_iteration(False)
rhnregGui.setBusyCursor = mySetBusyCursor
rhnregGui.setArrowCursor = mySetArrowCursor
self.mainWin.show_all()
# Druid doesn't signal prepare to the first page when starting up
self.onStartPagePrepare(None, None, manualPrepare=True)
def onDruidCancel(self, dummy):
dialog = rhnregGui.ConfirmQuitDialog()
if dialog.rc == 1:
self.druid.set_page(self.finishPage)
else:
return True
def fatalError(self, error, wrap=1):
rhnregGui.setArrowCursor()
# FIXME
if wrap:
text = messageWindow.wrap_text(error)
else:
text = error
dlg = messageWindow.ErrorDialog(text,self.mainWin)
gtk.main_quit()
sys.exit(1)
def onStartPagePrepare(self, page, dummy, manualPrepare=False):
if not manualPrepare:
self.startPage.emit_stop_by_name("prepare")
self.druid.set_buttons_sensitive(False, True, True, False)
if rhnreg.rhsm_registered() and not self.rhsm_already_registered_already_shown:
# Dialog constructor returns when dialog closes
dialog = rhnregGui.AlreadyRegisteredSubscriptionManagerDialog()
if dialog.rc == 0:
sys.exit(0)
self.rhsm_already_registered_already_shown = True
if rhnreg.registered() and not self.already_registered_already_shown:
# Dialog constructor returns when dialog closes
dialog = rhnregGui.AlreadyRegisteredDialog()
if dialog.rc == 0:
sys.exit(0)
self.already_registered_already_shown = True
def onStartPageNext(self, page, dummy):
self.druid.set_buttons_sensitive(True, True, True, False)
def onChooseServerPagePrepare(self, page, dummy):
self.chooseServerPage.emit_stop_by_name("prepare")
self.chooseServerPagePrepare()
def onChooseServerPageNext(self, page, dummy):
try:
ret = self.chooseServerPageApply()
if ret is False: # Everything is ok
self.druid.set_page(self.loginPage)
except (up2dateErrors.SSLCertificateVerifyFailedError,\
up2dateErrors.SSLCertificateFileNotFound):
self.setUrlInWidget()
self.druid.set_page(self.provideCertificatePage)
return True
def onLoginPagePrepare(self, page, dummy):
self.loginPage.emit_stop_by_name("prepare")
self.loginXml.get_widget("loginUserEntry").grab_focus()
self.loginPagePrepare()
def onLoginPageNext(self, page, dummy):
"""This must manually switch pages because another function calls it to
advance the druid. It returns True to inform the druid of this.
"""
ret = self.loginPageVerify()
if ret:
return ret
ret = self.loginPageApply()
if ret:
return ret
self.goToPageAfterLogin()
return True
def goToPageAfterLogin(self):
"""This function is used by the create new account dialog so it doesn't
need to have any knowledge of the screen mechanism or order.
"""
if rhnregGui.ChooseChannelPage.chooseChannelShouldBeShown(self):
self.druid.set_page(self.chooseChannelPage)
else:
self.druid.set_page(self.createProfilePage)
def onChooseChannelPageBack(self, page, dummy):
self.druid.set_page(self.loginPage)
return True
def onChooseChannelPageNext(self, page, dummy):
self.chooseChannelPageApply()
if self.chose_all_updates or \
self.chose_default_channel is False:
dialog = rhnregGui.ConfirmAllUpdatesDialog()
if dialog.rc == 0:
self.druid.set_page(self.chooseChannelPage)
return True
else:
self.druid.set_page(self.createProfilePage)
return True
def onChooseChannelPagePrepare(self, page, dummy):
self.chooseChannelPagePrepare()
self.chooseChannelPage.emit_stop_by_name("prepare")
def onCreateProfilePagePrepare(self, page, dummy):
self.createProfilePagePrepare()
self.createProfilePage.emit_stop_by_name("prepare")
def onCreateProfilePageNext(self, page, dummy):
ret = self.createProfilePageVerify()
if ret:
return ret
ret = self.createProfilePageApply()
if ret:
return ret
def onCreateProfilePageBack(self, page, dummy):
if rhnregGui.ChooseChannelPage.chooseChannelShouldBeShown(self):
self.druid.set_page(self.chooseChannelPage)
else:
self.druid.set_page(self.chooseServerPage)
return True
def onReviewSubscriptionPagePrepare(self, page, dummy):
self.reviewSubscriptionPagePrepare()
self.druid.set_buttons_sensitive(False, True, False, False)
self.reviewSubscriptionPage.emit_stop_by_name("prepare")
def onReviewSubscriptionPageNext(self, page, dummy):
self.druid.set_page(self.finishPage)
return True
def onProvideCertificatePageBack(self, page=None, dummy=None):
self.druid.set_page(self.chooseServerPage)
return True
def onProvideCertificatePageNext(self, page=None, dummy=None):
status = self.provideCertificatePageApply()
if status == 0:
self.druid.set_page(self.loginPage)
elif status == 1:
self.druid.set_page(self.finishPage)
elif status == 3:
self.druid.set_page(self.chooseServerPage)
else:
assert status == 2
pass
return True
def onFinishPagePrepare(self, page=None, dummy=None):
self.druid.set_buttons_sensitive(False, False, False, False)
self.druid.set_show_finish(True)
# Stopping the signal is needed to make the druid buttons change the way
# I want. I have no idea why.
self.finishPage.emit_stop_by_name("prepare")
if rhnregGui.hasBaseChannelAndUpdates():
self.druid.finish.set_label(_("_Finish"))
title = _("Updates Configured")
else:
self.druid.finish.set_label(_("_Exit"))
title = _("Software Updates Not Set Up")
self.finishPagePrepare()
self.mainWin.set_title(title)
self.finishPage.set_title(title)
def onFinishPageFinish(self, page, dummy=None):
gtk.main_quit()
def rootWarning():
dlg = messageWindow.ErrorDialog(_("You must run rhn_register as root."))
# dlg.run_and_close()
def errorWindow(msg):
dlg = messageWindow.ErrorDialog(messageWindow.wrap_text(msg))
# dlg.run_and_close()
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
if os.geteuid() != 0:
rootWarning()
sys.exit(1)
gui = Gui()
gtk.main()
if __name__ == "__main__":
try:
main()
except xmlrpclib.ResponseError:
print(sys.exc_info()[1])
except IOError:
e = sys.exc_info()[1]
print(_("There was some sort of I/O error: %s") % e.errmsg)
| gpl-2.0 | 5,942,793,527,331,354,000 | 36.824513 | 87 | 0.655056 | false |
stevegood/filesync-server | lib/ubuntuone/storage/rzlib.py | 6 | 18704 | # Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Resumable decompression
A ctypes interface to zlib decompress/inflate functions that mimics the
zlib.decompressobj interface but also supports getting and setting the
z_stream state to suspend/serialize it and then resume the decompression
at a later time.
"""
import cPickle
import ctypes
import zlib
if zlib.ZLIB_VERSION != '1.2.3.3' and zlib.ZLIB_VERSION != '1.2.3.4':
raise zlib.error("zlib version not supported: %s" % (zlib.ZLIB_VERSION))
if zlib.ZLIB_VERSION == '1.2.3.3':
# from inftrees.h
ENOUGH = 2048
elif zlib.ZLIB_VERSION == '1.2.3.4':
ENOUGH_LENS = 852
ENOUGH_DISTS = 592
ENOUGH = ENOUGH_LENS + ENOUGH_DISTS
# from inflate.h
#/*
# gzip header information passed to and from zlib routines. See RFC 1952
# for more details on the meanings of these fields.
#*/
#typedef struct gz_header_s {
# int text; /* true if compressed data believed to be text */
# uLong time; /* modification time */
# int xflags; /* extra flags (not used when writing a gzip file) */
# int os; /* operating system */
# Bytef *extra; /* pointer to extra field or Z_NULL if none */
# uInt extra_len; /* extra field length (valid if extra != Z_NULL) */
# uInt extra_max; /* space at extra (only when reading header) */
# Bytef *name; /* pointer to zero-terminated file name or Z_NULL */
# uInt name_max; /* space at name (only when reading header) */
# Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */
# uInt comm_max; /* space at comment (only when reading header) */
# int hcrc; /* true if there was or will be a header crc */
# int done; /* true when done reading gzip header (not used
# when writing a gzip file) */
#} gz_header;
Bytefp = ctypes.POINTER(ctypes.c_ubyte)
class GZHeader(ctypes.Structure):
"""gz_header_s structure."""
_fields_ = [
('text', ctypes.c_int),
('time', ctypes.c_ulong),
('xflags', ctypes.c_int),
('os', ctypes.c_int),
('extra', Bytefp),
('extra_len', ctypes.c_uint),
('extra_max', ctypes.c_uint),
('name', Bytefp),
('name_max', ctypes.c_uint),
('comment', Bytefp),
('comm_max', ctypes.c_uint),
('hcrc', ctypes.c_int),
('done', ctypes.c_int),
]
#/* Structure for decoding tables. Each entry provides either the
# information needed to do the operation requested by the code that
# indexed that table entry, or it provides a pointer to another
# table that indexes more bits of the code. op indicates whether
# the entry is a pointer to another table, a literal, a length or
# distance, an end-of-block, or an invalid code. For a table
# pointer, the low four bits of op is the number of index bits of
# that table. For a length or distance, the low four bits of op
# is the number of extra bits to get after the code. bits is
# the number of bits in this code or part of the code to drop off
# of the bit buffer. val is the actual byte to output in the case
# of a literal, the base length or distance, or the offset from
# the current table to the next table. Each entry is four bytes. */
#typedef struct {
# unsigned char op; /* operation, extra bits, table bits */
# unsigned char bits; /* bits in this part of the code */
# unsigned short val; /* offset in table or code value */
#} code;
class Code(ctypes.Structure):
"""code structure."""
_fields_ = [
('op', ctypes.c_ubyte),
('bits', ctypes.c_ubyte),
('val', ctypes.c_ushort),
]
#/* state maintained between inflate() calls. Approximately 7K bytes. */
#struct inflate_state {
# inflate_mode mode; /* current inflate mode */
# int last; /* true if processing last block */
# int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
# int havedict; /* true if dictionary provided */
# int flags; /* gzip header method and flags (0 if zlib) */
# unsigned dmax; /* zlib header max distance (INFLATE_STRICT)*/
# unsigned long check; /* protected copy of check value */
# unsigned long total; /* protected copy of output count */
# gz_headerp head; /* where to save gzip header information */
# /* sliding window */
# unsigned wbits; /* log base 2 of requested window size */
# unsigned wsize; /* window size or zero if not using window */
# unsigned whave; /* valid bytes in the window */
# unsigned write; /* window write index */
# unsigned char FAR *window; /* allocated sliding window, if needed */
# /* bit accumulator */
# unsigned long hold; /* input bit accumulator */
# unsigned bits; /* number of bits in "in" */
# /* for string and stored block copying */
# unsigned length; /* literal or length of data to copy */
# unsigned offset; /* distance back to copy string from */
# /* for table and code decoding */
# unsigned extra; /* extra bits needed */
# /* fixed and dynamic code tables */
# code const FAR *lencode; /* starting table for length/literal codes */
# code const FAR *distcode; /* starting table for distance codes */
# unsigned lenbits; /* index bits for lencode */
# unsigned distbits; /* index bits for distcode */
# /* dynamic table building */
# unsigned ncode; /* number of code length code lengths */
# unsigned nlen; /* number of length code lengths */
# unsigned ndist; /* number of distance code lengths */
# unsigned have; /* number of code lengths in lens[] */
# code FAR *next; /* next available space in codes[] */
# unsigned short lens[320]; /* temporary storage for code lengths */
# unsigned short work[288]; /* work area for code table building */
# code codes[ENOUGH]; /* space for code tables */
#};
if zlib.ZLIB_VERSION == '1.2.3.4':
extra_fields = [
('sane', ctypes.c_int),
('back', ctypes.c_int),
('was', ctypes.c_uint),
]
extra_attr = tuple([i[0] for i in extra_fields])
else:
extra_fields = []
extra_attr = ()
class InflateState(ctypes.Structure):
"""inflate_state structure."""
_fields_ = [
('mode', ctypes.c_int),
('last', ctypes.c_int),
('wrap', ctypes.c_int),
('havedict', ctypes.c_int),
('flags', ctypes.c_int),
('dmax', ctypes.c_uint),
('check', ctypes.c_ulong),
('total', ctypes.c_ulong),
('head', ctypes.POINTER(GZHeader)),
('wbits', ctypes.c_uint),
('wsize', ctypes.c_uint),
('whave', ctypes.c_uint),
('write', ctypes.c_uint),
('window', ctypes.POINTER(ctypes.c_ubyte)),
('hold', ctypes.c_ulong),
('bits', ctypes.c_uint),
('length', ctypes.c_uint),
('offset', ctypes.c_uint),
('extra', ctypes.c_uint),
('lencode', ctypes.POINTER(Code)),
('distcode', ctypes.POINTER(Code)),
('lenbits', ctypes.c_uint),
('distbits', ctypes.c_uint),
('ncode', ctypes.c_uint),
('nlen', ctypes.c_uint),
('ndist', ctypes.c_uint),
('have', ctypes.c_uint),
('next', ctypes.POINTER(Code)),
('lens', ctypes.c_ushort * 320),
('work', ctypes.c_ushort * 288),
('codes', Code * ENOUGH)
] + extra_fields
simple_attr = ('last', 'wrap', 'havedict', 'flags', 'dmax',
'check', 'total', 'wbits', 'wsize', 'whave', 'write',
'hold', 'bits', 'length', 'offset', 'offset',
'extra', 'lenbits', 'distbits', 'ncode', 'nlen',
'ndist', 'have', 'mode') + extra_attr
def get_state(self):
"""Get the state of inflate_state struct."""
# head will be always a NULL pointer, as we use raw in/delfate
state = {}
# first get the pointers offsets
#lencode = ctypes.string_at(self.lencode, ctypes.sizeof(Code))
lencode_addr = ctypes.addressof(self.lencode.contents)
codes_start = ctypes.addressof(self.codes)
lencode = lencode_addr - codes_start
#distcode = ctypes.string_at(self.distcode, ctypes.sizeof(Code))
distcode = ctypes.addressof(self.distcode.contents) - codes_start
#next = ctypes.string_at(self.next, ctypes.sizeof(Code))
next = ctypes.addressof(self.next.contents) - codes_start
# now get the raw memory data
codes = ctypes.string_at(ctypes.pointer(self.codes),
ctypes.sizeof(self.codes))
lens = ctypes.string_at(ctypes.pointer(self.lens),
ctypes.sizeof(self.lens))
work = ctypes.string_at(ctypes.pointer(self.work),
ctypes.sizeof(self.work))
if self.window:
window = ctypes.string_at(self.window, self.wsize)
else:
window = None
if self.head:
raise ValueError("gzip resume isn't supported.")
state = {'lencode': lencode, 'distcode': distcode, 'codes': codes,
'window': window, 'lens': lens, 'work': work, 'next': next,
'head': None}
# now add the basic type attributes to the state dict
for attr_name in self.simple_attr:
state[attr_name] = getattr(self, attr_name)
return state
def set_state(self, old_state, zalloc):
"""Set the state of this inflate state.
@param old_state: the old state dict.
@param zalloc: the zalloc function (in case we need to allocate space
for the window).
"""
if old_state['head']:
raise ValueError("gzip resume isn't supported.")
# set the basic type attributes from the old state dict
for attr_name in self.simple_attr:
setattr(self, attr_name, old_state[attr_name])
# set the data from the array attributes.
ctypes.memmove(ctypes.pointer(self.codes),
ctypes.c_char_p(old_state['codes']),
ctypes.sizeof(self.codes))
ctypes.memmove(ctypes.pointer(self.lens),
ctypes.c_char_p(old_state['lens']),
ctypes.sizeof(self.lens))
ctypes.memmove(ctypes.pointer(self.work),
ctypes.c_char_p(old_state['work']),
ctypes.sizeof(self.work))
# fix the Code pointers
codes_start = ctypes.addressof(self.codes)
self.lencode = ctypes.pointer(
Code.from_address(codes_start + old_state['lencode']))
self.distcode = ctypes.pointer(
Code.from_address(codes_start + old_state['distcode']))
self.next = ctypes.pointer(
Code.from_address(codes_start + old_state['next']))
# set the window
if old_state['window']:
if not self.window:
# we don't have the window mem allocated
addr = zalloc(ctypes.c_uint(1 << self.wbits),
ctypes.sizeof(ctypes.c_ubyte))
self.window = ctypes.cast(addr, ctypes.POINTER(ctypes.c_ubyte))
# set the contents of the window, we don't care about the size as
# in our use case it's always 1<<zlib.MAX_WBITS.
ctypes.memmove(self.window, ctypes.c_char_p(old_state['window']),
1 << self.wbits)
# this structure is based on this lines from /usr/include/zlib.h
#
# typedef struct z_stream_s {
# Bytef *next_in; /* next input byte */
# uInt avail_in; /* number of bytes available at next_in */
# uLong total_in; /* total nb of input bytes read so far */
#
# Bytef *next_out; /* next output byte should be put there */
# uInt avail_out; /* remaining free space at next_out */
# uLong total_out; /* total nb of bytes output so far */
#
# char *msg; /* last error message, NULL if no error */
# struct internal_state FAR *state; /* not visible by applications */
#
# alloc_func zalloc; /* used to allocate the internal state */
# free_func zfree; /* used to free the internal state */
# voidpf opaque; /* private data object passed to zalloc and zfree */
#
# int data_type; /* best guess about the data type: binary or text */
# uLong adler; /* adler32 value of the uncompressed data */
# uLong reserved; /* reserved for future use */
# } z_stream;
class ResumableZStream(ctypes.Structure):
"""z_stream structure."""
_fields_ = [
("next_in", ctypes.POINTER(ctypes.c_ubyte)),
("avail_in", ctypes.c_uint),
("total_in", ctypes.c_ulong),
("next_out", ctypes.POINTER(ctypes.c_ubyte)),
("avail_out", ctypes.c_uint),
("total_out", ctypes.c_ulong),
("msg", ctypes.c_char_p),
("state", ctypes.POINTER(InflateState)),
("zalloc", ctypes.c_void_p),
("zfree", ctypes.c_void_p),
("opaque", ctypes.c_void_p),
("data_type", ctypes.c_int),
("adler", ctypes.c_ulong),
("reserved", ctypes.c_ulong),
]
def get_state(self):
"""Returns the context as a string."""
# sanity checks
if self.next_in and self.avail_in > 0:
raise ValueError("There are pending bytes to process in next_in")
if self.msg:
raise ValueError("Can't serialize a stream in a error state.")
if self.state:
inflate_state = self.state.contents.get_state()
else:
inflate_state = {}
state = {'total_in': self.total_in,
'total_out': self.total_out,
'avail_in': self.avail_in,
'avail_out': self.avail_out,
'data_type': self.data_type,
'adler': self.adler,
'reserved': self.reserved,
'msg': self.msg,
'state': inflate_state,
'zlib_version': zlib.ZLIB_VERSION}
return cPickle.dumps(state)
def set_state(self, old_state):
"""Set the context with a string of data."""
old_state = cPickle.loads(old_state)
# first check the version
if old_state['zlib_version'] != zlib.ZLIB_VERSION:
raise VersionError("zlib_version: %s, not supported (%s)" %
(old_state['zlib_version'], zlib.ZLIB_VERSION))
# set the data
self.total_in = old_state['total_in']
self.total_out = old_state['total_out']
self.avail_in = old_state['avail_in']
self.avail_out = old_state['avail_out']
self.data_type = old_state['data_type']
self.adler = old_state['adler']
self.reserved = old_state['reserved']
inflate_state = old_state['state']
# build the zalloc function, see zutil.c
zcalloc = ctypes.CFUNCTYPE(ctypes.c_void_p)(self.zalloc)
zalloc = lambda items, size: zcalloc(self.opaque, items, size)
if self.state and inflate_state:
# set the inflate_state state
self.state.contents.set_state(inflate_state, zalloc)
class PyTypeObject(ctypes.Structure):
"""PyTypeObject structure."""
_fields_ = [
("ob_refcnt", ctypes.c_size_t),
("ob_type", ctypes.c_void_p),
("ob_size", ctypes.c_size_t),
("tp_name", ctypes.c_char_p)
]
class PyObject(ctypes.Structure):
"""PyObject structure."""
_fields_ = [
("ob_refcnt", ctypes.c_size_t),
("ob_type", ctypes.POINTER(PyTypeObject))
]
# PyObject *
PyObjectPtr = ctypes.POINTER(PyObject)
class CompObject(PyObject):
"""zlibmodule.c CompObject structure."""
_fields_ = [
('zst', ResumableZStream),
('unused_data', PyObjectPtr),
('unconsumed_tail', PyObjectPtr),
('is_initialised', ctypes.c_int)
]
class Decompress(object):
"""A zlib.Decompress wrapper that supports get/setting the state."""
def __init__(self, decompress_obj=None):
if decompress_obj is None:
decompress_obj = zlib.decompressobj()
self._do = decompress_obj
# get the C Decompress object
self._c_do = ctypes.cast(ctypes.c_void_p(id(self._do)),
ctypes.POINTER(CompObject)).contents
@property
def unconsumed_tail(self):
"""The uncosumed tail."""
return self._do.unconsumed_tail
@property
def unused_data(self):
"""The unused_data."""
return self._do.unused_data
def decompress(self, *args, **kwargs):
"""See zlib.decompressobj().decompress method."""
return self._do.decompress(*args, **kwargs)
def flush(self, *args, **kwargs):
"""See zlib.decompressobj().flush method."""
return self._do.flush(*args, **kwargs)
def copy(self):
"""See zlib.decompressobj().copy method."""
return Decompress(self._do.copy())
def set_state(self, z_stream_state):
"""Set the specified z_stream state."""
self._c_do.zst.set_state(z_stream_state)
def get_state(self):
"""Get the current z_stream state."""
return self._c_do.zst.get_state()
def decompressobj(z_stream_state=None, wbits=zlib.MAX_WBITS):
"""Returns a custom Decompress object instance."""
do = Decompress(decompress_obj=zlib.decompressobj(wbits))
if z_stream_state is not None:
do.set_state(z_stream_state)
return do
class VersionError(Exception):
"""Exception used for version mismatch in z_stream.set_state."""
| agpl-3.0 | -1,096,422,801,738,369,800 | 38.965812 | 79 | 0.582228 | false |
robovm/robovm-studio | python/lib/Lib/site-packages/django/contrib/gis/gdal/tests/test_envelope.py | 332 | 3742 | from django.contrib.gis.gdal import Envelope, OGRException
from django.utils import unittest
class TestPoint(object):
def __init__(self, x, y):
self.x = x
self.y = y
class EnvelopeTest(unittest.TestCase):
def setUp(self):
self.e = Envelope(0, 0, 5, 5)
def test01_init(self):
"Testing Envelope initilization."
e1 = Envelope((0, 0, 5, 5))
e2 = Envelope(0, 0, 5, 5)
e3 = Envelope(0, '0', '5', 5) # Thanks to ww for this
e4 = Envelope(e1._envelope)
self.assertRaises(OGRException, Envelope, (5, 5, 0, 0))
self.assertRaises(OGRException, Envelope, 5, 5, 0, 0)
self.assertRaises(OGRException, Envelope, (0, 0, 5, 5, 3))
self.assertRaises(OGRException, Envelope, ())
self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
self.assertRaises(TypeError, Envelope, u'foo')
self.assertRaises(OGRException, Envelope, (1, 1, 0, 0))
try:
Envelope(0, 0, 0, 0)
except OGRException:
self.fail("shouldn't raise an exception for min_x == max_x or min_y == max_y")
def test02_properties(self):
"Testing Envelope properties."
e = Envelope(0, 0, 2, 3)
self.assertEqual(0, e.min_x)
self.assertEqual(0, e.min_y)
self.assertEqual(2, e.max_x)
self.assertEqual(3, e.max_y)
self.assertEqual((0, 0), e.ll)
self.assertEqual((2, 3), e.ur)
self.assertEqual((0, 0, 2, 3), e.tuple)
self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))
def test03_equivalence(self):
"Testing Envelope equivalence."
e1 = Envelope(0.523, 0.217, 253.23, 523.69)
e2 = Envelope((0.523, 0.217, 253.23, 523.69))
self.assertEqual(e1, e2)
self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)
def test04_expand_to_include_pt_2_params(self):
"Testing Envelope expand_to_include -- point as two parameters."
self.e.expand_to_include(2, 6)
self.assertEqual((0, 0, 5, 6), self.e)
self.e.expand_to_include(-1, -1)
self.assertEqual((-1, -1, 5, 6), self.e)
def test05_expand_to_include_pt_2_tuple(self):
"Testing Envelope expand_to_include -- point as a single 2-tuple parameter."
self.e.expand_to_include((10, 10))
self.assertEqual((0, 0, 10, 10), self.e)
self.e.expand_to_include((-10, -10))
self.assertEqual((-10, -10, 10, 10), self.e)
def test06_expand_to_include_extent_4_params(self):
"Testing Envelope expand_to_include -- extent as 4 parameters."
self.e.expand_to_include(-1, 1, 3, 7)
self.assertEqual((-1, 0, 5, 7), self.e)
def test06_expand_to_include_extent_4_tuple(self):
"Testing Envelope expand_to_include -- extent as a single 4-tuple parameter."
self.e.expand_to_include((-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test07_expand_to_include_envelope(self):
"Testing Envelope expand_to_include with Envelope as parameter."
self.e.expand_to_include(Envelope(-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test08_expand_to_include_point(self):
"Testing Envelope expand_to_include with Point as parameter."
self.e.expand_to_include(TestPoint(-1, 1))
self.assertEqual((-1, 0, 5, 5), self.e)
self.e.expand_to_include(TestPoint(10, 10))
self.assertEqual((-1, 0, 10, 10), self.e)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(EnvelopeTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| apache-2.0 | -3,063,919,043,058,236,000 | 38.389474 | 90 | 0.601015 | false |
jkarnows/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause | 2,730,777,487,060,854,000 | 24.741379 | 62 | 0.600804 | false |
D3f0/AutobahnPython | examples/wamp/rpc/profile/client.py | 26 | 2975 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import Deferred, \
DeferredList, \
gatherResults, \
returnValue, \
inlineCallbacks
from autobahn.websocket import connectWS
from autobahn.wamp import WampClientFactory, WampClientProtocol
class MyClientProtocol(WampClientProtocol):
def println(self, msg):
print msg
@inlineCallbacks
def onSessionOpen(self):
self.prefix("calc", "http://example.com/simple/calc#")
yield self.test1()
yield self.test2(1.123)
yield self.test3(1.123)
yield self.test2(0)
yield self.test3(0)
yield self.test4(0)
self.sendClose()
reactor.stop()
@inlineCallbacks
def test1(self):
r = yield self.call("calc:println", "\nStarting test 1 ..\n")
s = 0
for i in xrange(10):
s += yield self.call("calc:sum", range(10))
print s
@inlineCallbacks
def test2(self, delay):
r = yield self.call("calc:println", "\nStarting test 2 with delay = %s..\n" % delay)
s = 0
for i in xrange(10):
s += yield self.call("calc:asum", range(10), delay)
print s
@inlineCallbacks
def test3(self, delay, n = 10):
r = yield self.call("calc:println", "\nStarting test 3 with delay = %s ..\n" % delay)
d = []
for i in xrange(n):
d.append(self.call("calc:wsum", range(10), delay))
r = yield gatherResults(d).addCallback(lambda l: self.println(sum(l)))
@inlineCallbacks
def test4(self, delay, n = 10):
r = yield self.call("calc:println", "\nStarting test 4 with delay = %s ..\n" % delay)
d = []
for i in xrange(n):
d.append(self.call("calc:sum", range(10)))
d.append(self.call("calc:wsum", range(10), delay))
r = yield gatherResults(d).addCallback(lambda l: self.println(sum(l)))
if __name__ == '__main__':
log.startLogging(sys.stdout)
factory = WampClientFactory("ws://localhost:9000")
factory.protocol = MyClientProtocol
connectWS(factory)
reactor.run()
| apache-2.0 | -2,050,565,982,252,551,000 | 30.989247 | 91 | 0.587563 | false |
fkie/rosrepo | src/rosrepo/cmd_bash.py | 1 | 2161 | # coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import sys
from .workspace import get_workspace_location
from .util import path_has_prefix
def print_var(key, value, terse, export):
sys.stdout.write("%s\n" % value if terse else "%s%s=%s\n" % ("export " if export else "", key, value))
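# Illustrative note (placeholder path): print_var("ROS_WORKSPACE", "/opt/ws",
# terse=False, export=True) writes "export ROS_WORKSPACE=/opt/ws", while
# terse=True prints just "/opt/ws".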
def run(args):
wsdir = get_workspace_location(args.workspace)
if not args.var:
args.var = ["ROS_WORKSPACE", "ROS_PACKAGE_PATH"]
for key in args.var:
if key == "ROS_WORKSPACE":
print_var(key, wsdir, args.terse, args.export)
elif key == "ROS_PACKAGE_PATH":
has_srcdir = False
srcdir = os.path.join(wsdir, "src")
path = os.environ["ROS_PACKAGE_PATH"] if "ROS_PACKAGE_PATH" in os.environ else ""
new_path = []
for path in path.split(os.pathsep):
if path_has_prefix(path, srcdir):
if not has_srcdir:
has_srcdir = True
new_path.append(srcdir)
elif path:
new_path.append(path)
if not has_srcdir:
new_path.insert(0, srcdir)
print_var(key, os.pathsep.join(new_path), args.terse, args.export)
else:
if key in os.environ:
print_var(key, os.environ[key], args.terse, args.export)
else:
if not args.terse:
sys.stdout.write("# variable %s is not set\n" % key)
return 0
| apache-2.0 | 7,318,476,151,282,463,000 | 34.409836 | 106 | 0.608333 | false |
hsheep/limon_vbox | statan.py | 2 | 4061 | # Limon
# Copyright (C) 2015 Monnappa
#
# This file is part of Limon.
#
# Limon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Limon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Limon. If not, see <http://www.gnu.org/licenses/>.
"""
@author: Monnappa K A
@license: GNU General Public License 3.0
@contact: [email protected]
@Description: Static Analysis Module
"""
import magic
import hashlib
import json
import urllib2
import urllib
import sys
import os
import yara
import subprocess
class Static:
def __init__(self, mal_file):
self.file = mal_file
self.md5 = ""
def filetype(self):
if os.path.exists(self.file):
try:
m = magic.open(magic.MAGIC_NONE)
m.load()
ftype = m.file(self.file)
return ftype
except AttributeError:
ftype = magic.from_file(self.file)
return ftype
else:
print "No such file or directory:", self.file
sys.exit()
def get_file_size(self):
fr = open(self.file, 'rb')
size = len(fr.read())
fr.close()
return size
def md5sum(self):
if os.path.exists(self.file):
f = open(self.file, 'rb')
m = hashlib.md5(f.read())
self.md5 = m.hexdigest()
return self.md5
else:
print "No such file or directory:", self.file
sys.exit()
def yararules(self, rulesfile):
rules = yara.compile(rulesfile)
matches = rules.match(self.file)
return matches
def virustotal(self, key):
url = "https://www.virustotal.com/api/get_file_report.json"
md5 = self.md5
parameters = {'resource' : md5, "key" : key}
encoded_parameters = urllib.urlencode(parameters)
try:
request = urllib2.Request(url, encoded_parameters)
response = urllib2.urlopen(request)
json_obj = response.read()
json_obj_dict = json.loads(json_obj)
if json_obj_dict['result'] ==0:
print "\t " + "No match found for " + self.md5
else:
avresults = json_obj_dict['report'][1]
return avresults
except urllib2.URLError as error:
print "Cannot get results from Virustotal: " + str(error)
def ssdeep(self):
fhash = subprocess.check_output(["ssdeep", self.file])
splitted = fhash.split("\n")
return splitted[1]
def ssdeep_compare(self, master_ssdeep_file):
output = subprocess.check_output(["ssdeep", "-m", master_ssdeep_file, self.file])
return output
def ascii_strings(self):
output = subprocess.check_output(["strings", "-a", self.file])
return output
def unicode_strings(self):
output = subprocess.check_output(["strings", "-a", "-el", self.file])
return output
def dependencies(self):
try:
output = subprocess.check_output(["ldd", self.file])
return output
except:
pass
def elf_header(self):
output = subprocess.check_output(["readelf","-h",self.file])
return output
def program_header(self):
output = subprocess.check_output(["readelf","-l",self.file])
return output
def section_header(self):
output = subprocess.check_output(["readelf","-S",self.file])
return output
def symbols(self):
output = subprocess.check_output(["readelf","-s",self.file])
return output
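if __name__ == '__main__':
    # Editor's usage sketch, not part of Limon itself; the sample path below
    # is a placeholder and must point to an existing file.
    sample = Static("/tmp/sample.elf")
    print "type :", sample.filetype()
    print "size :", sample.get_file_size()
    print "md5  :", sample.md5sum()  # also primes self.md5 for virustotal()
    print "fuzzy:", sample.ssdeep()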
| gpl-3.0 | -8,080,967,079,609,413,000 | 28.642336 | 89 | 0.59345 | false |
bjohare/cloughjordan.ie | wp-content/themes/executive-pro/api/OpenLayers-2.13.1/tools/minimize.py | 297 | 2088 | # Minimal Python Minimizer
# Copyright 2008, Christopher Schmidt
# Released under the MIT License
#
# Taken from: http://svn.crschmidt.net/personal/python/minimize.py
# $Id: minimize.py 6 2008-01-03 06:33:35Z crschmidt $
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
def strip_comments_helper(data):
"""remove all /* */ format comments and surrounding whitespace."""
p = re.compile(r'[\s]*/\*.*?\*/[\s]*', re.DOTALL)
return p.sub('',data)
def minimize(data, exclude=None):
"""Central function call. This will call all other compression
functions. To add further compression algorithms, simply add
functions whose names end in _helper which take a string as input
and return a more compressed string as output."""
for key, item in globals().iteritems():
if key.endswith("_helper"):
func_key = key[:-7]
if not exclude or not func_key in exclude:
data = item(data)
return data
if __name__ == "__main__":
import sys
print minimize(open(sys.argv[1]).read())
| cc0-1.0 | 7,518,979,838,242,824,000 | 43.425532 | 79 | 0.713123 | false |
confluentinc/examples | clients/cloud/python/producer_ccsr.py | 1 | 3189 | #!/usr/bin/env python
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Produce messages to Confluent Cloud
# Using Confluent Python Client for Apache Kafka
# Writes Avro data, integration with Confluent Cloud Schema Registry
#
# =============================================================================
from confluent_kafka import Producer, KafkaError
from confluent_kafka.avro import AvroProducer
import json
import ccloud_lib
if __name__ == '__main__':
# Initialization
args = ccloud_lib.parse_args()
config_file = args.config_file
topic = args.topic
conf = ccloud_lib.read_ccloud_config(config_file)
# Create AvroProducer instance
p = AvroProducer({
'bootstrap.servers': conf['bootstrap.servers'],
'sasl.mechanisms': conf['sasl.mechanisms'],
'security.protocol': conf['security.protocol'],
'sasl.username': conf['sasl.username'],
'sasl.password': conf['sasl.password'],
'schema.registry.url': conf['schema.registry.url'],
'schema.registry.basic.auth.credentials.source': conf['basic.auth.credentials.source'],
'schema.registry.basic.auth.user.info': conf['schema.registry.basic.auth.user.info']
}, default_key_schema=ccloud_lib.schema_key, default_value_schema=ccloud_lib.schema_value)
# Create topic if needed
ccloud_lib.create_topic(conf, topic)
# Optional per-message on_delivery handler (triggered by poll() or flush())
# when a message has been successfully delivered or
# permanently failed delivery (after retries).
def acked(err, msg):
"""Delivery report handler called on
successful or failed delivery of message
"""
if err is not None:
print("Failed to deliver message: {}".format(err))
else:
print("Produced record to topic {} partition [{}] @ offset {}"
.format(msg.topic(), msg.partition(), msg.offset()))
for n in range(10):
name_object = ccloud_lib.Name()
name_object.name = "alice"
record_key = name_object.to_dict()
count_object = ccloud_lib.Count()
count_object.count = n
record_value = count_object.to_dict()
print("Producing Avro record: {}\t{}".format(name_object.name, count_object.count))
p.produce(topic=topic, key=record_key, value=record_value, on_delivery=acked)
# p.poll() serves delivery reports (on_delivery)
# from previous produce() calls.
p.poll(0)
p.flush(10)
print("10 messages were produced to topic {}!".format(topic))
| apache-2.0 | -5,537,956,176,325,862,000 | 37.890244 | 95 | 0.640326 | false |
PaulBrownMagic/LED_Arcade | constants.py | 1 | 1687 | import numpy as np
from games.constants import *
k = BLACK
g = GREEN
w = WHITE
r = RED
b = BLUE
m = DARK_RED
n = DARK_BLUE
p = PURPLE
o = ORANGE
y = YELLOW
NAV_SCREENS = {
"Snake":
np.array(
[
[m, w, m, m, m, m, w, m],
[w, m, m, m, m, m, m, w],
[m, w, m, m, m, m, w, m],
[m, m, g, g, g, g, m, m],
[m, m, g, m, m, m, b, m],
[m, m, g, g, g, g, m, m],
[m, m, m, m, m, g, m, m],
[m, m, g, g, g, g, m, m],
],
dtype=int),
"Purple Rain":
np.array(
[
[n, w, n, n, n, n, w, n],
[w, n, n, n, n, n, n, w],
[n, w, n, n, n, n, w, n],
[n, p, n, n, n, n, n, n],
[n, n, p, n, p, p, n, n],
[p, n, n, n, n, p, n, n],
[n, n, n, n, n, n, n, p],
[p, n, n, n, g, n, n, n],
],
dtype=int),
"Pacman":
np.array(
[
[k, w, k, k, k, k, w, k],
[w, k, k, k, k, k, k, w],
[k, w, k, k, k, k, w, k],
[o, b, o, b, b, o, b, o],
[o, o, o, k, k, k, k, k],
[o, b, o, b, b, o, b, r],
[o, b, o, o, o, y, b, k],
[o, o, o, b, b, k, k, k],
],
dtype=int)
}
EXIT_SCREEN = {
"Exit":
np.array(
[
[k, w, k, k, k, k, w, k],
[w, k, k, r, r, k, k, w],
[k, w, r, k, k, r, w, k],
[k, r, k, r, k, k, r, k],
[k, r, k, k, r, k, r, k],
[k, k, r, k, k, r, k, k],
[k, k, k, r, r, k, r, k],
[k, k, k, k, k, k, k, r],
],
dtype=int)
}
| gpl-3.0 | 5,019,395,558,668,937,000 | 22.760563 | 37 | 0.275637 | false |
russel1237/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to their weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause | 3,849,041,128,792,661,000 | 29.84127 | 78 | 0.669583 | false |
thwindbell/pox | pox/proto/dns_spy.py | 45 | 4072 | # Copyright 2011-2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This component spies on DNS replies, stores the results, and raises events
when things are looked up or when its stored mappings are updated.
Similar to NOX's DNSSpy component, but with more features.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import pox.lib.packet.dns as pkt_dns
from pox.lib.addresses import IPAddr
from pox.lib.revent import *
log = core.getLogger()
class DNSUpdate (Event):
def __init__ (self, item):
Event.__init__()
self.item = item
class DNSLookup (Event):
def __init__ (self, rr):
Event.__init__()
self.name = rr.name
self.qtype = rr.qtype
self.rr = rr
for t in pkt_dns.rrtype_to_str.values():
setattr(self, t, False)
t = pkt_dns.rrtype_to_str.get(rr.qtype)
if t is not None:
setattr(self, t, True)
setattr(self, "OTHER", False)
else:
setattr(self, "OTHER", True)
class DNSSpy (EventMixin):
_eventMixin_events = set([ DNSUpdate, DNSLookup ])
def __init__ (self, install_flow = True):
self._install_flow = install_flow
self.ip_to_name = {}
self.name_to_ip = {}
self.cname = {}
core.openflow.addListeners(self)
# Add handy function to console
core.Interactive.variables['lookup'] = self.lookup
def _handle_ConnectionUp (self, event):
if self._install_flow:
msg = of.ofp_flow_mod()
msg.match = of.ofp_match()
msg.match.dl_type = pkt.ethernet.IP_TYPE
msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL
msg.match.tp_src = 53
msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
event.connection.send(msg)
def lookup (self, something):
if something in self.name_to_ip:
return self.name_to_ip[something]
if something in self.cname:
return self.lookup(self.cname[something])
try:
return self.ip_to_name.get(IPAddr(something))
except:
return None
def _record (self, ip, name):
# Handle reverse lookups correctly?
modified = False
val = self.ip_to_name.setdefault(ip, [])
if name not in val:
val.insert(0, name)
modified = True
val = self.name_to_ip.setdefault(name, [])
if ip not in val:
val.insert(0, ip)
modified = True
return modified
def _record_cname (self, name, cname):
modified = False
val = self.cname.setdefault(name, [])
    if cname not in val:
val.insert(0, cname)
modified = True
return modified
def _handle_PacketIn (self, event):
p = event.parsed.find('dns')
if p is not None and p.parsed:
log.debug(p)
for q in p.questions:
if q.qclass != 1: continue # Internet only
self.raiseEvent(DNSLookup, q)
def process_q (entry):
if entry.qclass != 1:
# Not internet
return
if entry.qtype == pkt.dns.rr.CNAME_TYPE:
if self._record_cname(entry.name, entry.rddata):
self.raiseEvent(DNSUpdate, entry.name)
log.info("add cname entry: %s %s" % (entry.rddata, entry.name))
elif entry.qtype == pkt.dns.rr.A_TYPE:
if self._record(entry.rddata, entry.name):
self.raiseEvent(DNSUpdate, entry.name)
log.info("add dns entry: %s %s" % (entry.rddata, entry.name))
for answer in p.answers:
process_q(answer)
for addition in p.additional:
process_q(addition)
def launch (no_flow = False):
core.registerNew(DNSSpy, not no_flow)
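# Illustrative usage sketch (not part of the original component; handler names are
# assumptions): another POX module could subscribe to the events raised above, e.g.
#
#   def _handle_update (event):
#     log.info("mapping updated for %s", event.item)
#
#   def _handle_lookup (event):
#     if event.A:
#       log.info("A-record lookup for %s", event.name)
#
#   core.DNSSpy.addListener(DNSUpdate, _handle_update)
#   core.DNSSpy.addListener(DNSLookup, _handle_lookup)
#
# and the 'lookup' helper registered with core.Interactive can be called from the
# POX interactive console, e.g. lookup('example.com').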
| apache-2.0 | 2,496,770,750,653,394,400 | 26.890411 | 75 | 0.647102 | false |
michaelkirk/QGIS | python/plugins/processing/algs/lidar/fusion/CanopyModel.py | 1 | 5453 | # -*- coding: utf-8 -*-
"""
***************************************************************************
CanopyModel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : June 2014
Copyright : (C) 2014 by Agresta S. Coop.
Email : iescamochero at agresta dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputFile
from FusionAlgorithm import FusionAlgorithm
from FusionUtils import FusionUtils
class CanopyModel(FusionAlgorithm):
INPUT = 'INPUT'
OUTPUT_DTM = 'OUTPUT_DTM'
CELLSIZE = 'CELLSIZE'
XYUNITS = 'XYUNITS'
ZUNITS = 'ZUNITS'
UNITS = ['Meter', 'Feet']
GROUND = 'GROUND'
MEDIAN = 'MEDIAN'
SMOOTH = 'SMOOTH'
SLOPE = 'SLOPE'
CLASS = 'CLASS'
ADVANCED_MODIFIERS = 'ADVANCED_MODIFIERS'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Canopy Model')
self.group, self.i18n_group = self.trAlgorithm('Points')
self.addParameter(ParameterFile(
self.INPUT, self.tr('Input LAS layer')))
self.addParameter(ParameterNumber(
self.CELLSIZE, self.tr('Cellsize'), 0, None, 10.0))
self.addParameter(ParameterSelection(
self.XYUNITS, self.tr('XY Units'), self.UNITS))
self.addParameter(ParameterSelection(
self.ZUNITS, self.tr('Z Units'), self.UNITS))
self.addOutput(OutputFile(
self.OUTPUT_DTM, self.tr('DTM Output Surface'), 'dtm'))
ground = ParameterFile(
self.GROUND, self.tr('Input ground DTM layer'), False, True)
ground.isAdvanced = True
self.addParameter(ground)
median = ParameterString(
self.MEDIAN, self.tr('Median'), '', False, True)
median.isAdvanced = True
self.addParameter(median)
smooth = ParameterString(
self.SMOOTH, self.tr('Smooth'), '', False, True)
smooth.isAdvanced = True
self.addParameter(smooth)
slope = ParameterString(
self.SLOPE, self.tr('Slope'), '', False, True)
slope.isAdvanced = True
self.addParameter(slope)
class_var = ParameterString(
self.CLASS, self.tr('Class'), '', False, True)
class_var.isAdvanced = True
self.addParameter(class_var)
advance_modifiers = ParameterString(
self.ADVANCED_MODIFIERS, self.tr('Additional modifiers'), '', False, True)
advance_modifiers.isAdvanced = True
self.addParameter(advance_modifiers)
def processAlgorithm(self, progress):
commands = [os.path.join(FusionUtils.FusionPath(), 'CanopyModel.exe')]
commands.append('/verbose')
ground = self.getParameterValue(self.GROUND)
if str(ground).strip():
commands.append('/ground:' + str(ground))
median = self.getParameterValue(self.MEDIAN)
if str(median).strip():
commands.append('/median:' + str(median))
smooth = self.getParameterValue(self.SMOOTH)
if str(smooth).strip():
commands.append('/smooth:' + str(smooth))
slope = self.getParameterValue(self.SLOPE)
if str(slope).strip():
commands.append('/slope:' + str(slope))
class_var = self.getParameterValue(self.CLASS)
if str(class_var).strip():
commands.append('/class:' + str(class_var))
advance_modifiers = str(self.getParameterValue(self.ADVANCED_MODIFIERS)).strip()
if advance_modifiers:
commands.append(advance_modifiers)
commands.append(self.getOutputValue(self.OUTPUT_DTM))
commands.append(str(self.getParameterValue(self.CELLSIZE)))
commands.append(self.UNITS[self.getParameterValue(self.XYUNITS)][0])
commands.append(self.UNITS[self.getParameterValue(self.ZUNITS)][0])
commands.append('0')
commands.append('0')
commands.append('0')
commands.append('0')
files = self.getParameterValue(self.INPUT).split(';')
if len(files) == 1:
commands.append(self.getParameterValue(self.INPUT))
else:
FusionUtils.createFileList(files)
commands.append(FusionUtils.tempFileListFilepath())
FusionUtils.runFusion(commands, progress)
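# Illustrative note (an assumption, not from the original source): for a single input
# file and default parameter values, the command list assembled above corresponds to
# a command line roughly like
#
#   CanopyModel.exe /verbose output.dtm 10.0 M M 0 0 0 0 input.las
#
# i.e. optional switches first, then the output surface, cellsize, XY and Z units,
# four zeroed coordinate-system fields, and finally the input file (or a generated
# file list when several inputs are given).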
| gpl-2.0 | -8,131,902,818,092,337,000 | 41.601563 | 88 | 0.579864 | false |
yuvipanda/nbresuse | jupyter_resource_usage/config.py | 1 | 3739 | import os
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import Float
from traitlets import Int
from traitlets import List
from traitlets import TraitType
from traitlets import Union
from traitlets.config import Configurable
try:
# Traitlets >= 4.3.3
from traitlets import Callable
except ImportError:
from .utils import Callable
class PSUtilMetric(TraitType):
"""A trait describing the format to specify a metric from the psutil package"""
info_text = "A dictionary specifying the function/method name, any keyword arguments, and if a named tuple is returned, which attribute of the named tuple to select"
def validate(self, obj, value):
if isinstance(value, dict):
keys = list(value.keys())
if "name" in keys:
keys.remove("name")
if all(key in ["kwargs", "attribute"] for key in keys):
return value
self.error(obj, value)
class ResourceUseDisplay(Configurable):
"""
Holds server-side configuration for jupyter-resource-usage
"""
process_memory_metrics = List(
trait=PSUtilMetric(),
default_value=[{"name": "memory_info", "attribute": "rss"}],
)
system_memory_metrics = List(
trait=PSUtilMetric(),
default_value=[{"name": "virtual_memory", "attribute": "total"}],
)
process_cpu_metrics = List(
trait=PSUtilMetric(),
default_value=[{"name": "cpu_percent", "kwargs": {"interval": 0.05}}],
)
system_cpu_metrics = List(
trait=PSUtilMetric(), default_value=[{"name": "cpu_count"}]
)
mem_warning_threshold = Float(
default_value=0.1,
help="""
Warn user with flashing lights when memory usage is within this fraction
memory limit.
For example, if memory limit is 128MB, `mem_warning_threshold` is 0.1,
we will start warning the user when they use (128 - (128 * 0.1)) MB.
Set to 0 to disable warning.
""",
).tag(config=True)
mem_limit = Union(
trait_types=[Int(), Callable()],
help="""
Memory limit to display to the user, in bytes.
Can also be a function which calculates the memory limit.
Note that this does not actually limit the user's memory usage!
Defaults to reading from the `MEM_LIMIT` environment variable. If
set to 0, the max memory available is displayed.
""",
).tag(config=True)
@default("mem_limit")
def _mem_limit_default(self):
return int(os.environ.get("MEM_LIMIT", 0))
track_cpu_percent = Bool(
default_value=False,
help="""
Set to True in order to enable reporting of CPU usage statistics.
""",
).tag(config=True)
cpu_warning_threshold = Float(
default_value=0.1,
help="""
Warn user with flashing lights when CPU usage is within this fraction
CPU usage limit.
For example, if CPU limit is 150%, `cpu_warning_threshold` is 0.1,
we will start warning the user when they use (150 - (150 * 0.1)) %.
Set to 0 to disable warning.
""",
).tag(config=True)
cpu_limit = Union(
trait_types=[Float(), Callable()],
default_value=0,
help="""
CPU usage limit to display to the user.
Note that this does not actually limit the user's CPU usage!
Defaults to reading from the `CPU_LIMIT` environment variable. If
set to 0, the total CPU count available is displayed.
""",
).tag(config=True)
@default("cpu_limit")
def _cpu_limit_default(self):
return float(os.environ.get("CPU_LIMIT", 0))
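# Illustrative configuration sketch (an assumption, not part of this module): because
# the traits above are tagged config=True, they can be set through the usual traitlets
# mechanism, e.g. in a jupyter_notebook_config.py:
#
#   c.ResourceUseDisplay.mem_limit = 2 * 1024 ** 3       # advisory 2 GiB limit
#   c.ResourceUseDisplay.mem_warning_threshold = 0.1     # warn within the last 10%
#   c.ResourceUseDisplay.track_cpu_percent = True
#   c.ResourceUseDisplay.cpu_limit = 2.0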
| bsd-2-clause | 5,189,088,166,958,866,000 | 29.153226 | 169 | 0.622359 | false |
krieger-od/nwjs_chromium.src | build/android/pylib/local/device/local_device_environment.py | 15 | 1536 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib.base import environment
from pylib.device import adb_wrapper
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import parallelizer
class LocalDeviceEnvironment(environment.Environment):
def __init__(self, args, _error_func):
super(LocalDeviceEnvironment, self).__init__()
self._device = args.test_device
self._devices = []
self._max_tries = 1 + args.num_retries
self._tool_name = args.tool
#override
def SetUp(self):
# TODO(jbudorick): This can be refined to support filters etc.
available_devices = adb_wrapper.AdbWrapper.GetDevices()
if not available_devices:
raise device_errors.NoDevicesError
if self._device:
if self._device not in available_devices:
raise device_errors.DeviceUnreachableError(
'Could not find device %r' % self._device)
self._devices = [device_utils.DeviceUtils(self._device)]
else:
self._devices = [
device_utils.DeviceUtils(s)
for s in available_devices]
@property
def devices(self):
return self._devices
@property
def parallel_devices(self):
return parallelizer.SyncParallelizer(self._devices)
@property
def max_tries(self):
return self._max_tries
@property
def tool(self):
return self._tool_name
#override
def TearDown(self):
pass
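# Illustrative usage sketch (an assumption, not from the original source): a test runner
# would typically drive this environment roughly as
#
#   env = LocalDeviceEnvironment(args, error_func)
#   env.SetUp()                      # discovers attached devices via adb
#   for device in env.devices:       # or env.parallel_devices to fan calls out in parallel
#       run_tests_on(device)         # hypothetical helper, not part of this module
#   env.TearDown()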
| bsd-3-clause | 8,907,183,995,380,310,000 | 26.428571 | 72 | 0.699219 | false |
PepperPD/edx-pepper-platform | common/djangoapps/track/tests.py | 13 | 2915 | """Tests for student tracking"""
import mock
from django.test import TestCase
from django.core.urlresolvers import reverse, NoReverseMatch
from track.models import TrackingLog
from track.views import user_track
from nose.plugins.skip import SkipTest
class TrackingTest(TestCase):
"""
Tests that tracking logs correctly handle events
"""
def test_post_answers_to_log(self):
"""
Checks that student answer requests submitted to track.views via POST
are correctly logged in the TrackingLog db table
"""
requests = [
{"event": "my_event", "event_type": "my_event_type", "page": "my_page"},
{"event": "{'json': 'object'}", "event_type": unichr(512), "page": "my_page"}
]
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_SQL_TRACKING_LOGS': True}):
for request_params in requests:
try: # because /event maps to two different views in lms and cms, we're only going to test lms here
response = self.client.post(reverse(user_track), request_params)
except NoReverseMatch:
raise SkipTest()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'success')
tracking_logs = TrackingLog.objects.order_by('-dtcreated')
log = tracking_logs[0]
self.assertEqual(log.event, request_params["event"])
self.assertEqual(log.event_type, request_params["event_type"])
self.assertEqual(log.page, request_params["page"])
def test_get_answers_to_log(self):
"""
Checks that student answer requests submitted to track.views via GET
are correctly logged in the TrackingLog db table
"""
requests = [
{"event": "my_event", "event_type": "my_event_type", "page": "my_page"},
{"event": "{'json': 'object'}", "event_type": unichr(512), "page": "my_page"}
]
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_SQL_TRACKING_LOGS': True}):
for request_params in requests:
try: # because /event maps to two different views in lms and cms, we're only going to test lms here
response = self.client.get(reverse(user_track), request_params)
except NoReverseMatch:
raise SkipTest()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'success')
tracking_logs = TrackingLog.objects.order_by('-dtcreated')
log = tracking_logs[0]
self.assertEqual(log.event, request_params["event"])
self.assertEqual(log.event_type, request_params["event_type"])
self.assertEqual(log.page, request_params["page"])
| agpl-3.0 | 4,216,444,625,121,535,000 | 47.583333 | 116 | 0.601715 | false |
Basis/pip | pip/backwardcompat/__init__.py | 3 | 2814 | """Stuff that differs in different Python versions"""
import os
import imp
import sys
import site
__all__ = ['WindowsError']
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
"""this exception should never be raised"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = NeverUsedException
try:
#new in Python 3.3
PermissionError = PermissionError
except NameError:
PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
from io import StringIO, BytesIO
from functools import reduce
from urllib.error import URLError, HTTPError
from queue import Queue, Empty
from urllib.request import url2pathname
from urllib.request import urlretrieve
from email import message as emailmessage
import urllib.parse as urllib
import urllib.request as urllib2
import configparser as ConfigParser
import xmlrpc.client as xmlrpclib
import urllib.parse as urlparse
import http.client as httplib
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
def console_to_str(s):
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def get_http_message_param(http_message, param, default_value):
return http_message.get_param(param, default_value)
bytes = bytes
string_types = (str,)
raw_input = input
else:
from cStringIO import StringIO
from urllib2 import URLError, HTTPError
from Queue import Queue, Empty
from urllib import url2pathname, urlretrieve
from email import Message as emailmessage
import urllib
import urllib2
import urlparse
import ConfigParser
import xmlrpclib
import httplib
def b(s):
return s
def u(s):
return s
def console_to_str(s):
return s
def get_http_message_param(http_message, param, default_value):
result = http_message.getparam(param)
return result or default_value
bytes = str
string_types = (basestring,)
reduce = reduce
cmp = cmp
raw_input = raw_input
BytesIO = StringIO
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
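# Illustrative note (not part of the original module): product() mirrors
# itertools.product for environments where it is unavailable, e.g.
#
#   list(product('AB', 'xy'))          # -> [('A','x'), ('A','y'), ('B','x'), ('B','y')]
#   list(product(range(2), repeat=2))  # -> [(0, 0), (0, 1), (1, 0), (1, 1)]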
| mit | -7,101,435,583,464,615,000 | 24.125 | 69 | 0.665601 | false |
varunarya10/nova_test_latest | nova/api/openstack/compute/contrib/fixed_ips.py | 72 | 3685 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
import webob.exc
from nova.api.openstack import extensions
from nova import exception
from nova.i18n import _
from nova import objects
authorize = extensions.extension_authorizer('compute', 'fixed_ips')
class FixedIPController(object):
def show(self, req, id):
"""Return data about the given fixed ip."""
context = req.environ['nova.context']
authorize(context)
attrs = ['network', 'instance']
try:
fixed_ip = objects.FixedIP.get_by_address(context, id,
expected_attrs=attrs)
except exception.FixedIpNotFoundForAddress as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
except exception.FixedIpInvalid as ex:
raise webob.exc.HTTPBadRequest(explanation=ex.format_message())
fixed_ip_info = {"fixed_ip": {}}
if fixed_ip is None:
msg = _("Fixed IP %s has been deleted") % id
raise webob.exc.HTTPNotFound(explanation=msg)
fixed_ip_info['fixed_ip']['cidr'] = str(fixed_ip.network.cidr)
fixed_ip_info['fixed_ip']['address'] = str(fixed_ip.address)
if fixed_ip.instance:
fixed_ip_info['fixed_ip']['hostname'] = fixed_ip.instance.hostname
fixed_ip_info['fixed_ip']['host'] = fixed_ip.instance.host
else:
fixed_ip_info['fixed_ip']['hostname'] = None
fixed_ip_info['fixed_ip']['host'] = None
return fixed_ip_info
def action(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
if 'reserve' in body:
return self._set_reserved(context, id, True)
elif 'unreserve' in body:
return self._set_reserved(context, id, False)
else:
raise webob.exc.HTTPBadRequest(
explanation="No valid action specified")
def _set_reserved(self, context, address, reserved):
try:
fixed_ip = objects.FixedIP.get_by_address(context, address)
fixed_ip.reserved = reserved
fixed_ip.save()
except exception.FixedIpNotFoundForAddress:
msg = _("Fixed IP %s not found") % address
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.FixedIpInvalid as ex:
raise webob.exc.HTTPBadRequest(explanation=ex.format_message())
return webob.Response(status_int=202)
class Fixed_ips(extensions.ExtensionDescriptor):
"""Fixed IPs support."""
name = "FixedIPs"
alias = "os-fixed-ips"
namespace = "http://docs.openstack.org/compute/ext/fixed_ips/api/v2"
updated = "2012-10-18T19:25:27Z"
def get_resources(self):
member_actions = {'action': 'POST'}
resources = []
resource = extensions.ResourceExtension('os-fixed-ips',
FixedIPController(),
member_actions=member_actions)
resources.append(resource)
return resources
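# Illustrative request sketch (an assumption, not from the original source): the
# member action registered above means a reservation toggle is a POST whose body has
# a single key selecting the operation, e.g.
#
#   POST .../os-fixed-ips/{address}/action
#   {"reserve": null}    # or {"unreserve": null}
#
# while show() returns a payload shaped like
#   {"fixed_ip": {"cidr": ..., "address": ..., "hostname": ..., "host": ...}}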
| apache-2.0 | 4,144,579,086,615,899,600 | 36.222222 | 78 | 0.61791 | false |
anas-taji/sale-workflow | __unported__/sale_fiscal_position_update/sale.py | 37 | 3620 | # -*- coding: utf-8 -*-
#
#
# Sale Fiscal Position Update module for OpenERP
# Copyright (C) 2011-2014 Julius Network Solutions SARL <[email protected]>
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Mathieu Vatel <mathieu _at_ julius.fr>
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm
from openerp.tools.translate import _
class sale_order(orm.Model):
_inherit = "sale.order"
def fiscal_position_change(
self, cr, uid, ids, fiscal_position, order_line,
context=None):
'''Function executed by the on_change on the fiscal_position field
of a sale order ; it updates taxes on all order lines'''
assert len(ids) in (0, 1), 'One ID max'
fp_obj = self.pool['account.fiscal.position']
res = {}
line_dict = self.resolve_2many_commands(
cr, uid, 'order_line', order_line, context=context)
lines_without_product = []
if fiscal_position:
fp = fp_obj.browse(cr, uid, fiscal_position, context=context)
else:
fp = False
for line in line_dict:
# Reformat line_dict so as to be compatible with what is
# accepted in res['value']
for key, value in line.iteritems():
if isinstance(value, tuple) and len(value) == 2:
line[key] = value[0]
if line.get('product_id'):
product = self.pool['product.product'].browse(
cr, uid, line.get('product_id'), context=context)
taxes = product.taxes_id
tax_ids = fp_obj.map_tax(
cr, uid, fp, taxes, context=context)
line['tax_id'] = [(6, 0, tax_ids)]
else:
lines_without_product.append(line.get('name'))
res['value'] = {}
res['value']['order_line'] = line_dict
if lines_without_product:
res['warning'] = {'title': _('Warning')}
if len(lines_without_product) == len(line_dict):
res['warning']['message'] = _(
"The Sale Order Lines were not updated to the new "
"Fiscal Position because they don't have Products.\n"
"You should update the Taxes of each "
"Sale Order Line manually.")
else:
display_line_names = ''
for name in lines_without_product:
display_line_names += "- %s\n" % name
res['warning']['message'] = _(
"The following Sale Order Lines were not updated "
"to the new Fiscal Position because they don't have a "
"Product:\n %s\nYou should update the "
"Taxes of these Sale Order Lines manually."
                ) % display_line_names
return res
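# Illustrative note (an assumption, not from the original module): this method is meant
# to be wired as the on_change handler of the fiscal_position field in the sale order
# form view, e.g. on_change="fiscal_position_change(fiscal_position, order_line)", so
# that changing the fiscal position re-maps the taxes of every line that has a product
# and warns about the lines that could not be updated.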
| agpl-3.0 | -6,561,602,966,098,915,000 | 42.095238 | 78 | 0.575691 | false |
xzturn/tensorflow | tensorflow/compiler/tests/gather_test.py | 25 | 9047 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Gather Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GatherTest(xla_test.XLATestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, adds an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testScalar1D(self):
with self.session() as session, self.test_scope():
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in self.all_tf_types:
for indices in 4, [4], [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = session.run(gather_t, feed_dict={params: params_np})
np_val = constant_op.constant(params_np[indices])
self.assertAllEqual(np_val, gather_val)
def testScalar2D(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(gather_t, feed_dict={params: params_np})
expected = constant_op.constant(
np.take(params_np, 2, axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testSimpleTwoD32(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(gather_t, feed_dict={params: params_np})
expected = constant_op.constant(
np.take(params_np, [0, 1, 0, 2], axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testSimpleTwoD32_Int64Indices(self):
if np.int64 not in self.int_types:
return
with self.session() as session, self.test_scope():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
# The indices must be in bounds for any axis.
indices_np = np.array([0, 1, 0, 2])
for dtype in self.all_tf_types:
for axis in 0, 1, -1:
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices = array_ops.placeholder(dtype=dtypes.int64)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = session.run(
gather_t, feed_dict={
params: params_np,
indices: indices_np
})
expected = constant_op.constant(
np.take(params_np, [0, 1, 0, 2], axis=axis), dtype)
self.assertAllEqual(expected, gather_val)
def testHigherRank(self):
"""Check that scalar and empty indices shapes work as well."""
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in self.all_tf_types:
for axis in 0, 1, 2, 3, -1, -2:
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.session() as sess, self.test_scope():
tf_params = array_ops.placeholder(dtype=dtype)
tf_indices = constant_op.constant(indices, dtype=dtypes.int32)
gather = array_ops.gather(tf_params, tf_indices, axis=axis)
gather_value = sess.run(gather, feed_dict={tf_params: params})
gather_np = constant_op.constant(
np.take(params, indices, axis=axis), dtype)
self.assertAllEqual(gather_np, gather_value)
def testIndicesWithDifferentDimensions(self):
with self.session():
for dtype in self.numeric_tf_types:
params = array_ops.placeholder(dtype=dtype)
indices = array_ops.placeholder(dtype=np.int32)
with self.test_scope():
gather = array_ops.gather(params, indices)
self.assertAllEqual(
7, gather.eval(feed_dict={params: [4, 7, 2], indices: 1}))
self.assertAllEqual(
[7], gather.eval(feed_dict={params: [4, 7, 2], indices: [1]}))
self.assertAllEqual(
[[7]], gather.eval(feed_dict={params: [4, 7, 2], indices: [[1]]}))
def testGatherPrecision(self):
with self.session() as session, self.test_scope():
data = np.array([[0, 0, 0, 0], [0, 2 * (1 + np.exp2(-8)), 0, 0],
[0, 0, 0, 0], [0.015789, 0.0985, 0.55789, 0.3842]])
indices = np.array([1, 2, 3, 1])
dtype = dtypes.float32
params_np = self._buildParams(data, dtype)
params = array_ops.placeholder(dtype=dtype)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = session.run(gather_t, feed_dict={params: params_np})
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
class GatherBenchmark(test.Benchmark):
"""Microbenchmarks for the gather op."""
def _benchmarkGather(self, name, axis, gather_indices, use_xla_jit):
def BuilderFn():
inputs = variables.Variable(
array_ops.zeros([100, 100, 10, 100, 50], dtype=dtypes.float32),
dtype=dtypes.float32,
name='input')
indices = variables.Variable(
gather_indices, dtype=dtypes.int32, name='indices')
gather_t = array_ops.gather(inputs, indices, axis=axis)
return '%s.axis%d' % (name, axis), [gather_t]
xla_test.Benchmark(self, BuilderFn, use_xla_jit=use_xla_jit, device='cpu')
def _benchmarkSliceGather(self, axis, use_xla_jit):
"""Benchmarks a gather op that's really a dynamic slice."""
self._benchmarkGather('slice_gather', axis, [1], use_xla_jit)
def _benchmarkNontrivialGather(self, axis, use_xla_jit):
self._benchmarkGather('nontrivial_gather', axis, [9, 1, 0, 2] * 4,
use_xla_jit)
def benchmarkSliceGatherAxis0(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=False)
def benchmarkSliceGatherAxis0XLA(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=True)
def benchmarkSliceGatherAxis1(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=False)
def benchmarkSliceGatherAxis1XLA(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=True)
def benchmarkSliceGatherAxis4(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=False)
def benchmarkSliceGatherAxis4XLA(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=True)
def benchmarkNontrivialGatherAxis0(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=False)
def benchmarkNontrivialGatherAxis0XLA(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=True)
def benchmarkNontrivialGatherAxis1(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=False)
def benchmarkNontrivialGatherAxis1XLA(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=True)
def benchmarkNontrivialGatherAxis4(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=False)
def benchmarkNontrivialGatherAxis4XLA(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=True)
if __name__ == '__main__':
test.main()
| apache-2.0 | 8,877,263,278,294,215,000 | 39.936652 | 80 | 0.636344 | false |
mackjoner/peewee | setup.py | 1 | 2733 | import os
import sys
from distutils.core import setup
from distutils.extension import Extension
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
readme = f.read()
f.close()
setup_kwargs = {}
try:
from Cython.Distutils import build_ext
except ImportError:
cython_installed = False
else:
cython_installed = True
speedups_ext_module = Extension(
'playhouse._speedups',
['playhouse/speedups.pyx'])
sqlite_ext_module = Extension(
'playhouse._sqlite_ext',
['playhouse/_sqlite_ext.pyx'])
def check_libsqlite():
import shutil
import tempfile
from textwrap import dedent
import distutils.ccompiler
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
libraries = ['sqlite3']
c_code = dedent("""
#include <sqlite3.h>
int main(int argc, char **argv) {
return (sqlite3_libversion_number() > 3080000) ? 0 : 1;
}""")
tmp_dir = tempfile.mkdtemp(prefix='tmp_peewee_')
binary = os.path.join(tmp_dir, 'test_peewee')
filename = binary + '.c'
with open(filename, 'w') as fh:
fh.write(c_code)
compiler = distutils.ccompiler.new_compiler()
assert isinstance(compiler, distutils.ccompiler.CCompiler)
distutils.sysconfig.customize_compiler(compiler)
try:
compiler.link_executable(
compiler.compile([filename]),
binary,
libraries=libraries,
output_dir=tmp_dir)
except CompileError:
print('libsqlite3 compile error')
return False
except LinkError:
print('libsqlite3 link error')
return False
finally:
shutil.rmtree(tmp_dir)
return True
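# Illustrative note (not part of the original script): check_libsqlite() follows the
# common distutils "compile and link a throwaway C program" probe. As written it only
# verifies that sqlite3.h is available and that -lsqlite3 links; the test binary is
# never executed, so the version comparison inside main() is not actually evaluated
# by this probe.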
ext_modules = []
if cython_installed:
ext_modules.append(speedups_ext_module)
if check_libsqlite() and sys.version_info[0] == 2:
# Sorry, no python 3.
ext_modules.append(sqlite_ext_module)
if ext_modules:
setup_kwargs.update(
cmdclass={'build_ext': build_ext},
ext_modules=ext_modules)
setup(
name='peewee',
version=__import__('peewee').__version__,
description='a little orm',
long_description=readme,
author='Charles Leifer',
author_email='[email protected]',
url='http://github.com/coleifer/peewee/',
packages=['playhouse'],
py_modules=['peewee', 'pwiz'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
test_suite='tests',
scripts = ['pwiz.py'],
**setup_kwargs
)
| mit | 7,474,014,866,984,116,000 | 26.059406 | 67 | 0.634834 | false |
lbartoletti/QGIS | tests/src/python/test_qgssearchwidgetwrapper.py | 25 | 21843 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSearchWidgetWrapper.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-05'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.gui import (QgsSearchWidgetWrapper,
QgsDefaultSearchWidgetWrapper,
QgsValueMapSearchWidgetWrapper,
QgsValueRelationSearchWidgetWrapper,
QgsCheckboxSearchWidgetWrapper,
QgsDateTimeSearchWidgetWrapper,
QgsRelationReferenceSearchWidgetWrapper)
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsProject,
QgsRelation
)
from qgis.PyQt.QtCore import QDateTime, QDate, QTime
from qgis.PyQt.QtWidgets import QWidget
from qgis.testing import start_app, unittest
start_app()
class PyQgsSearchWidgetWrapper(unittest.TestCase):
def testFlagToString(self):
# test converting QgsSearchWidgetWrapper.FilterFlag to string
tests = [QgsSearchWidgetWrapper.EqualTo,
QgsSearchWidgetWrapper.NotEqualTo,
QgsSearchWidgetWrapper.GreaterThan,
QgsSearchWidgetWrapper.LessThan,
QgsSearchWidgetWrapper.GreaterThanOrEqualTo,
QgsSearchWidgetWrapper.LessThanOrEqualTo,
QgsSearchWidgetWrapper.Between,
QgsSearchWidgetWrapper.CaseInsensitive,
QgsSearchWidgetWrapper.Contains,
QgsSearchWidgetWrapper.DoesNotContain,
QgsSearchWidgetWrapper.IsNull,
QgsSearchWidgetWrapper.IsNotNull,
QgsSearchWidgetWrapper.IsNotBetween
]
for t in tests:
self.assertTrue(len(QgsSearchWidgetWrapper.toString(t)) > 0)
def testExclusiveFlags(self):
# test flag exclusive/non exclusive
exclusive = QgsSearchWidgetWrapper.exclusiveFilterFlags()
non_exclusive = QgsSearchWidgetWrapper.nonExclusiveFilterFlags()
for e in exclusive:
self.assertFalse(e in non_exclusive)
class PyQgsDefaultSearchWidgetWrapper(unittest.TestCase):
def testCreateExpression(self):
""" Test creating an expression using the widget"""
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer&field=flddate:datetime",
"test", "memory")
parent = QWidget()
w = QgsDefaultSearchWidgetWrapper(layer, 0)
w.initWidget(parent)
line_edit = w.lineEdit()
line_edit.setText('test')
case_sensitive = w.caseSensitiveCheckBox()
case_sensitive.setChecked(False)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), 'lower("fldtxt")=lower(\'test\')')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), 'lower("fldtxt")<>lower(\'test\')')
case_sensitive.setChecked(True)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'test\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'test\'')
case_sensitive.setChecked(False)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.Contains), '"fldtxt" ILIKE \'%test%\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.DoesNotContain), 'NOT ("fldtxt" ILIKE \'%test%\')')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.StartsWith), '"fldtxt" ILIKE \'test%\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EndsWith), '"fldtxt" ILIKE \'%test\'')
case_sensitive.setChecked(True)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.Contains), '"fldtxt" LIKE \'%test%\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.DoesNotContain), 'NOT ("fldtxt" LIKE \'%test%\')')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.StartsWith), '"fldtxt" LIKE \'test%\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EndsWith), '"fldtxt" LIKE \'%test\'')
case_sensitive.setChecked(False)
# numeric field
parent = QWidget()
w = QgsDefaultSearchWidgetWrapper(layer, 1)
w.initWidget(parent)
# may need updating if widget layout changes:
line_edit = w.lineEdit()
line_edit.setText('5.5')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=5.5')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>5.5')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"fldint">5.5')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"fldint"<5.5')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"fldint">=5.5')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"fldint"<=5.5')
# date/time/datetime
parent = QWidget()
w = QgsDefaultSearchWidgetWrapper(layer, 2)
w.initWidget(parent)
# may need updating if widget layout changes:
line_edit = w.lineEdit()
line_edit.setText('2015-06-03')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"flddate"=\'2015-06-03\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"flddate"<>\'2015-06-03\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"flddate">\'2015-06-03\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"flddate"<\'2015-06-03\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"flddate">=\'2015-06-03\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"flddate"<=\'2015-06-03\'')
class PyQgsValueMapSearchWidgetWrapper(unittest.TestCase):
def testCreateExpression(self):
""" Test creating an expression using the widget"""
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
w = QgsValueMapSearchWidgetWrapper(layer, 0)
config = {"map": [{"val1": 1},
{"val2": 200}]}
w.setConfig(config)
c = w.widget()
# first, set it to the "select value" item
c.setCurrentIndex(0)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '')
c.setCurrentIndex(1)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'1\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'1\'')
c.setCurrentIndex(2)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'200\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'200\'')
# try with numeric field
w = QgsValueMapSearchWidgetWrapper(layer, 1)
w.setConfig(config)
c = w.widget()
c.setCurrentIndex(1)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=1')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>1')
class PyQgsValueRelationSearchWidgetWrapper(unittest.TestCase):
def testCreateExpression(self):
""" Test creating an expression using the widget"""
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
# setup value relation
parent_layer = QgsVectorLayer("Point?field=stringkey:string&field=intkey:integer&field=display:string", "parent", "memory")
f1 = QgsFeature(parent_layer.fields(), 1)
f1.setAttributes(['a', 1, 'value a'])
f2 = QgsFeature(parent_layer.fields(), 2)
f2.setAttributes(['b', 2, 'value b'])
f3 = QgsFeature(parent_layer.fields(), 3)
f3.setAttributes(['c', 3, 'value c'])
parent_layer.dataProvider().addFeatures([f1, f2, f3])
QgsProject.instance().addMapLayers([layer, parent_layer])
config = {"Layer": parent_layer.id(),
"Key": 'stringkey',
"Value": 'display'}
w = QgsValueRelationSearchWidgetWrapper(layer, 0)
w.setConfig(config)
c = w.widget()
# first, set it to the "select value" item
c.setCurrentIndex(0)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '')
c.setCurrentIndex(1)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'a\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'a\'')
c.setCurrentIndex(2)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'b\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'b\'')
# try with numeric field
w = QgsValueRelationSearchWidgetWrapper(layer, 1)
config['Key'] = 'intkey'
w.setConfig(config)
c = w.widget()
c.setCurrentIndex(c.findText('value c'))
self.assertEqual(c.currentIndex(), 3)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=3')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>3')
# try with allow null set
w = QgsValueRelationSearchWidgetWrapper(layer, 1)
config['AllowNull'] = True
w.setConfig(config)
c = w.widget()
c.setCurrentIndex(c.findText('value c'))
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=3')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>3')
# try with line edit
w = QgsValueRelationSearchWidgetWrapper(layer, 1)
config['UseCompleter'] = True
w.setConfig(config)
l = w.widget()
l.setText('value b')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=2')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>2')
class PyQgsCheckboxSearchWidgetWrapper(unittest.TestCase):
def testCreateExpression(self):
""" Test creating an expression using the widget"""
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
w = QgsCheckboxSearchWidgetWrapper(layer, 0)
config = {"CheckedState": 5,
"UncheckedState": 9}
w.setConfig(config)
c = w.widget()
# first check with string field type
c.setChecked(True)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'5\'')
c.setChecked(False)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'9\'')
# try with numeric field
w = QgsCheckboxSearchWidgetWrapper(layer, 1)
w.setConfig(config)
c = w.widget()
c.setChecked(True)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=5')
c.setChecked(False)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=9')
class PyQgsDateTimeSearchWidgetWrapper(unittest.TestCase):
def testCreateExpression(self):
""" Test creating an expression using the widget"""
layer = QgsVectorLayer("Point?field=date:date&field=time:time&field=datetime:datetime", "test", "memory")
w = QgsDateTimeSearchWidgetWrapper(layer, 0)
config = {"field_format": 'yyyy-MM-dd',
"display_format": 'yyyy-MM-dd'}
w.setConfig(config)
c = w.widget()
# first check with date field type
c.setDateTime(QDateTime(QDate(2013, 4, 5), QTime()))
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"date" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"date" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"date"=\'2013-04-05\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"date"<>\'2013-04-05\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"date">\'2013-04-05\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"date"<\'2013-04-05\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"date">=\'2013-04-05\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"date"<=\'2013-04-05\'')
# time field type
w = QgsDateTimeSearchWidgetWrapper(layer, 1)
config = {"field_format": 'HH:mm:ss',
"display_format": 'HH:mm:ss'}
w.setConfig(config)
c = w.widget()
c.setDateTime(QDateTime(QDate(2013, 4, 5), QTime(13, 14, 15)))
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"time" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"time" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"time"=\'13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"time"<>\'13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"time">\'13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"time"<\'13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"time">=\'13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"time"<=\'13:14:15\'')
# datetime field type
w = QgsDateTimeSearchWidgetWrapper(layer, 2)
config = {"field_format": 'yyyy-MM-dd HH:mm:ss',
"display_format": 'yyyy-MM-dd HH:mm:ss'}
w.setConfig(config)
c = w.widget()
c.setDateTime(QDateTime(QDate(2013, 4, 5), QTime(13, 14, 15)))
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"datetime" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"datetime" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"datetime"=\'2013-04-05 13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"datetime"<>\'2013-04-05 13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"datetime">\'2013-04-05 13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"datetime"<\'2013-04-05 13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"datetime">=\'2013-04-05 13:14:15\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"datetime"<=\'2013-04-05 13:14:15\'')
class PyQgsRelationReferenceSearchWidgetWrapper(unittest.TestCase):
def testCreateExpression(self):
""" Test creating an expression using the widget"""
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
# setup value relation
parent_layer = QgsVectorLayer("Point?field=stringkey:string&field=intkey:integer&field=display:string", "parent", "memory")
f1 = QgsFeature(parent_layer.fields(), 1)
f1.setAttributes(['a', 1, 'value a'])
f2 = QgsFeature(parent_layer.fields(), 2)
f2.setAttributes(['b', 2, 'value b'])
f3 = QgsFeature(parent_layer.fields(), 3)
f3.setAttributes(['c', 3, 'value c'])
parent_layer.dataProvider().addFeatures([f1, f2, f3])
QgsProject.instance().addMapLayers([layer, parent_layer])
relationManager = QgsProject.instance().relationManager()
relation = QgsRelation()
relation.setId('relation')
relation.setReferencingLayer(layer.id())
relation.setReferencedLayer(parent_layer.id())
relation.addFieldPair('fldtxt', 'stringkey')
self.assertTrue(relation.isValid())
relationManager.addRelation(relation)
# Everything valid
config = {'Relation': relation.id(), 'AllowNULL': True}
w = QgsRelationReferenceSearchWidgetWrapper(layer, 0, None)
w.setConfig(config)
w.widget().setForeignKey('a')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'a\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'a\'')
w.widget().setForeignKey('b')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'b\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'b\'')
w.widget().setForeignKey('c')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'c\'')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'c\'')
w.widget().setForeignKey(None)
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt" IS NULL')
self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt" IS NOT NULL')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -7,183,082,521,320,770,000 | 53.335821 | 131 | 0.686444 | false |
cakeboss893/volatility | volatility/plugins/procdump.py | 44 | 9643 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <[email protected]>
#
# Additional Authors:
# Mike Auty <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os
import struct
import volatility.plugins.taskmods as taskmods
import volatility.debug as debug
import volatility.obj as obj
import volatility.exceptions as exceptions
class ProcExeDump(taskmods.DllList):
"""Dump a process to an executable file sample"""
def __init__(self, config, *args, **kwargs):
taskmods.DllList.__init__(self, config, *args, **kwargs)
config.add_option('DUMP-DIR', short_option = 'D', default = None,
cache_invalidator = False,
help = 'Directory in which to dump executable files')
config.add_option("UNSAFE", short_option = "u", default = False, action = 'store_true',
help = 'Bypasses certain sanity checks when creating image')
def dump_pe(self, space, base, dump_file):
"""
Dump a PE from an AS into a file.
@param space: an AS to use
@param base: PE base address
@param dump_file: dumped file name
@returns a string status message
"""
of = open(os.path.join(self._config.DUMP_DIR, dump_file), 'wb')
try:
for offset, code in self.get_image(space, base):
of.seek(offset)
of.write(code)
result = "OK: {0}".format(dump_file)
except ValueError, ve:
result = "Error: {0}".format(ve)
except exceptions.SanityCheckException, ve:
result = "Error: {0} Try -u/--unsafe".format(ve)
finally:
of.close()
return result
def render_text(self, outfd, data):
"""Renders the tasks to disk images, outputting progress as they go"""
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
self.table_header(outfd,
[("Process(V)", "[addrpad]"),
("ImageBase", "[addrpad]"),
("Name", "20"),
("Result", "")])
for task in data:
task_space = task.get_process_address_space()
if task_space == None:
result = "Error: Cannot acquire process AS"
elif task.Peb == None:
# we must use m() here, because any other attempt to
# reference task.Peb will try to instantiate the _PEB
result = "Error: PEB at {0:#x} is paged".format(task.m('Peb'))
elif task_space.vtop(task.Peb.ImageBaseAddress) == None:
result = "Error: ImageBaseAddress at {0:#x} is paged".format(task.Peb.ImageBaseAddress)
else:
dump_file = "executable." + str(task.UniqueProcessId) + ".exe"
result = self.dump_pe(task_space,
task.Peb.ImageBaseAddress,
dump_file)
self.table_row(outfd,
task.obj_offset,
task.Peb.ImageBaseAddress,
task.ImageFileName,
result)
def round(self, addr, align, up = False):
"""Rounds down an address based on an alignment"""
if addr % align == 0:
return addr
else:
if up:
return (addr + (align - (addr % align)))
return (addr - (addr % align))
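    # Illustrative examples (not part of the original plugin), using the 0x1000-byte
    # alignment typical of PE section data:
    #   round(0x1234, 0x1000)           -> 0x1000  (rounded down)
    #   round(0x1234, 0x1000, up=True)  -> 0x2000  (rounded up)
    #   round(0x2000, 0x1000)           -> 0x2000  (already aligned)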
def get_nt_header(self, addr_space, base_addr):
"""Returns the NT Header object for a task"""
dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = base_addr,
vm = addr_space)
return dos_header.get_nt_header()
def get_code(self, addr_space, data_start, data_size, offset):
"""Returns a single section of re-created data from a file image"""
first_block = 0x1000 - data_start % 0x1000
full_blocks = ((data_size + (data_start % 0x1000)) / 0x1000) - 1
left_over = (data_size + data_start) % 0x1000
paddr = addr_space.vtop(data_start)
code = ""
# Deal with reads that are smaller than a block
if data_size < first_block:
data_read = addr_space.zread(data_start, data_size)
if paddr == None:
if self._config.verbose:
debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(data_start, offset, data_size))
code += data_read
return (offset, code)
data_read = addr_space.zread(data_start, first_block)
if paddr == None:
if self._config.verbose:
debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(data_start, offset, first_block))
code += data_read
# The middle part of the read
new_vaddr = data_start + first_block
for _i in range(0, full_blocks):
data_read = addr_space.zread(new_vaddr, 0x1000)
if addr_space.vtop(new_vaddr) == None:
if self._config.verbose:
debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(new_vaddr, offset, 0x1000))
code += data_read
new_vaddr = new_vaddr + 0x1000
# The last part of the read
if left_over > 0:
data_read = addr_space.zread(new_vaddr, left_over)
if addr_space.vtop(new_vaddr) == None:
if self._config.verbose:
debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(new_vaddr, offset, left_over))
code += data_read
return (offset, code)
def get_image(self, addr_space, base_addr):
"""Outputs an executable disk image of a process"""
nt_header = self.get_nt_header(addr_space = addr_space,
base_addr = base_addr)
soh = nt_header.OptionalHeader.SizeOfHeaders
header = addr_space.zread(base_addr, soh)
yield (0, header)
fa = nt_header.OptionalHeader.FileAlignment
for sect in nt_header.get_sections(self._config.UNSAFE):
foa = self.round(sect.PointerToRawData, fa)
if foa != sect.PointerToRawData:
debug.warning("Section start on disk not aligned to file alignment.\n")
debug.warning("Adjusted section start from {0} to {1}.\n".format(sect.PointerToRawData, foa))
yield self.get_code(addr_space,
sect.VirtualAddress + base_addr,
sect.SizeOfRawData, foa)
class ProcMemDump(ProcExeDump):
"""Dump a process to an executable memory sample"""
def replace_header_field(self, sect, header, item, value):
"""Replaces a field in a sector header"""
field_size = item.size()
start = item.obj_offset - sect.obj_offset
end = start + field_size
newval = struct.pack(item.format_string, int(value))
result = header[:start] + newval + header[end:]
return result
def get_image(self, addr_space, base_addr):
"""Outputs an executable memory image of a process"""
nt_header = self.get_nt_header(addr_space, base_addr)
sa = nt_header.OptionalHeader.SectionAlignment
shs = addr_space.profile.get_obj_size('_IMAGE_SECTION_HEADER')
yield self.get_code(addr_space, base_addr, nt_header.OptionalHeader.SizeOfImage, 0)
prevsect = None
sect_sizes = []
for sect in nt_header.get_sections(self._config.UNSAFE):
if prevsect is not None:
sect_sizes.append(sect.VirtualAddress - prevsect.VirtualAddress)
prevsect = sect
if prevsect is not None:
sect_sizes.append(self.round(prevsect.Misc.VirtualSize, sa, up = True))
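        # sect_sizes[i] is the distance from section i to the next section's
        # VirtualAddress; the last section uses its VirtualSize rounded up to
        # the section alignment.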
counter = 0
start_addr = nt_header.FileHeader.SizeOfOptionalHeader + (nt_header.OptionalHeader.obj_offset - base_addr)
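        # start_addr is the file offset of the first section header. Each header
        # is rewritten so its raw data pointer and sizes describe the in-memory
        # layout, because the body above was dumped straight from virtual memory
        # rather than rebuilt at file alignment.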
for sect in nt_header.get_sections(self._config.UNSAFE):
sectheader = addr_space.read(sect.obj_offset, shs)
# Change the PointerToRawData
sectheader = self.replace_header_field(sect, sectheader, sect.PointerToRawData, sect.VirtualAddress)
sectheader = self.replace_header_field(sect, sectheader, sect.SizeOfRawData, sect_sizes[counter])
sectheader = self.replace_header_field(sect, sectheader, sect.Misc.VirtualSize, sect_sizes[counter])
yield (start_addr + (counter * shs), sectheader)
counter += 1
| gpl-2.0 | -398,268,075,228,094,660 | 42.436937 | 157 | 0.586332 | false |
pytorch/vision | test/common_utils.py | 1 | 12886 | import os
import shutil
import tempfile
import contextlib
import unittest
import argparse
import sys
import torch
import __main__
import random
import inspect
from numbers import Number
from torch._six import string_classes
from collections import OrderedDict
import numpy as np
from PIL import Image
from _assert_utils import assert_equal
IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9
PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367"
PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG)
IN_CIRCLE_CI = os.getenv("CIRCLECI", False) == 'true'
IN_RE_WORKER = os.environ.get("INSIDE_RE_WORKER") is not None
IN_FBCODE = os.environ.get("IN_FBCODE_TORCHVISION") == "1"
CUDA_NOT_AVAILABLE_MSG = 'CUDA device not available'
CIRCLECI_GPU_NO_CUDA_MSG = "We're in a CircleCI GPU machine, and this test doesn't need cuda."
@contextlib.contextmanager
def get_tmp_dir(src=None, **kwargs):
tmp_dir = tempfile.mkdtemp(**kwargs)
if src is not None:
os.rmdir(tmp_dir)
shutil.copytree(src, tmp_dir)
try:
yield tmp_dir
finally:
shutil.rmtree(tmp_dir)
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
class MapNestedTensorObjectImpl(object):
def __init__(self, tensor_map_fn):
self.tensor_map_fn = tensor_map_fn
def __call__(self, object):
if isinstance(object, torch.Tensor):
return self.tensor_map_fn(object)
elif isinstance(object, dict):
mapped_dict = {}
for key, value in object.items():
mapped_dict[self(key)] = self(value)
return mapped_dict
elif isinstance(object, (list, tuple)):
mapped_iter = []
for iter in object:
mapped_iter.append(self(iter))
return mapped_iter if not isinstance(object, tuple) else tuple(mapped_iter)
else:
return object
def map_nested_tensor_object(object, tensor_map_fn):
impl = MapNestedTensorObjectImpl(tensor_map_fn)
return impl(object)
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
# adapted from TestCase in torch/test/common_utils to accept non-string
# inputs and set maximum binary size
class TestCase(unittest.TestCase):
precision = 1e-5
def assertEqual(self, x, y, prec=None, message='', allow_inf=False):
"""
This is copied from pytorch/test/common_utils.py's TestCase.assertEqual
"""
if isinstance(prec, str) and message == '':
message = prec
prec = None
if prec is None:
prec = self.precision
if isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, prec=prec, message=message,
allow_inf=allow_inf)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), prec=prec, message=message,
allow_inf=allow_inf)
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
def assertTensorsEqual(a, b):
super(TestCase, self).assertEqual(a.size(), b.size(), message)
if a.numel() > 0:
if (a.device.type == 'cpu' and (a.dtype == torch.float16 or a.dtype == torch.bfloat16)):
# CPU half and bfloat16 tensors don't have the methods we need below
a = a.to(torch.float32)
b = b.to(a)
if (a.dtype == torch.bool) != (b.dtype == torch.bool):
raise TypeError("Was expecting both tensors to be bool type.")
else:
if a.dtype == torch.bool and b.dtype == torch.bool:
                            # we want to respect precision but as bool doesn't support subtraction,
# boolean tensor has to be converted to int
a = a.to(torch.int)
b = b.to(torch.int)
diff = a - b
if a.is_floating_point():
# check that NaNs are in the same locations
nan_mask = torch.isnan(a)
self.assertTrue(torch.equal(nan_mask, torch.isnan(b)), message)
diff[nan_mask] = 0
# inf check if allow_inf=True
if allow_inf:
inf_mask = torch.isinf(a)
inf_sign = inf_mask.sign()
self.assertTrue(torch.equal(inf_sign, torch.isinf(b).sign()), message)
diff[inf_mask] = 0
# TODO: implement abs on CharTensor (int8)
if diff.is_signed() and diff.dtype != torch.int8:
diff = diff.abs()
max_err = diff.max()
tolerance = prec + prec * abs(a.max())
self.assertLessEqual(max_err, tolerance, message)
super(TestCase, self).assertEqual(x.is_sparse, y.is_sparse, message)
super(TestCase, self).assertEqual(x.is_quantized, y.is_quantized, message)
if x.is_sparse:
x = self.safeCoalesce(x)
y = self.safeCoalesce(y)
assertTensorsEqual(x._indices(), y._indices())
assertTensorsEqual(x._values(), y._values())
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), prec=prec,
message=message, allow_inf=allow_inf)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), prec=prec,
message=message, allow_inf=allow_inf)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
prec=prec, message=message,
allow_inf=allow_inf)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), prec=prec,
message=message, allow_inf=allow_inf)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
prec=prec, message=message,
allow_inf=allow_inf)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
prec=prec, message=message)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32), prec=prec,
message=message, allow_inf=allow_inf)
else:
assertTensorsEqual(x, y)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
super(TestCase, self).assertEqual(x, y, message)
elif type(x) == set and type(y) == set:
super(TestCase, self).assertEqual(x, y, message)
elif isinstance(x, dict) and isinstance(y, dict):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), prec=prec,
message=message, allow_inf=allow_inf)
else:
self.assertEqual(set(x.keys()), set(y.keys()), prec=prec,
message=message, allow_inf=allow_inf)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
prec=prec, message=message,
allow_inf=allow_inf)
elif is_iterable(x) and is_iterable(y):
super(TestCase, self).assertEqual(len(x), len(y), message)
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, prec=prec, message=message,
allow_inf=allow_inf)
elif isinstance(x, bool) and isinstance(y, bool):
super(TestCase, self).assertEqual(x, y, message)
elif isinstance(x, Number) and isinstance(y, Number):
inf = float("inf")
if abs(x) == inf or abs(y) == inf:
if allow_inf:
super(TestCase, self).assertEqual(x, y, message)
else:
self.fail("Expected finite numeric values - x={}, y={}".format(x, y))
return
super(TestCase, self).assertLessEqual(abs(x - y), prec, message)
else:
super(TestCase, self).assertEqual(x, y, message)
@contextlib.contextmanager
def freeze_rng_state():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
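# cycle_over([a, b, c]) yields (a, b), (a, c), (b, a), (b, c), (c, a), (c, b),
# i.e. every ordered pair of distinct objects.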
def cycle_over(objs):
for idx, obj1 in enumerate(objs):
for obj2 in objs[:idx] + objs[idx + 1:]:
yield obj1, obj2
def int_dtypes():
return torch.testing.integral_types()
def float_dtypes():
return torch.testing.floating_types()
@contextlib.contextmanager
def disable_console_output():
with contextlib.ExitStack() as stack, open(os.devnull, "w") as devnull:
stack.enter_context(contextlib.redirect_stdout(devnull))
stack.enter_context(contextlib.redirect_stderr(devnull))
yield
def cpu_and_gpu():
import pytest # noqa
return ('cpu', pytest.param('cuda', marks=pytest.mark.needs_cuda))
def needs_cuda(test_func):
import pytest # noqa
return pytest.mark.needs_cuda(test_func)
def _create_data(height=3, width=3, channels=3, device="cpu"):
# TODO: When all relevant tests are ported to pytest, turn this into a module-level fixture
tensor = torch.randint(0, 256, (channels, height, width), dtype=torch.uint8, device=device)
pil_img = Image.fromarray(tensor.permute(1, 2, 0).contiguous().cpu().numpy())
return tensor, pil_img
def _create_data_batch(height=3, width=3, channels=3, num_samples=4, device="cpu"):
# TODO: When all relevant tests are ported to pytest, turn this into a module-level fixture
batch_tensor = torch.randint(
0, 256,
(num_samples, channels, height, width),
dtype=torch.uint8,
device=device
)
return batch_tensor
def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None):
np_pil_image = np.array(pil_image)
if np_pil_image.ndim == 2:
np_pil_image = np_pil_image[:, :, None]
pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1)))
if msg is None:
msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor)
assert_equal(tensor.cpu(), pil_tensor, check_stride=False, msg=msg)
def _assert_approx_equal_tensor_to_pil(tensor, pil_image, tol=1e-5, msg=None, agg_method="mean",
allowed_percentage_diff=None):
# TODO: we could just merge this into _assert_equal_tensor_to_pil
np_pil_image = np.array(pil_image)
if np_pil_image.ndim == 2:
np_pil_image = np_pil_image[:, :, None]
pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))).to(tensor)
if allowed_percentage_diff is not None:
        # Assert that less than a given percentage of pixels differ
assert (tensor != pil_tensor).to(torch.float).mean() <= allowed_percentage_diff
# error value can be mean absolute error, max abs error
# Convert to float to avoid underflow when computing absolute difference
tensor = tensor.to(torch.float)
pil_tensor = pil_tensor.to(torch.float)
err = getattr(torch, agg_method)(torch.abs(tensor - pil_tensor)).item()
assert err < tol
def _test_fn_on_batch(batch_tensors, fn, scripted_fn_atol=1e-8, **fn_kwargs):
transformed_batch = fn(batch_tensors, **fn_kwargs)
for i in range(len(batch_tensors)):
img_tensor = batch_tensors[i, ...]
transformed_img = fn(img_tensor, **fn_kwargs)
assert_equal(transformed_img, transformed_batch[i, ...])
if scripted_fn_atol >= 0:
scripted_fn = torch.jit.script(fn)
# scriptable function test
s_transformed_batch = scripted_fn(batch_tensors, **fn_kwargs)
torch.testing.assert_close(transformed_batch, s_transformed_batch, rtol=1e-5, atol=scripted_fn_atol)
| bsd-3-clause | -8,803,150,722,910,903,000 | 40.301282 | 112 | 0.568602 | false |
cherylyli/stress-aid | env/lib/python3.5/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py | 353 | 14161 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
# pylint:disable=protected-access
import warnings
import re
import sys
from . import base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import _ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
# Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent + 2)
next_element = next_element.getnext()
elif isinstance(element, str) or isinstance(element, bytes):
# Text in a fragment
assert isinstance(element, str) or sys.version_info[0] == 2
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
# Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent + 2)
elif element.tag == comment_type:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
else:
assert isinstance(element, etree._Element)
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>" % (' ' * indent, prefix,
infosetFilter.fromXmlName(tag)))
else:
rv.append("|%s<%s>" % (' ' * indent,
infosetFilter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = infosetFilter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = infosetFilter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
implementation = etree
def __init__(self, namespaceHTMLElements, fullTree=False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
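            # dict subclass that mirrors every attribute assignment into the
            # wrapped lxml element's attrib, coercing names that are not valid XML.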
def __init__(self, element, value=None):
if value is None:
value = {}
self._element = element
dict.__init__(self, value) # pylint:disable=non-parent-init-called
for key, value in self.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = infosetFilter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = infosetFilter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return infosetFilter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = infosetFilter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = infosetFilter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
self.commentClass = Comment
# self.fragmentClass = builder.DocumentFragment
base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(list(element))
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name:
warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
self.doctype = None
else:
coercedName = self.infosetFilter.coerceElement(name)
if coercedName != name:
warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(coercedName, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
assert parent is None or parent is self.document
assert self.document._elementTree is None
self.initial_comments.append(data)
def insertCommentMain(self, data, parent=None):
if (parent == self.document and
self.document._elementTree.getroot()[-1].tag == comment_type):
warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
super(TreeBuilder, self).insertComment(data, parent)
def insertRoot(self, token):
"""Create the document root"""
# Because of the way libxml2 works, it doesn't seem to be possible to
# alter information like the doctype after the tree has been parsed.
# Therefore we need to use the built-in parser to create our initial
# tree, after which we can add elements like normal
docStr = ""
if self.doctype:
assert self.doctype.name
docStr += "<!DOCTYPE %s" % self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += (' PUBLIC "%s" ' %
(self.infosetFilter.coercePubid(self.doctype.publicId or "")))
if self.doctype.systemId:
sysid = self.doctype.systemId
if sysid.find("'") >= 0 and sysid.find('"') >= 0:
warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
sysid = sysid.replace("'", 'U00027')
if sysid.find("'") >= 0:
docStr += '"%s"' % sysid
else:
docStr += "'%s'" % sysid
else:
docStr += "''"
docStr += ">"
if self.doctype.name != token["name"]:
warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
root = etree.fromstring(docStr)
# Append the initial comments:
for comment_token in self.initial_comments:
comment = self.commentClass(comment_token["data"])
root.addprevious(comment._element)
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
root.tag = etree_tag
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
# Reset to the default insert comment function
self.insertComment = self.insertCommentMain
| mit | 6,003,815,002,988,796,000 | 37.585831 | 121 | 0.557305 | false |
elemel/tics | lib/tics/image.py | 1 | 3627 | # Copyright (c) 2009 Mikael Lind
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import random, struct
from OpenGL.GL import *
from tics.triangle import Triangle
class Image(object):
def __init__(self, (width, height), triangles):
self.__width = width
self.__height = height
self.__triangles = tuple(triangles)
@property
def resolution(self):
return self.__width, self.__height
def draw(self):
glBegin(GL_TRIANGLES)
for triangle in self.__triangles:
triangle.draw()
glEnd()
@staticmethod
def generate(resolution, triangle_count):
triangles = [Triangle.generate() for _ in xrange(triangle_count)]
return Image(resolution, triangles)
@staticmethod
def read(f):
width, height, triangle_count = struct.unpack("!HHH", f.read(6))
triangles = [Triangle.read(f) for _ in xrange(triangle_count)]
return Image((width, height), triangles)
@staticmethod
def load(path):
f = open(path, "rb")
try:
return Image.read(f)
finally:
f.close()
def write(self, f):
f.write(struct.pack("!HHH", self.__width, self.__height,
len(self.__triangles)))
for triangle in self.__triangles:
triangle.write(f)
def save(self, path):
f = open(path, "wb")
try:
self.write(f)
finally:
f.close()
def mutate(self):
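        # With probability 1/2 mutate one randomly chosen triangle in place;
        # otherwise either move a triangle to a new position in the draw order
        # or replace one with a freshly generated triangle.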
if random.random() < 0.5:
triangles = list(self.__triangles)
i = random.randrange(len(triangles))
triangles[i] = triangles[i].mutate()
return Image((self.__width, self.__height), triangles)
else:
mutate_func = random.choice([self.__move_triangle,
self.__replace_triangle])
return mutate_func()
def __move_triangle(self):
triangles = list(self.__triangles)
i = random.randrange(len(triangles))
j = random.randrange(len(triangles))
triangle = triangles.pop(i)
triangles.insert(j, triangle)
return Image((self.__width, self.__height), triangles)
def __replace_triangle(self):
triangles = list(self.__triangles)
i = random.randrange(len(triangles))
if random.random() < 0.5:
j = len(triangles)
else:
j = random.randrange(len(triangles))
triangles.pop(i)
triangles.insert(j, Triangle.generate())
return Image((self.__width, self.__height), triangles)
| mit | 7,257,524,811,728,129,000 | 33.875 | 73 | 0.624483 | false |
yxwzaxns/cowry | server/core/utils.py | 1 | 4227 | """functions helper."""
from ast import literal_eval
import base64
import os
import hashlib
import random
import uuid
import time
import shutil
import re
import socket
import _thread
import OpenSSL
import redis
def addAppPath(path):
"""Add a path to sys path."""
os.sys.path.append(path)
def getCwd():
"""pass."""
return os.getcwd()
def checkAbsPath(path):
"""pass."""
return os.path.isabs(path)
def prettySize(num, suffix='B'):
"""pass."""
num = int(num)
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "{:.3f} {}{}".format(num, unit, suffix)
num /= 1024.0
def getSizeByPath(filepath):
"""pass."""
return os.path.getsize(filepath)
def getBaseNameByPath(filepath):
"""pass."""
return os.path.basename(filepath)
def getDirNameByPath(filepath):
"""pass."""
return os.path.dirname(filepath)
def calculateHashCodeForFile(filepath):
"""pass."""
try:
with open(filepath, 'rb') as f:
fileHashCode = hashlib.md5(f.read()).hexdigest()
except Exception as e:
return (1, str(e))
return fileHashCode
def calculateHashCodeForString(string, method='md5'):
"""pass."""
return getattr(hashlib, method)(string.encode('utf8')).hexdigest()
# return hashlib.md5(str.encode('utf8')).hexdigest()
def calculateFingerprintForSSHKey(line):
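    # Expects an OpenSSH public key line ("ssh-rsa AAAA... comment") and returns
    # the MD5 fingerprint of the decoded key blob as colon-separated hex byte pairs.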
key = base64.b64decode(line.strip().split()[1].encode('ascii'))
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a+b for a,b in zip(fp_plain[::2], fp_plain[1::2]))
def check_public_key(key):
# key = base64.b64decode(line.strip().split()[1].encode('ascii'))
# fp_plain = hashlib.md5(key).hexdigest()
return True
def generateRandomDigitFromRange(start, end):
"""pass."""
return random.randrange(start, end)
def rebuildDictFromBytes(bytestr):
"""pass."""
return literal_eval(bytestr.decode('utf8'))
def startNewThread(work, params=()):
"""pass."""
if params:
_thread.start_new_thread(work, params)
else:
_thread.start_new_thread(work, ())
def seperateFileName(filename):
"""pass."""
return os.path.splitext(filename)
def getFileContent(filepath, method= ''):
"""pass."""
mode = 'r{}'.format(method)
with open(filepath, mode) as f:
content = f.read()
return content
def generateAuthToken():
"""pass."""
return uuid.uuid4().hex.upper()
def generateGUID():
return uuid.uuid1().hex.upper()
def getCurrentTime():
"""pass."""
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def joinFilePath(*params):
"""pass."""
params = [x for x in params]
return os.path.join(*params)
def deleteFile(filepath):
"""pass."""
try:
os.remove(filepath)
except Exception as e:
return (1, str(e))
else:
return (0, 'ok')
def copyfile(src, dst):
"""pass."""
try:
shutil.copyfile(src, dst)
except Exception as e:
return (1, str(e))
else:
return (0, 'ok')
def getenv(name):
"""pass."""
return os.getenv(name)
def setenv(name, value):
"""pass."""
os.environ[name] = str(value)
def makeDirs(filepath):
"""pass."""
return os.makedirs(filepath)
def delfolder(folderpath):
"""pass."""
if checkFolderExists(folderpath):
shutil.rmtree(folderpath)
def checkFileExists(filepath):
"""pass."""
return os.path.isfile(filepath)
def checkFolderExists(path):
"""pass."""
return os.path.isdir(path)
def verifyDomain(domain):
"""pass."""
reg = r'^[a-z0-9]([a-z0-9-]+\.){1,}[a-z0-9]+\Z'
return re.search(reg, domain)
def getHostAddr():
"""pass."""
return socket.gethostbyname(socket.gethostname())
def importCert(path):
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, path)
def getCertInfo(path):
filehash = calculateHashCodeForFile(path)
with open(path, 'r') as f:
certfile = f.read()
cert = importCert(certfile)
cert_digest = cert.digest("sha256")
cert_info = {'digest': cert_digest.decode(),
'filehash': filehash}
return cert_info
def send_info(info):
pass
| mit | -8,886,317,746,345,027,000 | 22.098361 | 77 | 0.616513 | false |
xorpaul/check_mk | web/plugins/userdb/user_attributes.py | 6 | 3483 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
declare_user_attribute(
"force_authuser",
Checkbox(
title = _("Visibility of Hosts/Services"),
label = _("Only show hosts and services the user is a contact for"),
help = _("When this option is checked, then the status GUI will only "
"display hosts and services that the user is a contact for - "
"even if he has the permission for seeing all objects."),
),
permission = "general.see_all"
)
declare_user_attribute(
"force_authuser_webservice",
Checkbox(
title = _("Visibility of Hosts/Services (Webservice)"),
label = _("Export only hosts and services the user is a contact for"),
help = _("When this option is checked, then the Multisite webservice "
"will only export hosts and services that the user is a contact for - "
"even if he has the permission for seeing all objects."),
),
permission = "general.see_all"
)
declare_user_attribute(
"disable_notifications",
Checkbox(
title = _("Disable Notifications"),
label = _("Temporarily disable <b>all</b> notifications!"),
help = _("When this option is active the you will not get <b>any</b> "
"alerts or other notifications via email, SMS or similar. "
"This overrides all other notification settings or rules, so make "
"sure that you know what you do."),
),
permission = "general.disable_notifications",
domain = "check_mk",
)
declare_user_attribute(
"start_url",
TextAscii(title = _("Start-URL to display in main frame"),
help = _("When you point your browser to the Multisite GUI, usually the dashboard "
"is shown in the main (right) frame. You can replace this with any other "
"URL you like here."),
size = 80,
default_value = "dashboard.py",
attrencode = True),
domain = "multisite")
| gpl-2.0 | -5,502,059,362,757,719,000 | 44.233766 | 97 | 0.530864 | false |
quarkslab/irma | probe/modules/antivirus/bitdefender/bitdefender.py | 1 | 2967 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
import os
import tempfile
from pathlib import Path
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class BitdefenderForUnices(AntivirusUnix):
name = "Bitdefender Antivirus Scanner (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
# create a temporary filename
fd, self._log_path = tempfile.mkstemp()
self._log_path = Path(self._log_path)
os.close(fd)
# scan tool variables
self.scan_args = (
"--action=ignore", # action to take for an infected file
"--no-list", # do not display scanned files
"--log={log}".format(log=self._log_path)
)
self.scan_patterns = [
re.compile('(?P<file>\S+)\s+(infected:|suspected:)\s+'
'(?P<name>.+?)$', re.IGNORECASE | re.MULTILINE),
]
def __del__(self):
if hasattr(self, '_log_path') and self._log_path.exists():
self._log_path.unlink()
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def check_scan_results(self, paths, res):
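        # bdscan writes its report to the file passed via --log rather than to
        # stdout, so the log contents are substituted for stdout before the
        # generic pattern matching in the base class.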
retcode, _, stderr = res
stdout = self._log_path.read_text()
return super().check_scan_results(paths, (retcode, stdout, stderr))
def get_version(self):
"""return the version of the antivirus"""
return self._run_and_parse(
'--version',
regexp='(?P<version>\d+(\.\d+)+)',
group='version')
def get_database(self):
"""return list of files in the database"""
# extract folder where are installed definition files
search_paths = [
Path('/opt/BitDefender-scanner/var/lib/scan/Plugins/'),
]
return self.locate('*', search_paths, syspath=False)
def get_scan_path(self):
"""return the full path of the scan tool"""
return self.locate_one("bdscan")
def get_virus_database_version(self):
"""Return the Virus Database version"""
self._run_and_parse(
'--info',
regexp='Engine signatures: (?P<dbversion>\d+)',
group='dbversion')
| apache-2.0 | 4,776,286,396,992,539,000 | 31.966667 | 75 | 0.568588 | false |