repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
---|---|---|---|---|
flavour/cert
|
refs/heads/master
|
modules/s3/pyvttbl/dictset.py
|
4
|
# Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
"""This module contains the DictSet class"""
# Python 2 to 3 workarounds
import sys
if sys.version_info[0] == 2:
_xrange = xrange
elif sys.version_info[0] == 3:
from functools import reduce
_xrange = range
from copy import copy, deepcopy
import collections
# for unique_combinations method
def _rep_generator(A, times, each):
"""like r's rep function, but returns a generator
Examples:
>>> g=_rep_generator([1,2,3],times=1,each=3)
>>> [v for v in g]
[1, 1, 1, 2, 2, 2, 3, 3, 3]
>>> g=_rep_generator([1,2,3],times=3,each=1)
>>> [v for v in g]
[1, 2, 3, 1, 2, 3, 1, 2, 3]
>>> g=_rep_generator([1,2,3],times=2,each=2)
>>> [v for v in g]
[1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3]
"""
return (a for t in _xrange(times) for a in A for e in _xrange(each))
class DictSet(dict):
    """A dictionary of sets that behaves like a set."""

    def __init__(*args, **kwds):  # args[0] -> 'self'
        """
        DictSet() -> new empty dictionary of sets
        DictSet(mapping) -> new dictionary of sets initialized from a
            mapping object's (key, value) pairs.
            Because the values become sets they must be iterable.
        DictSet(iterable) -> new dictionary of sets initialized as if via:
            d = DictSet()
            for k, v in iterable:
                d[k] = set(v)
        DictSet(**kwargs) -> new dictionary of sets initialized with the
            name=value pairs in the keyword argument list.
            For example: DictSet(one=[1], two=[2])
        """
        # self is received through *args so that 'self' remains usable
        # as an ordinary keyword key, e.g. DictSet(self='abc', other='efg')
        if len(args) == 1:
            args[0].update({}, **kwds)
        elif len(args) == 2:
            args[0].update(args[1], **kwds)
        elif len(args) > 2:
            raise TypeError(
                'DictSet expected at most 1 arguments, got %d'
                % (len(args) - 1))

    def update(*args, **kwds):  # args[0] -> 'self'
        """
        DS.update(E, **F) -> None.
        Update DS from the union of DictSet/dict/iterable E and F.

        If E has a .keys() method, does:
            for k in E:
                DS[k] |= set(E[k])
        If E lacks .keys() method, does:
            for (k, v) in E:
                DS[k] |= set(v)
        In either case, this is followed by:
            for k in F:
                DS[k] |= set(F[k])

        DS|=E <==> DS.update(E)
        """
        if len(args) > 2:
            raise TypeError(
                'DictSet expected at most 1 arguments, got %d'
                % (len(args) - 1))

        # Validate the positional argument before mutating anything so
        # a bad argument cannot leave self partially updated.
        elif len(args) == 2:
            # Hashable lives in collections.abc on Python 3 and was
            # removed from the collections namespace in Python 3.10.
            try:
                from collections.abc import Hashable
            except ImportError:  # Python 2
                from collections import Hashable

            obj = args[1]
            if type(obj) == DictSet:
                # a DictSet was validated when it was built
                pass
            elif hasattr(obj, '__getitem__'):
                if hasattr(obj, 'keys'):
                    # obj is a dict or dict subclass
                    for k, val in obj.items():
                        if not isinstance(k, Hashable):
                            raise TypeError(
                                "unhashable type: '%s'" % type(k).__name__)
                        if not hasattr(val, '__iter__'):
                            # py2 str has no __iter__ but is iterable
                            if not isinstance(val, str):
                                raise TypeError(
                                    "'%s' object is not iterable"
                                    % type(val).__name__)
                else:
                    # obj is a list/tuple (or subclass) of (k, v) pairs
                    for item in obj:
                        try:
                            (k, val) = item
                        except Exception:
                            raise TypeError(
                                'could not unpack arg to key/value pairs')
                        if not isinstance(k, Hashable):
                            raise TypeError(
                                "unhashable type: '%s'" % type(k).__name__)
                        if not hasattr(val, '__iter__'):
                            if not isinstance(val, str):
                                raise TypeError(
                                    "'%s' object is not iterable"
                                    % type(val).__name__)
            else:
                # obj is not subscriptable, e.g. an int or float
                raise TypeError(
                    "'%s' object is not iterable" % type(obj).__name__)

        # Validate the keyword arguments; their keys are identifiers and
        # therefore always hashable, so only the values need checking.
        for (k, val) in kwds.items():
            if not hasattr(val, '__iter__'):
                if not isinstance(val, str):
                    raise TypeError(
                        "'%s' object is not iterable" % type(val).__name__)

        # At this point everything validated; apply the positional arg...
        if len(args) == 2:
            obj = args[1]
            if hasattr(obj, 'keys'):
                items = obj.items()
            else:
                items = obj
            for (k, val) in items:
                # '.keys()' deliberately bypasses DictSet.__contains__,
                # which hides keys mapped to empty sets
                if k in args[0].keys():
                    args[0][k] |= set(val)
                else:
                    args[0][k] = set(val)

        # ...and then the keyword arguments.
        for (k, val) in kwds.items():
            if k in args[0].keys():
                args[0][k] |= set(val)
            else:
                args[0][k] = set(val)

    def _new_combined(self, E, setop):
        """Return a new DictSet made from a deep copy of self where each
        set has been combined with the matching set of E via the named
        in-place set method; keys whose result is empty are dropped."""
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))
        result = deepcopy(self)
        for k in set(result.keys()) | set(E.keys()):
            result.setdefault(k, [])
            getattr(result[k], setop)(E.get(k, []))
            if not result[k]:
                del result[k]  # delete if empty set
        return result

    def _combine_update(self, E, setop):
        """In-place analogue of _new_combined over the *non-empty* keys
        of self and E; keys whose result is empty are removed."""
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))
        for k in set(self) | set(E):
            self.setdefault(k, [])
            getattr(self[k], setop)(E.get(k, []))
            if len(self[k]) == 0:
                del self[k]

    def __ior__(self, E):  # overloads |=
        """
        DS |= E -> DS rebound to the union of DS and E.

        NOTE(review): despite the docs elsewhere equating this with
        update(), it delegates to union() and therefore returns a *new*
        DictSet with empty sets dropped; the augmented assignment rebinds
        the name rather than mutating in place.
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))
        return self.union(E)

    def __eq__(self, E):  # overloads ==
        """
        Returns the equality comparison of DS with E typed as a DictSet.
        If E cannot be broadcast into a DictSet returns False.

        DS==E <==> DS.__eq__(E)
        """
        # coercion fails when E is not a mapping with iterable values
        try:
            E = DictSet(E)
        except Exception:
            return False

        # keys mapped to empty sets are treated as absent, so compare
        # only the non-empty key sets first
        if (set(k for (k, v) in self.items() if len(v) != 0) !=
                set(k for (k, v) in E.items() if len(v) != 0)):
            return False

        # same keys; equal iff every symmetric difference is empty
        return all(len(self.get(k, []) ^ E.get(k, [])) == 0
                   for k in self.keys())

    def __ne__(self, E):  # overloads !=
        """
        Returns the non-equality comparison of DS with E typed as a
        DictSet. If E cannot be broadcast into a DictSet returns True.

        DS!=E <==> DS.__ne__(E)
        """
        # exact logical negation of __eq__ (including the coercion
        # failure path, where __eq__ is False and this is True)
        return not self.__eq__(E)

    def issubset(self, E):
        """
        Report whether all the sets of this DictSet are subsets of E.

        DS<=E <==> DS.issubset(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))
        if self == E == {}:
            return True
        return all(self.get(k, []) <= E.get(k, [])
                   for k in set(self) | set(E))

    def __le__(self, E):  # overloads <=
        """
        Report whether all the sets of this DictSet are subsets of E.

        DS<=E <==> DS.issubset(E)
        """
        return self.issubset(E)

    def issuperset(self, E):
        """
        Report whether all the sets of this DictSet are supersets of E.

        DS>=E <==> DS.issuperset(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))
        if self == E == {}:
            return True
        return all(self.get(k, []) >= E.get(k, [])
                   for k in set(self) | set(E))

    def __ge__(self, E):  # overloads >=
        """
        Report whether all the sets of this DictSet are supersets of E.

        DS>=E <==> DS.issuperset(E)
        """
        return self.issuperset(E)

    def union(self, E):
        """
        Return the union of the sets of self with the sets of E.
        (i.e. all elements that are in either sets of the DictSets.)

        DS|E <==> DS.union(E)
        """
        return self._new_combined(E, 'update')

    def __or__(self, E):  # overloads |
        """
        Return the union of the sets of self with the sets of E.

        DS|E <==> DS.union(E)
        """
        return self.union(E)

    def intersection(self, E):
        """
        Return the intersection of the sets of self with the sets of E.
        (i.e. elements that are common to all of the sets of the
        DictSets.)

        DS&E <==> DS.intersection(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))
        # an effectively-empty E intersects everything down to nothing
        if E == {}:
            return DictSet()
        return self._new_combined(E, 'intersection_update')

    def __and__(self, E):  # overloads &
        """
        Return the intersection of the sets of self with the sets of E.

        DS&E <==> DS.intersection(E)
        """
        return self.intersection(E)

    def difference(self, E):
        """
        Return the difference of the sets of self with the sets of E.
        (i.e. all elements that are in the sets of this DictSet but
        not the others.)

        DS-E <==> DS.difference(E)
        """
        return self._new_combined(E, 'difference_update')

    def __sub__(self, E):  # overloads -
        """
        Return the difference of the sets of self with the sets of E.

        DS-E <==> DS.difference(E)
        """
        return self.difference(E)

    def symmetric_difference(self, E):
        """
        Return the symmetric difference of the sets of self with the
        sets of E.
        (i.e. for each DictSet all elements that are in exactly one
        of the sets.)

        DS^E <==> DS.symmetric_difference(E)
        """
        return self._new_combined(E, 'symmetric_difference_update')

    def __xor__(self, E):  # overloads ^
        """
        Return the symmetric difference of the sets of self with the
        sets of E.

        DS^E <==> DS.symmetric_difference(E)
        """
        return self.symmetric_difference(E)

    def intersection_update(self, E):
        """
        Update a DictSet with the intersection of itself and E.

        DS&=E <==> DS.intersection_update(E)
        """
        self._combine_update(E, 'intersection_update')

    def __iand__(self, E):  # overloads &=
        """
        DS &= E -> DS rebound to the intersection of DS and E.

        NOTE(review): delegates to intersection(), so the augmented
        assignment rebinds to a new DictSet instead of mutating in place.
        """
        return self.intersection(E)

    def difference_update(self, E):
        """
        Update a DictSet with the difference of itself and E.

        DS-=E <==> DS.difference_update(E)
        """
        self._combine_update(E, 'difference_update')

    def __isub__(self, E):  # overloads -=
        """
        DS -= E -> DS rebound to the difference of DS and E.

        NOTE(review): delegates to difference(), so the augmented
        assignment rebinds to a new DictSet instead of mutating in place.
        """
        return self.difference(E)

    def symmetric_difference_update(self, E):
        """
        Update a DictSet with the symmetric difference of itself and E.

        DS^=E <==> DS.symmetric_difference_update(E)
        """
        self._combine_update(E, 'symmetric_difference_update')

    def __ixor__(self, E):  # overloads ^=
        """
        DS ^= E -> DS rebound to the symmetric difference of DS and E.

        NOTE(review): delegates to symmetric_difference(), so the
        augmented assignment rebinds to a new DictSet instead of
        mutating in place.
        """
        return self.symmetric_difference(E)

    def add(self, k, v=None):
        """
        Add an element v to a set DS[k].
        This has no effect if the element v is already present in DS[k].
        When v is not supplied adds a new (empty) set at DS[k].
        Raises TypeError if k is not hashable.
        """
        if k not in self.keys():
            self[k] = set()
        if v is not None:
            self[k].add(v)

    def __setitem__(self, k, v):
        """DS.__setitem__(k, v) <==> x[k]=set(v)"""
        if isinstance(v, set):
            super(DictSet, self).__setitem__(k, v)
        else:
            # set(v) raises TypeError for non-iterable v
            super(DictSet, self).__setitem__(k, set(v))

    def __contains__(self, k):
        """
        True if DS has a key k and len(DS[k])!=0, else False

        DS.__contains__(k) <==> k in DS
        """
        return k in [key for (key, val) in self.items() if len(val) > 0]

    def __iter__(self):
        """
        Iterate over keys with non-zero lengths.

        DS.__iter__() <==> for k in DS
        """
        for (key, val) in self.items():
            if len(val) > 0:
                yield key

    def get(self, k, v=None):
        """
        DS.get(k[,v]) -> DS[k] if k in DS, else set(v).
        v defaults to None.

        Note that 'k in DS' uses DictSet.__contains__, so keys mapped
        to empty sets are treated as absent here.
        """
        if k in self:
            return self[k]
        if v is None:
            return None
        return set(v)

    def setdefault(self, k, v=None):
        """
        DS.setdefault(k[,v]) -> DS.get(k, v), also set DS[k]=set(v)
        if k not in DS. v defaults to None (in which case nothing is
        stored and None is returned).
        """
        if k in self:
            return self[k]
        if v is None:
            return None
        super(DictSet, self).__setitem__(k, set(v))
        return self[k]

    def copy(self):
        """DS.copy() -> a shallow copy of DS."""
        return copy(self)

    def remove(self, k, v=None):
        """
        Remove element v from a set DS[k]; it must be a member.
        If the element v is not a member of DS[k], raise a KeyError.
        If v is not supplied removes DS[k]; it must be an item.
        If DS[k] is not an item, raise a KeyError.
        """
        if k not in self.keys():
            raise KeyError(k)
        if v is not None:
            self[k].remove(v)
        else:
            del self[k]

    def discard(self, k, v=None):
        """
        Remove element v from a set DS[k] if present; otherwise do
        nothing. If v is not supplied removes DS[k], doing nothing if
        k is absent.
        """
        if v is not None:
            try:
                self[k].discard(v)
            except Exception:
                pass
        else:
            try:
                del self[k]
            except Exception:
                pass

    # borrowed from the collections.OrderedDict in the standard library
    def __repr__(self):
        """DS.__repr__() <==> repr(DS)"""
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def unique_combinations(self, keys=None):
        """
        Returns a generator yielding the unique combinations of
        elements. Both the keys of DS and the elements of the sets
        are sorted.

        When a key list (the keys argument) is supplied only the
        unique combinations of the sets specified by the keys are
        yielded by the generator.

        The combinations are sorted by slowest repeating to fastest
        repeating.
        """
        # when keys is not supplied, use every key
        if keys is None:
            keys = sorted(self.keys())

        # eliminate keys to sets that have zero cardinality; iterating
        # a non-iterable keys argument raises TypeError here
        try:
            keys = [k for k in keys if k in self]
        except TypeError:
            raise TypeError("'%s' object is not iterable"
                            % type(keys).__name__)

        if len(keys) == 0:
            # preserves original behavior: an "empty" generator that
            # yields a single None
            yield
        else:
            # the number of unique combinations is the product of the
            # cardinalities of the non-zero sets
            N = reduce(int.__mul__, (len(self[k]) for k in keys))

            # build a dict of repeating generators: the last key cycles
            # fastest, the first key slowest
            gen_dict = {}
            each = 1
            times = 0
            prev_n = 0
            for i, k in enumerate(reversed(keys)):
                if i != 0:
                    each *= prev_n
                # exact integer division: N is the product of all
                # cardinalities, so len(self[k]) * each divides it
                times = N // (len(self[k]) * each)
                prev_n = len(self[k])
                gen_dict[k] = _rep_generator(sorted(self[k]), times, each)

            for _ in _xrange(N):
                yield [next(gen_dict[k]) for k in keys]

    @classmethod
    def fromkeys(cls, seq, values=None):
        """
        Create a new DictSet with keys from seq and values set to
        set(values). When values is not supplied the values are
        initialized as empty sets.
        """
        d = cls()
        for key in seq:
            d[key] = set() if values is None else set(values)
        return d
|
Belgabor/django
|
refs/heads/master
|
django/contrib/gis/db/backends/spatialite/models.py
|
403
|
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from SpatiaLite.
    """
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    type = models.CharField(max_length=30)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()

    class Meta:
        # maps onto the SpatiaLite-managed metadata table; Django must
        # not create or migrate it
        db_table = 'geometry_columns'
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'

    def __unicode__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % \
            (self.f_table_name, self.f_geometry_column,
             self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from SpatiaLite.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)

    @property
    def wkt(self):
        """Return the WKT form of this reference system, derived from
        its proj.4 string via GDAL."""
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt

    class Meta:
        # maps onto the SpatiaLite-managed metadata table; Django must
        # not create or migrate it
        db_table = 'spatial_ref_sys'
        managed = False
|
daspecster/google-cloud-python
|
refs/heads/master
|
language/unit_tests/test_entity.py
|
1
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestEntity(unittest.TestCase):

    @staticmethod
    def _get_target_class():
        from google.cloud.language.entity import Entity
        return Entity

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def test_constructor_defaults(self):
        from google.cloud.language.entity import Mention
        from google.cloud.language.entity import MentionType
        from google.cloud.language.entity import TextSpan

        name = 'Italian'
        entity_type = 'LOCATION'
        wiki_url = 'http://en.wikipedia.org/wiki/Italy'
        metadata = {
            'foo': 'bar',
            'wikipedia_url': wiki_url,
        }
        salience = 0.19960518
        mentions = [Mention(
            mention_type=MentionType.PROPER,
            text=TextSpan(content='Italian', begin_offset=0),
        )]
        entity = self._make_one(
            name, entity_type, metadata, salience, mentions)

        # Each constructor argument must land on the matching attribute.
        for attribute, expected in (
                ('name', name),
                ('entity_type', entity_type),
                ('metadata', metadata),
                ('salience', salience),
                ('mentions', mentions)):
            self.assertEqual(getattr(entity, attribute), expected)

    def test_from_api_repr(self):
        from google.cloud.language.entity import EntityType
        from google.cloud.language.entity import Mention
        from google.cloud.language.entity import MentionType

        klass = self._get_target_class()
        name = 'Italy'
        entity_type = EntityType.LOCATION
        salience = 0.223
        wiki_url = 'http://en.wikipedia.org/wiki/Italy'
        mention1 = 'Italy'
        mention2 = 'To Italy'
        mention3 = 'From Italy'
        payload = {
            'name': name,
            'type': entity_type,
            'salience': salience,
            'metadata': {'wikipedia_url': wiki_url},
            'mentions': [
                {'text': {'content': mention1, 'beginOffset': 3},
                 'type': 'PROPER'},
                {'text': {'content': mention2, 'beginOffset': 5},
                 'type': 'PROPER'},
                {'text': {'content': mention3, 'beginOffset': 8},
                 'type': 'PROPER'},
            ],
        }

        entity = klass.from_api_repr(payload)

        self.assertEqual(entity.name, name)
        self.assertEqual(entity.entity_type, entity_type)
        self.assertEqual(entity.salience, salience)
        self.assertEqual(entity.metadata, {'wikipedia_url': wiki_url})

        # Each payload mention should round-trip into a Mention object
        # with its text, begin offset, and mention type preserved.
        self.assertEqual(len(entity.mentions), 3)
        expected = zip(entity.mentions,
                       (mention1, mention2, mention3),
                       (3, 5, 8))
        for mention, content, offset in expected:
            self.assertIsInstance(mention, Mention)
            self.assertEqual(str(mention), content)
            self.assertEqual(mention.text.begin_offset, offset)
            self.assertEqual(mention.mention_type, MentionType.PROPER)
class TestMention(unittest.TestCase):
    # Canonical API payload shared by the tests below.
    PAYLOAD = {
        'text': {'content': 'Greece', 'beginOffset': 42},
        'type': 'PROPER',
    }

    def test_constructor(self):
        from google.cloud.language.entity import Mention
        from google.cloud.language.entity import MentionType
        from google.cloud.language.entity import TextSpan

        span = TextSpan(content='snails', begin_offset=90)
        mention = Mention(text=span, mention_type=MentionType.COMMON)

        self.assertIsInstance(mention.text, TextSpan)
        self.assertEqual(mention.text.content, 'snails')
        self.assertEqual(mention.text.begin_offset, 90)
        self.assertEqual(mention.mention_type, MentionType.COMMON)

    def test_from_api_repr(self):
        from google.cloud.language.entity import Mention
        from google.cloud.language.entity import MentionType
        from google.cloud.language.entity import TextSpan

        mention = Mention.from_api_repr(self.PAYLOAD)

        self.assertIsInstance(mention, Mention)
        self.assertIsInstance(mention.text, TextSpan)
        self.assertEqual(mention.text.content, 'Greece')
        self.assertEqual(mention.text.begin_offset, 42)
        self.assertEqual(mention.mention_type, MentionType.PROPER)

    def test_dunder_str(self):
        from google.cloud.language.entity import Mention

        mention = Mention.from_api_repr(self.PAYLOAD)
        self.assertEqual(str(mention), 'Greece')
class TestTextSpan(unittest.TestCase):

    def test_constructor(self):
        from google.cloud.language.entity import TextSpan

        text = TextSpan(content='Winston Churchill', begin_offset=1945)
        self.assertIsInstance(text, TextSpan)
        # BUG FIX: the original called
        #     assertEqual(text.content, str(text), 'Winston Churchill')
        # where the third positional argument is only the failure
        # *message*, so the expected literal was never compared.
        # Compare both values against the literal explicitly.
        self.assertEqual(text.content, 'Winston Churchill')
        self.assertEqual(str(text), 'Winston Churchill')
        self.assertEqual(text.begin_offset, 1945)

    def test_from_api_repr(self):
        from google.cloud.language.entity import TextSpan

        text = TextSpan.from_api_repr({
            'beginOffset': 1953,
            'content': 'Queen Elizabeth',
        })
        self.assertIsInstance(text, TextSpan)
        # Same assertEqual-message fix as in test_constructor above.
        self.assertEqual(text.content, 'Queen Elizabeth')
        self.assertEqual(str(text), 'Queen Elizabeth')
        self.assertEqual(text.begin_offset, 1953)
|
Laurawly/tvm-1
|
refs/heads/master
|
tests/python/contrib/test_rpc_tracker.py
|
4
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import logging
import numpy as np
import time
import multiprocessing
from tvm import rpc
def check_server_drop():
    """test when server drops"""
    try:
        from tvm.rpc import tracker, proxy, base
        from tvm.rpc.base import TrackerCode

        # server-side function the remote sessions will call
        @tvm.register_func("rpc.test2.addone")
        def addone(x):
            return x + 1

        def _put(tclient, value):
            # send a raw tracker command and discard the reply
            base.sendjson(tclient._sock, value)
            base.recvjson(tclient._sock)

        tserver = tracker.Tracker("localhost", 8888)
        tproxy = proxy.Proxy("localhost", 8881, tracker_addr=("localhost", tserver.port))
        tclient = rpc.connect_tracker("localhost", tserver.port)

        # two direct servers and two proxied servers registered under
        # different keys
        server0 = rpc.Server(
            "localhost", port=9099, tracker_addr=("localhost", tserver.port), key="abc"
        )
        server1 = rpc.Server(
            "localhost", port=9099, tracker_addr=("localhost", tserver.port), key="xyz"
        )
        server2 = rpc.Server("localhost", tproxy.port, is_proxy=True, key="xyz")
        server3 = rpc.Server("localhost", tproxy.port, is_proxy=True, key="xyz1")

        # Fault tolerence to un-handled requested value
        _put(tclient, [TrackerCode.REQUEST, "abc", "", 1])
        _put(tclient, [TrackerCode.REQUEST, "xyz1", "", 1])

        # Fault tolerence to stale worker value
        _put(tclient, [TrackerCode.PUT, "xyz", (server1.port, "abc")])
        _put(tclient, [TrackerCode.PUT, "xyz", (server1.port, "abcxxx")])
        _put(tclient, [TrackerCode.PUT, "xyz", (tproxy.port, "abcxxx11")])

        # Fault tolerence server timeout
        def check_timeout(timeout, sleeptime):
            def myfunc(remote):
                time.sleep(sleeptime)
                f1 = remote.get_function("rpc.test2.addone")
                assert f1(10) == 11

            # a session that outlives its timeout should surface as
            # RuntimeError and be tolerated
            try:
                tclient.request_and_run("xyz", myfunc, session_timeout=timeout)
            except RuntimeError:
                pass
            print(tclient.text_summary())
            try:
                remote = tclient.request("xyz", priority=0, session_timeout=timeout)
                remote2 = tclient.request("xyz", session_timeout=timeout)
                time.sleep(sleeptime)
                f1 = remote.get_function("rpc.test2.addone")
                assert f1(10) == 11
                f1 = remote2.get_function("rpc.test2.addone")
                assert f1(10) == 11
            except tvm.error.TVMError as e:
                pass
            # requests after the timeout churn must still succeed
            remote3 = tclient.request("abc")
            f1 = remote3.get_function("rpc.test2.addone")
            remote3 = tclient.request("xyz1")
            f1 = remote3.get_function("rpc.test2.addone")
            assert f1(10) == 11

        check_timeout(0.01, 0.1)
        check_timeout(2, 0)

        # tear everything down
        tserver.terminate()
        server0.terminate()
        server1.terminate()
        server2.terminate()
        server3.terminate()
        tproxy.terminate()
    except ImportError:
        print("Skip because tornado is not available")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    check_server_drop()
|
Frozenball/alembic
|
refs/heads/master
|
alembic/command.py
|
9
|
import os
from .script import ScriptDirectory
from .environment import EnvironmentContext
from . import util, autogenerate as autogen
def list_templates(config):
    """List available templates"""
    config.print_stdout("Available templates:\n")
    template_root = config.get_template_directory()
    for tempname in os.listdir(template_root):
        # each template directory carries a one-line README synopsis
        readme_path = os.path.join(template_root, tempname, 'README')
        with open(readme_path) as readme:
            synopsis = next(readme)
        config.print_stdout("%s - %s", tempname, synopsis)
    config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
    config.print_stdout("\n alembic init --template pylons ./scripts")
def init(config, directory, template='generic'):
    """Initialize a new scripts directory."""
    if os.access(directory, os.F_OK):
        raise util.CommandError("Directory %s already exists" % directory)

    template_dir = os.path.join(config.get_template_directory(),
                                template)
    if not os.access(template_dir, os.F_OK):
        raise util.CommandError("No such template %r" % template)

    util.status("Creating directory %s" % os.path.abspath(directory),
                os.makedirs, directory)

    versions = os.path.join(directory, 'versions')
    util.status("Creating directory %s" % os.path.abspath(versions),
                os.makedirs, versions)

    script = ScriptDirectory(directory)
    # BUG FIX: resolve the config file path before the loop.  Previously
    # config_file was only bound inside the loop when 'alembic.ini.mako'
    # happened to be present, so the closing util.msg() below could raise
    # NameError for templates without that file.
    config_file = os.path.abspath(config.config_file_name)
    for file_ in os.listdir(template_dir):
        file_path = os.path.join(template_dir, file_)
        if file_ == 'alembic.ini.mako':
            if os.access(config_file, os.F_OK):
                util.msg("File %s already exists, skipping" % config_file)
            else:
                script._generate_template(
                    file_path,
                    config_file,
                    script_location=directory
                )
        elif os.path.isfile(file_path):
            output_file = os.path.join(directory, file_)
            script._copy_file(
                file_path,
                output_file
            )

    util.msg("Please edit configuration/connection/logging "
             "settings in %r before proceeding." % config_file)
def revision(config, message=None, autogenerate=False, sql=False):
    """Create a new revision file."""
    script = ScriptDirectory.from_config(config)
    template_args = {
        'config': config  # Let templates use config for
                          # e.g. multiple databases
    }
    imports = set()

    environment = util.asbool(
        config.get_main_option("revision_environment")
    )

    if autogenerate:
        # autogenerate must run the migration environment so the live
        # database can be compared against the model metadata
        environment = True

        def retrieve_migrations(rev, context):
            # refuse to autogenerate against a database that is not
            # already at the head revision
            if script.get_revision(rev) is not script.get_revision("head"):
                raise util.CommandError("Target database is not up to date.")
            autogen._produce_migration_diffs(context, template_args, imports)
            return []
    elif environment:
        # env requested via config, but nothing to compute per revision
        def retrieve_migrations(rev, context):
            return []

    if environment:
        with EnvironmentContext(
            config,
            script,
            fn=retrieve_migrations,
            as_sql=sql,
            template_args=template_args,
        ):
            script.run_env()
    script.generate_revision(util.rev_id(), message, **template_args)
def upgrade(config, revision, sql=False, tag=None):
    """Upgrade to a later version."""
    script = ScriptDirectory.from_config(config)

    starting_rev = None
    if ":" in revision:
        # a start:end range is only meaningful in --sql (offline) mode
        if not sql:
            raise util.CommandError("Range revision not allowed")
        starting_rev, revision = revision.split(':', 2)

    def do_upgrade(rev, context):
        return script._upgrade_revs(revision, rev)

    with EnvironmentContext(
        config,
        script,
        fn=do_upgrade,
        as_sql=sql,
        starting_rev=starting_rev,
        destination_rev=revision,
        tag=tag
    ):
        script.run_env()
def downgrade(config, revision, sql=False, tag=None):
    """Revert to a previous version."""
    script = ScriptDirectory.from_config(config)

    starting_rev = None
    if ":" in revision:
        # a start:end range is only meaningful in --sql (offline) mode
        if not sql:
            raise util.CommandError("Range revision not allowed")
        starting_rev, revision = revision.split(':', 2)
    elif sql:
        # offline downgrades cannot query the current revision, so the
        # starting point must be given explicitly
        raise util.CommandError("downgrade with --sql requires <fromrev>:<torev>")

    def do_downgrade(rev, context):
        return script._downgrade_revs(revision, rev)

    with EnvironmentContext(
        config,
        script,
        fn=do_downgrade,
        as_sql=sql,
        starting_rev=starting_rev,
        destination_rev=revision,
        tag=tag
    ):
        script.run_env()
def history(config, rev_range=None):
    """List changeset scripts in chronological order."""
    script = ScriptDirectory.from_config(config)
    if rev_range is not None:
        if ":" not in rev_range:
            raise util.CommandError(
                "History range requires [start]:[end], "
                "[start]:, or :[end]")
        base, head = rev_range.strip().split(":")
    else:
        base = head = None

    def _display_history(config, script, base, head):
        # plain traversal between two known endpoints
        for sc in script.walk_revisions(
                base=base or "base",
                head=head or "head"):
            if sc.is_head:
                config.print_stdout("")
            config.print_stdout(sc.log_entry)

    def _display_history_w_current(config, script, base=None, head=None):
        # one endpoint is the literal "current"; resolve it by running
        # the migration environment and substituting the revision the
        # env reports into the missing slot
        def _display_current_history(rev, context):
            if head is None:
                _display_history(config, script, base, rev)
            elif base is None:
                _display_history(config, script, rev, head)
            return []

        with EnvironmentContext(
            config,
            script,
            fn=_display_current_history
        ):
            script.run_env()

    if base == "current":
        _display_history_w_current(config, script, head=head)
    elif head == "current":
        _display_history_w_current(config, script, base=base)
    else:
        _display_history(config, script, base, head)
def branches(config):
    """Show current un-spliced branch points"""
    script = ScriptDirectory.from_config(config)
    for sc in script.walk_revisions():
        if not sc.is_branch_point:
            continue
        # print the branch point followed by each of its children,
        # indented to line up under the down-revision identifier
        config.print_stdout(sc)
        for rev in sc.nextrev:
            config.print_stdout(
                "%s -> %s",
                " " * len(str(sc.down_revision)),
                script.get_revision(rev)
            )
def current(config, head_only=False):
    """Display the current revision for each database."""
    script = ScriptDirectory.from_config(config)

    def show_current(rev, context):
        rev = script.get_revision(rev)

        if head_only:
            marker = " (head)" if rev and rev.is_head else ""
            config.print_stdout("%s%s" % (
                rev.revision if rev else None, marker))
        else:
            config.print_stdout(
                "Current revision for %s: %s",
                util.obfuscate_url_pw(
                    context.connection.engine.url),
                rev)
        return []

    with EnvironmentContext(
        config,
        script,
        fn=show_current
    ):
        script.run_env()
def stamp(config, revision, sql=False, tag=None):
    """'stamp' the revision table with the given revision; don't
    run any migrations."""
    script = ScriptDirectory.from_config(config)

    def do_stamp(rev, context):
        # offline mode has no table to read; use False as the sentinel
        current_ = False if sql else context._current_rev()
        dest = script.get_revision(revision)
        dest = dest.revision if dest is not None else None
        context._update_current_rev(current_, dest)
        return []

    with EnvironmentContext(
        config,
        script,
        fn=do_stamp,
        as_sql=sql,
        destination_rev=revision,
        tag=tag
    ):
        script.run_env()
def splice(config, parent, child):
    """Join two branches, producing a new revision file.

    Placeholder: splicing is not supported yet, so this always raises
    ``NotImplementedError``.
    """
    raise NotImplementedError()
|
usmschuck/canvas
|
refs/heads/custom
|
vendor/bundle/ruby/1.9.1/gems/pygments.rb-0.5.2/vendor/simplejson/scripts/make_docs.py
|
44
|
#!/usr/bin/env python
# Build the Sphinx HTML documentation into docs/ and exit with
# sphinx-build's return code.
import os
import subprocess
import shutil
SPHINX_BUILD = 'sphinx-build'
DOCTREES_DIR = 'build/doctrees'
HTML_DIR = 'docs'
# Ensure both output directories exist before invoking sphinx-build.
for dirname in DOCTREES_DIR, HTML_DIR:
    if not os.path.exists(dirname):
        os.makedirs(dirname)
# Create an empty .nojekyll marker — presumably so GitHub Pages serves
# the generated files verbatim; confirm against the publishing setup.
open(os.path.join(HTML_DIR, '.nojekyll'), 'w').close()
res = subprocess.call([
    SPHINX_BUILD, '-d', DOCTREES_DIR, '-b', 'html', '.', 'docs',
])
# Propagate the build status as this script's exit code.
raise SystemExit(res)
|
stuntman723/rap-analyzer
|
refs/heads/master
|
rap_analyzer/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/schema.py
|
30
|
import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """PostgreSQL schema editor.

    Extends the base editor with sequence management for SERIAL columns
    and extra *_pattern_ops indexes needed for LIKE queries on text-ish
    columns outside the C locale.
    """
    sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
    sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
    sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"
    sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s"
    sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s"
    def quote_value(self, value):
        # Delegate literal quoting/escaping to psycopg2's adapter machinery.
        return psycopg2.extensions.adapt(value)
    def _model_indexes_sql(self, model):
        # Start from the base editor's index statements, then append the
        # PostgreSQL-specific pattern-ops indexes where needed.
        output = super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return output
        for field in model._meta.local_fields:
            db_type = field.db_type(connection=self.connection)
            if db_type is not None and (field.db_index or field.unique):
                # Fields with database column types of `varchar` and `text` need
                # a second index that specifies their operator class, which is
                # needed when performing correct LIKE queries outside the
                # C locale. See #12234.
                #
                # The same doesn't apply to array fields such as varchar[size]
                # and text[size], so skip them.
                if '[' in db_type:
                    continue
                if db_type.startswith('varchar'):
                    output.append(self._create_index_sql(
                        model, [field], suffix='_like', sql=self.sql_create_varchar_index))
                elif db_type.startswith('text'):
                    output.append(self._create_index_sql(
                        model, [field], suffix='_like', sql=self.sql_create_text_index))
        return output
    def _alter_column_type_sql(self, table, old_field, new_field, new_type):
        """
        Makes ALTER TYPE with SERIAL make sense.

        Returns the usual (column-change, post-actions) pair: the column
        becomes plain ``integer`` and the post-actions rebuild the backing
        sequence, wire it up as the column default, and seed it from the
        column's current MAX value.
        """
        if new_type.lower() == "serial":
            column = new_field.column
            sequence_name = "%s_%s_seq" % (table, column)
            return (
                (
                    # SERIAL is not a real type; alter the column to integer.
                    self.sql_alter_column_type % {
                        "column": self.quote_name(column),
                        "type": "integer",
                    },
                    [],
                ),
                [
                    # Drop any stale sequence, then recreate it fresh.
                    (
                        self.sql_delete_sequence % {
                            "sequence": self.quote_name(sequence_name),
                        },
                        [],
                    ),
                    (
                        self.sql_create_sequence % {
                            "sequence": self.quote_name(sequence_name),
                        },
                        [],
                    ),
                    # Make the column default to nextval() of the sequence.
                    (
                        self.sql_alter_column % {
                            "table": self.quote_name(table),
                            "changes": self.sql_alter_column_default % {
                                "column": self.quote_name(column),
                                "default": "nextval('%s')" % self.quote_name(sequence_name),
                            }
                        },
                        [],
                    ),
                    # Start the sequence after the largest existing value.
                    (
                        self.sql_set_sequence_max % {
                            "table": self.quote_name(table),
                            "column": self.quote_name(column),
                            "sequence": self.quote_name(sequence_name),
                        },
                        [],
                    ),
                ],
            )
        else:
            return super(DatabaseSchemaEditor, self)._alter_column_type_sql(
                table, old_field, new_field, new_type
            )
|
tictakk/servo
|
refs/heads/ticbranch
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/cookie_wsh.py
|
451
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
import urlparse
def _add_set_cookie(request, value):
request.extra_headers.append(('Set-Cookie', value))
def web_socket_do_extra_handshake(request):
    # The query string of the handshake URI selects the cookie operation.
    components = urlparse.urlparse(request.uri)
    command = components[4]  # element 4 of the parse result is the query
    ONE_DAY_LIFE = 'Max-Age=86400'
    if command == 'set':
        _add_set_cookie(request, '; '.join(['foo=bar', ONE_DAY_LIFE]))
    elif command == 'set_httponly':
        # Same cookie but marked httpOnly (inaccessible to page script).
        _add_set_cookie(request,
                        '; '.join(['httpOnlyFoo=bar', ONE_DAY_LIFE, 'httpOnly']))
    elif command == 'clear':
        # Max-Age=0 expires both cookies immediately.
        _add_set_cookie(request, 'foo=0; Max-Age=0')
        _add_set_cookie(request, 'httpOnlyFoo=0; Max-Age=0')
def web_socket_transfer_data(request):
    # This handler only manipulates cookies during the handshake; the
    # data-transfer phase intentionally does nothing.
    return None
|
mmcdermo/helpinghand
|
refs/heads/master
|
server/venv/lib/python2.7/site-packages/django/core/files/utils.py
|
901
|
class FileProxyMixin(object):
    """
    Forward common file-object attributes and methods to an underlying
    file object, which the subclass must expose as ``self.file``::

        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """

    def _forward(name):
        # Factory: a read-only property fetching ``name`` from self.file.
        return property(lambda self: getattr(self.file, name))

    encoding = _forward('encoding')
    fileno = _forward('fileno')
    flush = _forward('flush')
    isatty = _forward('isatty')
    newlines = _forward('newlines')
    read = _forward('read')
    readinto = _forward('readinto')
    readline = _forward('readline')
    readlines = _forward('readlines')
    seek = _forward('seek')
    softspace = _forward('softspace')
    tell = _forward('tell')
    truncate = _forward('truncate')
    write = _forward('write')
    writelines = _forward('writelines')
    xreadlines = _forward('xreadlines')

    del _forward  # keep the helper out of the public class namespace

    def __iter__(self):
        return iter(self.file)
|
akrherz/dep
|
refs/heads/main
|
scripts/util/convergance.py
|
2
|
"""Plot how a growing random sample's mean and its 2-sigma coverage
converge, saving the chart to test.png."""
import matplotlib.pyplot as plt
import random
import numpy as np

(fig, ax) = plt.subplots(1, 1)
x = []
y = []
cnts = []
data = []
for i in range(250):
    x.append(i)
    data.append(random.randint(0, 100))
    # BUG FIX: the comparisons below previously operated on the plain
    # Python list, which raises TypeError on Python 3 (list vs float) and
    # compared by type name on Python 2; use an ndarray instead.
    arr = np.array(data)
    std = np.std(arr)
    avg = np.average(arr)
    y.append(avg)
    # Count samples falling outside +/- 2 standard deviations.
    cnt = np.sum(np.where(arr > (avg + std * 2), 1, 0))
    cnt += np.sum(np.where(arr < (avg - std * 2), 1, 0))
    # Percentage of the sample within the 2-sigma band.
    cnts.append((len(data) - cnt) / float(len(data)) * 100.0)
ax.plot(x, y)
# Second y-axis for the coverage percentage, in red.
ax2 = ax.twinx()
ax2.plot(x, cnts, color="r")
ax2.set_ylabel("Percentage within 2 sigma [%]", color="r")
ax2.set_ylim(0, 102)
ax.set_xlabel("Random Sample Size Increase")
ax.set_ylabel("Average", color="b")
ax.set_ylim(0, 102)
ax.set_title("Random Sampling between 0 and 100")
ax.grid(True)
ax.set_yticks([0, 25, 50, 75, 100])
fig.savefig("test.png")
|
KagamiChan/shadowsocks
|
refs/heads/master
|
shadowsocks/local.py
|
10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import utils, daemon, encrypt, eventloop, tcprelay, udprelay,\
asyncdns
def main():
    """Start the shadowsocks local (client-side) proxy: TCP relay, UDP
    relay and async DNS resolver sharing one event loop."""
    utils.check_python()
    # fix py2exe
    if hasattr(sys, "frozen") and sys.frozen in \
            ("windows_exe", "console_exe"):
        p = os.path.dirname(os.path.abspath(sys.executable))
        os.chdir(p)
    config = utils.get_config(True)
    # May fork/detach depending on config; must happen before sockets open.
    daemon.daemon_exec(config)
    utils.print_shadowsocks()
    # Fail fast if the configured cipher/password combination is unusable.
    encrypt.try_cipher(config['password'], config['method'])
    try:
        logging.info("starting local at %s:%d" %
                     (config['local_address'], config['local_port']))
        dns_resolver = asyncdns.DNSResolver()
        # Third argument True marks these as the *local* (client) relays.
        tcp_server = tcprelay.TCPRelay(config, dns_resolver, True)
        udp_server = udprelay.UDPRelay(config, dns_resolver, True)
        loop = eventloop.EventLoop()
        dns_resolver.add_to_loop(loop)
        tcp_server.add_to_loop(loop)
        udp_server.add_to_loop(loop)
        def handler(signum, _):
            # Graceful shutdown: let in-flight connections drain.
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            tcp_server.close(next_tick=True)
            udp_server.close(next_tick=True)
        # SIGQUIT does not exist on Windows; fall back to SIGTERM there.
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)
        def int_handler(signum, _):
            # Ctrl-C: exit immediately without draining.
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)
        loop.run()
    except (KeyboardInterrupt, IOError, OSError) as e:
        logging.error(e)
        if config['verbose']:
            import traceback
            traceback.print_exc()
        # Hard exit: skip atexit/finally handlers after a fatal error.
        os._exit(1)
if __name__ == '__main__':
    main()
|
calfonso/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/system/net_logging.py
|
57
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_logging
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging
on network devices.
options:
dest:
description:
- Destination of the logs.
choices: ['console', 'host']
name:
description:
- If value of C(dest) is I(host) it indicates file-name
the host name to be notified.
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
aggregate:
description: List of logging definitions.
purge:
description:
- Purge logging not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the logging configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure console logging
net_logging:
dest: console
facility: any
level: critical
- name: remove console logging configuration
net_logging:
dest: console
state: absent
- name: configure host logging
net_logging:
dest: host
name: 1.1.1.1
facility: kernel
level: critical
- name: Configure file logging using aggregate
net_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
- name: Delete file logging using aggregate
net_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- logging console critical
"""
|
foss-transportationmodeling/rettina-server
|
refs/heads/master
|
.env/local/lib/python2.7/encodings/mac_greek.py
|
593
|
""" Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless mac-greek codec: maps via the module-level
    # encoding_table / decoding_table charmaps.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is context-free, so no state is kept between calls;
        # [0] drops the consumed-length part of the (output, length) pair.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # One byte always maps to one character, so decoding needs no state;
        # [0] drops the consumed-length part of the (output, length) pair.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream API: behaviour fully inherited from Codec + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream API: behaviour fully inherited from Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    # Registry hook called by the encodings package: bundle this codec's
    # entry points into a CodecInfo record under the name 'mac-greek'.
    return codecs.CodecInfo(
        name='mac-greek',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xb9' # 0x81 -> SUPERSCRIPT ONE
u'\xb2' # 0x82 -> SUPERSCRIPT TWO
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xb3' # 0x84 -> SUPERSCRIPT THREE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0384' # 0x8B -> GREEK TONOS
u'\xa8' # 0x8C -> DIAERESIS
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xa3' # 0x92 -> POUND SIGN
u'\u2122' # 0x93 -> TRADE MARK SIGN
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u2022' # 0x96 -> BULLET
u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
u'\u2030' # 0x98 -> PER MILLE SIGN
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xa6' # 0x9B -> BROKEN BAR
u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\xa7' # 0xAC -> SECTION SIGN
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xb0' # 0xAE -> DEGREE SIGN
u'\xb7' # 0xAF -> MIDDLE DOT
u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
u'\xac' # 0xC2 -> NOT SIGN
u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2015' # 0xD1 -> HORIZONTAL BAR
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
sureshthalamati/spark
|
refs/heads/master
|
examples/src/main/python/ml/feature_hasher_example.py
|
67
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import FeatureHasher
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("FeatureHasherExample")\
.getOrCreate()
# $example on$
dataset = spark.createDataFrame([
(2.2, True, "1", "foo"),
(3.3, False, "2", "bar"),
(4.4, False, "3", "baz"),
(5.5, False, "4", "foo")
], ["real", "bool", "stringNum", "string"])
hasher = FeatureHasher(inputCols=["real", "bool", "stringNum", "string"],
outputCol="features")
featurized = hasher.transform(dataset)
featurized.show(truncate=False)
# $example off$
spark.stop()
|
corbinmunce/domoticz
|
refs/heads/development
|
plugins/examples/Pinger.py
|
14
|
# ICMP Plugin
#
# Author: Dnpwwo, 2017 - 2018
#
"""
<plugin key="ICMP" name="Pinger (ICMP)" author="dnpwwo" version="3.1.1">
<description>
ICMP Pinger Plugin.<br/><br/>
Specify comma delimted addresses (IP or DNS names) of devices that are to be pinged.<br/>
When remote devices are found a matching Domoticz device is created in the Devices tab.
</description>
<params>
<param field="Address" label="Address(es) comma separated" width="300px" required="true" default="127.0.0.1"/>
<param field="Mode1" label="Ping Frequency" width="40px">
<options>
<option label="2" value="2"/>
<option label="3" value="3"/>
<option label="4" value="4"/>
<option label="5" value="5"/>
<option label="6" value="6"/>
<option label="8" value="8"/>
<option label="10" value="10" default="true" />
<option label="12" value="12"/>
<option label="14" value="14"/>
<option label="16" value="16"/>
<option label="18" value="18"/>
<option label="20" value="20"/>
</options>
</param>
<param field="Mode5" label="Time Out Lost Devices" width="75px">
<options>
<option label="True" value="True" default="true"/>
<option label="False" value="False" />
</options>
</param>
<param field="Mode6" label="Debug" width="150px">
<options>
<option label="None" value="0" default="true" />
<option label="Python Only" value="2"/>
<option label="Basic Debugging" value="62"/>
<option label="Basic+Messages" value="126"/>
<option label="Connections Only" value="16"/>
<option label="Connections+Python" value="18"/>
<option label="Connections+Queue" value="144"/>
<option label="All" value="-1"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
from datetime import datetime
class IcmpDevice:
    """Wraps a Domoticz ICMP "connection" used to ping one address."""
    # Class-level defaults; shadowed per instance in __init__/Open.
    Address = ""
    icmpConn = None
    def __init__(self, destination):
        self.Address = destination
        self.Open()
    def __str__(self):
        if (self.icmpConn != None):
            return str(self.icmpConn)
        else:
            return "None"
    def Open(self):
        # (Re)create the listener; replies arrive via the plugin's onMessage.
        if (self.icmpConn != None):
            self.Close()
        self.icmpConn = Domoticz.Connection(Name=self.Address, Transport="ICMP/IP", Protocol="ICMP", Address=self.Address)
        self.icmpConn.Listen()
    def Send(self):
        if (self.icmpConn == None):
            self.Open()
        else:
            self.icmpConn.Send("Domoticz")
    def Close(self):
        # Dropping the reference lets Domoticz tear the connection down.
        self.icmpConn = None
class BasePlugin:
    """Pings each configured address in round-robin, one per heartbeat,
    and mirrors reachability/latency into Domoticz devices."""
    icmpConn = None   # the IcmpDevice for the ping currently in flight
    icmpList = []     # addresses parsed from the Address parameter
    nextDev = 0       # round-robin index into icmpList
    def onStart(self):
        if Parameters["Mode6"] != "0":
            DumpConfigToLog()
            Domoticz.Debugging(int(Parameters["Mode6"]))
        Domoticz.Heartbeat(int(Parameters["Mode1"]))
        # Find devices that already exist, create those that don't
        self.icmpList = Parameters["Address"].replace(" ", "").split(",")
        for destination in self.icmpList:
            Domoticz.Debug("Endpoint '"+destination+"' found.")
            deviceFound = False
            for Device in Devices:
                if (("Name" in Devices[Device].Options) and (Devices[Device].Options["Name"] == destination)): deviceFound = True
            if (deviceFound == False):
                # Two devices per endpoint: a custom-sensor (latency in ms)
                # and an on/off switch whose Options link back to it.
                Domoticz.Device(Name=destination, Unit=len(Devices)+1, Type=243, Subtype=31, Image=17, Options={"Custom":"1;ms"}).Create()
                Domoticz.Device(Name=destination, Unit=len(Devices)+1, Type=17, Subtype=0, Image=17, Options={"Name":destination,"Related":str(len(Devices))}).Create()
        # Mark all devices as connection lost if requested
        deviceLost = 0
        if Parameters["Mode5"] == "True":
            deviceLost = 1
        for Device in Devices:
            UpdateDevice(Device, Devices[Device].nValue, Devices[Device].sValue, deviceLost)
    def onConnect(self, Connection, Status, Description):
        if (Status == 0):
            Domoticz.Log("Successful connect to: "+Connection.Address+" which is surprising because ICMP is connectionless.")
        else:
            Domoticz.Log("Failed to connect to: "+Connection.Address+", Description: "+Description)
        self.icmpConn = None
    def onMessage(self, Connection, Data):
        # Ping reply (or error) for the in-flight request arrives here.
        Domoticz.Debug("onMessage called for connection: '"+Connection.Name+"'")
        # NOTE(review): Mode6 == "1" never matches the values offered in the
        # plugin XML ("0", "2", "62", ...) — confirm intended debug trigger.
        if Parameters["Mode6"] == "1":
            DumpICMPResponseToLog(Data)
        if isinstance(Data, dict) and (Data["Status"] == 0):
            iUnit = -1
            # Locate the switch device whose Options name this endpoint.
            for Device in Devices:
                if ("Name" in Devices[Device].Options):
                    Domoticz.Debug("Checking: '"+Connection.Name+"' against '"+Devices[Device].Options["Name"]+"'")
                    if (Devices[Device].Options["Name"] == Connection.Name):
                        iUnit = Device
                        break
            if (iUnit > 0):
                # Device found, set it to On and if elapsed time suplied update related device
                UpdateDevice(iUnit, 1, "On", 0)
                relatedDevice = int(Devices[iUnit].Options["Related"])
                if ("ElapsedMs" in Data):
                    UpdateDevice(relatedDevice, Data["ElapsedMs"], str(Data["ElapsedMs"]), 0)
        else:
            # Ping failed: mark the matching switch Off (and timed out if asked).
            Domoticz.Log("Device: '"+Connection.Name+"' returned '"+Data["Description"]+"'.")
            if Parameters["Mode6"] == "1":
                DumpICMPResponseToLog(Data)
            TimedOut = 0
            if Parameters["Mode5"] == "True": TimedOut = 1
            for Device in Devices:
                if (("Name" in Devices[Device].Options) and (Devices[Device].Options["Name"] == Connection.Name)):
                    UpdateDevice(Device, 0, "Off", TimedOut)
        self.icmpConn = None
    def onHeartbeat(self):
        Domoticz.Debug("Heartbeating...")
        # No response to previous heartbeat so mark as Off
        if (self.icmpConn != None):
            for Device in Devices:
                if (("Name" in Devices[Device].Options) and (Devices[Device].Options["Name"] == self.icmpConn.Name)):
                    Domoticz.Log("Device: '"+Devices[Device].Options["Name"]+"' address '"+self.icmpConn.Address+"' - No response.")
                    TimedOut = 0
                    if Parameters["Mode5"] == "True": TimedOut = 1
                    UpdateDevice(Device, 0, "Off", TimedOut)
                    break
            self.icmpConn = None
        # Kick off the next ping in round-robin order.
        Domoticz.Debug("Heartbeating '"+self.icmpList[self.nextDev]+"'")
        self.icmpConn = IcmpDevice(self.icmpList[self.nextDev])
        self.nextDev += 1
        if (self.nextDev >= len(self.icmpList)):
            self.nextDev = 0
global _plugin
_plugin = BasePlugin()
def onStart():
    # Module-level hook required by Domoticz; delegates to the singleton.
    global _plugin
    _plugin.onStart()
def onConnect(Connection, Status, Description):
    # Module-level hook required by Domoticz; delegates to the singleton.
    global _plugin
    _plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
    # Module-level hook required by Domoticz; delegates to the singleton.
    global _plugin
    _plugin.onMessage(Connection, Data)
def onHeartbeat():
    # Module-level hook required by Domoticz; delegates to the singleton.
    global _plugin
    _plugin.onHeartbeat()
def UpdateDevice(Unit, nValue, sValue, TimedOut):
    # Make sure that the Domoticz device still exists (they can be deleted) before updating it
    if (Unit in Devices):
        # Only write when something actually changed, to avoid log/DB churn.
        if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue) or (Devices[Unit].TimedOut != TimedOut):
            Devices[Unit].Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
            Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+Devices[Unit].Name+")")
    return
def DumpConfigToLog():
    # Debug helper: log every non-empty plugin parameter and a summary of
    # each known device.
    for x in Parameters:
        if Parameters[x] != "":
            Domoticz.Log( "'" + x + "':'" + str(Parameters[x]) + "'")
    Domoticz.Log("Device count: " + str(len(Devices)))
    for x in Devices:
        Domoticz.Log("Device:           " + str(x) + " - " + str(Devices[x]))
        Domoticz.Log("Device ID:       '" + str(Devices[x].ID) + "'")
        Domoticz.Log("Device Name:     '" + Devices[x].Name + "'")
        Domoticz.Log("Device nValue:    " + str(Devices[x].nValue))
        Domoticz.Log("Device sValue:   '" + Devices[x].sValue + "'")
        Domoticz.Log("Device LastLevel: " + str(Devices[x].LastLevel))
    return
def DumpICMPResponseToLog(icmpList):
    """Debug helper: log an ICMP response structure.

    Dict payloads are logged key-by-key, recursing one level into nested
    dicts; any other payload is assumed to be raw bytes and logged decoded.
    """
    if isinstance(icmpList, dict):
        Domoticz.Log("ICMP Details ("+str(len(icmpList))+"):")
        for x in icmpList:
            if isinstance(icmpList[x], dict):
                Domoticz.Log("--->'"+x+" ("+str(len(icmpList[x]))+"):")
                for y in icmpList[x]:
                    Domoticz.Log("------->'" + y + "':'" + str(icmpList[x][y]) + "'")
            else:
                Domoticz.Log("--->'" + x + "':'" + str(icmpList[x]) + "'")
    else:
        # BUG FIX: this branch previously referenced the undefined name
        # 'Data' (NameError for any non-dict payload); the parameter is
        # 'icmpList'. Assumes bytes-like input here — TODO confirm.
        Domoticz.Log(icmpList.decode("utf-8", "ignore"))
|
fnouama/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/se/forms.py
|
311
|
# -*- coding: utf-8 -*-
"""
Swedish specific Form helpers
"""
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EMPTY_VALUES
from django.contrib.localflavor.se.utils import (id_number_checksum,
validate_id_birthday, format_personal_id_number, valid_organisation,
format_organisation_number)
__all__ = ('SECountySelect', 'SEOrganisationNumberField',
'SEPersonalIdentityNumberField', 'SEPostalCodeField')
# Matches both personal identity numbers and organisation numbers: optional
# century, YYMMDD date part, optional -/+ separator, three serial digits and
# one checksum digit.
SWEDISH_ID_NUMBER = re.compile(r'^(?P<century>\d{2})?(?P<year>\d{2})(?P<month>\d{2})(?P<day>\d{2})(?P<sign>[\-+])?(?P<serial>\d{3})(?P<checksum>\d)$')
# Five digits, optionally with a space after the third; may not start with 0.
SE_POSTAL_CODE = re.compile(r'^[1-9]\d{2} ?\d{2}$')
class SECountySelect(forms.Select):
    """Select widget whose options are the Swedish counties (län).

    Option values are the official county codes; see
    http://en.wikipedia.org/wiki/Counties_of_Sweden for the full list.
    """
    def __init__(self, attrs=None):
        # Import kept local to __init__, as in the original module layout.
        from se_counties import COUNTY_CHOICES
        super(SECountySelect, self).__init__(attrs=attrs, choices=COUNTY_CHOICES)
class SEOrganisationNumberField(forms.CharField):
    """
    Validates input as a Swedish organisation number (organisationsnummer).

    Accepts the same input as SEPersonalIdentityNumberField for sole
    proprietorships (enskild firma), except that co-ordination numbers
    are rejected.  Ordinary organisation numbers in the format NNNNNNNNNN
    are also accepted.

    Cleaned values are YYYYMMDDXXXX for sole proprietors and NNNNNNNNNN
    for all other organisations.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swedish organisation number.'),
    }
    def clean(self, value):
        value = super(SEOrganisationNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = SWEDISH_ID_NUMBER.match(value)
        if match is None:
            raise forms.ValidationError(self.error_messages['invalid'])
        groups = match.groupdict()
        # The trailing digit must agree with the computed checksum.
        if id_number_checksum(groups) != int(groups['checksum']):
            raise forms.ValidationError(self.error_messages['invalid'])
        # A genuine organisation number takes precedence ...
        if valid_organisation(groups):
            return format_organisation_number(groups)
        # ... otherwise try to interpret it as a sole proprietor's
        # personal identity number.
        try:
            birth_day = validate_id_birthday(groups, False)
            return format_personal_id_number(birth_day, groups)
        except ValueError:
            raise forms.ValidationError(self.error_messages['invalid'])
class SEPersonalIdentityNumberField(forms.CharField):
    """
    Validates input as a Swedish personal identity number (personnummer).

    Accepted formats: YYYYMMDD-XXXX, YYYYMMDDXXXX, YYMMDD-XXXX,
    YYMMDDXXXX and YYMMDD+XXXX.  The + marks a person older than 100
    years and is honoured when the date is validated.

    Both the checksum and the birth date are verified.  Co-ordination
    numbers (samordningsnummer) are accepted by default; pass
    coordination_number=False to the constructor to reject them.

    The cleaned value always has the format YYYYMMDDXXXX.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swedish personal identity number.'),
        'coordination_number': _('Co-ordination numbers are not allowed.'),
    }
    def __init__(self, coordination_number=True, *args, **kwargs):
        self.coordination_number = coordination_number
        super(SEPersonalIdentityNumberField, self).__init__(*args, **kwargs)
    def clean(self, value):
        value = super(SEPersonalIdentityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = SWEDISH_ID_NUMBER.match(value)
        if not match:
            raise forms.ValidationError(self.error_messages['invalid'])
        groups = match.groupdict()
        # The trailing digit must agree with the computed checksum.
        if id_number_checksum(groups) != int(groups['checksum']):
            raise forms.ValidationError(self.error_messages['invalid'])
        # The embedded date must be a real calendar date.
        try:
            birth_day = validate_id_birthday(groups)
        except ValueError:
            raise forms.ValidationError(self.error_messages['invalid'])
        # A day value above 60 marks a co-ordination number.
        if not self.coordination_number and int(groups['day']) > 60:
            raise forms.ValidationError(self.error_messages['coordination_number'])
        return format_personal_id_number(birth_day, groups)
class SEPostalCodeField(forms.RegexField):
    """
    Validates input as a Swedish postal code (postnummer).

    A valid code is five digits (XXXXX), optionally written with a space
    after the third digit (XXX XX).  The cleaned value never contains
    the space.
    """
    default_error_messages = {
        'invalid': _('Enter a Swedish postal code in the format XXXXX.'),
    }
    def __init__(self, *args, **kwargs):
        super(SEPostalCodeField, self).__init__(SE_POSTAL_CODE, *args, **kwargs)
    def clean(self, value):
        cleaned = super(SEPostalCodeField, self).clean(value)
        # Normalise away the optional space.
        return cleaned.replace(' ', '')
|
ezcall-net-tw/EZCall
|
refs/heads/master
|
jni/pjsip/sources/tests/pjsua/scripts-media-playrec/100_resample_lf_11_8.py
|
59
|
# $Id: 100_resample_lf_11_8.py 2052 2008-06-25 18:18:32Z nanang $
#
from inc_cfg import *
# simple test
# Endpoint config: null audio device, --quality 10, 8 kHz clock rate,
# playing an 11 kHz wav file and recording the result to an 8 kHz wav.
test_param = TestParam(
		"Resample (large filter) 11 KHZ to 8 KHZ",
		[
			InstanceParam("endpt", "--null-audio --quality 10 --clock-rate 8000 --play-file wavs/input.11.wav --rec-file wavs/tmp.8.wav")
		]
		)
|
4eek/edx-platform
|
refs/heads/master
|
lms/djangoapps/commerce/tests/test_views.py
|
85
|
""" Tests for commerce views. """
import json
from uuid import uuid4
from nose.plugins.attrib import attr
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
import mock
from student.tests.factories import UserFactory
class UserMixin(object):
    """Mixin that provides a freshly-created test user and a login helper."""
    def setUp(self):
        super(UserMixin, self).setUp()
        self.user = UserFactory()
    def _login(self):
        """Authenticate the test client as self.user."""
        credentials = {'username': self.user.username, 'password': 'test'}
        self.client.login(**credentials)
@attr('shard_1')
@ddt.ddt
class ReceiptViewTests(UserMixin, TestCase):
    """ Tests for the receipt view. """

    def test_login_required(self):
        """ The view should redirect to the login page if the user is not logged in. """
        self.client.logout()
        post_response = self.client.post(reverse('commerce:checkout_receipt'))
        self.assertEqual(post_response.status_code, 302)

    def post_to_receipt_page(self, post_data):
        """ POST the given data to the receipt page and assert a 200 response. """
        url = reverse('commerce:checkout_receipt')
        response = self.client.post(url, params={'basket_id': 1}, data=post_data)
        self.assertEqual(response.status_code, 200)
        return response

    @ddt.data('decision', 'reason_code', 'signed_field_names', None)
    def test_is_cybersource(self, post_key):
        """
        Ensure the view uses three specific POST keys to detect a request initiated by Cybersource.
        """
        self._login()
        post_data = {'decision': 'REJECT', 'reason_code': '200', 'signed_field_names': 'dummy'}
        if post_key is None:
            # All three keys present: treated as a Cybersource decision.
            expected_pattern = r"<title>(\s+)Payment Failed"
        else:
            # One key missing: the POST must NOT be treated as a Cybersource
            # decision, so the plain receipt page is rendered.
            del post_data[post_key]
            expected_pattern = r"<title>(\s+)Receipt"
        response = self.post_to_receipt_page(post_data)
        self.assertRegexpMatches(response.content, expected_pattern)

    @ddt.data('ACCEPT', 'REJECT', 'ERROR')
    def test_cybersource_decision(self, decision):
        """
        Ensure the view renders a page appropriately depending on the Cybersource decision.
        """
        self._login()
        post_data = {'decision': decision, 'reason_code': '200', 'signed_field_names': 'dummy'}
        if decision == 'ACCEPT':
            expected_pattern = r"<title>(\s+)Receipt"
        else:
            expected_pattern = r"<title>(\s+)Payment Failed"
        response = self.post_to_receipt_page(post_data)
        self.assertRegexpMatches(response.content, expected_pattern)

    @ddt.data(True, False)
    @mock.patch('commerce.views.is_user_payment_error')
    def test_cybersource_message(self, is_user_message_expected, mock_is_user_payment_error):
        """
        Ensure that the page displays the right message for the reason_code (it
        may be a user error message or a system error message).
        """
        mock_is_user_payment_error.return_value = is_user_message_expected
        self._login()
        post_data = {'decision': 'REJECT', 'reason_code': '99', 'signed_field_names': 'dummy'}
        response = self.post_to_receipt_page(post_data)
        self.assertTrue(mock_is_user_payment_error.called)
        # NOTE(review): assertTrue's second argument is a failure *message*,
        # not a comparand; this was probably meant to be assertEqual.
        # Preserved as-is to avoid changing test behavior.
        self.assertTrue(mock_is_user_payment_error.call_args[0][0], '99')
        user_message = "There was a problem with this transaction"
        system_message = "A system error occurred while processing your payment"
        if is_user_message_expected:
            expected, unexpected = user_message, system_message
        else:
            expected, unexpected = system_message, user_message
        self.assertRegexpMatches(response.content, expected)
        self.assertNotRegexpMatches(response.content, unexpected)

    @mock.patch.dict(settings.FEATURES, {"IS_EDX_DOMAIN": True})
    def test_hide_nav_header(self):
        """ Header navigation links should be hidden on the edx.org version. """
        self._login()
        post_data = {'decision': 'ACCEPT', 'reason_code': '200', 'signed_field_names': 'dummy'}
        response = self.post_to_receipt_page(post_data)
        for link_text in ("How it Works", "Find courses", "Schools & Partners"):
            self.assertNotContains(response, link_text)
|
0xddaa/pwndbg
|
refs/heads/stable
|
pwndbg/elftypes.py
|
5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mayhem/datatypes/elf.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import sys
import six
import pwndbg.arch
import pwndbg.ctypes
import pwndbg.events
# ctypes equivalents of the Elf32_*/Elf64_* scalar typedefs used by the
# structure definitions below.
Elf32_Addr = ctypes.c_uint32
Elf32_Half = ctypes.c_uint16
Elf32_Off = ctypes.c_uint32
Elf32_Sword = ctypes.c_int32
Elf32_Word = ctypes.c_uint32
Elf64_Addr = ctypes.c_uint64
Elf64_Half = ctypes.c_uint16
Elf64_SHalf = ctypes.c_int16
Elf64_Off = ctypes.c_uint64
Elf64_Sword = ctypes.c_int32
Elf64_Word = ctypes.c_uint32
Elf64_Xword = ctypes.c_uint64
Elf64_Sxword = ctypes.c_int64
# Auxiliary-vector (auxv) entry types, keyed by numeric AT_* value.
AT_CONSTANTS = {
    0 : 'AT_NULL',      # /* End of vector */
    1 : 'AT_IGNORE',    # /* Entry should be ignored */
    2 : 'AT_EXECFD',    # /* File descriptor of program */
    3 : 'AT_PHDR',      # /* Program headers for program */
    4 : 'AT_PHENT',     # /* Size of program header entry */
    5 : 'AT_PHNUM',     # /* Number of program headers */
    6 : 'AT_PAGESZ',    # /* System page size */
    7 : 'AT_BASE',      # /* Base address of interpreter */
    8 : 'AT_FLAGS',     # /* Flags */
    9 : 'AT_ENTRY',     # /* Entry point of program */
    10: 'AT_NOTELF',    # /* Program is not ELF */
    11: 'AT_UID',       # /* Real uid */
    12: 'AT_EUID',      # /* Effective uid */
    13: 'AT_GID',       # /* Real gid */
    14: 'AT_EGID',      # /* Effective gid */
    15: 'AT_PLATFORM',  # /* String identifying platform */
    16: 'AT_HWCAP',     # /* Machine dependent hints about processor capabilities */
    17: 'AT_CLKTCK',    # /* Frequency of times() */
    18: 'AT_FPUCW',
    19: 'AT_DCACHEBSIZE',
    20: 'AT_ICACHEBSIZE',
    21: 'AT_UCACHEBSIZE',
    22: 'AT_IGNOREPPC',
    23: 'AT_SECURE',
    24: 'AT_BASE_PLATFORM', # String identifying real platforms
    25: 'AT_RANDOM',    # Address of 16 random bytes
    31: 'AT_EXECFN',    # Filename of executable
    32: 'AT_SYSINFO',
    33: 'AT_SYSINFO_EHDR',
    34: 'AT_L1I_CACHESHAPE',
    35: 'AT_L1D_CACHESHAPE',
    36: 'AT_L2_CACHESHAPE',
    37: 'AT_L3_CACHESHAPE',
}
class constants:
    """Numeric ELF file-format and auxiliary-vector constants."""
    # Indices into the e_ident[] identification array.
    EI_MAG0 = 0
    EI_MAG1 = 1
    EI_MAG2 = 2
    EI_MAG3 = 3
    EI_CLASS = 4
    EI_DATA = 5
    EI_VERSION = 6
    EI_OSABI = 7
    EI_ABIVERSION = 8
    EI_PAD = 9
    EI_NIDENT = 16
    # ELF magic number bytes ("\x7fELF").
    ELFMAG0 = 0x7f
    ELFMAG1 = ord('E')
    ELFMAG2 = ord('L')
    ELFMAG3 = ord('F')
    # Legal values for e_ident[EI_CLASS].
    ELFCLASSNONE = 0
    ELFCLASS32 = 1
    ELFCLASS64 = 2
    # Legal values for e_ident[EI_DATA] (byte order).
    ELFDATANONE = 0
    ELFDATA2LSB = 1
    ELFDATA2MSB = 2
    # Legal values for Elf_Phdr.p_type (segment type).
    PT_NULL = 0
    PT_LOAD = 1
    PT_DYNAMIC = 2
    PT_INTERP = 3
    PT_NOTE = 4
    PT_SHLIB = 5
    PT_PHDR = 6
    PT_TLS = 7
    # Legal values for Elf_Ehdr.e_type (object file type).
    ET_NONE = 0
    ET_REL = 1
    ET_EXEC = 2
    ET_DYN = 3
    ET_CORE = 4
    # Legal values for Elf_Dyn.d_tag (dynamic entry type).
    DT_NULL = 0
    DT_NEEDED = 1
    DT_PLTRELSZ = 2
    DT_PLTGOT = 3
    DT_HASH = 4
    DT_STRTAB = 5
    DT_SYMTAB = 6
    DT_RELA = 7
    DT_RELASZ = 8
    DT_RELAENT = 9
    DT_STRSZ = 10
    DT_SYMENT = 11
    DT_INIT = 12
    DT_FINI = 13
    DT_SONAME = 14
    DT_RPATH = 15
    DT_SYMBOLIC = 16
    DT_REL = 17
    DT_RELSZ = 18
    DT_RELENT = 19
    DT_PLTREL = 20
    DT_DEBUG = 21
    DT_TEXTREL = 22
    DT_JMPREL = 23
    DT_ENCODING = 32
    # Legal values for Elf_Shdr.sh_type (section type).
    SHT_NULL = 0
    SHT_PROGBITS = 1
    SHT_SYMTAB = 2
    SHT_STRTAB = 3
    SHT_RELA = 4
    SHT_HASH = 5
    SHT_DYNAMIC = 6
    SHT_NOTE = 7
    SHT_NOBITS = 8
    SHT_REL = 9
    SHT_SHLIB = 10
    SHT_DYNSYM = 11
    SHT_NUM = 12
    # Legal values for ST_TYPE subfield of Elf_Sym.st_info (symbol type).
    STT_NOTYPE = 0
    STT_OBJECT = 1
    STT_FUNC = 2
    STT_SECTION = 3
    STT_FILE = 4
    STT_COMMON = 5
    STT_TLS = 6
    #
    # Notes used in ET_CORE. Architectures export some of the arch register sets
    # using the corresponding note types via the PTRACE_GETREGSET and
    # PTRACE_SETREGSET requests.
    #
    NT_PRSTATUS = 1
    NT_PRFPREG = 2
    NT_PRPSINFO = 3
    NT_TASKSTRUCT = 4
    NT_AUXV = 6
    #
    # Note to userspace developers: size of NT_SIGINFO note may increase
    # in the future to accomodate more fields, don't assume it is fixed!
    #
    NT_SIGINFO = 0x53494749
    NT_FILE = 0x46494c45
    NT_PRXFPREG = 0x46e62b7f
    NT_PPC_VMX = 0x100
    NT_PPC_SPE = 0x101
    NT_PPC_VSX = 0x102
    NT_386_TLS = 0x200
    NT_386_IOPERM = 0x201
    NT_X86_XSTATE = 0x202
    NT_S390_HIGH_GPRS = 0x300
    NT_S390_TIMER = 0x301
    NT_S390_TODCMP = 0x302
    NT_S390_TODPREG = 0x303
    NT_S390_CTRS = 0x304
    NT_S390_PREFIX = 0x305
    NT_S390_LAST_BREAK = 0x306
    NT_S390_SYSTEM_CALL = 0x307
    NT_S390_TDB = 0x308
    NT_ARM_VFP = 0x400
    NT_ARM_TLS = 0x401
    NT_ARM_HW_BREAK = 0x402
    NT_ARM_HW_WATCH = 0x403
    NT_METAG_CBUF = 0x500
    NT_METAG_RPIPE = 0x501
    NT_METAG_TLS = 0x502
    # Auxiliary-vector entry types (same values as the AT_CONSTANTS map).
    AT_NULL = 0
    AT_IGNORE = 1
    AT_EXECFD = 2
    AT_PHDR = 3
    AT_PHENT = 4
    AT_PHNUM = 5
    AT_PAGESZ = 6
    AT_BASE = 7
    AT_FLAGS = 8
    AT_ENTRY = 9
    AT_NOTELF = 10
    AT_UID = 11
    AT_EUID = 12
    AT_GID = 13
    AT_EGID = 14
    AT_PLATFORM = 15
    AT_HWCAP = 16
    AT_CLKTCK = 17
    AT_FPUCW = 18
    AT_DCACHEBSIZE = 19
    AT_ICACHEBSIZE = 20
    AT_UCACHEBSIZE = 21
    AT_IGNOREPPC = 22
    AT_SECURE = 23
    AT_BASE_PLATFORM = 24
    AT_RANDOM = 25
    AT_EXECFN = 31
    AT_SYSINFO = 32
    AT_SYSINFO_EHDR = 33
    AT_L1I_CACHESHAPE = 34
    AT_L1D_CACHESHAPE = 35
    AT_L2_CACHESHAPE = 36
    AT_L3_CACHESHAPE = 37
class Elf32_Ehdr(pwndbg.ctypes.Structure):
    """32-bit ELF file header."""
    _fields_ = [("e_ident", (ctypes.c_ubyte * 16)),
                ("e_type", Elf32_Half),
                ("e_machine", Elf32_Half),
                ("e_version", Elf32_Word),
                ("e_entry", Elf32_Addr),
                ("e_phoff", Elf32_Off),
                ("e_shoff", Elf32_Off),
                ("e_flags", Elf32_Word),
                ("e_ehsize", Elf32_Half),
                ("e_phentsize", Elf32_Half),
                ("e_phnum", Elf32_Half),
                ("e_shentsize", Elf32_Half),
                ("e_shnum", Elf32_Half),
                ("e_shstrndx", Elf32_Half),]
class Elf64_Ehdr(pwndbg.ctypes.Structure):
    """64-bit ELF file header (same field order as the 32-bit header, wider types)."""
    _fields_ = [("e_ident", (ctypes.c_ubyte * 16)),
                ("e_type", Elf64_Half),
                ("e_machine", Elf64_Half),
                ("e_version", Elf64_Word),
                ("e_entry", Elf64_Addr),
                ("e_phoff", Elf64_Off),
                ("e_shoff", Elf64_Off),
                ("e_flags", Elf64_Word),
                ("e_ehsize", Elf64_Half),
                ("e_phentsize", Elf64_Half),
                ("e_phnum", Elf64_Half),
                ("e_shentsize", Elf64_Half),
                ("e_shnum", Elf64_Half),
                ("e_shstrndx", Elf64_Half),]
class Elf32_Phdr(pwndbg.ctypes.Structure):
    """32-bit ELF program header."""
    _fields_ = [("p_type", Elf32_Word),
                ("p_offset", Elf32_Off),
                ("p_vaddr", Elf32_Addr),
                ("p_paddr", Elf32_Addr),
                ("p_filesz", Elf32_Word),
                ("p_memsz", Elf32_Word),
                ("p_flags", Elf32_Word),
                ("p_align", Elf32_Word),]
class Elf64_Phdr(pwndbg.ctypes.Structure):
    """64-bit ELF program header.

    Note: unlike the 32-bit layout, p_flags sits directly after p_type.
    """
    _fields_ = [("p_type", Elf64_Word),
                ("p_flags", Elf64_Word),
                ("p_offset", Elf64_Off),
                ("p_vaddr", Elf64_Addr),
                ("p_paddr", Elf64_Addr),
                ("p_filesz", Elf64_Xword),
                ("p_memsz", Elf64_Xword),
                ("p_align", Elf64_Xword),]
|
rjschwei/azure-sdk-for-python
|
refs/heads/master
|
azure-batch/azure/batch/models/file_delete_from_task_options.py
|
3
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FileDeleteFromTaskOptions(Model):
    """Additional parameters for the File_delete_from_task operation.

    :param timeout: The maximum time that the server can spend processing
     the request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
        # Plain attribute storage, mirroring the generated model.
        self.timeout = timeout
        self.ocp_date = ocp_date
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
|
UrLab/beta402
|
refs/heads/master
|
telepathy/templatetags/__init__.py
|
107
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
|
kymbert/behave
|
refs/heads/master
|
more.features/steps/tutorial_steps.py
|
15
|
# -*- coding: utf-8 -*-
"""Step implementations for tutorial example."""
from behave import *
@given('we have behave installed')
def step_impl(context):
    # Nothing to set up: running this step at all is the precondition.
    pass
@when('we implement a test')
def step_impl(context):
    # Placeholder "implemented test"; the assertion trivially holds.
    assert True is not False
@then('behave will test it for us!')
def step_impl(context):
    # NOTE: relies on behave's context.failed flag (False when no step failed).
    assert context.failed is False
|
digitvaran/digitvaran
|
refs/heads/master
|
resources/views.py
|
1
|
from django.shortcuts import render
from resources import models
def home(request):
    """Render the resources homepage."""
    context = {}
    return render(request, 'resources/home.html', context)
def search(request):
    """Search audiobooks by description.

    GET renders an empty search form; POST filters Audiobook records whose
    description contains the submitted string (case-insensitive), and echoes
    the string back as the input placeholder.
    """
    data = {}
    template = 'resources/search.html'
    if request.method == 'GET':
        data['placeholder'] = 'Search'
    if request.method == 'POST':
        # Default to '' so a POST without 'searchstring' cannot inject None
        # into the queryset filter (the original crashed in that case).
        searchstr = request.POST.get('searchstring', '')
        data['placeholder'] = searchstr
        data['results'] = models.Audiobook.objects.filter(description__icontains=searchstr)
    return render(request, template, data)
def browse(request):
    """List every audiobook resource."""
    context = {'resources': models.Audiobook.objects.all()}
    return render(request, 'resources/browse.html', context)
|
aftabnaveed/virtual-work-monitor
|
refs/heads/master
|
MainGui.py
|
1
|
from PyQt4 import QtCore, QtGui
import time
class SaveScreenshotThread(QtCore.QThread):
    """Worker thread that asks the GUI (via the `capture` signal) to save a screenshot."""
    # Signal carrying the MainGui instance whose screenshot should be saved.
    capture = QtCore.pyqtSignal(object)
    # The MainGui instance this thread works on (set in __init__).
    gui = None
    def __init__(self, gui):
        QtCore.QThread.__init__(self)
        self.gui = gui
    def run(self):
        print "Saving " + self.gui.fileName
        # NOTE(review): touching a widget from a non-GUI thread is unsafe in
        # Qt; only the signal emission below is safe cross-thread — confirm.
        self.gui.saveScreenshotButton.setDisabled(True)
        self.capture.emit(self.gui)
        #self.gui.show()
class MainGui(QtGui.QWidget):
    """Main window: captures the desktop on a timer and saves screenshots to disk."""
    # Most recent screenshot file name (rewritten per capture).
    fileName = 'screenshot'
    def __init__(self):
        super(MainGui, self).__init__()
        self.screenshotLabel = QtGui.QLabel()
        self.screenshotLabel.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.screenshotLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.screenshotLabel.setMinimumSize(240, 160)
        # Capture and save the screen automatically every 5 seconds.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.captureAndSaveScreen)
        #enabling below would execute the time only once,
        #self.timer.setSingleShot(True)
        self.timer.start(5000)
    def createOptionsGroupBox(self):
        """Build and return the 'Options' group box (delay spin box + hide checkbox)."""
        optionsGroupBox = QtGui.QGroupBox("Options")
        self.delaySpinBox = QtGui.QSpinBox()
        self.delaySpinBox.setSuffix(" s")
        self.delaySpinBox.setMaximum(60)
        self.delayBoxLabel = QtGui.QLabel("Screenshot Delay")
        self.hideThisWindowCheckBox = QtGui.QCheckBox("Hide This Window")
        optionsGroupBoxLayout = QtGui.QGridLayout()
        optionsGroupBoxLayout.addWidget(self.delayBoxLabel, 0, 0)
        optionsGroupBoxLayout.addWidget(self.delaySpinBox, 0, 1)
        optionsGroupBoxLayout.addWidget(self.hideThisWindowCheckBox, 1, 0, 1, 2)
        optionsGroupBox.setLayout(optionsGroupBoxLayout)
        return optionsGroupBox
    def createButtonsLayout(self):
        """Build and return the horizontal row of action buttons."""
        self.newScreenshotButton = self.createButton("New Screenshot", self.newScreenshot)
        self.saveScreenshotButton = self.createButton("Save Screenshot", self.saveScreenshot)
        # NOTE(review): this attribute shares its name with the
        # quitScreenshotButton *method* below — one shadows the other.
        self.quitScreenshotButton = self.createButton("Quit", self.close)
        self.buttonsLayout = QtGui.QHBoxLayout()
        self.buttonsLayout.addStretch()
        self.buttonsLayout.addWidget(self.newScreenshotButton)
        self.buttonsLayout.addWidget(self.saveScreenshotButton)
        self.buttonsLayout.addWidget(self.quitScreenshotButton)
        return self.buttonsLayout
    def shootScreen(self):
        """Grab the whole desktop into self.originalPixmap and refresh the preview label."""
        #if self.delaySpinBox.value() != 0:
        QtGui.qApp.beep()
        self.originalPixmap = None
        self.originalPixmap = QtGui.QPixmap.grabWindow(QtGui.QApplication.desktop().winId())
        return self.updateScreenshotLabel()
    def updateScreenshotLabel(self):
        """Scale the captured pixmap into the preview label and return the label."""
        self.screenshotLabel.setPixmap(self.originalPixmap.scaled(self.screenshotLabel.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))
        return self.screenshotLabel
    def createButton(self, text, member):
        """Create a push button wired to the given slot."""
        button = QtGui.QPushButton(text)
        button.clicked.connect(member)
        return button
    def newScreenshot(self):
        # Not implemented.
        pass
    def saveScreenshot(self):
        """Kick off a worker thread that triggers captureAndSaveScreen via a signal."""
        format = 'png'
        initialPath = QtCore.QDir.currentPath() + "untitled." + format
        #fileName = QtGui.QFileDialog.getSaveFileName(self, "Save As", initialPath, "%s Files(*.%s);;All Files(*)" % (format.upper(), format))
        self.saveThread = SaveScreenshotThread(self)
        self.saveThread.capture.connect(self.captureAndSaveScreen)
        self.saveThread.start()
    def captureAndSaveScreen(self):
        """Grab the desktop and save it to a timestamped PNG in the working directory."""
        self.fileName = "screenshot_" + str(time.time()) + ".png"
        self.originalPixmap = None
        self.originalPixmap = QtGui.QPixmap.grabWindow(QtGui.QApplication.desktop().winId())
        self.originalPixmap.save(self.fileName, 'png')
        self.saveScreenshotButton.setDisabled(False)
    def quitScreenshotButton(self):
        # NOTE(review): name collides with the button attribute assigned in
        # createButtonsLayout; method body is intentionally empty.
        pass
|
abdullah2891/remo
|
refs/heads/master
|
remo/profiles/migrations/0048_rename_remobot.py
|
3
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: rename the 'remobot' user's display name to 'ReMoBot'."""
    def forwards(self, orm):
        """Rename the display name of Remo bot."""
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        remobot = orm['profiles.UserProfile'].objects.get(user__username='remobot')
        remobot.display_name = 'ReMoBot'
        remobot.save()
    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: the previous display name is not recorded.
        pass
    # Frozen ORM state auto-generated by South; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'profiles.functionalarea': {
            'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'profiles.useravatar': {
            'Meta': {'object_name': 'UserAvatar'},
            'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 20, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'profiles.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
            'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
            'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'first_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
            'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
            'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
            'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'receive_email_on_add_voting_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'second_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
            'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
            'unavailability_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
            'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
        },
        u'profiles.userstatus': {
            'Meta': {'ordering': "['-expected_date', '-created_on']", 'object_name': 'UserStatus'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'return_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': u"orm['auth.User']"})
        }
    }
    complete_apps = ['profiles']
    symmetrical = True
|
rosmo/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/na_elementsw_account.py
|
16
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Element Software Account Manager
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_account
short_description: NetApp Element Software Manage Accounts
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Create, destroy, or update accounts on Element SW
options:
state:
description:
- Whether the specified account should exist or not.
required: true
choices: ['present', 'absent']
element_username:
description:
- Unique username for this account. (May be 1 to 64 characters in length).
required: true
aliases:
- account_id
from_name:
description:
- ID or Name of the account to rename.
- Required to create an account called 'element_username' by renaming 'from_name'.
version_added: '2.8'
initiator_secret:
description:
- CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
- The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
- If not specified, a random secret is created.
target_secret:
description:
- CHAP secret to use for the target (mutual CHAP authentication).
- Should be 12-16 characters long and impenetrable.
- The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
- If not specified, a random secret is created.
attributes:
description: List of Name/Value pairs in JSON object format.
status:
description:
- Status of the account.
'''
EXAMPLES = """
- name: Create Account
na_elementsw_account:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
element_username: TenantA
- name: Modify Account
na_elementsw_account:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
status: locked
element_username: TenantA
- name: Rename Account
na_elementsw_account:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
element_username: TenantA_Renamed
from_name: TenantA
- name: Rename and modify Account
na_elementsw_account:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
status: locked
element_username: TenantA_Renamed
from_name: TenantA
- name: Delete Account
na_elementsw_account:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: absent
element_username: TenantA_Renamed
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_elementsw_module import NaElementSWModule
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class ElementSWAccount(object):
    """
    Element SW Account

    Implements create / rename / update / delete of a tenant account on an
    Element SW cluster, driven entirely by the Ansible module parameters.
    """

    def __init__(self):
        # Build the argument spec on top of the shared SolidFire/ONTAP
        # connection options (hostname, username, password, ...).
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            element_username=dict(required=True, aliases=["account_id"], type='str'),
            from_name=dict(required=False, default=None),
            initiator_secret=dict(required=False, type='str'),
            target_secret=dict(required=False, type='str'),
            attributes=dict(required=False, type='dict'),
            status=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        params = self.module.params
        # set up state variables
        self.state = params.get('state')
        self.element_username = params.get('element_username')
        self.from_name = params.get('from_name')
        self.initiator_secret = params.get('initiator_secret')
        self.target_secret = params.get('target_secret')
        self.attributes = params.get('attributes')
        self.status = params.get('status')
        if HAS_SF_SDK is False:
            # The solidfire SDK is an optional dependency; fail cleanly
            # instead of raising ImportError later.
            self.module.fail_json(msg="Unable to import the Element SW Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)
        self.elementsw_helper = NaElementSWModule(self.sfe)
        # add telemetry attributes (merged into any user-supplied attributes)
        if self.attributes is not None:
            self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_account'))
        else:
            self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_account')

    def get_account(self, username):
        """
        Get Account
        :description: Get Account object from account id or name
        :return: Details about the account. None if not found.
        :rtype: object (Account object)
        """
        account_list = self.sfe.list_accounts()
        for account in account_list.accounts:
            # Check and get account object for a given name.
            # Accept either a numeric account id (as string) or the username.
            if str(account.account_id) == username:
                return account
            elif account.username == username:
                return account
        return None

    def create_account(self):
        """
        Create the Account

        Secrets left as None let the cluster generate random CHAP secrets.
        """
        try:
            self.sfe.add_account(username=self.element_username,
                                 initiator_secret=self.initiator_secret,
                                 target_secret=self.target_secret,
                                 attributes=self.attributes)
        except Exception as e:
            self.module.fail_json(msg='Error creating account %s: %s' % (self.element_username, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_account(self):
        """
        Delete the Account

        Requires self.account_id to have been resolved by apply().
        """
        try:
            self.sfe.remove_account(account_id=self.account_id)
        except Exception as e:
            self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
                                  exception=traceback.format_exc())

    def rename_account(self):
        """
        Rename the Account

        Renames the account identified by self.account_id (resolved from
        from_name) to self.element_username, applying any other requested
        modifications in the same call.
        """
        try:
            self.sfe.modify_account(account_id=self.account_id,
                                    username=self.element_username,
                                    status=self.status,
                                    initiator_secret=self.initiator_secret,
                                    target_secret=self.target_secret,
                                    attributes=self.attributes)
        except Exception as e:
            self.module.fail_json(msg='Error renaming account %s: %s' % (self.account_id, to_native(e)),
                                  exception=traceback.format_exc())

    def update_account(self):
        """
        Update the Account if account already exists
        """
        try:
            self.sfe.modify_account(account_id=self.account_id,
                                    status=self.status,
                                    initiator_secret=self.initiator_secret,
                                    target_secret=self.target_secret,
                                    attributes=self.attributes)
        except Exception as e:
            self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        """
        Process the account operation on the Element OS Cluster

        Decides between create / rename / update / delete / no-op based on
        the requested state and the account currently on the cluster, then
        exits the module with the computed 'changed' flag.
        """
        changed = False
        update_account = False
        account_detail = self.get_account(self.element_username)
        if account_detail is None and self.state == 'present':
            changed = True
        elif account_detail is not None:
            # If account found
            self.account_id = account_detail.account_id
            if self.state == 'absent':
                changed = True
            else:
                # If state - present, check for any parameter of existing account needs modification.
                # NOTE: a single difference is enough; update_account() sends
                # all modifiable fields in one call.
                if account_detail.username is not None and self.element_username is not None and \
                        account_detail.username != self.element_username:
                    update_account = True
                    changed = True
                elif account_detail.status is not None and self.status is not None \
                        and account_detail.status != self.status:
                    update_account = True
                    changed = True
                elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
                        and account_detail.initiator_secret != self.initiator_secret:
                    update_account = True
                    changed = True
                elif account_detail.target_secret is not None and self.target_secret is not None \
                        and account_detail.target_secret != self.target_secret:
                    update_account = True
                    changed = True
                elif account_detail.attributes is not None and self.attributes is not None \
                        and account_detail.attributes != self.attributes:
                    update_account = True
                    changed = True
        if changed:
            if self.module.check_mode:
                # Skipping the changes (check mode reports 'changed' only)
                pass
            else:
                if self.state == 'present':
                    if update_account:
                        self.update_account()
                    else:
                        if self.from_name is not None:
                            # If from_name is defined
                            account_exists = self.get_account(self.from_name)
                            if account_exists is not None:
                                # If resource pointed by from_name exists, rename the account to name
                                self.account_id = account_exists.account_id
                                self.rename_account()
                            else:
                                # If resource pointed by from_name does not exists, error out
                                self.module.fail_json(msg="Resource does not exist : %s" % self.from_name)
                        else:
                            # If from_name is not defined, create from scratch.
                            self.create_account()
                elif self.state == 'absent':
                    self.delete_account()
        self.module.exit_json(changed=changed)
def main():
    """Instantiate the account manager and run the requested operation."""
    account_manager = ElementSWAccount()
    account_manager.apply()
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
|
laosiaudi/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/random_gamma_test.py
|
21
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_gamma."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class RandomGammaTest(tf.test.TestCase):
  """This is a medium test due to the moments computation taking some time."""

  def setUp(self):
    # Fix both numpy's and TF's graph-level seed so runs are reproducible.
    np.random.seed(137)
    tf.set_random_seed(137)

  def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):
    # Returns a nullary callable; each call builds a fresh graph/session and
    # draws a (10, num) matrix of gamma(alpha, beta) samples.

    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.random_gamma([num], alpha, beta=beta, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret

    return func

  def testMomentsFloat32(self):
    self._testMoments(tf.float32)

  def testMomentsFloat64(self):
    self._testMoments(tf.float64)

  def _testMoments(self, dt):
    try:
      from scipy import stats  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      tf.logging.warn("Cannot test moments: %s" % e)
      return
    # Check the given array of samples matches the given theoretical moment
    # function at different orders. The test is considered passing if the
    # z-tests of all statistical moments are all below z_limit.
    # Parameters:
    #   max_moments: the largest moments of the distribution to be tested
    #   stride: the distance between samples to check for statistical properties
    #     0 means the n-th moment of each sample
    #     any other strides tests for spatial correlation between samples;
    #   z_limit: the maximum z-test we would consider the test to pass;
    #     The moments test is a z-value test. This is the largest z-value
    #     we want to tolerate. Since the z-test approximates a unit normal
    #     distribution, it should almost definitely never exceed 6.
    z_limit = 6.0
    for stride in 0, 1, 4, 17:
      alphas = [0.2, 1.0, 3.0]
      if dt == tf.float64:
        alphas = [0.01] + alphas
      for alpha in alphas:
        for scale in 9, 17:
          # Gamma moments only defined for values less than the scale param.
          max_moment = min(6, scale // 2)
          sampler = self._Sampler(
              20000, alpha, 1 / scale, dt, use_gpu=False, seed=12345)
          moments = [0] * (max_moment + 1)
          moments_sample_count = [0] * (max_moment + 1)
          x = np.array(sampler().flat)  # sampler does 10x samples
          for k in range(len(x)):
            moment = 1.
            for i in range(max_moment + 1):
              index = k + i * stride
              if index >= len(x):
                break
              moments[i] += moment
              moments_sample_count[i] += 1
              moment *= x[index]
          # Convert accumulated sums into empirical moments.
          for i in range(max_moment + 1):
            moments[i] /= moments_sample_count[i]
          for i in range(1, max_moment + 1):
            g = stats.gamma(alpha, scale=scale)
            if stride == 0:
              moments_i_mean = g.moment(i)
              moments_i_squared = g.moment(2 * i)
            else:
              moments_i_mean = pow(g.moment(1), i)
              moments_i_squared = pow(g.moment(2), i)
            # Calculate moment variance safely:
            # This is just
            #   (moments_i_squared - moments_i_mean**2) / moments_sample_count[i]
            normalized_moments_i_var = (
                moments_i_mean / moments_sample_count[i] * (
                    moments_i_squared/moments_i_mean - moments_i_mean))
            # Assume every operation has a small numerical error.
            # It takes i multiplications to calculate one i-th moment.
            error_per_moment = i * np.finfo(dt.as_numpy_dtype).eps
            total_variance = (
                normalized_moments_i_var + error_per_moment)
            tiny = np.finfo(dt.as_numpy_dtype).tiny
            self.assertGreaterEqual(total_variance, 0)
            if total_variance < tiny:
              total_variance = tiny
            # z_test is approximately a unit normal distribution.
            z_test = abs(
                (moments[i] - moments_i_mean) / math.sqrt(total_variance))
            self.assertLess(z_test, z_limit)

  def _testZeroDensity(self, alpha):
    """Zero isn't in the support of the gamma distribution.

    But quantized floating point math has its limits.
    TODO(bjp): Implement log-gamma sampler for small-shape distributions.

    Args:
      alpha: float shape value to test
    """
    try:
      from scipy import stats  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      tf.logging.warn("Cannot test zero density proportions: %s" % e)
      return
    # Expected fraction of samples that can round to <= 0 for each dtype:
    # the CDF mass below the smallest positive representable value.
    allowable_zeros = {
        tf.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
        tf.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
        tf.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
    }
    failures = []
    for use_gpu in [False, True]:
      for dt in tf.float16, tf.float32, tf.float64:
        sampler = self._Sampler(
            10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
        x = sampler()
        allowable = allowable_zeros[dt] * x.size
        # Generous slack for very small expected counts.
        allowable = allowable * 2 if allowable < 10 else allowable * 1.05
        if np.sum(x <= 0) > allowable:
          failures += [(use_gpu, dt)]
    self.assertEqual([], failures)

  def testNonZeroSmallShape(self):
    self._testZeroDensity(0.01)

  def testNonZeroSmallishShape(self):
    self._testZeroDensity(0.35)

  # Asserts that different trials (1000 samples per trial) is unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which uses the same random number seed.
  def testDistinct(self):
    for use_gpu in [False, True]:
      for dt in tf.float16, tf.float32, tf.float64:
        sampler = self._Sampler(1000, 2.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of different samples.
        count = (x == y).sum()
        # float16 has fewer distinct representable values, so allow more
        # coincidental matches before failing.
        count_limit = 20 if dt == tf.float16 else 10
        if count >= count_limit:
          print(use_gpu, dt)
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertLess(count, count_limit)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float16, tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      if dt == tf.float16:
        self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Two samplers with identical seeds must produce identical streams.
    for use_gpu in [False, True]:
      for dt in tf.float16, tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())

  def testNoCSE(self):
    """CSE = constant subexpression eliminator.

    SetIsStateful() should prevent two identical random ops from getting
    merged.
    """
    for dtype in tf.float16, tf.float32, tf.float64:
      for use_gpu in [False, True]:
        with self.test_session(use_gpu=use_gpu):
          rnd1 = tf.random_gamma([24], 2.0, dtype=dtype)
          rnd2 = tf.random_gamma([24], 2.0, dtype=dtype)
          diff = rnd2 - rnd1
          self.assertGreater(np.linalg.norm(diff.eval()), 0.1)

  def testShape(self):
    # Static shape inference: output shape is sample_shape + alpha/beta
    # broadcast shape.
    # Fully known shape.
    rnd = tf.random_gamma([150], 2.0)
    self.assertEqual([150], rnd.get_shape().as_list())
    rnd = tf.random_gamma([150], 2.0, beta=[3.0, 4.0])
    self.assertEqual([150, 2], rnd.get_shape().as_list())
    rnd = tf.random_gamma([150], tf.ones([1, 2, 3]))
    self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
    rnd = tf.random_gamma([20, 30], tf.ones([1, 2, 3]))
    self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
    rnd = tf.random_gamma([123], tf.placeholder(tf.float32, shape=(2,)))
    self.assertEqual([123, 2], rnd.get_shape().as_list())
    # Partially known shape.
    rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(1,)), tf.ones([7, 3]))
    self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
    rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(3,)), tf.ones([9, 6]))
    self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
    # Unknown shape.
    rnd = tf.random_gamma(tf.placeholder(tf.int32), tf.placeholder(tf.float32))
    self.assertIs(None, rnd.get_shape().ndims)
    rnd = tf.random_gamma([50], tf.placeholder(tf.float32))
    self.assertIs(None, rnd.get_shape().ndims)
if __name__ == "__main__":
  # Run all tests in this file under the TF test runner.
  tf.test.main()
|
entpy/eb-django
|
refs/heads/master
|
essere_benessere/essere_benessere/urls.py
|
1
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin

# Auto-register ModelAdmin classes from every installed app.
admin.autodiscover()

# URL table for the public site plus the Django admin.  Views are given as
# dotted strings (pre-Django-1.10 `patterns('')` style).  Media files are
# served directly via static() — intended for development use.
urlpatterns = patterns('',
    # frontend URLs
    url(r'^$', 'website.views.index', name='index'),
    url(r'^chi-siamo/', 'website.views.about_us', name='about_us'),
    url(r'^i-nostri-servizi/', 'website.views.our_services', name='our_services'),
    url(r'^contatti/', 'website.views.contacts', name='contacts'),
    url(r'^le-nostre-offerte/', 'website.views.our_offers', name='our_offers'),
    url(r'^ricevi-offerte/', 'website.views.get_offers', name='get_offers'),
    url(r'^sbiancamento-dentale/', 'website.views.dental_whitening', name='dental_whitening'),
    url(r'^termini-di-utilizzo/', 'website.views.terms_of_use', name='terms_of_use'),
    url(r'^cookie-policy/', 'website.views.cookie_policy', name='cookie_policy'),
    # admin URLs
    url(r'^admin/', include(admin.site.urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
gareging/SDN_Framework
|
refs/heads/master
|
ryu/tests/unit/lib/test_mod/ddd/__init__.py
|
26
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
gsingh93/rust
|
refs/heads/master
|
src/etc/regex-match-tests.py
|
58
|
#!/usr/bin/env python2
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import absolute_import, division, print_function
import argparse
import datetime
import os.path as path
def print_tests(tests):
    """Print every parsed test as a `mat!` macro line, newline separated."""
    rendered = [test_tostr(t) for t in tests]
    print('\n'.join(rendered))
def read_tests(f):
    """Parse one AT&T POSIX regex test file into (name, pattern, text, groups) tuples."""
    basename, _ = path.splitext(path.basename(f))
    tests = []
    for lineno, line in enumerate(open(f), 1):
        # Fields are tab separated: options, pattern, text, groups[, extra].
        # NOTE(review): relies on Python 2 semantics where filter()/map()
        # return lists (the shebang targets python2) — indexing below would
        # fail on Python 3 iterators.
        fields = filter(None, map(str.strip, line.split('\t')))
        # Skip malformed lines, comments, and tests without the 'E' (ERE) flag.
        if not (4 <= len(fields) <= 5) \
                or 'E' not in fields[0] or fields[0][0] == '#':
            continue
        opts, pat, text, sgroups = fields[0:4]
        groups = []  # groups as integer ranges
        if sgroups == 'NOMATCH':
            groups = [None]
        elif ',' in sgroups:
            # Parse '(s,e)(s,e)...' into (start, end) tuples; '(?,?)' marks
            # a group that did not participate in the match.
            noparen = map(lambda s: s.strip('()'), sgroups.split(')('))
            for g in noparen:
                s, e = map(str.strip, g.split(','))
                if s == '?' and e == '?':
                    groups.append(None)
                else:
                    groups.append((int(s), int(e)))
        else:
            # This skips tests that should result in an error.
            # There aren't many, so I think we can just capture those
            # manually. Possibly fix this in future.
            continue
        # 'SAME' reuses the pattern from the previous test line.
        if pat == 'SAME':
            pat = tests[-1][1]
        if '$' in opts:
            # '$' option: pattern/text contain C-style escapes.
            # NOTE(review): str.decode('string_escape') is Python 2 only.
            pat = pat.decode('string_escape')
            text = text.decode('string_escape')
        if 'i' in opts:
            # Case-insensitive flag becomes an inline (?i) group.
            pat = '(?i)%s' % pat
        name = '%s_%d' % (basename, lineno)
        tests.append((name, pat, text, groups))
    return tests
def test_tostr(t):
    """Format one parsed test tuple as a Rust `mat!` macro invocation."""
    name, pattern, haystack, groups = t
    spans = [group_tostr(g) for g in groups]
    if haystack == "NULL":
        haystack = ''
    return 'mat!{match_%s, r"%s", r"%s", %s}' \
        % (name, pattern, haystack, ', '.join(spans))
def group_tostr(g):
    """Render a capture-group span as a Rust `Option<(uint, uint)>` literal."""
    return 'None' if g is None else 'Some((%d, %d))' % g
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Generate match tests from an AT&T POSIX test file.')
    aa = parser.add_argument
    aa('files', nargs='+',
       help='A list of dat AT&T POSIX test files. See src/libregexp/testdata')
    args = parser.parse_args()
    # Parse everything up front so a bad input file fails before any output.
    tests = []
    for f in args.files:
        tests += read_tests(f)
    # License/provenance header for the generated Rust source.
    tpl = '''// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// DO NOT EDIT. Automatically generated by 'src/etc/regexp-match-tests'
// on {date}.
'''
    print(tpl.format(date=str(datetime.datetime.now())))
    # One commented section of mat!{...} lines per input file.
    for f in args.files:
        print('// Tests from %s' % path.basename(f))
        print_tests(read_tests(f))
        print('')
|
BTCLITE/BitcoinLITE-BTCL
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
# NOTE(review): uses `xrange`, so this script targets Python 2 only.
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

# --- Configuration ---------------------------------------------------------
SRC='img/reload_scaled.png'  # source still image
DST='../../src/qt/res/movies/update_spinner.mng'  # output animation
TMPDIR='/tmp'  # where intermediate frames are written
TMPNAME='tmp-%03i.png'  # per-frame temp file name pattern
NUMFRAMES=35  # frames in one full revolution
FRAMERATE=10.0  # passed to ImageMagick as -delay
CONVERT='convert'  # ImageMagick binary
CLOCKWISE=True  # spin direction
DSIZE=(16,16)  # final frame size in pixels

im_src = Image.open(SRC)

# Mirror first, then rotate by negative angles below: together this yields a
# clockwise spin of the original artwork.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Temporary file path for the given frame index.
    return path.join(TMPDIR, TMPNAME % frame)

# Render each frame: rotate, scale down, save to a temp file.
frame_files = []
for frame in xrange(NUMFRAMES):
    # +0.5 samples the middle of each frame's angular interval.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble the frames into the .mng animation with ImageMagick;
# "-dispose 2" clears each frame before drawing the next.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
vlachoudis/sl4a
|
refs/heads/master
|
python/src/Doc/includes/sqlite3/executescript.py
|
140
|
import sqlite3

# Open a throwaway in-memory database and load the example schema plus one
# row of data with a single executescript() call, which accepts multiple
# SQL statements in one string.
con = sqlite3.connect(":memory:")
cur = con.cursor()

SCRIPT = """
create table person(
    firstname,
    lastname,
    age
);
create table book(
    title,
    author,
    published
);
insert into book(title, author, published)
values (
    'Dirk Gently''s Holistic Detective Agency',
    'Douglas Adams',
    1987
);
"""
cur.executescript(SCRIPT)
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-privatedns/azure/mgmt/privatedns/operations/_record_sets_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RecordSetsOperations(object):
"""RecordSetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.privatedns.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated service client; not meant to be
        # constructed directly by users (see class docstring).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def create_or_update(
        self,
        resource_group_name,  # type: str
        private_zone_name,  # type: str
        record_type,  # type: Union[str, "_models.RecordType"]
        relative_record_set_name,  # type: str
        parameters,  # type: "_models.RecordSet"
        if_match=None,  # type: Optional[str]
        if_none_match=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RecordSet"
        """Creates or updates a record set within a Private DNS zone.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param private_zone_name: The name of the Private DNS zone (without a terminating dot).
        :type private_zone_name: str
        :param record_type: The type of DNS record in this record set. Record sets of type SOA can be
         updated but not created (they are created when the Private DNS zone is created).
        :type record_type: str or ~azure.mgmt.privatedns.models.RecordType
        :param relative_record_set_name: The name of the record set, relative to the name of the zone.
        :type relative_record_set_name: str
        :param parameters: Parameters supplied to the CreateOrUpdate operation.
        :type parameters: ~azure.mgmt.privatedns.models.RecordSet
        :param if_match: The ETag of the record set. Omit this value to always overwrite the current
         record set. Specify the last-seen ETag value to prevent accidentally overwriting any concurrent
         changes.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new record set to be created, but to prevent
         updating an existing record set. Other values will be ignored.
        :type if_none_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RecordSet, or the result of cls(response)
        :rtype: ~azure.mgmt.privatedns.models.RecordSet
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecordSet"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateZoneName': self._serialize.url("private_zone_name", private_zone_name, 'str'),
            'recordType': self._serialize.url("record_type", record_type, 'str'),
            'relativeRecordSetName': self._serialize.url("relative_record_set_name", relative_record_set_name, 'str', skip_quote=True),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (conditional-request headers only when supplied)
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'RecordSet')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry the same RecordSet payload.
        if response.status_code == 200:
            deserialized = self._deserialize('RecordSet', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('RecordSet', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}'}  # type: ignore
    def update(
        self,
        resource_group_name,  # type: str
        private_zone_name,  # type: str
        record_type,  # type: Union[str, "_models.RecordType"]
        relative_record_set_name,  # type: str
        parameters,  # type: "_models.RecordSet"
        if_match=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RecordSet"
        """Updates a record set within a Private DNS zone.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param private_zone_name: The name of the Private DNS zone (without a terminating dot).
        :type private_zone_name: str
        :param record_type: The type of DNS record in this record set.
        :type record_type: str or ~azure.mgmt.privatedns.models.RecordType
        :param relative_record_set_name: The name of the record set, relative to the name of the zone.
        :type relative_record_set_name: str
        :param parameters: Parameters supplied to the Update operation.
        :type parameters: ~azure.mgmt.privatedns.models.RecordSet
        :param if_match: The ETag of the record set. Omit this value to always overwrite the current
         record set. Specify the last-seen ETag value to prevent accidentally overwriting concurrent
         changes.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RecordSet, or the result of cls(response)
        :rtype: ~azure.mgmt.privatedns.models.RecordSet
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecordSet"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateZoneName': self._serialize.url("private_zone_name", private_zone_name, 'str'),
            'recordType': self._serialize.url("record_type", record_type, 'str'),
            'relativeRecordSetName': self._serialize.url("relative_record_set_name", relative_record_set_name, 'str', skip_quote=True),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (If-Match only when an ETag was supplied)
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'RecordSet')
        body_content_kwargs['content'] = body_content
        # PATCH (partial update), unlike create_or_update's PUT.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RecordSet', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}'}  # type: ignore
def delete(
    self,
    resource_group_name,  # type: str
    private_zone_name,  # type: str
    record_type,  # type: Union[str, "_models.RecordType"]
    relative_record_set_name,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Deletes a record set from a Private DNS zone. This operation cannot be undone.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param private_zone_name: The name of the Private DNS zone (without a terminating dot).
    :type private_zone_name: str
    :param record_type: The type of DNS record in this record set. Record sets of type SOA cannot
     be deleted (they are deleted when the Private DNS zone is deleted).
    :type record_type: str or ~azure.mgmt.privatedns.models.RecordType
    :param relative_record_set_name: The name of the record set, relative to the name of the zone.
    :type relative_record_set_name: str
    :param if_match: The ETag of the record set. Omit this value to always delete the current
     record set. Specify the last-seen ETag value to prevent accidentally deleting any concurrent
     changes.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-01"
    accept = "application/json"

    # Expand the templated URL with the serialized path components.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'privateZoneName': self._serialize.url("private_zone_name", private_zone_name, 'str'),
        'recordType': self._serialize.url("record_type", record_type, 'str'),
        'relativeRecordSetName': self._serialize.url("relative_record_set_name", relative_record_set_name, 'str', skip_quote=True),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.delete.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query_params = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_params, headers)
    pipeline_resp = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_resp.http_response

    # The service answers 200 for a real delete and 204 when nothing existed.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_resp, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    private_zone_name,  # type: str
    record_type,  # type: Union[str, "_models.RecordType"]
    relative_record_set_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.RecordSet"
    """Gets a record set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param private_zone_name: The name of the Private DNS zone (without a terminating dot).
    :type private_zone_name: str
    :param record_type: The type of DNS record in this record set.
    :type record_type: str or ~azure.mgmt.privatedns.models.RecordType
    :param relative_record_set_name: The name of the record set, relative to the name of the zone.
    :type relative_record_set_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RecordSet, or the result of cls(response)
    :rtype: ~azure.mgmt.privatedns.models.RecordSet
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecordSet"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-01"
    accept = "application/json"

    # Expand the templated URL with the serialized path components.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'privateZoneName': self._serialize.url("private_zone_name", private_zone_name, 'str'),
        'recordType': self._serialize.url("record_type", record_type, 'str'),
        'relativeRecordSetName': self._serialize.url("relative_record_set_name", relative_record_set_name, 'str', skip_quote=True),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query_params = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    headers = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_params, headers)
    pipeline_resp = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_resp.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('RecordSet', pipeline_resp)
    if cls:
        return cls(pipeline_resp, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}'}  # type: ignore
def list_by_type(
    self,
    resource_group_name,  # type: str
    private_zone_name,  # type: str
    record_type,  # type: Union[str, "_models.RecordType"]
    top=None,  # type: Optional[int]
    recordsetnamesuffix=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.RecordSetListResult"]
    """Lists the record sets of a specified type in a Private DNS zone.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param private_zone_name: The name of the Private DNS zone (without a terminating dot).
    :type private_zone_name: str
    :param record_type: The type of record sets to enumerate.
    :type record_type: str or ~azure.mgmt.privatedns.models.RecordType
    :param top: The maximum number of record sets to return. If not specified, returns up to 100
     record sets.
    :type top: int
    :param recordsetnamesuffix: The suffix label of the record set name to be used to filter the
     record set enumeration. If this parameter is specified, the returned enumeration will only
     contain records that end with ".:code:`<recordsetnamesuffix>`".
    :type recordsetnamesuffix: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either RecordSetListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.privatedns.models.RecordSetListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecordSetListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page. ``next_link`` is None for the
        # first page; afterwards the service supplies a fully-formed URL.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: expand the templated URL and attach query options.
            # Construct URL
            url = self.list_by_type.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'privateZoneName': self._serialize.url("private_zone_name", private_zone_name, 'str'),
                'recordType': self._serialize.url("record_type", record_type, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            if top is not None:
                query_parameters['$top'] = self._serialize.query("top", top, 'int')
            if recordsetnamesuffix is not None:
                query_parameters['$recordsetnamesuffix'] = self._serialize.query("recordsetnamesuffix", recordsetnamesuffix, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation page: next_link already carries the query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, iterator of elements).
        deserialized = self._deserialize('RecordSetListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising on any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_type.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    private_zone_name,  # type: str
    top=None,  # type: Optional[int]
    recordsetnamesuffix=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.RecordSetListResult"]
    """Lists all record sets in a Private DNS zone.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param private_zone_name: The name of the Private DNS zone (without a terminating dot).
    :type private_zone_name: str
    :param top: The maximum number of record sets to return. If not specified, returns up to 100
     record sets.
    :type top: int
    :param recordsetnamesuffix: The suffix label of the record set name to be used to filter the
     record set enumeration. If this parameter is specified, the returned enumeration will only
     contain records that end with ".:code:`<recordsetnamesuffix>`".
    :type recordsetnamesuffix: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either RecordSetListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.privatedns.models.RecordSetListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecordSetListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page. ``next_link`` is None for the
        # first page; afterwards the service supplies a fully-formed URL.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: expand the templated URL and attach query options.
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'privateZoneName': self._serialize.url("private_zone_name", private_zone_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            if top is not None:
                query_parameters['$top'] = self._serialize.query("top", top, 'int')
            if recordsetnamesuffix is not None:
                query_parameters['$recordsetnamesuffix'] = self._serialize.query("recordsetnamesuffix", recordsetnamesuffix, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation page: next_link already carries the query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, iterator of elements).
        deserialized = self._deserialize('RecordSetListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising on any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL'}  # type: ignore
|
abought/osf.io
|
refs/heads/develop
|
website/notifications/constants.py
|
4
|
# Subscriptions that can be attached to a node (project / component).
NODE_SUBSCRIPTIONS_AVAILABLE = {
    'comments': 'Comments added',
    'file_updated': 'Files updated',
}

# Subscriptions that belong to a user rather than to a node.
# Note: if the subscription key starts with 'global_', it is treated like a
# default subscription. If no notification type has been assigned, the user
# subscription will default to 'email_transactional'.
USER_SUBSCRIPTIONS_AVAILABLE = {
    'global_comment_replies': 'Replies to your comments',
    'global_comments': 'Comments added',
    'global_file_updated': 'Files updated',
}

# Delivery mechanisms. Note: the Python value None means inherit from parent.
NOTIFICATION_TYPES = {
    'email_transactional': 'Email when a change occurs',
    'email_digest': 'Daily email digest of all changes to this project',
    'none': 'None',
}

# Formatted file-provider names for notification emails.
PROVIDERS = {
    'osfstorage': 'OSF Storage',
    'box': 'Box',
    'dataverse': 'Dataverse',
    'dropbox': 'Dropbox',
    'figshare': 'figshare',
    'github': 'GitHub',
    'googledrive': 'Google Drive',
    's3': 'Amazon S3',
}
|
adsabs/kibtools
|
refs/heads/master
|
kibtools/tests/stub_data.py
|
2
|
import json
def _shard_status():
    """Return a fresh Elasticsearch shard-status stanza (all stubs share the shape)."""
    return {"failed": 0, "successful": 1, "total": 1}


def _dashboard_hit(name):
    """Return a stub .kibana dashboard hit whose _id and title are *name*."""
    return {
        "_id": name,
        "_index": ".kibana",
        "_score": 1.0,
        "_source": {
            "description": "",
            "hits": 0,
            "kibanaSavedObjectMeta": {
                "searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"query\":\"*\",\"analyze_wildcard\":true}}}]}"
            },
            "panelsJSON": "[{\"id\":\"GETViz\",\"type\":\"visualization\",\"size_x\":3,\"size_y\":2,\"col\":1,\"row\":1}]",
            "title": name,
            "version": 1,
        },
        "_type": "dashboard",
    }


# Stubbed response for a dashboard search: two dashboards, both panelling GETViz.
all_dashboards = {
    "_shards": _shard_status(),
    "hits": {
        "hits": [_dashboard_hit("GETDash"), _dashboard_hit("GETDash2")],
        "max_score": 1.0,
        "total": 2,
    },
    "timed_out": False,
    "took": 1,
}

# Stubbed response for a visualization search: one visualization backed by
# the saved search "GET".
all_visualizations = {
    "_shards": _shard_status(),
    "hits": {
        "hits": [
            {
                "_id": "GETViz",
                "_index": ".kibana",
                "_score": 1.0,
                "_source": {
                    "description": "",
                    "kibanaSavedObjectMeta": {
                        "searchSourceJSON": "{\"filter\":[]}"
                    },
                    "savedSearchId": "GET",
                    "title": "GETViz",
                    "version": 1,
                    "visState": "{\"aggs\":[{\"id\":\"1\",\"params\":{},\"schema\":\"metric\",\"type\":\"count\"},{\"id\":\"2\",\"params\":{\"extended_bounds\":{},\"field\":\"@timestamp\",\"interval\":\"auto\",\"min_doc_count\":1},\"schema\":\"segment\",\"type\":\"date_histogram\"}],\"listeners\":{},\"params\":{\"addLegend\":true,\"addTooltip\":true,\"defaultYExtents\":false,\"mode\":\"stacked\",\"shareYAxis\":true},\"type\":\"area\"}"
                },
                "_type": "visualization",
            },
        ],
        "max_score": 1.0,
        "total": 1,
    },
    "timed_out": False,
    "took": 1,
}

# Stubbed response for a saved-search search: the single search "GET".
all_searches = {
    "_shards": _shard_status(),
    "hits": {
        "hits": [
            {
                "_id": "GET",
                "_index": ".kibana",
                "_score": 1.0,
                "_source": {
                    "columns": [
                        "_source"
                    ],
                    "description": "",
                    "hits": 0,
                    "kibanaSavedObjectMeta": {
                        "searchSourceJSON": "{\"index\":\"[logstash-]YYYY.MM.DD\",\"highlight\":{\"pre_tags\":[\"@kibana-highlighted-field@\"],\"post_tags\":[\"@/kibana-highlighted-field@\"],\"fields\":{\"*\":{}}},\"filter\":[],\"query\":{\"query_string\":{\"query\":\"GET\",\"analyze_wildcard\":true}}}"
                    },
                    "sort": [
                        "@timestamp",
                        "desc"
                    ],
                    "title": "GET",
                    "version": 1,
                },
                "_type": "search",
            },
        ],
        "max_score": 1.0,
        "total": 1,
    },
    "timed_out": False,
    "took": 1,
}

# Endpoint path -> canned response, keyed the way the tests look them up.
stub_data = {
    '_search/dashboard': all_dashboards,
    '_search/visualization': all_visualizations,
    '_search/search': all_searches,
}
|
PaddlePaddle/models
|
refs/heads/develop
|
PaddleCV/video/models/nonlocal_model/nonlocal_helper.py
|
1
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import paddle
import paddle.fluid as fluid
from paddle.fluid import ParamAttr
# 3d spacetime nonlocal (v1, spatial downsample)
# 3d spacetime nonlocal (v1, spatial downsample)
def spacetime_nonlocal(blob_in, dim_in, dim_out, batch_size, prefix, dim_inner,
                       cfg, test_mode=False, max_pool_stride=2):
    """Build a 3D spacetime non-local block (v1, with spatial downsample).

    Computes theta/phi/g 1x1x1-conv projections of ``blob_in``, an affinity
    matrix softmax(theta^T . phi), and aggregates g through it, followed by an
    output 1x1x1 conv and optional BN / affine-channel.

    :param blob_in: input 5-D feature map variable, laid out (N, C, T, H, W).
    :param dim_in: number of input channels (C).
    :param dim_out: number of output channels of the final projection.
    :param batch_size: batch size N (kept for interface compatibility; the
        reshapes below use -1 for the batch axis).
    :param prefix: name prefix for all created layers/parameters.
    :param dim_inner: channel count of the theta/phi/g projections.
    :param cfg: config object; reads cfg.NONLOCAL.* and cfg.TRAIN.weight_decay_bn.
    :param test_mode: True for inference (freezes batch-norm statistics).
    :param max_pool_stride: spatial stride of the optional phi/g max-pool.
    :return: output feature map variable with dim_out channels.
    :raises NotImplementedError: if cfg.NONLOCAL.use_softmax is False.
    """
    cur = blob_in
    # we do projection to convert each spacetime location to a feature
    # theta original size
    # e.g., (8, 1024, 4, 14, 14) => (8, 1024, 4, 14, 14)
    theta = fluid.layers.conv3d(
        input=cur,
        num_filters=dim_inner,
        filter_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding=[0, 0, 0],
        param_attr=ParamAttr(
            name=prefix + '_theta' + "_w",
            initializer=fluid.initializer.Normal(
                loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
        bias_attr=ParamAttr(
            name=prefix + '_theta' + "_b",
            initializer=fluid.initializer.Constant(value=0.))
        if (cfg.NONLOCAL.no_bias == 0) else False,
        name=prefix + '_theta')
    theta_shape = theta.shape

    # phi and g: half spatial size
    # e.g., (8, 1024, 4, 14, 14) => (8, 1024, 4, 7, 7)
    if cfg.NONLOCAL.use_maxpool:
        max_pool = fluid.layers.pool3d(
            input=cur,
            pool_size=[1, max_pool_stride, max_pool_stride],
            pool_type='max',
            pool_stride=[1, max_pool_stride, max_pool_stride],
            pool_padding=[0, 0, 0],
            name=prefix + '_pool')
    else:
        max_pool = cur

    phi = fluid.layers.conv3d(
        input=max_pool,
        num_filters=dim_inner,
        filter_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding=[0, 0, 0],
        param_attr=ParamAttr(
            name=prefix + '_phi' + "_w",
            initializer=fluid.initializer.Normal(
                loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
        bias_attr=ParamAttr(
            name=prefix + '_phi' + "_b",
            initializer=fluid.initializer.Constant(value=0.))
        if (cfg.NONLOCAL.no_bias == 0) else False,
        name=prefix + '_phi')
    phi_shape = phi.shape

    g = fluid.layers.conv3d(
        input=max_pool,
        num_filters=dim_inner,
        filter_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding=[0, 0, 0],
        param_attr=ParamAttr(
            name=prefix + '_g' + "_w",
            initializer=fluid.initializer.Normal(
                loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
        bias_attr=ParamAttr(
            name=prefix + '_g' + "_b",
            initializer=fluid.initializer.Constant(value=0.))
        if (cfg.NONLOCAL.no_bias == 0) else False,
        name=prefix + '_g')
    g_shape = g.shape

    # we have to use explicit batch size (to support arbitrary spacetime size)
    # e.g. (8, 1024, 4, 14, 14) => (8, 1024, 784)
    theta = fluid.layers.reshape(
        theta, [-1, 0, theta_shape[2] * theta_shape[3] * theta_shape[4]])
    theta = fluid.layers.transpose(theta, [0, 2, 1])
    phi = fluid.layers.reshape(
        phi, [-1, 0, phi_shape[2] * phi_shape[3] * phi_shape[4]])
    theta_phi = fluid.layers.matmul(theta, phi, name=prefix + '_affinity')
    g = fluid.layers.reshape(g, [-1, 0, g_shape[2] * g_shape[3] * g_shape[4]])

    if cfg.NONLOCAL.use_softmax:
        if cfg.NONLOCAL.use_scale is True:
            # scale by 1/sqrt(dim_inner) as in "Attention is All You Need"
            theta_phi_sc = fluid.layers.scale(theta_phi, scale=dim_inner**-.5)
        else:
            theta_phi_sc = theta_phi
        p = fluid.layers.softmax(
            theta_phi_sc, name=prefix + '_affinity' + '_prob')
    else:
        # BUGFIX: the original code raised a plain string, which is a
        # TypeError under Python 3 -- raise a proper exception instead.
        raise NotImplementedError("Not implemented when not use softmax")

    # note g's axis[2] corresponds to p's axis[2]
    # e.g. g(8, 1024, 784_2) * p(8, 784_1, 784_2) => (8, 1024, 784_1)
    p = fluid.layers.transpose(p, [0, 2, 1])
    t = fluid.layers.matmul(g, p, name=prefix + '_y')

    # reshape back
    # e.g. (8, 1024, 784) => (8, 1024, 4, 14, 14)
    t_shape = t.shape
    t_re = fluid.layers.reshape(t, shape=list(theta_shape))
    blob_out = t_re

    blob_out = fluid.layers.conv3d(
        input=blob_out,
        num_filters=dim_out,
        filter_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding=[0, 0, 0],
        param_attr=ParamAttr(
            name=prefix + '_out' + "_w",
            initializer=fluid.initializer.Constant(value=0.)
            if cfg.NONLOCAL.use_zero_init_conv else fluid.initializer.Normal(
                loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
        bias_attr=ParamAttr(
            name=prefix + '_out' + "_b",
            initializer=fluid.initializer.Constant(value=0.))
        if (cfg.NONLOCAL.no_bias == 0) else False,
        name=prefix + '_out')
    blob_out_shape = blob_out.shape

    if cfg.NONLOCAL.use_bn is True:
        bn_name = prefix + "_bn"
        blob_out = fluid.layers.batch_norm(
            blob_out,
            is_test=test_mode,
            momentum=cfg.NONLOCAL.bn_momentum,
            epsilon=cfg.NONLOCAL.bn_epsilon,
            name=bn_name,
            param_attr=ParamAttr(
                name=bn_name + "_scale",
                initializer=fluid.initializer.Constant(
                    value=cfg.NONLOCAL.bn_init_gamma),
                regularizer=fluid.regularizer.L2Decay(
                    cfg.TRAIN.weight_decay_bn)),
            bias_attr=ParamAttr(
                name=bn_name + "_offset",
                regularizer=fluid.regularizer.L2Decay(
                    cfg.TRAIN.weight_decay_bn)),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")  # add bn

    if cfg.NONLOCAL.use_affine is True:
        # per-channel learnable scale/bias as a lightweight BN substitute
        affine_scale = fluid.layers.create_parameter(
            shape=[blob_out_shape[1]],
            dtype=blob_out.dtype,
            attr=ParamAttr(name=prefix + '_affine' + '_s'),
            default_initializer=fluid.initializer.Constant(value=1.))
        affine_bias = fluid.layers.create_parameter(
            shape=[blob_out_shape[1]],
            dtype=blob_out.dtype,
            attr=ParamAttr(name=prefix + '_affine' + '_b'),
            default_initializer=fluid.initializer.Constant(value=0.))
        blob_out = fluid.layers.affine_channel(
            blob_out,
            scale=affine_scale,
            bias=affine_bias,
            name=prefix + '_affine')  # add affine

    return blob_out
def add_nonlocal(blob_in,
                 dim_in,
                 dim_out,
                 batch_size,
                 prefix,
                 dim_inner,
                 cfg,
                 test_mode=False):
    """Apply one spacetime non-local block to ``blob_in`` with a residual add."""
    nonlocal_out = spacetime_nonlocal(
        blob_in, dim_in, dim_out, batch_size, prefix, dim_inner, cfg,
        test_mode=test_mode)
    # residual connection: out = nonlocal(x) + x
    return fluid.layers.elementwise_add(
        nonlocal_out, blob_in, name=prefix + '_sum')
# this is to reduce memory usage if the feature maps are big
# devide the feature maps into groups in the temporal dimension,
# and perform non-local operations inside each group.
def add_nonlocal_group(blob_in,
                       dim_in,
                       dim_out,
                       batch_size,
                       pool_stride,
                       height,
                       width,
                       group_size,
                       prefix,
                       dim_inner,
                       cfg,
                       test_mode=False):
    """Apply a non-local block within temporal groups to reduce memory use.

    The temporal dimension (length ``pool_stride``) is split into
    ``pool_stride / group_size`` groups; the non-local operation (plus the
    residual add) runs inside each group, and the groups are stitched back
    together afterwards.
    """
    group_num = int(pool_stride / group_size)
    assert (pool_stride % group_size == 0), \
        'nonlocal block {}: pool_stride({}) should be divided by group size({})'.format(prefix, pool_stride, group_size)

    if group_num > 1:
        # fold the temporal groups into the batch axis:
        # (N, C, T, H, W) -> (N*G, C, T/G, H, W)
        blob_in = fluid.layers.transpose(
            blob_in, [0, 2, 1, 3, 4], name=prefix + '_pre_trans1')
        blob_in = fluid.layers.reshape(
            blob_in,
            [batch_size * group_num, group_size, dim_in, height, width],
            name=prefix + '_pre_reshape1')
        blob_in = fluid.layers.transpose(
            blob_in, [0, 2, 1, 3, 4], name=prefix + '_pre_trans2')

    out = spacetime_nonlocal(
        blob_in,
        dim_in,
        dim_out,
        batch_size,
        prefix,
        dim_inner,
        cfg,
        test_mode=test_mode)
    # residual connection inside each group
    out = fluid.layers.elementwise_add(out, blob_in, name=prefix + '_sum')

    if group_num > 1:
        # unfold the groups back out of the batch axis
        out = fluid.layers.transpose(
            out, [0, 2, 1, 3, 4], name=prefix + '_post_trans1')
        out = fluid.layers.reshape(
            out,
            [batch_size, group_num * group_size, dim_out, height, width],
            name=prefix + '_post_reshape1')
        out = fluid.layers.transpose(
            out, [0, 2, 1, 3, 4], name=prefix + '_post_trans2')
    return out
|
navotsil/Open-Knesset
|
refs/heads/master
|
notify/management/commands/notify.py
|
8
|
from __future__ import absolute_import
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User,Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.utils.translation import ugettext as _
from django.utils import translation
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
from django.conf import settings
from django.core.cache import cache
import datetime
from optparse import make_option
import logging
logger = logging.getLogger("open-knesset.notify")
from actstream.models import Follow, Action
from mailer import send_html_mail
from mks.models import Member
from laws.models import Bill, get_debated_bills
from agendas.models import Agenda
from notify.models import LastSent
from user.models import UserProfile
from committees.models import Topic
class Command(NoArgsCommand):
help = "Send e-mail notification to users that requested it."
requires_model_validation = False
update_models = [Member, Bill, Agenda, Topic, None]
from_email = getattr(settings, 'DEFAULT_FROM_EMAIL', '[email protected]')
days_back = getattr(settings, 'DEFAULT_NOTIFICATION_DAYS_BACK', 10)
lang = getattr(settings, 'LANGUAGE_CODE', 'he')
@property
def domain(self):
if not hasattr(self, '_domain'):
self._domain = Site.objects.get_current().domain
return self._domain
option_list = NoArgsCommand.option_list + (
make_option('--daily', action='store_true', dest='daily',
help="send notifications to users that requested a daily update"),
make_option('--weekly', action='store_true', dest='weekly',
help="send notifications to users that requested a weekly update"))
def agenda_update(self, agenda):
''' generate the general update email for this agenda.
this will be called, and its output added to the email,
if and only if there has been some update in it's data.
'''
mks = agenda.selected_instances(Member)
template_name = 'notify/agenda_update'
update_txt = render_to_string(template_name + '.txt',
{'mks':mks,
'domain':self.domain})
update_html = render_to_string(template_name + '.html',
{'mks':mks,
'domain':self.domain})
return (update_txt,update_html)
@classmethod
def get_model_headers(cls, model):
''' for a given model this function returns a tuple with
(model, text_header, html_header)
'''
try:
template_name = 'notify/%s_section' % model.__name__.lower()
return (model, render_to_string(template_name + '.txt'), render_to_string(template_name + '.html'))
except TemplateDoesNotExist:
return (model, model._meta.verbose_name_plural, '<h2>%s</h2>' % model._meta.verbose_name_plural.format())
except AttributeError:
return (model, _('Other Updates'), '<h2>%s</h2>' % _('Other Updates'))
def get_email_for_user(self, user):
''' return the body text and html for a user's email '''
updates = dict(zip(self.update_models, ([] for x in self.update_models))) # will contain the updates to be sent
updates_html = dict(zip(self.update_models, ([] for x in self.update_models)))
follows = Follow.objects.filter(user=user) # everything this user is following
# sometime a user follows something several times. we want to filter that out:
follows = set([f.actor for f in follows])
for f in follows:
if not f:
logger.warning('Follow object with None actor. ignoring')
continue
model_class = f.__class__
model_template = f.__class__.__name__.lower()
try:
model_name = f.__class__._meta.verbose_name
except AttributeError:
logger.warning('follows %d has no __class__?' % f.id)
model_name = ""
content_type = ContentType.objects.get_for_model(f)
if model_class in updates:
key = model_class
else:
key = None # put all updates for 'other' classes at the 'None' group
try: # get actions that happened since last update
last_sent = LastSent.objects.get(user=user, content_type=content_type, object_pk=f.id)
last_sent_time = last_sent.time
stream = Action.objects.filter(actor_content_type = content_type,
actor_object_id = f.id,
timestamp__gt=last_sent_time,
).order_by('-timestamp')
if stream: # if there are updates to be sent,
last_sent.save() # update timestamp of last sent
except LastSent.DoesNotExist: # never updated about this actor, send some updates
stream = Action.objects.filter(actor_content_type = content_type,
actor_object_id = f.id,
timestamp__gt=datetime.datetime.now()-datetime.timedelta(self.days_back),
).order_by('-timestamp')
last_sent = LastSent.objects.create(user=user,content_type=content_type, object_pk=f.id)
if stream: # this actor has some updates
try: # genereate the appropriate header for this actor class
header = render_to_string(('notify/%(model)s_header.txt' % {'model': model_template}),{'model':model_name,'object':f})
except TemplateDoesNotExist:
header = render_to_string(('notify/model_header.txt'),{'model':model_name,'object':f})
try:
header_html = render_to_string(('notify/%(model)s_header.html' % {'model': model_template}),{'model':model_name,'object':f,'domain':self.domain})
except TemplateDoesNotExist:
header_html = render_to_string(('notify/model_header.html'),{'model':model_name,'object':f,'domain':self.domain})
updates[key].append(header)
updates_html[key].append(header_html)
for action_instance in stream: # now generate the updates themselves
try:
action_output = render_to_string(('activity/%(verb)s/action_email.txt' % { 'verb':action_instance.verb.replace(' ','_') }),{ 'action':action_instance },None)
except TemplateDoesNotExist: # fallback to the generic template
action_output = render_to_string(('activity/action_email.txt'),{ 'action':action_instance },None)
try:
action_output_html = render_to_string(('activity/%(verb)s/action_email.html' % { 'verb':action_instance.verb.replace(' ','_') }),{ 'action':action_instance,'domain':self.domain },None)
except TemplateDoesNotExist: # fallback to the generic template
action_output_html = render_to_string(('activity/action_email.html'),{ 'action':action_instance,'domain':self.domain },None)
updates[key].append(action_output)
updates_html[key].append(action_output_html)
if model_class == Agenda:
txt,html = self.agenda_update(f)
updates[key].append(txt)
updates_html[key].append(html)
email_body = []
email_body_html = []
# Add the updates for followed models
for (model_class,title,title_html) in map(self.get_model_headers, self.update_models):
if updates[model_class]: # this model has some updates, add it to the email
email_body.append(title.format())
email_body.append('\n'.join(updates[model_class]))
email_body_html.append(title_html.format())
email_body_html.append(''.join(updates_html[model_class]))
if email_body or email_body_html:
# Generate party membership section if needed
up = UserProfile.objects.filter(user=user).select_related('party')
if up:
up = up[0]
party = up.party
if party:
num_members = cache.get('party_num_members_%d' % party.id,
None)
if not num_members:
num_members = party.userprofile_set.count()
cache.set('party_num_members_%d' % party.id,
num_members,
settings.LONG_CACHE_TIME)
else:
num_members = None
debated_bills = get_debated_bills() or []
template_name = 'notify/party_membership'
party_membership_txt = render_to_string(template_name + '.txt',
{'user':user,
'userprofile':up,
'num_members':num_members,
'bills':debated_bills,
'domain':self.domain})
party_membership_html = render_to_string(template_name + '.html',
{'user':user,
'userprofile':up,
'num_members':num_members,
'bills':debated_bills,
'domain':self.domain})
else:
logger.warning('Can\'t find user profile')
if email_body:
email_body.insert(0, party_membership_txt)
if email_body_html:
email_body_html.insert(0, party_membership_html)
return (email_body, email_body_html)
    def handle_noargs(self, **options):
        """Queue notification emails for every user with a validated address.

        Expects ``--daily`` and/or ``--weekly`` in ``options``; only users
        whose profile requests the matching frequency get an email.
        Prints a usage hint and returns if neither flag is given.
        """
        daily = options.get('daily', False)
        weekly = options.get('weekly', False)
        if not daily and not weekly:
            print "use --daily or --weekly"
            return
        # Render all templates in the command's configured language.
        translation.activate(self.lang)
        # Frequency codes matching UserProfile.email_notification values.
        email_notification = []
        if daily:
            email_notification.append('D')
        if weekly:
            email_notification.append('W')
        queued = 0
        # Only users in the 'Valid Email' group (confirmed addresses) with a
        # profile and a non-empty email address are considered.
        g = Group.objects.get(name='Valid Email')
        for user in User.objects.filter(groups=g,
                                        profiles__isnull=False)\
                        .exclude(email=''):
            try:
                user_profile = user.get_profile()
            except UserProfile.DoesNotExist:
                logger.warn('can\'t access user %d userprofile' % user.id)
                continue
            if (user_profile.email_notification in email_notification):
                # if this user has requested emails in the frequency we are
                # handling now
                email_body, email_body_html = self.get_email_for_user(user)
                if email_body: # there are some updates. generate email
                    header = render_to_string(('notify/header.txt'),{ 'user':user })
                    footer = render_to_string(('notify/footer.txt'),{ 'user':user,'domain':self.domain })
                    header_html = render_to_string(('notify/header.html'),{ 'user':user })
                    footer_html = render_to_string(('notify/footer.html'),{ 'user':user,'domain':self.domain })
                    send_html_mail(_('Open Knesset Updates'), "%s\n%s\n%s" % (header, '\n'.join(email_body), footer),
                                   "%s\n%s\n%s" % (header_html, ''.join(email_body_html), footer_html),
                                   self.from_email,
                                   [user.email],
                                   )
                    queued += 1
        logger.info("%d email notifications queued for sending" % queued)
        translation.deactivate()
|
shepdelacreme/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/cartesian.py
|
141
|
# (c) 2013, Bradley Young <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: cartesian
version_added: "2.1"
short_description: returns the cartesian product of lists
description:
- Takes the input lists and returns a list that represents the product of the input lists.
- It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
You can see the exact syntax in the examples section.
options:
_raw:
description:
- a set of lists
required: True
"""
EXAMPLES = """
- name: Example of the change in the description
debug: msg="{{ [1,2,3]|lookup('cartesian', [a, b])}}"
- name: loops over the cartesian product of the supplied lists
debug: msg="{{item}}"
with_cartesian:
- "{{list1}}"
- "{{list2}}"
- [1,2,3,4,5,6]
"""
RETURN = """
_list:
description:
- list of lists composed of elements of the input lists
type: lists
"""
from itertools import product
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
    """
    Create the cartesian product of lists.
    """

    def _lookup_variables(self, terms):
        """
        Expand each raw term through the templar/loader.

        Turn this:
            terms == ["1,2,3", "a,b"]
        into this:
            terms == [[1,2,3], [a, b]]
        """
        # Comprehension replaces the manual append loop (PERF401).
        return [listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
                for term in terms]

    def run(self, terms, variables=None, **kwargs):
        """Return the cartesian product of the supplied lists.

        Each result entry is the flattened combination taking one element
        from every input list.

        Raises:
            AnsibleError: if no lists were supplied at all.
        """
        terms = self._lookup_variables(terms)
        # Truthiness check replaces `len(my_list) == 0` on a needless copy.
        if not terms:
            raise AnsibleError("with_cartesian requires at least one element in each list")
        return [self._flatten(combo) for combo in product(*terms)]
|
bitifirefly/edx-platform
|
refs/heads/master
|
lms/djangoapps/bulk_email/migrations/0009_force_unique_course_ids.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add a unique constraint on CourseAuthorization.course_id.

    The ``models`` dict below is South's auto-generated frozen snapshot of
    the app's ORM state when this migration was created; do not edit it by
    hand.
    """

    def forwards(self, orm):
        # Adding unique constraint on 'CourseAuthorization', fields ['course_id']
        db.create_unique('bulk_email_courseauthorization', ['course_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'CourseAuthorization', fields ['course_id']
        db.delete_unique('bulk_email_courseauthorization', ['course_id'])

    # Frozen ORM definitions (auto-generated by South) -- keep verbatim.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'bulk_email.courseauthorization': {
            'Meta': {'object_name': 'CourseAuthorization'},
            'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'bulk_email.courseemail': {
            'Meta': {'object_name': 'CourseEmail'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
        },
        'bulk_email.courseemailtemplate': {
            'Meta': {'object_name': 'CourseEmailTemplate'},
            'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'bulk_email.optout': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['bulk_email']
|
majintao0131/yaml-cpp.core
|
refs/heads/master
|
test/gmock-1.7.0/gtest/test/gtest_break_on_failure_unittest.py
|
2140
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  process = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if process.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    # Build a human-readable description of the environment for the
    # failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    has_seg_fault = Run(command)
    # Unset the variable so later runs start from a clean environment.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # This test is only defined on Windows, where the catch-exceptions
  # mode applies.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
rosmo/aurora
|
refs/heads/master
|
src/main/python/apache/thermos/monitoring/__init__.py
|
296
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
jigpu/input
|
refs/heads/master
|
scripts/gdb/linux/dmesg.py
|
249
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
class LxDmesg(gdb.Command):
    """Print Linux kernel log buffer."""

    def __init__(self):
        super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # "log_buf" may print as "0x... <symbol>"; keep only the address token.
        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
        inf = gdb.inferiors()[0]
        start = log_buf_addr + log_first_idx
        if log_first_idx < log_next_idx:
            # Buffer has not wrapped: one contiguous region holds all records.
            log_buf_2nd_half = -1
            length = log_next_idx - log_first_idx
            log_buf = utils.read_memoryview(inf, start, length).tobytes()
        else:
            # Buffer wrapped: read the tail (first_idx..end) then the head
            # (start..next_idx) and join them in log order.
            log_buf_2nd_half = log_buf_len - log_first_idx
            a = utils.read_memoryview(inf, start, log_buf_2nd_half)
            b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
            log_buf = a.tobytes() + b.tobytes()
        pos = 0
        while pos < log_buf.__len__():
            # Record layout assumed to match the kernel's struct printk_log:
            # u64 ts_nsec at offset 0, u16 len at 8, u16 text_len at 10,
            # text starting at byte 16 -- confirm against the target kernel.
            length = utils.read_u16(log_buf[pos + 8:pos + 10])
            if length == 0:
                if log_buf_2nd_half == -1:
                    gdb.write("Corrupted log buffer!\n")
                    break
                # A zero-length record marks the end of the first half;
                # continue parsing at the wrap point.
                pos = log_buf_2nd_half
                continue
            text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
            text = log_buf[pos + 16:pos + 16 + text_len].decode()
            time_stamp = utils.read_u64(log_buf[pos:pos + 8])
            # Timestamp is in nanoseconds; print seconds like dmesg does.
            for line in text.splitlines():
                gdb.write("[{time:12.6f}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line))
            pos += length
LxDmesg()  # instantiating registers the "lx-dmesg" command with gdb
|
kakwa/dnscherry
|
refs/heads/master
|
dnscherry/auth/modNone.py
|
1
|
# -*- coding: utf-8 -*-
# vim:set expandtab tabstop=4 shiftwidth=4:
# The MIT License (MIT)
# DnsCherry
# Copyright (c) 2014 Carpentier Pierre-Francois
import cherrypy
import dnscherry.auth
class Auth(dnscherry.auth.Auth):
    """'None' authentication backend.

    Performs no real authentication: the user name is either taken from a
    trusted HTTP request header or reported as 'unknown user'.
    """

    def __init__(self, config, logger=None):
        # no need for a logout button
        self.logout_button = False
        # Bug fix: the presence check used the key 'auth.user_header_name'
        # while the value was read from 'auth.none.user_header_name', so the
        # setting was either silently ignored or raised KeyError depending on
        # which key was present. Use the namespaced key consistently.
        if 'auth.none.user_header_name' in config:
            self.user_header_name = config['auth.none.user_header_name']
        else:
            self.user_header_name = None

    def check_auth(self):
        """Return the user name for the current request.

        Returns the value of the configured request header, or the
        placeholder 'unknown user' when no header is configured.

        Raises:
            cherrypy.HTTPError: 403 when the configured header is missing
                from the request.
        """
        if self.user_header_name is None:
            return 'unknown user'
        else:
            if self.user_header_name in cherrypy.request.headers:
                return cherrypy.request.headers[self.user_header_name]
            else:
                raise cherrypy.HTTPError(
                    "403 Forbidden",
                    "You are not allowed to access this resource."
                )
|
GustavoHennig/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_vpc.py
|
19
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vpc
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages global VPC configuration
description:
- Manages global VPC configuration
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- The feature vpc must be enabled before this module can be used
- If not using management vrf, vrf must be globally on the device
before using in the pkl config
- Although source IP isn't required on the command line it is
required when using this module. The PKL VRF must also be configured
prior to using this module.
- Both pkl_src and pkl_dest are needed when changing PKL VRF.
options:
domain:
description:
- VPC domain
required: true
role_priority:
description:
- Role priority for device. Remember lower is better.
required: false
default: null
system_priority:
description:
- System priority device. Remember they must match between peers.
required: false
default: null
pkl_src:
description:
- Source IP address used for peer keepalive link
required: false
default: null
pkl_dest:
description:
- Destination (remote) IP address used for peer keepalive link
required: false
default: null
pkl_vrf:
description:
- VRF used for peer keepalive link
required: false
default: management
peer_gw:
description:
- Enables/Disables peer gateway
required: true
choices: ['true','false']
auto_recovery:
description:
- Enables/Disables auto recovery
required: true
choices: ['true','false']
delay_restore:
description:
- manages delay restore command and config value in seconds
required: false
default: null
state:
description:
- Manages desired state of the resource
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
# configure a simple asn
- nxos_vpc:
domain: 100
role_priority: 1000
system_priority: 2000
pkl_dest: 192.168.100.4
pkl_src: 10.1.100.20
peer_gw: true
auto_recovery: true
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"auto_recovery": true, "domain": "100",
"peer_gw": true, "pkl_dest": "192.168.100.4",
"pkl_src": "10.1.100.20", "pkl_vrf": "management",
"role_priority": "1000", "system_priority": "2000"}
existing:
description: k/v pairs of existing VPC configuration
type: dict
sample: {"auto_recovery": true, "delay_restore": null,
"domain": "100", "peer_gw": true,
"pkl_dest": "192.168.100.2", "pkl_src": "10.1.100.20",
"pkl_vrf": "management", "role_priority": "1000",
"system_priority": "2000"}
end_state:
description: k/v pairs of VPC configuration after module execution
returned: always
type: dict
sample: {"auto_recovery": true, "domain": "100",
"peer_gw": true, "pkl_dest": "192.168.100.4",
"pkl_src": "10.1.100.20", "pkl_vrf": "management",
"role_priority": "1000", "system_priority": "2000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["vpc domain 100",
"peer-keepalive destination 192.168.100.4 source 10.1.100.20 vrf management",
"auto-recovery", "peer-gateway"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command on the device and return the response body.

    For CLI transport, structured output is requested by appending
    '| json', except for 'section' filters which must stay plain text.
    ``command_type`` is unused but kept for interface compatibility.
    """
    if module.params['transport'] == 'cli':
        if "section" not in command:
            command += ' | json'
    # The original duplicated the run_commands call in per-transport
    # branches and left `body` unbound for any other transport value;
    # both transports execute the command the same way.
    cmds = [command]
    body = run_commands(module, cmds)
    return body
def flatten_list(command_lists):
    """Flatten one level of nesting: sublists are spliced in, scalars kept."""
    flattened = []
    for item in command_lists:
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
def get_vrf_list(module):
    """Return the lowercased names of all VRFs configured on the device."""
    body = execute_show_command('show vrf all', module)
    try:
        vrf_table = body[0]['TABLE_vrf']['ROW_vrf']
    except (KeyError, AttributeError):
        # Unexpected response shape -- treat as "no VRFs".
        return []
    if not vrf_table:
        return []
    return [str(row['vrf_name'].lower()) for row in vrf_table]
def get_autorecovery(auto):
    """Return True when the auto-recovery status string reports 'enabled'."""
    status_word = auto.split(' ')[0]
    return 'enabled' in status_word.lower()
def get_vpc_running_config(module):
    """Return the raw 'show running section vpc' output from the device."""
    return execute_show_command('show running section vpc', module,
                                command_type='cli_show_ascii')
def get_vpc(module):
    """Return the device's current VPC configuration as a dict.

    Combines 'show vpc', the vpc section of the running config, and
    'show vpc peer-keepalive'. Returns an empty dict when no VPC domain
    is configured.
    """
    vpc = {}
    command = 'show vpc'
    body = execute_show_command(command, module)[0]
    domain = str(body['vpc-domain-id'])
    auto_recovery = get_autorecovery(str(
        body['vpc-auto-recovery-status']))
    if domain != 'not configured':
        delay_restore = None
        pkl_src = None
        role_priority = None
        system_priority = None
        pkl_dest = None
        pkl_vrf = None
        peer_gw = False
        # Scrape remaining settings from the running-config text.
        run = get_vpc_running_config(module)[0]
        if run:
            vpc_list = run.split('\n')
            for each in vpc_list:
                if 'delay restore' in each:
                    line = each.split()
                    # only the plain "delay restore <secs>" form (5 tokens)
                    if len(line) == 5:
                        delay_restore = line[-1]
                if 'peer-keepalive destination' in each:
                    line = each.split()
                    pkl_dest = line[2]
                    # 'source' may appear at any position; take the next token
                    for word in line:
                        if 'source' in word:
                            index = line.index(word)
                            pkl_src = line[index + 1]
                if 'role priority' in each:
                    line = each.split()
                    role_priority = line[-1]
                if 'system-priority' in each:
                    line = each.split()
                    system_priority = line[-1]
                if 'peer-gateway' in each:
                    peer_gw = True
        # Prefer the operational peer-keepalive data when available.
        command = 'show vpc peer-keepalive'
        body = execute_show_command(command, module)[0]
        if body:
            pkl_dest = body['vpc-keepalive-dest']
            if 'N/A' in pkl_dest:
                pkl_dest = None
            elif len(pkl_dest) == 2:
                # two-element value: first entry is the address
                pkl_dest = pkl_dest[0]
            pkl_vrf = str(body['vpc-keepalive-vrf'])
        vpc['domain'] = domain
        vpc['auto_recovery'] = auto_recovery
        vpc['delay_restore'] = delay_restore
        vpc['pkl_src'] = pkl_src
        vpc['role_priority'] = role_priority
        vpc['system_priority'] = system_priority
        vpc['pkl_dest'] = pkl_dest
        vpc['pkl_vrf'] = pkl_vrf
        vpc['peer_gw'] = peer_gw
    else:
        vpc = {}
    return vpc
def get_commands_to_config_vpc(module, vpc, domain, existing):
    """Build the command list that applies the desired VPC settings.

    Args:
        module: AnsibleModule (unused; kept for interface compatibility).
        vpc: desired settings -- a dict or an iterable of (key, value) pairs.
        domain: VPC domain id to configure under.
        existing: current device state, used to fill in missing keepalive
            endpoints / VRF.

    Returns:
        List of CLI commands, starting with 'vpc domain <id>' when there is
        anything to configure.
    """
    vpc = dict(vpc)
    domain_only = vpc.get('domain')
    pkl_src = vpc.get('pkl_src')
    pkl_dest = vpc.get('pkl_dest')
    pkl_vrf = vpc.get('pkl_vrf') or existing.get('pkl_vrf')
    vpc['pkl_vrf'] = pkl_vrf
    commands = []
    if pkl_src or pkl_dest:
        # Fill whichever keepalive endpoint is missing from existing state.
        if pkl_src is None:
            vpc['pkl_src'] = existing.get('pkl_src')
        elif pkl_dest is None:
            vpc['pkl_dest'] = existing.get('pkl_dest')
        commands.append(
            'peer-keepalive destination {pkl_dest}'
            ' source {pkl_src} vrf {pkl_vrf}'.format(**vpc))
    elif pkl_vrf:
        # Only the VRF changed: re-issue the keepalive with existing endpoints.
        pkl_src = existing.get('pkl_src')
        pkl_dest = existing.get('pkl_dest')
        if pkl_src and pkl_dest:
            commands.append('peer-keepalive destination {0}'
                            ' source {1} vrf {2}'.format(pkl_dest, pkl_src,
                                                         pkl_vrf))
    # Map booleans to a 'no ' prefix (False) or plain command (otherwise).
    vpc['auto_recovery'] = 'no' if vpc.get('auto_recovery') is False else ''
    vpc['peer_gw'] = 'no' if vpc.get('peer_gw') is False else ''
    CONFIG_ARGS = {
        'role_priority': 'role priority {role_priority}',
        'system_priority': 'system-priority {system_priority}',
        'delay_restore': 'delay restore {delay_restore}',
        'peer_gw': '{peer_gw} peer-gateway',
        'auto_recovery': '{auto_recovery} auto-recovery',
    }
    # Iterate keys only (the values are pulled from vpc by format) -- the
    # original looped over .items() without using the value (PERF102) and
    # pointlessly reset a loop variable to None each iteration.
    for param in vpc:
        command = CONFIG_ARGS.get(param, 'DNE').format(**vpc)
        if command and command != 'DNE':
            commands.append(command.strip())
    if commands or domain_only:
        commands.insert(0, 'vpc domain {0}'.format(domain))
    return commands
def get_commands_to_remove_vpc_interface(portchannel, config_value):
    """Return commands that strip the vpc setting from a port-channel."""
    return ['interface port-channel{0}'.format(portchannel),
            'no vpc {0}'.format(config_value)]
def main():
    """Module entry point: diff desired VPC state against the device and
    apply the needed commands, then exit via module.exit_json/fail_json."""
    argument_spec = dict(
        domain=dict(required=True, type='str'),
        role_priority=dict(required=False, type='str'),
        system_priority=dict(required=False, type='str'),
        pkl_src=dict(required=False),
        pkl_dest=dict(required=False),
        pkl_vrf=dict(required=False, default='management'),
        peer_gw=dict(required=True, type='bool'),
        auto_recovery=dict(required=True, type='bool'),
        delay_restore=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    domain = module.params['domain']
    role_priority = module.params['role_priority']
    system_priority = module.params['system_priority']
    pkl_src = module.params['pkl_src']
    pkl_dest = module.params['pkl_dest']
    pkl_vrf = module.params['pkl_vrf']
    peer_gw = module.params['peer_gw']
    auto_recovery = module.params['auto_recovery']
    delay_restore = module.params['delay_restore']
    state = module.params['state']
    args = dict(domain=domain, role_priority=role_priority,
                system_priority=system_priority, pkl_src=pkl_src,
                pkl_dest=pkl_dest, pkl_vrf=pkl_vrf, peer_gw=peer_gw,
                auto_recovery=auto_recovery,
                delay_restore=delay_restore)
    if not (pkl_src and pkl_dest and pkl_vrf):
        # if only the source or dest is set, it'll fail and ask to set the
        # other
        if pkl_src or pkl_dest:
            module.fail_json(msg='source AND dest IP for pkl are required at '
                                 'this time (although source is technically not '
                                 ' required by the device.)')
        # keepalive settings incomplete -- drop them from the proposed state
        args.pop('pkl_src')
        args.pop('pkl_dest')
        args.pop('pkl_vrf')
    if pkl_vrf:
        if pkl_vrf.lower() not in get_vrf_list(module):
            module.fail_json(msg='The VRF you are trying to use for the peer '
                                 'keepalive link is not on device yet. Add it'
                                 ' first, please.')
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    changed = False
    existing = get_vpc(module)
    end_state = existing
    commands = []
    if state == 'present':
        # delta: proposed settings that differ from what's on the device
        delta = set(proposed.items()).difference(existing.items())
        if delta:
            command = get_commands_to_config_vpc(module, delta, domain, existing)
            commands.append(command)
    elif state == 'absent':
        if existing:
            if domain != existing['domain']:
                module.fail_json(msg="You are trying to remove a domain that "
                                     "does not exist on the device")
            else:
                commands.append('no vpc domain {0}'.format(domain))
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # check mode: report intent without touching the device
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_vpc(module)
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)
if __name__ == '__main__':
main()
|
Intel-Corporation/tensorflow
|
refs/heads/master
|
tensorflow/contrib/mpi_collectives/python/ops/mpi_ops.py
|
46
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Inter-process communication using MPI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.mpi_collectives.ops import gen_mpi_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_mpi_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile('_mpi_ops.so'))
def size(name=None):
  """An op which returns the number of MPI processes.

  This is equivalent to running `MPI_Comm_size(MPI_COMM_WORLD, ...)` to get the
  size of the global communicator.

  Args:
    name: Optional name for the created op.

  Returns:
    An integer scalar containing the number of MPI processes.
  """
  return gen_mpi_ops.mpi_size(name=name)


# The communicator size is a constant of the run, not a function of inputs.
ops.NotDifferentiable('MPISize')
def rank(name=None):
  """An op which returns the MPI rank of the calling process.

  This is equivalent to running `MPI_Comm_rank(MPI_COMM_WORLD, ...)` to get the
  rank of the current process in the global communicator.

  Args:
    name: Optional name for the created op.

  Returns:
    An integer scalar with the MPI rank of the calling process.
  """
  return gen_mpi_ops.mpi_rank(name=name)


# The rank is a constant of the run, not a function of inputs.
ops.NotDifferentiable('MPIRank')
def init(name=None):
  """An op which initializes MPI on the device on which it is run.

  All future MPI ops must be run on the same device that the `init` op was run
  on.

  Args:
    name: Optional name for the created op.
  """
  return gen_mpi_ops.mpi_init(name=name)


ops.NotDifferentiable('MPIInit')
def local_rank(name=None):
  """An op which returns the local MPI rank of the calling process, within the
  node that it is running on. For example, if there are seven processes running
  on a node, their local ranks will be zero through six, inclusive.

  This is equivalent to running `MPI_Comm_rank(...)` on a new communicator
  which only includes processes on the same node.

  Args:
    name: Optional name for the created op.

  Returns:
    An integer scalar with the local MPI rank of the calling process.
  """
  return gen_mpi_ops.mpi_local_rank(name=name)


ops.NotDifferentiable('MPILocalRank')
def _allreduce(tensor, name=None):
  """An op which sums an input tensor over all the MPI processes.

  The reduction operation is keyed by the name of the op. The tensor type and
  shape must be the same on all MPI processes for a given name. The reduction
  will not start until all processes are ready to send and receive the tensor.

  Args:
    tensor: The tensor to sum across all MPI processes.
    name: Optional name for the created op; also keys the reduction.

  Returns:
    A tensor of the same shape and type as `tensor`, summed across all
    processes.
  """
  return gen_mpi_ops.mpi_allreduce(tensor, name=name)


ops.NotDifferentiable('MPIAllreduce')
def allgather(tensor, name=None):
  """An op which concatenates the input tensor with the same input tensor on
  all other MPI processes.

  The concatenation is done on the first dimension, so the input tensors on
  the different processes must have the same rank and shape, except for the
  first dimension, which is allowed to be different.

  Args:
    tensor: The tensor to gather from every MPI process.
    name: Optional base name for the created ops.

  Returns:
    A tensor of the same type as `tensor`, concatenated on dimension zero
    across all processes. The shape is identical to the input shape, except
    for the first dimension, which is the sum of all first dimensions of the
    tensors in different MPI processes.
  """
  if name is None:
    name = 'allgather'
  # A scalar (0-D tensor) of value 0 flags the first allgather as the pass
  # that collects each process's first-dimension size.
  sizing_flag = tf.constant(0, dtype=tf.int64, name='size_flag_const')
  local_first_dim = tf.slice(
      tf.shape(tensor, out_type=tf.int64), [0], [1], name='size_slice')
  gathered_sizes = gen_mpi_ops.mpi_allgather(
      local_first_dim, sizing_flag, name='{}_sizing'.format(name))
  return gen_mpi_ops.mpi_allgather(tensor, gathered_sizes, name=name)


ops.NotDifferentiable('MPIAllgather')
|
jormaral/aifh
|
refs/heads/master
|
vol1/python-examples/examples/example_nm_xor.py
|
6
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example makes use of the scipy minimize function's Nelder Mead optimization to fit an RBF network to
the XOR data set. The minimize function does not provide any means of getting iteration updates. As a result,
we simply display the score each time the score function finds a new "best score". This is why we only see score
output while training. The output is shown here.
It is also important to notice the output does not match the XOR exactly. For most XOR problems, this will be the
case. However, the output is close. For [0,0] and [1,1] we are much closer to 0 output than [1,0] and [0,1].
Optimization terminated successfully.
Current function value: 0.003416
Iterations: 3765
Function evaluations: 4879
[ 0. 0.] -> [0.058260150820555356]
[ 1. 0.] -> [0.97475248185226793]
[ 0. 1.] -> [0.90381500730453324]
[ 1. 1.] -> [0.019476173693234511]
"""
__author__ = 'jheaton'
import os
import sys
import numpy as np
from scipy.optimize import minimize
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from normalize import Normalize
from rbf_network import RbfNetwork
from error import ErrorCalculation
# The input for the XOR operator.
training_input = np.array([
    [0.0, 0.0],
    [1.0, 0.0],
    [0.0, 1.0],
    [1.0, 1.0]])
# The ideal output for the XOR operator. Each row corresponds to a row in the training_input.
training_ideal = np.array([
    [0.0],
    [1.0],
    [1.0],
    [0.0],
])
# Create the network. Two inputs, one output, with 5 RBF functions in the
# hidden layer.
network = RbfNetwork(2, 5, 1)
network.reset()
def score_funct(x):
    """
    The score function. Calculate the MSE error between the actual network output and the ideal values for the XOR.
    @param x: The long term memory that we are to score.
    @return: The MSE error.
    """
    network.copy_memory(x)
    # Run every training row through the network and compare to the ideals.
    predictions = [network.compute_regression(row) for row in training_input]
    return ErrorCalculation.mse(np.array(predictions), training_ideal)
# Use Nelder Mead to train. The starting point is the network's current
# (randomized) long-term memory vector.
x0 = list(network.long_term_memory)
res = minimize(score_funct, x0, method='nelder-mead', options={'disp': True, 'maxiter': 10000})
# Display the output for the XOR. XOR will not be trained perfectly. You should see that the (0,1) and (1,0) inputs
# are both close to 1.0, whereas the (1,1) and (0,0) are close to 0.0.
# NOTE: minimize() updates the network via score_funct's copy_memory calls,
# so the network already holds the best-found weights here.
for input_data in training_input:
    output_data = network.compute_regression(input_data)
    print(str(input_data) + " -> " + str(output_data))
|
mattjjohnson007/ChromeWebLab
|
refs/heads/master
|
Sketchbots/sw/labqueue/lask/server/http/misc_handlers.py
|
7
|
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Part of Lask, the Web Lab Task management system.
Handlers for the LASK HTTP server.
"""
import webapp2
import time
import datetime
import calendar
from lask.core.model import *
#####################################################################
#
# Miscellaneous handlers
#
class HelloWorldHandler(webapp2.RequestHandler):
    """Trivial liveness endpoint: responds with the body 'OK' to any GET."""
    def get(self):
        # NOTE(review): the return value is unused; presumably get_current()
        # is called for its side effects (e.g. resolving/validating the
        # caller's credentials) -- confirm before removing.
        creds = UserCredentials.get_current()
        self.response.write('OK')
class TestHandler(webapp2.RequestHandler):
    """Placeholder endpoint: accepts GET and returns an empty 200 response."""
    def get(self):
        pass
class TimeHandler(webapp2.RequestHandler):
    """Reports the current server time (UTC) as JSON, plus the elapsed
    wall-clock time spent handling the request ("runtime")."""

    # Wall-clock timestamp captured when the handler instance was created.
    __init_time_ts = None

    def __init__(self, *args, **kwargs):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this handler is ever subclassed.
        super(TimeHandler, self).__init__(*args, **kwargs)
        # time.time() gives wall-clock seconds; the previous time.clock()
        # measured CPU time on POSIX (and is removed in Python 3.8+), so the
        # reported request runtime was wrong.
        self.__init_time_ts = time.time()

    def options(self, *args):
        # CORS preflight response: allow any origin to GET this resource.
        self.response.headers.add_header('Access-Control-Allow-Origin', '*')
        self.response.headers.add_header('Access-Control-Allow-Methods', 'OPTIONS, GET')
        self.response.headers.add_header('Access-Control-Allow-Headers', 'Content-Type, Depth, User-Agent, Cache-Control, Authorization')
        self.response.status_int = 200

    def get(self):
        now_time_ts = time.time()
        now = datetime.datetime.utcnow()
        # Hand-built JSON kept as-is to preserve the exact response format.
        self.response.write('{"result":{"server_time_utc":'+str(calendar.timegm(now.timetuple()))+',"server_time_utc_human":"'+str(now)+'","runtime":'+str(now_time_ts-self.__init_time_ts)+'}}')
|
cdsteinkuehler/MachineKit
|
refs/heads/MachineKit-ubc
|
lib/python/gladevcp/hal_graph.py
|
39
|
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import gtk
import gobject
import cairo
import math
import gtk.glade
import time
from collections import deque
from hal_widgets import _HalWidgetBase, hal
MAX_INT = 0x7fffffff
def gdk_color_tuple(c):
    """Return (r, g, b) floats for a gtk.gdk.Color; black when c is falsy."""
    if c:
        return c.red_float, c.green_float, c.blue_float
    return 0, 0, 0
def mround(v, m):
    """Round v to a multiple of m, always stepping toward zero.

    Values that are already exact multiples of m are moved one full step
    inward, so the result never equals a nonzero v itself.
    """
    remainder = v % m
    if remainder:
        # Snap toward zero: floor for positives, ceil for negatives.
        return v - remainder if v > 0 else v - remainder + m
    # v is an exact multiple of m; step one multiple toward zero.
    if v > 0:
        return v - m
    if v < 0:
        return v + m
    return 0
class HAL_Graph(gtk.DrawingArea, _HalWidgetBase):
    """Strip-chart widget plotting a HAL float pin over a rolling time window.

    The widget samples its HAL pin every `tick` milliseconds, keeps the last
    `period` seconds of (timestamp, value) pairs, and redraws on each sample.
    A left mouse click freezes the current trace as a snapshot; a second
    click clears the snapshot.
    """
    __gtype_name__ = 'HAL_Graph'
    __gproperties__ = {
        'min' : ( gobject.TYPE_FLOAT, 'Min', 'Minimum value',
                -MAX_INT, MAX_INT, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'max' : ( gobject.TYPE_FLOAT, 'Max', 'Maximum value',
                -MAX_INT, MAX_INT, 100, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'autoscale' : ( gobject.TYPE_BOOLEAN, 'Autoscale', 'Autoscale Y axis',
                False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'period' : ( gobject.TYPE_FLOAT, 'Period', 'TIme period to display',
                -MAX_INT, MAX_INT, 60, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'tick' : ( gobject.TYPE_INT, 'Tick period', 'Data acquarison pariod in ms',
                100, 10000, 500, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'zero' : ( gobject.TYPE_FLOAT, 'Zero', 'Zero value',
                -MAX_INT, MAX_INT, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'value' : ( gobject.TYPE_FLOAT, 'Value', 'Current meter value (for glade testing)',
                -MAX_INT, MAX_INT, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'yticks' : ( gobject.TYPE_FLOAT, 'Y Tick scale', 'Ticks on Y scale',
                0, MAX_INT, 10, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'xticks' : ( gobject.TYPE_FLOAT, 'X Tick scale', 'Ticks on X scale (in seconds)',
                0, MAX_INT, 10, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'fg_color' : ( gtk.gdk.Color.__gtype__, 'Graph color', "Set graphing color",
                gobject.PARAM_READWRITE),
        'bg_color' : ( gtk.gdk.Color.__gtype__, 'Background', "Choose background color",
                gobject.PARAM_READWRITE),
        'fg_fill' : ( gobject.TYPE_BOOLEAN, 'Fill graph', 'Fill area covered with graph',
                False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'force_width' : ( gobject.TYPE_INT, 'Forced width', 'Force bar width not dependent on widget size. -1 to disable',
                -1, MAX_INT, -1, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'force_height' : ( gobject.TYPE_INT, 'Forced height', 'Force bar height not dependent on widget size. -1 to disable',
                -1, MAX_INT, -1, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'time_format' : ( gobject.TYPE_STRING, 'Time format',
                'Time format to display. Use any strftime capable formatting',
                "%M:%S", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
        'label' : ( gobject.TYPE_STRING, 'Graph label', 'Label to display',
                "", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
        'sublabel' : ( gobject.TYPE_STRING, 'Graph sub label', 'Sub text to display',
                "", gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
    }
    __gproperties = __gproperties__

    def __init__(self):
        super(HAL_Graph, self).__init__()
        self.bg_color = gtk.gdk.Color('white')
        self.fg_color = gtk.gdk.Color('red')
        self.force_radius = None
        # Rolling (timestamp, value) samples for the live trace.
        self.ticks = deque()
        # Frozen snapshot taken on mouse click; empty when not frozen.
        self.ticks_saved = []
        # Cache of formatted time labels keyed by integer timestamp.
        self.time_strings = {}
        self.tick_period = 0.1
        self.connect("button-press-event", self.snapshot)
        self.connect("expose-event", self.expose)
        self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.tick = 500
        # Incremented whenever the poll rate changes so stale timers cancel.
        self.tick_idx = 0
        self.hal_pin = 0
        gobject.timeout_add(self.tick, self.tick_poll, self.tick_idx)

    def _hal_init(self):
        _HalWidgetBase._hal_init(self)
        self.hal_pin = self.hal.newpin(self.hal_name, hal.HAL_FLOAT, hal.HAL_IN)

    def tick_poll(self, idx):
        """Periodic sampler: read the pin, trim old samples, and redraw."""
        # A newer timer (different idx) supersedes this one; returning False
        # cancels this gobject timeout.
        if self.tick_idx != idx:
            return False
        v = self.hal_pin and self.hal_pin.get()
        now = time.time()
        # Drop samples that have scrolled out of the visible period.
        for (t,_) in list(self.ticks):
            if t >= now - self.period:
                break
            self.ticks.popleft()
        self.ticks.append((now, v))
        self.queue_draw()
        return True

    def snapshot(self, widget, event):
        """Left click toggles a frozen snapshot of the current trace."""
        if event.button != 1:
            return
        if self.ticks_saved:
            self.ticks_saved = []
        else:
            self.ticks_saved = list(self.ticks)

    def expose(self, widget, event):
        """Render border, background, ticks, labels, and trace(s)."""
        w = self.allocation.width
        h = self.allocation.height
        fw = self.force_width
        fh = self.force_height
        aw = max(w, fw)
        ah = max(h, fh)
        #self.set_size_request(aw, ah)
        if fw != -1: w = fw
        if fh != -1: h = fh
        cr = widget.window.cairo_create()
        cr.set_line_width(2)
        cr.set_source_rgb(0, 0, 0)
        #print w, h, aw, ah, fw, fh
        cr.set_line_width(2)
        # Center the (possibly forced-size) drawing area in the allocation.
        cr.translate((aw - w) / 2, (ah - h) / 2)
        cr.rectangle(0, 0, w, h)
        cr.clip_preserve()
        cr.stroke()
        cr.translate(1, 1)
        w, h = w - 2, h - 2
        cr.set_line_width(1)
        cr.set_source_color(self.bg_color)
        cr.rectangle(0, 0, w, h)
        cr.stroke_preserve()
        cr.fill()
        #tw = self.tick_period * w / self.period
        # When a snapshot is frozen, the time axis is anchored at the
        # snapshot's newest sample instead of the current time.
        tnow = now = time.time()
        if self.ticks_saved:
            now = max(map(lambda x: x[0], self.ticks_saved))
        cr.set_source_rgb(0, 0, 0)
        def t2x(t, n=now):
            # Map a timestamp onto the x axis; None when outside the window.
            p = (t - n + self.period) / self.period
            if p < 0 or p > 1:
                return None
            return w * p
        font_small = max(h/20, 10)
        font_large = max(h/10, 20)
        cr.set_font_size(font_small)
        ymin, ymax = self.min, self.max
        yticks = self.yticks
        if self.autoscale:
            # Fit the Y range to all visible data, with 10% headroom.
            tv = map(lambda x: x[1], self.ticks_saved + list(self.ticks))
            if tv:
                ymin, ymax = min(tv), max(tv)
                ymin -= abs(ymin) * 0.1
                ymax += abs(ymax) * 0.1
            else:
                ymin, ymax = -1.1, 1.1
            yticks = 0
        if not yticks:
            # Derive a tick spacing of roughly a tenth of the Y span,
            # rounded to a power of ten.
            if ymin == ymax:
                ymin -= max(1, abs(ymin) * 0.1)
                ymax += max(1, abs(ymax) * 0.1)
            #print ymax, ymin, ymax - ymin
            yround = 10 ** math.floor(math.log10((ymax - ymin) / 10))
            yticks = mround((ymax - ymin) / 10, yround)
        self.draw_xticks(cr, w, h, self.xticks, now, t2x)
        self.draw_yticks(cr, w, h, ymin, ymax, yticks)
        cr.set_source_rgb(0, 0, 0)
        cr.set_font_size(font_large)
        self.text_at(cr, self.label, w/2, font_large, yalign='top')
        cr.set_font_size(font_small)
        self.text_at(cr, self.sublabel, w/2, 2.5 * font_large, yalign='top')
        cr.set_source_color(self.fg_color)
        if self.ticks_saved:
            # Snapshot drawn solid; the live trace is ghosted behind it.
            self.draw_graph(cr, w, h, ymin, ymax, self.ticks_saved, t2x)
            cr.set_source_rgba(*(gdk_color_tuple(self.fg_color) + (0.3,)))
        self.draw_graph(cr, w, h, ymin, ymax, self.ticks, lambda t: t2x(t, tnow))
        if not (self.flags() & gtk.PARENT_SENSITIVE):
            # Wash out the graph when the widget is insensitive.
            cr.set_source_rgba(0, 0, 0, 0.3)
            cr.set_operator(cairo.OPERATOR_DEST_OUT)
            cr.rectangle(0, 0, w, h)
            cr.stroke_preserve()
            cr.fill()
        return True

    def text_at(self, cr, text, x, y, xalign='center', yalign='center'):
        """Draw `text` with (x, y) interpreted per the alignment flags."""
        xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(text)
        #print xbearing, ybearing, width, height, xadvance, yadvance
        if xalign == 'center':
            x = x - width/2
        elif xalign == 'right':
            x = x - width
        if yalign == 'center':
            y = y + height/2
        elif yalign == 'top':
            y = y + height
        cr.move_to(x, y)
        cr.show_text(text)

    def draw_graph(self, cr, w, h, ymin, ymax, ticks, t2x):
        """Draw the polyline for `ticks`; None values/positions break it."""
        move = True
        for (t, v) in ticks:
            if v is None:
                move = True
                continue
            v = min(max(v, ymin), ymax)
            x = t2x(t)
            if x is None:
                move = True
                continue
            y = h * (1 - (v - ymin)/(ymax - ymin))
            if move:
                cr.move_to(x, y)
                move = False
            cr.line_to(x, y)
        cr.stroke()

    def draw_xticks(self, cr, w, h, xticks, now, t2x):
        """Draw time-axis tick marks and their formatted labels."""
        rnow = mround(now, xticks)
        for t in range(0, int(self.period / xticks)):
            ts = int(rnow - t * xticks)
            x = t2x(ts)
            if x is None: continue
            cr.move_to(x, h)
            cr.line_to(x, 0.98 * h)
            s = self.time_string(ts)
            self.text_at(cr, s, x, 0.96 * h, yalign='bottom')
        cr.stroke()

    def draw_yticks(self, cr, w, h, ymin, ymax, yticks):
        """Draw value-axis tick marks, labels, and faint grid lines."""
        ysize = ymax - ymin
        rmax = mround(ymax, yticks)
        rmin = mround(ymin, yticks)
        rsize = rmax - rmin
        for t in range(0, int(rsize / yticks) + 1):
            v = rmin + yticks * t
            y = h * (1 - (v - ymin)/ ysize)
            cr.move_to(0, y)
            cr.line_to(w/100, y)
            cr.move_to(w, y)
            cr.line_to(w - w/100, y)
            self.text_at(cr, str(v), 0.02 * w, y, xalign='left', yalign='center')
        cr.stroke()
        cr.set_source_rgba(0.5, 0.5, 0.5, 0.5)
        for t in range(0, int(rsize / yticks) + 1):
            v = rmin + yticks * t
            y = h * (1 - (v - ymin)/ ysize)
            cr.move_to(0.1*w, y)
            cr.line_to(0.9*w, y)
        cr.stroke()

    def time_string(self, ts):
        """Format `ts` via self.time_format, memoizing per timestamp."""
        if ts in self.time_strings:
            return self.time_strings[ts]
        s = time.strftime(self.time_format, time.localtime(ts))
        self.time_strings[ts] = s
        return s

    def time_strings_clean(self, now):
        """Evict cached time labels older than `now`.

        The previous implementation passed no iterable to filter(), which
        raised TypeError on every call. Iterate over a snapshot of the keys
        so deleting entries during the loop is safe.
        """
        for k in [k for k in self.time_strings.keys() if k < now]:
            del self.time_strings[k]

    def set_value(self, v):
        self.value = v
        self.queue_draw()

    def do_get_property(self, property):
        name = property.name.replace('-', '_')
        if name in self.__gproperties.keys():
            return getattr(self, name)
        else:
            raise AttributeError('unknown property %s' % property.name)

    def do_set_property(self, property, value):
        name = property.name.replace('-', '_')
        if name == 'tick':
            # Restart polling at the new rate; bumping tick_idx cancels the
            # previously scheduled timeout (see tick_poll).
            self.tick_idx += 1
            gobject.timeout_add(value, self.tick_poll, self.tick_idx)
        if name in ['bg_color', 'fg_color']:
            if not value:
                return False
        if name in self.__gproperties.keys():
            setattr(self, name, value)
            self.queue_draw()
        else:
            raise AttributeError('unknown property %s' % property.name)
        if name in ['force_width', 'force_height']:
            # Previously checked ['force_size', 'force_size'], which can
            # never match a property name, and referenced the nonexistent
            # self.force_size attribute. Use getattr with -1 (GTK "unset")
            # defaults since the sibling property may not be set yet during
            # construction.
            self.set_size_request(getattr(self, 'force_width', -1),
                                  getattr(self, 'force_height', -1))
            self.queue_draw()
        return True
|
gnowxilef/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/firstpost.py
|
60
|
from __future__ import unicode_literals
from .common import InfoExtractor
class FirstpostIE(InfoExtractor):
    """Extractor for firstpost.com video pages."""
    _VALID_URL = r'https?://(?:www\.)?firstpost\.com/[^/]+/.*-(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.firstpost.com/india/india-to-launch-indigenous-aircraft-carrier-monday-1025403.html',
        'md5': 'ee9114957692f01fb1263ed87039112a',
        'info_dict': {
            'id': '1025403',
            'ext': 'mp4',
            'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
            'description': 'md5:feef3041cb09724e0bdc02843348f5f4',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Title/description come from the page's twitter-card meta tags.
        title = self._html_search_meta('twitter:title', webpage, 'title', fatal=True)
        description = self._html_search_meta('twitter:description', webpage, 'title')

        data = self._download_xml(
            'http://www.firstpost.com/getvideoxml-%s.xml' % video_id, video_id,
            'Downloading video XML')
        item = data.find('./playlist/item')
        thumbnail = item.find('./image').text

        formats = []
        for details in item.findall('./source/file_details'):
            # Skip entries without an actual media URL.
            if not details.find('./file').text:
                continue
            formats.append({
                'url': details.find('./file').text,
                'format_id': details.find('./label').text.strip(),
                'width': int(details.find('./width').text.strip()),
                'height': int(details.find('./height').text.strip()),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
|
derekjchow/models
|
refs/heads/master
|
research/lfads/lfads.py
|
3
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""
LFADS - Latent Factor Analysis via Dynamical Systems.
LFADS is an unsupervised method to decompose time series data into
various factors, such as an initial condition, a generative
dynamical system, control inputs to that generator, and a low
dimensional description of the observed data, called the factors.
Additionally, the observations have a noise model (in this case
Poisson), so a denoised version of the observations is also created
(e.g. underlying rates of a Poisson distribution given the observed
event counts).
The main data structure being passed around is a dataset. This is a dictionary
of data dictionaries.
DATASET: The top level dictionary is simply name (string -> dictionary).
The nested dictionary is the DATA DICTIONARY, which has the following keys:
'train_data' and 'valid_data', whose values are the corresponding training
and validation data with shape
ExTxD, E - # examples, T - # time steps, D - # dimensions in data.
The data dictionary also has a few more keys:
'train_ext_input' and 'valid_ext_input', if there are know external inputs
to the system being modeled, these take on dimensions:
ExTxI, E - # examples, T - # time steps, I = # dimensions in input.
'alignment_matrix_cxf' - If you are using multiple days data, it's possible
that one can align the channels (see manuscript). If so each dataset will
contain this matrix, which will be used for both the input adapter and the
output adapter for each dataset. These matrices, if provided, must be of
size [data_dim x factors] where data_dim is the number of neurons recorded
on that day, and factors is chosen and set through the '--factors' flag.
'alignment_bias_c' - See alignment_matrix_cxf. This bias will used to
the offset for the alignment transformation. It will *subtract* off the
bias from the data, so pca style inits can align factors across sessions.
If one runs LFADS on data where the true rates are known for some trials,
(say simulated, testing data, as in the example shipped with the paper), then
one can add three more fields for plotting purposes. These are 'train_truth'
and 'valid_truth', and 'conversion_factor'. These have the same dimensions as
'train_data', and 'valid_data' but represent the underlying rates of the
observations. Finally, if one needs to convert scale for plotting the true
underlying firing rates, there is the 'conversion_factor' key.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import tensorflow as tf
from distributions import LearnableDiagonalGaussian, DiagonalGaussianFromInput
from distributions import diag_gaussian_log_likelihood
from distributions import KLCost_GaussianGaussian, Poisson
from distributions import LearnableAutoRegressive1Prior
from distributions import KLCost_GaussianGaussianProcessSampled
from utils import init_linear, linear, list_t_bxn_to_tensor_bxtxn, write_data
from utils import log_sum_exp, flatten
from plot_lfads import plot_lfads
class GRU(object):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

  A plain GRU whose gates are computed from the concatenated [input, state]
  with a single linear map, so input and recurrent weights share one matrix.
  """
  def __init__(self, num_units, forget_bias=1.0, weight_scale=1.0,
               clip_value=np.inf, collections=None):
    """Create a GRU object.

    Args:
      num_units: Number of units in the GRU
      forget_bias (optional): Hack to help learning.
      weight_scale (optional): weights are scaled by ws/sqrt(#inputs), with
        ws being the weight scale.
      clip_value (optional): if the recurrent values grow above this value,
        clip them.
      collections (optional): List of additional collections variables should
        belong to.
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._weight_scale = weight_scale
    self._clip_value = clip_value
    self._collections = collections

  @property
  def state_size(self):
    # The state is a single hidden vector (no cell state as in LSTM).
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_multiplier(self):
    # State is not stacked (contrast with an LSTM's (c, h) pair).
    return 1

  def output_from_state(self, state):
    """Return the output portion of the state."""
    return state

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) function.

    Args:
      inputs: A 2D batch x input_dim tensor of inputs.
      state: The previous state from the last time step.
      scope (optional): TF variable scope for defined GRU variables.

    Returns:
      A tuple (state, state), where state is the newly computed state at time t.
      It is returned twice to respect an interface that works for LSTMs.
    """
    x = inputs
    h = state
    # Concatenate input and state so one matmul yields both gate pre-acts.
    if inputs is not None:
      xh = tf.concat(axis=1, values=[x, h])
    else:
      xh = h
    with tf.variable_scope(scope or type(self).__name__):  # "GRU"
      with tf.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = tf.split(axis=1, num_or_size_splits=2, value=linear(xh,
                        2 * self._num_units,
                        alpha=self._weight_scale,
                        name="xh_2_ru",
                        collections=self._collections))
        r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias)
      with tf.variable_scope("Candidate"):
        # Candidate state uses the reset-gated previous state.
        xrh = tf.concat(axis=1, values=[x, r * h])
        c = tf.tanh(linear(xrh, self._num_units, name="xrh_2_c",
                           collections=self._collections))
      # Blend previous state and candidate via the update gate.
      new_h = u * h + (1 - u) * c
      # Keep the recurrent state bounded to avoid blow-ups during training.
      new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value)
    return new_h, new_h
class GenGRU(object):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

  This version is specialized for the generator, but isn't as fast, so
  we have two. Note this allows for l2 regularization on the recurrent
  weights, but also implicitly rescales the inputs via the 1/sqrt(input)
  scaling in the linear helper routine to be large magnitude, if there are
  fewer inputs than recurrent state.

  Unlike GRU above, input->hidden and hidden->hidden weights live in
  separate matrices (and separate collections), enabling independent
  regularization of the recurrent weights.
  """
  def __init__(self, num_units, forget_bias=1.0,
               input_weight_scale=1.0, rec_weight_scale=1.0, clip_value=np.inf,
               input_collections=None, recurrent_collections=None):
    """Create a GRU object.

    Args:
      num_units: Number of units in the GRU
      forget_bias (optional): Hack to help learning.
      input_weight_scale (optional): weights are scaled ws/sqrt(#inputs), with
        ws being the weight scale.
      rec_weight_scale (optional): weights are scaled ws/sqrt(#inputs),
        with ws being the weight scale.
      clip_value (optional): if the recurrent values grow above this value,
        clip them.
      input_collections (optional): List of additional collections variables
        that input->rec weights should belong to.
      recurrent_collections (optional): List of additional collections variables
        that rec->rec weights should belong to.
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._input_weight_scale = input_weight_scale
    self._rec_weight_scale = rec_weight_scale
    self._clip_value = clip_value
    self._input_collections = input_collections
    self._rec_collections = recurrent_collections

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_multiplier(self):
    return 1

  def output_from_state(self, state):
    """Return the output portion of the state."""
    return state

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) function.

    Args:
      inputs: A 2D batch x input_dim tensor of inputs.
      state: The previous state from the last time step.
      scope (optional): TF variable scope for defined GRU variables.

    Returns:
      A tuple (state, state), where state is the newly computed state at time t.
      It is returned twice to respect an interface that works for LSTMs.
    """
    x = inputs
    h = state
    with tf.variable_scope(scope or type(self).__name__):  # "GRU"
      with tf.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        # Input and recurrent gate contributions are computed separately
        # (input path has no bias; the recurrent path carries it).
        r_x = u_x = 0.0
        if x is not None:
          r_x, u_x = tf.split(axis=1, num_or_size_splits=2, value=linear(x,
                              2 * self._num_units,
                              alpha=self._input_weight_scale,
                              do_bias=False,
                              name="x_2_ru",
                              normalized=False,
                              collections=self._input_collections))
        r_h, u_h = tf.split(axis=1, num_or_size_splits=2, value=linear(h,
                            2 * self._num_units,
                            do_bias=True,
                            alpha=self._rec_weight_scale,
                            name="h_2_ru",
                            collections=self._rec_collections))
        r = r_x + r_h
        u = u_x + u_h
        r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias)
      with tf.variable_scope("Candidate"):
        # Candidate state likewise splits input and recurrent paths; the
        # recurrent path uses the reset-gated previous state.
        c_x = 0.0
        if x is not None:
          c_x = linear(x, self._num_units, name="x_2_c", do_bias=False,
                       alpha=self._input_weight_scale,
                       normalized=False,
                       collections=self._input_collections)
        c_rh = linear(r*h, self._num_units, name="rh_2_c", do_bias=True,
                      alpha=self._rec_weight_scale,
                      collections=self._rec_collections)
        c = tf.tanh(c_x + c_rh)
      # Blend previous state and candidate via the update gate, then clip
      # to keep the recurrent state bounded.
      new_h = u * h + (1 - u) * c
      new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value)
    return new_h, new_h
class LFADS(object):
"""LFADS - Latent Factor Analysis via Dynamical Systems.
LFADS is an unsupervised method to decompose time series data into
various factors, such as an initial condition, a generative
dynamical system, inferred inputs to that generator, and a low
dimensional description of the observed data, called the factors.
Additoinally, the observations have a noise model (in this case
Poisson), so a denoised version of the observations is also created
(e.g. underlying rates of a Poisson distribution given the observed
event counts).
"""
  def __init__(self, hps, kind="train", datasets=None):
    """Create an LFADS model.

    train - a model for training, sampling of posteriors is used

    posterior_sample_and_average - sample from the posterior, this is used
    for evaluating the expected value of the outputs of LFADS, given a
    specific input, by averaging over multiple samples from the approx
    posterior. Also used for the lower bound on the negative
    log-likelihood using IWAE error (Importance Weighed Auto-encoder).
    This is the denoising operation.

    prior_sample - a model for generation - sampling from priors is used

    Args:
      hps: The dictionary of hyper parameters.
      kind: the type of model to build (see above).
      datasets: a dictionary of named data_dictionaries, see top of lfads.py
    """
    print("Building graph...")
    all_kinds = ['train', 'posterior_sample_and_average', 'posterior_push_mean',
                 'prior_sample']
    assert kind in all_kinds, 'Wrong kind'
    # Feeding rates back to the controller only works when there is a single
    # read-out matrix, i.e. a single dataset.
    if hps.feedback_factors_or_rates == "rates":
      assert len(hps.dataset_names) == 1, \
          "Multiple datasets not supported for rate feedback."
    num_steps = hps.num_steps
    ic_dim = hps.ic_dim
    co_dim = hps.co_dim
    ext_input_dim = hps.ext_input_dim
    cell_class = GRU
    gen_cell_class = GenGRU
    def makelambda(v): # Used with tf.case
      return lambda: v
    # Define the data placeholder, and deal with all parts of the graph
    # that are dataset dependent.
    self.dataName = tf.placeholder(tf.string, shape=())
    # The batch_size to be inferred from data, as normal.
    # Additionally, the data_dim will be inferred as well, allowing for a
    # single placeholder for all datasets, regardless of data dimension.
    if hps.output_dist == 'poisson':
      # Enforce correct dtype
      assert np.issubdtype(
          datasets[hps.dataset_names[0]]['train_data'].dtype, int), \
          "Data dtype must be int for poisson output distribution"
      data_dtype = tf.int32
    elif hps.output_dist == 'gaussian':
      assert np.issubdtype(
          datasets[hps.dataset_names[0]]['train_data'].dtype, float), \
          "Data dtype must be float for gaussian output dsitribution"
      data_dtype = tf.float32
    else:
      assert False, "NIY"
    self.dataset_ph = dataset_ph = tf.placeholder(data_dtype,
                                                  [None, num_steps, None],
                                                  name="data")
    self.train_step = tf.get_variable("global_step", [], tf.int64,
                                      tf.zeros_initializer(),
                                      trainable=False)
    self.hps = hps
    ndatasets = hps.ndatasets
    factors_dim = hps.factors_dim
    self.preds = preds = [None] * ndatasets
    # Per-dataset read-in / read-out weights and biases, wrapped in lambdas
    # so tf.case (below) can select the right set at run time by dataName.
    self.fns_in_fac_Ws = fns_in_fac_Ws = [None] * ndatasets
    # NOTE(review): 'fns_in_fatcor_bs' looks like a typo of 'fns_in_factor_bs';
    # kept as-is because external code may reference this attribute name.
    self.fns_in_fatcor_bs = fns_in_fac_bs = [None] * ndatasets
    self.fns_out_fac_Ws = fns_out_fac_Ws = [None] * ndatasets
    self.fns_out_fac_bs = fns_out_fac_bs = [None] * ndatasets
    self.datasetNames = dataset_names = hps.dataset_names
    self.ext_inputs = ext_inputs = None
    if len(dataset_names) == 1: # single session
      if 'alignment_matrix_cxf' in datasets[dataset_names[0]].keys():
        used_in_factors_dim = factors_dim
        in_identity_if_poss = False
      else:
        used_in_factors_dim = hps.dataset_dims[dataset_names[0]]
        in_identity_if_poss = True
    else: # multisession
      used_in_factors_dim = factors_dim
      in_identity_if_poss = False
    # Build the (possibly alignment-initialized) read-in transformation
    # x -> input factors for each dataset.
    for d, name in enumerate(dataset_names):
      data_dim = hps.dataset_dims[name]
      in_mat_cxf = None
      in_bias_1xf = None
      align_bias_1xc = None
      if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
        dataset = datasets[name]
        if hps.do_train_readin:
          print("Initializing trainable readin matrix with alignment matrix" \
                " provided for dataset:", name)
        else:
          print("Setting non-trainable readin matrix to alignment matrix" \
                " provided for dataset:", name)
        in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
        if in_mat_cxf.shape != (data_dim, factors_dim):
          raise ValueError("""Alignment matrix must have dimensions %d x %d
          (data_dim x factors_dim), but currently has %d x %d."""%
                           (data_dim, factors_dim, in_mat_cxf.shape[0],
                            in_mat_cxf.shape[1]))
      if datasets and 'alignment_bias_c' in datasets[name].keys():
        dataset = datasets[name]
        if hps.do_train_readin:
          print("Initializing trainable readin bias with alignment bias " \
                "provided for dataset:", name)
        else:
          print("Setting non-trainable readin bias to alignment bias " \
                "provided for dataset:", name)
        align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
        align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
        if align_bias_1xc.shape[1] != data_dim:
          # NOTE(review): if only the bias (not the matrix) was provided,
          # in_mat_cxf is None here and formatting this message would itself
          # raise; align_bias_1xc.shape[1] was probably intended.
          raise ValueError("""Alignment bias must have dimensions %d
          (data_dim), but currently has %d."""%
                           (data_dim, in_mat_cxf.shape[0]))
      if in_mat_cxf is not None and align_bias_1xc is not None:
        # (data - alignment_bias) * W_in
        # data * W_in - alignment_bias * W_in
        # So b = -alignment_bias * W_in to accommodate PCA style offset.
        in_bias_1xf = -np.dot(align_bias_1xc, in_mat_cxf)
      if hps.do_train_readin:
        # only add to IO transformations collection only if we want it to be
        # learnable, because IO_transformations collection will be trained
        # when do_train_io_only
        collections_readin=['IO_transformations']
      else:
        collections_readin=None
      in_fac_lin = init_linear(data_dim, used_in_factors_dim,
                               do_bias=True,
                               mat_init_value=in_mat_cxf,
                               bias_init_value=in_bias_1xf,
                               identity_if_possible=in_identity_if_poss,
                               normalized=False, name="x_2_infac_"+name,
                               collections=collections_readin,
                               trainable=hps.do_train_readin)
      in_fac_W, in_fac_b = in_fac_lin
      fns_in_fac_Ws[d] = makelambda(in_fac_W)
      fns_in_fac_bs[d] = makelambda(in_fac_b)
    # Build the per-dataset read-out transformation factors -> observation
    # parameters (log-rates for Poisson; means and log-variances for
    # Gaussian), initialized from the transposed alignment matrix if given.
    with tf.variable_scope("glm"):
      out_identity_if_poss = False
      if len(dataset_names) == 1 and \
         factors_dim == hps.dataset_dims[dataset_names[0]]:
        out_identity_if_poss = True
      for d, name in enumerate(dataset_names):
        data_dim = hps.dataset_dims[name]
        in_mat_cxf = None
        if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
          dataset = datasets[name]
          in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
        if datasets and 'alignment_bias_c' in datasets[name].keys():
          dataset = datasets[name]
          align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
          align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
        out_mat_fxc = None
        out_bias_1xc = None
        if in_mat_cxf is not None:
          out_mat_fxc = in_mat_cxf.T
        # NOTE(review): align_bias_1xc is not reset to None at the top of this
        # loop, so a dataset without 'alignment_bias_c' may pick up the value
        # left over from a previous dataset iteration — TODO confirm intent.
        if align_bias_1xc is not None:
          out_bias_1xc = align_bias_1xc
        if hps.output_dist == 'poisson':
          out_fac_lin = init_linear(factors_dim, data_dim, do_bias=True,
                                    mat_init_value=out_mat_fxc,
                                    bias_init_value=out_bias_1xc,
                                    identity_if_possible=out_identity_if_poss,
                                    normalized=False,
                                    name="fac_2_logrates_"+name,
                                    collections=['IO_transformations'])
          out_fac_W, out_fac_b = out_fac_lin
        elif hps.output_dist == 'gaussian':
          out_fac_lin_mean = \
              init_linear(factors_dim, data_dim, do_bias=True,
                          mat_init_value=out_mat_fxc,
                          bias_init_value=out_bias_1xc,
                          normalized=False,
                          name="fac_2_means_"+name,
                          collections=['IO_transformations'])
          out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean
          # log-variance read-out starts at zero weights / unit variance.
          mat_init_value = np.zeros([factors_dim, data_dim]).astype(np.float32)
          bias_init_value = np.ones([1, data_dim]).astype(np.float32)
          out_fac_lin_logvar = \
              init_linear(factors_dim, data_dim, do_bias=True,
                          mat_init_value=mat_init_value,
                          bias_init_value=bias_init_value,
                          normalized=False,
                          name="fac_2_logvars_"+name,
                          collections=['IO_transformations'])
          # NOTE(review): the following line duplicates the unpack above and
          # has no effect; harmless, kept for byte-identity.
          out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean
          out_fac_W_logvar, out_fac_b_logvar = out_fac_lin_logvar
          # Concatenate so a single matmul yields [means | logvars].
          out_fac_W = tf.concat(
              axis=1, values=[out_fac_W_mean, out_fac_W_logvar])
          out_fac_b = tf.concat(
              axis=1, values=[out_fac_b_mean, out_fac_b_logvar])
        else:
          assert False, "NIY"
        preds[d] = tf.equal(tf.constant(name), self.dataName)
        data_dim = hps.dataset_dims[name]
        fns_out_fac_Ws[d] = makelambda(out_fac_W)
        fns_out_fac_bs[d] = makelambda(out_fac_b)
    # Select this batch's read-in / read-out weights by dataset name.
    pf_pairs_in_fac_Ws = zip(preds, fns_in_fac_Ws)
    pf_pairs_in_fac_bs = zip(preds, fns_in_fac_bs)
    pf_pairs_out_fac_Ws = zip(preds, fns_out_fac_Ws)
    pf_pairs_out_fac_bs = zip(preds, fns_out_fac_bs)
    this_in_fac_W = tf.case(pf_pairs_in_fac_Ws, exclusive=True)
    this_in_fac_b = tf.case(pf_pairs_in_fac_bs, exclusive=True)
    this_out_fac_W = tf.case(pf_pairs_out_fac_Ws, exclusive=True)
    this_out_fac_b = tf.case(pf_pairs_out_fac_bs, exclusive=True)
    # External inputs (not changing by dataset, by definition).
    if hps.ext_input_dim > 0:
      self.ext_input = tf.placeholder(tf.float32,
                                      [None, num_steps, ext_input_dim],
                                      name="ext_input")
    else:
      self.ext_input = None
    ext_input_bxtxi = self.ext_input
    self.keep_prob = keep_prob = tf.placeholder(tf.float32, [], "keep_prob")
    self.batch_size = batch_size = int(hps.batch_size)
    self.learning_rate = tf.Variable(float(hps.learning_rate_init),
                                     trainable=False, name="learning_rate")
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * hps.learning_rate_decay_factor)
    # Dropout the data.
    dataset_do_bxtxd = tf.nn.dropout(tf.to_float(dataset_ph), keep_prob)
    if hps.ext_input_dim > 0:
      ext_input_do_bxtxi = tf.nn.dropout(ext_input_bxtxi, keep_prob)
    else:
      ext_input_do_bxtxi = None
    # ENCODERS
    def encode_data(dataset_bxtxd, enc_cell, name, forward_or_reverse,
                    num_steps_to_encode):
      """Encode data for LFADS

      Args:
        dataset_bxtxd - the data to encode, as a 3 tensor, with dims
          time x batch x data dims.
        enc_cell: encoder cell
        name: name of encoder
        forward_or_reverse: string, encode in forward or reverse direction
        num_steps_to_encode: number of steps to encode, 0:num_steps_to_encode

      Returns:
        encoded data as a list with num_steps_to_encode items, in order
      """
      if forward_or_reverse == "forward":
        dstr = "_fwd"
        time_fwd_or_rev = range(num_steps_to_encode)
      else:
        dstr = "_rev"
        time_fwd_or_rev = reversed(range(num_steps_to_encode))
      with tf.variable_scope(name+"_enc"+dstr, reuse=False):
        # Learnable initial encoder state, tiled across the batch.
        enc_state = tf.tile(
            tf.Variable(tf.zeros([1, enc_cell.state_size]),
                        name=name+"_enc_t0"+dstr), tf.stack([batch_size, 1]))
        enc_state.set_shape([None, enc_cell.state_size]) # tile loses shape
      enc_outs = [None] * num_steps_to_encode
      for i, t in enumerate(time_fwd_or_rev):
        # Reuse encoder variables for all steps after the first.
        with tf.variable_scope(name+"_enc"+dstr, reuse=True if i > 0 else None):
          dataset_t_bxd = dataset_bxtxd[:,t,:]
          in_fac_t_bxf = tf.matmul(dataset_t_bxd, this_in_fac_W) + this_in_fac_b
          in_fac_t_bxf.set_shape([None, used_in_factors_dim])
          if ext_input_dim > 0 and not hps.inject_ext_input_to_gen:
            ext_input_t_bxi = ext_input_do_bxtxi[:,t,:]
            enc_input_t_bxfpe = tf.concat(
                axis=1, values=[in_fac_t_bxf, ext_input_t_bxi])
          else:
            enc_input_t_bxfpe = in_fac_t_bxf
          enc_out, enc_state = enc_cell(enc_input_t_bxfpe, enc_state)
          enc_outs[t] = enc_out
      return enc_outs
    # Encode initial condition means and variances
    # ([x_T, x_T-1, ... x_0] and [x_0, x_1, ... x_T] -> g0/c0)
    self.ic_enc_fwd = [None] * num_steps
    self.ic_enc_rev = [None] * num_steps
    if ic_dim > 0:
      enc_ic_cell = cell_class(hps.ic_enc_dim,
                               weight_scale=hps.cell_weight_scale,
                               clip_value=hps.cell_clip_value)
      ic_enc_fwd = encode_data(dataset_do_bxtxd, enc_ic_cell,
                               "ic", "forward",
                               hps.num_steps_for_gen_ic)
      ic_enc_rev = encode_data(dataset_do_bxtxd, enc_ic_cell,
                               "ic", "reverse",
                               hps.num_steps_for_gen_ic)
      self.ic_enc_fwd = ic_enc_fwd
      self.ic_enc_rev = ic_enc_rev
    # Encoder control input means and variances, bi-directional encoding so:
    # ([x_T, x_T-1, ..., x_0] and [x_0, x_1 ... x_T] -> u_t)
    self.ci_enc_fwd = [None] * num_steps
    self.ci_enc_rev = [None] * num_steps
    if co_dim > 0:
      enc_ci_cell = cell_class(hps.ci_enc_dim,
                               weight_scale=hps.cell_weight_scale,
                               clip_value=hps.cell_clip_value)
      ci_enc_fwd = encode_data(dataset_do_bxtxd, enc_ci_cell,
                               "ci", "forward",
                               hps.num_steps)
      if hps.do_causal_controller:
        ci_enc_rev = None
      else:
        ci_enc_rev = encode_data(dataset_do_bxtxd, enc_ci_cell,
                                 "ci", "reverse",
                                 hps.num_steps)
      self.ci_enc_fwd = ci_enc_fwd
      self.ci_enc_rev = ci_enc_rev
    # STOCHASTIC LATENT VARIABLES, priors and posteriors
    # (initial conditions g0, and control inputs, u_t)
    # Note that zs represent all the stochastic latent variables.
    with tf.variable_scope("z", reuse=False):
      self.prior_zs_g0 = None
      self.posterior_zs_g0 = None
      self.g0s_val = None
      if ic_dim > 0:
        self.prior_zs_g0 = \
            LearnableDiagonalGaussian(batch_size, ic_dim, name="prior_g0",
                                      mean_init=0.0,
                                      var_min=hps.ic_prior_var_min,
                                      var_init=hps.ic_prior_var_scale,
                                      var_max=hps.ic_prior_var_max)
        # Last forward step and first reverse step summarize the whole
        # sequence for the initial condition.
        ic_enc = tf.concat(axis=1, values=[ic_enc_fwd[-1], ic_enc_rev[0]])
        ic_enc = tf.nn.dropout(ic_enc, keep_prob)
        self.posterior_zs_g0 = \
            DiagonalGaussianFromInput(ic_enc, ic_dim, "ic_enc_2_post_g0",
                                      var_min=hps.ic_post_var_min)
        if kind in ["train", "posterior_sample_and_average",
                    "posterior_push_mean"]:
          zs_g0 = self.posterior_zs_g0
        else:
          zs_g0 = self.prior_zs_g0
        if kind in ["train", "posterior_sample_and_average", "prior_sample"]:
          self.g0s_val = zs_g0.sample
        else:
          self.g0s_val = zs_g0.mean
      # Priors for controller, 'co' for controller output
      self.prior_zs_co = prior_zs_co = [None] * num_steps
      self.posterior_zs_co = posterior_zs_co = [None] * num_steps
      self.zs_co = zs_co = [None] * num_steps
      self.prior_zs_ar_con = None
      if co_dim > 0:
        # Controller outputs
        autocorrelation_taus = [hps.prior_ar_atau for x in range(hps.co_dim)]
        noise_variances = [hps.prior_ar_nvar for x in range(hps.co_dim)]
        self.prior_zs_ar_con = prior_zs_ar_con = \
            LearnableAutoRegressive1Prior(batch_size, hps.co_dim,
                                          autocorrelation_taus,
                                          noise_variances,
                                          hps.do_train_prior_ar_atau,
                                          hps.do_train_prior_ar_nvar,
                                          num_steps, "u_prior_ar1")
    # CONTROLLER -> GENERATOR -> RATES
    # (u(t) -> gen(t) -> factors(t) -> rates(t) -> p(x_t|z_t) )
    self.controller_outputs = u_t = [None] * num_steps
    self.con_ics = con_state = None
    self.con_states = con_states = [None] * num_steps
    self.con_outs = con_outs = [None] * num_steps
    self.gen_inputs = gen_inputs = [None] * num_steps
    if co_dim > 0:
      # gen_cell_class here for l2 penalty recurrent weights
      # didn't split the cell_weight scale here, because I doubt it matters
      con_cell = gen_cell_class(hps.con_dim,
                                input_weight_scale=hps.cell_weight_scale,
                                rec_weight_scale=hps.cell_weight_scale,
                                clip_value=hps.cell_clip_value,
                                recurrent_collections=['l2_con_reg'])
      with tf.variable_scope("con", reuse=False):
        self.con_ics = tf.tile(
            tf.Variable(tf.zeros([1, hps.con_dim*con_cell.state_multiplier]),
                        name="c0"),
            tf.stack([batch_size, 1]))
        self.con_ics.set_shape([None, con_cell.state_size]) # tile loses shape
      con_states[-1] = self.con_ics
    gen_cell = gen_cell_class(hps.gen_dim,
                              input_weight_scale=hps.gen_cell_input_weight_scale,
                              rec_weight_scale=hps.gen_cell_rec_weight_scale,
                              clip_value=hps.cell_clip_value,
                              recurrent_collections=['l2_gen_reg'])
    with tf.variable_scope("gen", reuse=False):
      if ic_dim == 0:
        self.gen_ics = tf.tile(
            tf.Variable(tf.zeros([1, gen_cell.state_size]), name="g0"),
            tf.stack([batch_size, 1]))
      else:
        self.gen_ics = linear(self.g0s_val, gen_cell.state_size,
                              identity_if_possible=True,
                              name="g0_2_gen_ic")
      self.gen_states = gen_states = [None] * num_steps
      self.gen_outs = gen_outs = [None] * num_steps
      gen_states[-1] = self.gen_ics
      gen_outs[-1] = gen_cell.output_from_state(gen_states[-1])
      self.factors = factors = [None] * num_steps
      factors[-1] = linear(gen_outs[-1], factors_dim, do_bias=False,
                           normalized=True, name="gen_2_fac")
      self.rates = rates = [None] * num_steps
      # rates[-1] is collected to potentially feed back to controller
      with tf.variable_scope("glm", reuse=False):
        if hps.output_dist == 'poisson':
          log_rates_t0 = tf.matmul(factors[-1], this_out_fac_W) + this_out_fac_b
          log_rates_t0.set_shape([None, None])
          rates[-1] = tf.exp(log_rates_t0) # rate
          rates[-1].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]])
        elif hps.output_dist == 'gaussian':
          mean_n_logvars = tf.matmul(factors[-1],this_out_fac_W) + this_out_fac_b
          mean_n_logvars.set_shape([None, None])
          means_t_bxd, logvars_t_bxd = tf.split(axis=1, num_or_size_splits=2,
                                                value=mean_n_logvars)
          rates[-1] = means_t_bxd
        else:
          assert False, "NIY"
    # We support multiple output distributions, for example Poisson, and also
    # Gaussian. In these two cases respectively, there are one and two
    # parameters (rates vs. mean and variance). So the output_dist_params
    # tensor will variable sizes via tf.concat and tf.split, along the 1st
    # dimension. So in the case of gaussian, for example, it'll be
    # batch x (D+D), where each D dims is the mean, and then variances,
    # respectively. For a distribution with 3 parameters, it would be
    # batch x (D+D+D).
    self.output_dist_params = dist_params = [None] * num_steps
    self.log_p_xgz_b = log_p_xgz_b = 0.0 # log P(x|z)
    for t in range(num_steps):
      # Controller
      if co_dim > 0:
        # Build inputs for controller
        tlag = t - hps.controller_input_lag
        if tlag < 0:
          con_in_f_t = tf.zeros_like(ci_enc_fwd[0])
        else:
          con_in_f_t = ci_enc_fwd[tlag]
        if hps.do_causal_controller:
          # If controller is causal (wrt to data generation process), then it
          # cannot see future data. Thus, excluding ci_enc_rev[t] is obvious.
          # Less obvious is the need to exclude factors[t-1]. This arises
          # because information flows from g0 through factors to the controller
          # input. The g0 encoding is backwards, so we must necessarily exclude
          # the factors in order to keep the controller input purely from a
          # forward encoding (however unlikely it is that
          # g0->factors->controller channel might actually be used in this way).
          con_in_list_t = [con_in_f_t]
        else:
          tlag_rev = t + hps.controller_input_lag
          if tlag_rev >= num_steps:
            # better than zeros
            con_in_r_t = tf.zeros_like(ci_enc_rev[0])
          else:
            con_in_r_t = ci_enc_rev[tlag_rev]
          con_in_list_t = [con_in_f_t, con_in_r_t]
        if hps.do_feed_factors_to_controller:
          if hps.feedback_factors_or_rates == "factors":
            con_in_list_t.append(factors[t-1])
          elif hps.feedback_factors_or_rates == "rates":
            con_in_list_t.append(rates[t-1])
          else:
            assert False, "NIY"
        con_in_t = tf.concat(axis=1, values=con_in_list_t)
        con_in_t = tf.nn.dropout(con_in_t, keep_prob)
        with tf.variable_scope("con", reuse=True if t > 0 else None):
          con_outs[t], con_states[t] = con_cell(con_in_t, con_states[t-1])
          posterior_zs_co[t] = \
              DiagonalGaussianFromInput(con_outs[t], co_dim,
                                        name="con_to_post_co")
        # Pick the controller output source depending on the model kind.
        if kind == "train":
          u_t[t] = posterior_zs_co[t].sample
        elif kind == "posterior_sample_and_average":
          u_t[t] = posterior_zs_co[t].sample
        elif kind == "posterior_push_mean":
          u_t[t] = posterior_zs_co[t].mean
        else:
          u_t[t] = prior_zs_ar_con.samples_t[t]
      # Inputs to the generator (controller output + external input)
      if ext_input_dim > 0 and hps.inject_ext_input_to_gen:
        ext_input_t_bxi = ext_input_do_bxtxi[:,t,:]
        if co_dim > 0:
          gen_inputs[t] = tf.concat(axis=1, values=[u_t[t], ext_input_t_bxi])
        else:
          gen_inputs[t] = ext_input_t_bxi
      else:
        gen_inputs[t] = u_t[t]
      # Generator
      data_t_bxd = dataset_ph[:,t,:]
      with tf.variable_scope("gen", reuse=True if t > 0 else None):
        gen_outs[t], gen_states[t] = gen_cell(gen_inputs[t], gen_states[t-1])
        gen_outs[t] = tf.nn.dropout(gen_outs[t], keep_prob)
      with tf.variable_scope("gen", reuse=True): # ic defined it above
        factors[t] = linear(gen_outs[t], factors_dim, do_bias=False,
                            normalized=True, name="gen_2_fac")
      with tf.variable_scope("glm", reuse=True if t > 0 else None):
        if hps.output_dist == 'poisson':
          log_rates_t = tf.matmul(factors[t], this_out_fac_W) + this_out_fac_b
          log_rates_t.set_shape([None, None])
          rates[t] = dist_params[t] = tf.exp(log_rates_t) # rates feed back
          rates[t].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]])
          loglikelihood_t = Poisson(log_rates_t).logp(data_t_bxd)
        elif hps.output_dist == 'gaussian':
          mean_n_logvars = tf.matmul(factors[t],this_out_fac_W) + this_out_fac_b
          mean_n_logvars.set_shape([None, None])
          means_t_bxd, logvars_t_bxd = tf.split(axis=1, num_or_size_splits=2,
                                                value=mean_n_logvars)
          rates[t] = means_t_bxd # rates feed back to controller
          dist_params[t] = tf.concat(
              axis=1, values=[means_t_bxd, tf.exp(logvars_t_bxd)])
          loglikelihood_t = \
              diag_gaussian_log_likelihood(data_t_bxd,
                                           means_t_bxd, logvars_t_bxd)
        else:
          assert False, "NIY"
        # Accumulate log-likelihood over time, summing out data dims.
        log_p_xgz_b += tf.reduce_sum(loglikelihood_t, [1])
    # Correlation of inferred inputs cost.
    self.corr_cost = tf.constant(0.0)
    if hps.co_mean_corr_scale > 0.0:
      all_sum_corr = []
      for i in range(hps.co_dim):
        for j in range(i+1, hps.co_dim):
          sum_corr_ij = tf.constant(0.0)
          for t in range(num_steps):
            u_mean_t = posterior_zs_co[t].mean
            sum_corr_ij += u_mean_t[:,i]*u_mean_t[:,j]
          all_sum_corr.append(0.5 * tf.square(sum_corr_ij))
      self.corr_cost = tf.reduce_mean(all_sum_corr) # div by batch and by n*(n-1)/2 pairs
    # Variational Lower Bound on posterior, p(z|x), plus reconstruction cost.
    # KL and reconstruction costs are normalized only by batch size, not by
    # dimension, or by time steps.
    kl_cost_g0_b = tf.zeros_like(batch_size, dtype=tf.float32)
    kl_cost_co_b = tf.zeros_like(batch_size, dtype=tf.float32)
    self.kl_cost = tf.constant(0.0) # VAE KL cost
    self.recon_cost = tf.constant(0.0) # VAE reconstruction cost
    self.nll_bound_vae = tf.constant(0.0)
    self.nll_bound_iwae = tf.constant(0.0) # for eval with IWAE cost.
    if kind in ["train", "posterior_sample_and_average", "posterior_push_mean"]:
      kl_cost_g0_b = 0.0
      kl_cost_co_b = 0.0
      if ic_dim > 0:
        g0_priors = [self.prior_zs_g0]
        g0_posts = [self.posterior_zs_g0]
        kl_cost_g0_b = KLCost_GaussianGaussian(g0_posts, g0_priors).kl_cost_b
        kl_cost_g0_b = hps.kl_ic_weight * kl_cost_g0_b
      if co_dim > 0:
        kl_cost_co_b = \
            KLCost_GaussianGaussianProcessSampled(
                posterior_zs_co, prior_zs_ar_con).kl_cost_b
        kl_cost_co_b = hps.kl_co_weight * kl_cost_co_b
      # L = -KL + log p(x|z), to maximize bound on likelihood
      # -L = KL - log p(x|z), to minimize bound on NLL
      # so 'reconstruction cost' is negative log likelihood
      self.recon_cost = - tf.reduce_mean(log_p_xgz_b)
      self.kl_cost = tf.reduce_mean(kl_cost_g0_b + kl_cost_co_b)
      lb_on_ll_b = log_p_xgz_b - kl_cost_g0_b - kl_cost_co_b
      # VAE error averages outside the log
      self.nll_bound_vae = -tf.reduce_mean(lb_on_ll_b)
      # IWAE error averages inside the log
      k = tf.cast(tf.shape(log_p_xgz_b)[0], tf.float32)
      iwae_lb_on_ll = -tf.log(k) + log_sum_exp(lb_on_ll_b)
      self.nll_bound_iwae = -iwae_lb_on_ll
    # L2 regularization on the generator, normalized by number of parameters.
    self.l2_cost = tf.constant(0.0)
    if self.hps.l2_gen_scale > 0.0 or self.hps.l2_con_scale > 0.0:
      l2_costs = []
      l2_numels = []
      l2_reg_var_lists = [tf.get_collection('l2_gen_reg'),
                          tf.get_collection('l2_con_reg')]
      l2_reg_scales = [self.hps.l2_gen_scale, self.hps.l2_con_scale]
      for l2_reg_vars, l2_scale in zip(l2_reg_var_lists, l2_reg_scales):
        for v in l2_reg_vars:
          numel = tf.reduce_prod(tf.concat(axis=0, values=tf.shape(v)))
          numel_f = tf.cast(numel, tf.float32)
          l2_numels.append(numel_f)
          v_l2 = tf.reduce_sum(v*v)
          l2_costs.append(0.5 * l2_scale * v_l2)
      self.l2_cost = tf.add_n(l2_costs) / tf.add_n(l2_numels)
    # Compute the cost for training, part of the graph regardless.
    # The KL cost can be problematic at the beginning of optimization,
    # so we allow an exponential increase in weighting the KL from 0
    # to 1.
    self.kl_decay_step = tf.maximum(self.train_step - hps.kl_start_step, 0)
    self.l2_decay_step = tf.maximum(self.train_step - hps.l2_start_step, 0)
    kl_decay_step_f = tf.cast(self.kl_decay_step, tf.float32)
    l2_decay_step_f = tf.cast(self.l2_decay_step, tf.float32)
    kl_increase_steps_f = tf.cast(hps.kl_increase_steps, tf.float32)
    l2_increase_steps_f = tf.cast(hps.l2_increase_steps, tf.float32)
    self.kl_weight = kl_weight = \
        tf.minimum(kl_decay_step_f / kl_increase_steps_f, 1.0)
    self.l2_weight = l2_weight = \
        tf.minimum(l2_decay_step_f / l2_increase_steps_f, 1.0)
    self.timed_kl_cost = kl_weight * self.kl_cost
    self.timed_l2_cost = l2_weight * self.l2_cost
    self.weight_corr_cost = hps.co_mean_corr_scale * self.corr_cost
    self.cost = self.recon_cost + self.timed_kl_cost + \
        self.timed_l2_cost + self.weight_corr_cost
    if kind != "train":
      # save every so often
      self.seso_saver = tf.train.Saver(tf.global_variables(),
                                       max_to_keep=hps.max_ckpt_to_keep)
      # lowest validation error
      self.lve_saver = tf.train.Saver(tf.global_variables(),
                                      max_to_keep=hps.max_ckpt_to_keep_lve)
      return
    # OPTIMIZATION
    # train the io matrices only
    if self.hps.do_train_io_only:
      self.train_vars = tvars = \
          tf.get_collection('IO_transformations',
                            scope=tf.get_variable_scope().name)
    # train the encoder only
    elif self.hps.do_train_encoder_only:
      tvars1 = \
          tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                            scope='LFADS/ic_enc_*')
      tvars2 = \
          tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                            scope='LFADS/z/ic_enc_*')
      self.train_vars = tvars = tvars1 + tvars2
    # train all variables
    else:
      self.train_vars = tvars = \
          tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                            scope=tf.get_variable_scope().name)
    print("done.")
    print("Model Variables (to be optimized): ")
    total_params = 0
    for i in range(len(tvars)):
      shape = tvars[i].get_shape().as_list()
      print("    ", i, tvars[i].name, shape)
      total_params += np.prod(shape)
    print("Total model parameters: ", total_params)
    grads = tf.gradients(self.cost, tvars)
    grads, grad_global_norm = tf.clip_by_global_norm(grads, hps.max_grad_norm)
    opt = tf.train.AdamOptimizer(self.learning_rate, beta1=0.9, beta2=0.999,
                                 epsilon=1e-01)
    self.grads = grads
    self.grad_global_norm = grad_global_norm
    self.train_op = opt.apply_gradients(
        zip(grads, tvars), global_step=self.train_step)
    self.seso_saver = tf.train.Saver(tf.global_variables(),
                                     max_to_keep=hps.max_ckpt_to_keep)
    # lowest validation error
    # NOTE(review): unlike the kind != "train" branch above, this saver uses
    # max_ckpt_to_keep rather than max_ckpt_to_keep_lve — confirm intended.
    self.lve_saver = tf.train.Saver(tf.global_variables(),
                                    max_to_keep=hps.max_ckpt_to_keep)
    # SUMMARIES, used only during training.
    # example summary
    self.example_image = tf.placeholder(tf.float32, shape=[1,None,None,3],
                                        name='image_tensor')
    self.example_summ = tf.summary.image("LFADS example", self.example_image,
                                         collections=["example_summaries"])
    # general training summaries
    self.lr_summ = tf.summary.scalar("Learning rate", self.learning_rate)
    self.kl_weight_summ = tf.summary.scalar("KL weight", self.kl_weight)
    self.l2_weight_summ = tf.summary.scalar("L2 weight", self.l2_weight)
    self.corr_cost_summ = tf.summary.scalar("Corr cost", self.weight_corr_cost)
    self.grad_global_norm_summ = tf.summary.scalar("Gradient global norm",
                                                   self.grad_global_norm)
    if hps.co_dim > 0:
      self.atau_summ = [None] * hps.co_dim
      self.pvar_summ = [None] * hps.co_dim
      for c in range(hps.co_dim):
        self.atau_summ[c] = \
            tf.summary.scalar("AR Autocorrelation taus " + str(c),
                              tf.exp(self.prior_zs_ar_con.logataus_1xu[0,c]))
        self.pvar_summ[c] = \
            tf.summary.scalar("AR Variances " + str(c),
                              tf.exp(self.prior_zs_ar_con.logpvars_1xu[0,c]))
    # cost summaries, separated into different collections for
    # training vs validation.  We make placeholders for these, because
    # even though the graph computes these costs on a per-batch basis,
    # we want to report the more reliable metric of per-epoch cost.
    kl_cost_ph = tf.placeholder(tf.float32, shape=[], name='kl_cost_ph')
    self.kl_t_cost_summ = tf.summary.scalar("KL cost (train)", kl_cost_ph,
                                            collections=["train_summaries"])
    self.kl_v_cost_summ = tf.summary.scalar("KL cost (valid)", kl_cost_ph,
                                            collections=["valid_summaries"])
    l2_cost_ph = tf.placeholder(tf.float32, shape=[], name='l2_cost_ph')
    self.l2_cost_summ = tf.summary.scalar("L2 cost", l2_cost_ph,
                                          collections=["train_summaries"])
    recon_cost_ph = tf.placeholder(tf.float32, shape=[], name='recon_cost_ph')
    self.recon_t_cost_summ = tf.summary.scalar("Reconstruction cost (train)",
                                               recon_cost_ph,
                                               collections=["train_summaries"])
    self.recon_v_cost_summ = tf.summary.scalar("Reconstruction cost (valid)",
                                               recon_cost_ph,
                                               collections=["valid_summaries"])
    total_cost_ph = tf.placeholder(tf.float32, shape=[], name='total_cost_ph')
    self.cost_t_summ = tf.summary.scalar("Total cost (train)", total_cost_ph,
                                         collections=["train_summaries"])
    self.cost_v_summ = tf.summary.scalar("Total cost (valid)", total_cost_ph,
                                         collections=["valid_summaries"])
    self.kl_cost_ph = kl_cost_ph
    self.l2_cost_ph = l2_cost_ph
    self.recon_cost_ph = recon_cost_ph
    self.total_cost_ph = total_cost_ph
    # Merged summaries, for easy coding later.
    self.merged_examples = tf.summary.merge_all(key="example_summaries")
    self.merged_generic = tf.summary.merge_all() # default key is 'summaries'
    self.merged_train = tf.summary.merge_all(key="train_summaries")
    self.merged_valid = tf.summary.merge_all(key="valid_summaries")
    # NOTE(review): 'session' below is assigned but never used in this method.
    session = tf.get_default_session()
    self.logfile = os.path.join(hps.lfads_save_dir, "lfads_log")
    self.writer = tf.summary.FileWriter(self.logfile)
def build_feed_dict(self, train_name, data_bxtxd, ext_input_bxtxi=None,
keep_prob=None):
"""Build the feed dictionary, handles cases where there is no value defined.
Args:
train_name: The key into the datasets, to set the tf.case statement for
the proper readin / readout matrices.
data_bxtxd: The data tensor
ext_input_bxtxi (optional): The external input tensor
keep_prob: The drop out keep probability.
Returns:
The feed dictionary with TF tensors as keys and data as values, for use
with tf.Session.run()
"""
feed_dict = {}
B, T, _ = data_bxtxd.shape
feed_dict[self.dataName] = train_name
feed_dict[self.dataset_ph] = data_bxtxd
if self.ext_input is not None and ext_input_bxtxi is not None:
feed_dict[self.ext_input] = ext_input_bxtxi
if keep_prob is None:
feed_dict[self.keep_prob] = self.hps.keep_prob
else:
feed_dict[self.keep_prob] = keep_prob
return feed_dict
@staticmethod
def get_batch(data_extxd, ext_input_extxi=None, batch_size=None,
example_idxs=None):
"""Get a batch of data, either randomly chosen, or specified directly.
Args:
data_extxd: The data to model, numpy tensors with shape:
# examples x # time steps x # dimensions
ext_input_extxi (optional): The external inputs, numpy tensor with shape:
# examples x # time steps x # external input dimensions
batch_size: The size of the batch to return
example_idxs (optional): The example indices used to select examples.
Returns:
A tuple with two parts:
1. Batched data numpy tensor with shape:
batch_size x # time steps x # dimensions
2. Batched external input numpy tensor with shape:
batch_size x # time steps x # external input dims
"""
assert batch_size is not None or example_idxs is not None, "Problems"
E, T, D = data_extxd.shape
if example_idxs is None:
example_idxs = np.random.choice(E, batch_size)
ext_input_bxtxi = None
if ext_input_extxi is not None:
ext_input_bxtxi = ext_input_extxi[example_idxs,:,:]
return data_extxd[example_idxs,:,:], ext_input_bxtxi
@staticmethod
def example_idxs_mod_batch_size(nexamples, batch_size):
"""Given a number of examples, E, and a batch_size, B, generate indices
[0, 1, 2, ... B-1;
[B, B+1, ... 2*B-1;
...
]
returning those indices as a 2-dim tensor shaped like E/B x B. Note that
shape is only correct if E % B == 0. If not, then an extra row is generated
so that the remainder of examples is included. The extra examples are
explicitly to to the zero index (see randomize_example_idxs_mod_batch_size)
for randomized behavior.
Args:
nexamples: The number of examples to batch up.
batch_size: The size of the batch.
Returns:
2-dim tensor as described above.
"""
bmrem = batch_size - (nexamples % batch_size)
bmrem_examples = []
if bmrem < batch_size:
#bmrem_examples = np.zeros(bmrem, dtype=np.int32)
ridxs = np.random.permutation(nexamples)[0:bmrem].astype(np.int32)
bmrem_examples = np.sort(ridxs)
example_idxs = range(nexamples) + list(bmrem_examples)
example_idxs_e_x_edivb = np.reshape(example_idxs, [-1, batch_size])
return example_idxs_e_x_edivb, bmrem
@staticmethod
def randomize_example_idxs_mod_batch_size(nexamples, batch_size):
"""Indices 1:nexamples, randomized, in 2D form of
shape = (nexamples / batch_size) x batch_size. The remainder
is managed by drawing randomly from 1:nexamples.
Args:
nexamples: number of examples to randomize
batch_size: number of elements in batch
Returns:
The randomized, properly shaped indicies.
"""
assert nexamples > batch_size, "Problems"
bmrem = batch_size - nexamples % batch_size
bmrem_examples = []
if bmrem < batch_size:
bmrem_examples = np.random.choice(range(nexamples),
size=bmrem, replace=False)
example_idxs = range(nexamples) + list(bmrem_examples)
mixed_example_idxs = np.random.permutation(example_idxs)
example_idxs_e_x_edivb = np.reshape(mixed_example_idxs, [-1, batch_size])
return example_idxs_e_x_edivb, bmrem
def shuffle_spikes_in_time(self, data_bxtxd):
"""Shuffle the spikes in the temporal dimension. This is useful to
help the LFADS system avoid overfitting to individual spikes or fast
oscillations found in the data that are irrelevant to behavior. A
pure 'tabula rasa' approach would avoid this, but LFADS is sensitive
enough to pick up dynamics that you may not want.
Args:
data_bxtxd: numpy array of spike count data to be shuffled.
Returns:
S_bxtxd, a numpy array with the same dimensions and contents as
data_bxtxd, but shuffled appropriately.
"""
B, T, N = data_bxtxd.shape
w = self.hps.temporal_spike_jitter_width
if w == 0:
return data_bxtxd
max_counts = np.max(data_bxtxd)
S_bxtxd = np.zeros([B,T,N])
# Intuitively, shuffle spike occurances, 0 or 1, but since we have counts,
# Do it over and over again up to the max count.
for mc in range(1,max_counts+1):
idxs = np.nonzero(data_bxtxd >= mc)
data_ones = np.zeros_like(data_bxtxd)
data_ones[data_bxtxd >= mc] = 1
nfound = len(idxs[0])
shuffles_incrs_in_time = np.random.randint(-w, w, size=nfound)
shuffle_tidxs = idxs[1].copy()
shuffle_tidxs += shuffles_incrs_in_time
# Reflect on the boundaries to not lose mass.
shuffle_tidxs[shuffle_tidxs < 0] = -shuffle_tidxs[shuffle_tidxs < 0]
shuffle_tidxs[shuffle_tidxs > T-1] = \
(T-1)-(shuffle_tidxs[shuffle_tidxs > T-1] -(T-1))
for iii in zip(idxs[0], shuffle_tidxs, idxs[2]):
S_bxtxd[iii] += 1
return S_bxtxd
def shuffle_and_flatten_datasets(self, datasets, kind='train'):
  """Build a randomized, flattened batch schedule over all datasets.

  Since LFADS supports multiple datasets in the same dynamical model,
  we have to be careful to use all the data in a single training epoch.  But
  since the datasets may have different data dimensionality, we cannot batch
  examples from different data dictionaries together.  Instead, we generate
  random batches within each data dictionary, and then randomize these
  batches while holding onto the dataname, so that when it's time to feed
  the graph, the correct in/out matrices can be selected, per batch.

  Args:
    datasets: A dict of data dicts.  The dataset dict is simply a
      name(string)-> data dictionary mapping (See top of lfads.py).
    kind: 'train' or 'valid'

  Returns:
    A flat list, in which each element is a pair ('name', indices).
  """
  batch_size = self.hps.batch_size
  kind_data = kind + '_data'
  all_name_example_idx_pairs = []
  # (Removed unused locals from the original: ndatasets, epoch_idxs, and a
  # dict-initialization of random_example_idxs that was always overwritten.)
  for name, data_dict in datasets.items():
    nexamples, ntime, data_dim = data_dict[kind_data].shape
    random_example_idxs, _ = \
      self.randomize_example_idxs_mod_batch_size(nexamples, batch_size)
    epoch_size = random_example_idxs.shape[0]
    names = [name] * epoch_size
    all_name_example_idx_pairs += zip(names, random_example_idxs)
  np.random.shuffle(all_name_example_idx_pairs)  # shuffle in place
  return all_name_example_idx_pairs
def train_epoch(self, datasets, batch_size=None, do_save_ckpt=True):
  """Train the model through the entire dataset once.

  Args:
    datasets: A dict of data dicts.  The dataset dict is simply a
      name(string)-> data dictionary mapping (See top of lfads.py).
    batch_size (optional): The batch_size to use
    do_save_ckpt (optional): Should the routine save a checkpoint on this
      training epoch?

  Returns:
    A tuple with 6 float values:
      (total cost of the epoch, epoch reconstruction cost,
       epoch kl cost, KL weight used this training epoch,
       total l2 cost on generator, and the corresponding weight).
  """
  ops_to_eval = [self.cost, self.recon_cost,
                 self.kl_cost, self.kl_weight,
                 self.l2_cost, self.l2_weight,
                 self.train_op]
  collected_op_values = self.run_epoch(datasets, ops_to_eval, kind="train")

  # Average per-batch costs over the epoch.  Normalizing by batch size is
  # already done in distributions.py.
  nbatches = len(collected_op_values)
  epoch_total_cost = sum(v[0] for v in collected_op_values) / nbatches
  epoch_recon_cost = sum(v[1] for v in collected_op_values) / nbatches
  epoch_kl_cost = sum(v[2] for v in collected_op_values) / nbatches
  # The weights and l2 cost evolve on a schedule; report the values from
  # the final batch of the epoch.
  kl_weight, l2_cost, l2_weight = collected_op_values[-1][3:6]

  if do_save_ckpt:
    session = tf.get_default_session()
    checkpoint_path = os.path.join(self.hps.lfads_save_dir,
                                   self.hps.checkpoint_name + '.ckpt')
    self.seso_saver.save(session, checkpoint_path,
                         global_step=self.train_step)

  return epoch_total_cost, epoch_recon_cost, epoch_kl_cost, \
    kl_weight, l2_cost, l2_weight
def run_epoch(self, datasets, ops_to_eval, kind="train", batch_size=None,
              do_collect=True, keep_prob=None):
  """Run the model through the entire dataset once.

  Args:
    datasets: A dict of data dicts.  The dataset dict is simply a
      name(string)-> data dictionary mapping (See top of lfads.py).
    ops_to_eval: A list of tensorflow operations that will be evaluated in
      the tf.session.run() call.
    kind: 'train' or 'valid'; selects which split of each dataset is fed.
    batch_size (optional): The batch_size to use.  NOTE(review): unused in
      this body; batching comes from shuffle_and_flatten_datasets, which
      reads hps.batch_size -- confirm whether this parameter is vestigial.
    do_collect (optional): Should the routine collect all session.run
      output as a list, and return it?
    keep_prob (optional): The dropout keep probability.

  Returns:
    A list of lists, the internal list is the return for the ops for each
    session.run() call.  The outer list collects over the epoch.
  """
  hps = self.hps
  # Build the randomized (dataset-name, example-indices) schedule that
  # covers every dataset exactly once this epoch.
  all_name_example_idx_pairs = \
    self.shuffle_and_flatten_datasets(datasets, kind)

  kind_data = kind + '_data'
  kind_ext_input = kind + '_ext_input'

  total_cost = total_recon_cost = total_kl_cost = 0.0  # NOTE(review): unused here
  session = tf.get_default_session()
  epoch_size = len(all_name_example_idx_pairs)  # NOTE(review): unused here
  evaled_ops_list = []
  for name, example_idxs in all_name_example_idx_pairs:
    data_dict = datasets[name]
    data_extxd = data_dict[kind_data]
    # Optional regularizer for Poisson spike data: jitter spikes in time
    # before each feed (see shuffle_spikes_in_time).
    if hps.output_dist == 'poisson' and hps.temporal_spike_jitter_width > 0:
      data_extxd = self.shuffle_spikes_in_time(data_extxd)

    ext_input_extxi = data_dict[kind_ext_input]
    data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd, ext_input_extxi,
                                                 example_idxs=example_idxs)
    feed_dict = self.build_feed_dict(name, data_bxtxd, ext_input_bxtxi,
                                     keep_prob=keep_prob)
    evaled_ops_np = session.run(ops_to_eval, feed_dict=feed_dict)
    if do_collect:
      evaled_ops_list.append(evaled_ops_np)
  return evaled_ops_list
def summarize_all(self, datasets, summary_values):
  """Plot and summarize stuff in tensorboard.

  Note that everything done in the current function is otherwise done on
  a single, randomly selected dataset (except for summary_values, which are
  passed in.)

  Args:
    datasets: the dictionary of datasets used in the study.
    summary_values: These summary values are created from the training loop,
      and so summarize the entire set of datasets.
  """
  hps = self.hps
  tr_kl_cost = summary_values['tr_kl_cost']
  tr_recon_cost = summary_values['tr_recon_cost']
  tr_total_cost = summary_values['tr_total_cost']
  kl_weight = summary_values['kl_weight']
  l2_weight = summary_values['l2_weight']
  l2_cost = summary_values['l2_cost']
  has_any_valid_set = summary_values['has_any_valid_set']
  i = summary_values['nepochs']

  session = tf.get_default_session()
  # Scalar costs computed in python are fed through placeholders so that
  # they can be logged as tensorboard summaries.
  train_summ, train_step = session.run([self.merged_train,
                                        self.train_step],
                                       feed_dict={self.l2_cost_ph:l2_cost,
                                                  self.kl_cost_ph:tr_kl_cost,
                                                  self.recon_cost_ph:tr_recon_cost,
                                                  self.total_cost_ph:tr_total_cost})
  self.writer.add_summary(train_summ, train_step)
  if has_any_valid_set:
    ev_kl_cost = summary_values['ev_kl_cost']
    ev_recon_cost = summary_values['ev_recon_cost']
    ev_total_cost = summary_values['ev_total_cost']
    eval_summ = session.run(self.merged_valid,
                            feed_dict={self.kl_cost_ph:ev_kl_cost,
                                       self.recon_cost_ph:ev_recon_cost,
                                       self.total_cost_ph:ev_total_cost})
    self.writer.add_summary(eval_summ, train_step)
    # NOTE(review): the backslash-continued string literals below embed the
    # continuation lines' leading whitespace directly in the printed/CSV
    # text -- do not re-indent them.
    print("Epoch:%d, step:%d (TRAIN, VALID): total: %.2f, %.2f\
    recon: %.2f, %.2f, kl: %.2f, %.2f, l2: %.5f,\
    kl weight: %.2f, l2 weight: %.2f" % \
          (i, train_step, tr_total_cost, ev_total_cost,
           tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost,
           l2_cost, kl_weight, l2_weight))

    csv_outstr = "epoch,%d, step,%d, total,%.2f,%.2f, \
    recon,%.2f,%.2f, kl,%.2f,%.2f, l2,%.5f, \
    klweight,%.2f, l2weight,%.2f\n"% \
          (i, train_step, tr_total_cost, ev_total_cost,
           tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost,
           l2_cost, kl_weight, l2_weight)

  else:
    print("Epoch:%d, step:%d TRAIN: total: %.2f recon: %.2f, kl: %.2f,\
    l2: %.5f, kl weight: %.2f, l2 weight: %.2f" % \
          (i, train_step, tr_total_cost, tr_recon_cost, tr_kl_cost,
           l2_cost, kl_weight, l2_weight))
    csv_outstr = "epoch,%d, step,%d, total,%.2f, recon,%.2f, kl,%.2f, \
    l2,%.5f, klweight,%.2f, l2weight,%.2f\n"% \
          (i, train_step, tr_total_cost, tr_recon_cost,
           tr_kl_cost, l2_cost, kl_weight, l2_weight)

  if self.hps.csv_log:
    # Append one CSV row per epoch for offline analysis of training curves.
    csv_file = os.path.join(self.hps.lfads_save_dir, self.hps.csv_log+'.csv')
    with open(csv_file, "a") as myfile:
      myfile.write(csv_outstr)
def plot_single_example(self, datasets):
  """Plot an image relating to a randomly chosen, specific example.  We use
  posterior sample and average by taking one example, and filling a whole
  batch with that example, sample from the posterior, and then average the
  quantities.

  Args:
    datasets: A dict of data dicts.  The dataset dict is simply a
      name(string)-> data dictionary mapping (See top of lfads.py).
  """
  hps = self.hps
  # BUG FIX: under Python 3, dict.keys() returns a view that
  # np.random.permutation cannot index; materialize it to a list first.
  all_data_names = list(datasets.keys())
  data_name = np.random.permutation(all_data_names)[0]
  data_dict = datasets[data_name]
  has_valid_set = data_dict['valid_data'] is not None
  cf = 1.0  # plotting concern

  # posterior sample and average here
  E, _, _ = data_dict['train_data'].shape
  eidx = np.random.choice(E)
  # Fill the whole batch with the same example, so averaging over the batch
  # averages over posterior samples of that single example.
  example_idxs = eidx * np.ones(hps.batch_size, dtype=np.int32)
  train_data_bxtxd, train_ext_input_bxtxi = \
    self.get_batch(data_dict['train_data'], data_dict['train_ext_input'],
                   example_idxs=example_idxs)

  truth_train_data_bxtxd = None
  if 'train_truth' in data_dict and data_dict['train_truth'] is not None:
    truth_train_data_bxtxd, _ = self.get_batch(data_dict['train_truth'],
                                               example_idxs=example_idxs)
    cf = data_dict['conversion_factor']

  # plotter does averaging
  train_model_values = self.eval_model_runs_batch(data_name,
                                                  train_data_bxtxd,
                                                  train_ext_input_bxtxi,
                                                  do_average_batch=False)

  train_step = train_model_values['train_steps']
  feed_dict = self.build_feed_dict(data_name, train_data_bxtxd,
                                   train_ext_input_bxtxi, keep_prob=1.0)

  session = tf.get_default_session()
  generic_summ = session.run(self.merged_generic, feed_dict=feed_dict)
  self.writer.add_summary(generic_summ, train_step)

  valid_data_bxtxd = valid_model_values = valid_ext_input_bxtxi = None
  truth_valid_data_bxtxd = None
  if has_valid_set:
    E, _, _ = data_dict['valid_data'].shape
    eidx = np.random.choice(E)
    example_idxs = eidx * np.ones(hps.batch_size, dtype=np.int32)
    valid_data_bxtxd, valid_ext_input_bxtxi = \
      self.get_batch(data_dict['valid_data'],
                     data_dict['valid_ext_input'],
                     example_idxs=example_idxs)
    if 'valid_truth' in data_dict and data_dict['valid_truth'] is not None:
      truth_valid_data_bxtxd, _ = self.get_batch(data_dict['valid_truth'],
                                                 example_idxs=example_idxs)
    else:
      truth_valid_data_bxtxd = None

    # plotter does averaging
    valid_model_values = self.eval_model_runs_batch(data_name,
                                                    valid_data_bxtxd,
                                                    valid_ext_input_bxtxi,
                                                    do_average_batch=False)

  example_image = plot_lfads(train_bxtxd=train_data_bxtxd,
                             train_model_vals=train_model_values,
                             train_ext_input_bxtxi=train_ext_input_bxtxi,
                             train_truth_bxtxd=truth_train_data_bxtxd,
                             valid_bxtxd=valid_data_bxtxd,
                             valid_model_vals=valid_model_values,
                             valid_ext_input_bxtxi=valid_ext_input_bxtxi,
                             valid_truth_bxtxd=truth_valid_data_bxtxd,
                             bidx=None, cf=cf, output_dist=hps.output_dist)
  example_image = np.expand_dims(example_image, axis=0)
  example_summ = session.run(self.merged_examples,
                             feed_dict={self.example_image : example_image})
  self.writer.add_summary(example_summ)
def train_model(self, datasets):
  """Train the model, print per-epoch information, and save checkpoints.

  Loop over training epochs.  The function that actually does the
  training is train_epoch.  This function iterates over the training
  data, one epoch at a time.  The learning rate schedule is such
  that it will stay the same until the cost goes up in comparison to
  the last few values, then it will drop.

  Args:
    datasets: A dict of data dicts.  The dataset dict is simply a
      name(string)-> data dictionary mapping (See top of lfads.py).
  """
  hps = self.hps
  # Determine whether any dataset carries a validation split; validation
  # cost tracking and LVE checkpointing are skipped otherwise.
  has_any_valid_set = False
  for data_dict in datasets.values():
    if data_dict['valid_data'] is not None:
      has_any_valid_set = True
      break

  session = tf.get_default_session()
  lr = session.run(self.learning_rate)
  lr_stop = hps.learning_rate_stop
  i = -1
  train_costs = []
  valid_costs = []
  ev_total_cost = ev_recon_cost = ev_kl_cost = 0.0
  lowest_ev_cost = np.Inf
  while True:
    i += 1
    # Save a regular (non-LVE) checkpoint every 10th epoch.
    do_save_ckpt = True if i % 10 ==0 else False
    tr_total_cost, tr_recon_cost, tr_kl_cost, kl_weight, l2_cost, l2_weight = \
      self.train_epoch(datasets, do_save_ckpt=do_save_ckpt)

    # Evaluate the validation cost, and potentially save.  Note that this
    # routine will not save a validation checkpoint until the kl weight and
    # l2 weights are equal to 1.0.
    if has_any_valid_set:
      ev_total_cost, ev_recon_cost, ev_kl_cost = \
        self.eval_cost_epoch(datasets, kind='valid')
      valid_costs.append(ev_total_cost)

      # > 1 may give more consistent results, but not the actual lowest vae.
      # == 1 gives the lowest vae seen so far.
      n_lve = 1
      run_avg_lve = np.mean(valid_costs[-n_lve:])

      # conditions for saving checkpoints:
      #   KL weight must have finished stepping (>=1.0), AND
      #   L2 weight must have finished stepping OR L2 is not being used, AND
      #   the current run has a lower LVE than previous runs AND
      #   len(valid_costs > n_lve) (not sure what that does)
      if kl_weight >= 1.0 and \
         (l2_weight >= 1.0 or \
          (self.hps.l2_gen_scale == 0.0 and self.hps.l2_con_scale == 0.0)) \
         and (len(valid_costs) > n_lve and run_avg_lve < lowest_ev_cost):

        lowest_ev_cost = run_avg_lve
        checkpoint_path = os.path.join(self.hps.lfads_save_dir,
                                       self.hps.checkpoint_name + '_lve.ckpt')
        # Separate saver/latest_filename keeps the lowest-validation-error
        # checkpoint from being clobbered by the regular checkpoints.
        self.lve_saver.save(session, checkpoint_path,
                            global_step=self.train_step,
                            latest_filename='checkpoint_lve')

    # Plot and summarize.
    values = {'nepochs':i, 'has_any_valid_set': has_any_valid_set,
              'tr_total_cost':tr_total_cost, 'ev_total_cost':ev_total_cost,
              'tr_recon_cost':tr_recon_cost, 'ev_recon_cost':ev_recon_cost,
              'tr_kl_cost':tr_kl_cost, 'ev_kl_cost':ev_kl_cost,
              'l2_weight':l2_weight, 'kl_weight':kl_weight,
              'l2_cost':l2_cost}
    self.summarize_all(datasets, values)
    self.plot_single_example(datasets)

    # Manage learning rate: decay when the training cost exceeds the max of
    # the last n_lr recorded costs.
    train_res = tr_total_cost
    n_lr = hps.learning_rate_n_to_compare
    if len(train_costs) > n_lr and train_res > np.max(train_costs[-n_lr:]):
      _ = session.run(self.learning_rate_decay_op)
      lr = session.run(self.learning_rate)
      print("     Decreasing learning rate to %f." % lr)
      # Force the system to run n_lr times while at this lr.
      train_costs.append(np.inf)
    else:
      train_costs.append(train_res)

    if lr < lr_stop:
      print("Stopping optimization based on learning rate criteria.")
      break
def eval_cost_epoch(self, datasets, kind='train', ext_input_extxi=None,
                    batch_size=None):
  """Evaluate the cost of the epoch.

  Args:
    datasets: The dictionary of datasets (training and validation) used for
      training and evaluation of the model.
    kind: 'train' or 'valid'; which split to evaluate.
    ext_input_extxi (optional): unused here; kept for interface parity.
    batch_size (optional): unused here; kept for interface parity.

  Returns:
    a 3 tuple of costs:
      (epoch total cost, epoch reconstruction cost, epoch KL cost)
  """
  ops_to_eval = [self.cost, self.recon_cost, self.kl_cost]
  # Dropout is disabled (keep_prob=1.0) for evaluation.
  collected = self.run_epoch(datasets, ops_to_eval, kind=kind,
                             keep_prob=1.0)

  # Accumulate the three costs over all batches, then average.
  # Normalizing by batch size is already done in distributions.py.
  nbatches = len(collected)
  sums = [0.0, 0.0, 0.0]
  for batch_vals in collected:
    for j in range(3):
      sums[j] += batch_vals[j]
  return tuple(s / nbatches for s in sums)
def eval_model_runs_batch(self, data_name, data_bxtxd, ext_input_bxtxi=None,
                          do_eval_cost=False, do_average_batch=False):
  """Returns all the goodies for the entire model, per batch.

  If data_bxtxd and ext_input_bxtxi can have fewer than batch_size along dim 1
  in which case this handles the padding and truncating automatically

  Args:
    data_name: The name of the data dict, to select which in/out matrices
      to use.
    data_bxtxd:  Numpy array training data with shape:
      batch_size x # time steps x # dimensions
    ext_input_bxtxi: Numpy array training external input with shape:
      batch_size x # time steps x # external input dims
    do_eval_cost (optional): If true, the IWAE (Importance Weighted
       Autoencoder) log likeihood bound, instead of the VAE version.
       NOTE(review): this flag is not read anywhere in the body; both bounds
       are always evaluated -- confirm whether it is vestigial.
    do_average_batch (optional): average over the batch, useful for getting
    good IWAE costs, and model outputs for a single data point.

  Returns:
    A dictionary with the outputs of the model decoder, namely:
      prior g0 mean, prior g0 variance, approx. posterior mean, approx
      posterior mean, the generator initial conditions, the control inputs (if
      enabled), the state of the generator, the factors, and the rates.
  """
  session = tf.get_default_session()

  # if fewer than batch_size provided, pad to batch_size
  hps = self.hps
  batch_size = hps.batch_size
  E, _, _ = data_bxtxd.shape
  if E < hps.batch_size:
    # Zero-pad along the batch dimension; the padded trials are sliced back
    # out of every returned quantity at the bottom of this function.
    data_bxtxd = np.pad(data_bxtxd, ((0, hps.batch_size-E), (0, 0), (0, 0)),
                        mode='constant', constant_values=0)
    if ext_input_bxtxi is not None:
      ext_input_bxtxi = np.pad(ext_input_bxtxi,
                               ((0, hps.batch_size-E), (0, 0), (0, 0)),
                               mode='constant', constant_values=0)

  feed_dict = self.build_feed_dict(data_name, data_bxtxd,
                                   ext_input_bxtxi, keep_prob=1.0)

  # Non-temporal signals will be batch x dim.
  # Temporal signals are list length T with elements batch x dim.
  tf_vals = [self.gen_ics, self.gen_states, self.factors,
             self.output_dist_params]
  tf_vals.append(self.cost)
  tf_vals.append(self.nll_bound_vae)
  tf_vals.append(self.nll_bound_iwae)
  tf_vals.append(self.train_step) # not train_op!
  if self.hps.ic_dim > 0:
    tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar,
                self.posterior_zs_g0.mean, self.posterior_zs_g0.logvar]
  if self.hps.co_dim > 0:
    tf_vals.append(self.controller_outputs)
  # Flatten the nested structure so one session.run covers everything;
  # fidxs maps each logical quantity back to its flat indices.
  tf_vals_flat, fidxs = flatten(tf_vals)

  np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict)

  # ff is a cursor into fidxs, advanced once per logical quantity, in the
  # exact order the quantities were appended to tf_vals above.
  ff = 0
  gen_ics = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  gen_states = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  factors = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  out_dist_params = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  costs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  nll_bound_vaes = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  nll_bound_iwaes = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1
  train_steps = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1
  if self.hps.ic_dim > 0:
    prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff +=1
    prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
    post_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
    post_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  if self.hps.co_dim > 0:
    controller_outputs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1

  # [0] are to take out the non-temporal items from lists
  gen_ics = gen_ics[0]
  costs = costs[0]
  nll_bound_vaes = nll_bound_vaes[0]
  nll_bound_iwaes = nll_bound_iwaes[0]
  train_steps = train_steps[0]

  # Convert to full tensors, not lists of tensors in time dim.
  gen_states = list_t_bxn_to_tensor_bxtxn(gen_states)
  factors = list_t_bxn_to_tensor_bxtxn(factors)
  out_dist_params = list_t_bxn_to_tensor_bxtxn(out_dist_params)
  if self.hps.ic_dim > 0:
    # select first time point
    prior_g0_mean = prior_g0_mean[0]
    prior_g0_logvar = prior_g0_logvar[0]
    post_g0_mean = post_g0_mean[0]
    post_g0_logvar = post_g0_logvar[0]
  if self.hps.co_dim > 0:
    controller_outputs = list_t_bxn_to_tensor_bxtxn(controller_outputs)

  # slice out the trials in case < batch_size provided
  if E < hps.batch_size:
    idx = np.arange(E)
    gen_ics = gen_ics[idx, :]
    gen_states = gen_states[idx, :]
    factors = factors[idx, :, :]
    out_dist_params = out_dist_params[idx, :, :]
    if self.hps.ic_dim > 0:
      prior_g0_mean = prior_g0_mean[idx, :]
      prior_g0_logvar = prior_g0_logvar[idx, :]
      post_g0_mean = post_g0_mean[idx, :]
      post_g0_logvar = post_g0_logvar[idx, :]
    if self.hps.co_dim > 0:
      controller_outputs = controller_outputs[idx, :, :]

  if do_average_batch:
    # Collapse the batch dimension; when the batch was filled with copies of
    # one example this is an approximate posterior average for that example.
    gen_ics = np.mean(gen_ics, axis=0)
    gen_states = np.mean(gen_states, axis=0)
    factors = np.mean(factors, axis=0)
    out_dist_params = np.mean(out_dist_params, axis=0)
    if self.hps.ic_dim > 0:
      prior_g0_mean = np.mean(prior_g0_mean, axis=0)
      prior_g0_logvar = np.mean(prior_g0_logvar, axis=0)
      post_g0_mean = np.mean(post_g0_mean, axis=0)
      post_g0_logvar = np.mean(post_g0_logvar, axis=0)
    if self.hps.co_dim > 0:
      controller_outputs = np.mean(controller_outputs, axis=0)

  model_vals = {}
  model_vals['gen_ics'] = gen_ics
  model_vals['gen_states'] = gen_states
  model_vals['factors'] = factors
  model_vals['output_dist_params'] = out_dist_params
  model_vals['costs'] = costs
  model_vals['nll_bound_vaes'] = nll_bound_vaes
  model_vals['nll_bound_iwaes'] = nll_bound_iwaes
  model_vals['train_steps'] = train_steps
  if self.hps.ic_dim > 0:
    model_vals['prior_g0_mean'] = prior_g0_mean
    model_vals['prior_g0_logvar'] = prior_g0_logvar
    model_vals['post_g0_mean'] = post_g0_mean
    model_vals['post_g0_logvar'] = post_g0_logvar
  if self.hps.co_dim > 0:
    model_vals['controller_outputs'] = controller_outputs

  return model_vals
def eval_model_runs_avg_epoch(self, data_name, data_extxd,
                              ext_input_extxi=None):
  """Returns all the expected value for goodies for the entire model.

  The expected value is taken over hidden (z) variables, namely the initial
  conditions and the control inputs.  The expected value is approximate, and
  accomplished via sampling (batch_size) samples for every examples.

  Args:
    data_name: The name of the data dict, to select which in/out matrices
      to use.
    data_extxd:  Numpy array training data with shape:
      # examples x # time steps x # dimensions
    ext_input_extxi (optional): Numpy array training external input with
      shape: # examples x # time steps x # external input dims

  Returns:
    A dictionary with the averaged outputs of the model decoder, namely:
      prior g0 mean, prior g0 variance, approx. posterior mean, approx
      posterior mean, the generator initial conditions, the control inputs (if
      enabled), the state of the generator, the factors, and the output
      distribution parameters, e.g. (rates or mean and variances).
  """
  hps = self.hps
  batch_size = hps.batch_size
  E, T, D  = data_extxd.shape
  # Cap the number of examples processed at the size of the dataset.
  E_to_process = hps.ps_nexamples_to_process
  if E_to_process > E:
    E_to_process = E

  # Pre-allocate per-example accumulators for the batch-averaged outputs.
  if hps.ic_dim > 0:
    prior_g0_mean = np.zeros([E_to_process, hps.ic_dim])
    prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
    post_g0_mean = np.zeros([E_to_process, hps.ic_dim])
    post_g0_logvar = np.zeros([E_to_process, hps.ic_dim])

  if hps.co_dim > 0:
    controller_outputs = np.zeros([E_to_process, T, hps.co_dim])
  gen_ics = np.zeros([E_to_process, hps.gen_dim])
  gen_states = np.zeros([E_to_process, T, hps.gen_dim])
  factors = np.zeros([E_to_process, T, hps.factors_dim])

  # The Gaussian output distribution carries both means and variances (D+D).
  if hps.output_dist == 'poisson':
    out_dist_params = np.zeros([E_to_process, T, D])
  elif hps.output_dist == 'gaussian':
    out_dist_params = np.zeros([E_to_process, T, D+D])
  else:
    assert False, "NIY"

  costs = np.zeros(E_to_process)
  nll_bound_vaes = np.zeros(E_to_process)
  nll_bound_iwaes = np.zeros(E_to_process)
  train_steps = np.zeros(E_to_process)
  for es_idx in range(E_to_process):
    print("Running %d of %d." % (es_idx+1, E_to_process))
    # Fill an entire batch with the same example; averaging over the batch
    # (do_average_batch=True) then approximates the posterior expectation.
    example_idxs = es_idx * np.ones(batch_size, dtype=np.int32)
    data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd,
                                                 ext_input_extxi,
                                                 batch_size=batch_size,
                                                 example_idxs=example_idxs)
    model_values = self.eval_model_runs_batch(data_name, data_bxtxd,
                                              ext_input_bxtxi,
                                              do_eval_cost=True,
                                              do_average_batch=True)

    if self.hps.ic_dim > 0:
      prior_g0_mean[es_idx,:] = model_values['prior_g0_mean']
      prior_g0_logvar[es_idx,:] = model_values['prior_g0_logvar']
      post_g0_mean[es_idx,:] = model_values['post_g0_mean']
      post_g0_logvar[es_idx,:] = model_values['post_g0_logvar']
    gen_ics[es_idx,:] = model_values['gen_ics']

    if self.hps.co_dim > 0:
      controller_outputs[es_idx,:,:] = model_values['controller_outputs']
    gen_states[es_idx,:,:] = model_values['gen_states']
    factors[es_idx,:,:] = model_values['factors']
    out_dist_params[es_idx,:,:] = model_values['output_dist_params']
    costs[es_idx] = model_values['costs']
    nll_bound_vaes[es_idx] = model_values['nll_bound_vaes']
    nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes']
    train_steps[es_idx] = model_values['train_steps']
    print('bound nll(vae): %.3f, bound nll(iwae): %.3f' \
          % (nll_bound_vaes[es_idx], nll_bound_iwaes[es_idx]))

  model_runs = {}
  if self.hps.ic_dim > 0:
    model_runs['prior_g0_mean'] = prior_g0_mean
    model_runs['prior_g0_logvar'] = prior_g0_logvar
    model_runs['post_g0_mean'] = post_g0_mean
    model_runs['post_g0_logvar'] = post_g0_logvar
  model_runs['gen_ics'] = gen_ics
  if self.hps.co_dim > 0:
    model_runs['controller_outputs'] = controller_outputs
  model_runs['gen_states'] = gen_states
  model_runs['factors'] = factors
  model_runs['output_dist_params'] = out_dist_params
  model_runs['costs'] = costs
  model_runs['nll_bound_vaes'] = nll_bound_vaes
  model_runs['nll_bound_iwaes'] = nll_bound_iwaes
  model_runs['train_steps'] = train_steps
  return model_runs
def eval_model_runs_push_mean(self, data_name, data_extxd,
                              ext_input_extxi=None):
  """Returns values of interest for the model by pushing the means through

  The mean values for both initial conditions and the control inputs are
  pushed through the model instead of sampling (as is done in
  eval_model_runs_avg_epoch).
  This is a quick and approximate version of estimating these values instead
  of sampling from the posterior many times and then averaging those values of
  interest.

  Internally, a total of batch_size trials are run through the model at once.

  Args:
    data_name: The name of the data dict, to select which in/out matrices
      to use.
    data_extxd:  Numpy array training data with shape:
      # examples x # time steps x # dimensions
    ext_input_extxi (optional): Numpy array training external input with
      shape: # examples x # time steps x # external input dims

  Returns:
    A dictionary with the estimated outputs of the model decoder, namely:
      prior g0 mean, prior g0 variance, approx. posterior mean, approx
      posterior mean, the generator initial conditions, the control inputs (if
      enabled), the state of the generator, the factors, and the output
      distribution parameters, e.g. (rates or mean and variances).
  """
  hps = self.hps
  batch_size = hps.batch_size
  E, T, D  = data_extxd.shape
  E_to_process = hps.ps_nexamples_to_process
  if E_to_process > E:
    print("Setting number of posterior samples to process to : ", E)
    E_to_process = E

  # Pre-allocate per-example output buffers.
  if hps.ic_dim > 0:
    prior_g0_mean = np.zeros([E_to_process, hps.ic_dim])
    prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
    post_g0_mean = np.zeros([E_to_process, hps.ic_dim])
    post_g0_logvar = np.zeros([E_to_process, hps.ic_dim])

  if hps.co_dim > 0:
    controller_outputs = np.zeros([E_to_process, T, hps.co_dim])
  gen_ics = np.zeros([E_to_process, hps.gen_dim])
  gen_states = np.zeros([E_to_process, T, hps.gen_dim])
  factors = np.zeros([E_to_process, T, hps.factors_dim])

  # The Gaussian output distribution carries both means and variances (D+D).
  if hps.output_dist == 'poisson':
    out_dist_params = np.zeros([E_to_process, T, D])
  elif hps.output_dist == 'gaussian':
    out_dist_params = np.zeros([E_to_process, T, D+D])
  else:
    assert False, "NIY"

  costs = np.zeros(E_to_process)
  nll_bound_vaes = np.zeros(E_to_process)
  nll_bound_iwaes = np.zeros(E_to_process)
  train_steps = np.zeros(E_to_process)

  # generator that will yield 0:N in groups of per items, e.g.
  # (0:per-1), (per:2*per-1), ..., with the last group containing <= per items
  # this will be used to feed per=batch_size trials into the model at a time
  def trial_batches(N, per):
    for i in range(0, N, per):
      yield np.arange(i, min(i+per, N), dtype=np.int32)

  for batch_idx, es_idx in enumerate(trial_batches(E_to_process,
                                                   hps.batch_size)):
    print("Running trial batch %d with %d trials" % (batch_idx+1,
                                                     len(es_idx)))
    data_bxtxd, ext_input_bxtxi = self.get_batch(data_extxd,
                                                 ext_input_extxi,
                                                 batch_size=batch_size,
                                                 example_idxs=es_idx)
    # do_average_batch=False: each trial in the batch is a distinct example.
    model_values = self.eval_model_runs_batch(data_name, data_bxtxd,
                                              ext_input_bxtxi,
                                              do_eval_cost=True,
                                              do_average_batch=False)

    if self.hps.ic_dim > 0:
      prior_g0_mean[es_idx,:] = model_values['prior_g0_mean']
      prior_g0_logvar[es_idx,:] = model_values['prior_g0_logvar']
      post_g0_mean[es_idx,:] = model_values['post_g0_mean']
      post_g0_logvar[es_idx,:] = model_values['post_g0_logvar']
    gen_ics[es_idx,:] = model_values['gen_ics']

    if self.hps.co_dim > 0:
      controller_outputs[es_idx,:,:] = model_values['controller_outputs']
    gen_states[es_idx,:,:] = model_values['gen_states']
    factors[es_idx,:,:] = model_values['factors']
    out_dist_params[es_idx,:,:] = model_values['output_dist_params']

    # TODO
    # model_values['costs'] and other costs come out as scalars, summed over
    # all the trials in the batch. what we want is the per-trial costs
    costs[es_idx] = model_values['costs']
    nll_bound_vaes[es_idx] = model_values['nll_bound_vaes']
    nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes']

    train_steps[es_idx] = model_values['train_steps']

  model_runs = {}
  if self.hps.ic_dim > 0:
    model_runs['prior_g0_mean'] = prior_g0_mean
    model_runs['prior_g0_logvar'] = prior_g0_logvar
    model_runs['post_g0_mean'] = post_g0_mean
    model_runs['post_g0_logvar'] = post_g0_logvar
  model_runs['gen_ics'] = gen_ics

  if self.hps.co_dim > 0:
    model_runs['controller_outputs'] = controller_outputs
  model_runs['gen_states'] = gen_states
  model_runs['factors'] = factors
  model_runs['output_dist_params'] = out_dist_params

  # You probably do not want the LL associated values when pushing the mean
  # instead of sampling.
  model_runs['costs'] = costs
  model_runs['nll_bound_vaes'] = nll_bound_vaes
  model_runs['nll_bound_iwaes'] = nll_bound_iwaes
  model_runs['train_steps'] = train_steps
  return model_runs
def write_model_runs(self, datasets, output_fname=None, push_mean=False):
  """Run the model on the data in data_dict, and save the computed values.

  LFADS generates a number of outputs for each example, and these are all
  saved.  They are:
    The mean and variance of the prior of g0.
    The mean and variance of approximate posterior of g0.
    The control inputs (if enabled)
    The initial conditions, g0, for all examples.
    The generator states for all time.
    The factors for all time.
    The output distribution parameters (e.g. rates) for all time.

  Args:
    datasets: a dictionary of named data_dictionaries, see top of lfads.py
    output_fname: a file name stem for the output files.
    push_mean: if False (default), generates batch_size samples for each trial
      and averages the results. if True, runs each trial once without noise,
      pushing the posterior mean initial conditions and control inputs through
      the trained model. False is used for posterior_sample_and_average, True
      is used for posterior_push_mean.
  """
  hps = self.hps
  kind = hps.kind
  # Choose the evaluation routine and the filename prefix once, up front.
  runner = (self.eval_model_runs_push_mean if push_mean
            else self.eval_model_runs_avg_epoch)
  fname_stem = output_fname if output_fname else "model_runs_"

  for data_name, data_dict in datasets.items():
    # Both splits of every dataset are evaluated and written.
    splits = (('train', data_dict['train_data'], data_dict['train_ext_input']),
              ('valid', data_dict['valid_data'], data_dict['valid_ext_input']))
    for data_kind, data_extxd, ext_input_extxi in splits:
      fname = fname_stem + data_name + '_' + data_kind + '_' + kind

      print("Writing data for %s data and kind %s." % (data_name, data_kind))
      model_runs = runner(data_name, data_extxd, ext_input_extxi)
      full_fname = os.path.join(hps.lfads_save_dir, fname)
      write_data(full_fname, model_runs, compression='gzip')
      print("Done.")
def write_model_samples(self, dataset_name, output_fname=None):
  """Use the prior distribution to generate batch_size number of samples
  from the model.

  LFADS generates a number of outputs for each sample, and these are all
  saved.  They are:
    The mean and variance of the prior of g0.
    The control inputs (if enabled)
    The initial conditions, g0, for all examples.
    The generator states for all time.
    The factors for all time.
    The output distribution parameters (e.g. rates) for all time.

  Args:
    dataset_name: The name of the dataset to grab the factors -> rates
      alignment matrices from.
    output_fname: The name of the file in which to save the generated
      samples.
  """
  hps = self.hps
  batch_size = hps.batch_size

  print("Generating %d samples" % (batch_size))
  tf_vals = [self.factors, self.gen_states, self.gen_ics,
             self.cost, self.output_dist_params]
  if hps.ic_dim > 0:
    tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar]
  if hps.co_dim > 0:
    tf_vals += [self.prior_zs_ar_con.samples_t]
  # Flatten the nested structure so one session.run covers everything;
  # fidxs maps each logical quantity back to its flat indices.
  tf_vals_flat, fidxs = flatten(tf_vals)

  session = tf.get_default_session()
  feed_dict = {}
  feed_dict[self.dataName] = dataset_name
  feed_dict[self.keep_prob] = 1.0

  np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict)

  # ff is a cursor into fidxs, advanced once per logical quantity, in the
  # order the quantities were appended to tf_vals above.
  ff = 0
  factors = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  gen_states = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  gen_ics = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  costs = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  output_dist_params = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  if hps.ic_dim > 0:
    prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
    prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1
  if hps.co_dim > 0:
    prior_zs_ar_con = [np_vals_flat[f] for f in fidxs[ff]]; ff += 1

  # [0] are to take out the non-temporal items from lists
  gen_ics = gen_ics[0]
  costs = costs[0]

  # Convert to full tensors, not lists of tensors in time dim.
  gen_states = list_t_bxn_to_tensor_bxtxn(gen_states)
  factors = list_t_bxn_to_tensor_bxtxn(factors)
  output_dist_params = list_t_bxn_to_tensor_bxtxn(output_dist_params)
  if hps.ic_dim > 0:
    prior_g0_mean = prior_g0_mean[0]
    prior_g0_logvar = prior_g0_logvar[0]
  if hps.co_dim > 0:
    prior_zs_ar_con = list_t_bxn_to_tensor_bxtxn(prior_zs_ar_con)

  model_vals = {}
  model_vals['gen_ics'] = gen_ics
  model_vals['gen_states'] = gen_states
  model_vals['factors'] = factors
  model_vals['output_dist_params'] = output_dist_params
  # reshape(1) so the scalar cost is stored as a 1-element array for h5py.
  model_vals['costs'] = costs.reshape(1)
  if hps.ic_dim > 0:
    model_vals['prior_g0_mean'] = prior_g0_mean
    model_vals['prior_g0_logvar'] = prior_g0_logvar
  if hps.co_dim > 0:
    model_vals['prior_zs_ar_con'] = prior_zs_ar_con

  full_fname = os.path.join(hps.lfads_save_dir, output_fname)
  write_data(full_fname, model_vals, compression='gzip')
  print("Done.")
@staticmethod
def eval_model_parameters(use_nested=True, include_strs=None):
  """Evaluate and return all of the TF variables in the model.

  Args:
    use_nested (optional): For returning values, use a nested dictionary, based
      on variable scoping, or return all variables in a flat dictionary.
    include_strs (optional): A list of strings to use as a filter, to reduce the
      number of variables returned.  A variable name must contain at least one
      string in include_strs as a sub-string in order to be returned.

  Returns:
    The parameters of the model.  This can be in a flat
    dictionary, or a nested dictionary, where the nesting is by variable
    scope.
  """
  all_tf_vars = tf.global_variables()
  session = tf.get_default_session()
  all_tf_vars_eval = session.run(all_tf_vars)
  vars_dict = {}
  strs = ["LFADS"]
  if include_strs:
    strs += include_strs

  for var, var_eval in zip(all_tf_vars, all_tf_vars_eval):
    # BUG FIX: test whether the variable NAME contains one of the filter
    # strings.  The broken version iterated the *characters* of var.name
    # and tested membership in include_strs (crashing when include_strs
    # was None, and never using the `strs` list built above).
    if any(s in var.name for s in strs):
      if not isinstance(var_eval, np.ndarray):  # for H5PY
        print(var.name, """ is not numpy array, saving as numpy array
              with value: """, var_eval, type(var_eval))
        e = np.array(var_eval)
        print(e, type(e))
      else:
        e = var_eval
      vars_dict[var.name] = e

  if not use_nested:
    return vars_dict

  # Build a nested dict keyed by the '/'-separated variable scope parts;
  # the leaf holds the evaluated value.
  var_names = vars_dict.keys()
  nested_vars_dict = {}
  for var_name in var_names:
    var_split_name_list = var_name.split('/')
    split_name_list_len = len(var_split_name_list)
    current_dict = nested_vars_dict
    for p, part in enumerate(var_split_name_list):
      if p < split_name_list_len - 1:
        # Intermediate scope: descend, creating the level if missing.
        if part in current_dict:
          current_dict = current_dict[part]
        else:
          current_dict[part] = {}
          current_dict = current_dict[part]
      else:
        current_dict[part] = vars_dict[var_name]

  return nested_vars_dict
@staticmethod
def spikify_rates(rates_bxtxd):
  """Randomly spikify underlying rates according a Poisson distribution

  Args:
    rates_bxtxd: a numpy tensor with shape (batch, time, dims) holding
      non-negative Poisson rates.

  Returns:
    A numpy int32 array with the same shape as rates_bxtxd, but with the
    rates replaced by Poisson-sampled event counts.
  """
  B, T, N = rates_bxtxd.shape
  assert B > 0 and N > 0, "Expected non-empty batch and data dimensions."
  # np.random.poisson accepts an array of rates and samples every element
  # in a single vectorized call, replacing the original O(B*T*N) Python
  # triple loop.  Cast to int32 to preserve the original output dtype.
  return np.random.poisson(rates_bxtxd).astype(np.int32)
|
andela-ooladayo/django
|
refs/heads/master
|
django/db/backends/base/validation.py
|
393
|
class BaseDatabaseValidation(object):
    """Backend-specific model validation hooks.

    Database backends subclass this to report model/field problems that
    only apply to that particular backend; the base class reports none.
    """

    def __init__(self, connection):
        # The database connection whose backend is being validated.
        self.connection = connection

    def check_field(self, field, **kwargs):
        """Return a list of check messages for `field` (none by default)."""
        return []
|
briancline/pyruse
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup

from pyruse.version import version_str

# Long-form metadata is read from the repository's top-level files at
# build time.
readme_text = open('README.rst').read()
requirements = open('requirements.txt').readlines()

setup(
    name='pyruse',
    version=version_str,
    author='Brian Cline',
    author_email='[email protected]',
    description=('Peruse metadata about Python releases (namely version '
                 'information).'),
    long_description=readme_text,
    license='MIT',
    keywords='python release metadata version',
    url='https://github.com/briancline/pyruse',
    packages=['pyruse'],
    install_requires=requirements,
    test_suite='nose.collector',
    entry_points={
        'console_scripts': [
            'pyruse-versions=pyruse.commands:versions_main'
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'Environment :: Console',
        'Operating System :: POSIX :: Linux',
        'Operating System :: POSIX :: AIX',
        'Operating System :: POSIX :: HP-UX',
        'Operating System :: POSIX :: IRIX',
        'Operating System :: POSIX :: SunOS/Solaris',
        'Operating System :: POSIX :: BSD :: BSD/OS',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: BSD :: NetBSD',
        'Operating System :: POSIX :: BSD :: OpenBSD',
        'Operating System :: Unix',
        'Programming Language :: Python',
    ],
)
|
dbckz/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
|
28
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata consumed by the ansible-doc / validation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'curated'}
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners (see example).
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
# boto is an optional dependency: record availability in HAS_BOTO instead of
# crashing at import time (presumably checked later, e.g. in main(), so the
# module can fail with a clean message -- TODO confirm against the caller).
try:
    import boto
    import boto.ec2.elb
    import boto.ec2.elb.attributes
    import boto.vpc
    from boto.ec2.elb.healthcheck import HealthCheck
    from boto.ec2.tag import Tag
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
import functools
import os
import random
import time

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError
from ansible.module_utils.ec2 import get_aws_connection_info
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
    """Open a boto VPC connection for `region`, failing the module on error."""
    try:
        vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    else:
        return vpc_conn
# Maximum retry attempts for AWS calls that hit API throttling.
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
             zones=None, purge_zones=None, security_group_ids=None,
             health_check=None, subnets=None, purge_subnets=None,
             scheme="internet-facing", connection_draining_timeout=None,
             idle_timeout=None,
             cross_az_load_balancing=None, access_logs=None,
             stickiness=None, wait=None, wait_timeout=None, tags=None,
             region=None,
             instance_ids=None, purge_instance_ids=None, **aws_connect_params):
    """Record the desired ELB configuration and open AWS connections.

    The arguments mirror the module parameters described in DOCUMENTATION;
    **aws_connect_params is passed through unchanged to boto's connection
    helpers.
    """
    # Desired configuration, consumed by the ensure_*/_set_* methods.
    self.module = module
    self.name = name
    self.listeners = listeners
    self.purge_listeners = purge_listeners
    self.instance_ids = instance_ids
    self.purge_instance_ids = purge_instance_ids
    self.zones = zones
    self.purge_zones = purge_zones
    self.security_group_ids = security_group_ids
    self.health_check = health_check
    self.subnets = subnets
    self.purge_subnets = purge_subnets
    self.scheme = scheme
    self.connection_draining_timeout = connection_draining_timeout
    self.idle_timeout = idle_timeout
    self.cross_az_load_balancing = cross_az_load_balancing
    self.access_logs = access_logs
    self.stickiness = stickiness
    self.wait = wait
    self.wait_timeout = wait_timeout
    self.tags = tags

    self.aws_connect_params = aws_connect_params
    self.region = region

    # Result state mutated as operations are performed.
    self.changed = False
    self.status = 'gone'

    # Order matters: _get_elb() needs self.elb_conn to already exist.
    self.elb_conn = self._get_elb_connection()
    self.elb = self._get_elb()
    self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
    """Create the ELB, or converge an existing ELB to the desired state."""
    if not self.elb:
        # Zones and listeners will be added at creation
        self._create_elb()
    else:
        self._set_zones()
        self._set_security_groups()
        self._set_elb_listeners()
        self._set_subnets()

    self._set_health_check()
    # boto has introduced support for some ELB attributes in
    # different versions, so we check first before trying to
    # set them to avoid errors
    if self._check_attribute_support('connection_draining'):
        self._set_connection_draining_timeout()
    if self._check_attribute_support('connecting_settings'):
        self._set_idle_timeout()
    if self._check_attribute_support('cross_zone_load_balancing'):
        self._set_cross_az_load_balancing()
    if self._check_attribute_support('access_log'):
        self._set_access_log()
    # add sticky options
    self.select_stickiness_policy()

    # ensure backend server policies are correct
    self._set_backend_policies()
    # set/remove instance ids
    self._set_instance_ids()

    self._set_tags()
def ensure_gone(self):
    """Destroy the ELB, optionally waiting until AWS confirms removal."""
    if self.elb:
        self._delete_elb()
    if self.wait:
        elb_removed = self._wait_for_elb_removed()
        # Unfortunately even though the ELB itself is removed quickly
        # the interfaces take longer so reliant security groups cannot
        # be deleted until the interface has registered as removed.
        elb_interface_removed = self._wait_for_elb_interface_removed()
        if not (elb_removed and elb_interface_removed):
            self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
    """Return a dict describing the ELB's current state.

    Always contains name/status/region; when the ELB exists it also
    includes zones, listeners, instance health counts, supported
    attributes and tags.
    """
    try:
        check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt.  Any lookup failure is still
        # treated as "ELB does not exist" (deliberate best-effort).
        check_elb = None

    if not check_elb:
        info = {
            'name': self.name,
            'status': self.status,
            'region': self.region
        }
    else:
        # Stickiness policies are optional; missing/empty lists mean None.
        try:
            lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
        except Exception:
            lb_cookie_policy = None
        try:
            app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
        except Exception:
            app_cookie_policy = None

        info = {
            'name': check_elb.name,
            'dns_name': check_elb.dns_name,
            'zones': check_elb.availability_zones,
            'security_group_ids': check_elb.security_groups,
            'status': self.status,
            'subnets': self.subnets,
            'scheme': check_elb.scheme,
            'hosted_zone_name': check_elb.canonical_hosted_zone_name,
            'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
            'lb_cookie_policy': lb_cookie_policy,
            'app_cookie_policy': app_cookie_policy,
            'proxy_policy': self._get_proxy_protocol_policy(),
            'backends': self._get_backend_policies(),
            'instances': [instance.id for instance in check_elb.instances],
            'out_of_service_count': 0,
            'in_service_count': 0,
            'unknown_instance_state_count': 0,
            'region': self.region
        }

        # status of instances behind the ELB
        if info['instances']:
            info['instance_health'] = [dict(
                instance_id=instance_state.instance_id,
                reason_code=instance_state.reason_code,
                state=instance_state.state
            ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
        else:
            info['instance_health'] = []

        # instance state counts: InService or OutOfService
        if info['instance_health']:
            for instance_state in info['instance_health']:
                if instance_state['state'] == "InService":
                    info['in_service_count'] += 1
                elif instance_state['state'] == "OutOfService":
                    info['out_of_service_count'] += 1
                else:
                    info['unknown_instance_state_count'] += 1

        if check_elb.health_check:
            info['health_check'] = {
                'target': check_elb.health_check.target,
                'interval': check_elb.health_check.interval,
                'timeout': check_elb.health_check.timeout,
                'healthy_threshold': check_elb.health_check.healthy_threshold,
                'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
            }

        if check_elb.listeners:
            info['listeners'] = [self._api_listener_as_tuple(l)
                                 for l in check_elb.listeners]
        elif self.status == 'created':
            # When creating a new ELB, listeners don't show in the
            # immediately returned result, so just include the
            # ones that were added
            info['listeners'] = [self._listener_as_tuple(l)
                                 for l in self.listeners]
        else:
            info['listeners'] = []

    # Attribute support varies by boto version; probe before reading.
    if self._check_attribute_support('connection_draining'):
        info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)

    if self._check_attribute_support('connecting_settings'):
        info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout

    if self._check_attribute_support('cross_zone_load_balancing'):
        is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
        if is_cross_az_lb_enabled:
            info['cross_az_load_balancing'] = 'yes'
        else:
            info['cross_az_load_balancing'] = 'no'

    # return stickiness info?

    info['tags'] = self.tags

    return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
    """Poll until AWS reports the ELB gone, or the wait timeout expires.

    Returns:
        True if a LoadBalancerNotFound error is seen within the timeout,
        False otherwise.
    """
    polling_increment_secs = 15
    # BUG FIX: integer division -- "/" yields a float on Python 3 and
    # range() then raises TypeError.
    max_retries = int(self.wait_timeout // polling_increment_secs)
    status_achieved = False

    for _ in range(0, max_retries):
        try:
            self.elb_conn.get_all_lb_attributes(self.name)
        except Exception as e:
            # Python 3 compatibility: StandardError no longer exists, so
            # catch broadly and inspect the boto error code when present.
            if "LoadBalancerNotFound" in getattr(e, "code", ""):
                status_achieved = True
                break
            time.sleep(polling_increment_secs)
        else:
            # ELB still exists -- wait before polling again.  (The
            # original only slept on unrelated errors, so a live ELB
            # burned through every retry without waiting.)
            time.sleep(polling_increment_secs)

    return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
    """Poll until the ELB's network interfaces disappear, or time out.

    ELB network interfaces linger after the load balancer itself is
    deleted, so dependent security groups cannot be removed until then.

    Returns:
        True once the interfaces are gone, False on timeout.
    """
    polling_increment_secs = 15
    # BUG FIX: integer division -- a float count breaks range() on
    # Python 3.
    max_retries = int(self.wait_timeout // polling_increment_secs)
    status_achieved = False

    elb_interfaces = self.ec2_conn.get_all_network_interfaces(
        filters={'attachment.instance-owner-id': 'amazon-elb',
                 'description': 'ELB {0}'.format(self.name)})

    for _ in range(0, max_retries):
        for interface in elb_interfaces:
            try:
                result = self.ec2_conn.get_all_network_interfaces(interface.id)
                if result == []:
                    status_achieved = True
                    break
                else:
                    time.sleep(polling_increment_secs)
            except Exception as e:
                # Python 3 compatibility: StandardError is gone; inspect
                # the boto error code when present.
                if 'InvalidNetworkInterfaceID' in getattr(e, 'code', ''):
                    status_achieved = True
                    break
                else:
                    self.module.fail_json(msg=str(e))

    return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
    """Return the existing load balancer named self.name, or None.

    Also flips self.status to 'ok' when the ELB is found.
    """
    match = next((lb for lb in self.elb_conn.get_all_load_balancers()
                  if lb.name == self.name), None)
    if match is not None:
        self.status = 'ok'
    return match
def _get_elb_connection(self):
    """Open a boto ELB connection for self.region; fail the module on error."""
    try:
        conn = connect_to_aws(boto.ec2.elb, self.region,
                              **self.aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        self.module.fail_json(msg=str(e))
    else:
        return conn
def _get_ec2_connection(self):
    """Open a boto EC2 connection for self.region; fail the module on error.

    CONSISTENCY/PY3 FIX: catches AnsibleAWSError like the sibling
    _get_elb_connection() instead of the Python-2-only StandardError
    (a NameError on Python 3).
    """
    try:
        return connect_to_aws(boto.ec2, self.region,
                              **self.aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        self.module.fail_json(msg=str(e))
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
    """Delete the load balancer; boto raises on failure."""
    deleted = self.elb_conn.delete_load_balancer(name=self.name)
    if deleted:
        self.status = 'deleted'
        self.changed = True
def _create_elb(self):
    """Create the load balancer with the configured zones/listeners/subnets."""
    listeners = [self._listener_as_tuple(l) for l in self.listeners]
    self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                  zones=self.zones,
                                                  security_groups=self.security_group_ids,
                                                  complex_listeners=listeners,
                                                  subnets=self.subnets,
                                                  scheme=self.scheme)
    if self.elb:
        # HACK: Work around a boto bug in which the listeners attribute is
        # always set to the listeners argument to create_load_balancer, and
        # not the complex_listeners
        # We're not doing a self.elb = self._get_elb here because there
        # might be eventual consistency issues and it doesn't necessarily
        # make sense to wait until the ELB gets returned from the EC2 API.
        # This is necessary in the event we hit the throttling errors and
        # need to retry ensure_ok
        # See https://github.com/boto/boto/issues/3526
        self.elb.listeners = self.listeners
        self.changed = True
        self.status = 'created'
def _create_elb_listeners(self, listeners):
    """Takes a list of listener tuples and creates them"""
    # True if succeeds, exception raised if not; the boolean return from
    # boto doubles as the module's "changed" flag.
    self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
                                                                complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
    """
    Creates listeners specified by self.listeners; overwrites existing
    listeners on these ports; removes extraneous listeners
    """
    listeners_to_add = []
    listeners_to_remove = []
    listeners_to_keep = []

    # Check for any listeners we need to create or overwrite
    for listener in self.listeners:
        listener_as_tuple = self._listener_as_tuple(listener)

        # First we loop through existing listeners to see if one is
        # already specified for this port
        existing_listener_found = None
        for existing_listener in self.elb.listeners:
            # Since ELB allows only one listener on each incoming port, a
            # single match on the incoming port is all we're looking for
            if existing_listener[0] == int(listener['load_balancer_port']):
                existing_listener_found = self._api_listener_as_tuple(existing_listener)
                break

        if existing_listener_found:
            # Does it match exactly?
            if listener_as_tuple != existing_listener_found:
                # The ports are the same but something else is different,
                # so we'll remove the existing one and add the new one
                listeners_to_remove.append(existing_listener_found)
                listeners_to_add.append(listener_as_tuple)
            else:
                # We already have this listener, so we're going to keep it
                listeners_to_keep.append(existing_listener_found)
        else:
            # We didn't find an existing listener, so just add the new one
            listeners_to_add.append(listener_as_tuple)

    # Check for any extraneous listeners we need to remove, if desired
    if self.purge_listeners:
        for existing_listener in self.elb.listeners:
            existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
            if existing_listener_tuple in listeners_to_remove:
                # Already queued for removal
                continue
            if existing_listener_tuple in listeners_to_keep:
                # Keep this one around
                continue
            # Since we're not already removing it and we don't need to keep
            # it, let's get rid of it
            listeners_to_remove.append(existing_listener_tuple)

    # Removals must happen before additions so a replaced port is free
    # when its new listener is created.
    if listeners_to_remove:
        self._delete_elb_listeners(listeners_to_remove)

    if listeners_to_add:
        self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
    """Enable the given availability zones on the ELB, failing the module on error."""
    try:
        self.elb.enable_zones(zones)
    except boto.exception.BotoServerError as e:
        # NOTE(review): only "Invalid Availability Zone" surfaces its real
        # message; every other server error is reported generically below.
        if "Invalid Availability Zone" in e.error_message:
            self.module.fail_json(msg=e.error_message)
        else:
            self.module.fail_json(msg="an unknown server error occurred, please try again later")
    self.changed = True
def _disable_zones(self, zones):
    """Disable the given availability zones on the ELB, failing the module on error."""
    try:
        self.elb.disable_zones(zones)
    except boto.exception.BotoServerError as e:
        # NOTE(review): same error masking as _enable_zones -- only the
        # invalid-zone case surfaces boto's message.
        if "Invalid Availability Zone" in e.error_message:
            self.module.fail_json(msg=e.error_message)
        else:
            self.module.fail_json(msg="an unknown server error occurred, please try again later")
    self.changed = True
def _attach_subnets(self, subnets):
    """Attach the given subnet ids to the ELB and mark the module changed."""
    self.elb_conn.attach_lb_to_subnets(self.name, subnets)
    self.changed = True
def _detach_subnets(self, subnets):
    """Detach the given subnet ids from the ELB and mark the module changed."""
    self.elb_conn.detach_lb_from_subnets(self.name, subnets)
    self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
    """Apply self.security_group_ids when they differ (as sets) from the ELB's current groups."""
    if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
        self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
        self.changed = True
def _set_health_check(self):
    """Set health check values on ELB as needed"""
    if self.health_check:
        # This just makes it easier to compare each of the attributes
        # and look for changes. Keys are attributes of the current
        # health_check; values are desired values of new health_check
        health_check_config = {
            "target": self._get_health_check_target(),
            "timeout": self.health_check['response_timeout'],
            "interval": self.health_check['interval'],
            "unhealthy_threshold": self.health_check['unhealthy_threshold'],
            "healthy_threshold": self.health_check['healthy_threshold'],
        }

        update_health_check = False

        # The health_check attribute is *not* set on newly created
        # ELBs! So we have to create our own.
        if not self.elb.health_check:
            self.elb.health_check = HealthCheck()

        # Copy any differing desired values onto the boto object and
        # remember that an API update is needed.
        for attr, desired_value in health_check_config.items():
            if getattr(self.elb.health_check, attr) != desired_value:
                setattr(self.elb.health_check, attr, desired_value)
                update_health_check = True

        if update_health_check:
            self.elb.configure_health_check(self.elb.health_check)
            self.changed = True
def _check_attribute_support(self, attr):
    """Return True if the installed boto version exposes ELB attribute `attr`."""
    return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
    """Enable/update S3 access logging per self.access_logs, or disable it."""
    attributes = self.elb.get_attributes()
    if self.access_logs:
        if 's3_location' not in self.access_logs:
            self.module.fail_json(msg='s3_location information required')

        # Desired attribute values; defaults mirror the documented module
        # defaults (prefix '', 60-minute emit interval).
        access_logs_config = {
            "enabled": True,
            "s3_bucket_name": self.access_logs['s3_location'],
            "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
            "emit_interval": self.access_logs.get('interval', 60),
        }

        update_access_logs_config = False
        for attr, desired_value in access_logs_config.items():
            if getattr(attributes.access_log, attr) != desired_value:
                setattr(attributes.access_log, attr, desired_value)
                update_access_logs_config = True
        if update_access_logs_config:
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
            self.changed = True
    elif attributes.access_log.enabled:
        # No access_logs requested but logging is on: turn it off.
        attributes.access_log.enabled = False
        self.changed = True
        self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
    """Enable connection draining with the requested timeout, or disable it."""
    attributes = self.elb.get_attributes()
    if self.connection_draining_timeout is not None:
        # Enable (or retune) draining when it is off or the timeout differs.
        if not attributes.connection_draining.enabled or \
                attributes.connection_draining.timeout != self.connection_draining_timeout:
            self.changed = True
        attributes.connection_draining.enabled = True
        attributes.connection_draining.timeout = self.connection_draining_timeout
        self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
    else:
        # No timeout requested: switch draining off if currently enabled.
        if attributes.connection_draining.enabled:
            self.changed = True
            attributes.connection_draining.enabled = False
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
    def _set_idle_timeout(self):
        """Update the ELB idle connection timeout when it differs from the request.

        No-op when idle_timeout is None.  Sets self.changed on update.
        """
        attributes = self.elb.get_attributes()
        if self.idle_timeout is not None:
            if attributes.connecting_settings.idle_timeout != self.idle_timeout:
                self.changed = True
                attributes.connecting_settings.idle_timeout = self.idle_timeout
                self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
    def _create_policy(self, policy_param, policy_meth, policy):
        """Create an ELB policy via the named boto connection method.

        policy_meth is the name of a method on self.elb_conn (e.g.
        'create_lb_cookie_stickiness_policy'); policy_param is its first
        argument and policy is the policy name to create.
        """
        getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
    def _delete_policy(self, elb_name, policy):
        """Delete the named policy from the ELB *elb_name*."""
        self.elb_conn.delete_lb_policy(elb_name, policy)
    def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
        """Replace a policy by deleting and recreating it.

        NOTE(review): policy_attr is accepted but never used here; kept for
        call-site compatibility.
        """
        self._delete_policy(self.elb.name, policy)
        self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
    def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
        """Create or update one stickiness policy and bind it to HTTP listeners.

        Scans the existing policies of the kind named by policy_attrs['attr'];
        if one with our generated name (policy[0]) exists but its parameter
        (policy_attrs['dict_key']) differs, the policy is detached, deleted
        and recreated; if none exists, it is created.  Finally the policy is
        attached to every HTTP/HTTPS listener.  Sets self.changed on change.
        """
        for p in getattr(elb_info.policies, policy_attrs['attr']):
            if str(p.__dict__['policy_name']) == str(policy[0]):
                if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
                    # Parameter changed: detach from listeners, then recreate.
                    self._set_listener_policy(listeners_dict)
                    self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                    self.changed = True
                break
        else:
            # for/else: no policy with this name exists yet -- create it.
            self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
            self.changed = True
        self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']):
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
expiration = self.stickiness['expiration'] if self.stickiness['expiration'] is not 0 else None
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif not self.module.boolean(self.stickiness['enabled']):
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']):
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif not self.module.boolean(self.stickiness['enabled']):
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
    def _set_backend_policies(self):
        """Sets policies for all backends

        Ensures each backend instance port carries the ProxyProtocol-policy
        exactly when its listener requests proxy_protocol, creating the
        policy on the ELB first if any listener needs it.
        """
        ensure_proxy_protocol = False
        replace = []
        backend_policies = self._get_backend_policies()
        # Find out what needs to be changed
        for listener in self.listeners:
            want = False
            if 'proxy_protocol' in listener and listener['proxy_protocol']:
                ensure_proxy_protocol = True
                want = True
            if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
                # Policy present but no longer wanted: clear it.
                if not want:
                    replace.append({'port': listener['instance_port'], 'policies': []})
            elif want:
                # Policy wanted but missing: install it.
                replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
        # enable or disable proxy protocol
        if ensure_proxy_protocol:
            self._set_proxy_protocol_policy()
        # Make the backend policies so
        for item in replace:
            self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
            self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
    def _set_proxy_protocol_policy(self):
        """Install a proxy protocol policy if needed"""
        proxy_policy = self._get_proxy_protocol_policy()
        if proxy_policy is None:
            # Create the ProxyProtocol policy on the ELB; binding it to
            # backend ports happens in _set_backend_policies().
            self.elb_conn.create_lb_policy(
                self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
            )
            self.changed = True
        # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
    def _set_instance_ids(self):
        """Register or deregister instances from an lb instance"""
        assert_instances = self.instance_ids or []
        has_instances = self._get_instance_ids()
        # Instances requested but not yet registered.
        add_instances = self._diff_list(assert_instances, has_instances)
        if add_instances:
            self.elb_conn.register_instances(self.elb.name, add_instances)
            self.changed = True
        if self.purge_instance_ids:
            # Only deregister extras when purging was explicitly requested.
            remove_instances = self._diff_list(has_instances, assert_instances)
            if remove_instances:
                self.elb_conn.deregister_instances(self.elb.name, remove_instances)
                self.changed = True
    def _set_tags(self):
        """Add/Delete tags

        Diffs the requested self.tags against the ELB's current tags and
        issues AddTags/RemoveTags requests for the differences only.
        No-op when self.tags is None.
        """
        if self.tags is None:
            return
        params = {'LoadBalancerNames.member.1': self.name}
        tagdict = dict()
        # get the current list of tags from the ELB, if ELB exists
        if self.elb:
            current_tags = self.elb_conn.get_list('DescribeTags', params,
                                                  [('member', Tag)])
            tagdict = dict((tag.Key, tag.Value) for tag in current_tags
                           if hasattr(tag, 'Key'))
        # Add missing tags
        dictact = dict(set(self.tags.items()) - set(tagdict.items()))
        if dictact:
            for i, key in enumerate(dictact):
                params['Tags.member.%d.Key' % (i + 1)] = key
                params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
            self.elb_conn.make_request('AddTags', params)
            self.changed=True
        # Remove extra tags
        dictact = dict(set(tagdict.items()) - set(self.tags.items()))
        if dictact:
            for i, key in enumerate(dictact):
                params['Tags.member.%d.Key' % (i + 1)] = key
            self.elb_conn.make_request('RemoveTags', params)
            self.changed=True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
    """Ansible module entry point: validate parameters, build an ElbManager
    and converge the ELB to the requested 'present'/'absent' state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        instance_ids={'default': None, 'required': False, 'type': 'list'},
        purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        security_group_names={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False},
        connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
        idle_timeout={'default': None, 'type': 'int', 'required': False},
        cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'},
        access_logs={'default': None, 'required': False, 'type': 'dict'},
        wait={'default': False, 'type': 'bool', 'required': False},
        wait_timeout={'default': 60, 'type': 'int', 'required': False},
        tags={'default': None, 'required': False, 'type': 'dict'}
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['security_group_ids', 'security_group_names']]
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
    # Unpack module parameters into locals.
    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    instance_ids = module.params['instance_ids']
    purge_instance_ids = module.params['purge_instance_ids']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    access_logs = module.params['access_logs']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    idle_timeout = module.params['idle_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']
    wait = module.params['wait']
    wait_timeout = module.params['wait_timeout']
    tags = module.params['tags']
    # Cross-parameter validation that AnsibleModule cannot express.
    if state == 'present' and not listeners:
        module.fail_json(msg="At least one listener is required for ELB creation")
    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
    if wait_timeout > 600:
        module.fail_json(msg='wait_timeout maximum is 600 seconds')
    # Resolve security group names to ids; when subnets are given the ELB is
    # in a VPC, so restrict the lookup to that VPC.
    if security_group_names:
        security_group_ids = []
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
            if subnets: # We have at least one subnet, ergo this is a VPC
                vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
                vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
                filters = {'vpc_id': vpc_id}
            else:
                filters = None
            grp_details = ec2.get_all_security_groups(filters=filters)
            for group_name in security_group_names:
                if isinstance(group_name, basestring):
                    group_name = [group_name]
                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg = str(e))
    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, idle_timeout,
                         cross_az_load_balancing,
                         access_logs, stickiness, wait, wait_timeout, tags,
                         region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
                         **aws_connect_params)
    # check for unsupported attributes for this version of boto
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
    if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
        module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
    # Converge to the requested state and report.
    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()
    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)
    module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
main()
|
roscoeZA/GeoGigSync
|
refs/heads/master
|
pycharm-debug.egg_FILES/third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py
|
321
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from . import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
    """Concrete Grammar subclass emitted by ParserGenerator; adds nothing."""
    pass
class ParserGenerator(object):
    """Parse a pgen grammar file into DFAs and FIRST sets.

    Pipeline: tokenize the grammar -> parse each rule into an NFA ->
    subset-construct a DFA -> simplify it -> compute FIRST sets ->
    assemble everything into a PgenGrammar via make_grammar().
    (Python 2 code: relies on dict.iteritems(), list-returning keys()
    and generator.next().)
    """

    def __init__(self, filename, stream=None):
        # If no stream is supplied, open (and later close) the file ourselves.
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken() # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {} # map from symbol name to set of tokens
        self.addfirstsets()

    def make_grammar(self):
        """Convert the parsed DFAs into a PgenGrammar instance."""
        c = PgenGrammar()
        names = self.dfas.keys()
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        # Assign symbol numbers; non-terminals start at 256.
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in state.arcs.iteritems():
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    # Arc (0, self) marks an accepting state.
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c, name):
        """Return the FIRST set of rule *name* keyed by internal label numbers."""
        rawfirst = self.first[name]
        first = {}
        for label in rawfirst:
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c, label):
        """Map a grammar label (symbol, named token, keyword or operator)
        to its internal label number, registering it on first use."""
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value] # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel

    def addfirstsets(self):
        """Compute the FIRST set of every rule."""
        names = self.dfas.keys()
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            #print name, self.first[name].keys()

    def calcfirst(self, name):
        """Recursively compute the FIRST set of rule *name*."""
        dfa = self.dfas[name]
        self.first[name] = None # dummy to detect left recursion
        state = dfa[0]
        totalset = {}
        overlapcheck = {}
        for label, next in state.arcs.iteritems():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        # Two alternatives with overlapping FIRST sets make the grammar
        # ambiguous for a one-token-lookahead parser.
        inverse = {}
        for label, itsfirst in overlapcheck.iteritems():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError("rule %s is ambiguous; %s is in the"
                                     " first sets of %s as well as %s" %
                                     (name, symbol, label, inverse[symbol]))
                inverse[symbol] = label
        self.first[name] = totalset

    def parse(self):
        """Parse the whole grammar file; return (dfas, startsymbol)."""
        dfas = {}
        startsymbol = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            #self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            #self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            #print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        return dfas, startsymbol

    def make_dfa(self, start, finish):
        """Subset-construct a DFA from the NFA between *start* and *finish*."""
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA. Then do some
        # state reduction. Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)
        def closure(state):
            # Epsilon closure of a single NFA state.
            base = {}
            addclosure(state, base)
            return base
        def addclosure(state, base):
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)
        states = [DFAState(closure(start), finish)]
        for state in states: # NB states grows while we're iterating
            arcs = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in arcs.iteritems():
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states # List of DFAState instances; first one is start

    def dump_nfa(self, name, start, finish):
        """Debug helper: print the NFA reachable from *start*."""
        print "Dump of NFA for", name
        todo = [start]
        for i, state in enumerate(todo):
            print "  State", i, state is finish and "(final)" or ""
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print "    -> %d" % j
                else:
                    print "    %s -> %d" % (label, j)

    def dump_dfa(self, name, dfa):
        """Debug helper: print the DFA."""
        print "Dump of DFA for", name
        for i, state in enumerate(dfa):
            print "  State", i, state.isfinal and "(final)" or ""
            for label, next in state.arcs.iteritems():
                print "    %s -> %d" % (label, dfa.index(next))

    def simplify_dfa(self, dfa):
        """Merge equivalent DFA states in place until a fixed point."""
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.
        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i+1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        #print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break

    def parse_rhs(self):
        """Parse RHS: ALT ('|' ALT)*; return (start, end) NFA states."""
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            # Join all alternatives between fresh start/end states.
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self):
        """Parse ALT: ITEM+; chain the items sequentially."""
        # ALT: ITEM+
        a, b = self.parse_item()
        while (self.value in ("(", "[") or
               self.type in (token.NAME, token.STRING)):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self):
        """Parse ITEM: '[' RHS ']' | ATOM ['+' | '*']."""
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self):
        """Parse ATOM: '(' RHS ')' | NAME | STRING."""
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                             self.type, self.value)

    def expect(self, type, value=None):
        """Consume and return the current token's value, which must match."""
        if self.type != type or (value is not None and self.value != value):
            self.raise_error("expected %s/%s, got %s/%s",
                             type, value, self.type, self.value)
        value = self.value
        self.gettoken()
        return value

    def gettoken(self):
        """Advance to the next significant token (skips COMMENT and NL)."""
        tup = self.generator.next()
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = self.generator.next()
        self.type, self.value, self.begin, self.end, self.line = tup
        #print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg, *args):
        """Raise SyntaxError at the current position, formatting msg % args."""
        if args:
            try:
                msg = msg % args
            except:
                msg = " ".join([msg] + map(str, args))
        raise SyntaxError(msg, (self.filename, self.end[0],
                                self.end[1], self.line))
class NFAState(object):
    """A state in a nondeterministic finite automaton.

    Outgoing arcs are stored as (label, NFAState) pairs; a label of None
    denotes an epsilon transition.
    """

    def __init__(self):
        # list of (label, NFAState) pairs
        self.arcs = []

    def addarc(self, next, label=None):
        """Append an arc to *next*; label is a str or None (epsilon)."""
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        arc = (label, next)
        self.arcs.append(arc)
class DFAState(object):
    """A DFA state: the set of NFA states it stands for, plus labelled arcs."""
    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        assert isinstance(iter(nfaset).next(), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        # Accepting iff the NFA's final state is in our subset.
        self.isfinal = final in nfaset
        self.arcs = {} # map from label to DFAState

    def addarc(self, next, label):
        """Add a deterministic arc; each label may appear only once."""
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old, new):
        # Redirect every arc that pointed at *old* to *new* (state merging).
        for label, next in self.arcs.iteritems():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.iteritems():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__ = None # For Py3 compatibility.
def generate_grammar(filename="Grammar.txt"):
    """Parse the grammar file *filename* and return the resulting PgenGrammar."""
    return ParserGenerator(filename).make_grammar()
|
izawaryu/venture
|
refs/heads/master
|
venture.py
|
1
|
# Ryu Izawa
# Written 2017-10-15
# Last updated 2017-10-31
import csv
import math
import json
import string
import random
import os.path
import numpy as np
import pandas as pd
import urllib, urllib2
# Seed coordinates for the crawl (decimal degrees).
start_latitude = 40.433183
start_longitude = -74.199800
# Google Maps API Keys
# SECURITY NOTE(review): API keys committed in source -- they should be
# revoked and loaded from an environment variable instead.
key = "&key=" + 'AIzaSyA_phpapSMniXBO2AfGjxp3ZfAD64wyh1s' # Verdi
#key = "&key=" + 'AIzaSyBXOIFAVM65wlaVsRjD-q6YnWQ4V0HpgZQ' # Dreamfish
# Probe step in degrees and maximum crawl radius from the start point.
step_size = 0.0001
search_radius = 0.01
# Accumulator of visited pano records and a hard cap on the crawl size.
locations=[]
locations_limit = 100000
def get_nearest_pano(coord):
    """Query the Street View metadata API for the pano nearest to *coord*.

    coord is a 'lat,lng' string.  Returns [date, pano_id, lat, lng, 0]
    (the trailing 0 is a placeholder later overwritten with the travel
    heading) or None when the API status is not 'OK'.
    """
    base = "https://maps.googleapis.com/maps/api/streetview/metadata?"
    arg_location = 'location=' + coord
    arg_heading = '&heading=' + '0'
    full_URL = base + arg_location + arg_heading + key
    req = urllib2.urlopen(full_URL)
    reply = req.read()
    json_parsed = json.loads(reply)
    json_status = json_parsed['status']
    if (json_status=='OK'):
        json_date = json_parsed['date']
        json_pano_id = json_parsed['pano_id']
        json_latitude = json_parsed['location']['lat']
        json_longitude = json_parsed['location']['lng']
        return [json_date, json_pano_id, json_latitude, json_longitude, 0]
    else:
        # No pano near this point (or quota/error status).
        return None
def there_exists_a_new_pano_at(some_location):
    """True iff *some_location* is a pano record whose pano_id (index 1)
    has not been recorded in the global 'locations' list yet."""
    visited_pano_ids = [pano[1] for pano in locations]
    return some_location is not None and some_location[1] not in visited_pano_ids
def distance_between_panos(first_pano, second_pano):
    """Return the straight-line lat/lon distance (in degrees) between two panos.

    Each pano is a sequence [date, pano_id, latitude, longitude, track];
    only indices 2 (lat) and 3 (lon) are used.  When either pano is None
    the distance is undefined and the sentinel 1 (degree) is returned,
    which is far larger than any single crawl step.

    Returns:
        float distance in degrees, or the int 1 for a None input.
    """
    if first_pano is None or second_pano is None:
        return 1
    # math.hypot is clearer and more numerically robust than
    # (dx**2 + dy**2) ** 0.5.
    return math.hypot(second_pano[2] - first_pano[2],
                      second_pano[3] - first_pano[3])
def some_point_to_relative_bearing(observer_position, observer_track, relative_bearing):
    # Given an observer's position, observer's track and some measure of distance,
    # return a pano in the direction of the relative bearing, the given distance away.
    # Return None if none exists.
    """Probe outward from *observer_position* for a pano along a bearing.

    observer_track and relative_bearing are in radians.  Probes at
    increasing multiples of step_size (steps 2..9) and returns the first
    acceptable pano, or None.  Element [4] of the returned pano is set to
    the absolute track in degrees.

    NOTE(review): latitude uses sin() and longitude cos() of the bearing;
    confirm this matches the geo-north-zero convention described in
    continue_along_path.
    """
    steps = 1
    new_point = None
    absolute_bearing = observer_track + relative_bearing
    lat_increment = math.sin(absolute_bearing)
    lon_increment = math.cos(absolute_bearing)
    while new_point is None:
        # steps starts at 2 (incremented before first use) and caps at 9.
        steps += 1
        if steps > 9:
            break
        latitude_of_the_new_point = observer_position[2] + (lat_increment * step_size * steps)
        longitude_of_the_new_point = observer_position[3] + (lon_increment * step_size * steps)
        coordinates_of_the_new_point = ('{},{}'.format(latitude_of_the_new_point, longitude_of_the_new_point))
        new_point = get_nearest_pano(coordinates_of_the_new_point)
        # Record the direction of travel.
        if new_point is not None:
            new_point[4] = math.degrees(observer_track)
        # Discard a hit that is unvisited but closer than one step from the
        # observer; keep probing further out.
        if there_exists_a_new_pano_at(new_point) and distance_between_panos(observer_position, new_point) < step_size:
            new_point = None
    return new_point
def continue_along_path(prior_point, current_point):
    """Recursively extend the crawl from *current_point*.

    Derives the current track from the last two points, then sweeps
    candidate bearings fore-to-aft on alternating sides; every newly
    discovered pano within search_radius of the global start_point is
    appended to the global 'locations' list and to venture.csv, then
    descended into recursively.  Always returns None.
    """
    lat_track = current_point[2] - prior_point[2]
    lon_track = current_point[3] - prior_point[3]
    # When working with this trig, consider '0' often runs horizontally
    # to the right in a conventional cartesian grid, with angles increasing counter-clockwise.
    # We're using an absolute lat/lon grid, so '0' is geo-north and angles increase clockwise.
    current_track = (math.atan2(lon_track,lat_track)+2*math.pi)%(2*math.pi)
    # Do not iterate beyond the limiting number of locations.
    if len(locations) <= locations_limit:
        # Determine some point ahead of the current position and track.
        # 'Ahead' here means some distance away in an arbitrary direction,
        # but not in reverse along the current track.
        # In this case, I'm checking all angles from fore to aft
        # along either side, in a fashion similar to breast stroke.
        # Angles are checked every pi/4 radians.
        # We do not consider checking panos in reverse.
        for relative_bearing in [math.pi * 0.0/6.0, \
                                 math.pi * 1.0/6.0, \
                                 math.pi * 11.0/6.0, \
                                 math.pi * 2.0/6.0, \
                                 math.pi * 10.0/6.0, \
                                 math.pi * 3.0/6.0, \
                                 math.pi * 9.0/6.0, \
                                 math.pi * 4.0/6.0, \
                                 math.pi * 8.0/6.0]:
            some_new_direction_of_travel = some_point_to_relative_bearing(current_point, current_track, relative_bearing)
            # If there is a new direction of travel (excluding reverse), follow it.
            if distance_between_panos(some_new_direction_of_travel, start_point) <= search_radius and \
               some_new_direction_of_travel is not None:
                if there_exists_a_new_pano_at(some_new_direction_of_travel):
                    # Persist the new pano immediately so the crawl can resume.
                    locations.append(some_new_direction_of_travel)
                    df = pd.DataFrame(some_new_direction_of_travel).T
                    df.to_csv('venture.csv', mode='a', header=False, index=False)
                    print('{}: travelling {:3.0f} from {:.4f}, {:.4f}'.format(len(locations), math.degrees(current_track), some_new_direction_of_travel[2], some_new_direction_of_travel[3]))
                    if distance_between_panos(current_point, some_new_direction_of_travel) >= step_size:
                        # Moved a full step: recurse with a fresh track.
                        continue_along_path(current_point, some_new_direction_of_travel)
                    else:
                        # Barely moved: keep the prior point to stabilize the track.
                        continue_along_path(prior_point, some_new_direction_of_travel)
    return None
def venture_outward_from_location(latitude, longitude):
    """Crawl outward along existing GSV panoramas from a seed point.

    Resumes from venture.csv when it exists (continuing from the last
    recorded pano); otherwise creates the file with a header row.
    """
    # BUGFIX: without this declaration the assignments below bound a
    # function-local 'locations', so the module-level list consumed by
    # there_exists_a_new_pano_at() was never populated on resume.
    global locations
    if os.path.isfile('./venture.csv'):
        with open('venture.csv', 'rb') as v:
            reader = csv.reader(v)
            locations = list(reader)
            latitude = locations[-1][2]
            longitude = locations[-1][3]
    else:
        locations = []
        df = pd.DataFrame(locations, columns=['date', 'pano_id', 'latitude', 'longitude', 'comment'])
        df.to_csv('venture.csv', index=False)
    coordinates = ('{},{}'.format(latitude, longitude))
    try:
        # Find the nearest panorama to the starting point, A.
        global start_point
        start_point = get_nearest_pano(coordinates)
        # Find another nearby panorama, B.
        next_point = some_point_to_relative_bearing(start_point, 0.0, 0.0)
    except ValueError:
        print('*** failure at venture_outward_from_location({})'.format(coordinates))
    locations.append(start_point)
    sp = pd.DataFrame(start_point).T
    sp.to_csv('venture.csv', mode='a', header=False, index=False)
    locations.append(next_point)
    # BUGFIX: renamed from 'np', which shadowed the 'numpy as np' import.
    next_df = pd.DataFrame(next_point).T
    next_df.to_csv('venture.csv', mode='a', header=False, index=False)
    continue_along_path(start_point, next_point)
venture_outward_from_location(start_latitude, start_longitude)
|
uliss/quneiform
|
refs/heads/master
|
tests/py/lpod/frame.py
|
1
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: David Versmisse <[email protected]>
# Hervé Cauwelier <[email protected]>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from lpod
from image import odf_create_image
from element import odf_create_element, odf_element, register_element_class
from paragraph import odf_create_paragraph
def odf_create_frame(name=None, size=('1cm', '1cm'), anchor_type='paragraph',
                     page_number=None, position=None, style=None):
    """Build a bare 'draw:frame' element of the given size.

    Size is a (width, height) tuple and position a (left, top) tuple;
    items are strings including the unit, e.g. ('10cm', '15cm').  When
    anchored by page, supply page_number.  Frames are rarely useful on
    their own -- prefer odf_create_image_frame / odf_create_text_frame.

    Arguments:

        name -- unicode
        size -- (str, str)
        anchor_type -- 'page', 'frame', 'paragraph' (default), 'char'
                       or 'as-char'
        page_number -- int (when anchor_type == 'page')
        position -- (str, str)
        style -- unicode

    Return: odf_element
    """
    frame = odf_create_element('draw:frame')
    frame.set_frame_size(size)
    frame.set_frame_anchor_type(anchor_type, page_number=page_number)
    if name:
        frame.set_attribute('draw:name', name)
    if position is not None:
        frame.set_frame_position(position)
    if style is not None:
        frame.set_attribute('draw:style-name', style)
    return frame
def odf_create_image_frame(uri, text=None, size=('1cm', '1cm'),
        anchor_type='paragraph', page_number=None, position=None, style=None):
    """Build a ready-to-use image: an image element wrapped in the frame
    that ODF requires around it.

    The optional text appears above the image. Size is a (width, height)
    tuple and position is a (left, top) tuple; items are strings carrying
    their unit, e.g. ('21cm', '29.7cm').

    Arguments:

        uri -- str

        text -- unicode

        size -- (str, str)

        anchor_type -- 'page', 'frame', 'paragraph', 'char' or 'as-char'

        page_number -- int (when anchor_type == 'page')

        position -- (str, str)

        style -- unicode

    Return: odf_element
    """
    image = odf_create_image(uri)
    if text:
        image.set_text_content(text)
    container = odf_create_frame(size=size, anchor_type=anchor_type,
            page_number=page_number, position=position, style=style)
    container.append(image)
    return container
def odf_create_text_frame(text_or_element, size=('1cm', '1cm'),
        anchor_type='paragraph', page_number=None, position=None, style=None,
        text_style=None):
    """Create a ready-to-use text box, since it must be embedded in a frame.

    Size is a (width, height) tuple and position is a (left, top) tuple; items
    are strings including the unit, e.g. ('21cm', '29.7cm').

    Arguments:

        text_or_element -- unicode or odf_element, or a list/tuple of them

        size -- (str, str)

        anchor_type -- 'page', 'frame', 'paragraph', 'char' or 'as-char'

        page_number -- int (when anchor_type == 'page')

        position -- (str, str)

        style -- unicode

        text_style -- unicode (also used for paragraphs built from raw text)

    Return: odf_element
    """
    frame = odf_create_frame(size=size, anchor_type=anchor_type,
            page_number=page_number, position=position, style=style)
    if text_style:
        frame.set_attribute('draw:text-style-name', text_style)
    text_box = odf_create_element('draw:text-box')
    # Accept a single item as well as a sequence of items
    if not isinstance(text_or_element, (list, tuple)):
        text_or_element = [text_or_element]
    for item in text_or_element:
        # FIX: use isinstance instead of "type(item) is unicode" so that
        # unicode subclasses are wrapped in a paragraph too, instead of
        # being appended as if they were elements
        if isinstance(item, unicode):
            item = odf_create_paragraph(item, style=text_style)
        text_box.append(item)
    frame.append(text_box)
    return frame
class odf_frame(odf_element):
    """Specialized class for "draw:frame" elements: a rectangular container
    anchored in the document, typically holding an image or a text box.
    """

    def get_frame_name(self):
        """Return the frame name ("draw:name"), or None if unset."""
        return self.get_attribute('draw:name')

    def set_frame_name(self, name):
        """Set the frame name ("draw:name").

        Arguments:

            name -- unicode
        """
        self.set_attribute('draw:name', name)

    def get_frame_size(self):
        """Get the size of the frame.

        Size is a (width, height) tuple with items including the unit,
        e.g. ('10cm', '15cm').

        Return: (str, str)
        """
        get_attr = self.get_attribute
        return get_attr('svg:width'), get_attr('svg:height')

    def set_frame_size(self, size):
        """Set the size of the frame.

        Size is a (width, height) tuple with items including the unit,
        e.g. ('10cm', '15cm'). The dimensions can be None.

        Arguments:

            size -- (str, str)
        """
        self.set_attribute('svg:width', size[0])
        self.set_attribute('svg:height', size[1])

    def get_frame_position(self):
        """Get the position of the frame relative to its anchor point.

        Position is a (left, top) tuple with items including the unit,
        e.g. ('10cm', '15cm').

        Return: (str, str)
        """
        get_attr = self.get_attribute
        return get_attr('svg:x'), get_attr('svg:y')

    def set_frame_position(self, position):
        """Set the position of the frame relative to its anchor point.

        Position is a (left, top) tuple with items including the unit,
        e.g. ('10cm', '15cm').

        Arguments:

            position -- (str, str)
        """
        self.set_attribute('svg:x', position[0])
        self.set_attribute('svg:y', position[1])

    def get_frame_anchor_type(self):
        """Get how the frame is attached to its environment.

        Return: 'page', 'frame', 'paragraph', 'char' or 'as-char'
        """
        return self.get_attribute('text:anchor-type')

    def set_frame_anchor_type(self, anchor_type, page_number=None):
        """Set how the frame is attached to its environment.

        When the type is 'page', you can give the number of the page where
        to attach.

        Arguments:

            anchor_type -- 'page', 'frame', 'paragraph', 'char' or 'as-char'

            page_number -- int (when anchor_type == 'page')
        """
        self.set_attribute('text:anchor-type', anchor_type)
        if anchor_type == 'page' and page_number:
            self.set_frame_page_number(page_number)

    def get_frame_page_number(self):
        """Get the number of the page where the frame is attached when the
        anchor type is 'page'.

        Return: int or None
        """
        page_number = self.get_attribute('text:anchor-page-number')
        if page_number is None:
            return None
        return int(page_number)

    def set_frame_page_number(self, page_number):
        """Set the number of the page where the frame is attached when the
        anchor type is 'page', or None to delete it.

        Arguments:

            page_number -- int or None
        """
        if page_number is None:
            self.set_attribute('text:anchor-page-number', None)
            # BUG FIX: previously this fell through and stored
            # str(None), i.e. the literal string "None", instead of
            # deleting the attribute
            return
        self.set_attribute('text:anchor-page-number', str(page_number))

    def get_formatted_text(self, context):
        """Return a plain-text rendering of the frame content (reST image
        directives when context['rst_mode'] is true).

        Arguments:

            context -- dict with at least the 'rst_mode' key

        Return: unicode
        """
        result = []
        for element in self.get_children():
            tag = element.get_tag()
            if tag == 'draw:image':
                if context['rst_mode']:
                    result.append(u'\n.. image:: %s\n' %
                            element.get_attribute('xlink:href'))
                else:
                    result.append(u'[Image %s]\n' %
                            element.get_attribute('xlink:href'))
            elif tag == 'draw:text-box':
                subresult = [u' ']
                # FIX: use a distinct loop variable instead of shadowing
                # the outer "element"
                for child in element.get_children():
                    subresult.append(child.get_formatted_text(context))
                text = u''.join(subresult)
                text = text.replace(u'\n', u'\n ')
                # BUG FIX: rstrip returns a new string; the previous code
                # discarded the result, leaving trailing spaces in place
                text = text.rstrip(' ')
                result.append(text)
            else:
                result.append(element.get_formatted_text(context))
        result.append(u'\n')
        return u''.join(result)
register_element_class('draw:frame', odf_frame)
|
kuiwei/kuiwei
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_progress.py
|
54
|
"""Module progress tests"""
import unittest
from mock import Mock
from xblock.field_data import DictFieldData
from xmodule.progress import Progress
from xmodule import x_module
from . import get_test_system
class ProgressTest(unittest.TestCase):
    ''' Test that basic Progress objects work. A Progress represents a
    fraction between 0 and 1.
    '''
    # Shared fixtures covering not-started, partial, half and complete states.
    not_started = Progress(0, 17)
    part_done = Progress(2, 6)
    half_done = Progress(3, 6)
    also_half_done = Progress(1, 2)
    done = Progress(7, 7)
    def test_create_object(self):
        '''Valid fractions construct; invalid denominators/types raise.'''
        # These should work:
        p = Progress(0, 2)
        p = Progress(1, 2)
        p = Progress(2, 2)
        p = Progress(2.5, 5.0)
        p = Progress(3.7, 12.3333)
        # These shouldn't
        self.assertRaises(ValueError, Progress, 0, 0)
        self.assertRaises(ValueError, Progress, 2, 0)
        self.assertRaises(ValueError, Progress, 1, -2)
        self.assertRaises(TypeError, Progress, 0, "all")
        # check complex numbers just for the heck of it :)
        self.assertRaises(TypeError, Progress, 2j, 3)
    def test_clamp(self):
        '''Out-of-range numerators are clamped into [0, denominator].'''
        self.assertEqual((2, 2), Progress(3, 2).frac())
        self.assertEqual((0, 2), Progress(-2, 2).frac())
    def test_frac(self):
        '''frac() returns the (numerator, denominator) pair.'''
        p = Progress(1, 2)
        (a, b) = p.frac()
        self.assertEqual(a, 1)
        self.assertEqual(b, 2)
    def test_percent(self):
        '''percent() maps the fraction to 0..100.'''
        self.assertEqual(self.not_started.percent(), 0)
        self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333)
        self.assertEqual(self.half_done.percent(), 50)
        self.assertEqual(self.done.percent(), 100)
        # equivalent fractions must give equal percentages
        self.assertEqual(self.half_done.percent(), self.also_half_done.percent())
    def test_started(self):
        '''started() is true once the numerator is non-zero.'''
        self.assertFalse(self.not_started.started())
        self.assertTrue(self.part_done.started())
        self.assertTrue(self.half_done.started())
        self.assertTrue(self.done.started())
    def test_inprogress(self):
        # only true if working on it
        self.assertFalse(self.done.inprogress())
        self.assertFalse(self.not_started.inprogress())
        self.assertTrue(self.part_done.inprogress())
        self.assertTrue(self.half_done.inprogress())
    def test_done(self):
        '''done() is true only when numerator == denominator.'''
        self.assertTrue(self.done.done())
        self.assertFalse(self.half_done.done())
        self.assertFalse(self.not_started.done())
    def test_str(self):
        '''str() renders as "numerator/denominator".'''
        self.assertEqual(str(self.not_started), "0/17")
        self.assertEqual(str(self.part_done), "2/6")
        self.assertEqual(str(self.done), "7/7")
    def test_ternary_str(self):
        '''Three-state summary: none / in_progress / done.'''
        self.assertEqual(self.not_started.ternary_str(), "none")
        self.assertEqual(self.half_done.ternary_str(), "in_progress")
        self.assertEqual(self.done.ternary_str(), "done")
    def test_to_js_status(self):
        '''Test the Progress.to_js_status_str() method'''
        self.assertEqual(Progress.to_js_status_str(self.not_started), "none")
        self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress")
        self.assertEqual(Progress.to_js_status_str(self.done), "done")
        # a missing progress object is encoded as the string "0"
        self.assertEqual(Progress.to_js_status_str(None), "0")
    def test_to_js_detail_str(self):
        '''Test the Progress.to_js_detail_str() method'''
        f = Progress.to_js_detail_str
        for p in (self.not_started, self.half_done, self.done):
            self.assertEqual(f(p), str(p))
        # But None should be encoded as 0
        self.assertEqual(f(None), "0")
    def test_add(self):
        '''Test the Progress.add_counts() method'''
        p = Progress(0, 2)
        p2 = Progress(1, 3)
        p3 = Progress(2, 5)
        pNone = None
        # add_counts sums numerators and denominators; None is the identity
        add = lambda a, b: Progress.add_counts(a, b).frac()
        self.assertEqual(add(p, p), (0, 4))
        self.assertEqual(add(p, p2), (1, 5))
        self.assertEqual(add(p2, p3), (3, 8))
        self.assertEqual(add(p2, pNone), p2.frac())
        self.assertEqual(add(pNone, p2), p2.frac())
    def test_equality(self):
        '''Test that comparing Progress objects for equality
        works correctly.'''
        p = Progress(1, 2)
        p2 = Progress(2, 4)
        p3 = Progress(1, 2)
        self.assertTrue(p == p3)
        # NOTE: equivalent but unequal fractions (1/2 vs 2/4) compare unequal
        self.assertFalse(p == p2)
        # Check != while we're at it
        self.assertTrue(p != p2)
        self.assertFalse(p != p3)
class ModuleProgressTest(unittest.TestCase):
    ''' Test that get_progress() does the right thing for the different modules
    '''
    def test_xmodule_default(self):
        '''Make sure default get_progress exists, returns None'''
        # A bare XModule with mocked descriptor/runtime reports no progress.
        xm = x_module.XModule(Mock(), get_test_system(), DictFieldData({'location': 'a://b/c/d/e'}), Mock())
        p = xm.get_progress()
        self.assertEqual(p, None)
|
sanguinariojoe/FreeCAD
|
refs/heads/master
|
src/Mod/Fem/femviewprovider/view_element_geometry2D.py
|
12
|
# ***************************************************************************
# * Copyright (c) 2015 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM element geometry 2D ViewProvider for the document object"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package view_element_geometry2D
# \ingroup FEM
# \brief view provider for element geometry 2D object
from femtaskpanels import task_element_geometry2D
from . import view_base_femconstraint
class VPElementGeometry2D(view_base_femconstraint.VPBaseFemConstraint):
    """
    A View Provider for the ElementGeometry2D object
    """
    def setEdit(self, vobj, mode=0):
        # Delegate to the shared base-class edit handler, supplying the
        # 2D-geometry task panel class that it will show in the task view.
        view_base_femconstraint.VPBaseFemConstraint.setEdit(
            self,
            vobj,
            mode,
            task_element_geometry2D._TaskPanel
        )
|
vertigo235/Sick-Beard-XEM
|
refs/heads/master
|
lib/html5lib/treewalkers/__init__.py
|
133
|
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
# Cache of treeType name -> TreeWalker class, filled lazily by getTreeWalker.
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive). Supported
               values are "simpletree", "dom", "etree" and "beautifulsoup"

               "simpletree" - a built-in DOM-ish tree type with support for some
                              more pythonic idioms.
               "dom" - The xml.dom.minidom DOM implementation
               "pulldom" - The xml.dom.pulldom event stream
               "etree" - A generic walker for tree implementations exposing an
                         elementtree-like interface (known to work with
                         ElementTree, cElementTree and lxml.etree).
               "lxml" - Optimized walker for lxml.etree
               "beautifulsoup" - Beautiful soup (if installed)
               "genshi" - a Genshi stream

    implementation - (Currently applies to the "etree" tree type only). A module
                     implementing the tree type e.g. xml.etree.ElementTree or
                     cElementTree."""
    treeType = treeType.lower()
    if treeType not in treeWalkerCache:
        # NOTE(review): the bare "import genshistream" / "import soup" etc.
        # below are Python 2 implicit relative imports of sibling modules.
        if treeType in ("dom", "pulldom", "simpletree"):
            # __import__ with globals() resolves the sibling module by name
            mod = __import__(treeType, globals())
            treeWalkerCache[treeType] = mod.TreeWalker
        elif treeType == "genshi":
            import genshistream
            treeWalkerCache[treeType] = genshistream.TreeWalker
        elif treeType == "beautifulsoup":
            import soup
            treeWalkerCache[treeType] = soup.TreeWalker
        elif treeType == "lxml":
            import lxmletree
            treeWalkerCache[treeType] = lxmletree.TreeWalker
        elif treeType == "etree":
            import etree
            # XXX: NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
    # .get() so an unknown treeType yields None rather than a KeyError
    return treeWalkerCache.get(treeType)
|
dneg/cortex
|
refs/heads/master
|
test/IECoreRI/MotionTest.py
|
7
|
##########################################################################
#
# Copyright (c) 2009-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import IECore
import IECoreRI
import os.path
import os
class MotionTest( IECoreRI.TestCase ) :
	"""Checks that a MotionBlock emits MotionBegin/MotionEnd in the generated RIB."""
	def test( self ) :
		r = IECoreRI.Renderer( "test/IECoreRI/output/motionTest.rib" )
		with IECore.WorldBlock( r ) :
			# two transform samples at shutter times 1.75 and 2.25
			with IECore.MotionBlock( r, [ 1.75, 2.25 ] ) :
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0 ) ) )
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 1 ) ) )
		# NOTE(review): file() and assert_ are Python 2 idioms (open()/assertTrue on Python 3)
		l = "\n".join( file( "test/IECoreRI/output/motionTest.rib" ).readlines() )
		self.assert_( "MotionBegin [ 1.75 2.25 ]" in l )
		self.assert_( "MotionEnd" in l )
if __name__ == "__main__":
unittest.main()
|
draugiskisprendimai/odoo
|
refs/heads/8.0
|
openerp/addons/base/__init__.py
|
379
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import workflow
import module
import res
import report
import tests
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rosmo/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_firewall_vipgrp64.py
|
24
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vipgrp64
short_description: Configure IPv6 to IPv4 virtual IP groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and vipgrp64 category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_vipgrp64:
description:
- Configure IPv6 to IPv4 virtual IP groups.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
color:
description:
- Integer value to determine the color of the icon in the GUI (range 1 to 32, default = 0, which sets the value to 1).
comments:
description:
- Comment.
member:
description:
- Member VIP objects of the group (Separate multiple objects with a space).
suboptions:
name:
description:
- VIP64 name. Source firewall.vip64.name.
required: true
name:
description:
- VIP64 group name.
required: true
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 to IPv4 virtual IP groups.
fortios_firewall_vipgrp64:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_vipgrp64:
state: "present"
color: "3"
comments: "<your_own_value>"
member:
-
name: "default_name_6 (source firewall.vip64.name)"
name: "default_name_7"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the FortiGate appliance described by *data*,
    enabling debug output and honouring the 'https' preference."""
    host = data['host']
    username = data['username']
    password = data['password']
    fos.debug('on')
    # HTTPS defaults to on when the key is absent
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(host, username, password)
def filter_firewall_vipgrp64_data(json):
    """Return a copy of *json* restricted to the vipgrp64 option keys,
    dropping keys that are missing or set to None."""
    option_list = ['color', 'comments', 'member',
                   'name', 'uuid']
    return dict((key, json[key]) for key in option_list
                if key in json and json[key] is not None)
def firewall_vipgrp64(data, fos):
    """Apply the vipgrp64 sub-configuration: create/update the object when
    state is 'present', remove it when state is 'absent'."""
    vdom = data['vdom']
    section = data['firewall_vipgrp64']
    payload = filter_firewall_vipgrp64_data(section)
    state = section['state']
    if state == "present":
        return fos.set('firewall',
                       'vipgrp64',
                       data=payload,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('firewall',
                          'vipgrp64',
                          mkey=payload['name'],
                          vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, apply the requested firewall sub-configuration, log out.

    Arguments:
        data -- dict of module parameters (credentials plus one of the
                supported sub-configuration keys)
        fos -- FortiOSAPI instance
    Return: (is_error, has_changed, result) tuple for AnsibleModule
    """
    login(data)
    # Explicit dispatch table instead of eval() on a method name.
    dispatch = {
        'firewall_vipgrp64': firewall_vipgrp64,
    }
    resp = None
    for key, handler in dispatch.items():
        if data[key]:
            resp = handler(data, fos)
            break
    fos.logout()
    if resp is None:
        # BUG FIX: previously `resp` was left unbound here and the return
        # statement raised NameError; report a proper error result instead.
        return True, False, {'status': 'error',
                             'reason': 'no configuration section provided'}
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: parse arguments, connect and apply the config."""
    # Argument specification mirroring the DOCUMENTATION block above.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        # NOTE(review): default is the string "False" although type is bool;
        # AnsibleModule coerces it, but a real boolean would be cleaner.
        "https": {"required": False, "type": "bool", "default": "False"},
        "firewall_vipgrp64": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "color": {"required": False, "type": "int"},
                "comments": {"required": False, "type": "str"},
                "member": {"required": False, "type": "list",
                           "options": {
                               "name": {"required": True, "type": "str"}
                           }},
                "name": {"required": True, "type": "str"},
                "uuid": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing library yields a clean
    # module failure instead of an import traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_firewall(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
edx/edx-enterprise
|
refs/heads/master
|
integrated_channels/moodle/migrations/0003_auto_20201006_1706.py
|
1
|
# Generated by Django 2.2.16 on 2020-10-06 17:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: lowers the default transmission chunk size
    # to 4 on both the configuration model and its history table.
    dependencies = [
        ('moodle', '0002_moodlelearnerdatatransmissionaudit'),
    ]
    operations = [
        migrations.AlterField(
            model_name='historicalmoodleenterprisecustomerconfiguration',
            name='transmission_chunk_size',
            field=models.IntegerField(default=4, help_text='The maximum number of data items to transmit to the integrated channel with each request.'),
        ),
        migrations.AlterField(
            model_name='moodleenterprisecustomerconfiguration',
            name='transmission_chunk_size',
            field=models.IntegerField(default=4, help_text='The maximum number of data items to transmit to the integrated channel with each request.'),
        ),
    ]
|
sometallgit/AutoUploader
|
refs/heads/master
|
Python27/Lib/site-packages/pip/req/__init__.py
|
806
|
from __future__ import absolute_import
from .req_install import InstallRequirement
from .req_set import RequirementSet, Requirements
from .req_file import parse_requirements
__all__ = [
"RequirementSet", "Requirements", "InstallRequirement",
"parse_requirements",
]
|
rimbalinux/MSISDNArea
|
refs/heads/master
|
djangoappengine/build/lib/djangoappengine/tests/not_return_sets.py
|
5
|
from .testmodels import FieldsWithOptionsModel, OrderedModel, SelfReferenceModel
import datetime
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
class NonReturnSetsTest(TestCase):
    """Exercise queryset methods that return single values rather than sets."""
    # Fixture data: five models built from pairwise (floating_point, email).
    floats = [5.3, 2.6, 9.1, 1.58, 2.4]
    emails = ['[email protected]', '[email protected]',
        '[email protected]', '[email protected]', '[email protected]']
    def setUp(self):
        # NOTE(review): the loop variable "float" shadows the builtin type.
        for index, (float, email) in enumerate(zip(NonReturnSetsTest.floats,
                NonReturnSetsTest.emails)):
            self.last_save_time = datetime.datetime.now().time()
            ordered_instance = OrderedModel(priority=index, pk=index + 1)
            ordered_instance.save()
            model = FieldsWithOptionsModel(floating_point=float,
                integer=int(float), email=email,
                time=self.last_save_time,
                foreign_key=ordered_instance)
            model.save()
    def test_get(self):
        self.assertEquals(FieldsWithOptionsModel.objects.get(
            email='[email protected]')
            .email, '[email protected]')
        # test exception when matching multiple entities
        self.assertRaises(MultipleObjectsReturned, FieldsWithOptionsModel.objects
            .get, integer=2)
        # test exception when entity does not exist
        self.assertRaises(ObjectDoesNotExist, FieldsWithOptionsModel.objects
            .get, floating_point=5.2)
        # TODO: test create when djangos model.save_base is refactored
        # TODO: test get_or_create when refactored
    def test_count(self):
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            integer=2).count(), 2)
    def test_in_bulk(self):
        # in_bulk keys the result dict by the requested identifiers
        self.assertEquals([key in ['[email protected]', '[email protected]']
                            for key in FieldsWithOptionsModel.objects.in_bulk(
                                ['[email protected]', '[email protected]']).keys()],
                          [True, ]*2)
    def test_latest(self):
        self.assertEquals('[email protected]', FieldsWithOptionsModel.objects
            .latest('time').email)
    def test_exists(self):
        self.assertEquals(True, FieldsWithOptionsModel.objects.exists())
    def test_deletion(self):
        # TODO: ForeignKeys will not be deleted! This has to be done via
        # background tasks
        self.assertEquals(FieldsWithOptionsModel.objects.count(), 5)
        FieldsWithOptionsModel.objects.get(email='[email protected]').delete()
        self.assertEquals(FieldsWithOptionsModel.objects.count(), 4)
        FieldsWithOptionsModel.objects.filter(email__in=['[email protected]',
            '[email protected]', '[email protected]', ]).delete()
        self.assertEquals(FieldsWithOptionsModel.objects.count(), 2)
    def test_selfref_deletion(self):
        # a model with a self reference can be saved and deleted
        entity = SelfReferenceModel()
        entity.save()
        entity.delete()
    def test_foreign_key_fetch(self):
        # test fetching the ForeignKey
        ordered_instance = OrderedModel.objects.get(priority=2)
        self.assertEquals(FieldsWithOptionsModel.objects.get(integer=9).foreign_key,
            ordered_instance)
    def test_foreign_key_backward(self):
        # reverse relation manager: count/add/remove/clear/assign
        entity = OrderedModel.objects.all()[0]
        self.assertEquals(entity.keys.count(), 1)
        # TODO: add should save the added instance transactional via for example
        # force_insert
        new_foreign_key = FieldsWithOptionsModel(floating_point=5.6, integer=3,
            email='[email protected]', time=datetime.datetime.now())
        entity.keys.add(new_foreign_key)
        self.assertEquals(entity.keys.count(), 2)
        # TODO: add test for create
        entity.keys.remove(new_foreign_key)
        self.assertEquals(entity.keys.count(), 1)
        entity.keys.clear()
        self.assertTrue(not entity.keys.exists())
        # assigning duplicates results in a single related instance
        entity.keys = [new_foreign_key, new_foreign_key]
        self.assertEquals(entity.keys.count(), 1)
        self.assertEquals(entity.keys.all()[0].integer, 3)
|
claudejrogers/RNASEQR
|
refs/heads/master
|
interval_tree.py
|
1
|
'''
This code is copied from package bpbio -- simple bioinformatics scripts
http://code.google.com/p/bpbio/
(Oct 03, 2009)
Some code for inmporting psyco was removed, since RNASEQR runs on the 64bits system, and psyco is for the 32 bits systems.
Some minor modifications.
'''
import operator
class IntervalTree(object):
    # slots keep the (potentially very many) tree nodes small
    __slots__ = ('intervals', 'left', 'right', 'center')
    def __init__(self, intervals, depth=16, minbucket=64, _extent=None, maxbucket=512):
        """\
        `intervals` a list of intervals *with start and stop* attributes.
        `depth`     the depth of the tree
        `minbucket` if any node in the tree has fewer than minbucket
                    elements, make it a leaf node
        `maxbucket` even at the specified `depth`, if the number of intervals >
                    maxbucket, split the node, make the tree deeper.
        depth and minbucket usually do not need to be changed. if
        dealing with large numbers (> 1M) of intervals, the depth could
        be increased to 24.
        Usage:
        >>> ivals = [Interval(2, 3), Interval(1, 8), Interval(3, 6)]
        >>> tree = IntervalTree(ivals)
        >>> sorted(tree.find(1, 2))
        [Interval(2, 3), Interval(1, 8)]
        this provides an extreme and satisfying performance improvement
        over searching manually over all 3 elements in the list (like
        a sucker).
        the IntervalTree class now also supports the iterator protocol
        so it's easy to loop over all elements in the tree:
        >>> import operator
        >>> sorted([iv for iv in tree], key=operator.attrgetter('start'))
        [Interval(1, 8), Interval(2, 3), Interval(3, 6)]
        NOTE: any object with start and stop attributes can be used
        in the incoming intervals list.
        """
        depth -= 1
        # Leaf node: few enough intervals to scan linearly, or depth is
        # exhausted and the bucket is still under maxbucket.
        if (len(intervals) < minbucket) or (depth==0 and len(intervals) < maxbucket):
            intervals.sort(key=operator.attrgetter('start'))
            self.intervals = intervals
            self.left = self.right = None
            return
        if _extent is None:
            # sorting the first time through allows it to get
            # better performance in searching later.
            intervals.sort(key=operator.attrgetter('start'))
        left, right = _extent or \
            (intervals[0].start, max(i.stop for i in intervals))
        #center = intervals[len(intervals)/ 2].stop
        center = (left + right) / 2.0
        self.intervals = []
        lefts, rights = [], []
        # Partition into wholly-left, wholly-right, and center-straddling
        # intervals; the straddlers stay on this node.
        for interval in intervals:
            if interval.stop < center:
                lefts.append(interval)
            elif interval.start > center:
                rights.append(interval)
            else: # overlapping.
                self.intervals.append(interval)
        self.left = lefts and IntervalTree(lefts, depth, minbucket, (intervals[0].start, center)) or None
        self.right = rights and IntervalTree(rights, depth, minbucket, (center, right)) or None
        self.center = center
    def find(self, start, stop):
        """find all elements between (or overlapping) start and stop"""
        # this node's intervals are sorted by start: skip the scan entirely
        # when the query ends before the first one begins
        if self.intervals and not stop < self.intervals[0].start:
            overlapping = [i for i in self.intervals if i.stop >= start
                and i.start <= stop]
        else:
            overlapping = []
        # recurse only into the halves that can contain overlapping intervals
        if self.left and start <= self.center:
            overlapping += self.left.find(start, stop)
        if self.right and stop >= self.center:
            overlapping += self.right.find(start, stop)
        return overlapping
    def __iter__(self):
        # in-order traversal: left subtree, this node's intervals, right subtree
        if self.left:
            for l in self.left: yield l
        for i in self.intervals: yield i
        if self.right:
            for r in self.right: yield r
    # methods to allow un/pickling (by pzs):
    def __getstate__(self):
        return { 'intervals' : self.intervals,
                    'left' : self.left,
                    'right' : self.right,
                    'center' : self.center }
    def __setstate__(self, state):
        # NOTE(review): dict.iteritems() is Python 2 only
        for key,value in state.iteritems():
            setattr(self, key, value)
class Interval(object):
    """A closed interval [start, stop] carrying an optional identifier."""
    __slots__ = ('id', 'start', 'stop')
    def __init__(self, start, stop, id=""):
        self.start = start
        self.stop = stop
        self.id = id
    def __repr__(self):
        return "Interval(%i, %i)" % (self.start, self.stop)
    # Pickling keeps only the coordinates, matching the original protocol:
    # the id is intentionally not part of the pickled state.
    def __getstate__(self):
        return {'start': self.start, 'stop': self.stop}
    def __setstate__(self, state):
        for name in state:
            setattr(self, name, state[name])
    def get_id(self):
        """Return the identifier given at construction time."""
        return self.id
# Ad-hoc benchmark / smoke test: build a large random tree and time queries.
# NOTE(review): this block is Python 2 only (print statement, xrange, cPickle).
if __name__ == '__main__':
    def brute_force_find(intervals, start, stop):
        # O(n) reference implementation used to cross-check tree results.
        return [i for i in intervals if i.stop >= start and i.start <= stop]

    import random, time

    def rand():
        # Random interval of length 200..6000 starting somewhere in [1, 2e6].
        s = random.randint(1, 2000000)
        return Interval(s, s + random.randint(200, 6000))

    intervals = [rand() for i in xrange(300000)]
    START, STOP = 390000, 400000
    intervals.append(Interval(0, 500000))
    tries = 100

    tree = IntervalTree(intervals)
    t = time.time()
    for i in range(tries):
        res = tree.find(START, STOP)
    treetime = time.time() - t
    t = time.time()
    print treetime
    # The triple-quoted string below is intentionally dead code: a disabled
    # brute-force comparison, iterator count check and pickle round-trip test.
    """
    for i in range(tries):
        bf = [i for i in intervals if i.stop >= START and i.start <= STOP]
    btime = time.time() - t
    assert not set(bf).symmetric_difference(res) , (len(bf), len(res), set(bf).difference(res), START, STOP)
    print treetime, btime, btime/treetime
    assert sum(1 for x in tree) == len(intervals), "iterator not working?"
    intervals = [rand() for i in xrange(300)]
    atree = IntervalTree(intervals)
    import cPickle
    btree = cPickle.loads(cPickle.dumps(atree, -1))
    af = atree.find(START, STOP)
    bf = btree.find(START, STOP)
    assert len(af) == len(bf)
    for a, b in zip(af, bf):
        assert a.start == b.start
        assert a.stop == b.stop
    import doctest
    doctest.testmod()
    """
|
mbernasocchi/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/gdal/rasterize_over.py
|
14
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
rasterize_over.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingOutputRasterLayer)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rasterize_over(GdalAlgorithm):
    """Processing algorithm wrapping ``gdal_rasterize``: burns a numeric
    field of a vector layer into an *existing* raster, which is modified in
    place (the algorithm's output is the input raster itself).
    """

    # Parameter / output identifiers.
    INPUT = 'INPUT'
    FIELD = 'FIELD'
    INPUT_RASTER = 'INPUT_RASTER'
    ADD = 'ADD'
    EXTRA = 'EXTRA'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's parameters and its single raster output."""
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input vector layer')))
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT_RASTER,
                                                            self.tr('Input raster layer')))
        # Only numeric fields may be used as burn-in values.
        self.addParameter(QgsProcessingParameterField(self.FIELD,
                                                      self.tr('Field to use for burn in value'),
                                                      None,
                                                      self.INPUT,
                                                      QgsProcessingParameterField.Numeric,
                                                      optional=False))
        # Advanced options, shown under the "Advanced" section in the UI.
        params = [
            QgsProcessingParameterBoolean(self.ADD,
                                          self.tr('Add burn in values to existing raster values'),
                                          defaultValue=False,
                                          ),
            QgsProcessingParameterString(self.EXTRA,
                                         self.tr('Additional command-line parameters'),
                                         defaultValue=None,
                                         optional=True)
        ]
        for p in params:
            p.setFlags(p.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.addParameter(p)
        self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT,
                                                      self.tr('Rasterized')))

    def name(self):
        return 'rasterize_over'

    def displayName(self):
        return self.tr('Rasterize (overwrite with attribute)')

    def group(self):
        return self.tr('Vector conversion')

    def groupId(self):
        return 'vectorconversion'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'rasterize.png'))

    def commandName(self):
        return 'gdal_rasterize'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Build the gdal_rasterize command line for the given parameters."""
        ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
        inLayer = self.parameterAsRasterLayer(parameters, self.INPUT_RASTER, context)
        if inLayer is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_RASTER))
        fieldName = self.parameterAsString(parameters, self.FIELD, context)
        # gdal_rasterize writes into the existing raster, so the algorithm's
        # output is simply the input raster's source path.
        self.setOutputValue(self.OUTPUT, inLayer.source())
        arguments = [
            '-l',
            layerName,
            '-a',
            fieldName
        ]
        # -add accumulates the burn value into existing pixels instead of
        # overwriting them.
        if self.parameterAsBool(parameters, self.ADD, context):
            arguments.append('-add')
        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)
        arguments.append(ogrLayer)
        arguments.append(inLayer.source())
        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
|
JLJTECH/TutorialTesting
|
refs/heads/master
|
CodeWars/2016/SillyCase-7k.py
|
1
|
#Create a function that takes a string and returns that string with the first half lowercased and the last half uppercased.
def sillycase(silly):
    """Return *silly* with its first half lowercased and its second half
    uppercased; for odd lengths the middle character joins the first half.
    """
    cut = len(silly) - len(silly) // 2  # ceil(len/2), same as (len+1)//2
    head, tail = silly[:cut], silly[cut:]
    return head.lower() + tail.upper()
|
salguarnieri/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyUpdatePropertySignatureQuickFixTest/setterLessParam.py
|
80
|
class A(Aa):
@property
def <warning descr="Getter signature should be (self)">x</warning>(self, r):
return r
@x.setter
def <warning descr="Setter signature should be (self, value)">x<caret></warning>():
self._x = ""
|
RudoCris/horizon
|
refs/heads/master
|
horizon/test/tests/views.py
|
69
|
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon.test import helpers as test
from horizon import views
from django import forms
from django.test import client
from django.utils.translation import ugettext_lazy as _
from django.views import generic
FAKENAME = "FakeName"
class ViewData(object):
    """Mixin giving the test views a fake template name and a canned context
    object so templated page titles have data to render."""
    template_name = 'fake'

    def get_context_data(self, **kwargs):
        # Inject a known object used by the "{{ object.name }}" titles below.
        context = super(ViewData, self).get_context_data(**kwargs)
        context['object'] = {'name': 'myName'}
        return context
class PageWithNoTitle(ViewData, views.HorizonTemplateView):
    # No page_title attribute: exercises the default (empty) title path.
    pass

class PageWithTitle(ViewData, views.HorizonTemplateView):
    # Plain static title.
    page_title = "A Title"

class PageWithTitleData(ViewData, views.HorizonTemplateView):
    # Title containing a template expression rendered against the context.
    page_title = "A Title: {{ object.name }}"

class FormWithTitle(ViewData, views.HorizonFormView):
    # Same templated title, but on a form view.
    form_class = forms.Form
    page_title = "A Title: {{ object.name }}"

class ViewWithTitle(views.PageTitleMixin, generic.TemplateView):
    # PageTitleMixin applied directly to a plain Django TemplateView.
    page_title = "Fake"

class ViewWithTransTitle(views.PageTitleMixin, generic.TemplateView):
    # Lazily translated title -- must still come out as a plain string.
    page_title = _("Fake")
class PageTitleTests(test.TestCase):
    """Tests for PageTitleMixin / HorizonTemplateView page_title handling."""

    def setUp(self):
        super(PageTitleTests, self).setUp()
        self.request = client.RequestFactory().get('fake')

    def _dispatch(self, viewClass):
        # Instantiate and dispatch the view against the fake request,
        # returning the response (whose context_data carries page_title).
        p = viewClass()
        p.request = self.request
        return p.dispatch(self.request)

    def test_render_context_with_title(self):
        tm = ViewWithTitle()
        context = tm.render_context_with_title({})
        self.assertEqual("Fake", context['page_title'])

    def test_render_context_with_title_override(self):
        # An explicit page_title already in the context wins.
        tm = ViewWithTitle()
        context = tm.render_context_with_title({'page_title': "ekaF"})
        self.assertEqual("ekaF", context['page_title'])

    def test_render_context_with_title_lazy_translations(self):
        # Lazy translation proxies must render as plain strings.
        tm = ViewWithTransTitle()
        context = tm.render_context_with_title({})
        self.assertEqual("Fake", context['page_title'])

    def test_no_title_set(self):
        res = self._dispatch(PageWithNoTitle)
        self.assertEqual("", res.context_data['page_title'])

    def test_title_set(self):
        res = self._dispatch(PageWithTitle)
        self.assertEqual("A Title", res.context_data['page_title'])

    def test_title_with_data(self):
        # Template expression in page_title is rendered with the context.
        res = self._dispatch(PageWithTitleData)
        self.assertEqual("A Title: myName", res.context_data['page_title'])

    def test_form_with_title(self):
        res = self._dispatch(FormWithTitle)
        self.assertEqual("A Title: myName", res.context_data['page_title'])
|
sgerhart/ansible
|
refs/heads/maintenance_policy_module
|
lib/ansible/utils/module_docs_fragments/eos.py
|
58
|
#
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for Arista EOS modules, pulled in via
    ``extends_documentation_fragment``.  DOCUMENTATION is YAML parsed at
    doc-build time -- its content must stay valid YAML.
    """

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  authorize:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli) and C(become: yes)."
      - This option is only required if you are using eAPI.
      - For more information please see the L(EOS Platform Options guide, ../network/user_guide/platform_eos.html).
      - HORIZONTALLINE
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands.  If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
    type: bool
    default: 'no'
  auth_pass:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli) and C(become: yes) with C(become_pass)."
      - This option is only required if you are using eAPI.
      - For more information please see the L(EOS Platform Options guide, ../network/user_guide/platform_eos.html).
      - HORIZONTALLINE
      - Specifies the password to use if required to enter privileged mode
        on the remote device.  If I(authorize) is false, then this argument
        does nothing. If the value is not specified in the task, the value of
        environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
  provider:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
      - This option is only required if you are using eAPI.
      - For more information please see the L(EOS Platform Options guide, ../network/user_guide/platform_eos.html).
      - HORIZONTALLINE
      - A dict object containing connection details.
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport.  The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.  This value applies to either I(cli) or I(eapi).  The port
            value will default to the appropriate transport common port if
            none is provided in the task.  (cli=22, http=80, https=443).
        default: 0 (use common port)
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device.  This value is used to authenticate
            either the CLI login or the eAPI authentication depending on which
            transport is used. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device.  This is a common argument used for either I(cli)
            or I(eapi) transports. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands.  If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH keyfile to use to authenticate the connection to
            the remote device.  This argument is only used for I(cli) transports.
            If the value is not specified in the task, the value of environment
            variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
      authorize:
        description:
          - Instructs the module to enter privileged mode on the remote device
            before sending any commands.  If not specified, the device will
            attempt to execute all commands in non-privileged mode. If the value
            is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
        type: bool
        default: 'no'
      auth_pass:
        description:
          - Specifies the password to use if required to enter privileged mode
            on the remote device.  If I(authorize) is false, then this argument
            does nothing. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
      transport:
        description:
          - Configures the transport connection to use when connecting to the
            remote device.
        required: true
        choices:
          - eapi
          - cli
        default: cli
      use_ssl:
        description:
          - Configures the I(transport) to use SSL if set to true only when the
            C(transport=eapi).  If the transport
            argument is not eapi, this value is ignored.
        type: bool
        default: 'yes'
      validate_certs:
        description:
          - If C(no), SSL certificates will not be validated. This should only be used
            on personally controlled sites using self-signed certificates.  If the transport
            argument is not eapi, this value is ignored.
        type: bool
      use_proxy:
        description:
          - If C(no), the environment variables C(http_proxy) and C(https_proxy) will be ignored.
        type: bool
        default: 'yes'
        version_added: "2.5"
notes:
  - For information on using CLI, eAPI and privileged mode see the :ref:`EOS Platform Options guide <eos_platform_options>`
  - For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
  - For more information on using Ansible to manage Arista EOS devices see the `Arista integration page <https://www.ansible.com/ansible-arista-networks>`_.
"""
|
jank3/django
|
refs/heads/master
|
tests/version/tests.py
|
352
|
from unittest import TestCase
from django import get_version
from django.utils import six
class VersionTests(TestCase):
    """Checks that django.get_version() renders VERSION tuples into the
    expected version strings."""

    def test_development(self):
        # ('alpha', 0) marks a pre-release development version.
        ver_tuple = (1, 4, 0, 'alpha', 0)
        # This will return a different result when it's run within or outside
        # of a git clone: 1.4.devYYYYMMDDHHMMSS or 1.4.
        ver_string = get_version(ver_tuple)
        six.assertRegex(self, ver_string, r'1\.4(\.dev[0-9]+)?')

    def test_releases(self):
        # (version tuple, expected string) pairs covering alpha/beta/rc/final.
        tuples_to_strings = (
            ((1, 4, 0, 'alpha', 1), '1.4a1'),
            ((1, 4, 0, 'beta', 1), '1.4b1'),
            ((1, 4, 0, 'rc', 1), '1.4c1'),
            ((1, 4, 0, 'final', 0), '1.4'),
            ((1, 4, 1, 'rc', 2), '1.4.1c2'),
            ((1, 4, 1, 'final', 0), '1.4.1'),
        )
        for ver_tuple, ver_string in tuples_to_strings:
            self.assertEqual(get_version(ver_tuple), ver_string)
|
taxigps/xbmc-addons-chinese
|
refs/heads/master
|
plugin.video.bdyun/resources/modules/get_auth.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
import os, sys, re, json
from resources.modules import auth
dialog = xbmcgui.Dialog()
class VcodeWindow(xbmcgui.WindowDialog):
    """Modal Kodi dialog showing a Baidu login captcha image with two
    buttons: accept (close; the caller then prompts for the captcha text)
    or refresh (fetch a replacement captcha in place).

    After doModal() returns, the caller must re-read ``codeString`` since a
    refresh replaces it.
    """

    def __init__(self, cookie, tokens, vcodetype, codeString, vcode_path):
        # Session state needed to request a replacement captcha.
        self.cookie = cookie
        self.tokens = tokens
        self.vcodetype = vcodetype
        self.codeString = codeString
        self.vcode_path = vcode_path
        # windowItems
        self.image = xbmcgui.ControlImage(80, 100, 500, 200, self.vcode_path)
        self.buttonInput = xbmcgui.ControlButton(
            100, 330, 220, 50, label=u'输入验证码', alignment=6, font='font13', textColor='0xFFFFFFFF'
        )
        self.buttonRefresh = xbmcgui.ControlButton(
            290, 330, 220, 50, label=u'刷新验证码', alignment=6, font='font13', textColor='0xFFFFFFFF'
        )
        self.addControls([self.image, self.buttonInput, self.buttonRefresh])
        # Left/right focus navigation between the two buttons.
        self.buttonInput.controlRight(self.buttonRefresh)
        self.buttonRefresh.controlLeft(self.buttonInput)
        self.setFocus(self.buttonInput)

    def onControl(self, event):
        """Close on accept; swap in a freshly fetched captcha on refresh."""
        if event == self.buttonInput:
            self.close()
        elif event == self.buttonRefresh:
            (self.codeString, self.vcode_path) = auth.refresh_vcode(self.cookie, self.tokens, self.vcodetype)
            if self.codeString and self.vcode_path:
                # Replace the image control with the new captcha picture.
                self.removeControl(self.image)
                self.image = xbmcgui.ControlImage(80, 100, 500, 200, self.vcode_path)
                self.addControl(self.image)
            else:
                dialog.ok('Error', u'无法刷新验证码,请重试')
# Authorisation Process
def run(username, password):
    """Perform the full Baidu login handshake.

    Args:
        username: Baidu account name.
        password: plain-text password; RSA-encrypted before posting.

    Returns:
        (cookie, tokens) on success, where ``tokens`` holds 'token' and
        'bdstoken'; (None, None) on failure (an error dialog is shown first).
    """
    # Bootstrap a session cookie and the login token.
    cookie = auth.get_BAIDUID()
    token = auth.get_token(cookie)
    tokens = {'token': token}
    ubi = auth.get_UBI(cookie, tokens)
    cookie = auth.add_cookie(cookie, ubi, ['UBI', 'PASSID'])
    # Encrypt the password with the server-provided RSA public key.
    key_data = auth.get_public_key(cookie, tokens)
    pubkey = key_data['pubkey']
    rsakey = key_data['key']
    password_enc = auth.RSA_encrypt(pubkey, password)
    err_no, query = auth.post_login(cookie, tokens, username, password_enc, rsakey)
    if err_no == 257:
        # Captcha required: show it, let the user type it, then retry login.
        vcodetype = query['vcodetype']
        codeString = query['codeString']
        vcode_path = auth.get_signin_vcode(cookie, codeString)
        win = VcodeWindow(cookie, tokens, vcodetype, codeString, vcode_path)
        win.doModal()
        # The dialog may have refreshed the captcha, so re-read codeString.
        codeString = win.codeString
        verifycode = dialog.input(heading=u'验证码')
        if verifycode:
            err_no, query = auth.post_login(cookie, tokens, username, password_enc, rsakey, verifycode, codeString)
            if err_no == 0:
                temp_cookie = query
                auth_cookie, bdstoken = auth.get_bdstoken(temp_cookie)
                if bdstoken:
                    tokens['bdstoken'] = bdstoken
                    return auth_cookie, tokens
            elif err_no == 4:
                dialog.ok('Error', u'密码错误')
            elif err_no == 6:
                dialog.ok('Error', u'验证码错误')
            else:
                dialog.ok('Error', u'未知错误,请重试')
        else:
            dialog.ok('Error', u'请输入验证码')
    elif err_no == 4:
        dialog.ok('Error', u'密码错误')
    elif err_no == 0:
        auth_cookie = query
        # BUGFIX: get_bdstoken() returns a (cookie, bdstoken) pair (see the
        # captcha branch above).  The original assigned the whole tuple to
        # `bdstoken`, so the truthiness check always passed and a tuple was
        # stored in tokens['bdstoken'].  Unpack it properly instead.
        auth_cookie, bdstoken = auth.get_bdstoken(auth_cookie)
        if bdstoken:
            tokens['bdstoken'] = bdstoken
            return auth_cookie, tokens
    else:
        dialog.ok('Error', u'未知错误,请重试')
    return None, None
|
johan--/python_koans
|
refs/heads/master
|
python2/koans/local_module_with_all_defined.py
|
127
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Names exported by ``from module import *``.  The quirks are deliberate and
# exercised by the koans: the leading-underscore name IS exported, while the
# public SecretDuck is NOT.
__all__ = (
    'Goat',
    '_Velociraptor'
)


class Goat(object):
    @property
    def name(self):
        # Read-only, constant attribute.
        return "George"


class _Velociraptor(object):
    @property
    def name(self):
        # "Private" by naming convention, yet exported via __all__.
        return "Cuddles"


class SecretDuck(object):
    @property
    def name(self):
        # Public name, but hidden from star-imports (absent from __all__).
        return "None of your business"
|
akaihola/django
|
refs/heads/master
|
django/contrib/formtools/tests/wizard/namedwizardtests/urls.py
|
320
|
from django.conf.urls import patterns, url
from django.contrib.formtools.tests.wizard.namedwizardtests.forms import (
SessionContactWizard, CookieContactWizard, Page1, Page2, Page3, Page4)
def get_named_session_wizard():
    # Build the session-backed wizard view with four named steps; url_name
    # and done_step_name must match entries in this module's urlpatterns.
    return SessionContactWizard.as_view(
        [('form1', Page1), ('form2', Page2), ('form3', Page3), ('form4', Page4)],
        url_name='nwiz_session',
        done_step_name='nwiz_session_done'
    )
def get_named_cookie_wizard():
    # Cookie-backed variant of the wizard above, with its own URL names.
    return CookieContactWizard.as_view(
        [('form1', Page1), ('form2', Page2), ('form3', Page3), ('form4', Page4)],
        url_name='nwiz_cookie',
        done_step_name='nwiz_cookie_done'
    )
# Two routes per wizard flavour: one capturing the current step name, and a
# step-less entry point.
urlpatterns = patterns('',
    url(r'^nwiz_session/(?P<step>.+)/$', get_named_session_wizard(), name='nwiz_session'),
    url(r'^nwiz_session/$', get_named_session_wizard(), name='nwiz_session_start'),
    url(r'^nwiz_cookie/(?P<step>.+)/$', get_named_cookie_wizard(), name='nwiz_cookie'),
    url(r'^nwiz_cookie/$', get_named_cookie_wizard(), name='nwiz_cookie_start'),
)
|
Vassyli/biofragmentor
|
refs/heads/master
|
gui/mainwindow.py
|
1
|
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5 import uic
class MainWindow(QMainWindow):
    """Top-level application window.

    Constructing an instance creates the QApplication, loads the Qt Designer
    UI from ``gui/main.ui``, shows it and then *blocks* in the Qt event loop
    until the application exits.
    """

    # Class-level default; replaced by the QApplication instance in __init__.
    app = None

    def __init__(self):
        # A QApplication must exist before any widget is constructed.
        # NOTE(review): `self.app` is assigned before super().__init__() has
        # run -- confirm PyQt5 tolerates attribute writes on the not-yet-
        # initialized wrapper in the targeted version.
        self.app = QApplication(sys.argv)
        super().__init__()
        # NOTE(review): only the loaded `self.ui` widget is shown; the
        # MainWindow itself never is -- confirm this is intended.
        self.ui = uic.loadUi("gui/main.ui")
        self.ui.show()
        # Enter the event loop; __init__ does not return until app exit.
        self.app.exec_()
|
scanny/python-pptx
|
refs/heads/master
|
pptx/oxml/shapes/shared.py
|
1
|
# encoding: utf-8
"""Common shape-related oxml objects."""
from __future__ import absolute_import, division, print_function, unicode_literals
from pptx.dml.fill import CT_GradientFillProperties
from pptx.enum.shapes import PP_PLACEHOLDER
from pptx.oxml.ns import qn
from pptx.oxml.simpletypes import (
ST_Angle,
ST_Coordinate,
ST_Direction,
ST_DrawingElementId,
ST_LineWidth,
ST_PlaceholderSize,
ST_PositiveCoordinate,
XsdBoolean,
XsdString,
XsdUnsignedInt,
)
from pptx.oxml.xmlchemy import (
BaseOxmlElement,
Choice,
OptionalAttribute,
OxmlElement,
RequiredAttribute,
ZeroOrOne,
ZeroOrOneChoice,
)
from pptx.util import Emu
class BaseShapeElement(BaseOxmlElement):
    """
    Provides common behavior for shape element classes like CT_Shape,
    CT_Picture, etc.
    """

    @property
    def cx(self):
        # Shape width, read from the <a:xfrm> element; None when absent.
        return self._get_xfrm_attr("cx")

    @cx.setter
    def cx(self, value):
        self._set_xfrm_attr("cx", value)

    @property
    def cy(self):
        # Shape height; None when no <a:xfrm> is present.
        return self._get_xfrm_attr("cy")

    @cy.setter
    def cy(self, value):
        self._set_xfrm_attr("cy", value)

    @property
    def flipH(self):
        # bool() maps a missing attribute (None) to False.
        return bool(self._get_xfrm_attr("flipH"))

    @flipH.setter
    def flipH(self, value):
        self._set_xfrm_attr("flipH", value)

    @property
    def flipV(self):
        return bool(self._get_xfrm_attr("flipV"))

    @flipV.setter
    def flipV(self, value):
        self._set_xfrm_attr("flipV", value)

    def get_or_add_xfrm(self):
        """
        Return the ``<a:xfrm>`` grandchild element, newly-added if not
        present. This version works for ``<p:sp>``, ``<p:cxnSp>``, and
        ``<p:pic>`` elements, others will need to override.
        """
        return self.spPr.get_or_add_xfrm()

    @property
    def has_ph_elm(self):
        """
        True if this shape element has a ``<p:ph>`` descendant, indicating it
        is a placeholder shape. False otherwise.
        """
        return self.ph is not None

    @property
    def ph(self):
        """
        The ``<p:ph>`` descendant element if there is one, None otherwise.
        """
        # ./*[1] is the shape's non-visual properties element (its tag name
        # varies by shape type), which hosts <p:nvPr>.
        ph_elms = self.xpath("./*[1]/p:nvPr/p:ph")
        if len(ph_elms) == 0:
            return None
        return ph_elms[0]

    @property
    def ph_idx(self):
        """
        Integer value of placeholder idx attribute. Raises |ValueError| if
        shape is not a placeholder.
        """
        ph = self.ph
        if ph is None:
            raise ValueError("not a placeholder shape")
        return ph.idx

    @property
    def ph_orient(self):
        """
        Placeholder orientation, e.g. 'vert'. Raises |ValueError| if shape is
        not a placeholder.
        """
        ph = self.ph
        if ph is None:
            raise ValueError("not a placeholder shape")
        return ph.orient

    @property
    def ph_sz(self):
        """
        Placeholder size, e.g. ST_PlaceholderSize.HALF, None if shape has no
        ``<p:ph>`` descendant.
        """
        ph = self.ph
        if ph is None:
            raise ValueError("not a placeholder shape")
        return ph.sz

    @property
    def ph_type(self):
        """
        Placeholder type, e.g. ST_PlaceholderType.TITLE ('title'), none if
        shape has no ``<p:ph>`` descendant.
        """
        ph = self.ph
        if ph is None:
            raise ValueError("not a placeholder shape")
        return ph.type

    @property
    def rot(self):
        """
        Float representing degrees this shape is rotated clockwise.
        """
        xfrm = self.xfrm
        if xfrm is None:
            # No transform element means no rotation applied.
            return 0.0
        return xfrm.rot

    @rot.setter
    def rot(self, value):
        self.get_or_add_xfrm().rot = value

    @property
    def shape_id(self):
        """
        Integer id of this shape
        """
        return self._nvXxPr.cNvPr.id

    @property
    def shape_name(self):
        """
        Name of this shape
        """
        return self._nvXxPr.cNvPr.name

    @property
    def txBody(self):
        """
        Child ``<p:txBody>`` element, None if not present
        """
        return self.find(qn("p:txBody"))

    @property
    def x(self):
        # Left-edge offset; None when no <a:xfrm> is present.
        return self._get_xfrm_attr("x")

    @x.setter
    def x(self, value):
        self._set_xfrm_attr("x", value)

    @property
    def xfrm(self):
        """
        The ``<a:xfrm>`` grandchild element or |None| if not found. This
        version works for ``<p:sp>``, ``<p:cxnSp>``, and ``<p:pic>``
        elements, others will need to override.
        """
        return self.spPr.xfrm

    @property
    def y(self):
        # Top-edge offset; None when no <a:xfrm> is present.
        return self._get_xfrm_attr("y")

    @y.setter
    def y(self, value):
        self._set_xfrm_attr("y", value)

    @property
    def _nvXxPr(self):
        """
        Required non-visual shape properties element for this shape. Actual
        name depends on the shape type, e.g. ``<p:nvPicPr>`` for picture
        shape.
        """
        return self.xpath("./*[1]")[0]

    def _get_xfrm_attr(self, name):
        # Shared getter for transform-backed properties; returns None when
        # the shape has no <a:xfrm> element yet.
        xfrm = self.xfrm
        if xfrm is None:
            return None
        return getattr(xfrm, name)

    def _set_xfrm_attr(self, name, value):
        # Shared setter; creates the <a:xfrm> element on first write.
        xfrm = self.get_or_add_xfrm()
        setattr(xfrm, name, value)
class CT_ApplicationNonVisualDrawingProps(BaseOxmlElement):
    """
    ``<p:nvPr>`` element
    """

    # Optional <p:ph> child marking the shape as a placeholder; `successors`
    # lists the sibling elements it must precede (schema-defined order).
    ph = ZeroOrOne(
        "p:ph",
        successors=(
            "a:audioCd",
            "a:wavAudioFile",
            "a:audioFile",
            "a:videoFile",
            "a:quickTimeFile",
            "p:custDataLst",
            "p:extLst",
        ),
    )
class CT_LineProperties(BaseOxmlElement):
    """Custom element class for <a:ln> element"""

    # Schema-defined child order; slices of this tuple supply `successors`
    # so inserted children land in the right position.
    _tag_seq = (
        "a:noFill",
        "a:solidFill",
        "a:gradFill",
        "a:pattFill",
        "a:prstDash",
        "a:custDash",
        "a:round",
        "a:bevel",
        "a:miter",
        "a:headEnd",
        "a:tailEnd",
        "a:extLst",
    )
    # At most one of the four fill types may be present.
    eg_lineFillProperties = ZeroOrOneChoice(
        (
            Choice("a:noFill"),
            Choice("a:solidFill"),
            Choice("a:gradFill"),
            Choice("a:pattFill"),
        ),
        successors=_tag_seq[4:],
    )
    prstDash = ZeroOrOne("a:prstDash", successors=_tag_seq[5:])
    custDash = ZeroOrOne("a:custDash", successors=_tag_seq[6:])
    del _tag_seq
    # Line width; defaults to hairline (0 EMU) when the attribute is absent.
    w = OptionalAttribute("w", ST_LineWidth, default=Emu(0))

    @property
    def eg_fillProperties(self):
        """
        Required to fulfill the interface used by dml.fill.
        """
        return self.eg_lineFillProperties

    @property
    def prstDash_val(self):
        """Return value of `val` attribute of `a:prstDash` child.

        Return |None| if not present.
        """
        prstDash = self.prstDash
        if prstDash is None:
            return None
        return prstDash.val

    @prstDash_val.setter
    def prstDash_val(self, val):
        # prstDash and custDash are mutually exclusive, so drop any custDash.
        self._remove_custDash()
        prstDash = self.get_or_add_prstDash()
        prstDash.val = val
class CT_NonVisualDrawingProps(BaseOxmlElement):
    """
    ``<p:cNvPr>`` custom element class.
    """

    _tag_seq = ("a:hlinkClick", "a:hlinkHover", "a:extLst")
    # Optional hyperlink children (click and hover behaviors).
    hlinkClick = ZeroOrOne("a:hlinkClick", successors=_tag_seq[1:])
    hlinkHover = ZeroOrOne("a:hlinkHover", successors=_tag_seq[2:])
    # Every shape must carry a unique integer id and a name.
    id = RequiredAttribute("id", ST_DrawingElementId)
    name = RequiredAttribute("name", XsdString)
    del _tag_seq
class CT_Placeholder(BaseOxmlElement):
    """
    ``<p:ph>`` custom element class.
    """

    # All four attributes are optional in the XML; the defaults below are the
    # spec-defined values implied when an attribute is absent.
    type = OptionalAttribute("type", PP_PLACEHOLDER, default=PP_PLACEHOLDER.OBJECT)
    orient = OptionalAttribute("orient", ST_Direction, default=ST_Direction.HORZ)
    sz = OptionalAttribute("sz", ST_PlaceholderSize, default=ST_PlaceholderSize.FULL)
    idx = OptionalAttribute("idx", XsdUnsignedInt, default=0)
class CT_Point2D(BaseOxmlElement):
    """
    Custom element class for <a:off> element.
    """

    # Offset coordinates; both are required by the schema.
    x = RequiredAttribute("x", ST_Coordinate)
    y = RequiredAttribute("y", ST_Coordinate)
class CT_PositiveSize2D(BaseOxmlElement):
    """
    Custom element class for <a:ext> element.
    """

    # Extents (width/height); required, non-negative coordinates.
    cx = RequiredAttribute("cx", ST_PositiveCoordinate)
    cy = RequiredAttribute("cy", ST_PositiveCoordinate)
class CT_ShapeProperties(BaseOxmlElement):
    """Custom element class for `p:spPr` element.

    Shared by `p:sp`, `p:cxnSp`, and `p:pic` elements as well as a few more
    obscure ones.
    """

    # Schema-defined child order; slices supply `successors` for insertion.
    _tag_seq = (
        "a:xfrm",
        "a:custGeom",
        "a:prstGeom",
        "a:noFill",
        "a:solidFill",
        "a:gradFill",
        "a:blipFill",
        "a:pattFill",
        "a:grpFill",
        "a:ln",
        "a:effectLst",
        "a:effectDag",
        "a:scene3d",
        "a:sp3d",
        "a:extLst",
    )
    xfrm = ZeroOrOne("a:xfrm", successors=_tag_seq[1:])
    custGeom = ZeroOrOne("a:custGeom", successors=_tag_seq[2:])
    prstGeom = ZeroOrOne("a:prstGeom", successors=_tag_seq[3:])
    # At most one of the six fill types may be present.
    eg_fillProperties = ZeroOrOneChoice(
        (
            Choice("a:noFill"),
            Choice("a:solidFill"),
            Choice("a:gradFill"),
            Choice("a:blipFill"),
            Choice("a:pattFill"),
            Choice("a:grpFill"),
        ),
        successors=_tag_seq[9:],
    )
    ln = ZeroOrOne("a:ln", successors=_tag_seq[10:])
    effectLst = ZeroOrOne("a:effectLst", successors=_tag_seq[11:])
    del _tag_seq

    @property
    def cx(self):
        """
        Shape width as an instance of Emu, or None if not present.
        """
        cx_str_lst = self.xpath("./a:xfrm/a:ext/@cx")
        if not cx_str_lst:
            return None
        return Emu(cx_str_lst[0])

    @property
    def cy(self):
        """
        Shape height as an instance of Emu, or None if not present.
        """
        cy_str_lst = self.xpath("./a:xfrm/a:ext/@cy")
        if not cy_str_lst:
            return None
        return Emu(cy_str_lst[0])

    @property
    def x(self):
        """
        The offset of the left edge of the shape from the left edge of the
        slide, as an instance of Emu. Corresponds to the value of the
        `./xfrm/off/@x` attribute. None if not present.
        """
        x_str_lst = self.xpath("./a:xfrm/a:off/@x")
        if not x_str_lst:
            return None
        return Emu(x_str_lst[0])

    @property
    def y(self):
        """
        The offset of the top of the shape from the top of the slide, as an
        instance of Emu. None if not present.
        """
        y_str_lst = self.xpath("./a:xfrm/a:off/@y")
        if not y_str_lst:
            return None
        return Emu(y_str_lst[0])

    def _new_gradFill(self):
        # Hook used by the element factory when a gradFill child is added.
        return CT_GradientFillProperties.new_gradFill()
class CT_Transform2D(BaseOxmlElement):
    """`a:xfrm` custom element class.

    NOTE: this is a composite including CT_GroupTransform2D, which appears
    with the `a:xfrm` tag in a group shape (including a slide `p:spTree`).
    """

    _tag_seq = ("a:off", "a:ext", "a:chOff", "a:chExt")
    off = ZeroOrOne("a:off", successors=_tag_seq[1:])
    ext = ZeroOrOne("a:ext", successors=_tag_seq[2:])
    # chOff/chExt only occur on the group-transform variant.
    chOff = ZeroOrOne("a:chOff", successors=_tag_seq[3:])
    chExt = ZeroOrOne("a:chExt", successors=_tag_seq[4:])
    del _tag_seq
    rot = OptionalAttribute("rot", ST_Angle, default=0.0)
    flipH = OptionalAttribute("flipH", XsdBoolean, default=False)
    flipV = OptionalAttribute("flipV", XsdBoolean, default=False)

    @property
    def x(self):
        # Left-edge offset from the <a:off> child; None when absent.
        off = self.off
        if off is None:
            return None
        return off.x

    @x.setter
    def x(self, value):
        off = self.get_or_add_off()
        off.x = value

    @property
    def y(self):
        off = self.off
        if off is None:
            return None
        return off.y

    @y.setter
    def y(self, value):
        off = self.get_or_add_off()
        off.y = value

    @property
    def cx(self):
        # Width from the <a:ext> child; None when absent.
        ext = self.ext
        if ext is None:
            return None
        return ext.cx

    @cx.setter
    def cx(self, value):
        ext = self.get_or_add_ext()
        ext.cx = value

    @property
    def cy(self):
        ext = self.ext
        if ext is None:
            return None
        return ext.cy

    @cy.setter
    def cy(self, value):
        ext = self.get_or_add_ext()
        ext.cy = value

    def _new_ext(self):
        # Newly added extents default to zero size; cx/cy are required
        # attributes so they must be populated at creation.
        ext = OxmlElement("a:ext")
        ext.cx = 0
        ext.cy = 0
        return ext

    def _new_off(self):
        # Newly added offset defaults to the slide origin.
        off = OxmlElement("a:off")
        off.x = 0
        off.y = 0
        return off
|
D4wN/brickv
|
refs/heads/master
|
src/build_data/windows/OpenGL/raw/GL/EXT/separate_shader_objects.py
|
3
|
'''OpenGL extension EXT.separate_shader_objects
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_separate_shader_objects'
_DEPRECATED = False
GL_ACTIVE_PROGRAM_EXT = constant.Constant( 'GL_ACTIVE_PROGRAM_EXT', 0x8B8D )
glUseShaderProgramEXT = platform.createExtensionFunction(
'glUseShaderProgramEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLuint,),
doc='glUseShaderProgramEXT(GLenum(type), GLuint(program)) -> None',
argNames=('type','program',),
deprecated=_DEPRECATED,
)
glActiveProgramEXT = platform.createExtensionFunction(
'glActiveProgramEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,),
doc='glActiveProgramEXT(GLuint(program)) -> None',
argNames=('program',),
deprecated=_DEPRECATED,
)
glCreateShaderProgramEXT = platform.createExtensionFunction(
'glCreateShaderProgramEXT',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=constants.GLuint,
argTypes=(constants.GLenum,arrays.GLcharArray,),
doc='glCreateShaderProgramEXT(GLenum(type), GLcharArray(string)) -> constants.GLuint',
argNames=('type','string',),
deprecated=_DEPRECATED,
)
def glInitSeparateShaderObjectsEXT():
    '''Return boolean indicating whether this extension is available'''
    # Delegates to the runtime extension query for GL_EXT_separate_shader_objects.
    return extensions.hasGLExtension(EXTENSION_NAME)
|
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
lms/djangoapps/discussion_api/pagination.py
|
16
|
"""
Discussion API pagination support
"""
from rest_framework.utils.urls import replace_query_param
from edx_rest_framework_extensions.paginators import NamespacedPageNumberPagination
class _Page(object):
"""
Implements just enough of the django.core.paginator.Page interface to allow
PaginationSerializer to work.
"""
def __init__(self, page_num, num_pages):
"""
Create a new page containing the given objects, with the given page
number and number of pages
"""
self.page_num = page_num
self.num_pages = num_pages
def has_next(self):
"""Returns True if there is a page after this one, otherwise False"""
return self.page_num < self.num_pages
def has_previous(self):
"""Returns True if there is a page before this one, otherwise False"""
return self.page_num > 1
def next_page_number(self):
"""Returns the number of the next page"""
return self.page_num + 1
def previous_page_number(self):
"""Returns the number of the previous page"""
return self.page_num - 1
class DiscussionAPIPagination(NamespacedPageNumberPagination):
    """
    Pagination metadata provider for the discussion API: overrides the
    NamespacedPageNumberPagination hooks with values taken from the
    discussion service's own pagination information.
    """

    def __init__(self, request, page_num, num_pages, result_count=0):
        """
        Capture page number, page count and result count from the discussion
        api response, as required by the parent implementation.
        """
        self.page = _Page(page_num, num_pages)
        self.base_url = request.build_absolute_uri()
        self.count = result_count
        super(DiscussionAPIPagination, self).__init__()

    def get_result_count(self):
        """Total number of results across all pages."""
        return self.count

    def get_num_pages(self):
        """Total number of pages the response is divided into."""
        return self.page.num_pages

    def get_next_link(self):
        """Absolute URL of the next page, or None when on the last page."""
        if not self.page.has_next():
            return None
        return replace_query_param(
            self.base_url, "page", self.page.next_page_number()
        )

    def get_previous_link(self):
        """Absolute URL of the previous page, or None when on the first page."""
        if not self.page.has_previous():
            return None
        return replace_query_param(
            self.base_url, "page", self.page.previous_page_number()
        )
|
thisispuneet/potato-blog
|
refs/heads/master
|
django/contrib/gis/db/backends/spatialite/introspection.py
|
401
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass whose `base_data_types_reverse` dict additionally maps the
    SpatiaLite geometry column types to GeometryField.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    # dict.update() accepts an iterable of (key, value) pairs; every
    # geometry column type maps to the same Django field class.
    base_data_types_reverse.update(
        (geom_type, 'GeometryField')
        for geom_type in ('point', 'linestring', 'polygon',
                          'multipoint', 'multilinestring',
                          'multipolygon', 'geometrycollection')
    )
class SpatiaLiteIntrospection(DatabaseIntrospection):
    # Lookup dict extended with SpatiaLite geometry column types.
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """
        Return a (field_type, field_params) pair for the geometry column
        `geo_col` of `table_name`, using the SpatiaLite `geometry_columns`
        metadata table.

        Raises Exception when no metadata row exists for the column.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            cursor.execute('SELECT "coord_dimension", "srid", "type" '
                           'FROM "geometry_columns" '
                           'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))

            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django

            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            # NOTE(review): `basestring` makes this Python 2-only. A string
            # coord_dimension containing 'Z' marks a 3D geometry here.
            if isinstance(dim, basestring) and 'Z' in dim:
                field_params['dim'] = 3
        finally:
            # Always release the cursor; if the query itself raised,
            # the return below is never reached.
            cursor.close()

        return field_type, field_params
|
rfosterslo/wagtailplus
|
refs/heads/master
|
wagtailplus/wagtaillinks/tests/test_views.py
|
2
|
"""
Contains view unit tests.
"""
from django.core.urlresolvers import reverse
from wagtailplus.tests import views
from ..models import Link
class TestLinkIndexView(views.BaseTestIndexView):
    """Index view tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/links'

    def _create_sequential_instance(self, index):
        # Each fixture link gets a distinct title/URL derived from `index`.
        Link.objects.create(
            link_type=Link.LINK_TYPE_EXTERNAL,
            title='Link #{0}'.format(index),
            external_url='http://www.site-{0}.com'.format(index),
        )
class TestLinkCreateView(views.BaseTestCreateView):
    """Create view tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/links'
    model_class = Link
    filter_keys = ['title']

    def _get_post_data(self):
        # Minimal valid form payload for an external link.
        data = {}
        data['link_type'] = Link.LINK_TYPE_EXTERNAL
        data['title'] = 'Test Link'
        data['external_url'] = 'http://www.test.com/'
        return data
class TestLinkUpdateView(views.BaseTestUpdateView):
    """Update view tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/links'
    model_class = Link

    def _get_instance(self):
        # Existing link that the update view will edit.
        return Link.objects.create(
            link_type=Link.LINK_TYPE_EXTERNAL,
            title='Test Link',
            external_url='http://www.test.com/',
        )

    def _get_post_data(self):
        # Same link data with a changed title.
        return {
            'external_url': 'http://www.test.com/',
            'link_type': Link.LINK_TYPE_EXTERNAL,
            'title': 'Test Link Changed',
        }
class TestLinkDeleteView(views.BaseTestDeleteView):
    """Delete view tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/links'
    model_class = Link

    def _get_instance(self):
        # Existing link for the delete view to remove.
        return Link.objects.create(
            link_type=Link.LINK_TYPE_EXTERNAL,
            title='Test Link',
            external_url='http://www.test.com/',
        )
class TestEmailLinkChooserView(views.BaseTestChooserView):
    """Chooser view tests for email links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/chooser'
    model_class = Link

    def _create_sequential_instance(self, index):
        # Email-type fixture link derived from `index`.
        return Link.objects.create(
            link_type=Link.LINK_TYPE_EMAIL,
            title='Test Email #{0}'.format(index),
            email='somebody-{0}@something.com'.format(index),
        )

    def get(self, params=None):
        # GET the email-link chooser endpoint with optional query params.
        return self.client.get(
            reverse('wagtailadmin_choose_page_email_link'),
            params or {},
        )
class TestExternalLinkChooserView(views.BaseTestChooserView):
    """Chooser view tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/chooser'
    model_class = Link

    def _create_sequential_instance(self, index):
        # External-type fixture link derived from `index`.
        return Link.objects.create(
            link_type=Link.LINK_TYPE_EXTERNAL,
            title='Test Link #{0}'.format(index),
            external_url='http://www.site-{0}.com'.format(index),
        )

    def get(self, params=None):
        # GET the external-link chooser endpoint with optional query params.
        return self.client.get(
            reverse('wagtailadmin_choose_page_external_link'),
            params or {},
        )
class TestEmailLinkChosenView(views.BaseTestChosenView):
    """Chosen view tests for email links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/chooser'
    model_class = Link

    def _get_instance(self):
        # Email link returned by the chosen view.
        return Link.objects.create(
            link_type=Link.LINK_TYPE_EMAIL,
            title='Test Email',
            email='[email protected]',
        )
class TestExternalLinkChosenView(views.BaseTestChosenView):
    """Chosen view tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/chooser'
    model_class = Link

    def _get_instance(self):
        # External link returned by the chosen view.
        return Link.objects.create(
            link_type=Link.LINK_TYPE_EXTERNAL,
            title='Test Link',
            external_url='http://www.test.com/',
        )
class TestChooserCreateEmailLinkView(views.BaseTestChooserCreateView):
    """Chooser-creation tests for email links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/chooser'
    model_class = Link

    def _get_post_data(self):
        # Minimal valid form payload for an email link.
        return {
            'email': '[email protected]',
            'link_type': Link.LINK_TYPE_EMAIL,
            'title': 'Test Email',
        }

    def test_get(self):
        """GET renders all three chooser templates."""
        response = self.client.get(
            reverse('wagtailadmin_choose_page_email_link')
        )

        self.assertEqual(response.status_code, 200)
        templates = (
            '{0}/chooser.html'.format(self.template_dir),
            '{0}/results.html'.format(self.template_dir),
            '{0}/chooser.js'.format(self.template_dir),
        )
        for template in templates:
            self.assertTemplateUsed(response, template)

    def test_post(self):
        """POSTing valid data creates the link and responds via the modal."""
        data = self._get_post_data()
        response = self.client.post(
            reverse('wagtailadmin_choose_page_email_link'),
            data
        )

        self.assertTemplateUsed(
            response, '{0}/chosen.js'.format(self.template_dir)
        )
        self.assertContains(response, 'modal.respond')
        self.assertTrue(self.model_class.objects.filter(**data).exists())
class TestChooserCreateExternalLinkView(views.BaseTestChooserCreateView):
    """Chooser-creation tests for external links."""
    url_namespace = 'wagtaillinks'
    template_dir = 'wagtaillinks/chooser'
    model_class = Link

    def _get_post_data(self):
        # Minimal valid form payload for an external link.
        return {
            'external_url': 'http://www.test.com/',
            'link_type': Link.LINK_TYPE_EXTERNAL,
            'title': 'Test Link',
        }

    def test_get(self):
        """GET renders all three chooser templates."""
        response = self.client.get(
            reverse('wagtailadmin_choose_page_external_link')
        )

        self.assertEqual(response.status_code, 200)
        templates = (
            '{0}/chooser.html'.format(self.template_dir),
            '{0}/results.html'.format(self.template_dir),
            '{0}/chooser.js'.format(self.template_dir),
        )
        for template in templates:
            self.assertTemplateUsed(response, template)

    def test_post(self):
        """POSTing valid data creates the link and responds via the modal."""
        data = self._get_post_data()
        response = self.client.post(
            reverse('wagtailadmin_choose_page_external_link'),
            data
        )

        self.assertTemplateUsed(
            response, '{0}/chosen.js'.format(self.template_dir)
        )
        self.assertContains(response, 'modal.respond')
        self.assertTrue(self.model_class.objects.filter(**data).exists())
|
doheekim/chuizonetest
|
refs/heads/master
|
lib/sqlalchemy/dialects/postgresql/pg8000.py
|
10
|
# postgresql/pg8000.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pg8000
:name: pg8000
:dbapi: pg8000
:connectstring: \
postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pythonhosted.org/pg8000/
Unicode
-------
When communicating with the server, pg8000 **always uses the server-side
character set**. SQLAlchemy has no ability to modify what character set
pg8000 chooses to use, and additionally SQLAlchemy does no unicode conversion
of any kind with the pg8000 backend. The origin of the client encoding setting
is ultimately the CLIENT_ENCODING setting in postgresql.conf.
It is not necessary, though is also harmless, to pass the "encoding" parameter
to :func:`.create_engine` when using pg8000.
.. _pg8000_isolation_level:
pg8000 Transaction Isolation Level
-------------------------------------
The pg8000 dialect offers the same isolation level settings as that
of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using
pg8000.
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`psycopg2_isolation_level`
"""
from ... import util, exc
import decimal
from ... import processors
from ... import types as sqltypes
from .base import (
PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext,
_DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES)
class _PGNumeric(sqltypes.Numeric):
    """Numeric type that adapts pg8000's native return types."""

    def result_processor(self, dialect, coltype):
        """
        Return a row-value converter appropriate for `coltype`, honouring
        the `asdecimal` flag; raise InvalidRequestError for unknown OIDs.
        """
        is_float = coltype in _FLOAT_TYPES
        is_fixed = coltype in _DECIMAL_TYPES or coltype in _INT_TYPES
        if not (is_float or is_fixed):
            raise exc.InvalidRequestError(
                "Unknown PG numeric type: %d" % coltype)
        if self.asdecimal:
            if is_float:
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale)
            # pg8000 returns Decimal natively for 1700
            return None
        if is_float:
            # pg8000 returns float natively for 701
            return None
        return processors.to_float
class _PGNumericNoBind(_PGNumeric):
    """_PGNumeric variant that performs no bind-parameter conversion."""

    def bind_processor(self, dialect):
        # pg8000 accepts Decimal values directly; no conversion needed.
        return None
class PGExecutionContext_pg8000(PGExecutionContext):
    # No pg8000-specific execution-context behavior; exists so the dialect
    # can point execution_ctx_cls at a driver-specific class.
    pass
class PGCompiler_pg8000(PGCompiler):
    """SQL compiler tweaks for pg8000's format-style parameter substitution."""

    def visit_mod_binary(self, binary, operator, **kw):
        # Emit an escaped '%%' so the driver's format substitution leaves
        # a literal modulo operator in the SQL.
        left = self.process(binary.left, **kw)
        right = self.process(binary.right, **kw)
        return left + " %% " + right

    def post_process_text(self, text):
        """Escape literal '%' in text() SQL, warning once detected."""
        if '%%' in text:
            util.warn("The SQLAlchemy postgresql dialect "
                      "now automatically escapes '%' in text() "
                      "expressions to '%%'.")
        return text.replace('%', '%%')
class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
    """Identifier quoting that also escapes '%' for format-style params."""

    def _escape_identifier(self, value):
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        return escaped.replace('%', '%%')
class PGDialect_pg8000(PGDialect):
    """PostgreSQL dialect for the pure-Python pg8000 DBAPI driver."""

    driver = 'pg8000'

    # pg8000 accepts unicode SQL text and bind values directly.
    supports_unicode_statements = True
    supports_unicode_binds = True

    default_paramstyle = 'format'
    # executemany() rowcounts are not reliable with this driver.
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_pg8000
    statement_compiler = PGCompiler_pg8000
    preparer = PGIdentifierPreparer_pg8000
    description_encoding = 'use_encoding'

    # Route Numeric/Float through the pg8000-aware type adapters above.
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumericNoBind,
            sqltypes.Float: _PGNumeric
        }
    )

    @classmethod
    def dbapi(cls):
        # Imported lazily so the dialect can be registered without pg8000.
        return __import__('pg8000')

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into pg8000 connect() kwargs."""
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e, connection, cursor):
        # pg8000 reports a dropped connection with this message text.
        return "connection is closed" in str(e)

    def set_isolation_level(self, connection, level):
        """
        Apply `level` (an isolation level name, or 'AUTOCOMMIT') to the
        raw pg8000 connection; raise ArgumentError for unknown levels.
        """
        level = level.replace('_', ' ')

        # adjust for ConnectionFairy possibly being present
        if hasattr(connection, 'connection'):
            connection = connection.connection

        if level == 'AUTOCOMMIT':
            connection.autocommit = True
        elif level in self._isolation_lookup:
            connection.autocommit = False
            cursor = connection.cursor()
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION "
                "ISOLATION LEVEL %s" % level)
            cursor.execute("COMMIT")
            cursor.close()
        else:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s or AUTOCOMMIT" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )

# Name SQLAlchemy's dialect loader looks up in this module.
dialect = PGDialect_pg8000
|
wskplho/sl4a
|
refs/heads/master
|
python-build/python-libs/gdata/src/gdata/Crypto/Hash/__init__.py
|
271
|
"""Hashing algorithms
Hash functions take arbitrary strings as input, and produce an output
of fixed size that is dependent on the input; it should never be
possible to derive the input data given only the hash function's
output. Hash functions can be used simply as a checksum, or, in
association with a public-key algorithm, can be used to implement
digital signatures.
The hashing modules here all support the interface described in PEP
247, "API for Cryptographic Hash Functions".
Submodules:
Crypto.Hash.HMAC RFC 2104: Keyed-Hashing for Message Authentication
Crypto.Hash.MD2
Crypto.Hash.MD4
Crypto.Hash.MD5
Crypto.Hash.RIPEMD
Crypto.Hash.SHA
"""
# Submodules re-exported by `from Crypto.Hash import *`.
__all__ = ['HMAC', 'MD2', 'MD4', 'MD5', 'RIPEMD', 'SHA', 'SHA256']
# CVS keyword-expansion revision marker.
__revision__ = "$Id: __init__.py,v 1.6 2003/12/19 14:24:25 akuchling Exp $"
|
pandeydivesh15/Farmers-Portal
|
refs/heads/master
|
crop/migrations/0002_auto_20171031_1011.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-31 10:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Fertilizer.dis_id to ferti_id.

    dependencies = [
        ('crop', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='fertilizer',
            old_name='dis_id',
            new_name='ferti_id',
        ),
    ]
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/formatter/lambdaColon.py
|
83
|
lambda o:o.fullName()
|
codeaudit/pattern-1
|
refs/heads/master
|
pattern/web/cache/__init__.py
|
21
|
#### PATTERN | CACHE ###############################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
try:
import hashlib; md5=hashlib.md5
except:
import md5; md5=md5.new
#### UNICODE #######################################################################################
def decode_string(v, encoding="utf-8"):
    """ Returns the given value as a Unicode string (if possible).

        Byte strings are decoded by trying the given encoding first, then
        windows-1252, then utf-8 ignoring errors; other values are coerced
        with unicode().
    """
    # NOTE(review): `basestring`/`unicode` make this Python 2-only code.
    if isinstance(encoding, basestring):
        # Each entry becomes an argument tuple for str.decode().
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, str):
        for e in encoding:
            try: return v.decode(*e)
            except:
                pass
        # All decode attempts failed; return the byte string unchanged.
        return v
    return unicode(v)
def encode_string(v, encoding="utf-8"):
    """ Returns the given value as a Python byte string (if possible).

        Unicode strings are encoded by trying the given encoding first, then
        windows-1252, then utf-8 ignoring errors; other values are coerced
        with str().
    """
    # NOTE(review): `basestring`/`unicode` make this Python 2-only code.
    if isinstance(encoding, basestring):
        # Each entry becomes an argument tuple for unicode.encode().
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, unicode):
        for e in encoding:
            try: return v.encode(*e)
            except:
                pass
        # All encode attempts failed; return the unicode string unchanged.
        return v
    return str(v)

# Aliases used throughout the package for the default utf-8 behavior.
decode_utf8 = decode_string
encode_utf8 = encode_string
#### CACHE #########################################################################################
# Caching is implemented in URL.download(), which is used by all other downloaders.
import os
import glob
import tempfile
import codecs
import datetime
try:
    # Directory containing this module; fails e.g. when __file__ is unset.
    MODULE = os.path.dirname(os.path.realpath(__file__))
except:
    MODULE = ""

# Fallback cache location inside the system temp directory.
TMP = os.path.join(tempfile.gettempdir(), "pattern_web")

def date_now():
    """Return the current local date and time."""
    return datetime.datetime.today()

def date_modified(path):
    """Return the last-modification time of the file at `path`."""
    # os.stat() tuple index 8 is st_mtime.
    return datetime.datetime.fromtimestamp(os.stat(path)[8])
class Cache(object):

    def __init__(self, path=os.path.join(MODULE, "tmp")):
        """ Cache with data stored as files with hashed filenames.
            Content retrieved from URLs and search engines are stored in cache for performance.
            The path where the cache is stored can be given. This way you can manage persistent
            sets of downloaded data. If path=TMP, cached items are stored in a temporary folder.
        """
        self.path = path

    def _get_path(self):
        return self._path

    def _set_path(self, path):
        # Create the cache folder on first use.
        if not os.path.isdir(path):
            os.makedirs(path)
        self._path = path

    path = property(_get_path, _set_path)

    def _hash(self, k):
        """Return the cache file path for key `k` (MD5 hash of its UTF-8 bytes)."""
        k = encode_utf8(k) # MD5 works on Python byte strings.
        return os.path.join(self.path, md5(k).hexdigest())

    def __len__(self):
        # Number of files currently stored in the cache folder.
        return len(glob.glob(os.path.join(self.path, "*")))

    def __contains__(self, k):
        return os.path.exists(self._hash(k))

    def __getitem__(self, k):
        return self.get(k)

    def __setitem__(self, k, v):
        # Store as UTF-8 prefixed with a BOM so the encoding is self-describing.
        f = open(self._hash(k), "wb")
        f.write(codecs.BOM_UTF8)
        f.write(encode_utf8(v))
        f.close()

    def __delitem__(self, k):
        # Deleting a missing key is a no-op.
        try: os.unlink(self._hash(k))
        except OSError:
            pass

    def get(self, k, unicode=True):
        """ Returns the data stored with the given id.
            With unicode=True, returns a Unicode string.

            Raises KeyError when no item is cached under `k`.
        """
        if k in self:
            f = open(self._hash(k), "rb")
            v = f.read()
            # Strip the BOM written by __setitem__. BUGFIX: the previous
            # lstrip(codecs.BOM_UTF8) treated the BOM as a *set of characters*
            # and would also strip legitimate leading bytes from that set;
            # only the exact prefix must be removed.
            if v.startswith(codecs.BOM_UTF8):
                v = v[len(codecs.BOM_UTF8):]
            f.close()
            if unicode is True:
                return decode_utf8(v)
            else:
                return v
        raise KeyError(k)

    def age(self, k):
        """ Returns the age of the cached item, in days.
        """
        p = self._hash(k)
        # 0 when the item does not exist.
        return os.path.exists(p) and (date_now() - date_modified(p)).days or 0

    def clear(self, age=None):
        """ Clears all items from the cache (whose age is the given amount of days or older).
        """
        n = date_now()
        for p in glob.glob(os.path.join(self.path, "*")):
            if age is None or (n - date_modified(p)).days >= age:
                os.unlink(p)
cache = Cache()
|
msphair/django_echo_bridge
|
refs/heads/master
|
Device.py
|
1
|
import json
class Device:
    """A controllable device exposed to the echo bridge."""

    def __init__(self, name, deviceType, offUrl, onUrl, httpVerb, contentType, contentBody):
        """Record the device identity and the HTTP calls that toggle it."""
        self.name = name
        self.deviceType = deviceType
        self.offUrl = offUrl
        self.onUrl = onUrl
        self.httpVerb = httpVerb
        self.contentBody = contentBody
        self.contentType = contentType

    def encode(self):
        """Return a JSON-serializable dict tagged with '__type__'."""
        encoded = {'__type__': 'Device'}
        encoded['name'] = self.name
        encoded['devicetype'] = self.deviceType
        encoded['offurl'] = self.offUrl
        encoded['onurl'] = self.onUrl
        encoded['httpverb'] = self.httpVerb
        encoded['contentbody'] = self.contentBody
        encoded['contenttype'] = self.contentType
        return encoded
def decodeDevice(jsonObj):
    """json object hook: reconstruct a Device from a tagged dict,
    returning anything else unchanged."""
    if '__type__' not in jsonObj or jsonObj['__type__'] != 'Device':
        return jsonObj
    return Device(jsonObj['name'], jsonObj['devicetype'], jsonObj['offurl'],
                  jsonObj['onurl'], jsonObj['httpverb'], jsonObj['contentbody'],
                  jsonObj['contenttype'])
|
dstiert/Wox
|
refs/heads/master
|
PythonHome/Lib/site-packages/pip/_vendor/requests/certs.py
|
961
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certs.py
~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
def where():
    """Return the preferred certificate bundle."""
    # The CA bundle is vendored alongside this module.
    here = os.path.dirname(__file__)
    return os.path.join(here, 'cacert.pem')
# Print the bundle path when run as a script.
if __name__ == '__main__':
    print(where())
|
youprofit/NewsBlur
|
refs/heads/master
|
apps/search/views.py
|
6027
|
# Create your views here.
|
tienfuc/gdcmdtools
|
refs/heads/master
|
gdmount.py
|
45
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
|
JioCloud/keystone
|
refs/heads/master
|
keystone/common/sql/migrate_repo/versions/041_add_remaining_uses_count_to_trusts.py
|
14
|
# Copyright (c) 2014 Matthieu Huin <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def downgrade_trust_table_with_column_drop(meta, migrate_engine):
    """Drop the remaining_uses column, deleting trusts that were using it."""
    trusts = sqlalchemy.Table('trust', meta, autoload=True)
    # delete trusts with a limited use count, we are downgrading so uses
    # will not be tracked anymore.
    trusts.delete(trusts.c.remaining_uses >= 0).execute()
    trusts.drop_column('remaining_uses')
def upgrade_trust_table(meta, migrate_engine):
    """Add a nullable remaining_uses counter column to the trust table."""
    trusts = sqlalchemy.Table('trust', meta, autoload=True)
    remaining_uses = sqlalchemy.Column('remaining_uses',
                                       sqlalchemy.Integer(),
                                       nullable=True)
    trusts.create_column(remaining_uses)
def upgrade(migrate_engine):
    """Migration entry point: add trust.remaining_uses."""
    metadata = sqlalchemy.MetaData()
    metadata.bind = migrate_engine
    upgrade_trust_table(metadata, migrate_engine)
def downgrade(migrate_engine):
    """Migration entry point: remove trust.remaining_uses."""
    metadata = sqlalchemy.MetaData()
    metadata.bind = migrate_engine
    downgrade_trust_table_with_column_drop(metadata, migrate_engine)
|
mattuuh7/incubator-airflow
|
refs/heads/master
|
tests/ti_deps/deps/test_not_in_retry_period_dep.py
|
31
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime, timedelta
from freezegun import freeze_time
from mock import Mock
from airflow.models import TaskInstance
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.utils.state import State
class NotInRetryPeriodDepTest(unittest.TestCase):
    """Tests for the NotInRetryPeriodDep scheduling dependency."""

    def _get_task_instance(self, state, end_date=None,
                           retry_delay=timedelta(minutes=15)):
        """Build a TaskInstance in `state` whose task retries after `retry_delay`."""
        mock_task = Mock(retry_delay=retry_delay, retry_exponential_backoff=False)
        instance = TaskInstance(task=mock_task, state=state, execution_date=None)
        instance.end_date = end_date
        return instance

    @freeze_time('2016-01-01 15:44')
    def test_still_in_retry_period(self):
        """
        Task instances that are in their retry period should fail this dep
        """
        instance = self._get_task_instance(
            State.UP_FOR_RETRY, end_date=datetime(2016, 1, 1, 15, 30))
        self.assertTrue(instance.is_premature)
        self.assertFalse(NotInRetryPeriodDep().is_met(ti=instance))

    @freeze_time('2016-01-01 15:46')
    def test_retry_period_finished(self):
        """
        Task instance's that have had their retry period elapse should pass this dep
        """
        instance = self._get_task_instance(
            State.UP_FOR_RETRY, end_date=datetime(2016, 1, 1))
        self.assertFalse(instance.is_premature)
        self.assertTrue(NotInRetryPeriodDep().is_met(ti=instance))

    def test_not_in_retry_period(self):
        """
        Task instance's that are not up for retry can not be in their retry period
        """
        instance = self._get_task_instance(State.SUCCESS)
        self.assertTrue(NotInRetryPeriodDep().is_met(ti=instance))
|
intgr/django
|
refs/heads/master
|
tests/m2m_signals/tests.py
|
84
|
"""
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
    """Record each m2m_changed signal as a comparable message dict."""
    message = {
        'instance': kwargs['instance'],
        'action': kwargs['action'],
        'reverse': kwargs['reverse'],
        'model': kwargs['model'],
    }
    pk_set = kwargs['pk_set']
    if pk_set:
        # Resolve the pks so tests can compare model instances directly.
        message['objects'] = list(kwargs['model'].objects.filter(pk__in=pk_set))
    self.m2m_changed_messages.append(message)
def setUp(self):
    """Create the car, part and person fixtures shared by all tests."""
    self.m2m_changed_messages = []
    self.vw, self.bmw, self.toyota = [
        Car.objects.create(name=n) for n in ('VW', 'BMW', 'Toyota')]
    self.wheelset, self.doors, self.engine, self.airbag, self.sunroof = [
        Part.objects.create(name=n)
        for n in ('Wheelset', 'Doors', 'Engine', 'Airbag', 'Sunroof')]
    self.alice, self.bob, self.chuck, self.daisy = [
        Person.objects.create(name=n)
        for n in ('Alice', 'Bob', 'Chuck', 'Daisy')]
def tearDown(self):
    """Disconnect the recording receiver from every m2m through table."""
    for through in (
        Car.default_parts.through,
        Car.optional_parts.through,
        Person.fans.through,
        Person.friends.through,
    ):
        models.signals.m2m_changed.disconnect(
            self.m2m_changed_signal_receiver, through
        )
def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
    """Install the message-recording listener on both car m2m relations."""
    connect = models.signals.m2m_changed.connect
    connect(self.m2m_changed_signal_receiver, Car.optional_parts.through)
    if add_default_parts_before_set_signal:
        # Add a default part while no listener is attached to default_parts,
        # so this add produces no recorded message.
        self.vw.default_parts.add(self.sunroof)
    connect(self.m2m_changed_signal_receiver, Car.default_parts.through)
def test_m2m_relations_add_remove_clear(self):
    """add() fires pre/post_add on both the forward and reverse relation."""
    expected_messages = []

    self._initialize_signal_car(add_default_parts_before_set_signal=True)

    self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
    expected_messages.append({
        'instance': self.vw,
        'action': 'pre_add',
        'reverse': False,
        'model': Part,
        'objects': [self.doors, self.engine, self.wheelset],
    })
    expected_messages.append({
        'instance': self.vw,
        'action': 'post_add',
        'reverse': False,
        'model': Part,
        'objects': [self.doors, self.engine, self.wheelset],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)

    # give the BMW and Toyota some doors as well
    self.doors.car_set.add(self.bmw, self.toyota)
    expected_messages.append({
        'instance': self.doors,
        'action': 'pre_add',
        'reverse': True,
        'model': Car,
        'objects': [self.bmw, self.toyota],
    })
    expected_messages.append({
        'instance': self.doors,
        'action': 'post_add',
        'reverse': True,
        'model': Car,
        'objects': [self.bmw, self.toyota],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)

def test_m2m_relations_signals_remove_relation(self):
    """remove() emits pre/post_remove listing every pk that was passed."""
    self._initialize_signal_car()
    # remove the engine from the self.vw and the airbag (which is not set
    # but is returned)
    self.vw.default_parts.remove(self.engine, self.airbag)
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.vw,
            'action': 'pre_remove',
            'reverse': False,
            'model': Part,
            'objects': [self.airbag, self.engine],
        }, {
            'instance': self.vw,
            'action': 'post_remove',
            'reverse': False,
            'model': Part,
            'objects': [self.airbag, self.engine],
        }
    ])

def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
    """A second m2m relation to the same model fires its own signals."""
    expected_messages = []

    self._initialize_signal_car()

    # give the self.vw some optional parts (second relation to same model)
    self.vw.optional_parts.add(self.airbag, self.sunroof)
    expected_messages.append({
        'instance': self.vw,
        'action': 'pre_add',
        'reverse': False,
        'model': Part,
        'objects': [self.airbag, self.sunroof],
    })
    expected_messages.append({
        'instance': self.vw,
        'action': 'post_add',
        'reverse': False,
        'model': Part,
        'objects': [self.airbag, self.sunroof],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)

    # add airbag to all the cars (even though the self.vw already has one)
    self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
    expected_messages.append({
        'instance': self.airbag,
        'action': 'pre_add',
        'reverse': True,
        'model': Car,
        'objects': [self.bmw, self.toyota],
    })
    expected_messages.append({
        'instance': self.airbag,
        'action': 'post_add',
        'reverse': True,
        'model': Car,
        'objects': [self.bmw, self.toyota],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)

def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
    """remove() through a custom related_name accessor fires reverse signals."""
    self._initialize_signal_car()
    # remove airbag from the self.vw (reverse relation with custom
    # related_name)
    self.airbag.cars_optional.remove(self.vw)
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.airbag,
            'action': 'pre_remove',
            'reverse': True,
            'model': Car,
            'objects': [self.vw],
        }, {
            'instance': self.airbag,
            'action': 'post_remove',
            'reverse': True,
            'model': Car,
            'objects': [self.vw],
        }
    ])

def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
    """clear() emits pre/post_clear messages without an 'objects' entry."""
    self._initialize_signal_car()
    # clear all parts of the self.vw
    self.vw.default_parts.clear()
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.vw,
            'action': 'pre_clear',
            'reverse': False,
            'model': Part,
        }, {
            'instance': self.vw,
            'action': 'post_clear',
            'reverse': False,
            'model': Part,
        }
    ])

def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
    """clear() on the reverse relation fires with reverse=True."""
    self._initialize_signal_car()
    # take all the doors off of cars
    self.doors.car_set.clear()
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.doors,
            'action': 'pre_clear',
            'reverse': True,
            'model': Car,
        }, {
            'instance': self.doors,
            'action': 'post_clear',
            'reverse': True,
            'model': Car,
        }
    ])

def test_m2m_relations_signals_reverse_relation(self):
    """clear() through a custom related_name accessor fires reverse signals."""
    self._initialize_signal_car()
    # take all the airbags off of cars (clear reverse relation with custom
    # related_name)
    self.airbag.cars_optional.clear()
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.airbag,
            'action': 'pre_clear',
            'reverse': True,
            'model': Car,
        }, {
            'instance': self.airbag,
            'action': 'post_clear',
            'reverse': True,
            'model': Car,
        }
    ])
def test_m2m_relations_signals_alternative_ways(self):
expected_messages = []
self._initialize_signal_car()
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
self.vw.default_parts.set([self.wheelset, self.doors, self.engine])
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_clearing_removing(self):
    """set(..., clear=True) clears then re-adds everything; clear=False
    removes only the members that are no longer wanted."""
    expected_messages = []

    self._initialize_signal_car(add_default_parts_before_set_signal=True)

    # set by clearing.
    self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
    # clear=True wipes the relation first; pre/post_clear carry no
    # 'objects' key ...
    expected_messages.append({
        'instance': self.vw,
        'action': 'pre_clear',
        'reverse': False,
        'model': Part,
    })
    expected_messages.append({
        'instance': self.vw,
        'action': 'post_clear',
        'reverse': False,
        'model': Part,
    })
    # ... then everything is added back in one pre_add/post_add pair.
    expected_messages.append({
        'instance': self.vw,
        'action': 'pre_add',
        'reverse': False,
        'model': Part,
        'objects': [self.doors, self.engine, self.wheelset],
    })
    expected_messages.append({
        'instance': self.vw,
        'action': 'post_add',
        'reverse': False,
        'model': Part,
        'objects': [self.doors, self.engine, self.wheelset],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)

    # set by only removing what's necessary.
    self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
    # Only the engine is dropped; wheelset/doors already present, so no
    # add signals fire.
    expected_messages.append({
        'instance': self.vw,
        'action': 'pre_remove',
        'reverse': False,
        'model': Part,
        'objects': [self.engine],
    })
    expected_messages.append({
        'instance': self.vw,
        'action': 'post_remove',
        'reverse': False,
        'model': Part,
        'objects': [self.engine],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_when_inheritance(self):
    """m2m_changed still fires when the instance is a subclass (SportsCar)
    of the model (Car) that declares the relation."""
    expected_messages = []

    self._initialize_signal_car(add_default_parts_before_set_signal=True)

    # Signals still work when model inheritance is involved
    c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
    c4b = Car.objects.get(name='Bugatti')
    c4.default_parts.set([self.doors])
    expected_messages.append({
        'instance': c4,
        'action': 'pre_add',
        'reverse': False,
        'model': Part,
        'objects': [self.doors],
    })
    expected_messages.append({
        'instance': c4,
        'action': 'post_add',
        'reverse': False,
        'model': Part,
        'objects': [self.doors],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)

    # Reverse add: related objects are reported as parent-class (Car)
    # instances (c4b), not as the SportsCar subclass instance (c4).
    self.engine.car_set.add(c4)
    expected_messages.append({
        'instance': self.engine,
        'action': 'pre_add',
        'reverse': True,
        'model': Car,
        'objects': [c4b],
    })
    expected_messages.append({
        'instance': self.engine,
        'action': 'post_add',
        'reverse': True,
        'model': Car,
        'objects': [c4b],
    })
    self.assertEqual(self.m2m_changed_messages, expected_messages)
def _initialize_signal_person(self):
    """Attach the test's m2m_changed receiver to both Person m2m relations."""
    # Install a listener on the two m2m relations.
    for through_model in (Person.fans.through, Person.friends.through):
        models.signals.m2m_changed.connect(
            self.m2m_changed_signal_receiver, through_model
        )
def test_m2m_relations_with_self_add_friends(self):
    """Assigning a self-referential m2m (friends) emits the forward
    (reverse=False) pre_add/post_add signal pair."""
    self._initialize_signal_person()
    self.alice.friends.set([self.bob, self.chuck])
    expected = [
        {
            'instance': self.alice,
            'action': action,
            'reverse': False,
            'model': Person,
            'objects': [self.bob, self.chuck],
        }
        for action in ('pre_add', 'post_add')
    ]
    self.assertEqual(self.m2m_changed_messages, expected)
def test_m2m_relations_with_self_add_fan(self):
    """Adding through the 'fans' self-referential m2m emits the forward
    (reverse=False) add signal pair."""
    self._initialize_signal_person()
    self.alice.fans.set([self.daisy])
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.alice,
            'action': 'pre_add',
            'reverse': False,
            'model': Person,
            'objects': [self.daisy],
        }, {
            'instance': self.alice,
            'action': 'post_add',
            'reverse': False,
            'model': Person,
            'objects': [self.daisy],
        }
    ])
def test_m2m_relations_with_self_add_idols(self):
    """Setting through the reverse accessor ('idols', the related_name of
    'fans') emits the add signal pair with reverse=True."""
    self._initialize_signal_person()
    self.chuck.idols.set([self.alice, self.bob])
    self.assertEqual(self.m2m_changed_messages, [
        {
            'instance': self.chuck,
            'action': 'pre_add',
            'reverse': True,
            'model': Person,
            'objects': [self.alice, self.bob],
        }, {
            'instance': self.chuck,
            'action': 'post_add',
            'reverse': True,
            'model': Person,
            'objects': [self.alice, self.bob],
        }
    ])
|
ymero/learn-python3
|
refs/heads/master
|
samples/packages/pil/use_pil_resize.py
|
20
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from PIL import Image

# Open a JPEG file from the current working directory.
im = Image.open('test.jpg')
# Read the original dimensions.
w, h = im.size
print('Original image size: %sx%s' % (w, h))
# Scale down so each side is at most 50% of the original.
# thumbnail() resizes in place and PRESERVES the aspect ratio, so the
# resulting size is not necessarily exactly (w//2, h//2); report the
# actual size instead of the requested bounding box.
im.thumbnail((w // 2, h // 2))
print('Resize image to: %sx%s' % im.size)
# Save the scaled-down image in JPEG format.
im.save('thumbnail.jpg', 'jpeg')
|
gwq5210/litlib
|
refs/heads/master
|
thirdparty/sources/boost_1_60_0/tools/build/src/tools/types/exe.py
|
75
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from b2.build import type
def register ():
    # Register the EXE target type with Boost.Build: presumably '.exe' is
    # the suffix on NT/CYGWIN and the second call registers the same type
    # with no suffix for other platforms -- TODO confirm against
    # b2.build.type.register_type's signature.
    type.register_type ('EXE', ['exe'], None, ['NT', 'CYGWIN'])
    type.register_type ('EXE', [], None, [])

register ()
|
markeTIC/OCB
|
refs/heads/8.0
|
openerp/modules/loading.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import itertools
import logging
import os
import sys
import threading
import time
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.modules.registry
import openerp.osv as osv
import openerp.tools as tools
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models, adapt_version
from module import runs_post_install
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('openerp.tests')
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

    :param graph: graph of module nodes to load
    :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
    :param perform_checks: whether module descriptors should be checked for validity (prints warnings
                           for same cases)
    :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
    :return: list of modules that were installed or updated
    """
    def load_test(module_name, idref, mode):
        # Run a module's YAML/test data files; returns True on success.
        # Commit first so the test starts from a clean transaction.
        cr.commit()
        try:
            _load_data(cr, module_name, idref, mode, 'test')
            return True
        except Exception:
            _test_logger.exception(
                'module %s: an exception occurred in a test', module_name)
            return False
        finally:
            if tools.config.options['test_commit']:
                cr.commit()
            else:
                cr.rollback()
                # avoid keeping stale xml_id, etc. in cache
                openerp.modules.registry.RegistryManager.clear_caches(cr.dbname)

    def _get_files_of_kind(kind):
        # Expand a logical kind ('demo'/'data') into the legacy manifest
        # keys and collect the matching data files from the manifest.
        if kind == 'demo':
            kind = ['demo_xml', 'demo']
        elif kind == 'data':
            kind = ['init_xml', 'update_xml', 'data']
        if isinstance(kind, str):
            kind = [kind]
        files = []
        for k in kind:
            for f in package.data[k]:
                files.append(f)
                if k.endswith('_xml') and not (k == 'init_xml' and not f.endswith('.xml')):
                    # init_xml, update_xml and demo_xml are deprecated except
                    # for the case of init_xml with yaml, csv and sql files as
                    # we can't specify noupdate for those file.
                    correct_key = 'demo' if k.count('demo') else 'data'
                    _logger.warning(
                        "module %s: key '%s' is deprecated in favor of '%s' for file '%s'.",
                        package.name, k, correct_key, f
                    )
        return files

    def _load_data(cr, module_name, idref, mode, kind):
        """

        kind: data, demo, test, init_xml, update_xml, demo_xml.

        noupdate is False, unless it is demo data or it is csv data in
        init mode.

        """
        try:
            if kind in ('demo', 'test'):
                # flag the thread so the ORM knows test/demo data is loading
                threading.currentThread().testing = True
            for filename in _get_files_of_kind(kind):
                _logger.info("loading %s/%s", module_name, filename)
                noupdate = False
                if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')):
                    noupdate = True
                tools.convert_file(cr, module_name, filename, idref, mode, noupdate, kind, report)
        finally:
            if kind in ('demo', 'test'):
                threading.currentThread().testing = False

    processed_modules = []
    loaded_modules = []
    registry = openerp.registry(cr.dbname)
    migrations = openerp.modules.migration.MigrationManager(cr, graph)
    _logger.info('loading %d modules...', len(graph))

    registry.clear_manual_fields()

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    t0_sql = openerp.sql_db.sql_counter

    for index, package in enumerate(graph):
        module_name = package.name
        module_id = package.id

        if skip_modules and module_name in skip_modules:
            continue

        # 'pre' migration scripts run before the module's models are loaded
        migrations.migrate_module(package, 'pre')
        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['openerp.addons.%s' % (module_name,)]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        models = registry.load(cr, package)

        loaded_modules.append(package.name)
        if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
            registry.setup_models(cr, partial=True)
            init_module_models(cr, package.name, models)

        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_module_models() above.
            modobj = registry['ir.module.module']

            if perform_checks:
                modobj.check(cr, SUPERUSER_ID, [module_id])

            if package.state=='to upgrade':
                # upgrading the module information
                modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
            _load_data(cr, module_name, idref, mode, kind='data')
            has_demo = hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed')
            if has_demo:
                _load_data(cr, module_name, idref, mode, kind='demo')
                cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
                modobj.invalidate_cache(cr, SUPERUSER_ID, ['demo'], [module_id])

            # 'post' migration scripts run after the module's data is loaded
            migrations.migrate_module(package, 'post')

            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            registry._init_modules.add(package.name)

            # validate all the views at a whole
            registry['ir.ui.view']._validate_module_views(cr, SUPERUSER_ID, module_name)

            if has_demo:
                # launch tests only in demo mode, allowing tests to use demo data.
                if tools.config.options['test_enable']:
                    # Yamel test
                    report.record_result(load_test(module_name, idref, mode))

                    # Python tests
                    ir_http = registry['ir.http']
                    if hasattr(ir_http, '_routing_map'):
                        # Force routing map to be rebuilt between each module test suite
                        del(ir_http._routing_map)
                    report.record_result(openerp.modules.module.run_unit_tests(module_name, cr.dbname))

            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver})

            # Update translations for all installed languages
            modobj.update_translations(cr, SUPERUSER_ID, [module_id], None, {'overwrite': openerp.tools.config["overwrite_existing_translations"]})

            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        registry._init_modules.add(package.name)
        # commit per module so a later failure does not roll back the
        # modules already loaded
        cr.commit()

    _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, openerp.sql_db.sql_counter - t0_sql)

    registry.clear_manual_fields()

    cr.commit()

    return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
    """Log a warning for requested module names unknown to ir_module_module."""
    requested = set(module_names)
    if 'base' in requested:
        # ignore dummy 'all' module
        if 'all' in requested:
            requested.remove('all')
    if not requested:
        return
    cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(requested),))
    if cr.dictfetchone()['count'] == len(requested):
        return
    # find out what module name(s) are incorrect:
    cr.execute("SELECT name FROM ir_module_module")
    known = set(row['name'] for row in cr.dictfetchall())
    _logger.warning('invalid module names, ignored: %s', ", ".join(requested - known))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks):
    """Loads modules marked with ``states``, adding them to ``graph`` and
       ``loaded_modules`` and returns a list of installed/upgraded modules."""
    processed_modules = []
    # Loop until the query returns no new module: loading a module can flip
    # further modules (e.g. auto-installed dependencies) into ``states``.
    while True:
        cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
        module_list = [name for (name,) in cr.fetchall() if name not in graph]
        if not module_list:
            break
        graph.add_modules(cr, module_list, force)
        _logger.debug('Updating graph with %d more modules', len(module_list))
        loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks)
        processed_modules.extend(processed)
        loaded_modules.extend(loaded)
        # stop as well when nothing was actually processed this pass
        if not processed:
            break
    return processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
    """Load (and optionally install/upgrade/uninstall) every module of
    database ``db``; may return a brand-new registry if modules were
    uninstalled (see STEP 6)."""
    initialize_sys_path()

    force = []
    if force_demo:
        force.append('demo')

    cr = db.cursor()
    try:
        if not openerp.modules.db.is_initialized(cr):
            _logger.info("init db")
            openerp.modules.db.initialize(cr)
            update_module = True # process auto-installed modules
            tools.config["init"]["all"] = 1
            tools.config['update']['all'] = 1
            if not tools.config['without_demo']:
                tools.config["demo"]['all'] = 1

        # This is a brand new registry, just created in
        # openerp.modules.registry.RegistryManager.new().
        registry = openerp.registry(cr.dbname)

        if 'base' in tools.config['update'] or 'all' in tools.config['update']:
            cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))

        # STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
        graph = openerp.modules.graph.Graph()
        graph.add_module(cr, 'base', force)
        if not graph:
            _logger.critical('module base cannot be loaded! (hint: verify addons-path)')
            raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))

        # processed_modules: for cleanup step after install
        # loaded_modules: to avoid double loading
        report = registry._assertion_report
        loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)

        if tools.config['load_language'] or update_module:
            # some base models are used below, so make sure they are set up
            registry.setup_models(cr, partial=True)

        if tools.config['load_language']:
            for lang in tools.config['load_language'].split(','):
                tools.load_language(cr, lang)

        # STEP 2: Mark other modules to be loaded/updated
        if update_module:
            modobj = registry['ir.module.module']
            _logger.info('updating modules list')
            modobj.update_list(cr, SUPERUSER_ID)

            _check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))

            mods = [k for k in tools.config['init'] if tools.config['init'][k]]
            if mods:
                ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
                if ids:
                    modobj.button_install(cr, SUPERUSER_ID, ids)

            mods = [k for k in tools.config['update'] if tools.config['update'][k]]
            if mods:
                ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
                if ids:
                    modobj.button_upgrade(cr, SUPERUSER_ID, ids)

            cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
            modobj.invalidate_cache(cr, SUPERUSER_ID, ['state'])

        # STEP 3: Load marked modules (skipping base which was done in STEP 1)
        # IMPORTANT: this is done in two parts, first loading all installed or
        #            partially installed modules (i.e. installed/to upgrade), to
        #            offer a consistent system to the second part: installing
        #            newly selected modules.
        #            We include the modules 'to remove' in the first step, because
        #            they are part of the "currently installed" modules. They will
        #            be dropped in STEP 6 later, before restarting the loading
        #            process.
        # IMPORTANT 2: We have to loop here until all relevant modules have been
        #              processed, because in some rare cases the dependencies have
        #              changed, and modules that depend on an uninstalled module
        #              will not be processed on the first pass.
        #              It's especially useful for migrations.
        previously_processed = -1
        while previously_processed < len(processed_modules):
            previously_processed = len(processed_modules)
            processed_modules += load_marked_modules(cr, graph,
                ['installed', 'to upgrade', 'to remove'],
                force, status, report, loaded_modules, update_module)
            if update_module:
                processed_modules += load_marked_modules(cr, graph,
                    ['to install'], force, status, report,
                    loaded_modules, update_module)

        registry.setup_models(cr)

        # STEP 4: Finish and cleanup installations
        if processed_modules:
            # warn about models lacking any access rule
            cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
            for (model, name) in cr.fetchall():
                if model in registry and not registry[model].is_transient() and not isinstance(registry[model], openerp.osv.orm.AbstractModel):
                    _logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,0,0,0',
                        model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))

            # Temporary warning while we remove access rights on osv_memory objects, as they have
            # been replaced by owner-only access rights
            cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
            for (model, name) in cr.fetchall():
                if model in registry and registry[model].is_transient():
                    _logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)

            cr.execute("SELECT model from ir_model")
            for (model,) in cr.fetchall():
                if model in registry:
                    registry[model]._check_removed_columns(cr, log=True)
                else:
                    _logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)

            # Cleanup orphan records
            registry['ir.model.data']._process_end(cr, SUPERUSER_ID, processed_modules)

        for kind in ('init', 'demo', 'update'):
            tools.config[kind] = {}

        cr.commit()

        # STEP 5: Cleanup menus
        # Remove menu items that are not referenced by any of other
        # (child) menu item, ir_values, or ir_model_data.
        # TODO: This code could be a method of ir_ui_menu. Remove menu without actions of children
        if update_module:
            # repeat until a fixed point: deleting a leaf menu can orphan
            # its parent, which becomes deletable on the next pass
            while True:
                cr.execute('''delete from
                        ir_ui_menu
                    where
                        (id not IN (select parent_id from ir_ui_menu where parent_id is not null))
                    and
                        (id not IN (select res_id from ir_values where model='ir.ui.menu'))
                    and
                        (id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
                cr.commit()
                if not cr.rowcount:
                    break
                else:
                    _logger.info('removed %d unused menus', cr.rowcount)

        # STEP 6: Uninstall modules to remove
        if update_module:
            # Remove records referenced from ir_model_data for modules to be
            # removed (and removed the references from ir_model_data).
            cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s", ('to remove',))
            modules_to_remove = dict(cr.fetchall())
            if modules_to_remove:
                # uninstall hooks run in reverse dependency order
                pkgs = reversed([p for p in graph if p.name in modules_to_remove])
                for pkg in pkgs:
                    uninstall_hook = pkg.info.get('uninstall_hook')
                    if uninstall_hook:
                        py_module = sys.modules['openerp.addons.%s' % (pkg.name,)]
                        getattr(py_module, uninstall_hook)(cr, registry)

                registry['ir.module.module'].module_uninstall(cr, SUPERUSER_ID, modules_to_remove.values())

                # Recursive reload, should only happen once, because there should be no
                # modules to remove next time
                cr.commit()
                _logger.info('Reloading registry once more after uninstalling modules')
                openerp.api.Environment.reset()
                return openerp.modules.registry.RegistryManager.new(cr.dbname, force_demo, status, update_module)

        # STEP 7: verify custom views on every model
        if update_module:
            Views = registry['ir.ui.view']
            custom_view_test = True
            for model in registry.models.keys():
                if not Views._validate_custom_views(cr, SUPERUSER_ID, model):
                    custom_view_test = False
                    _logger.error('invalid custom view(s) for model %s', model)
            report.record_result(custom_view_test)

        if report.failures:
            _logger.error('At least one test failed when loading the modules.')
        else:
            _logger.info('Modules loaded.')

        # STEP 8: call _register_hook on every model
        for model in registry.models.values():
            model._register_hook(cr)

        # STEP 9: Run the post-install tests
        cr.commit()

        t0 = time.time()
        t0_sql = openerp.sql_db.sql_counter
        if openerp.tools.config['test_enable']:
            if update_module:
                cr.execute("SELECT name FROM ir_module_module WHERE state='installed' and name = ANY(%s)", (processed_modules,))
            else:
                cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
            for module_name in cr.fetchall():
                report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
            _logger.log(25, "All post-tested in %.2fs, %s queries", time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
    finally:
        cr.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tgsmith61591/pyramid
|
refs/heads/master
|
pmdarima/arima/tests/test_context.py
|
1
|
# -*- coding: utf-8 -*-
from pmdarima.arima.auto import StepwiseContext, auto_arima
from pmdarima.arima._context import ContextStore, ContextType
from pmdarima.arima import _context as context_lib
from pmdarima.datasets import load_lynx, load_wineind
from unittest import mock
import threading
import collections
import pytest
import warnings
lynx = load_lynx()
wineind = load_wineind()
# test StepwiseContext parameter validation
# test StepwiseContext parameter validation
@pytest.mark.parametrize(
    'max_steps,max_dur', [
        pytest.param(-1, None),
        pytest.param(0, None),
        pytest.param(1001, None),
        pytest.param(1100, None),
        pytest.param(None, -1),
        pytest.param(None, 0),
    ])
def test_stepwise_context_args(max_steps, max_dur):
    # Non-positive or out-of-range limits must be rejected at construction.
    with pytest.raises(ValueError):
        StepwiseContext(max_steps=max_steps, max_dur=max_dur)
# test auto_arima stepwise run with StepwiseContext
def test_auto_arima_with_stepwise_context():
    samp = lynx[:8]
    # max_steps=3 is low enough that the stepwise search must stop early
    # and warn about hitting the maximum number of tries.
    with StepwiseContext(max_steps=3, max_dur=30):
        with pytest.warns(UserWarning) as uw:
            auto_arima(samp, suppress_warnings=False, stepwise=True,
                       error_action='ignore')

    # assert that max_steps were taken
    assert any(str(w.message)
               .startswith('stepwise search has reached the '
                           'maximum number of tries') for w in uw)
# test effective context info in nested context scenario
def test_nested_context():
    ctx1_data = {'max_dur': 30}
    ctx2_data = {'max_steps': 5}
    ctx1 = StepwiseContext(**ctx1_data)
    ctx2 = StepwiseContext(**ctx2_data)

    with ctx1, ctx2:
        effective_ctx_data = ContextStore.get_or_empty(
            ContextType.STEPWISE)
        expected_ctx_data = ctx1_data.copy()
        expected_ctx_data.update(ctx2_data)

        # The effective context must equal the union of both dicts, checked
        # in both directions so neither side has extra differing keys.
        assert all(effective_ctx_data[key] == expected_ctx_data[key]
                   for key in expected_ctx_data.keys())

        assert all(effective_ctx_data[key] == expected_ctx_data[key]
                   for key in effective_ctx_data.keys())
# Test a context honors the max duration
def test_max_dur():
    # set arbitrarily low to guarantee will always pass after one iter
    with StepwiseContext(max_dur=.5), \
            pytest.warns(UserWarning) as uw:
        auto_arima(lynx, stepwise=True)

    # assert that max_dur was reached
    assert any(str(w.message)
               .startswith('early termination') for w in uw)
# Test that a context after the first will not inherit the first's attrs
def test_subsequent_contexts():
    # Force a very fast fit
    with StepwiseContext(max_dur=.5), \
            pytest.warns(UserWarning):
        auto_arima(lynx, stepwise=True)

    # Out of scope, should be EMPTY
    ctx = ContextStore.get_or_empty(ContextType.STEPWISE)
    assert ctx.get_type() is ContextType.EMPTY

    # Now show that we DON'T hit early termination by time here
    with StepwiseContext(max_steps=100), \
            warnings.catch_warnings(record=True) as uw:
        # the new context must NOT carry max_dur over from the first one
        ctx = ContextStore.get_or_empty(ContextType.STEPWISE)
        assert ctx.get_type() is ContextType.STEPWISE
        assert ctx.max_dur is None

        auto_arima(lynx, stepwise=True)

        # assert that max_dur was NOT reached
        if uw:
            assert not any(str(w.message)
                           .startswith('early termination') for w in uw)
# test param validation of ContextStore's add, get and remove members
def test_add_get_remove_context_args():
    # None is not a valid context for any of the three operations.
    with pytest.raises(ValueError):
        ContextStore._add_context(None)

    with pytest.raises(ValueError):
        ContextStore._remove_context(None)

    with pytest.raises(ValueError):
        ContextStore.get_context(None)
def test_context_store_accessible_across_threads():
    """The context store is shared state visible from every thread."""
    # Make sure it's completely empty by patching it
    d = {}
    with mock.patch('pmdarima.arima._context._ctx.store', d):
        # pushes onto the Context Store
        def push(n):
            # n is the number of times this has been executed before. If > 0,
            # assert there is a context there
            if n > 0:
                assert len(context_lib._ctx.store[ContextType.STEPWISE]) == n
            else:
                context_lib._ctx.store[ContextType.STEPWISE] = \
                    collections.deque()

            new_ctx = StepwiseContext()
            context_lib._ctx.store[ContextType.STEPWISE].append(new_ctx)
            assert len(context_lib._ctx.store[ContextType.STEPWISE]) == n + 1

        # Each thread sees the deque left behind by the previous threads,
        # proving the store is not thread-local.
        for i in range(5):
            t = threading.Thread(target=push, args=(i,))
            t.start()
            t.join(1)  # it shouldn't take even close to this time

    # Assert the mock has lifted
    assert context_lib._ctx.store is not d
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/cffi-1.1.2/testing/support.py
|
5
|
import sys
if sys.version_info < (3,):
    # Python 2: `u + "..."` re-evaluates the literal as a unicode literal,
    # letting test code write u+'...' where Python 3 would just use str.
    __all__ = ['u']

    class U(object):
        def __add__(self, other):
            # Rebuild the repr as a u'...' literal; the replace() calls
            # un-escape \u/\U sequences that repr() doubled.
            return eval('u'+repr(other).replace(r'\\u', r'\u')
                                       .replace(r'\\U', r'\U'))
    u = U()
    # sanity checks of the escape handling
    assert u+'a\x00b' == eval(r"u'a\x00b'")
    assert u+'a\u1234b' == eval(r"u'a\u1234b'")
    assert u+'a\U00012345b' == eval(r"u'a\U00012345b'")
else:
    # Python 3: strings are already unicode; provide no-op aliases so test
    # code can use `u + s`, `unicode` and `long` uniformly.
    __all__ = ['u', 'unicode', 'long']
    u = ""
    unicode = str
    long = int
|
bukepo/openthread
|
refs/heads/master
|
tests/toranj/test-603-channel-manager-announce-recovery.py
|
7
|
#!/usr/bin/env python3
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from wpan import verify
import wpan
import time
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Orphaned node attach through MLE Announcement
# Derive the test name from the script filename (strip a '.py' suffix).
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
def verify_channel(nodes, new_channel, wait_time=20):
    """
    This function checks the channel on a given list of `nodes` and verifies that all nodes
    switch to a given `new_channel` (as int) within certain `wait_time` (int and in seconds)
    """
    start_time = time.time()

    while True:
        on_channel = [int(node.get(wpan.WPAN_CHANNEL), 0) == new_channel for node in nodes]
        if all(on_channel):
            break
        elapsed = time.time() - start_time
        if elapsed > wait_time:
            print('Took too long to switch to channel {} ({}>{} sec)'.format(new_channel,
                                                                             elapsed, wait_time))
            exit(1)
        time.sleep(0.1)
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances

router = wpan.Node()
c1 = wpan.Node()
c2 = wpan.Node()

all_nodes = [router, c1, c2]

# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes

wpan.Node.init_all_nodes()

# -----------------------------------------------------------------------------------------------------------------------
# Build network topology

router.form('announce-tst', channel=11)

c1.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c2.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)

# Fast poll interval so the sleepy children notice network changes quickly.
c1.set(wpan.WPAN_POLL_INTERVAL, '500')
c2.set(wpan.WPAN_POLL_INTERVAL, '500')

c1.set(wpan.WPAN_THREAD_DEVICE_MODE, '5')
c2.set(wpan.WPAN_THREAD_DEVICE_MODE, '5')

# -----------------------------------------------------------------------------------------------------------------------
# Test implementation

# Reset c2 and keep it in detached state
c2.set('Daemon:AutoAssociateAfterReset', 'false')
c2.reset()

# Switch the rest of network to channel 26
router.set(wpan.WPAN_CHANNEL_MANAGER_NEW_CHANNEL, '26')
verify_channel([router, c1], 26)

# Now re-enable c2 and verify that it does attach to router and is on channel 26
# c2 would go through the ML Announce recovery.

c2.set('Daemon:AutoAssociateAfterReset', 'true')
c2.reset()
# Immediately after reset c2 is still on the old channel (11).
verify(int(c2.get(wpan.WPAN_CHANNEL), 0) == 11)

# wait for 20s for c2 to be attached/associated

def check_c2_is_associated():
    verify(c2.is_associated())

wpan.verify_within(check_c2_is_associated, 20)

# Check that c2 is now on channel 26.
verify(int(c2.get(wpan.WPAN_CHANNEL), 0) == 26)

# -----------------------------------------------------------------------------------------------------------------------
# Test finished

wpan.Node.finalize_all_nodes()

print('\'{}\' passed.'.format(test_name))
|
tntnatbry/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/estimators/__init__.py
|
5
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train():  # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval():  # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.