| repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tempbottle/python-driver | tests/integration/standard/test_cython_protocol_handlers.py | 6 | 4435 |
"""Test the various Cython-based message deserializers"""
# Based on test_custom_protocol_handler.py
try:
import unittest2 as unittest
except ImportError:
import unittest
from cassandra.query import tuple_factory
from cassandra.cluster import Cluster
from cassandra.protocol import ProtocolHandler, LazyProtocolHandler, NumpyProtocolHandler
from tests.integration import use_singledc, PROTOCOL_VERSION
from tests.integration.datatype_utils import update_datatypes
from tests.integration.standard.utils import (
create_table_with_all_types, get_all_primitive_params, get_primitive_datatypes)
from tests.unit.cython.utils import cythontest, numpytest
def setup_module():
use_singledc()
update_datatypes()
class CythonProtocolHandlerTest(unittest.TestCase):
N_ITEMS = 10
@classmethod
def setUpClass(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE testspace WITH replication = "
"{ 'class' : 'SimpleStrategy', 'replication_factor': '1'}")
cls.session.set_keyspace("testspace")
cls.colnames = create_table_with_all_types("test_table", cls.session, cls.N_ITEMS)
@classmethod
def tearDownClass(cls):
cls.session.execute("DROP KEYSPACE testspace")
cls.cluster.shutdown()
@cythontest
def test_cython_parser(self):
"""
Test Cython-based parser that returns a list of tuples
"""
verify_iterator_data(self.assertEqual, get_data(ProtocolHandler))
@cythontest
def test_cython_lazy_parser(self):
"""
Test Cython-based parser that returns an iterator of tuples
"""
verify_iterator_data(self.assertEqual, get_data(LazyProtocolHandler))
@numpytest
def test_numpy_parser(self):
"""
Test Numpy-based parser that returns a NumPy array
"""
# arrays = { 'a': arr1, 'b': arr2, ... }
arrays = get_data(NumpyProtocolHandler)
colnames = self.colnames
datatypes = get_primitive_datatypes()
for colname, datatype in zip(colnames, datatypes):
arr = arrays[colname]
self.match_dtype(datatype, arr.dtype)
verify_iterator_data(self.assertEqual, arrays_to_list_of_tuples(arrays, colnames))
def match_dtype(self, datatype, dtype):
"""Match a string cqltype (e.g. 'int' or 'blob') with a numpy dtype"""
if datatype == 'smallint':
self.match_dtype_props(dtype, 'i', 2)
elif datatype == 'int':
self.match_dtype_props(dtype, 'i', 4)
elif datatype in ('bigint', 'counter'):
self.match_dtype_props(dtype, 'i', 8)
elif datatype == 'float':
self.match_dtype_props(dtype, 'f', 4)
elif datatype == 'double':
self.match_dtype_props(dtype, 'f', 8)
else:
self.assertEqual(dtype.kind, 'O', msg=(dtype, datatype))
def match_dtype_props(self, dtype, kind, size, signed=None):
self.assertEqual(dtype.kind, kind, msg=dtype)
self.assertEqual(dtype.itemsize, size, msg=dtype)
def arrays_to_list_of_tuples(arrays, colnames):
"""Convert a dict of arrays (as given by the numpy protocol handler) to a list of tuples"""
first_array = arrays[colnames[0]]
return [tuple(arrays[colname][i] for colname in colnames)
for i in range(len(first_array))]
def get_data(protocol_handler):
"""
Get some data from the test table.
    :param protocol_handler: the protocol handler class used to deserialize the result rows
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect(keyspace="testspace")
# use our custom protocol handler
session.client_protocol_handler = protocol_handler
session.row_factory = tuple_factory
results = session.execute("SELECT * FROM test_table")
session.shutdown()
return results
def verify_iterator_data(assertEqual, results):
"""
Check the result of get_data() when this is a list or
iterator of tuples
"""
for result in results:
params = get_all_primitive_params(result[0])
assertEqual(len(params), len(result),
msg="Not the right number of columns?")
for expected, actual in zip(params, result):
assertEqual(actual, expected)
| apache-2.0 | 1,678,659,623,024,413,400 | 33.379845 | 95 | 0.656144 | false | 3.984726 | true | false | false |
kaneorotar/Mabinogi_INI_Script | #skilliconDB_Compat.py | 1 | 3126 |
# -*- coding: utf-8 -*-
import os
import codecs
import sys
import re
import datetime
import xml.etree.ElementTree as ET
def getTextID(txt):
txtgp = re.search(r"_LT\[xml.[^\.]*.([0-9]+)\]", txt)
if txtgp is None:
return ""
return int(txtgp.group(1))
targetName = "skillinfo"
targetTXTName = targetName
localeName = ""
outputName = "Mabi_Skillicon"
dataDBText = {}
dataDB = {}
hasTXT = False
hasXML = False
fileList = os.listdir("./data/")
for fileN in fileList:
if hasXML and hasTXT:
break
txtNameMatch = re.match(targetTXTName+".([a-zA-Z]*).txt", fileN)
if txtNameMatch is not None:
targetTXTName = fileN
localeName = txtNameMatch.group(1)
hasTXT = True
continue
xmlNameMatch = re.match(targetName+".xml", fileN)
if xmlNameMatch is not None:
hasXML = True
continue
if hasTXT is False:
print("Missing "+targetTXTName+" TXT file.")
sys.exit()
if hasXML is False:
print("Missing "+targetName+" XML file.")
sys.exit()
today = datetime.datetime.now().strftime("%Y%m%d")
outdir = os.getcwd()+"/patch-"+localeName+"-"+today+"/mod/"
print("Output: " + outdir)
try:
os.makedirs(outdir)
except:
pass
#targetName.XXXXX.txt
infilename = targetTXTName
try:
fi = codecs.open("./data/" + infilename,'r', encoding='utf-16')
line = fi.readline()
fi.seek(0)
except:
fi.close()
fi = codecs.open("./data/" + infilename,'r', encoding='utf-8-sig')
for line in fi:
oline = re.match(r"([0-9]{0,8})\t(([^\r])+)\r\n", line)
if oline is not None:
dataDBText[int(oline.group(1))] = oline.group(2)
fi.close()
print(infilename + " processed.")
#targetName.xml
infilename = targetName + ".xml"
tree = ET.parse("./data/" + infilename)
root = tree.getroot()
for elelist in list(root):
for ele in elelist:
ID = int(ele.attrib["SkillID"])
finalName = "0,0,0"
if "ImageFile" in ele.attrib:
imgName = ele.attrib["ImageFile"].lower()
imgG = re.search("data/gfx/image/gui_icon_skill_([^\.]*).dds", imgName)
if imgG != None:
imgNameIdx = 0
try:
imgNameIdx = int(imgG.group(1))
except ValueError:
pass
if imgNameIdx >= 4: # dds not implemented in TinTimer
pass
elif "PositionX" in ele.attrib and "PositionY" in ele.attrib:
posX = int(ele.attrib["PositionX"])
posY = int(ele.attrib["PositionY"])
finalName = str(imgNameIdx)+','+str(posX)+','+str(posY)
if ID in dataDB.keys():
if "Locale" in ele.attrib:
if ele.attrib["Locale"] != localeName:
continue
dataDB[ID] = finalName
print(infilename + " processed.")
dataIDs = list(dataDB.keys())
dataIDs.sort()
fo = codecs.open(outdir+outputName+".ini", 'w', encoding="utf-16")
fo.write("["+outputName+"]\r\n")
for key in dataIDs:
fo.write(str(key)+"="+dataDB[key]+"\r\n")
fo.close()
print(outputName + ".ini generated.")
| mit | -2,300,442,473,329,194,500 | 26.191304 | 83 | 0.581574 | false | 3.27673 | false | false | false
MichaelMraka/dnf-plugins-extras | plugins/dnfpluginsextras/__init__.py | 4 | 3312 |
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
""" Common code for dnf-plugins-extras"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from gettext import NullTranslations
from sys import version_info
import argparse
import dnf.exceptions
import gettext
import logging
# python 3 compatibility settings
if version_info.major >= 3:
PY3 = True
# u?gettext dont exists in python3 NullTranslations
NullTranslations.ugettext = NullTranslations.gettext
NullTranslations.ungettext = NullTranslations.ngettext
t = gettext.translation('dnf-plugins-extras', fallback=True)
_ = t.ugettext
P_ = t.ungettext
logger = logging.getLogger('dnf.plugin')
class ArgumentParser(argparse.ArgumentParser):
"""Parses the argument and options given to a tool from DNF.
    The default help options (-h, --help) are disabled and a custom --help-cmd
    option is added by default.
    Errors in parsing options/arguments will print the help and raise
    a dnf.exceptions.Error.
"""
def __init__(self, cmd, **kwargs):
argparse.ArgumentParser.__init__(self, prog='dnf %s' % cmd,
add_help=False, **kwargs)
self.add_argument('--help-cmd', action='store_true',
help=_('show this help about this tool'))
def error(self, message):
"""Overload the default error method.
        We don't want the default exit action on parse
        errors; just raise an AttributeError we can catch.
"""
raise AttributeError(message)
def parse_args(self, args):
try:
opts = argparse.ArgumentParser.parse_args(self, args)
except AttributeError as e:
self.print_help()
raise dnf.exceptions.Error(str(e))
return opts
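# --- Added illustrative sketch (not part of the original module) ---
# Shows how a plugin command might drive the ArgumentParser defined above.
# The command name 'example-tool' and the '--quiet' flag are hypothetical
# placeholders, not real dnf-plugins-extras options.
def _example_parse(args=None):
    """Parse a hypothetical plugin command line and return the options."""
    parser = ArgumentParser('example-tool')
    parser.add_argument('--quiet', action='store_true',
                        help=_('suppress most output'))
    opts = parser.parse_args(args if args is not None else ['--quiet'])
    if opts.help_cmd:
        parser.print_help()
    return opts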
def is_erasing(transaction, pkg):
"""Check if package removing in transaction
Args:
transaction (dnf.transaction.Transaction): Transaction instance.
pkg (str): Package name to check.
Returns:
bool: True if pkg removes by transaction, False otherwise.
"""
installed = set([package.name for package in transaction.install_set])
erased = set([package.name for package in transaction.remove_set])
# Don't run tracer when uninstalling it
if pkg in erased - installed:
return True
else:
return False
| gpl-2.0 | -4,297,217,325,143,319,600 | 34.234043 | 77 | 0.696256 | false | 4.284605 | false | false | false |
ity/pants | src/python/pants/engine/legacy/parser.py | 1 | 3768 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import threading
import six
from pants.base.build_file_target_factory import BuildFileTargetFactory
from pants.base.parse_context import ParseContext
from pants.engine.legacy.structs import BundleAdaptor, Globs, RGlobs, TargetAdaptor, ZGlobs
from pants.engine.objects import Serializable
from pants.engine.parser import Parser
from pants.util.memo import memoized_method, memoized_property
class LegacyPythonCallbacksParser(Parser):
"""A parser that parses the given python code into a list of top-level objects.
Only Serializable objects with `name`s will be collected and returned. These objects will be
addressable via their name in the parsed namespace.
This parser attempts to be compatible with existing legacy BUILD files and concepts including
macros and target factories.
"""
_objects = []
_lock = threading.Lock()
@classmethod
@memoized_method
def _get_symbols(cls, symbol_table_cls):
symbol_table = symbol_table_cls.table()
# TODO: nasty escape hatch
aliases = symbol_table_cls.aliases()
class Registrar(BuildFileTargetFactory):
def __init__(self, type_alias, object_type):
self._type_alias = type_alias
self._object_type = object_type
self._serializable = Serializable.is_serializable_type(self._object_type)
@memoized_property
def target_types(self):
return [self._object_type]
def __call__(self, *args, **kwargs):
name = kwargs.get('name')
if name and self._serializable:
obj = self._object_type(type_alias=self._type_alias, **kwargs)
cls._objects.append(obj)
return obj
else:
return self._object_type(*args, **kwargs)
# Compute a single ParseContext for a default path, which we will mutate for each parsed path.
symbols = {}
for alias, target_macro_factory in aliases.target_macro_factories.items():
for target_type in target_macro_factory.target_types:
symbols[target_type] = TargetAdaptor
parse_context = ParseContext(rel_path='', type_aliases=symbols)
for alias, symbol in symbol_table.items():
registrar = Registrar(alias, symbol)
symbols[alias] = registrar
symbols[symbol] = registrar
if aliases.objects:
symbols.update(aliases.objects)
# Compute "per path" symbols (which will all use the same mutable ParseContext).
aliases = symbol_table_cls.aliases()
for alias, object_factory in aliases.context_aware_object_factories.items():
symbols[alias] = object_factory(parse_context)
for alias, target_macro_factory in aliases.target_macro_factories.items():
symbols[alias] = target_macro_factory.target_macro(parse_context)
for target_type in target_macro_factory.target_types:
symbols[target_type] = TargetAdaptor
# TODO: Replace builtins for paths with objects that will create wrapped PathGlobs objects.
symbols['globs'] = Globs
symbols['rglobs'] = RGlobs
symbols['zglobs'] = ZGlobs
symbols['bundle'] = BundleAdaptor
return symbols, parse_context
@classmethod
def parse(cls, filepath, filecontent, symbol_table_cls):
symbols, parse_context = cls._get_symbols(symbol_table_cls)
python = filecontent
# Mutate the parse context for the new path.
parse_context._rel_path = os.path.dirname(filepath)
with cls._lock:
del cls._objects[:]
six.exec_(python, symbols, {})
return list(cls._objects)
| apache-2.0 | 5,205,157,451,023,971,000 | 35.230769 | 98 | 0.704352 | false | 3.978881 | false | false | false |
nke001/attention-lvcsr | libs/Theano/theano/sandbox/theano_object.py | 1 | 8601 |
"""DRAFT: TheanoObject
N.B. the gotcha with this design is listed in the documentation of `TheanoObject`
"""
from __future__ import print_function
import theano
from theano import tensor
import numpy
def theano_type(x):
"""Return a theano Type instance suitable for containing value `x`."""
if type(x) is int:
return tensor.lscalar
else:
raise NotImplementedError()
class symbolic_fn_callable(object):
"""This is the class whose instance you get when you access a symbolic function in a
`TheanoObject`.
When you call a symbolic function (`symbolic_fn`) of a TheanoObject the `__call__` of this
class handles your request.
You can also access the symbolic outputs and updates of a symbolic function though this
class.
.. code-block:: python
class T(TheanoObject):
@symbolic_fn
def add(self, x):
...
add_outputs = ...
add_updates = ...
return RVal(add_outputs, add_updates)
t = T()
t.add.outputs(5) # returns `add_outputs` from when `x=theano_type(5)`
t.add.updates(5) # returns `add_updates` from when `x=theano_type(5)`
t.add.theano_function(5) # returns the `Function` compiled when `x=theano_type(5)`
t.add(5) # runs the `Function` compiled when `x=theano_type(5)`
# with arguments `(5,)`
"""
def __init__(self, fn, mode):
self.fn = fn
self.mode = mode
def on(self, o_self):
"""Silly method to work with symbolic_fn.__get__"""
self.o_self = o_self
return self
def run_symbolic(self, *args, **kwargs):
return self.o_self._get_method_impl(self.fn, self.o_self, args, kwargs, mode=self.mode)
def __call__(self, *args, **kwargs):
return self.run_symbolic(*args, **kwargs)['theano_function'](*args, **kwargs)
def theano_function(self, *args, **kwargs):
return self.run_symbolic(*args, **kwargs)['theano_function']
def outputs(self, *args, **kwargs):
return self.run_symbolic(*args, **kwargs)['outputs']
def updates(self, *args, **kwargs):
return self.run_symbolic(*args, **kwargs)['updates']
class symbolic_fn(object):
"""A property-like class for decorating symbolic functions in `TheanoObject`
"""
def __init__(self, fn, mode=None):
self.fn = fn
self.callable = symbolic_fn_callable(fn, mode)
def __get__(self, o_self, o_cls):
return self.callable.on(o_self)
def __set__(self, o_self, new_val):
pass
# return NotImplemented
def symbolic_fn_opts(**kwargs):
"""Return a decorator for symbolic_functions in a `TheanoObject`
`kwargs` passed here are passed to `theano.function` via `symbolic_fn`
"""
def deco(f):
return symbolic_fn(f, **kwargs)
return deco
class RVal(object):
"""A Return-Value object for a `symbolic_fn` """
outputs = []
"""The method will compute values for the variables in this list"""
updates = {}
"""The method will update module variables in this dictionary
For items ``(k,v)`` in this dictionary, ``k`` must be a `symbolic_member` of some module.
On each call to this compiled function, the value of ``k`` will be replaced with the
computed value of the Variable ``v``.
"""
def __init__(self, outputs, updates=None):
if updates is None:
updates = {}
self.outputs = outputs
assert type(updates) is dict
self.updates = updates
class TheanoObject(object):
"""Base for Theano-supported classes
This class provides support for symbolic_fn class attributes.
These will be compiled on demand so that they can be used just like normal (non-symbolic)
methods.
The symbolic functions in a TheanoObject can share member variables that have been created
using the `symbolic_member` method.
:note: Other variables (ones not created using ``self.symbolic_member``) referred to in the
body of a symbolic function will *not* be shared between symbolic functions, or between
symbolic functions and this class. These other variables will be locked away in the
closure of a symbolic function when that function is compiled.
:warning: It is not recommended for code to interleave
(a) changes to non-symbolic instance variables with
(b) calls to symbolic functions that use those instance variables.
A symbolic function may be
compiled multiple times because it must be compiled for each set of argument types.
Each time the function is compiled, the values of non-symbolic variables will be locked
into the compiled function. Subsequent changes to those non-symbolic instance variables
will not have any effect on the behaviour of the already-compiled symbolic function.
:todo: Is there an efficient way of recognizing when a compiled symbolic function is stale,
wrt the current values of the class's instance variables?
- One option is to re-evaluate symbolic functions symbolically and see if the graph can be
completely merged with the original graph. This is not fast enough to do all the time by
default though.
"""
def __init__(self):
self.module_method_cache = {}
def _get_method_impl(self, fn, o_self, args, kwargs, mode):
"""Retrieve information about the symbolic function (`fn`) in TheanoObject instance
`o_self`, being evaluated on arguments `args` and `kwargs`.
:rtype: dict with entries 'theano_function', 'outputs', 'updates'
:return: the theano function compiled for these arguments, the symbolic outputs of that
function, and the symbolic updates performed by that function.
:note: This function caches return values in self.`module_method_cache`.
:todo: This may at some point become a class-level cache rather than an instance-level
cache.
"""
if kwargs:
raise NotImplementedError()
cache = self.module_method_cache
args_types = tuple(theano_type(arg) for arg in args)
key = (fn, args_types)
if key not in cache:
inputs = [a() for a in args_types]
print('compiling', fn, 'for inputs', inputs)
rval = fn(o_self, *inputs)
print('compiling to compute outputs', rval.outputs)
if isinstance(rval.outputs, (tuple, list)):
all_required_inputs = theano.gof.graph.inputs(rval.outputs)
else:
all_required_inputs = theano.gof.graph.inputs([rval.outputs])
# construct In instances for the symbolic_member instances that can automatically be
# included here.
module_inputs = [theano.compile.io.In(
variable=v,
value=v._theanoclass_container,
mutable=(v in rval.updates),
update=rval.updates.get(v, None))
for v in all_required_inputs \
if hasattr(v, '_theanoclass_container') and not (v in inputs)]
cache[key] = dict(theano_function=theano.function(inputs+module_inputs, rval.outputs),
updates=rval.updates,
outputs=rval.outputs,
mode=mode)
return cache[key]
def symbolic_member(self, ival, name=None):
"""Create a Variable instance to hold value `ival`.
This function also immediately creates a Container object for ival.
When the returned Variable is used as input to a `TheanoObject` `symbolic_fn`, (but
does not appear as an argument to that symbolic_fn), then this Container will be used to
retrieve (and store) values for the Variable.
This Variable's Container's contents can be retrieved by its `get()` method.
This Variable's Container's contents can be written using its `set(newval)` method.
"""
if type(ival) is not int:
raise NotImplementedError()
v = tensor.lscalar(name)
v._theanoclass_container = \
theano.gof.Container(v,
storage=[theano._asarray(ival, dtype='int64')],
readonly=False)
assert not hasattr(v, 'set')
assert not hasattr(v, 'get')
v.get = lambda : v._theanoclass_container.data
def setval_in_v(newval):
v._theanoclass_container.data = newval
v.set = setval_in_v
return v
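# --- Added usage sketch (not in the original file) ---
# A minimal example of the intended API, using only the classes defined above:
# a counter whose state is shared between calls through a symbolic_member.
# The class name `_Counter` and the method `add` are illustrative only.
class _Counter(TheanoObject):
    def __init__(self):
        TheanoObject.__init__(self)
        # shared symbolic state, initialised to 0
        self.n = self.symbolic_member(0, name='n')

    @symbolic_fn
    def add(self, amount):
        # return the updated count and write it back into the member
        return RVal(self.n + amount, updates={self.n: self.n + amount})
# e.g.  c = _Counter(); c.add(3); c.n.get()  -> 3 (recompiles only per argument type)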
| mit | -1,412,912,226,055,826,200 | 35.6 | 98 | 0.62981 | false | 4.262141 | false | false | false |
iamaris/ppf | ppf/core/leg.py | 1 | 2997 |
class leg(object):
'''
>>> from ppf.date_time import *
>>> from pay_receive import *
>>> from generate_flows import *
>>> flows = generate_flows(
... start = date(2007, Jun, 29)
... , end = date(2017, Jun, 29)
... , resolution = date_resolutions.months
... , period = 6
... , shift_method = shift_convention.modified_following
... , basis = "ACT/360")
>>>
>>> pay_leg = leg(flows, PAY)
>>>
>>> for flow in pay_leg.flows():
... print flow
10000000.000000, USD, [2007-Jun-29, 2007-Dec-31], basis_act_360, 2007-Dec-31,
10000000.000000, USD, [2007-Dec-31, 2008-Jun-30], basis_act_360, 2008-Jun-30,
10000000.000000, USD, [2008-Jun-30, 2008-Dec-29], basis_act_360, 2008-Dec-29,
10000000.000000, USD, [2008-Dec-29, 2009-Jun-29], basis_act_360, 2009-Jun-29,
10000000.000000, USD, [2009-Jun-29, 2009-Dec-29], basis_act_360, 2009-Dec-29,
10000000.000000, USD, [2009-Dec-29, 2010-Jun-29], basis_act_360, 2010-Jun-29,
10000000.000000, USD, [2010-Jun-29, 2010-Dec-29], basis_act_360, 2010-Dec-29,
10000000.000000, USD, [2010-Dec-29, 2011-Jun-29], basis_act_360, 2011-Jun-29,
10000000.000000, USD, [2011-Jun-29, 2011-Dec-29], basis_act_360, 2011-Dec-29,
10000000.000000, USD, [2011-Dec-29, 2012-Jun-29], basis_act_360, 2012-Jun-29,
10000000.000000, USD, [2012-Jun-29, 2012-Dec-31], basis_act_360, 2012-Dec-31,
10000000.000000, USD, [2012-Dec-31, 2013-Jun-28], basis_act_360, 2013-Jun-28,
10000000.000000, USD, [2013-Jun-28, 2013-Dec-30], basis_act_360, 2013-Dec-30,
10000000.000000, USD, [2013-Dec-30, 2014-Jun-30], basis_act_360, 2014-Jun-30,
10000000.000000, USD, [2014-Jun-30, 2014-Dec-29], basis_act_360, 2014-Dec-29,
10000000.000000, USD, [2014-Dec-29, 2015-Jun-29], basis_act_360, 2015-Jun-29,
10000000.000000, USD, [2015-Jun-29, 2015-Dec-29], basis_act_360, 2015-Dec-29,
10000000.000000, USD, [2015-Dec-29, 2016-Jun-29], basis_act_360, 2016-Jun-29,
10000000.000000, USD, [2016-Jun-29, 2016-Dec-29], basis_act_360, 2016-Dec-29,
10000000.000000, USD, [2016-Dec-29, 2017-Jun-29], basis_act_360, 2017-Jun-29,
'''
def __init__(self, flows, pay_or_receive, adjuvant_table = None, payoff = None):
self.__flows = flows
self.__pay_or_receive = pay_or_receive
self.__adjuvant_table = adjuvant_table
self.__payoff = payoff
def flows(self):
return self.__flows
def pay_receive(self):
return self.__pay_or_receive
  def has_adjuvant_table(self):
    return self.__adjuvant_table is not None
  def has_payoff(self):
    return self.__payoff is not None
  def adjuvant_table(self):
    if self.__adjuvant_table is None:
      raise RuntimeError("Null adjuvant table")
    return self.__adjuvant_table
  def payoff(self):
    if self.__payoff is None:
      raise RuntimeError("Null payoff")
    return self.__payoff
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| mit | -3,494,925,771,843,209,000 | 40.054795 | 82 | 0.628295 | false | 2.704874 | false | false | false |
Ashkeelun/GitHealth | GitHealth/health/models.py | 1 | 5145 |
from django.db import models
from django.core.urlresolvers import reverse
import requests
from time import sleep
from .utils import *
# Repository Table
class RepositoryManager(models.Manager):
def create_repository(self, url):
repo = requests.get(url).text
domain = get_url_parts(url)[0]
name = re.findall(get_repo_name_re(domain), repo)[0]
last_commit = re.findall(get_commit_re(domain), repo)[0]
root = Directory.manage.create_directory(domain=domain, path=get_url_parts(url)[1], name=name)
return self.create(name=name, url=url, last_commit=last_commit, root=root)
class Repository(models.Model):
name = models.CharField(max_length=150,)
url = models.URLField(max_length=250,)
last_commit = models.CharField(max_length=150)
root = models.ForeignKey('Directory', on_delete=models.CASCADE, blank=True, null=True, related_name="repository",)
manage = RepositoryManager()
objects = models.Manager()
def __str__(self):
return self.name + ': ' + self.url
def get_domain(self):
return get_url_parts(self.url)[0]
def get_path(self):
return get_url_parts(self.url)[1]
def document_stats(self):
return self.root.total_doc_info()
# Directory Table
class DirectoryManager(models.Manager):
def create_directory(self, domain, path, name, parent=None):
url = domain+path
dir_html = requests.get(url).text
last_commit = re.findall(get_commit_re(domain), dir_html)[0]
if parent:
dir = self.create(name=name, url=url, last_commit=last_commit, parent_dir=parent)
else:
dir = self.create(name=name, url=url, last_commit=last_commit)
contents = re.findall(get_dir_re(domain), dir_html)
for content in contents:
if not content[3] and content[0] == "file-directory":
Directory.manage.create_directory(domain=domain, path=content[1], name=content[2], parent=dir)
elif is_file_sup(content[3]):
File.manage.create_file(domain=domain, path=content[1], name=content[2], extension=content[3], parent=dir)
# sleep(1)
return dir
class Directory(models.Model):
name = models.CharField(max_length=100,)
url = models.URLField(max_length=250,)
last_commit = models.CharField(max_length=150)
parent_dir = models.ForeignKey('Directory', on_delete=models.CASCADE, blank=True, null=True, related_name="sub_dirs",)
manage = DirectoryManager()
objects = models.Manager()
def __str__(self):
return self.name + ': ' + self.url
def get_domain(self):
return get_url_parts(self.url)[0]
def get_path(self):
return get_url_parts(self.url)[1]
def total_doc_info(self):
        results = {}
        for file_info in self.gen_doc_info():
            for key, value in file_info.items():
                try:
                    results[key] += value
                except KeyError:
                    results[key] = value
        return results
def gen_doc_info(self):
files = []
for file in self.sub_files.all():
files.append(file.gen_doc_info())
for dir in self.sub_dirs.all():
files += dir.gen_doc_info()
return files
# File Table
class FileManager(models.Manager):
def create_file(self, domain, path, name, extension, parent):
# raw_domain = 'https://raw.githubusercontent.com' # use domain for raw file reading: https://raw.githubusercontent.com
url = domain+path
file_html = requests.get(url).text
lines = get_lines(domain, file_html)
slcs = re.findall(get_slc_re(extension), lines)
mlcs = re.findall(get_mlc_re(extension), lines)
code = re.sub(get_alc_re(extension), '', lines)
return self.create(name=name, extension=extension, url=url, parent_dir=parent,
mlc_size=len(''.join(mlcs)), mlc_num=len(mlcs), slc_size=len(''.join(slcs)), slc_num=len(slcs),
comt_size=len(''.join(slcs) + ''.join(mlcs)), code_size=len(code))
class File(models.Model):
name = models.CharField(max_length=200,)
extension = models.CharField(max_length=15,)
url = models.URLField(max_length=250,)
parent_dir = models.ForeignKey(Directory, on_delete=models.CASCADE, blank=True, null=True, related_name="sub_files",)
mlc_size = models.IntegerField()
mlc_num = models.IntegerField()
slc_size = models.IntegerField()
slc_num = models.IntegerField()
comt_size = models.IntegerField()
code_size = models.IntegerField()
manage = FileManager()
objects = models.Manager()
def __str__(self):
return self.name + ': ' + self.url
def get_domain(self):
return get_url_parts(self.url)[0]
def get_path(self):
return get_url_parts(self.url)[1]
def gen_doc_info(self):
return {'mlcNum': self.mlc_num, 'mlcSize': self.mlc_size,
'slcNum': self.slc_num, 'slcSize': self.slc_size,
'comtSize': self.comt_size, 'codeSize': self.code_size}
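# --- Added usage sketch (not part of the original app) ---
# How the managers above are meant to be chained; the URL is a placeholder
# and a real call performs live HTTP requests against the hosting site.
def _seed_example_repository():
    repo = Repository.manage.create_repository('https://github.com/example/demo')
    # aggregate comment/code statistics over every file in the crawled tree
    return repo.document_stats()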
| mit | -2,708,766,986,539,718,000 | 34.729167 | 127 | 0.623712 | false | 3.504768 | false | false | false |
ddico/odoo | addons/stock/models/stock_package_level.py | 2 | 10696 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from itertools import groupby
from operator import itemgetter
from odoo import _, api, fields, models
class StockPackageLevel(models.Model):
_name = 'stock.package_level'
_description = 'Stock Package Level'
_check_company_auto = True
package_id = fields.Many2one(
'stock.quant.package', 'Package', required=True, check_company=True,
domain="[('location_id', 'child_of', parent.location_id), '|', ('company_id', '=', False), ('company_id', '=', company_id)]")
picking_id = fields.Many2one('stock.picking', 'Picking', check_company=True)
move_ids = fields.One2many('stock.move', 'package_level_id')
move_line_ids = fields.One2many('stock.move.line', 'package_level_id')
location_id = fields.Many2one('stock.location', 'From', compute='_compute_location_id', check_company=True)
location_dest_id = fields.Many2one(
'stock.location', 'To', check_company=True,
domain="[('id', 'child_of', parent.location_dest_id), '|', ('company_id', '=', False), ('company_id', '=', company_id)]")
is_done = fields.Boolean('Done', compute='_compute_is_done', inverse='_set_is_done')
state = fields.Selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('assigned', 'Reserved'),
('new', 'New'),
('done', 'Done'),
('cancel', 'Cancelled'),
],string='State', compute='_compute_state')
is_fresh_package = fields.Boolean(compute='_compute_fresh_pack')
picking_type_code = fields.Selection(related='picking_id.picking_type_code')
show_lots_m2o = fields.Boolean(compute='_compute_show_lot')
show_lots_text = fields.Boolean(compute='_compute_show_lot')
company_id = fields.Many2one('res.company', 'Company', required=True, index=True)
@api.depends('move_line_ids', 'move_line_ids.qty_done')
def _compute_is_done(self):
for package_level in self:
# If it is an existing package
if package_level.is_fresh_package:
package_level.is_done = True
else:
package_level.is_done = package_level._check_move_lines_map_quant_package(package_level.package_id)
def _set_is_done(self):
for package_level in self:
if package_level.is_done:
if not package_level.is_fresh_package:
for quant in package_level.package_id.quant_ids:
corresponding_ml = package_level.move_line_ids.filtered(lambda ml: ml.product_id == quant.product_id and ml.lot_id == quant.lot_id)
if corresponding_ml:
corresponding_ml[0].qty_done = corresponding_ml[0].qty_done + quant.quantity
else:
corresponding_move = package_level.move_ids.filtered(lambda m: m.product_id == quant.product_id)[:1]
self.env['stock.move.line'].create({
'location_id': package_level.location_id.id,
'location_dest_id': package_level.location_dest_id.id,
'picking_id': package_level.picking_id.id,
'product_id': quant.product_id.id,
'qty_done': quant.quantity,
'product_uom_id': quant.product_id.uom_id.id,
'lot_id': quant.lot_id.id,
'package_id': package_level.package_id.id,
'result_package_id': package_level.package_id.id,
'package_level_id': package_level.id,
'move_id': corresponding_move.id,
})
else:
package_level.move_line_ids.filtered(lambda ml: ml.product_qty == 0).unlink()
package_level.move_line_ids.filtered(lambda ml: ml.product_qty != 0).write({'qty_done': 0})
@api.depends('move_line_ids', 'move_line_ids.package_id', 'move_line_ids.result_package_id')
def _compute_fresh_pack(self):
for package_level in self:
if not package_level.move_line_ids or all(ml.package_id and ml.package_id == ml.result_package_id for ml in package_level.move_line_ids):
package_level.is_fresh_package = False
else:
package_level.is_fresh_package = True
@api.depends('move_ids', 'move_ids.state', 'move_line_ids', 'move_line_ids.state')
def _compute_state(self):
for package_level in self:
if not package_level.move_ids and not package_level.move_line_ids:
package_level.state = 'draft'
elif not package_level.move_line_ids and package_level.move_ids.filtered(lambda m: m.state not in ('done', 'cancel')):
package_level.state = 'confirmed'
elif package_level.move_line_ids and not package_level.move_line_ids.filtered(lambda ml: ml.state == 'done'):
if package_level.is_fresh_package:
package_level.state = 'new'
elif package_level._check_move_lines_map_quant_package(package_level.package_id, 'product_uom_qty'):
package_level.state = 'assigned'
else:
package_level.state = 'confirmed'
elif package_level.move_line_ids.filtered(lambda ml: ml.state =='done'):
package_level.state = 'done'
elif package_level.move_line_ids.filtered(lambda ml: ml.state == 'cancel') or package_level.move_ids.filtered(lambda m: m.state == 'cancel'):
package_level.state = 'cancel'
def _compute_show_lot(self):
for package_level in self:
if any(ml.product_id.tracking != 'none' for ml in package_level.move_line_ids):
if package_level.picking_id.picking_type_id.use_existing_lots or package_level.state == 'done':
package_level.show_lots_m2o = True
package_level.show_lots_text = False
else:
if self.picking_id.picking_type_id.use_create_lots and package_level.state != 'done':
package_level.show_lots_m2o = False
package_level.show_lots_text = True
else:
package_level.show_lots_m2o = False
package_level.show_lots_text = False
else:
package_level.show_lots_m2o = False
package_level.show_lots_text = False
def _generate_moves(self):
for package_level in self:
if package_level.package_id:
for quant in package_level.package_id.quant_ids:
self.env['stock.move'].create({
'picking_id': package_level.picking_id.id,
'name': quant.product_id.display_name,
'product_id': quant.product_id.id,
'product_uom_qty': quant.quantity,
'product_uom': quant.product_id.uom_id.id,
'location_id': package_level.location_id.id,
'location_dest_id': package_level.location_dest_id.id,
'package_level_id': package_level.id,
'company_id': package_level.company_id.id,
})
@api.model
def create(self, vals):
result = super(StockPackageLevel, self).create(vals)
if vals.get('location_dest_id'):
result.mapped('move_line_ids').write({'location_dest_id': vals['location_dest_id']})
result.mapped('move_ids').write({'location_dest_id': vals['location_dest_id']})
if result.picking_id.state != 'draft' and result.location_id and result.location_dest_id and not result.move_ids and not result.move_line_ids:
result._generate_moves()
return result
def write(self, vals):
result = super(StockPackageLevel, self).write(vals)
if vals.get('location_dest_id'):
self.mapped('move_line_ids').write({'location_dest_id': vals['location_dest_id']})
self.mapped('move_ids').write({'location_dest_id': vals['location_dest_id']})
return result
def unlink(self):
self.mapped('move_ids').unlink()
self.mapped('move_line_ids').write({'result_package_id': False})
return super(StockPackageLevel, self).unlink()
def _check_move_lines_map_quant_package(self, package, field='qty_done'):
""" should compare in good uom """
all_in = True
pack_move_lines = self.move_line_ids
keys = ['product_id', 'lot_id']
def sorted_key(object):
object.ensure_one()
return [object.product_id.id, object.lot_id.id]
grouped_quants = {}
for k, g in groupby(sorted(package.quant_ids, key=sorted_key), key=itemgetter(*keys)):
grouped_quants[k] = sum(self.env['stock.quant'].concat(*list(g)).mapped('quantity'))
grouped_ops = {}
for k, g in groupby(sorted(pack_move_lines, key=sorted_key), key=itemgetter(*keys)):
grouped_ops[k] = sum(self.env['stock.move.line'].concat(*list(g)).mapped(field))
if any(grouped_quants.get(key, 0) - grouped_ops.get(key, 0) != 0 for key in grouped_quants) \
or any(grouped_ops.get(key, 0) - grouped_quants.get(key, 0) != 0 for key in grouped_ops):
all_in = False
return all_in
@api.depends('state', 'is_fresh_package', 'move_ids', 'move_line_ids')
def _compute_location_id(self):
for pl in self:
if pl.state == 'new' or pl.is_fresh_package:
pl.location_id = False
elif pl.state == 'confirmed' and pl.move_ids:
pl.location_id = pl.move_ids[0].location_id
elif pl.state in ('assigned', 'done') and pl.move_line_ids:
pl.location_id = pl.move_line_ids[0].location_id
else:
pl.location_id = pl.picking_id.location_id
def action_show_package_details(self):
self.ensure_one()
view = self.env.ref('stock.package_level_form_view')
return {
'name': _('Package Content'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'stock.package_level',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'res_id': self.id,
'flags': {'mode': 'readonly'},
}
| agpl-3.0 | 2,010,676,459,348,125,000 | 50.671498 | 155 | 0.56451 | false | 3.788877 | false | false | false |
matiaslindgren/not-enough-bogo | bogo/bogoapp/bogo_manager.py | 1 | 4905 |
"""
Fear and loathing.
"""
import ast
import asyncio
import logging
import time
from bogoapp import tools
from bogoapp.bogo import Bogo
logger = logging.getLogger("BogoManager")
class BogoError(Exception):
pass
class BogoManager:
"""
Manages all state related to bogosorting a sequence of lists.
"""
def __init__(self,
unsorted_lists,
speed_resolution,
database,
random_module):
if speed_resolution <= 0:
raise BogoError("Invalid speed resolution, "
"N shuffles per {} seconds doesn't make sense."
.format(speed_resolution))
self.unsorted_lists = unsorted_lists
self.speed_resolution = speed_resolution
self.database = database
self.random = random_module
self.current_bogo = None
self.stopping = False
self.asyncio_task = None
async def load_previous_state(self):
logging.info("Loading previous state.")
bogo_row = await self.database.newest_bogo()
if not bogo_row:
logging.info("No previous bogo found.")
return None
bogo = Bogo.from_database_row(bogo_row)
random_state_row = await self.database.newest_random_state()
if not random_state_row:
raise BogoError("Improperly saved random state "
f"Found newest bogo with id {bogo.db_id} "
"but no previous random state was found.")
random_state_bogo_id = random_state_row[3]
if bogo.db_id != random_state_bogo_id:
raise BogoError("Improperly saved random state, "
f"newest bogo has id {bogo.db_id} "
"but newest random state has a reference "
f"to a bogo with id {random_state_bogo_id}.")
logging.info("Setting random state.")
self.random.setstate(ast.literal_eval(random_state_row[1]))
logging.info(f"Returning previous bogo {bogo}")
return bogo
async def save_state(self, now):
logging.debug("Saving state.")
random_state = self.random.getstate()
await self.database.save_state(self.current_bogo, random_state, now)
async def make_next_bogo(self, sequence):
logging.debug(f"Making new bogo from sequence {sequence}.")
now = tools.isoformat_now()
self.current_bogo = Bogo(sequence=sequence, created=now)
await self.save_state(now=now)
self.current_bogo.db_id = (await self.database.newest_bogo())[0]
async def sort_current_until_done(self):
"""Bogosort the current sequence until it is sorted."""
logging.debug("Sorting current bogo until done.")
delta_iterations = 0
delta_seconds = 0.0
while not (self.current_bogo.is_finished() or self.stopping):
await asyncio.sleep(1e-100)
perf_counter_start = time.perf_counter()
self.current_bogo.shuffle_with(self.random.shuffle)
delta_iterations += 1
delta_seconds += time.perf_counter() - perf_counter_start
if delta_seconds >= self.speed_resolution:
delta_iterations = 0
delta_seconds = 0.0
logging.debug("Stopped sorting bogo.")
now = tools.isoformat_now()
if self.current_bogo.is_finished():
logging.debug("Bogo was sorted")
self.current_bogo.finished = now
else:
logging.debug("Bogo was not sorted")
await self.save_state(now)
async def sort_all(self):
logging.debug("Sorting all unsorted lists.")
for lst in self.unsorted_lists:
if self.stopping:
logging.info("Stopping sorting all unsorted lists.")
break
await self.make_next_bogo(lst)
await self.sort_current_until_done()
async def run(self):
logging.info("Running BogoManager.")
previous_bogo = await self.load_previous_state()
if previous_bogo and not previous_bogo.is_finished():
logging.info("Found unfinished previous bogo.")
unfinished_length = len(previous_bogo.sequence)
self.unsorted_lists = tools.fast_forward_to_length(
self.unsorted_lists, unfinished_length)
# Drop next list since it has the same length as the sequence in
# the unfinished previous_bogo.
next(self.unsorted_lists)
self.current_bogo = previous_bogo
await self.sort_current_until_done()
else:
logging.info("Did not find an unfinished previous bogo.")
await self.sort_all()
def get_current_state(self):
return (self.current_bogo.shuffles,
self.current_bogo.is_finished())
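# --- Added usage sketch (not part of the original module) ---
# Wiring a manager into an asyncio loop. The `database` object is assumed to
# expose the coroutines used above (newest_bogo, newest_random_state,
# save_state); `lists` is any iterable of lists to bogosort.
async def _run_manager(database, lists, resolution=1.0):
    import random
    manager = BogoManager(unsorted_lists=lists,
                          speed_resolution=resolution,
                          database=database,
                          random_module=random)
    manager.asyncio_task = asyncio.ensure_future(manager.run())
    await manager.asyncio_task
    return manager.get_current_state()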
| mit | 1,056,780,022,889,662,500 | 37.320313 | 76 | 0.59368 | false | 3.952458 | false | false | false |
huanpc/lab_cloud_computing | Jmeter Test Plan/Seeding_Data/Main.py | 1 | 1880 |
__author__ = 'huanpc'
import constant
import argparse
from random import randint
# Path to the output directory for the seed data files
DIR_OUTPUT_PATH = './output'
# Starting value for the id column in the customer table
customer_id_begin = 5
product_id_begin = 0
# Number of rows to generate
num_of_row = 100
#def createProductData():
def createCustomerData():
first_name_list = constant.FIRST_NAME
last_name_list = constant.LAST_NAME
i = 0
f = open(DIR_OUTPUT_PATH+'/customer_data_seed.csv','w')
column_heading = ['customer_id','customer_group_id','store_id','first_name','last_name','email','telephone','fax','password','salt','cart','whistlist',
'newsleter','address_id','custom_field','ip','status','approves','safe','token','date_added']
row = ['1',constant.CUSTOMER_GROUP_ID,constant.STORE_ID,'1','1','1','1','1',constant.PASSWORD,constant.SALT,constant.CART,constant.WHISTLIST,constant.NEWSLETTER,constant.ADDRESS_ID,
constant.CUSTOM_FIELD,constant.IP,constant.STATUS,constant.APPROVED,constant.SAFE,constant.TOKEN,constant.DATE_ADDED]
while i<num_of_row:
first_name = first_name_list[randint(0,len(constant.FIRST_NAME)-1)]
last_name = last_name_list[randint(0,len(constant.LAST_NAME)-1)]
row[0] = str(i+customer_id_begin)
row[3] = first_name
row[4] = last_name
row[5] = str(first_name+'.'+last_name+'@gmail.com').lower()
row[6] = str(randint(11111,99999))+ str(randint(11111,99999))
row[7] = row[6]
line = ','.join(row)
i+=1
f.write(line+'\n')
f.close()
def main():
# parser = argparse.ArgumentParser(description='Sinh du lieu mau cho tap test')
# parser.add_argument('integers', metavar='N', type=int, nargs='+',
# help='an integer for the accumulator')
createCustomerData()
if __name__ == '__main__':
main()
| apache-2.0 | 7,157,417,236,255,695,000 | 39.869565 | 185 | 0.642021 | false | 3.191851 | false | false | false |
chantera/teras | teras/training/listeners.py | 1 | 8746 |
from collections import OrderedDict
import os
import pickle
import logging
import numpy as np
from teras.training.event import Listener
from teras.utils.collections import ImmutableMap
class ProgressBar(Listener):
"""
Example::
>>> from tqdm import tqdm
>>> import time
>>> pbar = ProgressBar(lambda n: tqdm(total=n))
>>> pbar.init(512)
>>> for _ in range(16):
>>> time.sleep(0.1)
>>> pbar.update(32)
>>> pbar.close()
"""
name = "progressbar"
def __init__(self, factory, **kwargs):
super().__init__(**kwargs)
self._pbar = None
self._factory = factory
def init(self, total):
self.close()
self._pbar = self._factory(total)
def update(self, n):
self._pbar.update(n)
def close(self):
if self._pbar is not None:
self._pbar.close()
self._pbar = None
def __del__(self):
self.close()
def on_epoch_train_begin(self, data):
self.init(data['size'])
def on_batch_end(self, data):
self.update(data['batch_size'])
def on_epoch_train_end(self, data):
self.close()
on_epoch_validate_begin = on_epoch_train_begin
on_epoch_validate_end = on_epoch_train_end
_reporters = []
def report(values):
if not _reporters:
return
for reporter in reversed(_reporters):
reporter.report(values)
class Reporter(Listener):
name = "reporter"
def __init__(self, logger, **kwargs):
super().__init__(**kwargs)
self._logger = logger
self._logs = OrderedDict()
self._reported = 0
self._history = []
def __enter__(self):
_reporters.append(self)
def __exit__(self, exc_type, exc_value, traceback):
_reporters.pop()
def report(self, values):
for name, value in values.items():
if name != "loss" and "loss" in name:
loss = self._logs.get(name, [])
loss.append(float(value))
value = loss
elif "accuracy" in name:
accuracy = self._logs.get(name, 0.0)
if isinstance(value, (tuple, list)) and len(value) == 2:
if isinstance(accuracy, float):
accuracy = [0, 0]
accuracy[0] += value[0]
accuracy[1] += value[1]
else:
accuracy += float(value)
value = accuracy
self._logs[name] = value
self._reported += 1
def get_summary(self):
summary = OrderedDict()
for name, value in self._logs.items():
if name != "loss" and "loss" in name:
n = len(value)
summary[name] = sum(value) / n if n > 0 else np.nan
elif "accuracy" in name:
if isinstance(value, list):
correct, total = value[:2]
if total == 0:
accuracy = np.nan
else:
accuracy = correct / total
else:
accuracy = value / self._reported
summary[name] = accuracy
else:
summary[name] = value
return summary
def get_history(self):
return self._history
def on_train_begin(self, data):
self._history = []
def on_epoch_train_begin(self, data):
self._logs.clear()
self._reported = 0
on_epoch_validate_begin = on_epoch_train_begin
def on_epoch_train_end(self, data):
self.report({'loss': data['loss']})
summary = self.get_summary()
self._output_log("training", summary, data)
self._history.append({'training': summary, 'validation': None})
def on_epoch_validate_end(self, data):
self.report({'loss': data['loss']})
summary = self.get_summary()
self._output_log("validation", summary, data)
self._history[-1]['validation'] = summary
def _output_log(self, label, summary, data):
message = "[{}] epoch {} - #samples: {}, loss: {:.8f}".format(
label, data['epoch'], data['size'], summary['loss'])
if 'accuracy' in summary:
message += ", accuracy: {:.8f}".format(summary['accuracy'])
v = self._logs.get('accuracy', None)
if isinstance(v, list) and v[1] > 0:
message += " ({}/{})".format(v[0], v[1])
self._logger.info(message)
message = []
for name, value in summary.items():
if name == 'loss' or name == 'accuracy':
continue
if isinstance(value, float):
message.append("{}: {:.8f}".format(name, value))
else:
message.append("{}: {}".format(name, value))
if 'accuracy' in name:
v = self._logs.get(name, None)
if isinstance(v, list) and v[1] > 0:
message[-1] += " ({}/{})".format(v[0], v[1])
if message:
self._logger.info(", ".join(message))
class Saver(Listener):
name = "saver"
class Context(ImmutableMap):
def __getattr__(self, name):
if name in self.data:
return self.data[name]
raise AttributeError("'{}' object has no attribute '{}'"
.format(type(self).__name__, name))
def __hash__(self):
return hash(tuple(sorted(self.data.items())))
def __init__(self, model, basename, directory='', context=None, interval=1,
save_from=None, save_best=False, evaluate=None,
serializer=None, logger=None, **kwargs):
super().__init__(**kwargs)
self._model = model
self._basename = os.path.join(os.path.expanduser(directory), basename)
if context is not None and not isinstance(context, Saver.Context):
context = Saver.Context(context)
self._context = context
if not isinstance(interval, int):
raise ValueError("interval must be specified as int value: "
"actual('{}')".format(type(interval).__name__))
self._interval = interval
self._save_from = save_from
self._save_best = save_best
self._evaluate = evaluate
self._best_value = -float('inf')
self._serializer = serializer if serializer is not None else pickle
self._logger = logger \
if logger is not None else logging.getLogger(__name__)
def save_context(self, context):
if not isinstance(context, Saver.Context):
raise TypeError('`context` must be a Saver.Context object')
file = self._basename + '.context'
self._logger.info("saving the context to {} ...".format(file))
with open(file, 'wb') as f:
self._serializer.dump(context, f)
def save_model(self, model, suffix=''):
file = "{}{}.pkl".format(self._basename, suffix)
self._logger.info("saving the model to {} ...".format(file))
with open(file, 'wb') as f:
self._serializer.dump(model, f)
@staticmethod
def load_context(model_file, deserializer=None):
if deserializer is None:
deserializer = pickle
_dir, _file = os.path.split(model_file)
context_file = os.path.basename(_file).split('.')[0] + '.context'
context_file = os.path.join(_dir, context_file)
with open(context_file, 'rb') as f:
context = deserializer.load(f)
return context
def on_train_begin(self, data):
if self._context is not None:
self.save_context(self._context)
def on_epoch_validate_end(self, data):
if self._save_best:
self._trigger_save(data)
def on_epoch_end(self, data):
if not self._save_best:
self._trigger_save(data)
def _trigger_save(self, data):
epoch = data['epoch']
if self._save_from is not None and epoch < self._save_from:
return
if epoch % self._interval == 0:
if self._save_best:
if callable(self._evaluate):
value = self._evaluate(data)
else:
value = -data['loss']
if value <= self._best_value:
return
self._logger.info("update the best score - new: {}, old: {}"
.format(value, self._best_value))
self._best_value = value
self.save_model(self._model)
else:
self.save_model(self._model, suffix='.' + str(epoch))
| mit | -6,274,623,873,771,057,000 | 32.381679 | 79 | 0.523668 | false | 4.115765 | false | false | false |
sinnwerkstatt/landmatrix | commands/link_comments.py | 1 | 1095 |
#!/usr/bin/env python
from django.core.management import BaseCommand
class Command(BaseCommand):
help = "Replace historical activity IDs with activity IDs within Threaded comments"
def handle(self, *args, **options):
pass
"""
ThreadedComment = get_model()
passed, failed = 0, 0
for comment in ThreadedComment.objects.filter(content_type_id=42):
try:
ha = HistoricalActivity.objects.get(id=comment.object_pk)
a = Activity.objects.filter(
activity_identifier=ha.activity_identifier
).first()
comment.content_type = ContentType.objects.get(
app_label="landmatrix", model="activity"
)
comment.object_pk = a.id
comment.save()
passed += 1
except Activity.DoesNotExist:
failed += 1
except HistoricalActivity.DoesNotExist:
failed += 1
self.stdout.write("%i passed, %i failed" % (passed, failed))
"""
| agpl-3.0 | 3,495,733,314,741,191,000 | 35.5 | 87 | 0.553425 | false | 4.76087 | false | false | false |
socialc0de/germany-says-welcome-backend | gsw/gsw/settings.py | 1 | 3165 |
"""
Django settings for gsw project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=f4aydus9$f*c(108mqo!-)b8i@ttb80&h%940z@4bd%)%i8jj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'rest_framework_gis',
'corsheaders',
'hvad',
'backend',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
)
ROOT_URLCONF = 'gsw.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'gsw.db'),
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gsw.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = "/api/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
from gsw.local_settings import *
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
| agpl-3.0 | 1,764,764,762,736,935,700 | 24.739837 | 71 | 0.686572 | false | 3.440217 | false | false | false
bayesimpact/bob-emploi | frontend/server/asynchronous/i18n/download_translations.py | 1 | 4312 |
"""Download translations from Airtable for static server strings."""
import argparse
import collections
import json
import logging
import os
import typing
from typing import Any, Dict, Optional, Sequence, Set, Tuple
from airtable import airtable
import polib
from bob_emploi.frontend.server.mail import campaign
from bob_emploi.frontend.server.mail.templates import mailjet_templates
# Translations base: https://airtable.com/appkEc8N0Bw4Uok43
_I18N_BASE_ID = 'appkEc8N0Bw4Uok43'
_REQUIRED_LANGUAGES = {'en'}
_DOWNLOAD_LANGUAGES = {'en', 'en_UK', 'fr', 'fr@tu'}
def main(string_args: Optional[Sequence[str]] = None) -> None:
"""Download translations from Airtable for static server strings."""
# Parse arguments.
parser = argparse.ArgumentParser(
description='Download translations from Airtable for static server strings.')
parser.add_argument('--api-key', default=os.getenv('AIRTABLE_API_KEY'))
parser.add_argument(
'--strings', help='Path to the PO file containing the extracted strings to translate.',
required=True)
parser.add_argument(
'--output', help='File in which to save the translations.',
required=True)
args = parser.parse_args(string_args)
if not args.api_key:
raise ValueError(
'No API key found. Create an airtable API key at '
'https://airtable.com/account and set it in the AIRTABLE_API_KEY '
'env var.')
logging.info('Loading extracted strings…')
extracted_strings = {
msg.msgid for msg in polib.pofile(args.strings)
# Do not keep strings that are only in test files.
if not msg.occurrences or
not all(f.endswith('_test.py') for f, unused_line in msg.occurrences)
}
logging.info('Loading extra strings from Mailjet templates…')
mailjet_strings = {
campaign.get_campaign_subject(campaign_id)
for campaign_id in mailjet_templates.MAP
}
logging.info('Downloading translations from Airtable…')
i18n_base = airtable.Airtable(_I18N_BASE_ID, args.api_key)
translations = {
typing.cast(Dict[str, Any], record['fields']).get('string', ''): {
lang: translation
for lang, translation in record['fields'].items()
if lang in _DOWNLOAD_LANGUAGES
}
for record in i18n_base.iterate('translations')
}
logging.info('Mapping keys with context to their base keys…')
contexts = collections.defaultdict(list)
for translation in translations:
parts = translation.split('_')
for index in range(1, len(parts)):
key = '_'.join(parts[0: index])
contexts[key].extend([
'_'.join(parts[0: split_index + 1])
for split_index in range(index, len(parts))])
logging.info('Filtering translations of extracted strings…')
extracted_translations: Dict[str, Dict[str, str]] = {}
should_raise_on_missing = bool(os.getenv('FAIL_ON_MISSING_TRANSLATIONS', ''))
missing_translations: Set[Tuple[Optional[str], str]] = set()
for key in extracted_strings | mailjet_strings:
if key not in translations:
if key in extracted_strings:
missing_translations.add((None, key))
continue
for language in _REQUIRED_LANGUAGES - translations[key].keys():
missing_translations.add((language, key))
extracted_translations[key] = translations[key]
for key_with_context in contexts.get(key, []):
try:
extracted_translations[key_with_context] = translations[key_with_context]
except KeyError:
pass
if missing_translations:
missing_translations_string = 'Missing translations:\n' + '\n'.join(
f'{language if language else "all"}: {key}' for language, key in missing_translations)
if should_raise_on_missing:
raise KeyError(missing_translations_string)
logging.info(missing_translations_string)
logging.info('Creating the translations file…')
with open(args.output, 'wt') as output_file:
json.dump(extracted_translations, output_file, ensure_ascii=False, sort_keys=True)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| gpl-3.0 | -2,897,353,337,188,802,600 | 37.053097 | 98 | 0.654186 | false | 3.988868 | false | false | false |
brunobraga/termsaver-figlet | figlet/constants.py | 1 | 2794 | ###############################################################################
#
# file: constants.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver-Figlet plugin, and should not be
# used or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
Holds constant values used throughout termsaver-figlet plugin.
"""
#
# Termsaver modules
#
from termsaverlib.constants import PropertyClass
class Plugin(PropertyClass):
"""
Holds application related properties used by termsaver-figlet plugin
screens. Refer to each of the available properties for detailed
documentation.
"""
VERSION = "0.1"
"""
Defines the version of termsaver-figlet plugin. This is accessed during
install process, and to any help and usage messages informed by it.
Refer to CHANGELOG file for a complete history about this project.
"""
NAME = 'termsaver-figlet'
"""
Defines the termsaver-figlet plugin, usually the plugin package name.
"""
TITLE = 'TermSaver Figlet Plugin'
"""
Defines the termsaver-figlet plugin's official name as it should appear
in documentation.
"""
DESCRIPTION = 'A set of screens for termsaver using figlet.'
"""
Defines the main description of the termsaver-figlet plugin.
"""
URL = 'http://www.termsaver.info/plugins'
"""
Defines the termsaver-figlet plugin official website address.
"""
SOURCE_URL = 'http://github.com/brunobraga/termsaver'
"""
Defines the termsaver-figlet plugin official source-code control site,
hosted on GitHub.
"""
AUTHORS = ['Bruno Braga <[email protected]>']
"""
Defines a list of all authors contributing to the termsaver-figlet plugin.
"""
class Settings(PropertyClass):
"""
Holds configuration settings used by termsaver-figlet plugin. Refer to each
of the available properties for detailed documentation.
Follow the formatting:
SETTING_NAME = VALUE
\"\"\"
document it!
\"\"\"
"""
pass
| apache-2.0 | 5,993,225,176,620,943,000 | 27.510204 | 79 | 0.63529 | false | 4.37931 | false | false | false |
gmathers/iii-addons | account_payment_cc/account_voucher.py | 2 | 8836 | # -*- coding: utf-8 -*-
######################################################################
#
# Note: Program metadata is available in /__init__.py
#
######################################################################
from openerp.osv import osv
class account_voucher(osv.osv):
_inherit = 'account.voucher'
def action_move_line_create(self, cr, uid, ids, context=None):
res = super(account_voucher, self).action_move_line_create(cr, uid, ids, context)
move_line_pool = self.pool.get('account.move.line')
move_pool = self.pool.get('account.move')
for voucher in self.browse(cr, uid, ids):
if voucher.journal_id.support_creditcard_transactions:
company_currency = self._get_company_currency(cr, uid, voucher.id, context)
current_currency = self._get_current_currency(cr, uid, voucher.id, context)
partner_id = voucher.journal_id.partner_id
account_payable = voucher.journal_id.partner_id.property_account_payable
account_receivable = voucher.journal_id.partner_id.property_account_receivable
account = voucher.journal_id.default_credit_account_id
if voucher.type in ('receipt', 'sale'):
account = voucher.journal_id.default_debit_account_id
# Create the account move record.
move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
fline = self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, context)
fline.update({
'partner_id': partner_id and partner_id.id or voucher.partner_id.id,
})
credit, debit = fline.get('credit'), fline.get('debit')
alines = [line.id for line in voucher.line_ids if line.amount]
ctx = context and context.copy() or {}
ctx.update({'date': voucher.date})
if alines:
for line in voucher.line_ids:
#create one move line per voucher line where amount is not 0.0
if not line.amount:
continue
amount = self._convert_amount(cr, uid, line.amount, voucher.id, context=ctx)
line_debit = line_credit = 0.0
if voucher.type in ('purchase', 'payment'):
line_credit = amount
elif voucher.type in ('sale', 'receipt'):
line_debit = amount
if line_debit < 0: line_credit = -line_debit; line_debit = 0.0
if line_credit < 0: line_debit = -line_credit; line_credit = 0.0
move_line = {
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'name': line.name or '/',
'account_id': account_payable.id,
'move_id': move_id,
'partner_id': partner_id and partner_id.id or voucher.partner_id.id,
'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False,
'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
'quantity': 1,
'credit': credit,
'debit': debit,
'date': voucher.date
}
if voucher.type in ('payment', 'purchase'):
move_line.update({'account_id': account_payable.id})
if line.type=='cr':
move_line['debit'] = line_debit
fline.update({
'credit': debit, 'debit': credit,
})
else:
move_line['credit'] = line_credit
fline.update({
'credit': debit, 'debit': credit,
'account_id': account.id
})
if voucher.type in ('receipt', 'sale'):
move_line.update({'account_id': account_receivable.id})
if line.type=='cr':
fline.update({
'credit': debit, 'debit': credit,
'account_id': account.id
})
move_line['debit'] = line_debit
else:
move_line['credit'] = line_credit
fline.update({
'credit': debit, 'debit': credit,
})
move_line_pool.create(cr, uid, move_line, context)
else:
amount = self._convert_amount(cr, uid, (credit+debit), voucher.id, context=ctx)
line_debit = line_credit = 0.0
if voucher.type in ('purchase', 'payment'):
line_credit = amount
elif voucher.type in ('sale', 'receipt'):
line_debit = amount
if line_debit < 0: line_credit = -line_debit; line_debit = 0.0
if line_credit < 0: line_debit = -line_credit; line_credit = 0.0
move_line = {
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'name': voucher.name or '/',
'account_id': account_payable.id,
'move_id': move_id,
'partner_id': partner_id and partner_id.id or voucher.partner_id.id,
'quantity': 1,
'credit': credit,
'debit': 0.0,
'date': voucher.date
}
if voucher.type in ('receipt', 'sale'):
move_line.update({'account_id': account_receivable.id})
if (credit > 0):
move_line['debit'] = amount
else:
move_line['credit'] = amount
move_line_pool.create(cr, uid, move_line, context)
move_line_pool.create(cr, uid, fline, context)
return res
def cancel_voucher(self, cr, uid, ids, context=None):
reconcile_pool = self.pool.get('account.move.reconcile')
move_pool = self.pool.get('account.move')
for voucher in self.browse(cr, uid, ids, context=context):
voucher_number = voucher.number
recs = []
for line in voucher.move_ids:
if line.reconcile_id:
recs += [line.reconcile_id.id]
if line.reconcile_partial_id:
recs += [line.reconcile_partial_id.id]
reconcile_pool.unlink(cr, uid, recs)
if voucher.move_id:
move_pool.button_cancel(cr, uid, [voucher.move_id.id])
move_pool.unlink(cr, uid, [voucher.move_id.id])
if voucher_number and voucher.journal_id.support_creditcard_transactions:
cc_move = move_pool.search(cr, uid, [("name", "=", voucher_number)], context=context)
for move in move_pool.browse(cr, uid, cc_move, context=context):
if move.journal_id.support_creditcard_transactions:
recs = []
for line in move.line_id:
if line.reconcile_id:
recs += [line.reconcile_id.id]
if line.reconcile_partial_id:
recs += [line.reconcile_partial_id.id]
reconcile_pool.unlink(cr, uid, recs, context=context)
move_pool.button_cancel(cr, uid, [move.id], context=context)
move_pool.unlink(cr, uid, [move.id], context=context)
res = {
'state':'cancel',
'move_id':False,
}
self.write(cr, uid, ids, res)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,463,914,182,569,678,300 | 50.672515 | 166 | 0.454391 | false | 4.652975 | false | false | false |
AC130USpectre/ImagesWatermarks | Convert.py | 1 | 1634 | from GenerateNoise import noise
from PIL import Image, ImageDraw
import math
SCALE = 1  # noise scale
def direct(rect, usernumber, userquantity, coordinate, dimension, seed):  # apply noise to a rectangular cluster
n = noise(usernumber, userquantity, coordinate, dimension, seed)
n *= SCALE
if (n == 0.0):
return rect
if (n > 0.0):
n = math.floor(n)
else:
n = math.ceil(n)
    (width, height) = rect.size  # cluster dimensions
    pix = rect.load()  # cluster pixel array
draw = ImageDraw.Draw(rect)
    for i in range(width):  # iterate over every pixel of the cluster
for j in range(height):
(r, g, b) = pix[i, j]
r += n; g += n; b += n
if (r > 255):
r = 255
elif (r < 0):
r = 0
if (g > 255):
g = 255
elif (g < 0):
g = 0
if (b > 255):
b = 255
elif (b < 0):
b = 0
draw.point((i, j), (r, g, b))
return rect
def diff(original, copy):  # difference between the original and the copy
pix_o = original.load()
pix_c = copy.load()
(width, height) = original.size
diff = 0.0
for i in range(width):
for j in range(height):
            for k in range(2):  # track the maximum absolute difference
                if (math.fabs(pix_c[i, j][k] - pix_o[i, j][k]) > diff):
                    diff = math.fabs(pix_c[i, j][k] - pix_o[i, j][k])
return diff / SCALE
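# A minimal usage sketch (not part of the original module): the file names,
# cluster coordinates and noise parameters below are hypothetical and only
# illustrate how direct() and diff() are meant to be combined.
if __name__ == "__main__":
    original = Image.open("original.png").convert("RGB")  # hypothetical input file
    marked = original.copy()
    # Embed the noise for user 3 of 16 into one 8x8 cluster, then put it back.
    cluster = marked.crop((0, 0, 8, 8))
    cluster = direct(cluster, 3, 16, (0, 0), 8, seed=42)
    marked.paste(cluster, (0, 0))
    marked.save("marked.png")
    # The recovered watermark strength is the largest per-channel difference.
    print(diff(original, marked))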
| gpl-2.0 | 6,767,493,533,111,639,000 | 30.510638 | 110 | 0.501013 | false | 2.76306 | false | false | false |
Xeralux/tensorflow | tensorflow/contrib/tpu/python/tpu/tpu_context.py | 1 | 18709 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metdata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import copy
import numpy as np
from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.platform import tf_logging as logging
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
class _TPUContext(object):
"""A context holds immutable states of TPU computation.
This immutable object holds TPUEstimator config, train/eval batch size, and
`TPUEstimator.use_tpu`, which is expected to be passed around. It also
provides utility functions, based on the current state, to determine other
information commonly required by TPU computation, such as TPU device names,
TPU hosts, shard batch size, etc.
N.B. As `mode` is not immutable state in Estimator, but essential to
distinguish between TPU training and evaluation, a common usage for
_TPUContext with `mode` is as follows:
```
with _ctx.with_mode(mode) as ctx:
if ctx.is_running_on_cpu():
...
```
"""
def __init__(self, config, train_batch_size, eval_batch_size,
predict_batch_size, use_tpu):
self._config = config
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
self._predict_batch_size = predict_batch_size
self._use_tpu = use_tpu
self._model_parallelism_enabled = (
use_tpu and config.tpu_config.computation_shape)
self._mode = None
self._lazy_tpu_system_metadata_dict = {} # key by master address
self._lazy_device_assignment_dict = {} # key by master address
self._lazy_validation_dict = {} # key by ModeKeys
def _assert_mode(self):
if self._mode is None:
raise RuntimeError(
'`mode` needs to be set via contextmanager `with_mode`.')
return self._mode
@contextmanager
def with_mode(self, mode):
    # NOTE(xiejw): Shallow copy is enough. It will share the lazy dictionaries,
# such as _lazy_tpu_system_metadata_dict between new copy and the original
# one. Note that all lazy states stored in properties _lazy_foo are sort of
# immutable as they should be same for the process lifetime.
new_ctx = copy.copy(self)
new_ctx._mode = mode # pylint: disable=protected-access
yield new_ctx
@property
def mode(self):
return self._assert_mode()
def _get_master_address(self):
mode = self._assert_mode()
config = self._config
master = (
config.master
if mode != model_fn_lib.ModeKeys.EVAL else config.evaluation_master)
return master
def _get_tpu_system_metadata(self):
"""Gets the (maybe cached) TPU system metadata."""
master = self._get_master_address()
tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master)
if tpu_system_metadata is not None:
return tpu_system_metadata
# pylint: disable=protected-access
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master,
run_config=self._config,
query_topology=self.model_parallelism_enabled))
self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata
return tpu_system_metadata
def _get_device_assignment(self):
"""Gets the (maybe cached) TPU device assignment."""
master = self._get_master_address()
device_assignment = self._lazy_device_assignment_dict.get(master)
if device_assignment is not None:
return device_assignment
tpu_system_metadata = self._get_tpu_system_metadata()
device_assignment = tpu_device_assignment.device_assignment(
tpu_system_metadata.topology,
computation_shape=self._config.tpu_config.computation_shape,
num_replicas=self.num_replicas)
logging.info('computation_shape: %s',
str(self._config.tpu_config.computation_shape))
logging.info('num_replicas: %d', self.num_replicas)
logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
self._lazy_device_assignment_dict[master] = device_assignment
return device_assignment
@property
def model_parallelism_enabled(self):
return self._model_parallelism_enabled
@property
def device_assignment(self):
return (self._get_device_assignment()
if self._model_parallelism_enabled else None)
@property
def num_of_cores_per_host(self):
metadata = self._get_tpu_system_metadata()
return metadata.num_of_cores_per_host
@property
def num_cores(self):
metadata = self._get_tpu_system_metadata()
return metadata.num_cores
@property
def num_of_replicas_per_host(self):
if self.model_parallelism_enabled:
return self.num_replicas // self.num_hosts
else:
return self.num_of_cores_per_host
@property
def num_replicas(self):
num_cores_in_system = self.num_cores
if self.model_parallelism_enabled:
computation_shape_array = np.asarray(
self._config.tpu_config.computation_shape, dtype=np.int32)
num_cores_per_replica = np.prod(computation_shape_array)
if num_cores_per_replica > num_cores_in_system:
raise ValueError(
'The num of cores required by the model parallelism, specified by '
'TPUConfig.computation_shape, is larger than the total num of '
'TPU cores in the system. computation_shape: {}, num cores '
'in the system: {}'.format(
self._config.tpu_config.computation_shape,
num_cores_in_system))
if num_cores_in_system % num_cores_per_replica != 0:
raise RuntimeError(
'The num of cores in the system ({}) is not divisible by the num '
'of cores ({}) required by the model parallelism, specified by '
'TPUConfig.computation_shape. This should never happen!'.format(
num_cores_in_system, num_cores_per_replica))
return num_cores_in_system // num_cores_per_replica
else:
return num_cores_in_system
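  # Worked example (illustrative numbers only): with
  # TPUConfig.computation_shape = [2, 2, 1], each replica spans
  # prod([2, 2, 1]) = 4 cores, so a 32-core system yields 32 // 4 = 8 replicas;
  # without model parallelism, num_replicas simply equals num_cores.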
@property
def num_hosts(self):
metadata = self._get_tpu_system_metadata()
return metadata.num_hosts
@property
def config(self):
return self._config
def is_input_sharded_per_core(self):
"""Return true if input_fn is invoked per-core (other than per-host)."""
mode = self._assert_mode()
return (mode == model_fn_lib.ModeKeys.TRAIN and
not self._config.tpu_config.per_host_input_for_training)
def is_running_on_cpu(self, is_export_mode=False):
"""Determines whether the input_fn and model_fn should be invoked on CPU.
    This API also validates user-provided configuration, such as batch size,
    according to the lazily initialized TPU system metadata.
Args:
is_export_mode: Indicates whether the current mode is for exporting the
model, when mode == PREDICT. Only with this bool, we could
tell whether user is calling the Estimator.predict or
Estimator.export_savedmodel, which are running on TPU and CPU
respectively. Parent class Estimator does not distinguish these two.
Returns:
bool, whether current input_fn or model_fn should be running on CPU.
Raises:
ValueError: any configuration is invalid.
"""
is_running_on_cpu = self._is_running_on_cpu(is_export_mode)
if not is_running_on_cpu:
self._validate_tpu_configuration()
return is_running_on_cpu
def _is_running_on_cpu(self, is_export_mode):
"""Determines whether the input_fn and model_fn should be invoked on CPU."""
mode = self._assert_mode()
if not self._use_tpu:
return True
if mode != model_fn_lib.ModeKeys.PREDICT:
return False
# There are actually 2 use cases when running with mode.PREDICT: prediction
# and saving the model. We run actual predictions on the TPU, but
# model export is run on the CPU.
if is_export_mode:
return True
return False
@property
def global_batch_size(self):
mode = self._assert_mode()
if mode == model_fn_lib.ModeKeys.TRAIN:
return self._train_batch_size
elif mode == model_fn_lib.ModeKeys.EVAL:
return self._eval_batch_size
elif mode == model_fn_lib.ModeKeys.PREDICT:
return self._predict_batch_size
else:
return None
@property
def batch_size_for_input_fn(self):
"""Returns the shard batch size for `input_fn`."""
global_batch_size = self.global_batch_size
if self.is_running_on_cpu():
return global_batch_size
# On TPU
if self.is_input_sharded_per_core():
# We prohibit per core input sharding for the model parallelism case,
# therefore it is safe to use num_cores here.
return global_batch_size // self.num_cores
else:
return global_batch_size // self.num_hosts
@property
def batch_size_for_model_fn(self):
"""Returns the shard batch size for `model_fn`."""
global_batch_size = self.global_batch_size
if self.is_running_on_cpu():
return global_batch_size
    # On TPU the per-replica batch is the global batch divided by num_replicas.
return global_batch_size // self.num_replicas
@property
def master_job(self):
"""Returns the job name to use to place TPU computations on.
Returns:
A string containing the job name, or None if no job should be specified.
Raises:
ValueError: If the user needs to specify a tpu_job_name, because we are
unable to infer the job name automatically, or if the user-specified job
names are inappropriate.
"""
run_config = self._config
# If the user specifies the tpu_job_name, use that.
if run_config.tpu_config.tpu_job_name:
return run_config.tpu_config.tpu_job_name
# The tpu job is determined by the run_config. Right now, this method is
# required as tpu_config is not part of the RunConfig.
mode = self._assert_mode()
master = (
run_config.evaluation_master
if mode == model_fn_lib.ModeKeys.EVAL else run_config.master)
if master in _LOCAL_MASTERS:
return None
if (not run_config.session_config or
not run_config.session_config.cluster_def.job):
return _DEFAULT_JOB_NAME
cluster_def = run_config.session_config.cluster_def
job_names = set([job.name for job in cluster_def.job])
if _DEFAULT_JOB_NAME in job_names:
# b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
raise ValueError('Currently, tpu_worker is not an allowed job name.')
if len(job_names) == 1:
return cluster_def.job[0].name
if len(job_names) == 2:
if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
return job_names.pop()
# TODO(b/67716447): Include more sophisticated heuristics.
raise ValueError(
'Could not infer TPU job name. Please specify a tpu_job_name as part '
'of your TPUConfig.')
@property
def tpu_host_placement_function(self):
"""Returns the TPU host place function."""
master = self.master_job
def _placement_function(_sentinal=None, core_id=None, host_id=None): # pylint: disable=invalid-name
assert _sentinal is None
if core_id is not None and host_id is not None:
raise RuntimeError(
'core_id and host_id can have only one non-None value.')
if master is None:
return '/replica:0/task:0/device:CPU:0'
else:
if core_id is not None:
host_id = core_id / self.num_of_cores_per_host
return '/job:%s/task:%d/device:CPU:0' % (master, host_id)
return _placement_function
@property
def tpu_device_placement_function(self):
"""Returns a TPU device placement Fn."""
master = self.master_job
job_device = '' if master is None else ('/job:%s' % master)
def _placement_function(i):
if self.model_parallelism_enabled:
return self.device_assignment.tpu_device(replica=i, job=master)
else:
num_of_cores_per_host = self.num_of_cores_per_host
host_id = i / num_of_cores_per_host
ordinal_id = i % num_of_cores_per_host
return '%s/task:%d/device:TPU:%d' % (job_device, host_id, ordinal_id)
return _placement_function
@property
def tpu_ordinal_function(self):
"""Returns the TPU ordinal fn."""
def _tpu_ordinal_function(index):
"""Return the TPU ordinal associated with a shard.
Required because the enqueue ops are placed on CPU.
Args:
index: the shard index
Returns:
The ordinal of the TPU device the shard's infeed should be placed on.
"""
if self.model_parallelism_enabled:
return self.device_assignment.tpu_ordinal(replica=index)
else:
return index % self.num_of_cores_per_host
return _tpu_ordinal_function
def _validate_tpu_configuration(self):
"""Validates the configuration based on the TPU system metadata."""
mode = self._assert_mode()
if self._lazy_validation_dict.get(mode):
return
# All following information is obtained from TPU system metadata.
num_cores = self.num_cores
num_replicas = self.num_replicas
num_hosts = self.num_hosts
if not num_cores:
tpu_system_metadata = self._get_tpu_system_metadata()
raise RuntimeError(
'Cannot find any TPU cores in the system. Please double check '
'Tensorflow master address and TPU worker(s). Available devices '
'are {}.'.format(tpu_system_metadata.devices))
if self._config.tpu_config.num_shards:
user_provided_num_replicas = self._config.tpu_config.num_shards
if user_provided_num_replicas != num_replicas:
message = (
'TPUConfig.num_shards is not set correctly. According to TPU '
'system metadata for Tensorflow master ({}): num_replicas should '
'be ({}), got ({}). For non-model-parallelism, num_replicas should '
'be the total num of TPU cores in the system. For '
'model-parallelism, the total number of TPU cores should be '
'product(computation_shape) * num_replicas. Please set it '
'accordingly or leave it as `None`'.format(
self._get_master_address(), num_replicas,
user_provided_num_replicas))
raise ValueError(message)
if mode == model_fn_lib.ModeKeys.TRAIN:
if self._train_batch_size % num_replicas != 0:
raise ValueError(
'train batch size {} must be divisible by number of replicas {}'
.format(self._train_batch_size, num_replicas))
elif mode == model_fn_lib.ModeKeys.EVAL:
if self._eval_batch_size is None:
raise ValueError(
'eval_batch_size in TPUEstimator constructor cannot be `None`'
'if .evaluate is running on TPU.')
if self._eval_batch_size % num_replicas != 0:
raise ValueError(
'eval batch size {} must be divisible by number of replicas {}'
.format(self._eval_batch_size, num_replicas))
if num_hosts > 1:
raise ValueError(
'TPUEstimator.evaluate should be running on single TPU worker. '
'got {}.'.format(num_hosts))
else:
assert mode == model_fn_lib.ModeKeys.PREDICT
if self._predict_batch_size is None:
raise ValueError(
'predict_batch_size in TPUEstimator constructor should not be '
'`None` if .predict is running on TPU.')
if self._predict_batch_size % num_replicas != 0:
raise ValueError(
'predict batch size {} must be divisible by number of replicas {}'
.format(self._predict_batch_size, num_replicas))
if num_hosts > 1:
raise ValueError(
'TPUEstimator.predict should be running on single TPU worker. '
'got {}.'.format(num_hosts))
# Record the state "validated" into lazy dictionary.
self._lazy_validation_dict[mode] = True
class _OneCoreTPUContext(_TPUContext):
"""Special _TPUContext for one core usage."""
def __init__(self, config, train_batch_size, eval_batch_size,
predict_batch_size, use_tpu):
super(_OneCoreTPUContext, self).__init__(
config, train_batch_size, eval_batch_size,
predict_batch_size, use_tpu)
def _get_tpu_system_metadata(self):
"""Gets the (maybe cached) TPU system metadata."""
master = self._get_master_address()
tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master)
if tpu_system_metadata is not None:
return tpu_system_metadata
tpu_system_metadata = (
tpu_system_metadata_lib._TPUSystemMetadata( # pylint: disable=protected-access
num_cores=1,
num_hosts=1,
num_of_cores_per_host=1,
topology=None,
devices=[]))
self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata
return tpu_system_metadata
def _get_tpu_context(config, train_batch_size, eval_batch_size,
predict_batch_size, use_tpu):
"""Returns an instance of `_TPUContext`."""
if (config.tpu_config.num_shards == 1 and
config.tpu_config.computation_shape is None):
logging.warning(
'Setting TPUConfig.num_shards==1 is an unsupported behavior. '
        'Please fix as soon as possible (leaving num_shards as None).')
return _OneCoreTPUContext(config, train_batch_size, eval_batch_size,
predict_batch_size, use_tpu)
return _TPUContext(config, train_batch_size, eval_batch_size,
predict_batch_size, use_tpu)
| apache-2.0 | 8,212,790,590,476,293,000 | 35.684314 | 104 | 0.661981 | false | 3.804962 | true | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/windows/sid/__init__.py | 1 | 1396 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
def GetWellKnownSid(wellknown, addr=dsz.script.Env['target_address']):
x = dsz.control.Method()
dsz.control.echo.Off()
envName = '_WELLKNOWN_SID_%s' % wellknown
if dsz.env.Check(envName, 0, addr):
return dsz.env.Get(envName, 0, addr)
if not dsz.cmd.Run('dst=%s sidlookup -wellknown "%s"' % (addr, wellknown), dsz.RUN_FLAG_RECORD):
return wellknown
try:
name = dsz.cmd.data.Get('Sid::Name', dsz.TYPE_STRING)
try:
dsz.env.Set(envName, name[0], 0, addr)
except:
pass
return name[0]
except:
return wellknown
def GetUserSid(sid, local=False, addr=dsz.script.Env['target_address']):
x = dsz.control.Method()
dsz.control.echo.Off()
envName = '_USER_SID_%s' % sid
if dsz.env.Check(envName, 0, addr):
return dsz.env.Get(envName, 0, addr)
if not dsz.cmd.Run('dst=%s sidlookup -user -name "%s"' % (addr, sid), dsz.RUN_FLAG_RECORD):
return sid
try:
name = dsz.cmd.data.Get('Sid::Name', dsz.TYPE_STRING)
try:
dsz.env.Set(envName, name[0], 0, addr)
except:
pass
return name[0]
except:
return sid | unlicense | 5,633,599,388,853,054,000 | 30.044444 | 100 | 0.59384 | false | 2.945148 | false | false | false |
Lucasgscruz/harpia | harpia/blockTemplate.py | 2 | 3317 | import s2idirectory
############################################################
##################### block templates ######################
class blockTemplate:
blockType = 'NA'
blockNumber = 'NA'
imagesIO = ''
dealloc = ''
outDealloc = ''
properties = []
myConnections = []
outputsToSave = []
weight = 1
outTypes = []
header = ''
###########################################################################
def __init__(self, block_type, block_id):
self.blockType = block_type
self.blockNumber = block_id
self.properties = []
self.myConnections = []
self.outputsToSave = []
######################################################3
def getBlockOutputTypes(self):
try:
self.outTypes = s2idirectory.block[int(self.blockType)]["OutTypes"]
except:
self.outTypes = "HRP_IMAGE", "HRP_IMAGE", "HRP_IMAGE", "HRP_IMAGE"
######################################################3
def blockCodeWriter(self):
PkgName = 'harpia.bpGUI.'
ModName = str(s2idirectory.block[int(self.blockType)]["Path"]["Python"])
        # __import__ with a fromlist behaves like "from harpia.bpGUI import <ModName>"
harpia_bpGUI_Mod = __import__(PkgName, globals(), locals(), [ModName])
guiMod = getattr(harpia_bpGUI_Mod, ModName)
guiMod.generate(self)
self.imagesIO = self.imagesIO.replace("$$", str(self.blockNumber))
self.functionCall = self.functionCall.replace("$$", str(self.blockNumber))
self.dealloc = self.dealloc.replace("$$", str(self.blockNumber))
self.outDealloc = self.outDealloc.replace("$$", str(self.blockNumber))
######################################################3
def connectorCodeWriter(self):
for x in self.myConnections:
if x.destinationNumber != '--':
if x.connType == "HRP_IMAGE":
                    self.functionCall += 'block$dn$_img_i$di$ = cvCloneImage(block$bn$_img_o$so$);// IMG connection\n'
                elif x.connType == "HRP_INT":
                    self.functionCall += 'block$dn$_int_i$di$ = block$bn$_int_o$so$;// INT connection\n'
                elif x.connType == "HRP_POINT":
                    self.functionCall += 'block$dn$_point_i$di$ = block$bn$_point_o$so$;// POINT connection\n'
                elif x.connType == "HRP_RECT":
                    self.functionCall += 'block$dn$_rect_i$di$ = block$bn$_rect_o$so$;// RECT connection\n'
                elif x.connType == "HRP_DOUBLE":
                    self.functionCall += 'block$dn$_double_i$di$ = block$bn$_double_o$so$;// DOUBLE connection\n'
                elif x.connType == "HRP_SIZE":
                    self.functionCall += 'block$dn$_size_i$di$ = block$bn$_size_o$so$;// SIZE connection\n'
                else:
                    self.functionCall += 'block$dn$_img_i$di$ = cvCloneImage(block$bn$_img_o$so$);// IMG connection\n'
self.functionCall = self.functionCall.replace("$dn$", str(x.destinationNumber))
self.functionCall = self.functionCall.replace("$di$", str(x.destinationInput))
self.functionCall = self.functionCall.replace("$bn$", str(self.blockNumber))
self.functionCall = self.functionCall.replace("$so$", str(x.sourceOutput))
| gpl-2.0 | 5,695,901,803,973,828,000 | 47.072464 | 117 | 0.512813 | false | 3.71861 | false | false | false |
RocketPod/wagtail-cookiecutter | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/urls.py | 1 | 1210 | from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', '{{ cookiecutter.repo_name }}.search.views.search', name='search'),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Add views for testing 404 and 500 templates
urlpatterns += [
url(r'^test404/$', TemplateView.as_view(template_name='404.html')),
url(r'^test500/$', TemplateView.as_view(template_name='500.html')),
]
urlpatterns += [
url(r'', include(wagtail_urls)),
]
| bsd-3-clause | 7,428,504,255,464,982,000 | 30.025641 | 89 | 0.720661 | false | 3.878205 | false | false | false |
windskyer/k_nova | paxes_nova/network/powerkvm/agent/common/warning.py | 1 | 4125 | #
# =================================================================
# =================================================================
from nova import exception
from powervc_nova import _
class OVSWarning(exception.NovaException):
msg_fmt = _("An issue was detected during validation.")
name = "generic.warning"
class OVSPortModificationNetworkOutageWarning(OVSWarning):
msg_fmt = _("Adding, deleting, or updating a port from Open vSwitch "
"%(ovs)s may cause a loss of network connectivity for virtual "
"machines that are using this port.")
name = "port.modification.network.outage.warning"
class OVSLastPortRemovalWarning(OVSWarning):
msg_fmt = _("This operation will remove the last "
"port from the Open vSwitch %(ovs)s. No external traffic "
"will be available on this virtual switch after the "
"port is removed.")
name = "last.port.removal.warning"
class OVSPortModificationVMActionWarning(OVSWarning):
msg_fmt = _("Do not run any operations on the virtual machines "
"on this Host while the ports are being modified. "
"If you try to run another operation, such as deploy, "
"the operations might fail.")
name = "port.modification.vm.action.failure.warning"
class OVSMultipleVirtualPortsWarning(OVSWarning):
msg_fmt = _("The virtual switch %(vswitch_name)s has multiple virtual "
"ports configured on it. This configuration will bridge "
"independent physical networks together, which is an "
"uncommon configuration. You can instead bond adapters "
"together in a single virtual port.")
name = "multiple.virtual.port.warning"
class OVSMovingPortsFromBridgeToOVSWarning(OVSWarning):
msg_fmt = _("Bridges cannot be added directly to the virtual switches. "
"Proceeding with this operation will remove the components "
"from the bridge %(bridge_name)s and add them to the "
"virtual switch. The following components will be moved: "
"%(ports)s")
name = "moving.ports.from.bridge.warning"
class OVSNoPortsOnBridgeWarning(OVSWarning):
msg_fmt = _("Bridges cannot be added directly to the virtual switches. "
"Only the components of the bridge can be moved to the "
"virtual switch. The bridge %(bridge_name)s cannot be added "
"to the virtual switch because it has no components "
"associated with it, therefore there is no way "
"to associate the bridge with the virtual switch. "
"This portion of the request will be ignored.")
name = "no.ports.on.bridge.warning"
class OVSAdapterHasTempIPAddressWarning(OVSWarning):
msg_fmt = _("Adapter(s) %(adapter_name)s have a temporary IP address "
"assigned. This operation will restart the network "
"service and remove this address from the adapter "
"configuration. Before continuing, it is recommended that "
"you save the configuration in the appropriate ifcfg file "
"in the /etc/sysconfig/network-scripts/ directory.")
name = 'adapter.temp.ipaddress.warning'
class OVSAdapterDHCPWarning(OVSWarning):
msg_fmt = _("Adapter(s) %(adapter_name)s are configured for DHCP. This "
"operation will restart the network service, "
"which could cause a new IP address to be assigned.")
name = 'adapter.dhcp.warning'
class OVSMovingDHCPAddressWarning(OVSWarning):
msg_fmt = _("The IP address on %(adapter_name)s is being moved "
"to %(target_dev)s. The IP address was obtained by "
"DHCP. This operation will restart the network "
"service, which might cause a new IP address to be "
"assigned to the target device. The target device "
"will have a unique MAC address as well.")
name = 'adapter.move.ipaddress.dhcp.warning'
| apache-2.0 | 1,421,785,359,258,432,500 | 44.833333 | 79 | 0.62303 | false | 4.537954 | true | false | false |
BoPeng/simuPOP | docs/saveLoadPedigree.py | 1 | 1820 | #!/usr/bin/env python
#
# $File: saveLoadPedigree.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
pop = sim.Population(4, loci=1, infoFields=['ind_id', 'father_id', 'mother_id'],
ancGen=-1)
pop.evolve(
initOps=[
sim.InitSex(),
sim.IdTagger(),
sim.InitGenotype(freq=[0.5, 0.5]),
sim.PedigreeTagger(output='>>pedigree.ped', outputLoci=0)
],
matingScheme=sim.RandomMating(
ops=[
sim.MendelianGenoTransmitter(),
sim.IdTagger(),
sim.PedigreeTagger(output='>>pedigree.ped', outputLoci=0)
],
),
gen = 2
)
#
print(open('pedigree.ped').read())
pop.asPedigree()
pop.save('pedigree1.ped', loci=0)
print(open('pedigree1.ped').read())
#
ped = sim.loadPedigree('pedigree1.ped')
sim.dump(ped, ancGens=range(3))
| gpl-2.0 | 7,416,597,249,696,086,000 | 30.37931 | 80 | 0.692308 | false | 3.215548 | false | false | false |
apetrone/gemini | tools/bootstrap.py | 1 | 2384 | # Adam Petrone
# August, 2014
import os
import sys
import logging
import subprocess
import platform
BOOTSTRAP_VIRTUALENV_PATH = "env"
REQUIREMENTS_FILE = "requirements"
def get_platform():
platform_line = platform.platform().lower()
if "linux" in platform_line:
return "linux"
elif "darwin" in platform_line:
return "macosx"
elif "nt" or "windows" in platform_line:
return "windows"
else:
return "unknown"
def get_virtualenv_path(root_path, name):
# if the system is posix, the virtualenv binaries are placed
# into a "bin" folder. Windows places these into "Scripts"
intermediate_paths = {
"posix": "bin",
"nt": "Scripts"
}
extensions = {
"posix": "",
"nt": ".exe"
}
path = intermediate_paths[os.name]
binary_name = name + extensions[os.name]
return os.path.join(root_path, path, binary_name)
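# For example (paths are illustrative): with root_path "env",
# get_virtualenv_path("env", "pip") resolves to "env/bin/pip" on POSIX systems
# and to "env\Scripts\pip.exe" on Windows, matching the two layouts above.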
def setup_environment(after_install):
try:
import virtualenv
except:
raise Exception("virtualenv not installed! This is required.")
root_path = os.path.dirname(__file__)
virtualenv_root = os.path.join(root_path, BOOTSTRAP_VIRTUALENV_PATH)
if os.path.exists(virtualenv_root):
logging.info(
"virtualenv already exists at \"%s\". Nothing to do." %
virtualenv_root
)
return virtualenv_root
logging.info("creating virtualenv at \"%s\"" % virtualenv_root)
sys.argv.append("--distribute")
sys.argv.append(virtualenv_root)
virtualenv.after_install = after_install
virtualenv.main()
return virtualenv_root
def install_packages(root_path):
pip = get_virtualenv_path(root_path, "pip")
abs_requirements_path = os.path.abspath(
os.path.join(root_path, os.path.pardir, REQUIREMENTS_FILE)
)
if get_platform() == "macosx":
os.environ["CFLAGS"] = "-Wno-unused-command-line-argument-hard-error-in-future"
command = [pip, "install", "-r", abs_requirements_path]
subprocess.call(command)
def build_docs(root_path):
sphinx_build = get_virtualenv_path(root_path, "sphinx-build")
command = [
sphinx_build,
"-b",
"html",
"docs",
"docs/html"
]
subprocess.call(command)
def post_install(options, root_path):
# after the virtualenv is installed, call the following
#
# install via requirements file
install_packages(root_path)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
root_path = setup_environment(post_install)
# (this should be moved) build documentation
build_docs(root_path)
| bsd-2-clause | -5,418,729,175,523,367,000 | 22.372549 | 81 | 0.708893 | false | 3.076129 | false | false | false |
algui91/grado_informatica_MH_Practicas | src/qapproblem/MA_10_01_PMX.py | 1 | 1859 | '''
Created on June 27, 2014
@author: Alejandro Alcalde (elbauldelprogramador.com)
Licensed under GPLv3
'''
from timeit import Timer
from qapproblem.MA_10_1_PMX import MA_10_1_PMX
class MA_10_01_PMX(MA_10_1_PMX):
'''
    A memetic genetic algorithm with local search. Every 10 generations,
    local search is applied to 10% of the population. PMX is used as the
    crossover operator.
'''
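    # Concretely (the numbers depend on the parent class configuration): with a
    # population of 50 individuals, every 10th generation the loop below runs
    # the local search on the first 50 * 0.1 = 5 individuals, each call capped
    # at 400 evaluations, and subtracts those evaluations from stop_crit.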
def __init__(self, f_name, seed):
'''
Constructor
'''
super(MA_10_01_PMX, self).__init__(f_name, seed)
def timewrapper():
return self._find_solution()
self.exec_time = Timer(timewrapper).timeit(number=1)
def _find_solution(self):
population = self.population_lenght
self.initPopulation()
self.evaluate(population)
generation_number = 0
while self.stop_crit >= 0:
# swap current and old population
self.old_population, self.current_population = self.current_population, self.old_population
self.select(population)
self.cross()
self.mutate(population, self.how_many_mutate)
self.reemplace()
self.evaluate(population)
generation_number += 1
if generation_number == 10:
for i in xrange(int(self.population_lenght * .1)):
(
self.current_population[i][2],
self.current_population[i][1],
num_evals
) = self.local_search(self.current_population[i][2], 400)
self.stop_crit -= num_evals
generation_number = 0
self.S = self.best_guy[2]
self.cost = self.best_current_cost
| gpl-2.0 | 8,342,235,070,847,642,000 | 28.983871 | 103 | 0.542227 | false | 4.103753 | false | false | false |
hongzhouye/frankenstein | mp/romp2.py | 1 | 2928 | """
ROHF-MP2
"""
import numpy as np
from pyscf import scf, mp, lib
def lorentz_regularization(denom, alpha, deg=2):
return denom + alpha**deg / denom
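# The regularization above replaces each orbital-energy denominator D by
# D + alpha**deg / D (deg defaults to 2).  As a rough numerical illustration,
# D = 0.1 and alpha = 0.1 give 0.1 + 0.01 / 0.1 = 0.2, damping the amplitude
# of a near-degenerate pair, while alpha = 0 leaves D unchanged.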
def kernel1(mp, mo_energy, mo_coeff, eris, with_t2, thr_zero, alpha, verbose):
if mo_energy is None or mo_coeff is None:
moidx = mp.get_frozen_mask()
mo_coeff = None
mo_energy = (mp.mo_energy[0][moidx[0]], mp.mo_energy[1][moidx[1]])
else:
# For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
# not supported when mo_energy or mo_coeff is given.
        assert(mp.frozen == 0 or mp.frozen is None)
if eris is None: eris = mp.ao2mo(mo_coeff)
nocca, noccb = mp.get_nocc()
nmoa, nmob = mp.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
mo_ea, mo_eb = mo_energy
eia_a = mo_ea[:nocca,None] - mo_ea[None,nocca:]
eia_b = mo_eb[:noccb,None] - mo_eb[None,noccb:]
if with_t2:
dtype = eris.ovov.dtype
t2aa = np.empty((nocca,nocca,nvira,nvira), dtype=dtype)
t2ab = np.empty((nocca,noccb,nvira,nvirb), dtype=dtype)
t2bb = np.empty((noccb,noccb,nvirb,nvirb), dtype=dtype)
t2 = (t2aa,t2ab,t2bb)
else:
t2 = None
emp2 = 0.0
for i in range(nocca):
eris_ovov = np.asarray(eris.ovov[i*nvira:(i+1)*nvira])
eris_ovov = eris_ovov.reshape(nvira,nocca,nvira).transpose(1,0,2)
denom = lib.direct_sum('a+jb->jab', eia_a[i], eia_a)
t2i = eris_ovov.conj() / lorentz_regularization(denom, alpha)
emp2 += np.einsum('jab,jab', t2i, eris_ovov) * .5
emp2 -= np.einsum('jab,jba', t2i, eris_ovov) * .5
if with_t2:
t2aa[i] = t2i - t2i.transpose(0,2,1)
eris_ovov = np.asarray(eris.ovOV[i*nvira:(i+1)*nvira])
eris_ovov = eris_ovov.reshape(nvira,noccb,nvirb).transpose(1,0,2)
denom = lib.direct_sum('a+jb->jab', eia_a[i], eia_b)
if i == nocca-1 and np.abs(denom[-1,0,0]) < thr_zero:
denom[-1,0,0] = 1.E20
t2i = eris_ovov.conj() / lorentz_regularization(denom, alpha)
emp2 += np.einsum('JaB,JaB', t2i, eris_ovov)
if with_t2:
t2ab[i] = t2i
for i in range(noccb):
eris_ovov = np.asarray(eris.OVOV[i*nvirb:(i+1)*nvirb])
eris_ovov = eris_ovov.reshape(nvirb,noccb,nvirb).transpose(1,0,2)
denom = lib.direct_sum('a+jb->jab', eia_b[i], eia_b)
t2i = eris_ovov.conj() / lorentz_regularization(denom, alpha)
emp2 += np.einsum('jab,jab', t2i, eris_ovov) * .5
emp2 -= np.einsum('jab,jba', t2i, eris_ovov) * .5
if with_t2:
t2bb[i] = t2i - t2i.transpose(0,2,1)
return emp2.real, t2
class ROMP2(mp.ump2.UMP2):
def kernel(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=True,
thr_zero=1.E-10, alpha=0.0):
return kernel1(self, mo_energy, mo_coeff, eris, with_t2, thr_zero,
alpha, self.verbose)
| bsd-3-clause | -2,246,946,983,703,782,700 | 34.277108 | 78 | 0.586749 | false | 2.429876 | false | false | false |
aniversarioperu/Proveedor | Proveedor/search_form.py | 2 | 1949 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextField, HiddenField, BooleanField
from wtforms import SelectMultipleField, DateField, DateTimeField
from wtforms import widgets
from wtforms.validators import DataRequired
etiquetas_data = [('irregulares','Fechas irregulares (Coincidencia de fechas)'),
('cercanas','Fechas cercanas (Fechas menores a 5 dias)'),
('mayor','Montos irregulares (Monto contratado mayor al referencial')]
moneda_data = [('S/.','Soles'),
('US$','Dolares'),
('EUR,$','Euros')]
class SearchFormProveedor(Form):
page = HiddenField('page')
proveedor = HiddenField('proveedor')
term = TextField('term')
monto = TextField('monto')
tipo_moneda = SelectMultipleField(
choices=moneda_data,
option_widget=widgets.CheckboxInput(),
widget=widgets.ListWidget(prefix_label=False)
)
etiquetas = SelectMultipleField(
choices=etiquetas_data,
option_widget=widgets.CheckboxInput(),
widget=widgets.ListWidget(prefix_label=False)
)
fecha_inicial = DateTimeField('Fecha de inicio', format='%Y-%m-%d')
fecha_final = DateTimeField('Fecha de fin', format='%Y-%m-%d')
class SearchFormEntidad(Form):
page = HiddenField('page')
entidad = HiddenField('entidad')
term = TextField('term')
monto = TextField('monto')
tipo_moneda = SelectMultipleField(
choices=moneda_data,
option_widget=widgets.CheckboxInput(),
widget=widgets.ListWidget(prefix_label=False)
)
etiquetas = SelectMultipleField(
choices=etiquetas_data,
option_widget=widgets.CheckboxInput(),
widget=widgets.ListWidget(prefix_label=False)
)
fecha_inicial = DateTimeField('Fecha de inicio', format='%Y-%m-%d')
fecha_final = DateTimeField('Fecha de fin', format='%Y-%m-%d')
class SearchTerm(Form):
termino = TextField('Termino de busqueda') | gpl-3.0 | -7,707,249,838,963,322,000 | 32.050847 | 80 | 0.680862 | false | 3.354561 | false | false | false |
praekelt/airtime-service | airtime_service/tests/test_service.py | 1 | 1317 | from twisted.python.usage import UsageError
from twisted.trial.unittest import TestCase
from airtime_service import service
class TestService(TestCase):
def test_make_service(self):
svc = service.makeService({
'database-connection-string': 'sqlite://',
'port': '0',
})
assert not svc.running
def test_make_service_bad_db_conn_str(self):
self.assertRaises(Exception, service.makeService, {
'database-connection-string': 'the cloud',
'port': '0',
})
def test_happy_options(self):
opts = service.Options()
opts.parseOptions(['-p', '1234', '-d', 'sqlite://'])
assert set(opts.keys()) == set([
'port', 'database-connection-string'])
assert opts['database-connection-string'] == 'sqlite://'
assert opts['port'] == '1234'
def test_default_port(self):
opts = service.Options()
opts.parseOptions(['-d', 'sqlite://'])
assert set(opts.keys()) == set([
'port', 'database-connection-string'])
assert opts['database-connection-string'] == 'sqlite://'
assert opts['port'] == '8080'
def test_db_conn_str_required(self):
opts = service.Options()
self.assertRaises(UsageError, opts.parseOptions, [])
| bsd-3-clause | 8,046,536,479,329,052,000 | 32.769231 | 64 | 0.587699 | false | 4.052308 | true | false | false |
tensorflow/tfjs-converter | tfjs-converter/python/tensorflowjs/converters/common.py | 1 | 1272 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflowjs import version
# File name for the indexing JSON file in an artifact directory.
ARTIFACT_MODEL_JSON_FILE_NAME = 'model.json'
# JSON string keys for fields of the indexing JSON.
ARTIFACT_MODEL_TOPOLOGY_KEY = 'modelTopology'
ARTIFACT_WEIGHTS_MANIFEST_KEY = 'weightsManifest'
FORMAT_KEY = 'format'
TFJS_GRAPH_MODEL_FORMAT = 'graph-model'
TFJS_LAYERS_MODEL_FORMAT = 'layers-model'
GENERATED_BY_KEY = 'generatedBy'
CONVERTED_BY_KEY = 'convertedBy'
def get_converted_by():
"""Get the convertedBy string for storage in model artifacts."""
return 'TensorFlow.js Converter v%s' % version.version
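# Minimal sketch (hypothetical values) of how these keys appear in a model.json
# artifact; the topology and weights manifest below are placeholders, not output
# produced by the converter itself.
if __name__ == '__main__':
    import json
    example_manifest = {
        FORMAT_KEY: TFJS_LAYERS_MODEL_FORMAT,
        GENERATED_BY_KEY: 'keras v2.x',  # hypothetical producer string
        CONVERTED_BY_KEY: get_converted_by(),
        ARTIFACT_MODEL_TOPOLOGY_KEY: {},
        ARTIFACT_WEIGHTS_MANIFEST_KEY: [],
    }
    print(json.dumps(example_manifest, indent=2))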
| apache-2.0 | -1,322,221,879,318,080,000 | 34.333333 | 80 | 0.712264 | false | 3.854545 | false | false | false |
pycontribs/jira | docs/conf.py | 1 | 9563 | # -*- coding: utf-8 -*-
#
# Jira Python Client documentation build configuration file, created by
# sphinx-quickstart on Thu May 3 17:01:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
from jira import __version__ # noqa
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "4.0.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.8", None),
"requests": ("https://requests.readthedocs.io/en/latest/", None),
"requests-oauthlib": ("https://requests-oauthlib.readthedocs.io/en/latest/", None),
"ipython": ("https://ipython.readthedocs.io/en/stable/", None),
"pip": ("https://pip.readthedocs.io/en/stable/", None),
}
autodoc_default_options = {
"member-order": "bysource",
"members": True,
"show-inheritance": True,
"special-members": "__init__",
"undoc-members": True,
}
autodoc_inherit_docstrings = False
nitpick_ignore = [
("py:class", "JIRA"), # in jira.resources we only import this class if type
("py:obj", "typing.ResourceType"), # only Py36 has a problem with this reference
("py:class", "jira.resources.MyAny"), # Dummy subclass for type checking
# From other packages
("py:mod", "filemagic"),
("py:mod", "ipython"),
("py:mod", "pip"),
("py:class", "_io.BufferedReader"),
("py:class", "BufferedReader"),
("py:class", "Request"),
("py:class", "requests.models.Response"),
("py:class", "requests.sessions.Session"),
("py:class", "Response"),
("py:mod", "requests-kerberos"),
("py:mod", "requests-oauthlib"),
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"jira-python"
copyright = u"2012, Atlassian Pty Ltd."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = "1"
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"body_max_width": "100%"}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_style = "css/custom_width.css"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "jirapythondoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {"papersize": "a4paper", "pointsize": "10pt"}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"jirapython.tex",
u"jira-python Documentation",
u"Atlassian Pty Ltd.",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "jirapython", u"jira-python Documentation", [u"Atlassian Pty Ltd."], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Napoleon -----------------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = False # Explicitly prefer Google style docstring
napoleon_use_param = True # for type hint support
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"jirapython",
u"jira-python Documentation",
u"Atlassian Pty Ltd.",
"jirapython",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| bsd-2-clause | 1,172,788,009,354,954,800 | 31.198653 | 87 | 0.682317 | false | 3.72391 | true | false | false |
marcus-crane/site | site/stats/migrations/0001_initial.py | 1 | 1919 | # Generated by Django 2.0 on 2018-03-30 05:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('image', models.URLField()),
('link', models.URLField()),
('author', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('image', models.URLField()),
('link', models.URLField()),
('year', models.IntegerField()),
],
),
migrations.CreateModel(
name='Show',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('image', models.URLField()),
('link', models.URLField()),
('series', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('image', models.URLField()),
('link', models.URLField()),
('artist', models.CharField(max_length=200)),
],
),
]
| mit | -5,596,246,032,809,652,000 | 34.537037 | 114 | 0.499739 | false | 4.6691 | false | false | false |
bcspragu/Machine-Learning-Projects | MovieLens/Code/run_me.py | 1 | 2959 | import numpy as np
import scipy.sparse
import kmeans
import json
#Make sure we get consistent, reproducible results
np.random.seed(seed=1)
#Define the data directory (change if you place data elsewhere)
data_dir = "/home/bsprague/Projects/CS589/MovieLens/Data/"
#Load the training ratings
A = np.load(data_dir + "train.npy")
A.shape = (1,)
Xtrain = A[0]
#Load the validation ratings
A = np.load(data_dir + "validate.npy")
A.shape = (1,)
Xval = A[0]
#Load the test ratings
A = np.load(data_dir + "test.npy")
A.shape = (1,)
Xtest = A[0]
#Load the user, item, and genre information
Users = np.load(data_dir + "users.npy")
Items = np.load(data_dir + "items.npy")
Genres = np.load(data_dir + "genres.npy")
def getRMSE(k):
model = kmeans.kmeans(n_clusters=k)
model.fit(Xtrain)
#Predict back the training ratings and compute the RMSE
XtrainHat = model.predict(Xtrain,Xtrain)
tr= model.rmse(Xtrain,XtrainHat)
#Predict the validation ratings and compute the RMSE
XvalHat = model.predict(Xtrain,Xval)
val= model.rmse(Xval,XvalHat)
return (tr,val)
results = []
#Test k from 1 to 10
for k in range(1,11):
results.append([])
#Do 5 random restarts
for runs in range(1,6):
#Store the results
results[k-1].append(getRMSE(k))
# Average, Max, and Min RMSE over k = 1 to 10 on training set
avg_tr = [np.mean([z[0] for z in y]) for y in results]
max_tr = [np.amax([z[0] for z in y]) for y in results]
min_tr = [np.amin([z[0] for z in y]) for y in results]
# Average, Max, and Min RMSE over k = 1 to 10 on validation set
avg_val = [np.mean([z[1] for z in y]) for y in results]
max_val = [np.amax([z[1] for z in y]) for y in results]
min_val = [np.amin([z[1] for z in y]) for y in results]
# Our actual model, with k=3
model = kmeans.kmeans(n_clusters=3)
model.fit(Xtrain)
clusters = model.cluster(Xtrain)
# Age, Gender, Occupation, and Address arrays for each cluster
resAge = [[],[],[]]
resGen = [[],[],[]]
resOcc = [[],[],[]]
resSt = [[],[],[]]
for i, x in enumerate(clusters):
resAge[int(x)].append(Users[i][1])
resGen[int(x)].append(Users[i][2])
resOcc[int(x)].append(Users[i][3])
resSt[int(x)].append(Users[i][4])
# 'zip.json' is a map from zip codes to states
with open('zip.json') as data_file:
mapping = json.load(data_file)
for x in range(3):
d = {}
# Look at each zip code in the cluster and add it into our map
for o in resSt[x]:
if o in mapping:
if mapping[o] in d:
d[mapping[o]] += 1
else:
d[mapping[o]] = 1
else:
print("Couldn't find " + o)
# Here, we'd build our pie chart
# centers is a k x 1682 array of ratings
centers = model.get_centers()
high = [list(reversed(sorted([(rating, Items[movie_id][1]) for movie_id, rating in enumerate(center)])))[:5] for center in centers]
low = [sorted([(rating, Items[movie_id][1]) for movie_id, rating in enumerate(center)])[:5] for center in centers]
| mit | -2,089,935,727,280,396,300 | 27.728155 | 131 | 0.64684 | false | 2.768007 | false | false | false |
kevinarpe/kevinarpe-rambutan3 | rambutan3/check_args/base/RAbstractForwardingTypeMatcher.py | 1 | 1929 | from abc import abstractmethod
from rambutan3.check_args.base.RAbstractTypeMatcher import RAbstractTypeMatcher
from rambutan3.check_args.base.traverse.RTypeMatcherError import RTypeMatcherError
class RAbstractForwardingTypeMatcher(RAbstractTypeMatcher):
# noinspection PyMissingConstructor
def __init__(self):
raise NotImplementedError('Internal error: Do not call this constructor')
@property
@abstractmethod
def _delegate(self) -> RAbstractTypeMatcher:
"""Do not forget to include decorator @property in the overriding subclasses!"""
raise NotImplementedError()
# @override
def matches(self, value, matcher_error: RTypeMatcherError=None) -> bool:
x = self._delegate.matches(value, matcher_error)
return x
# # @override
# def check_arg(self, value, arg_name: str, *arg_name_format_args):
# self._delegate.check_arg(value, arg_name, *arg_name_format_args)
# Leave this code for history.
# Disabled during testing as this causes bugs.
# # @override
# def __or__(self, other: RAbstractTypeMatcher) -> RAbstractTypeMatcher:
# x = self._delegate.__or__(other)
# return x
# @override
def __eq__(self, other: RAbstractTypeMatcher) -> bool:
if not isinstance(other, type(self)):
return False
x = self._delegate.__eq__(other._delegate)
return x
# Leave this code for history.
# Disabled during testing as this causes bugs.
# # @override
# def __ne__(self, other: RAbstractTypeMatcher) -> bool:
# if not isinstance(other, type(self)):
# return True
#
# x = self._delegate.__ne__(other._delegate)
# return x
# @override
def __hash__(self) -> int:
x = self._delegate.__hash__()
return x
# @override
def __str__(self) -> str:
x = self._delegate.__str__()
return x
| gpl-3.0 | 7,008,961,957,703,396,000 | 30.622951 | 88 | 0.635044 | false | 3.97732 | false | false | false |
SnowWalkerJ/quantlib | quant/data/wind/tables/ashareipo.py | 1 | 6795 | from ....common.db.sql import VARCHAR, Numeric as NUMBER, DateTime as DATETIME, Column, BaseModel, CLOB, DATE
VARCHAR2 = VARCHAR
class AShareIPO(BaseModel):
"""
4.17 中国A股首次公开发行数据
Attributes
----------
object_id: VARCHAR2(100)
对象ID
s_info_windcode: VARCHAR2(40)
Wind代码
crncy_code: VARCHAR2(10)
货币代码
s_ipo_price: NUMBER(20,4)
发行价格(元) 网上申购价格
s_ipo_pre_dilutedpe: NUMBER(20,4)
发行市盈率(发行前股本)
s_ipo_dilutedpe: NUMBER(20,4)
发行市盈率(发行后股本)
s_ipo_amount: NUMBER(20,4)
发行数量(万股)
s_ipo_amtbyplacing: NUMBER(20,4)
网上发行数量(万股)
s_ipo_amttojur: NUMBER(20,4)
网下发行数量(万股)
s_ipo_collection: NUMBER(20,4)
募集资金(万元) 含发行费用
s_ipo_cashratio: NUMBER(20,8)
网上发行中签率(%)
s_ipo_purchasecode: VARCHAR2(10)
网上申购代码
s_ipo_subdate: VARCHAR2(8)
申购日
s_ipo_jurisdate: VARCHAR2(8)
向一般法人配售上市日期 网下机构首次限售上次
s_ipo_instisdate: VARCHAR2(8)
向战略投资者配售部分上市日期
s_ipo_expectlistdate: VARCHAR2(8)
预计上市日期
s_ipo_fundverificationdate: VARCHAR2(8)
申购资金验资日
s_ipo_ratiodate: VARCHAR2(8)
中签率公布日
s_fellow_unfrozedate: VARCHAR2(8)
申购资金解冻日
s_ipo_listdate: VARCHAR2(8)
上市日
s_ipo_puboffrdate: VARCHAR2(8)
招股公告日
s_ipo_anncedate: VARCHAR2(8)
发行公告日
s_ipo_anncelstdate: VARCHAR2(8)
上市公告日
s_ipo_roadshowstartdate: VARCHAR2(8)
初步询价(预路演)起始日期
s_ipo_roadshowenddate: VARCHAR2(8)
初步询价(预路演)终止日期
s_ipo_placingdate: VARCHAR2(8)
网下配售发行公告日
s_ipo_applystartdate: VARCHAR2(8)
网下申购起始日期
s_ipo_applyenddate: VARCHAR2(8)
网下申购截止日期
s_ipo_priceannouncedate: VARCHAR2(8)
网下定价公告日
s_ipo_placingresultdate: VARCHAR2(8)
网下配售结果公告日
s_ipo_fundenddate: VARCHAR2(8)
网下申购资金到帐截止日
s_ipo_capverificationdate: VARCHAR2(8)
网下验资日
s_ipo_refunddate: VARCHAR2(8)
网下多余款项退还日
s_ipo_expectedcollection: NUMBER(20,4)
预计募集资金(万元)
s_ipo_list_fee: NUMBER(20,4)
发行费用(万元)
s_ipo_namebyplacing: NUMBER(20,4)
上网发行简称
s_ipo_showpricedownlimit: NUMBER(20,4)
投标询价申购价格下限
s_ipo_par: NUMBER(20,4)
面值
s_ipo_purchaseuplimit: NUMBER(20,4)
网上申购上限(个人)
s_ipo_op_uplimit: NUMBER(20,4)
网下申购上限
s_ipo_op_downlimit: NUMBER(20,4)
网下申购下限
s_ipo_purchasemv_dt: VARCHAR2(8)
网上市值申购登记日
s_ipo_pubosdtotisscoll: NUMBER(20,4)
公开及原股东募集资金总额
s_ipo_osdexpoffamount: NUMBER(20,4)
原股东预计售股数量
s_ipo_osdexpoffamountup: NUMBER(20,4)
原股东预计售股数量上限
s_ipo_osdactoffamount: NUMBER(20,4)
原股东实际售股数量
s_ipo_osdactoffprice: NUMBER(20,4)
原股东实际售股金额
s_ipo_osdunderwritingfees: NUMBER(20,4)
原股东应摊承销费用
s_ipo_pureffsubratio: NUMBER(20,4)
网上投资者有效认购倍数
s_ipo_reporate: NUMBER(20,4)
回拨比例 网下往网上是正的, 网上往网下是负的, 占本次发行数量合计的比例
ann_dt: VARCHAR2(8)
最新公告日期
is_failure: NUMBER(5,0)
是否发行失败 0:发行正常;1:发行失败;2:发行暂缓
s_ipo_otc_cash_pct: NUMBER(24,8)
网下申购配售比例 网下中签率
opdate: DATETIME
opdate
opmode: VARCHAR(1)
opmode
"""
__tablename__ = "AShareIPO"
object_id = Column(VARCHAR2(100), primary_key=True)
s_info_windcode = Column(VARCHAR2(40))
crncy_code = Column(VARCHAR2(10))
s_ipo_price = Column(NUMBER(20,4))
s_ipo_pre_dilutedpe = Column(NUMBER(20,4))
s_ipo_dilutedpe = Column(NUMBER(20,4))
s_ipo_amount = Column(NUMBER(20,4))
s_ipo_amtbyplacing = Column(NUMBER(20,4))
s_ipo_amttojur = Column(NUMBER(20,4))
s_ipo_collection = Column(NUMBER(20,4))
s_ipo_cashratio = Column(NUMBER(20,8))
s_ipo_purchasecode = Column(VARCHAR2(10))
s_ipo_subdate = Column(VARCHAR2(8))
s_ipo_jurisdate = Column(VARCHAR2(8))
s_ipo_instisdate = Column(VARCHAR2(8))
s_ipo_expectlistdate = Column(VARCHAR2(8))
s_ipo_fundverificationdate = Column(VARCHAR2(8))
s_ipo_ratiodate = Column(VARCHAR2(8))
s_fellow_unfrozedate = Column(VARCHAR2(8))
s_ipo_listdate = Column(VARCHAR2(8))
s_ipo_puboffrdate = Column(VARCHAR2(8))
s_ipo_anncedate = Column(VARCHAR2(8))
s_ipo_anncelstdate = Column(VARCHAR2(8))
s_ipo_roadshowstartdate = Column(VARCHAR2(8))
s_ipo_roadshowenddate = Column(VARCHAR2(8))
s_ipo_placingdate = Column(VARCHAR2(8))
s_ipo_applystartdate = Column(VARCHAR2(8))
s_ipo_applyenddate = Column(VARCHAR2(8))
s_ipo_priceannouncedate = Column(VARCHAR2(8))
s_ipo_placingresultdate = Column(VARCHAR2(8))
s_ipo_fundenddate = Column(VARCHAR2(8))
s_ipo_capverificationdate = Column(VARCHAR2(8))
s_ipo_refunddate = Column(VARCHAR2(8))
s_ipo_expectedcollection = Column(NUMBER(20,4))
s_ipo_list_fee = Column(NUMBER(20,4))
s_ipo_namebyplacing = Column(NUMBER(20,4))
s_ipo_showpricedownlimit = Column(NUMBER(20,4))
s_ipo_par = Column(NUMBER(20,4))
s_ipo_purchaseuplimit = Column(NUMBER(20,4))
s_ipo_op_uplimit = Column(NUMBER(20,4))
s_ipo_op_downlimit = Column(NUMBER(20,4))
s_ipo_purchasemv_dt = Column(VARCHAR2(8))
s_ipo_pubosdtotisscoll = Column(NUMBER(20,4))
s_ipo_osdexpoffamount = Column(NUMBER(20,4))
s_ipo_osdexpoffamountup = Column(NUMBER(20,4))
s_ipo_osdactoffamount = Column(NUMBER(20,4))
s_ipo_osdactoffprice = Column(NUMBER(20,4))
s_ipo_osdunderwritingfees = Column(NUMBER(20,4))
s_ipo_pureffsubratio = Column(NUMBER(20,4))
s_ipo_reporate = Column(NUMBER(20,4))
ann_dt = Column(VARCHAR2(8))
is_failure = Column(NUMBER(5,0))
s_ipo_otc_cash_pct = Column(NUMBER(24,8))
opdate = Column(DATETIME)
opmode = Column(VARCHAR(1))
| gpl-3.0 | -2,932,784,816,115,643,400 | 31.75419 | 109 | 0.621013 | false | 2.08722 | false | false | false |
pemryan/DAKOTA | examples/script_interfaces/Python/rosenbrock.py | 1 | 1473 | def rosenbrock_list(**kwargs):
num_fns = kwargs['functions']
# if num_fns > 1:
# least_sq_flag = true
# else:
# least_sq_flag = false
x = kwargs['cv']
ASV = kwargs['asv']
f0 = x[1]-x[0]*x[0]
f1 = 1-x[0]
retval = dict([])
if (ASV[0] & 1): # **** f:
f = [100*f0*f0+f1*f1]
retval['fns'] = f
if (ASV[0] & 2): # **** df/dx:
g = [ [-400*f0*x[0] - 2*f1, 200*f0] ]
retval['fnGrads'] = g
if (ASV[0] & 4): # **** d^2f/dx^2:
fx = x[1]-3*x[0]*x[0]
h = [
[ [-400*fx + 2, -400*x[0]],
[-400*x[0], 200 ] ]
]
retval['fnHessians'] = h
return(retval)
def rosenbrock_numpy(**kwargs):
from numpy import array
num_fns = kwargs['functions']
# if num_fns > 1:
# least_sq_flag = true
# else:
# least_sq_flag = false
x = kwargs['cv']
ASV = kwargs['asv']
f0 = x[1]-x[0]*x[0]
f1 = 1-x[0]
retval = dict([])
if (ASV[0] & 1): # **** f:
f = array([100*f0*f0+f1*f1])
retval['fns'] = f
if (ASV[0] & 2): # **** df/dx:
g = array([[-400*f0*x[0] - 2*f1, 200*f0]])
retval['fnGrads'] = g
if (ASV[0] & 4): # **** d^2f/dx^2:
fx = x[1]-3*x[0]*x[0]
h = array([ [ [-400*fx + 2, -400*x[0]],
[-400*x[0], 200 ] ] ] )
retval['fnHessians'] = h
return(retval)
| lgpl-2.1 | 3,471,674,982,820,286,500 | 19.458333 | 50 | 0.39647 | false | 2.55286 | false | false | false |
mrphrazer/bjoern | python-bjoern/bjoern/all.py | 2 | 2645 | from py2neo import Graph
from py2neo.ext.gremlin import Gremlin
import os
DEFAULT_GRAPHDB_URL = "http://localhost:7474/db/data/"
DEFAULT_STEP_DIR = os.path.dirname(__file__) + '/bjoernsteps/'
class BjoernSteps:
def __init__(self):
self._initJoernSteps()
self.initCommandSent = False
def setGraphDbURL(self, url):
""" Sets the graph database URL. By default,
http://localhost:7474/db/data/ is used."""
self.graphDbURL = url
def addStepsDir(self, stepsDir):
"""Add an additional directory containing steps to be injected
into the server"""
self.stepsDirs.append(stepsDir)
def connectToDatabase(self):
""" Connects to the database server."""
self.graphDb = Graph(self.graphDbURL)
self.gremlin = Gremlin(self.graphDb)
def runGremlinQuery(self, query):
""" Runs the specified gremlin query on the database. It is
assumed that a connection to the database has been
established. To allow the user-defined steps located in the
joernsteps directory to be used in the query, these step
definitions are prepended to the query."""
if not self.initCommandSent:
self.gremlin.execute(self._createInitCommand())
self.initCommandSent = True
return self.gremlin.execute(query)
def runCypherQuery(self, cmd):
""" Runs the specified cypher query on the graph database."""
return cypher.execute(self.graphDb, cmd)
def getGraphDbURL(self):
return self.graphDbURL
"""
Create chunks from a list of ids.
This method is useful when you want to execute many independent
traversals on a large set of start nodes. In that case, you
can retrieve the set of start node ids first, then use 'chunks'
to obtain disjoint subsets that can be passed to idListToNodes.
"""
def chunks(self, idList, chunkSize):
for i in xrange(0, len(idList), chunkSize):
yield idList[i:i+chunkSize]
def _initJoernSteps(self):
self.graphDbURL = DEFAULT_GRAPHDB_URL
self.stepsDirs = [DEFAULT_STEP_DIR]
def _createInitCommand(self):
initCommand = ""
for stepsDir in self.stepsDirs:
for (root, dirs, files) in os.walk(stepsDir, followlinks=True):
files.sort()
for f in files:
filename = os.path.join(root, f)
if not filename.endswith('.groovy'): continue
initCommand += file(filename).read() + "\n"
return initCommand
| gpl-3.0 | 8,872,401,343,072,897,000 | 33.802632 | 75 | 0.626843 | false | 4.050536 | false | false | false |
tawfiq9009/bottle-tweepy | bottle_tweepy.py | 1 | 1685 | import tweepy
import inspect
from bottle import PluginError
class TweepyPlugin(object):
name = 'tweepy'
api = 2
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, keyword='api'):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
self.keyword = keyword
def setup(self, app):
for other in app.plugins:
if not isinstance(other, TweepyPlugin): continue
if other.keyword == self.keyword:
raise PluginError("Found another tweepy plugin with "\
"conflicting settings (non-unique keyword).")
def apply(self, callback, context):
conf = context.config.get('tweepy') or {}
consumer_key = conf.get('consumer_key', self.consumer_key)
consumer_secret = conf.get('consumer_secret', self.consumer_secret)
access_token = conf.get('access_token', self.access_token)
access_token_secret = conf.get('access_token_secret', self.access_token_secret)
keyword = conf.get('keyword', self.keyword)
args = inspect.getargspec(context.callback)[0]
if keyword not in args:
return callback
def wrapper(*args, **kwargs):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
kwargs[self.keyword] = tweepy.API(auth)
rv = callback(*args, **kwargs)
return rv
return wrapper | mit | 1,766,222,548,043,890,700 | 37.318182 | 104 | 0.608309 | false | 4.201995 | false | false | false |
rouxcode/django-filer-addons | filer_addons/filer_signals/models.py | 1 | 6286 | from __future__ import unicode_literals
import os
from . import conf # noqa need the settings
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.core.files.base import File as DjangoFile
from filer.models import File, Folder
from filer.utils.files import get_valid_filename
def check_rename(instance, old_name=None):
"""
do the rename, needed
if old_name is provided, use it, otherwise fetch old file from db.
:param instance: filer file instance
:return:
"""
if not conf.FILER_ADDONS_CONSISTENT_FILENAMES:
return
if instance.id and instance.file:
if old_name is None:
old_instance = File.objects.get(pk=instance.id)
old_name = old_instance.file.name
old_name = os.path.basename(old_name)
new_name = get_valid_filename(instance.original_filename)
# done with startswith instead of ==
# prevent deadlock, when storagem gives the _x5sx4sd suffix!
splitext = os.path.splitext
if not (splitext(old_name)[0].startswith(splitext(new_name)[0]) and
splitext(old_name)[1] == splitext(new_name)[1]):
# rename!
# print "do rename: %s to %s" % (old_name, new_name)
existing_file = open(instance.file.path, mode='rb')
new_file = DjangoFile(existing_file)
instance.file.delete(False) # remove including thumbs
instance.file.save(new_name, new_file, save=False)
# print instance.file.name
# do it here, original_filename is not updated correctly else!
instance.save()
existing_file.close()
@receiver(
post_save,
sender='filer.File',
dispatch_uid="filer_addons_unfiled_file_to_folder",
)
@receiver(
post_save,
sender='filer.Image',
dispatch_uid="filer_addons_unfiled_image_to_folder",
)
def filer_unfiled_to_folder(sender, instance, **kwargs):
"""
check if a file is unfiled, if yes, put into default folder.
ATTENTION: this signal must be registered before the duplicate detection
signal => for when only duplicates in the same folder need to be detected!
(put in folder first, then detect duplicate)
"""
UNFILED_HANDLING = conf.FILER_ADDONS_UNFILED_HANDLING
if not UNFILED_HANDLING.get('move_unfiled', None):
return
created_only = UNFILED_HANDLING.get('created_only', False)
if created_only and not kwargs.get('created', None):
return
if not instance.folder:
default_folder_name = UNFILED_HANDLING.get(
'default_folder_name',
'Unfiled',
)
default_folder_list = Folder.objects.filter(name=default_folder_name)
if default_folder_list.count() > 0:
default_folder = default_folder_list[0]
else:
default_folder = Folder(name=default_folder_name)
default_folder.save()
instance.folder = default_folder
instance.save()
@receiver(
post_save,
sender='filer.File',
dispatch_uid="filer_addons_prevent_duplicates_file",
)
@receiver(
post_save,
sender='filer.Image',
dispatch_uid="filer_addons_prevent_duplicates_image",
)
def filer_duplicates_and_rename(sender, instance, **kwargs):
"""
check for duplicates, dont allow them!
as this is post save, it will ELIMINATE ALL DUPLICATES of a file,
if there are...this can be quite dangerous, but also be wonderfull ;-)
"""
DUPLICATE_HANDLING = conf.FILER_ADDONS_DUPLICATE_HANDLING
if not DUPLICATE_HANDLING.get('prevent'):
check_rename(instance)
return
created_only = DUPLICATE_HANDLING.get('created_only', False)
if created_only and not kwargs.get('created', None):
check_rename(instance)
return
file_obj = instance
duplicates = File.objects.exclude(pk=file_obj.id).filter(sha1=file_obj.sha1)
# narrow down? depends.
if DUPLICATE_HANDLING.get('same_folder_required', None):
duplicates = duplicates.filter(folder=file_obj.folder)
if DUPLICATE_HANDLING.get('same_filename_required', None):
# TODO: is this slugified somehow??!
duplicates = duplicates.filter(
original_filename=file_obj.original_filename
)
if len(duplicates):
# print "duplicates found (post save):"
# print duplicates
duplicate = None
for file in duplicates:
if file.file:
duplicate = file
if duplicate is None:
# duplicate without file is nothing we use and would corrupt data!
return
instance.delete()
duplicate = duplicates[0]
old_name = duplicate.file.name
instance.id = duplicate.id
instance.file = duplicate.file
instance.name = duplicate.name
instance.name = duplicate.name
instance.description = duplicate.description
if hasattr(duplicate, 'subject_location'):
instance.subject_location = duplicate.subject_location
# to be defined: set some more fields from duplicate, if filled?!
# arf dont touch django magic
# instance._uncommitted_filefields = []
# instance._state = duplicate._state
instance.save()
check_rename(instance, old_name=old_name)
else:
"""
when updating a file in a files detail view, it already has the new,
correct name leaving this here, for example when manipulating files
(and original_filename) programmatically.
"""
check_rename(instance)
@receiver(
pre_save,
sender='filer.File',
dispatch_uid="filer_addons_prevent_replace_orphans",
)
@receiver(
pre_save,
sender='filer.Image',
dispatch_uid="filer_addons_prevent_replace_orphans",
)
def filer_prevent_rename_orphans(sender, instance, **kwargs):
"""
https://github.com/divio/django-filer/pull/958
"""
if not conf.FILER_ADDONS_REPLACE_FIX:
return
# Delete old file(s) when updating the file via: admin > advanced > replace file
try:
from_db = File.objects.get(id=instance.id)
if from_db.file != instance.file:
from_db.file.delete(save=False)
except:
pass
return
| mit | -34,688,410,850,411,708 | 34.92 | 84 | 0.644448 | false | 3.795894 | false | false | false |
LYZhelloworld/Courses | 50.021/04/gm/gm2.py | 1 | 8780 | import pdb
import operator
def mul(seq):
return reduce(operator.mul, seq, 1)
print 'This version of gm for use during first lecture.'
class Potential:
# variables: list of strings naming the variables
# pot: dictionary mapping tuples of variable values to potential value
def __init__(self, variables, pot):
self.vars = variables
self.indices = dict(zip(variables, range(len(variables))))
self.pot = pot
def __str__(self):
return 'Potential('+str(self.vars)+','+str(self.pot)+')'
# vt is a tuple of values; return the associated potential value
# return 0 if vt is not explicitly represented in self.pot
def valTuple(self, vt):
return self.pot[vt] if vt in self.pot else 0.0
# Return a list of all elements that have weight > 0 in this potential
def support(self):
return [k for (k, v) in self.pot.items() if v > 0]
# assign is a dictionary mapping variable names to values; return
# the associated potential valu.
def val(self, assign):
return self.valTuple(tuple([assign[var] for var in self.vars]))
# Product of two instances of Potential is a new Potential defined
# on the union of the variables of self and other
def mul(self, other):
# Three sets of vars: only in self, in both, only in other
selfOnly = set(self.vars).difference(set(other.vars))
otherOnly = list(set(other.vars).difference(set(self.vars)))
both = set(self.vars).intersection(set(other.vars))
# keep whole tuple from self; add some indices from other
otherIndices = [other.indices[v] for v in otherOnly]
newPot = {}
for e1 in self.support():
for e2 in other.support():
if self.agrees(other, e1, e2, both):
newElt = tuple(list(e1) + [e2[i] for i in otherIndices])
newPot[newElt] = self.valTuple(e1) * other.valTuple(e2)
return Potential(self.vars + otherOnly, newPot)
# vs is a list of variable names
# Assume: tuple1 is an assignment of the variables in self, tuple
# 2 is an assignment of variables in other. Return True if they
# agree on the values of the variables in vs
def agrees(self, other, tuple1, tuple2, vs):
for v in vs:
if tuple1[self.indices[v]] != tuple2[other.indices[v]]:
return False
return True
# cVars is a list of variable names
# cVals is a list of the same length of values for those variables
# Treat self as a joint probability distribution, and this as the
# operation of conditioning on the event cVars = cVals
# - select out entries for which cVars = cVals
# - remove cVars from the potential
# - sum potential values if there are duplicate entries
# - renormalize to obtain a distribution
# Returns a new instance of Potential defined on previous vars minus cVars
def condition(self, cVars, cVals):
newPot = {}
indices = [self.indices[v] for v in cVars]
for e in self.support():
if all(e[i] == v for (i, v) in zip(indices, cVals)):
newPot[removeIndices(e, indices)] = self.pot[e]
return Potential(removeIndices(self.vars, indices), newPot).normalize()
# qVars is a list of variable names
# Sum out all other variables, returning a new potential on qVars
def marginalize(self, qVars):
newPot = {}
indices = removeVals(range(len(self.vars)),
[self.indices[v] for v in qVars])
for e in self.support():
newE = removeIndices(e, indices)
addToEntry(newPot, newE, self.valTuple(e))
return Potential(qVars, newPot)
# Divide through by sum of values; returns a new Potential on the
# same variables with potential values that sum to 1 over the
# whole domain.
def normalize(self):
total = sum(self.pot.values())
newPot = dict([(v, p/total) for (v, p) in self.pot.items()])
return Potential(self.vars, newPot)
# Convenient abbreviation
P = Potential
# Useful as the multiplicitive identity: p.mul(iPot) = p
iPot = Potential([], {tuple() : 1.0})
######################################################################
# Bayesian networks
######################################################################
class BNNode:
# name is a string naming the variable
# parents is a list of strings naming parent variables
# cpd is an instance of Potential, defined on variables [name] + parents
# It needs to be a well-formed conditional probability
# distribution, so that for each value v of name,
# sum_{values of parents} cpd([v] + values of parents) = 1
def __init__(self, name, parents, cpd):
self.name = name
self.parents = parents
self.cpd = cpd
class BN:
# bn is a dictionary
# key: string
# value: (list of strings, Potential on varName and parents)
def __init__(self, nodes):
self.vars = [n.name for n in nodes]
# LPK: Check to be sure all parents are in network
self.nodes = nodes
# assign is a dictionary from variable names to values, with an
# entry for every variable in the network
# Returns probability of that assignment
def prob(self, assign):
return mul([n.cpd.val(assign) for n in self.nodes])
# Create a joint probability distribution
# Returns a potential reprsenting the joint distribution, defined
# over all the variables in the network
def joint(self):
j = reduce(Potential.mul, [n.cpd for n in self.nodes], iPot)
assert 1-1e-8 < sum(j.pot.values()) < 1 + 1e-8
return j
# queryVars is a list of variable names
# eVars is a list of variable names
# eValues is a list of values, one for each of eVars
# Returns a joint distribution on the query variables representing
# P(queryVars | eVars = eValues)
def query(self, queryVars, eVars = [], eValues = []):
# your code here
return self.joint().condition(eVars, eValues).marginalize(queryVars)
######################################################################
# xs is a tuple (or list) of items and indices is a list of indices
# returns a new tuple containing only those items whose indices are
# not in the list
def removeIndices(xs, indices):
return tuple([xs[i] for i in range(len(xs)) if not i in indices])
# xs is a tuple (or list) of items and vals is a list of values
# returns a new tuple containing only those items whose indices are
# not in the list. Use this instead of set difference because we want
# maintain the order of the remaining xs
def removeVals(xs, vals):
return tuple([x for x in xs if x not in vals])
# Assuming d is a dictionary mapping elements to numeric values
# Adds e to the dictionary if it is not already there
# Increments the value of e by v
def addToEntry(d, e, v):
if not e in d: d[e] = 0
d[e] += v
######################################################################
# Test cases
######################################################################
# Wet grass
wg = BN([BNNode('R', [], P(['R'], {(0,) : .8, (1,) : .2})),
BNNode('S', [], P(['S'], {(0,) : .9, (1,) : .1})),
BNNode('J', ['R'],
P(['J', 'R'],
{(0, 0) : 0.8, (0, 1) : 0.0, (1, 0) : 0.2, (1, 1) : 1.0})),
BNNode('T', ['R', 'S'],
P(['T', 'R', 'S'],
{(0, 0, 0) : 1.0, (1, 0, 0) : 0.0,
(0, 0, 1) : 0.1, (1, 0, 1) : 0.9,
(0, 1, 0) : 0.0, (1, 1, 0) : 1.0,
(0, 1, 1) : 0.0, (1, 1, 1) : 1.0}))])
# Test BN query method using the wet grass model.
def test2():
print 'Testing prob'
print "wg.prob({'R' : 1, 'S' : 1, 'T' : 0, 'J' : 0})"
print wg.prob({'R' : 1, 'S' : 1, 'T' : 0, 'J' : 0})
print "wg.prob({'R' : 0, 'S' : 0, 'T' : 0, 'J' : 0})"
print wg.prob({'R' : 0, 'S' : 0, 'T' : 0, 'J' : 0})
print "wg.prob({'R' : 1, 'S' : 0, 'T' : 0, 'J' : 0})"
print wg.prob({'R' : 1, 'S' : 0, 'T' : 0, 'J' : 0})
print "wg.prob({'R' : 0, 'S' : 1, 'T' : 0, 'J' : 0})"
print wg.prob({'R' : 0, 'S' : 1, 'T' : 0, 'J' : 0})
print 'Testing query'
print "wg.query(['S'])"
print wg.query(['S'])
print "wg.query(['S'], ['T'], [1])"
print wg.query(['S'], ['T'], [1])
print "wg.query(['S'], ['T', 'J'], [1, 1])"
print wg.query(['S'], ['T', 'J'], [1, 1])
print "wg.query('R')"
print wg.query('R')
print "wg.query('R', ['T'], [1])"
print wg.query('R', ['T'], [1])
print "wg.query('R', ['T', 'S'], [1, 1])"
print wg.query('R', ['T', 'S'], [1, 1])
print "Loaded gm.py"
| mit | -4,116,659,734,526,188,500 | 38.54955 | 79 | 0.567426 | false | 3.456693 | false | false | false |
rueberger/MJHMC | mjhmc/misc/distributions.py | 1 | 16295 | """
This module contains the Distribution class which defines a standard
interface for distributions It also provides several implemented
distributions, which inherit from Distribution Any user-specified
distributions should inherit from Distribution
"""
import numpy as np
from .utils import overrides, package_path
import os
from scipy import stats
import pickle
class Distribution(object):
"""
Interface/abstract class for distributions.
Any user-specified distributions should be defined by inheriting from this class and
overriding the appropriate methods.
"""
def __init__(self, ndims=2, nbatch=100):
""" Creates a Distribution object
:param ndims: the dimension of the state space for this distribution
:param nbatch: the number of sampling particles to run simultaneously
:returns: a Distribution object
:rtype: Distribution
"""
# distribution dimensions
self.ndims = ndims
# number of sampling particles to use
self.nbatch = nbatch
# TensorflowDistributions require some special treatment
# this attribute is to be used instead of isinstance, as that would require
# tensorflow to be imported globally
if not hasattr(self, 'backend'):
self.backend = 'numpy'
# true iff being sampled with a jump process
self.mjhmc = None
# number of times energy op has been called
self.E_count = 0
# number of times gradient op has been called
self.dEdX_count = 0
# only set to true when I have a bias initialization and am being burned in
# to generate and cache a fair initialization for continuous samplers
self.generation_instance = False
# so some distributions may modify the default
if not hasattr(self, 'max_n_particles'):
self.max_n_particles = None
# set the state fairly. calls out to a cache
self.init_X()
def E(self, X):
self.E_count += X.shape[1]
return self.E_val(X)
def E_val(self, X):
"""
Subclasses should implement this with the correct energy function
"""
raise NotImplementedError()
def dEdX(self, X):
self.dEdX_count += X.shape[1]
return self.dEdX_val(X)
def dEdX_val(self, X):
"""
Subclasses should implement this with the correct energy gradient function
"""
raise NotImplementedError()
def __hash__(self):
""" Subclasses should implement this as the hash of the tuple of all parameters
that effect the distribution, including ndims. This is very important!!
nbatch should not be part of the hash!! Including it will break everything
As an example, see how this is implemented in Gaussian
:returns: a hash of the relevant parameters of self
:rtype: int
"""
raise NotImplementedError()
def init_X(self):
"""
Sets self.Xinit to a good initial value
"""
# TODO: make production ready by adding global flag to disable
# research options like this
self.cached_init_X()
def cached_init_X(self):
""" Sets self.Xinit to cached (serialized) initial states for continuous-time samplers, generated by burn in
*For use with continuous-time samplers only*
:returns: None
:rtype: none
"""
distr_name = type(self).__name__
distr_hash = hash(self)
file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
file_prefix = '{}/initializations'.format(package_path())
if file_name in os.listdir(file_prefix):
with open('{}/{}'.format(file_prefix, file_name), 'rb') as cache_file:
mjhmc_endpt, _, _, control_endpt = pickle.load(cache_file)
if self.mjhmc:
self.Xinit = mjhmc_endpt[:, :self.nbatch]
else:
self.Xinit = control_endpt[:, :self.nbatch]
else:
from mjhmc.misc.gen_mj_init import MAX_N_PARTICLES, cache_initialization
# modify this object so it can be used by gen_mj_init
old_nbatch = self.nbatch
self.nbatch = self.max_n_particles or MAX_N_PARTICLES
self.generation_instance = True
# must rebuild now that nbatch is changed back
if self.backend == 'tensorflow':
self.build_graph()
# start with biased initializations
# changes self.nbatch
try:
self.gen_init_X()
except NotImplementedError:
# completely arbitrary choice
self.Xinit = np.random.randn(self.ndims, self.nbatch)
#generate and cache fair initialization
cache_initialization(self)
# reconstruct this object using fair initialization
self.nbatch = old_nbatch
self.generation_instance = False
# must rebuild now that nbatch is changed back
if self.backend == 'tensorflow':
self.build_graph()
self.cached_init_X()
def gen_init_X(self):
""" Sets self.Xinit to generated initial states for the sampling particles
*For use with discrete-time samplers only*
:returns: None
:rtype: None
"""
raise NotImplementedError()
def reset(self):
"""
resets the object. returns self for convenience
"""
self.E_count = 0
self.dEdX_count = 0
if not self.generation_instance:
self.init_X()
return self
def __call__(self, X):
"""
Convenience method for NUTS compatibility
returns -E, -dEdX
"""
rshp_X = X.reshape(len(X), 1)
E = float(self.E(rshp_X))
dEdX = self.dEdX(rshp_X).T[0]
return -E, -dEdX
def load_cache(self):
""" Loads and returns the cached fair initializations and
estimated variances associated with this
distribution. Throws an error if the cache does not exist
:returns: the loaded cache: (fair_initialization, emc_var_estimate, true_var_estimate)
:rtype: (np.ndarray, float, float)
"""
distr_name = type(self).__name__
distr_hash = hash(self)
file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
file_prefix = '{}/initializations'.format(package_path())
with open('{}/{}'.format(file_prefix, file_name)) as cache_file:
return pickle.load(cache_file)
class LambdaDistribution(Distribution):
""" An `anonymous' distribution object for quick
experimentation. Due to the initialization time that is required
at first run it, one shouldn't use this object in the
long-term. Rather create your own distribution class that inherits
from Distribution.
You should give your LambdaDistribution objects a name. Use a
descriptive name, and use the same for functionally equivalent
LambdaDistributions - the hash of the name is used to label the
initialization information which is generated at first run time of
a new distribution. This requirement is a side effect of the
unfortunate fact that there is no computable hash function which
assigns functionally identical programs to the same number.
"""
#pylint: disable=too-many-arguments
def __init__(self, energy_func=None, energy_grad_func=None, init=None, name=None):
""" Creates an anonymous distribution object.
:param ndims: the dimension of the state space for this distribution
:param nbatch: the number of sampling particles to run simultaneously
:param energy_func: function specifying the energy
:param energy_grad_func: function specifying gradient of the energy
:param name: name of this distribution. use the same name for
functionally identical distributions
:param init: fair initialization for this distribution. array of shape (ndims, nbatch)
:returns: an anonymous distribution object
:rtype: LambdaDistribution
"""
self.energy_func = energy_func
self.energy_grad_func = energy_grad_func
self.init = init
# TODO: raise warning if name is not passed
self.name = name or str(np.random())
super(LambdaDistribution, self).__init__(ndims=init.shape[0], nbatch=init.shape[1])
@overrides(Distribution)
def E_val(self, X):
return np.sum(X*np.dot(self.J,X), axis=0).reshape((1,-1))/2.
@overrides(Distribution)
def dEdX_val(self, X):
return np.dot(self.J,X)/2. + np.dot(self.J.T,X)/2.
@overrides(Distribution)
def gen_init_X(self):
self.Xinit = self.init
@overrides(Distribution)
def __hash__(self):
return hash((self.ndims, self.nbatch, self.name))
class Gaussian(Distribution):
def __init__(self, ndims=2, nbatch=100, log_conditioning=6):
"""
Energy function, gradient, and hyperparameters for the "ill
conditioned Gaussian" example from the LAHMC paper.
"""
self.conditioning = 10**np.linspace(-log_conditioning, 0, ndims)
self.J = np.diag(self.conditioning)
self.description = '%dD Anisotropic Gaussian, %g self.conditioning'%(ndims, 10**log_conditioning)
super(Gaussian, self).__init__(ndims, nbatch)
@overrides(Distribution)
def E_val(self, X):
return np.sum(X*np.dot(self.J,X), axis=0).reshape((1,-1))/2.
@overrides(Distribution)
def dEdX_val(self, X):
return np.dot(self.J,X)/2. + np.dot(self.J.T,X)/2.
@overrides(Distribution)
def gen_init_X(self):
self.Xinit = (1./np.sqrt(self.conditioning).reshape((-1,1))) * np.random.randn(self.ndims,self.nbatch)
@overrides(Distribution)
def __hash__(self):
return hash((self.ndims, hash(tuple(self.conditioning))))
class RoughWell(Distribution):
def __init__(self, ndims=2, nbatch=100, scale1=100, scale2=4):
"""
Energy function, gradient, and hyperparameters for the "rough well"
example from the LAHMC paper.
"""
self.scale1 = scale1
self.scale2 = scale2
self.description = '{} Rough Well'.format(ndims)
super(RoughWell, self).__init__(ndims, nbatch)
@overrides(Distribution)
def E_val(self, X):
cosX = np.cos(X*2*np.pi/self.scale2)
E = np.sum((X**2) / (2*self.scale1**2) + cosX, axis=0).reshape((1,-1))
return E
@overrides(Distribution)
def dEdX_val(self, X):
sinX = np.sin(X*2*np.pi/self.scale2)
dEdX = X/self.scale1**2 + -sinX*2*np.pi/self.scale2
return dEdX
@overrides(Distribution)
def gen_init_X(self):
self.Xinit = self.scale1 * np.random.randn(self.ndims, self.nbatch)
@overrides(Distribution)
def __hash__(self):
return hash((self.ndims, self.scale1, self.scale2))
class MultimodalGaussian(Distribution):
def __init__(self, ndims=2, nbatch=100, separation=3):
self.sep_vec = np.array([separation] * nbatch +
[0] * (ndims - 1) * nbatch).reshape(ndims, nbatch)
# separated along first axis
self.sep_vec[0] += separation
super(MultimodalGaussian, self).__init__(ndims, nbatch)
@overrides(Distribution)
def E_val(self, X):
trim_sep_vec = self.sep_vec[:, :X.shape[1]]
return -np.log(np.exp(-np.sum((X + trim_sep_vec)**2, axis=0)) +
np.exp(-np.sum((X - trim_sep_vec)**2, axis=0)))
@overrides(Distribution)
def dEdX_val(self, X):
# allows for partial batch size
trim_sep_vec = self.sep_vec[:, :X.shape[1]]
common_exp = np.exp(np.sum(4 * trim_sep_vec * X, axis=0))
# floating point hax
return ((2 * ((X - trim_sep_vec) * common_exp + trim_sep_vec + X)) /
(common_exp + 1))
@overrides(Distribution)
def init_X(self):
# okay, this is pointless... sep vecs cancel
self.Xinit = ((np.random.randn(self.ndims, self.nbatch) + self.sep_vec) +
(np.random.randn(self.ndims, self.nbatch) - self.sep_vec))
@overrides(Distribution)
def __hash__(self):
return hash((self.ndims, self.separation))
class TestGaussian(Distribution):
def __init__(self, ndims=2, nbatch=100, sigma=1.):
"""Simple default unit variance gaussian for testing samplers
"""
self.sigma = sigma
super(TestGaussian, self).__init__(ndims, nbatch)
@overrides(Distribution)
def E_val(self, X):
return np.sum(X**2, axis=0).reshape((1, -1)) / (2. * self.sigma ** 2)
@overrides(Distribution)
def dEdX_val(self, X):
return X/self.sigma**2
@overrides(Distribution)
def gen_init_X(self):
self.Xinit = np.random.randn(self.ndims, self.nbatch)
@overrides(Distribution)
def __hash__(self):
return hash((self.ndims, self.sigma))
#pylint: disable=too-many-instance-attributes
class ProductOfT(Distribution):
""" Provides the product of T experts distribution
"""
#pylint: disable=too-many-arguments
def __init__(self, ndims=36, nbasis=36, nbatch=100, lognu=None, W=None, b=None):
""" Product of T experts, assumes a fixed W that is sparse and alpha that is
"""
# awkward hack to import theano in poe only
try:
import theano.tensor as T
import theano
self.theano = theano
self.T = T
except:
raise ImportError("Theano could not be imported")
if ndims != nbasis:
raise NotImplementedError("Initializer only works for ndims == nbasis")
self.ndims = ndims
self.nbasis = nbasis
self.nbatch = nbatch
if W is None:
W = np.eye(ndims, nbasis)
self.weights = self.theano.shared(np.array(W, dtype='float32'), 'W')
if lognu is None:
pre_nu = np.random.rand(nbasis,) * 2 + 2.1
else:
pre_nu = np.exp(lognu)
self.nu = self.theano.shared(np.array(pre_nu, dtype='float32'), 'nu')
if b is None:
b = np.zeros((nbasis,))
self.bias = self.theano.shared(np.array(b, dtype='float32'), 'b')
state = T.matrix()
energy = self.E_def(state)
gradient = T.grad(T.sum(energy), state)
#@overrides(Distribution)
self.E_val = self.theano.function([state], energy, allow_input_downcast=True)
#@overrides(Distribution)
self.dEdX_val = self.theano.function([state], gradient, allow_input_downcast=True)
super(ProductOfT,self).__init__(ndims,nbatch)
self.backend = 'theano'
def E_def(self,X):
"""
energy for a POE with student's-t expert in terms of:
samples [# dimensions]x[# samples] X
receptive fields [# dimensions]x[# experts] W
biases [# experts] b
degrees of freedom [# experts] nu
"""
rshp_b = self.bias.reshape((1,-1))
rshp_nu = self.nu.reshape((1, -1))
alpha = (rshp_nu + 1.)/2.
energy_per_expert = alpha * self.T.log(1 + ((self.T.dot(X.T, self.weights) + rshp_b) / rshp_nu) ** 2)
energy = self.T.sum(energy_per_expert, axis=1).reshape((1, -1))
return energy
@overrides(Distribution)
def gen_init_X(self):
#hack to remap samples from a generic product of experts to
#the model we are actually going to generate samples from
Zinit = np.zeros((self.ndims, self.nbatch))
for ii in xrange(self.ndims):
Zinit[ii] = stats.t.rvs(self.nu.get_value()[ii], size=self.nbatch)
Yinit = Zinit - self.bias.get_value().reshape((-1, 1))
self.Xinit = np.dot(np.linalg.inv(self.weights.get_value()), Yinit)
@overrides(Distribution)
def __hash__(self):
return hash((self.ndims,
self.nbasis,
hash(tuple(self.nu.get_value())),
hash(tuple(self.weights.get_value().ravel())),
hash(tuple(self.bias.get_value().ravel()))))
| gpl-2.0 | 4,360,708,793,972,506,000 | 34.971302 | 116 | 0.607732 | false | 3.852246 | false | false | false |
batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models12018.py | 4 | 17586 | import _surface
import chimera
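# Chimera visualisation script: builds named marker sets and places one
# spherical marker per model particle, given its (x, y, z) position, RGB
# colour and radius.
# chimera.runCommand is optional; ignore it when the module is missing.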
try:
    import chimera.runCommand
except ImportError:
    pass
from VolumePath import markerset as ms
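# API compatibility: use Marker_Set/Link when this Chimera build provides
# them, otherwise fall back to the volume path dialog to create marker sets.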
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except ImportError:
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets={}
surf_sets={}
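# One block per particle: create its marker set on first use, then place a
# sphere at the given (x, y, z) coordinate with the given colour and radius.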
if "particle_0 geometry" not in marker_sets:
    s=new_marker_set('particle_0 geometry')
    marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((5285.54, 11341.9, 1266.98), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
    s=new_marker_set('particle_1 geometry')
    marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((6267.49, 10231.7, 2218.72), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
    s=new_marker_set('particle_2 geometry')
    marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((5755.87, 8443.87, 1755.19), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
    s=new_marker_set('particle_3 geometry')
    marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((5944.66, 9768.49, -158.49), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
    s=new_marker_set('particle_4 geometry')
    marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((5367.6, 8543.67, -1285.72), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
    s=new_marker_set('particle_5 geometry')
    marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((4587.65, 6515.84, -83.8183), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
    s=new_marker_set('particle_6 geometry')
    marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3461.98, 5536.51, 774.218), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
    s=new_marker_set('particle_7 geometry')
    marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((3477.3, 5981.21, 12.3706), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
    s=new_marker_set('particle_8 geometry')
    marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2712.38, 4839.23, 2289.47), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
    s=new_marker_set('particle_9 geometry')
    marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((1104.07, 5036.13, 2675.56), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
    s=new_marker_set('particle_10 geometry')
    marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((1401.61, 4296.47, 4376.8), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
    s=new_marker_set('particle_11 geometry')
    marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((2079.88, 5166.2, 4888.51), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
    s=new_marker_set('particle_12 geometry')
    marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((2711.46, 5856.52, 6126.36), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
    s=new_marker_set('particle_13 geometry')
    marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2392.22, 6942.3, 5562.68), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
    s=new_marker_set('particle_14 geometry')
    marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2095.77, 8244.87, 7401.45), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
    s=new_marker_set('particle_15 geometry')
    marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((4181.79, 9261.98, 9458.4), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
    s=new_marker_set('particle_16 geometry')
    marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5790.45, 8413.5, 8741.98), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
    s=new_marker_set('particle_17 geometry')
    marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5203.45, 8225.03, 8802.05), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
    s=new_marker_set('particle_18 geometry')
    marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4149.33, 7246.24, 7957.07), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
    s=new_marker_set('particle_19 geometry')
    marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((2847.89, 6635.09, 8322.73), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
    s=new_marker_set('particle_20 geometry')
    marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((3320.45, 5332.86, 6334.83), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
    s=new_marker_set('particle_21 geometry')
    marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((3609.98, 6471.49, 8025.94), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
    s=new_marker_set('particle_22 geometry')
    marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((4417.02, 5780.82, 8183.08), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
    s=new_marker_set('particle_23 geometry')
    marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5321.91, 6495.04, 8813.66), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
    s=new_marker_set('particle_24 geometry')
    marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5643.39, 7832.99, 8429.98), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
    s=new_marker_set('particle_25 geometry')
    marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((5873.76, 9308.5, 8974.68), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
    s=new_marker_set('particle_26 geometry')
    marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5075.97, 8135.59, 8406.41), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
    s=new_marker_set('particle_27 geometry')
    marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4296.78, 7392.28, 6506.27), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
    s=new_marker_set('particle_28 geometry')
    marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4169.32, 6353.38, 7030.46), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
    s=new_marker_set('particle_29 geometry')
    marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3641.7, 5188.88, 6772.72), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
    s=new_marker_set('particle_30 geometry')
    marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((4566.35, 5023.36, 6807.17), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
    s=new_marker_set('particle_31 geometry')
    marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3178.24, 5212.79, 5802.02), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
    s=new_marker_set('particle_32 geometry')
    marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3462.31, 5123.43, 7566.67), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
    s=new_marker_set('particle_33 geometry')
    marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((4851.65, 5186.4, 7904.56), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
    s=new_marker_set('particle_34 geometry')
    marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((4405.94, 6426.33, 8292.53), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
    s=new_marker_set('particle_35 geometry')
    marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((4297.14, 7138.95, 9379.59), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
    s=new_marker_set('particle_36 geometry')
    marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4000.83, 5875.07, 7335.34), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
    s=new_marker_set('particle_37 geometry')
    marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4842.35, 6232.66, 8975.94), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
    s=new_marker_set('particle_38 geometry')
    marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((5620.05, 5926.64, 8063.02), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
    s=new_marker_set('particle_39 geometry')
    marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((4535.19, 6122.51, 8984.33), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
    s=new_marker_set('particle_40 geometry')
    marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((4479.25, 4866.2, 7896.88), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
    s=new_marker_set('particle_41 geometry')
    marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((3267.87, 3441.61, 7723.6), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
    s=new_marker_set('particle_42 geometry')
    marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((4158.55, 2578.54, 10044.9), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
    s=new_marker_set('particle_43 geometry')
    marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((3375.26, 2176.27, 8342.43), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
    s=new_marker_set('particle_44 geometry')
    marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((4211.22, 3623.71, 8420.38), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((4331.56, 3595.69, 6467.44), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((6095.12, 2766.37, 6756.62), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((5651.57, 1754.71, 8491.76), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((6082.32, 1817.25, 6661.8), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((5641.96, 2468.79, 4933.65), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((5257.38, 1223.83, 5310.65), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((4526.6, 2592.91, 4573.49), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((4126.57, 4196.26, 3851.78), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((3275.14, 3552.26, 2737.51), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((3456.32, 2765.96, 2806.34), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5162.83, 2935.53, 4018.09), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((6744.23, 3850.89, 2836.36), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((9257.77, 4204.87, 2949.83), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((9824.21, 4294.32, 3049.01), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((9779.03, 4230.77, 2228.35), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((9193.47, 3844.48, 2791.86), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((9561.92, 3243.52, 2338.43), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((7898.62, 3985.36, 2919.97), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((9190.93, 3100.32, 1946.76), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((11048.1, 2476.57, 1343.01), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((9970.25, 1441.99, 2355.89), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((11533.1, 1714.77, 2849.73), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((9256.93, 2584.04, 2650.17), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((11136.5, 3015.37, 1882.96), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((12557.4, 3027.16, 2795.64), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((11919.7, 3293.02, 3323.69), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 | -4,534,313,612,562,101,000 | 46.147453 | 75 | 0.699818 | false | 2.616575 | false | false | false |
Radagast-red/golem | apps/rendering/gui/controller/renderercustomizer.py | 2 | 11106 | import logging
import os
from copy import deepcopy
from PyQt5.QtWidgets import QFileDialog
from gui.controller.customizer import Customizer
logger = logging.getLogger("apps.rendering")
class RendererCustomizer(Customizer):
def __init__(self, gui, logic):
self.options = logic.options
Customizer.__init__(self, gui, logic)
def get_task_name(self):
raise NotImplementedError
def load_data(self):
r = self.logic.get_task_type(self.get_task_name())
self.gui.ui.outputResXSpinBox.setValue(
r.defaults.resolution[0])
self.gui.ui.outputResYSpinBox.setValue(
r.defaults.resolution[1])
# FIXME Move verification function to task specific widgets
self.logic.customizer.gui.ui.verificationSizeXSpinBox.setMaximum(
r.defaults.resolution[0])
self.logic.customizer.gui.ui.verificationSizeYSpinBox.setMaximum(
r.defaults.resolution[1])
self.gui.ui.outputFormatsComboBox.clear()
self.gui.ui.outputFormatsComboBox.addItems(r.output_formats)
for i, output_format in enumerate(r.output_formats):
if output_format == r.defaults.output_format:
self.gui.ui.outputFormatsComboBox.setCurrentIndex(i)
self.gui.ui.mainSceneFileLineEdit.clear()
self.gui.ui.outputFileLineEdit.clear()
self.options = self.logic.options
def load_task_definition(self, definition):
self.options = deepcopy(definition.options)
self.gui.ui.mainSceneFileLineEdit.setText(definition.main_scene_file)
self.gui.ui.outputResXSpinBox.setValue(definition.resolution[0])
self.gui.ui.outputResYSpinBox.setValue(definition.resolution[1])
self.gui.ui.outputFileLineEdit.setText(definition.output_file)
output_format_item = self.gui.ui.outputFormatsComboBox.findText(definition.output_format)
if output_format_item >= 0:
self.gui.ui.outputFormatsComboBox.setCurrentIndex(output_format_item)
else:
logger.error("Cannot load task, wrong output format")
return
if os.path.normpath(definition.main_scene_file) in definition.resources:
definition.resources.remove(os.path.normpath(definition.main_scene_file))
self.save_setting('main_scene_path',
os.path.dirname(definition.main_scene_file))
self.save_setting('output_file_path',
os.path.dirname(definition.output_file), sync=True)
def get_task_specific_options(self, definition):
self._change_options()
definition.options = self.options
definition.resolution = [self.gui.ui.outputResXSpinBox.value(), self.gui.ui.outputResYSpinBox.value()]
definition.output_file = self._add_ext_to_out_filename()
definition.output_format = u"{}".format(
self.gui.ui.outputFormatsComboBox.itemText(self.gui.ui.outputFormatsComboBox.currentIndex()))
definition.main_scene_file = u"{}".format(
self.gui.ui.mainSceneFileLineEdit.text())
def _change_options(self):
pass
def _setup_connections(self):
self.gui.ui.chooseMainSceneFileButton.clicked.connect(
self._choose_main_scene_file_button_clicked)
self._setup_output_connections()
self._connect_with_task_settings_changed([
self.gui.ui.mainSceneFileLineEdit.textChanged,
])
self.gui.ui.outputFormatsComboBox.currentIndexChanged.connect(self._add_ext_to_out_filename)
self.gui.ui.outputFileLineEdit.editingFinished.connect(self._add_ext_to_out_filename)
def _add_ext_to_out_filename(self):
chosen_ext = str(self.gui.ui.outputFormatsComboBox.itemText(self.gui.ui.outputFormatsComboBox.currentIndex()))
out_file_name = str(self.gui.ui.outputFileLineEdit.text())
if not out_file_name:
return ""
file_name, ext = os.path.splitext(out_file_name)
ext = ext[1:]
if self.gui.ui.outputFormatsComboBox.findText(ext) != -1 or \
self.gui.ui.outputFormatsComboBox.findText(ext.upper()) != -1:
self.gui.ui.outputFileLineEdit.setText(u"{}.{}".format(file_name, chosen_ext))
else:
self.gui.ui.outputFileLineEdit.setText(u"{}.{}".format(out_file_name, chosen_ext))
return u"{}".format(str(self.gui.ui.outputFileLineEdit.text()))
def _connect_with_task_settings_changed(self, list_gui_el):
for gui_el in list_gui_el:
gui_el.connect(self.logic.task_settings_changed)
def _setup_output_connections(self):
self.gui.ui.chooseOutputFileButton.clicked.connect(
self._choose_output_file_button_clicked)
self.gui.ui.outputResXSpinBox.valueChanged.connect(self._res_x_changed)
self.gui.ui.outputResYSpinBox.valueChanged.connect(self._res_y_changed)
def _choose_main_scene_file_button_clicked(self):
tmp_output_file_ext = self.logic.get_current_task_type().output_file_ext
output_file_ext = []
for ext in tmp_output_file_ext:
output_file_ext.append(ext.upper())
output_file_ext.append(ext.lower())
output_file_types = " ".join([u"*.{}".format(ext) for ext in output_file_ext])
filter_ = u"Scene files ({})".format(output_file_types)
path = u"{}".format(str(self.load_setting('main_scene_path', os.path.expanduser('~'))))
file_name, _ = QFileDialog.getOpenFileName(self.gui,
"Choose main scene file",
path,
filter_)
if file_name:
self.save_setting('main_scene_path', os.path.dirname(file_name))
self.gui.ui.mainSceneFileLineEdit.setText(file_name)
def _choose_output_file_button_clicked(self):
output_file_type = u"{}".format(self.gui.ui.outputFormatsComboBox.currentText())
filter_ = u"{} (*.{})".format(output_file_type, output_file_type)
path = u"{}".format(str(self.load_setting('output_file_path', os.path.expanduser('~'))))
file_name, _ = QFileDialog.getSaveFileName(self.gui,
"Choose output file",
path,
filter_)
if file_name:
self.save_setting('output_file_path', os.path.dirname(file_name))
self.gui.ui.outputFileLineEdit.setText(file_name)
def _res_x_changed(self):
self.logic.change_verification_option(size_x_max=self.gui.ui.outputResXSpinBox.value())
def _res_y_changed(self):
self.logic.change_verification_option(size_y_max=self.gui.ui.outputResYSpinBox.value())
class FrameRendererCustomizer(RendererCustomizer):
def _setup_connections(self):
super(FrameRendererCustomizer, self)._setup_connections()
self.gui.ui.framesCheckBox.stateChanged.connect(self._frames_check_box_changed)
self.gui.ui.framesLineEdit.textChanged.connect(self._frames_changed)
self.gui.ui.framesCheckBox.stateChanged.connect(self._frames_changed)
def load_data(self):
super(FrameRendererCustomizer, self).load_data()
self._set_frames_from_options()
def load_task_definition(self, definition):
super(FrameRendererCustomizer, self).load_task_definition(definition)
self._set_frames_from_options()
def _set_frames_from_options(self):
self.gui.ui.framesCheckBox.setChecked(self.options.use_frames)
self.gui.ui.framesLineEdit.setEnabled(self.options.use_frames)
if self.options.use_frames:
self.gui.ui.framesLineEdit.setText(self.frames_to_string(self.options.frames))
else:
self.gui.ui.framesLineEdit.setText("")
def _change_options(self):
self.options.use_frames = self.gui.ui.framesCheckBox.isChecked()
if self.options.use_frames:
frames = self.string_to_frames(self.gui.ui.framesLineEdit.text())
if not frames:
self.show_error_window(u"Wrong frame format. Frame list expected, e.g. 1;3;5-12.")
return
self.options.frames = frames
def _frames_changed(self):
self.logic.task_settings_changed()
def _frames_check_box_changed(self):
self.gui.ui.framesLineEdit.setEnabled(self.gui.ui.framesCheckBox.isChecked())
if self.gui.ui.framesCheckBox.isChecked():
self.gui.ui.framesLineEdit.setText(self.frames_to_string(self.options.frames))
@staticmethod
def frames_to_string(frames):
s = ""
last_frame = None
interval = False
try:
for frame in sorted(frames):
frame = int(frame)
if frame < 0:
raise ValueError("Frame number must be greater or equal to 0")
if last_frame is None:
s += str(frame)
elif frame - last_frame == 1:
if not interval:
s += '-'
interval = True
elif interval:
s += str(last_frame) + ";" + str(frame)
interval = False
else:
s += ';' + str(frame)
last_frame = frame
except (ValueError, AttributeError, TypeError) as err:
logger.error("Wrong frame format: {}".format(err))
return ""
if interval:
s += str(last_frame)
return s
@staticmethod
def string_to_frames(s):
try:
frames = []
after_split = s.split(";")
for i in after_split:
inter = i.split("-")
if len(inter) == 1: # pojedyncza klatka (np. 5)
frames.append(int(inter[0]))
elif len(inter) == 2:
inter2 = inter[1].split(",")
if len(inter2) == 1: # przedzial klatek (np. 1-10)
start_frame = int(inter[0])
end_frame = int(inter[1]) + 1
frames += range(start_frame, end_frame)
elif len(inter2) == 2: # co n-ta klata z przedzialu (np. 10-100,5)
start_frame = int(inter[0])
end_frame = int(inter2[0]) + 1
step = int(inter2[1])
frames += range(start_frame, end_frame, step)
else:
raise ValueError("Wrong frame step")
else:
raise ValueError("Wrong frame range")
return sorted(frames)
except ValueError as err:
logger.warning("Wrong frame format: {}".format(err))
return []
except (AttributeError, TypeError) as err:
logger.error("Problem with change string to frame: {}".format(err))
return []
| gpl-3.0 | -4,423,214,315,490,894,300 | 42.046512 | 118 | 0.598865 | false | 3.980645 | false | false | false |
lampwins/netbox | netbox/users/migrations/0001_api_tokens_squashed_0002_unicode_literals.py | 1 | 1389 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-01 17:43
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('users', '0001_api_tokens'), ('users', '0002_unicode_literals')]
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Token',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('expires', models.DateTimeField(blank=True, null=True)),
('key', models.CharField(max_length=40, unique=True, validators=[django.core.validators.MinLengthValidator(40)])),
('write_enabled', models.BooleanField(default=True, help_text='Permit create/update/delete operations using this key')),
('description', models.CharField(blank=True, max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tokens', to=settings.AUTH_USER_MODEL)),
],
options={
'default_permissions': [],
},
),
]
| apache-2.0 | -3,063,974,207,590,273,500 | 41.090909 | 141 | 0.62059 | false | 4.221884 | false | false | false |
mazulo/tango_with_django_project | rango/views.py | 1 | 7098 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from rango.models import Category, Page
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from datetime import datetime
def index(request):
# Consulte o banco de dados por uma lista de TODAS as categorias.
# Ordene as categorias pelo número de likes em ordem decrescente.
# Recupere apenas o top 5- ou todas se for menos do que 5
pages = Page.objects.order_by('-views')[:5]
categories = Category.objects.order_by('-likes')[:5]
context_dict = {'categories': categories, 'pages': pages}
# Pegue o número de visitas ao site.
# Nós usamos a função COOKIES.get() para obter o cookie de visitas.
# Se o cookie existir, o valor retornado é convertido para um inteiro.
# Se o cookie não existir, por padrão setamos para zero e convertemos.
visits = request.session.get('visits')
if not visits:
visits = 0
reset_last_visit_time = False
last_visit = request.session.get('last_visit')
if last_visit:
last_visit_time = datetime.strptime(
last_visit[:-7], '%Y-%m-%d %H:%M:%S'
)
if (datetime.now() - last_visit_time).days > 0:
# soma o valor anterior do cookie com +1
visits += 1
# e atualiza o cookie last_visit também
reset_last_visit_time = True
else:
# Cookie last_visit não existe, então crie ele com a data/hora atual
reset_last_visit_time = True
context_dict['visits'] = visits
request.session['visits'] = visits
if reset_last_visit_time:
request.session['last_visit'] = str(datetime.now())
response = render(request, 'rango/index.html', context_dict)
# Retornar uma resposta para o usuário, atualizando
# quaisquer cookies que precisem ser mudados.
return response
def about(request):
context_dict = {'message': 'Not necessary'}
return render(request, 'rango/about.html', context_dict)
def category(request, category_name_slug):
# Crie um dicionário de contexto para que possamos passar para engine
# de renderização de template.
context_dict = {}
try:
# Nós podemos encontrar um slug do nome da categoria com o nome dado
# Se não encontrarmos, o método .get() lança uma exceção DoesNotExist
# Assim, o método .get() retorna 1 instância do model ou lança exceção
category = Category.objects.get(slug=category_name_slug)
context_dict['category_name'] = category.name
# Recupera todas as páginas associadas.
# Note que o filter retorna >= 1 instância de model.
pages = Page.objects.filter(category=category)
# Adicione nossa lista de resultados de contexto com o nome 'pages'
context_dict['pages'] = pages
# Nós também adicionamos o objeto category do banco para o contexto.
# Usaremos isso no template para verificar se a categoria existe
context_dict['category'] = category
except Category.DoesNotExist:
# Entramos aqui se não tiver sido encontrada a categoria desejada
# Não faça nada - o template mostrará a mensagem "sem categoria".
pass
# Renderize a resposta e retorne-a para o cliente.
context_dict['category_name_slug'] = category_name_slug
return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
# É um POST HTTP?
if request.method == 'POST':
form = CategoryForm(request.POST)
# O form é válido?
if form.is_valid():
# Salve a nova categoria no banco
form.save(commit=True)
# Agora chame a view index()
# O usuário será levado para a página inicial
return index(request)
else:
# O form fornecido contém erros - dê print neles
print form.errors
else:
# Se a requisição não é POST, mostre o form para inserir dados
form = CategoryForm()
# Renderize o form com as mensagens de erro (se houver alguma)
return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
try:
cat = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat = None
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if cat:
page = form.save(commit=False)
page.category = cat
page.views = 0
page.save()
return category(request, category_name_slug)
else:
print form.errors
else:
form = PageForm()
context_dict = {'form': form, 'category': cat}
return render(request, 'rango/add_page.html', context_dict)
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
else:
print(user_form.errors, profile_form.errors)
else:
user_form = UserForm()
profile_form = UserProfileForm()
return render(
request,
'rango/register.html',
{
'user_form': user_form,
'profile_form': profile_form,
'registered': registered
}
)
def user_login(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/rango/')
else:
return HttpResponse('Sua conta está desativada.')
else:
print("Detalhes inválidos de login: {0}, {1}".format(
username, password)
)
context_dict = {'errors': 'Nome de user ou senha incorretos.'}
return render(request, 'rango/login.html', context_dict)
# return HttpResponse("Detalhes inválidos de login fornecidos.")
else:
return render(request, 'rango/login.html', {})
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect('/rango/')
@login_required
def restricted(request):
return render(request, 'rango/restricted.html', {})
| mit | -5,168,162,080,484,604,000 | 33.05314 | 78 | 0.625904 | false | 3.641012 | false | false | false |
MatiasSM/fcb | fcb/upload_checker.py | 1 | 1294 | import signal
import sys
from fcb.checker import mail, mega
from fcb.checker.settings import Configuration
from fcb.database.helpers import get_session
from fcb.database.helpers import get_db_version
from fcb.utils.log_helper import get_logger_module
log = get_logger_module('mail_checker')
def main():
# noinspection PyUnresolvedReferences
import log_configuration
if len(sys.argv) < 2:
log.error("Usage: %s <config_file>", sys.argv[0])
exit(1)
conf = Configuration(sys.argv[1])
with get_session() as session:
db_version = get_db_version(session)
if db_version != 3:
log.error("Invalid database version (%d). 3 expected", db_version)
session.close()
exit(1)
session.close()
mail_verifier = mail.Verifier()
mega_verifier = mega.Verifier()
def signal_handler(signal, frame):
print "Abort signal received!!!!"
mail_verifier.stop()
mega_verifier.stop()
signal.signal(signal.SIGINT, signal_handler)
for mail_conf in conf.mail_confs:
mail_verifier.verify(mail_conf)
for meaga_conf in conf.mega_confs:
mega_verifier.verify(meaga_conf)
mail_verifier.close()
mega_verifier.close()
if __name__ == '__main__':
main()
| lgpl-3.0 | 2,179,570,835,735,439,600 | 23.884615 | 78 | 0.64915 | false | 3.594444 | false | false | false |
edonet/packages | Sass/completions/completions.py | 2 | 1410 | # import module
from Sass.completions import properties as prop
import sublime, re
# default
prop_default = prop.names + prop.tag
# Completions Flag
flag = sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
# 【CSS】补全方法
def completions(self, view, prefix, locations):
# 获取当前位置
pt = locations[0] - len(prefix)
# 获取属性值
if view.match_selector(pt, 'meta.property-value.sass'):
line = view.substr(sublime.Region(view.line(pt).begin(), pt))
m = re.search(re.compile('(?:-webkit-|-moz-|-ms-|-o-)?([-a-zA-Z]+):[^;]*$'), line)
if m:
style = m.group(1)
if style in prop.value_for_name:
return prop.value_for_name[style] + prop.default_value, flag
return prop.default_value, flag
return prop.names, flag
if view.match_selector(pt, 'meta.parameter-value.sass'):
return flag
# 获取当前字符
ch = view.substr(sublime.Region(pt - 1, pt))
# at-rule
if ch == '@':
return prop.extends_style, flag
# 伪类
if ch == ':':
return prop.pseude_class, flag
# 变量
if ch == '$':
return flag
line = view.substr(sublime.Region(view.line(pt).begin(), pt))
# 属性
if re.search(re.compile('^\s*$'), line):
return prop_default, flag
# 标签
return prop.tag, flag
| isc | -60,636,841,419,177,210 | 22.241379 | 90 | 0.591246 | false | 2.969163 | false | false | false |
sthjs/python-learn | diamonds.py | 1 | 1102 | line = input()
while line != "":
rows = int(line)
diamonds = int(input())
symbol = input()
mid = rows // 2
# For each row
for currentRow in range(rows):
# For each diamond in the pattern
for diamond in range(diamonds):
# For each column in a diamond
for currentCol in range(rows):
if currentRow <= mid:
if currentCol == mid - currentRow or currentCol == mid + currentRow:
print(symbol, end="")
else:
print(" ", end="")
else:
if currentCol == mid - (rows - currentRow) + 1 or currentCol == mid + (rows - currentRow) - 1:
print(symbol, end="")
else:
print(" ", end="")
# A row in a single diamond in the pattern is finished
if diamond < diamonds - 2:
print(" ", end="")
# A complete row is finished
print()
# The whole pattern is finished
print()
line = input()
| gpl-3.0 | -3,858,567,009,583,473,000 | 33.4375 | 114 | 0.463702 | false | 4.630252 | false | false | false |
roderickmackenzie/opvdm | gui/scan.py | 1 | 19673 | # Organic Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for organic solar cells.
# Copyright (C) 2012 Roderick C. I. MacKenzie
#
# [email protected]
# www.opvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pygtk
pygtk.require('2.0')
import gtk
import sys
import os
import shutil
from cal_path import get_image_file_path
from about import about_dialog_show
from used_files_menu import used_files_menu
from server import server
from scan_tab import scan_vbox
from gui_util import dlg_get_text
import threading
import gobject
import multiprocessing
import time
import glob
from window_list import windows
from util import opvdm_delete_file
from util import delete_second_level_link_tree
from util import copy_scan_dir
from search import return_file_list
from win_lin import running_on_linux
import webbrowser
from search import find_fit_log
from scan_io import get_scan_dirs
from hpc import hpc_class
from debug import debug_mode
from inp import inp_update_token_value
from inp import inp_get_token_value
import i18n
_ = i18n.language.gettext
class scan_class(gtk.Window):
def callback_cluster(self, widget, data=None):
if self.cluster_window==None:
self.cluster_window=hpc_class()
self.cluster_window.init(self.hpc_root_dir,self.myserver.terminal)
print self.cluster_window.get_property("visible")
if self.cluster_window.get_property("visible")==True:
self.cluster_window.hide()
else:
self.cluster_window.show()
def get_main_menu(self, window):
accel_group = gtk.AccelGroup()
item_factory = gtk.ItemFactory(gtk.MenuBar, "<main>", accel_group)
item_factory.create_items(self.menu_items)
if debug_mode()==False:
item_factory.delete_item(_("/Advanced"))
window.add_accel_group(accel_group)
self.item_factory = item_factory
return item_factory.get_widget("<main>")
def callback_close(self, widget, data=None):
self.win_list.update(self,"scan_window")
self.hide()
return True
def callback_change_dir(self, widget, data=None):
dialog = gtk.FileChooserDialog(_("Change directory"),
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER)
filter = gtk.FileFilter()
filter.set_name(_("All files"))
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.sim_dir=dialog.get_filename()
a = open("scan_window.inp", "w")
a.write(self.sim_dir)
a.close()
self.clear_pages()
self.load_tabs()
dialog.destroy()
return True
def callback_help(self, widget, data=None):
webbrowser.open('http://www.opvdm.com/man/index.html')
def callback_add_page(self, widget, data=None):
new_sim_name=dlg_get_text( _("New simulation name:"), _("Simulation ")+str(self.number_of_tabs+1))
if new_sim_name!=None:
new_sim_name=self.remove_invalid(new_sim_name)
name=os.path.join(os.getcwd(),new_sim_name)
self.add_page(name)
def callback_remove_page(self,widget,name):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
self.toggle_tab_visible(tab.tab_name)
def callback_cluster_sleep(self,widget,data):
self.myserver.sleep()
def callback_cluster_poweroff(self,widget,data):
self.myserver.poweroff()
def callback_cluster_get_data(self,widget):
self.myserver.get_data()
def callback_cluster_print_jobs(self,widget):
self.myserver.print_jobs()
def callback_cluster_fit_log(self,widget):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
path=os.path.join(self.sim_dir,name)
find_fit_log("./fit.dat",path)
os.system("gnuplot -persist ./fit.dat &\n")
def callback_copy_page(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
old_dir=os.path.join(self.sim_dir,name)
new_sim_name=dlg_get_text( _("Clone the current simulation to a new simulation called:"), name)
if new_sim_name!=None:
new_sim_name=self.remove_invalid(new_sim_name)
new_dir=os.path.join(self.sim_dir,new_sim_name)
copy_scan_dir(new_dir,old_dir)
print _("I want to copy"),new_dir,old_dir
self.add_page(new_sim_name)
def callback_run_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.simulate(True,True)
def callback_build_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.simulate(False,True)
def callback_run_simulation_no_build(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.simulate(True,False)
def callback_nested_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.nested_simulation()
def callback_clean_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.clean_scan_dir()
def callback_clean_unconverged_simulation(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.scan_clean_unconverged()
def callback_clean_simulation_output(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.scan_clean_simulation_output()
def callback_import_from_hpc(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.import_from_hpc()
def callback_push_to_hpc(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.push_to_hpc()
def callback_push_unconverged_to_hpc(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.push_unconverged_to_hpc()
def callback_set_hpc_dir(self,widget,data):
config_file=os.path.join(self.sim_dir,"server.inp")
hpc_path=inp_get_token_value(config_file, "#hpc_dir")
dialog = gtk.FileChooserDialog(_("Select HPC dir"),
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
if os.path.isdir(hpc_path):
dialog.set_current_folder(hpc_path)
filter = gtk.FileFilter()
filter.set_name(_("All files"))
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
inp_update_token_value(config_file, "#hpc_dir", dialog.get_filename(),1)
dialog.destroy()
def remove_invalid(self,input_name):
return input_name.replace (" ", "_")
def callback_rename_page(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
old_dir=os.path.join(self.sim_dir,name)
new_sim_name=dlg_get_text( _("Rename the simulation to be called:"), name)
if new_sim_name!=None:
new_sim_name=self.remove_invalid(new_sim_name)
new_dir=os.path.join(self.sim_dir,new_sim_name)
shutil.move(old_dir, new_dir)
tab.rename(new_dir)
def callback_delete_page(self,widget,data):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
name=tab.tab_name
dir_to_del=os.path.join(self.sim_dir,name)
md = gtk.MessageDialog(None, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, _("Should I remove the simulation directory ")+dir_to_del)
#gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION,
# gtk.BUTTONS_CLOSE, "Should I remove the simulation directory "+dir_to_del)
response = md.run()
if response == gtk.RESPONSE_YES:
self.notebook.remove_page(pageNum)
for items in self.tab_menu.get_children():
if items.get_label()==name:
self.tab_menu.remove(items)
print _("I am going to delete file"),dir_to_del
delete_second_level_link_tree(dir_to_del)
self.number_of_tabs=self.number_of_tabs-1
elif response == gtk.RESPONSE_NO:
print _("Not deleting")
md.destroy()
def toggle_tab_visible(self,name):
tabs_open=0
print name
for i in range(0, self.number_of_tabs):
if self.rod[i].visible==True:
tabs_open=tabs_open+1
#print "tabs open",tabs_open,self.number_of_tabs
for i in range(0, self.number_of_tabs):
print self.rod[i].tab_name, name, self.rod[i].visible
if self.rod[i].tab_name==name:
if self.rod[i].visible==False:
self.rod[i].set_visible(True)
self.rod[i].visible=True
else:
if tabs_open>1:
print self.rod[i].tab_label
self.rod[i].set_visible(False)
self.rod[i].visible=False
def callback_view_toggle(self, widget, data):
#print "one",widget.get_label()
self.toggle_tab_visible(widget.get_label())
def callback_view_toggle_tab(self, widget, data):
self.toggle_tab_visible(data)
def callback_run_all_simulations(self,widget):
for i in range(0,self.notebook.get_n_pages()):
tab = self.notebook.get_nth_page(i)
tab.simulate(True,True)
def callback_stop_simulation(self,widget):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
tab.stop_simulation()
def load_tabs(self):
sim_dirs=[]
get_scan_dirs(sim_dirs,self.sim_dir)
print sim_dirs,self.sim_dir
if len(sim_dirs)==0:
sim_dirs.append("scan1")
else:
for i in range(0,len(sim_dirs)):
sim_dirs[i]=sim_dirs[i]
for i in range(0,len(sim_dirs)):
self.add_page(sim_dirs[i])
def clear_pages(self):
for items in self.tab_menu.get_children():
self.tab_menu.remove(items)
for child in self.notebook.get_children():
self.notebook.remove(child)
self.rod=[]
def add_page(self,name):
hbox=gtk.HBox()
hbox.set_size_request(-1, 25)
label=gtk.Label("")
sim_name=os.path.basename(os.path.normpath(name))
print "Looking for",sim_name,name
self.rod.append(scan_vbox())
self.rod[len(self.rod)-1].init(self.myserver,self.tooltips,self.status_bar,self.context_id,label,self.sim_dir,sim_name)
label.set_justify(gtk.JUSTIFY_LEFT)
hbox.pack_start(label, False, True, 0)
button = gtk.Button()
close_image = gtk.Image()
close_image.set_from_file(os.path.join(get_image_file_path(),"close.png"))
close_image.show()
button.add(close_image)
button.props.relief = gtk.RELIEF_NONE
button.connect("clicked", self.callback_view_toggle_tab,self.rod[len(self.rod)-1].tab_name)
button.set_size_request(25, 25)
button.show()
hbox.pack_end(button, False, False, 0)
hbox.show_all()
self.notebook.append_page(self.rod[len(self.rod)-1],hbox)
self.notebook.set_tab_reorderable(self.rod[len(self.rod)-1],True)
menu_item = gtk.CheckMenuItem(sim_name)
menu_item.set_active(True)
self.tab_menu.append(menu_item)
menu_item.show()
menu_item.set_active(self.rod[len(self.rod)-1].visible)
#print "Rod",name,self.rod[len(self.rod)-1].visible
menu_item.connect("activate", self.callback_view_toggle,menu_item)
self.number_of_tabs=self.number_of_tabs+1
def callback_last_menu_click(self, widget, data):
print [data]
def switch_page(self,page, page_num, user_param1):
pageNum = self.notebook.get_current_page()
tab = self.notebook.get_nth_page(pageNum)
self.status_bar.push(self.context_id, tab.sim_dir)
def callback_remove_all_results(self, widget, data):
results=[]
return_file_list(results,self.sim_dir,"scan.inp")
for i in range(0,len(results)):
dir_name=os.path.dirname(results[i])
if os.path.isdir(dir_name):
print "delete:",dir_name
#opvdm_delete_file(dir_name)
def callback_wol(self, widget, data):
self.myserver.wake_nodes()
def init(self,my_server):
self.cluster_window=None
self.win_list=windows()
self.win_list.load()
self.win_list.set_window(self,"scan_window")
print "constructur"
self.rod=[]
if os.path.isfile("scan_window.inp"):
f = open("scan_window.inp")
lines = f.readlines()
f.close()
path=lines[0].strip()
if path.startswith(os.getcwd()):
self.sim_dir=path
else:
self.sim_dir=os.getcwd()
else:
self.sim_dir=os.getcwd()
self.tooltips = gtk.Tooltips()
self.set_border_width(2)
self.set_title(_("Parameter scan - opvdm"))
n=0
self.hpc_root_dir= os.path.abspath(os.getcwd()+'/../')
self.number_of_tabs=0
items=0
self.status_bar = gtk.Statusbar()
self.status_bar.show()
self.context_id = self.status_bar.get_context_id("Statusbar example")
box=gtk.HBox()
box.add(self.status_bar)
box.set_child_packing(self.status_bar, True, True, 0, 0)
box.show()
self.menu_items = (
( _("/_File"), None, None, 0, "<Branch>" ),
( _("/File/Change dir"), None, self.callback_change_dir, 0, None ),
( _("/File/Close"), None, self.callback_close, 0, None ),
( _("/Simulations/_New"), None, self.callback_add_page, 0, "<StockItem>", "gtk-new" ),
( _("/Simulations/_Delete simulaton"), None, self.callback_delete_page, 0, "<StockItem>", "gtk-delete" ),
( _("/Simulations/_Rename simulation"), None, self.callback_rename_page, 0, "<StockItem>", "gtk-edit" ),
( _("/Simulations/_Clone simulation"), None, self.callback_copy_page, 0, "<StockItem>", "gtk-copy" ),
( _("/Simulations/sep1"), None, None, 0, "<Separator>" ),
( _("/Simulations/_Run simulation"), None, self.callback_run_simulation, 0, "<StockItem>", "gtk-media-play" ),
( _("/Advanced/_Build simulation"), None, self.callback_build_simulation, 0, "<StockItem>", "gtk-cdrom" ),
( _("/Advanced/_Run (no build)"), None, self.callback_run_simulation_no_build, 0, "<StockItem>", "gtk-media-play" ),
( _("/Advanced/_Run nested simulation"), None, self.callback_nested_simulation, 0, "<StockItem>", "gtk-media-play" ),
( _("/Advanced/_Clean simulation"), None, self.callback_clean_simulation, 0, "<StockItem>", "gtk-clear" ),
( _("/Advanced/_Clean unconverged simulation"), None, self.callback_clean_unconverged_simulation, 0, "<StockItem>", "gtk-clear" ),
( _("/Advanced/_Clean simulation output"), None, self.callback_clean_simulation_output, 0, "<StockItem>", "gtk-clear" ),
( _("/Advanced/sep2"), None, None, 0, "<Separator>" ),
( _("/Advanced/_Import from hpc"), None, self.callback_import_from_hpc, 0, "<StockItem>", "gtk-open" ),
( _("/Advanced/_Push to hpc"), None, self.callback_push_to_hpc, 0, "<StockItem>", "gtk-save" ),
( _("/Advanced/_Push unconverged to hpc"), None, self.callback_push_unconverged_to_hpc, 0, "<StockItem>", "gtk-save" ),
( _("/Advanced/_Set hpc dir"), None, self.callback_set_hpc_dir, 0, "<StockItem>", "gtk-open" ),
( _("/Advanced/_Cluster sleep"), None, self.callback_cluster_sleep, 0, "<StockItem>", "gtk-copy" ),
( _("/Advanced/_Cluster poweroff"), None, self.callback_cluster_poweroff, 0, "<StockItem>", "gtk-copy" ),
( _("/Advanced/_Cluster wake"), None, self.callback_wol, 0, "<StockItem>", "gtk-copy" ),
( _("/Advanced/_Remove all results"), None, self.callback_remove_all_results, 0, "<StockItem>", "gtk-copy" ),
( _("/_Help"), None, None, 0, "<LastBranch>" ),
( _("/_Help/Help"), None, self.callback_help, 0, None ),
( _("/_Help/About"), None, about_dialog_show, 0, "<StockItem>", "gtk-about" ),
)
main_vbox = gtk.VBox(False, 3)
menubar = self.get_main_menu(self)
main_vbox.pack_start(menubar, False, False, 0)
menubar.show()
toolbar = gtk.Toolbar()
toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_size_request(-1, 50)
pos=0
#image = gtk.Image()
#image.set_from_file(os.path.join(get_image_file_path(),"new-tab.png"))
tb_new_scan = gtk.MenuToolButton(gtk.STOCK_NEW)
tb_new_scan.connect("clicked", self.callback_add_page)
self.tooltips.set_tip(tb_new_scan, _("New simulation"))
self.tab_menu=gtk.Menu()
tb_new_scan.set_menu(self.tab_menu)
toolbar.insert(tb_new_scan, pos)
pos=pos+1
sep = gtk.SeparatorToolItem()
sep.set_draw(True)
sep.set_expand(False)
toolbar.insert(sep, pos)
pos=pos+1
delete = gtk.ToolButton(gtk.STOCK_DELETE)
delete.connect("clicked", self.callback_delete_page,None)
self.tooltips.set_tip(delete, _("Delete simulation"))
toolbar.insert(delete, pos)
pos=pos+1
copy = gtk.ToolButton(gtk.STOCK_COPY)
copy.connect("clicked", self.callback_copy_page,None)
self.tooltips.set_tip(copy, _("Clone simulation"))
toolbar.insert(copy, pos)
pos=pos+1
rename = gtk.ToolButton(gtk.STOCK_EDIT)
rename.connect("clicked", self.callback_rename_page,None)
self.tooltips.set_tip(rename, _("Rename simulation"))
toolbar.insert(rename, pos)
pos=pos+1
sep = gtk.SeparatorToolItem()
sep.set_draw(True)
sep.set_expand(False)
toolbar.insert(sep, pos)
pos=pos+1
image = gtk.Image()
image.set_from_file(os.path.join(get_image_file_path(),"forward2.png"))
tb_simulate = gtk.ToolButton(image)
tb_simulate.connect("clicked", self.callback_run_all_simulations)
self.tooltips.set_tip(tb_simulate, _("Run all simulation"))
toolbar.insert(tb_simulate, pos)
pos=pos+1
if debug_mode()==True:
sep = gtk.SeparatorToolItem()
sep.set_draw(True)
sep.set_expand(False)
toolbar.insert(sep, pos)
pos=pos+1
image = gtk.Image()
image.set_from_file(os.path.join(get_image_file_path(),"server.png"))
cluster = gtk.ToolButton(image)
cluster.connect("clicked", self.callback_cluster)
self.tooltips.set_tip(cluster, _("Configure cluster"))
toolbar.insert(cluster, pos)
cluster.show()
pos=pos+1
sep = gtk.SeparatorToolItem()
sep.set_draw(False)
sep.set_expand(True)
toolbar.insert(sep, pos)
pos=pos+1
tb_help = gtk.ToolButton(gtk.STOCK_HELP)
tb_help.connect("clicked", self.callback_help)
self.tooltips.set_tip(tb_help, _("Help"))
toolbar.insert(tb_help, pos)
pos=pos+1
toolbar.show_all()
main_vbox.pack_start(toolbar, False, False, 0)
#main_vbox.add(toolbar)
main_vbox.set_border_width(1)
self.add(main_vbox)
main_vbox.show()
self.myserver=my_server
self.notebook = gtk.Notebook()
self.notebook.show()
self.notebook.set_tab_pos(gtk.POS_LEFT)
self.load_tabs()
main_vbox.pack_start(self.notebook, True, True, 0)
main_vbox.pack_start(box, False, False, 0)
self.connect("delete-event", self.callback_close)
self.notebook.connect("switch-page",self.switch_page)
self.set_icon_from_file(os.path.join(get_image_file_path(),"image.jpg"))
self.hide()
| gpl-2.0 | 5,365,791,326,686,025,000 | 31.145425 | 137 | 0.681137 | false | 2.864861 | false | false | false |
chbrown/argv | argv/parsers/inferential.py | 1 | 1820 | from argv.iterables import peekable
from argv.flags import parse_tokens
class InferentialParser(object):
def __repr__(self):
return '%s()' % self.__class__.__name__
def parse(self, args=None):
'''Parse a list of arguments, returning a dict.
Flags are only boolean if they are not followed by a non-flag argument.
All positional arguments not associable with a flag will be added to the return dictionary's `['_']` field.
'''
opts = dict()
if args is None:
import sys
# skip over the program name with the [1:] slice
args = sys.argv[1:]
# arglist is a tuple of (is_flag, name) pairs
arglist = peekable(parse_tokens(args))
for is_flag, name in arglist:
if is_flag is True:
# .peek will return the default argument iff there are no more entries
next_is_flag, next_name = arglist.peek(default=(None, None))
# next_is_flag will be None if there are no more items, but True/False if there is a next item
# if this argument looks for a subsequent (is set as boolean),
# and the subsequent is not a flag, consume it
if next_is_flag is False:
opts[name] = next_name
# finally, advance our iterator, but since we already have the next values, just discard it
arglist.next()
else:
# if there is no next thing, or the next thing is a flag,
# all the boolean=False's in the world can't save you then
opts[name] = True
else:
# add positional argument
opts.setdefault('_', []).append(name)
return opts
| mit | -1,911,478,264,357,357,600 | 39.444444 | 115 | 0.55989 | false | 4.631043 | false | false | false |
swooingfish/kidsafe | install/opt/kidsafe/kidsafe.py | 1 | 12889 | #!/usr/bin/env python
# kidsafe child safe proxy server using squid
# see http://www.penguintutor.com/kidsafe
# kidsafe.py - squid v3 authethentication helper application
# Copyright Stewart Watkiss 2012
# This file is part of kidsafe.
#
# kidsafe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kidsafe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kidsafe. If not, see <http://www.gnu.org/licenses/>.
import sys, time, datetime, os
# These are global variables so can be accessed elsewhere
# These are not normally updated during running
# Only loglevel needs to be global, but others kept together at start of file
# Set loglevel to amount of logging required
# 0 = no logging, 1 = critical only, 2 = significant (warning), 3 = disable all logging of accepts / denies, 4 = log entries where log=yes is set on rule, 5 = include denies, 6 = include accept, 7 = include session tests, 8 = detailed session log, 9 = debug, 10 = full debug
# Normally this should be level 4 (or level 5 if want to log all blocked sites)
# above 6 not recommended for normal use
# 3 is same as 2 at the moment
loglevel = 6
#loglevel = 10
session_file = "/opt/kidsafe/kidsafe.session"
rules_file = "/opt/kidsafe/kidsafe.rules"
log_file = "/var/log/squid3/kidsafe.log"
def main ():
global loglevel
global rules_file
global session_file
# store time when files were last modified - if this is updated then we reload the file
r_mtime = os.path.getmtime(rules_file)
s_mtime = os.path.getmtime(session_file)
if (loglevel >= 2):
logEntry (2, "kidsafe starting")
# Session list
# holds whether user is logged in and at what level access they have
sessions = loadSessions(session_file);
# List to hold whitelist
# each entry in the whitelist is a list containing
rules = loadRules(rules_file);
while (1):
# read entry from stdin - src, dst, dstport
inline = sys.stdin.readline()
# only use src & dst; dstport is for logging purposes
# path is included, but not used. In future this can be used for additional keyword checks against url
(src, dst, dstport, path) = inline.split()
if (loglevel >= 10):
logEntry (10, "New proxy request "+inline)
# current time - we use this later to check for expired entries / sessions
timenow = int(time.time());
# convert dst to lowercase (rest should be IP addresses)
dst = dst.lower()
# check if sessions file updated (mtime) to see if we need to reload
if (loglevel >= 10):
logEntry (10, "last loaded mtime "+str(s_mtime))
logEntry (10, "new mtime "+str(os.path.getmtime(session_file)))
if (s_mtime != os.path.getmtime(session_file)):
sessions = loadSessions(session_file)
s_mtime = os.path.getmtime(session_file)
# check if rules file updated (mtime) to see if we need to reload
if (loglevel >= 10):
logEntry (10, "last loaded mtime "+str(r_mtime))
logEntry (10, "new mtime "+str(os.path.getmtime(rules_file)))
if (r_mtime != os.path.getmtime(rules_file)):
rules = loadRules(rules_file)
r_mtime = os.path.getmtime(rules_file)
# reset authorisation level
authlevel = 0
# check to see if the user is logged on and get permission level
for sessionentry in sessions:
# check time not expired (when we loaded we checked, but time could have expired since)
if (sessionentry[3]!='*' and sessionentry[3]!= '0' and int(sessionentry[3]) < timenow):
# may not be relevant, but we include in level 8
if (loglevel >= 8):
logEntry (8, "Expired session "+str(sessionentry))
# expired to move to next
continue
if (checkAddress (src,sessionentry[0])):
# Log all matches if on session log level
if (loglevel >= 8):
logEntry (8, "Session match "+str(sessionentry))
# set auth level if higher
if (sessionentry[1] > authlevel):
# cast this as an int - otherwise int tests fail later
authlevel = int(sessionentry[1])
elif (loglevel >= 10):
logEntry (10, "Session not matched "+src+" "+str(sessionentry))
if (loglevel >= 7):
logEntry (7, "Highest permission current session "+str(authlevel))
# Special case - level 10 is accept all & no log to return as OK
if (authlevel > 9):
sys.stdout.write("OK\n")
sys.stdout.flush()
continue
# Check against rules
# have we had an accept?
accept = False
# rulematch will hold the rule number that get a hit on for logging
rulematch = 0
# set logentry if status is to log error
logentry = 1
for ruleentry in rules:
# check rule not expired (since generated)
if (ruleentry[4]!='*' and ruleentry[4]!= '0' and int(ruleentry[4]) < timenow):
# may not be relevant, but we include in level 9
if (loglevel >= 9):
logEntry (9, "Expired rule "+str(ruleentry))
continue
# check if the user level matches this rule
if (checkUserLevel(authlevel, ruleentry[3]) == False) :
if (loglevel >= 10) :
logEntry (10, "User level not matched on rule "+str(ruleentry[1]))
continue
if (loglevel >= 10) :
logEntry (10, "User level match on rule "+str(ruleentry[1]))
# check if the destination matches
if checkDst(dst, ruleentry[0]) :
rulematch = ruleentry[1]
if (loglevel >= 10) :
logEntry (10, "Destination match on rule "+str(rulematch))
logentry = ruleentry[5]
# is this an accept or a deny rule
# allow if not 0
if (int(ruleentry[2]) != 0) :
if (loglevel >= 10) :
logEntry (10, "Rule "+str(rulematch)+" is allow "+str(ruleentry[2]))
accept = True
break
# deny
else :
if (loglevel >= 10) :
logEntry (10, "Rule "+str(rulematch)+" is deny "+str(ruleentry[2]))
accept = False
break
else :
if (loglevel >= 9):
logEntry (9, "Rule doesn't match destination")
if (loglevel >= 10) :
logEntry (10, "RULES checked accept = "+str(accept))
# if accept has been changed to True - return OK otherwise return ERR
# if logging because it's set in rule then use loglevel 4, otherwise 5 / 6 as appropriate
if (accept == True) :
if (loglevel >= 4 and logentry != '0'):
logEntry (4, "ACCEPT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
elif (loglevel >= 6):
logEntry (6, "ACCEPT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
sys.stdout.write("OK\n")
else :
if (loglevel >= 4 and logentry != '0'):
logEntry (4, "REJECT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
elif (loglevel >= 5):
logEntry (5, "REJECT "+src+" -> "+dst+":"+str(dstport)+" rule:"+str(rulematch))
sys.stdout.write("ERR\n")
sys.stdout.flush()
# Open and close the file each time so that we don't run the risk of keeping the file
# open when another thread wants to write to it.
def logEntry(logmessagelevel, logmessage):
global log_file
# Get timestamp as human readable format
now = datetime.datetime.now()
timestamp = now.strftime("%Y-%m-%d %H:%M:%s")
# open file to apptend
logfile = open(log_file, 'a')
logfile.write(timestamp+" "+str(logmessagelevel)+" "+logmessage+"\n")
logfile.close()
return
def loadRules(filename):
global loglevel
ruleslist = list()
# Read in rules file
ruleslistfile = open(filename, 'r')
# currenttime
timenow = int(time.time());
# Use linecount to track position in file - in case of error
# read in each line
for linecount, entry in enumerate(ruleslistfile):
entry = entry.rstrip()
# ignore any empty lines / comments
if (entry and not(entry.startswith('#'))):
thisLine = entry.split(' ')
# check there is a valid entry (basic check of number of elements in entry)
if (len(thisLine) < 6):
if (loglevel >= 1):
logEntry(1, "Invalid entry in rules file line %d \n" % (linecount))
# Print message and abort
#print ("Invalid entry in rules file line %d \n" % (linecount))
# print deny
print "ERR\n"
sys.exit()
# check not expired
if (thisLine[4]!='*' and thisLine[4]!= '0' and int(thisLine[4]) < timenow):
if (loglevel >= 9):
logEntry (9, "Expired rule (load) "+str(entry))
continue
# if expired move on to next entry (ignore)
continue
ruleslist.append (thisLine)
ruleslistfile.close()
if (loglevel >= 2):
logEntry(2, "loaded rules file")
# debug level >=9 is not recommended for normal use
if (loglevel >= 9):
all_entries = "";
for each_entry in ruleslist:
all_entries += str(each_entry)+"\n"
logEntry (9, "Rules entries:\n"+all_entries)
return ruleslist
# returns current login level for this IP address (highest value)
def loadSessions(filename):
global loglevel
sessionlist = list()
# Read in whitelist file
sessionlistfile = open(filename, 'r')
# currenttime
timenow = int(time.time());
# Use linecount to track position in file - in case of error
# read in each line
for linecount, entry in enumerate(sessionlistfile):
entry = entry.rstrip()
# ignore any empty lines / comments
if (entry and not(entry.startswith('#'))):
thisLine = entry.split(' ')
# check there is a valid entry (basic check of number of elements in entry)
if (len(thisLine) < 4):
if (loglevel >=1 ):
logEntry (1, "Invalid entry in session file line %d \n" %(linecount))
# Print message and abort
#print ("Invalid entry in sessions file line %d \n" % (linecount))
print "ERR\n"
sys.exit()
# check not expired
if (thisLine[3]!='*' and thisLine[3]!= '0' and int(thisLine[3]) < timenow):
# if expired move on to next entry (ignore) - only skip here for efficiency later as we need to check this in case it changes in future anyway
# may not be relevant, but we include in level 9 (ie higher than normal session log level)
if (loglevel >= 9):
logEntry (9, "Expired session (load) "+str(entry))
continue
sessionlist.append (thisLine)
sessionlistfile.close()
if (loglevel >= 2):
logEntry(2, "loaded session file")
# debug level >=9 is not recommended for normal use
if (loglevel >= 9):
all_entries = "";
for each_entry in sessionlist:
all_entries += str(each_entry)+"\n"
logEntry (9, "Session entries:\n"+all_entries)
return sessionlist
# function to check if a specific destination matches a particular rule
# rule should just be the domain/host part of the whitelist
def checkDst(dest, rule):
# check for * rule (normally used to always allow for a specific source IP address or to temporarily disable
if (rule=='*'):
return True
# check specific rule first - more efficient than rest
if (dest==rule):
return True
# does entry start with a . (if so then check using endswith)
if (rule.startswith('.')) :
if (dest.endswith(rule)):
return True
else :
return False
# least efficient - regular expression
elif (dest.startswith('/')) :
if re.match (rule, dest) :
return True
else :
return False
# No match
else :
return False
# check if our IP address matches that in the rule
# currently accept fixed IP address or regexp (add subnets in future)
def checkAddress(src, session):
# First try a normal ip address (most likely match)
if (src == session):
return True
# look for a regular expression
elif session.startswith ('/'):
if re.match (session, src) :
return True
else :
return False;
# if it's a subnet (not yet implemented)
#elif session.find('/')
# otherwise it's a normal IP address
else:
return False
# check to see if user level matches (supports +/- after the value)
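# e.g. '3' matches auth level 3 only, '3+' matches level 3 or higher, '3-' matches
# level 3 or lower; comma separated lists such as '1,3+' are also accepted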
def checkUserLevel(authlevel, ruleuser):
# rule = * applies to all users
if (ruleuser=='*') : return True
# split into comma separated entries if applicable
listruleuser = ruleuser.split (',')
for thisruleuser in listruleuser:
# get int representation (with +/- removed)
ruleuserint = int (thisruleuser.rstrip('+-'))
        # first check for exact match (don't need to know if it had +/-)
        if (authlevel == ruleuserint) : return True
        # check with +
        if (thisruleuser.endswith('+')) :
            if (authlevel > ruleuserint) : return True
        elif (thisruleuser.endswith('-')) :
            if (authlevel < ruleuserint) : return True
# if not matched
return False
# - Added inline instead as more efficient than function call
## function to check if a particular rule has expired
## uses unix time stamp - or * for no expiry
#def checkExpire(expiretime):
# if (expiretime == '*' or expiretime == '0'): return True
# timenow = int(time.time());
# if (int(expiretime) > timenow): return True;
# return False
# Start
if __name__ == '__main__':
main()
| gpl-3.0 | -5,666,652,179,323,141,000 | 32.477922 | 274 | 0.675925 | false | 3.33739 | false | false | false |
wasit7/parallel_forest | pforest/tree.py | 1 | 4251 | """
A tree module for pforest.
GNU GENERAL PUBLIC LICENSE Version 2
Created on Thu Oct 16 17:33:47 2014
@author: Wasit
"""
import numpy as np
try:
__import__('imp').find_module('pforest')
print "Found pforest"
from pforest.master import mnode
except ImportError:
print "Not found pforest. Importing local modules"
from master import mnode
class tree(mnode):
"""
A tree class that represent the tree of the random forest.
"""
def settree(self,root=mnode(0,0,0)):
"""
Initialize the tree with training result.
"""
self.theta=root.theta #vector array
self.tau=root.tau #scalar
self.H=root.H #scalar
self.P=root.P #vector array
self.parent=root.parent #mnode
self.depth=root.depth #int
self.char=root.char
self.Q=root.Q
if root.L is not None:
self.L=tree() #mnode
self.L.settree(root.L)
self.R=tree() #mnode
self.R.settree(root.R)
def classify(self,Ix):
"""
Classify input Ix by the decision tree.
"""
if self.tau is None: #reaching terminal node
return self.P
else:
if(Ix[ int(self.theta) ]<self.tau):
return self.L.classify(Ix)
else:
return self.R.classify(Ix)
def getP(self,x,dset):
"""
Return the probability.
input:
x sample index [int]
dset the dataset object
output:
P [1d ndarray] probability P(L|Ix)
"""
#print("test>>mnode:{}".format(self))
if self.tau is None:#reaching terminal node
return self.P
else:
#if (self.L is not None and goLeft) :
if (dset.getI(self.theta,x)<self.tau) :
return self.L.getP(x,dset)
else:
return self.R.getP(x,dset)
def getL(self,x,dset):
"""
input:
x sample index [int]
dset the dataset object
output:
L [integer] label
"""
return np.argmax(self.getP(x,dset))
def show(self):
"""Output this tree on standard output."""
print self.table()
if __name__ == '__main__':
import pickle
from matplotlib import pyplot as plt
try:
__import__('imp').find_module('pforest')
print "Found pforest"
from pforest.dataset import dataset
except ImportError:
print "Not found pforest. Importing local modules"
from dataset import dataset
#from scmaster import master
# #training
# m=master()
# m.reset()
# m.train()
# print m.root.table()
# #recording the tree pickle file
# pickleFile = open('out/root.pic', 'wb')
# pickle.dump(m.root, pickleFile, pickle.HIGHEST_PROTOCOL)
# pickleFile.close()
#reading the tree pickle file
pickleFile = open('out_tree.pic', 'rb')
root = pickle.load(pickleFile)
pickleFile.close()
#init the test tree
t=tree()
t.settree(root)
t.show()
#compute recall rate
dset=dataset()
correct=0;
for x in xrange(dset.size):
L=t.getL(np.array([x]),dset)
if dset.getL(x) == L:
correct=correct+1
dset.setL(x,L)
print("recall rate: {}%".format(correct/float(dset.size)*100))
#setup the new test-set
d=0.01
y, x = np.mgrid[slice(-1, 1+d, d), slice(-1, 1+d, d)]
#create dataset
dset2=dataset()
#start labeling
L=np.zeros(x.shape,dtype=int)
for r in xrange(x.shape[0]):
for c in xrange(x.shape[1]):
Prob=t.classify(( x[r,c],y[r,c] ))
L[r,c]=np.argmax(Prob)
    #plot the label output
plt.close('all')
plt.axis([-1,1,-1,1])
plt.pcolor(x,y,L)
# plt.show()
#overlaying new input data
plt.hold(True)
plt.set_cmap('jet')
marker=['bo','co','go','ro','mo','yo','ko',
'bs','cs','gs','rs','ms','ys','ks']
z=np.random.randint(0,dset.size,1000)
for i in z:
plt.plot(dset2.I[i,0],dset2.I[i,1],marker[dset2.samples[i]])
plt.show()
| mit | 3,306,292,795,613,459,000 | 24.769697 | 68 | 0.540108 | false | 3.365796 | false | false | false |
mikesligo/distributed-search | src/Messages/Message_handler.py | 1 | 7842 | import json
from Send_formatter import Send_formatter
from src.networking.IP_Parser import IP_Parser
from Exceptions.Table_lookup_failed_exception import Table_lookup_failed_exception
from src.Encoder import Encoder
from src.Search.Search_results import Search_results
from src.Search.Database import Database
import threading
class Message_handler(object):
def __init__(self, table, socket):
self.parser = IP_Parser()
self.table = table
self.socket = socket
self.__send_formatter = Send_formatter(self.table)
self.__encoder = Encoder()
self.__db = Database()
self.setup_lock_event = threading.Event()
def handle(self, data, sender_addr):
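        """Parse an incoming JSON message and dispatch it to the handler
        matching the message's 'type' field."""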
message = json.loads(data)
message_type = message["type"]
if not self.__valid_message(message_type):
print "Warning - Malformed message received"
return
print "Received message - " + message_type + " from " + str(sender_addr[0]) + ":" + str(sender_addr[1])
print message
if message_type == "JOINING_NETWORK":
self.__handle_joining_network(message, sender_addr)
if message_type == "ROUTING_INFO":
self.setup_lock_event.set()
self.__handle_routing_info(message, sender_addr)
if message_type == "JOINING_NETWORK_RELAY":
self.__handle_joining_network_relay(message)
if message_type == "SEARCH_RESPONSE":
self.__handle_search_response(message)
if message_type == "SEARCH":
self.__handle_search(message)
if message_type == "INDEX":
self.__handle_index(message)
def __valid_message(self, message_type):
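        """Return a truthy value only when a message type was supplied."""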
return message_type
def join_network(self, bootstrap_ip):
to_send = self.__send_formatter.send_joining_network()
self.send_message(to_send, bootstrap_ip.get_ip_pair())
def __handle_routing_info(self, message, sender_addr):
self.table.load_routing_info(message["route_table"])
if self.__sender_is_gateway(message, sender_addr):
self.table.add_routing_info(message["gateway_id"], message["ip_address"])
self.__forward_routing_info_if_necessary(message)
def __sender_is_gateway(self, message, sender_addr):
msg_ip = str(message["ip_address"])
parsed_sender_addr = str(self.parser.parse_from_tuple(sender_addr))
return msg_ip == parsed_sender_addr
def __forward_routing_info_if_necessary(self, message):
node_id = message["node_id"]
gateway_id = message["gateway_id"]
if not self.__id_is_me(node_id):
if self.__id_is_me(gateway_id):
ip = self.__normalise_ip_to_pair(node_id)
jsoned = json.dumps(message)
self.send_message(jsoned, ip)
else:
print "Error - Expecting to forward routing info but I am not gateway"
return
def __id_is_me(self, node_id):
return int(node_id) == int(self.table.node_id)
def __handle_joining_network_relay(self, message):
gateway_id = message["gateway_id"]
node_id = message["node_id"]
if self.__id_is_me(node_id):
return
self.__forward_message_to_closest_node(message, node_id)
if not self.__id_is_me(gateway_id):
new_known_ip = self.table.get_ip_of_node(gateway_id)
self.table.add_routing_info(node_id, new_known_ip)
to_send = self.__send_formatter.send_routing_info(node_id, gateway_id)
self.send_to_node_id(to_send, gateway_id)
def send_to_node_id(self, message, node_id):
ip = self.__normalise_ip_to_pair(node_id)
self.send_message(message, ip)
def __handle_joining_network(self, message, sender_addr):
node_id = message["node_id"]
node_ip = message["ip_address"]
to_send_forward = self.__send_formatter.send_joining_network_relay(node_id)
self.__forward_message_to_closest_node(to_send_forward, node_id)
to_send = self.__send_formatter.send_routing_info(node_id, self.table.node_id)
self.table.add_routing_info(node_id, node_ip)
self.send_message(to_send, sender_addr)
def send_message(self, message, sender_addr):
sender_ip = str(sender_addr[0])
sender_port = str(sender_addr[1])
loaded = json.loads(message)
print "Sending " + loaded["type"] + " to " + sender_ip + ":" + sender_port
print message
self.socket.sendto(message, sender_addr)
def __normalise_ip_to_pair(self, node_id):
try:
node_ip = self.table.get_ip_of_node(node_id)
except KeyError:
print "----------- Error - Could not find ip of node " + str(node_id)
raise Table_lookup_failed_exception("Could not find ip for id " + str(node_id))
normalised_ip = self.parser.parse(node_ip).get_ip_pair()
return normalised_ip
def search(self, words):
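        """Hash each word and route a SEARCH message to the closest known node,
        falling back to the local database when no closer node is known."""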
for word in words:
hash_of_word = self.__encoder.get_hash_of_word(word)
print "Hash is " + str(hash_of_word)
closest_node = self.table.get_closest_node_id(hash_of_word)
if closest_node:
message = self.__send_formatter.search(word, closest_node)
ip = self.__normalise_ip_to_pair(closest_node)
self.send_message(message, ip)
else:
return self.__db.get_results(word)
#TODO handle pings
def __handle_search(self, message):
word = message["word"]
target_node_id = message["sender_id"]
results = self.__db.get_results(word)
message = self.__send_formatter.search_response(word, target_node_id, results)
self.__forward_message_to_closest_node(message, target_node_id)
def __handle_search_response(self, message):
node_id = message["node_id"]
if self.__id_is_me(node_id):
word = message["word"]
responses = message["response"]
search_result = Search_results(word, responses)
print "RECEIVED RESPONSE FOR " + search_result.word
print "Results:"
for result in search_result.results:
print "Url:\t\t" + str(result["url"])
print "Rank:\t\t" + str(result["rank"])
return search_result
else:
self.__forward_message_to_closest_node(message, node_id)
def __handle_index(self, message):
target_id = message["target_id"]
if self.__id_is_me(target_id):
self.__send_ack(target_id)
word = message["keyword"]
urls = message["link"]
self.__db.index_results(word, urls)
else:
self.__forward_message_to_closest_node(message, target_id)
def __send_ack(self, target_id):
message = self.__send_formatter.ack(target_id)
ip = self.__normalise_ip_to_pair(target_id)
self.send_message(message, ip)
def index(self, keyword, link):
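        """Forward an INDEX message for the keyword's hash to the closest known
        node and also store the keyword/link pair in the local database."""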
hash_of_word = self.__encoder.get_hash_of_word(keyword)
message = self.__send_formatter.index(hash_of_word, keyword, link)
self.__forward_message_to_closest_node(message, hash_of_word)
loaded = json.loads(message)
word = loaded["keyword"]
urls = loaded["link"]
self.__db.index_results(word, urls)
def __forward_message_to_closest_node(self, message, node_id):
if type(message) is dict:
message = json.dumps(message)
closest_node = self.table.get_closest_node_id(node_id)
if closest_node:
self.__send_message(message, closest_node)
def __send_message(self, message, node_id):
ip = self.__normalise_ip_to_pair(node_id)
self.send_message(message, ip)
| mit | 2,591,718,884,708,643,300 | 38.407035 | 111 | 0.601505 | false | 3.649139 | false | false | false |
gdos/parole | src/testgame/gamedir/data01.res/scripts/util.py | 2 | 1033 |
import parole
def messageBox(text, align='center', textWidth=274):
font = parole.resource.getFont("fonts/Arial.ttf", 14)
block = parole.shader.TextBlockPass(font, (255,255,255),
wrap_width=textWidth, bg_rgb=(0,64,128), align=align, wrap='word')
block.text = text
block.update()
sdr = parole.shader.Shader("FrameContents",
(block.width+20, block.height+20))
sdr.addPass(parole.shader.ColorField((0,64,128), sdr.size))
sdr.addPass(block, (10,10))
mbox = parole.shader.Frame((parole.shader.VerticalBevel((0,0,0),
(128,128,128), (255,255,255),1, 2, 1),
parole.shader.VerticalBevel((0,0,0), (128,129,128), (255,255,255), 1, 2, 1),
parole.shader.HorizontalBevel((255,255,255), (128,128,128), (0,0,0), 1,
2, 1),
parole.shader.HorizontalBevel((255,255,255), (128,128,128), (0,0,0), 1,
2, 1),
None,None,None,None),
contents=[sdr])
mbox.update()
parole.display.scene.add(mbox, pos=mbox.centeredPos())
return mbox
| gpl-2.0 | 5,577,007,840,918,970,000 | 42.041667 | 84 | 0.607938 | false | 2.837912 | false | false | false |
fbuitron/TCHRBot | hr_bot_main.py | 1 | 10603 | import os
import time
import json
from slackclient import SlackClient
'''
Welcome to the code!
This is the main class for the hr_bot.
This code sucks: spaghetti, mixed, hardcoded and etc. So if you are a very opinionated person I am accepting refactoring PRs :)
Go ahead, have fun!
'''
# starterbot's ID as an environment variable
BOT_ID = os.environ["HRBOT_ID"]
BOT_TOKEN = os.environ["HRBOT_TOKEN"]
# constants
AT_BOT = "<@" + BOT_ID + ">"
DICT_USER = {}
NUMBER_OF_REACTIONS_INT = 3
NUMBER_OF_REACTIONS = str(NUMBER_OF_REACTIONS_INT)
INITIAL_SCORE = 0
OPERATION_TIMEOUT = 300 #5 minutes
REBUKE_COMMAND = "boo"
CONGRAT_COMMAND = "kudos"
HELP_COMMAND = "help"
LEADER_BOARD_COMMAND = "leaderboard"
ERROR_SUFFIX = ". Type `@hr help` for instructions"
NOT_FOUND_MSGS = ['Not sure what you meant. I am still being coded! Sorry :pensive:','I am very busy right now! Maybe after a :coffee:', 'Nope']
INSTRUCTIONS_MSG = "Hi there! my name is HR. I can listen to complaints or praise between coworkers. You can raise a complaint by using the *" + REBUKE_COMMAND +"*"\
    " command or praise someone by using the *"+CONGRAT_COMMAND+"* command. Just tell me: `@hr "+CONGRAT_COMMAND+" @aric 200 He helped me with my computer` "\
    " If your message gets 3 OR + votes _@aric_ gets 200 points. On the contrary if you tell me: `@hr "+REBUKE_COMMAND+" @aric 500 he said the b word at lunch `"\
    " If your message gets 3 OR + votes _@aric_ loses 500 points. :warning: if you don't get enough votes you may lose some points!"\
" Type `@hr "+LEADER_BOARD_COMMAND+"` to get the top 5 worst employees in the HR score."
slack_client = SlackClient(BOT_TOKEN)
list_of_operations = []
class HR_Operation:
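    """A pending kudos/boo request that waits for reaction votes before points
    are applied to the target user."""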
def __init__(self, author, isPositive, target, amount, reason, channel, timestamp):
self.author = clean_up_user_name(author)
self.isPositive = isPositive
self.target = clean_up_user_name(target)
self.amount = amount
self.reason = reason
self.channel = channel
self.timestamp = timestamp
self.votes = []
self.processed = False
    def addVote(self, vote):
        self.votes.append(vote)
class MSG_Votes:
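    """A single emoji reaction (vote) cast on the message of a pending operation."""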
def __init__(self, reaction, channel, userReacting, msg_ts, msg_author):
self.reaction = reaction
self.channel = channel
self.userReacting = clean_up_user_name(userReacting)
self.msg_ts = msg_ts
self.msg_author = clean_up_user_name(msg_author)
def publish(text,channel):
slack_client.api_call("chat.postMessage", channel=channel,
text=text, as_user=True)
def handle_command(hr_operation):
#We need to filter the rules before actually applying this
#Cannot allow that you upvote or downvote yourself
if hr_operation.target == hr_operation.author:
apply_point(False,10,hr_operation.target)
publish("Are you serious? Do you think I don't have an if statement for this? -10pts for you <@"+hr_operation.author+">, cheater", hr_operation.channel)
elif hr_operation.target == BOT_ID and not hr_operation.isPositive:
        apply_point(False,150,hr_operation.author)
publish("hahah you think you are so funny... -150pts for you <@"+hr_operation.author+">. Report me to HR.... try it", hr_operation.channel)
else:
list_of_operations.append(hr_operation)
response = "If you get at least *"+NUMBER_OF_REACTIONS+"* reactions, consider it done!"
publish(response, hr_operation.channel)
def handle_reaction(vote):
#Look for the operation and add vote if found
if len(list_of_operations) > 0:
for op in list_of_operations:
# check if the vote is for the operation
if op.timestamp == vote.msg_ts and op.author == vote.msg_author and vote.channel == op.channel:
if vote.msg_author == vote.userReacting:
apply_point(False,10,vote.msg_author)
publish("You can't vote, you sneaky cheater! -10pts for you <@"+vote.msg_author+">", vote.channel)
return
if op.target == vote.userReacting and op.isPositive:
apply_point(False,10, op.target)
publish("Hey, what do you think I am? An empty robot? You cannot vote for yourself, cheater! -10pts for you <@"+vote.userReacting+">", vote.channel)
return
for op_vote in op.votes:
if vote.userReacting == op_vote.userReacting:
apply_point(False,10, vote.userReacting)
publish("Hey <@"+vote.userReacting+">, you can't vote twice, cheater! -10pts for you ", vote.channel)
return
op.votes.append(vote)
refresh_leaderboard()
def process_pending_operations():
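    """Penalize the author of any pending operation that timed out before
    collecting enough votes, then mark the operation as processed."""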
for op in list_of_operations:
if not op.processed and (time.time() - float(op.timestamp)) > OPERATION_TIMEOUT:
penalty_points = 10
msg = ""
if op.isPositive:
penalty_points = 5
msg = "Not enough votes, <@"+op.author+">. Next time try to get some traction. I have to take 5 points because of RAM/CPU wasted time. Good luck next time. :smile: "
else:
penalty_points = 10
msg = "You didn't get traction, dude. Are you falsely accusing your coworker -10 pts for you <@"+op.author+">."
apply_point(False,penalty_points,op.author)
op.processed = True
publish(msg, op.channel)
def refresh_leaderboard():
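    """Apply the points for any unprocessed operation that has reached the
    required number of votes and announce the result in its channel."""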
for op in list_of_operations:
if len(op.votes) == NUMBER_OF_REACTIONS_INT and not op.processed:
apply_point(op.isPositive, op.amount, op.target)
op.processed = True
            msg = "The people have spoken. <@"+op.target+"> has *"+op.amount+"* "+(" more " if op.isPositive else " less ")+" points"
publish(msg, op.channel)
def apply_point(increment, amount, user):
if not user in DICT_USER:
DICT_USER[user] = INITIAL_SCORE
if increment:
DICT_USER[user] = DICT_USER[user] + int(amount)
else:
DICT_USER[user] = DICT_USER[user] - int(amount)
def clean_up_user_name(username):
if username.find("<@") == -1 :
return username
username = username.replace("<@","")
username = username.replace(">","")
return username
def handle_help(channel):
publish(INSTRUCTIONS_MSG, channel)
def handle_leader_board(channel):
index = 1
msg = "Ok, sure sweetheart!\n"
if len(DICT_USER) > 0:
for key, value in DICT_USER.iteritems():
msg += str(""+str(index)+"- <@"+key+"> ---> "+str(value)+"\n")
index += 1
else:
msg= "I feel so lonely, no one voted yet... :crying_cat_face:"
publish(msg, channel)
def isUser(subStr):
return subStr.startswith("<@U")
def parse_txt(msg_str, channel):
errorMsg = None
isPositive = None
target = None
amount = None
reason = None
valid = False
bySpace = msg_str.split(" ")
if len(bySpace) >= 2:
if bySpace[0] == AT_BOT:
if bySpace[1] in [CONGRAT_COMMAND, REBUKE_COMMAND]:
if bySpace[1] == CONGRAT_COMMAND:
isPositive = True
else:
isPositive = False
if isUser(bySpace[2]):
target = bySpace[2]
if (bySpace[3].isdigit()):
amount = bySpace[3]
if (len(bySpace) > 4):
reason = " ".join(bySpace[4:])
valid = True
else:
errorMsg = "Expected the number of points not this *"+bySpace[3]+"*"
else:
errorMsg = "Need to put a user after the command instead of *"+bySpace[2]+"*"
elif bySpace[1] == HELP_COMMAND:
valid = True
handle_help(channel)
elif bySpace[1] == LEADER_BOARD_COMMAND:
handle_leader_board(channel)
valid = True
else:
errorMsg = "You used the wrong command *"+bySpace[1]+"*"
else:
errorMsg = "C'mon! You can do better than that"
else:
errorMsg = "At least you mentioned me :smiley:"
return errorMsg, valid, target, isPositive, amount, reason
def parse_msg(msg_json):
channel = msg_json["channel"]
if not msg_json["user"] == BOT_ID:
errorMSG, valid, target, isPositive, amount, reason = parse_txt(msg_json["text"], channel)
if (errorMSG):
msgResponse = errorMSG + ERROR_SUFFIX
publish(msgResponse,channel)
elif not (isPositive == None):
channel = msg_json["channel"]
author = msg_json["user"]
timestamp = msg_json["ts"]
op = HR_Operation(author,isPositive, target, amount, reason, channel, timestamp)
handle_command(op)
elif not valid:
msgResponse = errorMSG + ERROR_SUFFIX
publish(msgResponse,channel)
def parse_reaction(reaction_json):
if reaction_json["item"]:
if reaction_json["type"] == 'reaction_added':
if 'channel' not in reaction_json["item"]:
return
if 'user' not in reaction_json:
return
if 'ts' not in reaction_json["item"]:
return
if 'item_user' not in reaction_json:
return
vote = MSG_Votes(reaction_json["reaction"], reaction_json["item"]["channel"],reaction_json["user"],reaction_json["item"]["ts"], reaction_json["item_user"])
handle_reaction(vote)
def parse_slack_output(slack_rtm_output):
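    """Scan the RTM events for mentions of the bot and for reactions, and
    dispatch them to the message/reaction parsers."""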
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
op = parse_msg(output)
return op
if output and 'reaction' in output:
parse_reaction(output)
return None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1
if slack_client.rtm_connect():
print("Connection succesful")
while True:
operation = parse_slack_output(slack_client.rtm_read())
process_pending_operations()
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
| mit | 657,540,030,640,269,400 | 40.096899 | 181 | 0.589927 | false | 3.72558 | false | false | false |
google-research/google-research | mol_dqn/chemgraph/dqn/deep_q_networks.py | 1 | 30250 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""DeepQNetwork models for molecule generation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import training as contrib_training
class DeepQNetwork(object):
"""Deep Q Network.
This class implements the network as used in the Nature
(2015) paper.
Human-level control through deep reinforcement learning
https://www.nature.com/articles/nature14236
https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
"""
def __init__(self,
input_shape,
q_fn,
learning_rate=0.001,
learning_rate_decay_steps=10000,
learning_rate_decay_rate=0.8,
optimizer='Adam',
grad_clipping=None,
gamma=1.0,
epsilon=0.2,
double_q=True,
num_bootstrap_heads=10,
scope='dqn',
reuse=None):
"""Creates the model function.
Args:
input_shape: Tuple. The shape of input.
q_fn: A function, whose input is the observation features, and the
output is the Q value of the observation.
learning_rate: Float. The learning rate of the optimizer.
learning_rate_decay_steps: Integer. The number of steps between each
learning rate decay.
learning_rate_decay_rate: Float. The rate of learning rate decay.
optimizer: String. Which optimizer to use.
grad_clipping: Boolean. Whether to clip gradient.
gamma: Float. Discount factor.
epsilon: Float. The probability of choosing a random action.
double_q: Boolean. Whether to use double q learning.
num_bootstrap_heads: Integer. The number of bootstrap heads to use.
scope: String or VariableScope. Variable Scope.
reuse: Boolean or None. Whether or not the variable should be reused.
"""
self.input_shape = input_shape
self.q_fn = q_fn
self.learning_rate = learning_rate
self.learning_rate_decay_steps = learning_rate_decay_steps
self.learning_rate_decay_rate = learning_rate_decay_rate
self.optimizer = optimizer
self.grad_clipping = grad_clipping
self.gamma = gamma
self.num_bootstrap_heads = num_bootstrap_heads
self.double_q = double_q
self.scope = scope
self.reuse = reuse
self.epsilon = epsilon
def build(self):
"""Builds the computational graph and training operations."""
self._build_graph()
self._build_training_ops()
self._build_summary_ops()
def _build_single_q_network(self, observations, head, state_t, state_tp1,
done_mask, reward_t, error_weight):
"""Builds the computational graph for a single Q network.
Briefly, this part is calculating the following two quantities:
1. q_value = q_fn(observations)
2. td_error = q_fn(state_t) - reward_t - gamma * q_fn(state_tp1)
The optimization target is to minimize the td_error.
Args:
observations: shape = [batch_size, hparams.fingerprint_length].
The input of the Q function.
head: shape = [1].
The index of the head chosen for decision in bootstrap DQN.
state_t: shape = [batch_size, hparams.fingerprint_length].
The state at time step t.
state_tp1: a list of tensors, with total number of batch_size,
each has shape = [num_actions, hparams.fingerprint_length].
Note that the num_actions can be different for each tensor.
The state at time step t+1, tp1 is short for t plus 1.
done_mask: shape = [batch_size, 1]
Whether state_tp1 is the terminal state.
reward_t: shape = [batch_size, 1]
the reward at time step t.
error_weight: shape = [batch_size, 1]
weight for the loss.
Returns:
q_values: Tensor of [batch_size, 1]. The q values for the observations.
td_error: Tensor of [batch_size, 1]. The TD error.
weighted_error: Tensor of [batch_size, 1]. The TD error weighted by
error_weight.
q_fn_vars: List of tf.Variables. The variables of q_fn when computing
the q_values of state_t
q_fn_vars: List of tf.Variables. The variables of q_fn when computing
the q_values of state_tp1
"""
with tf.variable_scope('q_fn'):
# q_value have shape [batch_size, 1].
q_values = tf.gather(self.q_fn(observations), head, axis=-1)
# calculating q_fn(state_t)
# The Q network shares parameters with the action graph.
with tf.variable_scope('q_fn', reuse=True):
q_t = self.q_fn(state_t, reuse=True)
q_fn_vars = tf.trainable_variables(scope=tf.get_variable_scope().name +
'/q_fn')
# calculating q_fn(state_tp1)
with tf.variable_scope('q_tp1', reuse=tf.AUTO_REUSE):
q_tp1 = [self.q_fn(s_tp1, reuse=tf.AUTO_REUSE) for s_tp1 in state_tp1]
q_tp1_vars = tf.trainable_variables(scope=tf.get_variable_scope().name +
'/q_tp1')
if self.double_q:
with tf.variable_scope('q_fn', reuse=True):
q_tp1_online = [self.q_fn(s_tp1, reuse=True) for s_tp1 in state_tp1]
if self.num_bootstrap_heads:
num_heads = self.num_bootstrap_heads
else:
num_heads = 1
# determine the action to choose based on online Q estimator.
q_tp1_online_idx = [
tf.stack(
[tf.argmax(q, axis=0),
tf.range(num_heads, dtype=tf.int64)],
axis=1) for q in q_tp1_online
]
# use the index from max online q_values to compute the value
# function
v_tp1 = tf.stack(
[tf.gather_nd(q, idx) for q, idx in zip(q_tp1, q_tp1_online_idx)],
axis=0)
else:
v_tp1 = tf.stack([tf.reduce_max(q) for q in q_tp1], axis=0)
# if s_{t+1} is the terminal state, we do not evaluate the Q value of
# the state.
q_tp1_masked = (1.0 - done_mask) * v_tp1
q_t_target = reward_t + self.gamma * q_tp1_masked
    # stop gradient from flowing to the computation graph which computes
# the Q value of s_{t+1}.
# td_error has shape [batch_size, 1]
td_error = q_t - tf.stop_gradient(q_t_target)
    # If using bootstrap, each head is trained with a different subset of the
    # training samples, similar to the idea of dropout.
if self.num_bootstrap_heads:
head_mask = tf.keras.backend.random_binomial(
shape=(1, self.num_bootstrap_heads), p=0.6)
td_error = tf.reduce_mean(td_error * head_mask, axis=1)
# The loss comes from a traditional trick in convex optimization:
# http://web.stanford.edu/~boyd/cvxbook/.
# See Chapter 6 pp. 298
    # It makes the optimization robust.
# Specifically, the loss will use l1 instead of l2 loss when the td error
# gets larger than 1.0. The l2 loss has the disadvantage that it has
# the tendency to be dominated by outliers. In terms of estimation theory,
# the asymptotic relative efficiency of the l1 loss estimator is better
# for heavy-tailed distributions.
errors = tf.where(
tf.abs(td_error) < 1.0, tf.square(td_error) * 0.5,
1.0 * (tf.abs(td_error) - 0.5))
weighted_error = tf.reduce_mean(error_weight * errors)
return q_values, td_error, weighted_error, q_fn_vars, q_tp1_vars
def _build_input_placeholder(self):
"""Creates the input placeholders.
Input placeholders created:
observations: shape = [batch_size, hparams.fingerprint_length].
The input of the Q function.
head: shape = [1].
The index of the head chosen for decision.
state_t: shape = [batch_size, hparams.fingerprint_length].
The state at time step t.
state_tp1: a list of tensors,
each has shape = [num_actions, hparams.fingerprint_length].
Note that the num_actions can be different for each tensor.
The state at time step t+1.
done_mask: shape = [batch_size, 1]
Whether state_tp1 is the terminal state.
error_weight: shape = [batch_size, 1]
weight for the loss.
"""
batch_size, fingerprint_length = self.input_shape
with tf.variable_scope(self.scope, reuse=self.reuse):
# Build the action graph to choose an action.
# The observations, which are the inputs of the Q function.
self.observations = tf.placeholder(
tf.float32, [None, fingerprint_length], name='observations')
      # head is the index of the head we want to choose for decision.
# See https://arxiv.org/abs/1703.07608
self.head = tf.placeholder(tf.int32, [], name='head')
      # When sampling from memory, the batch_size can be fixed, as it is
# possible to sample any number of samples from memory.
# state_t is the state at time step t
self.state_t = tf.placeholder(
tf.float32, self.input_shape, name='state_t')
# state_tp1 is the state at time step t + 1, tp1 is short for t plus 1.
self.state_tp1 = [
tf.placeholder(
tf.float32, [None, fingerprint_length], name='state_tp1_%i' % i)
for i in range(batch_size)
]
# done_mask is a {0, 1} tensor indicating whether state_tp1 is the
# terminal state.
self.done_mask = tf.placeholder(
tf.float32, (batch_size, 1), name='done_mask')
self.error_weight = tf.placeholder(
tf.float32, (batch_size, 1), name='error_weight')
def _build_graph(self):
"""Builds the computational graph.
Input placeholders created:
reward_t: shape = [batch_size, 1]
the reward at time step t.
Instance attributes created:
q_values: the q values of the observations.
q_fn_vars: the variables in q function.
q_tp1_vars: the variables in q_tp1 function.
td_error: the td_error.
weighted_error: the weighted td error.
action: the action to choose next step.
"""
batch_size, _ = self.input_shape
with tf.variable_scope(self.scope, reuse=self.reuse):
self._build_input_placeholder()
self.reward_t = tf.placeholder(
tf.float32, (batch_size, 1), name='reward_t')
# The Q network shares parameters with the action graph.
      # tensors starting with q or v have shape [batch_size, 1] when not using
# bootstrap. When using bootstrap, the shapes are
# [batch_size, num_bootstrap_heads]
(self.q_values, self.td_error, self.weighted_error,
self.q_fn_vars, self.q_tp1_vars) = self._build_single_q_network(
self.observations, self.head, self.state_t, self.state_tp1,
self.done_mask, self.reward_t, self.error_weight)
self.action = tf.argmax(self.q_values)
def _build_training_ops(self):
"""Creates the training operations.
Instance attributes created:
optimization_op: the operation of optimize the loss.
update_op: the operation to update the q network.
"""
with tf.variable_scope(self.scope, reuse=self.reuse):
self.optimization_op = contrib_layers.optimize_loss(
loss=self.weighted_error,
global_step=tf.train.get_or_create_global_step(),
learning_rate=self.learning_rate,
optimizer=self.optimizer,
clip_gradients=self.grad_clipping,
learning_rate_decay_fn=functools.partial(
tf.train.exponential_decay,
decay_steps=self.learning_rate_decay_steps,
decay_rate=self.learning_rate_decay_rate),
variables=self.q_fn_vars)
self.update_op = []
for var, target in zip(
sorted(self.q_fn_vars, key=lambda v: v.name),
sorted(self.q_tp1_vars, key=lambda v: v.name)):
self.update_op.append(target.assign(var))
self.update_op = tf.group(*self.update_op)
def _build_summary_ops(self):
"""Creates the summary operations.
Input placeholders created:
smiles: the smiles string.
reward: the reward.
Instance attributes created:
error_summary: the operation to log the summary of error.
episode_summary: the operation to log the smiles string and reward.
"""
with tf.variable_scope(self.scope, reuse=self.reuse):
with tf.name_scope('summaries'):
# The td_error here is the difference between q_t and q_t_target.
# Without abs(), the summary of td_error is actually underestimated.
self.error_summary = tf.summary.scalar(
'td_error', tf.reduce_mean(tf.abs(self.td_error)))
self.smiles = tf.placeholder(tf.string, [], 'summary_smiles')
self.reward = tf.placeholder(tf.float32, [], 'summary_reward')
smiles_summary = tf.summary.text('SMILES', self.smiles)
reward_summary = tf.summary.scalar('reward', self.reward)
self.episode_summary = tf.summary.merge(
[smiles_summary, reward_summary])
def log_result(self, smiles, reward):
"""Summarizes the SMILES string and reward at the end of an episode.
Args:
smiles: String. The SMILES string.
reward: Float. The reward.
Returns:
the summary protobuf
"""
return tf.get_default_session().run(
self.episode_summary,
feed_dict={
self.smiles: smiles,
self.reward: reward
})
def _run_action_op(self, observations, head):
"""Function that runs the op calculating an action given the observations.
Args:
observations: np.array. shape = [num_actions, fingerprint_length].
        Observations that can be fed into the Q network.
head: Integer. The output index to use.
Returns:
Integer. which action to be performed.
"""
return np.asscalar(tf.get_default_session().run(
self.action,
feed_dict={
self.observations: observations,
self.head: head
}))
def get_action(self,
observations,
stochastic=True,
head=0,
update_epsilon=None):
"""Function that chooses an action given the observations.
Args:
observations: np.array. shape = [num_actions, fingerprint_length].
        Observations that can be fed into the Q network.
stochastic: Boolean. If set to False all the actions are always
deterministic (default True).
head: Integer. The output index to use.
update_epsilon: Float or None. update epsilon a new value, if None
no update happens (default: no update).
Returns:
Integer. which action to be performed.
"""
if update_epsilon is not None:
self.epsilon = update_epsilon
if stochastic and np.random.uniform() < self.epsilon:
return np.random.randint(0, observations.shape[0])
else:
return self._run_action_op(observations, head)
def train(self, states, rewards, next_states, done, weight, summary=True):
"""Function that takes a transition (s,a,r,s') and optimizes Bellman error.
Args:
states: object, a batch of observations.
rewards: np.array, immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,).
next_states: object, observations that followed states.
done: np.array, 1 if obs_t was the last observation in the episode and 0
otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype
must be float32 and shape must be (batch_size,).
weight: np.array, importance sampling weights for every element of the
batch. dtype must be float32 and shape must be (batch_size,).
summary: Boolean, whether to get summary.
Returns:
td_error: np.array. a list of differences between Q(s,a) and the
target in Bellman's equation.
dtype is float32 and shape is (batch_size,).
"""
if summary:
ops = [self.td_error, self.error_summary, self.optimization_op]
else:
ops = [self.td_error, self.optimization_op]
feed_dict = {
self.state_t: states,
self.reward_t: rewards,
self.done_mask: done,
self.error_weight: weight
}
for i, next_state in enumerate(next_states):
feed_dict[self.state_tp1[i]] = next_state
return tf.get_default_session().run(ops, feed_dict=feed_dict)
class MultiObjectiveDeepQNetwork(DeepQNetwork):
"""Multi Objective Deep Q Network.
The idea is described in
Multiobjective Reinforcement Learning: A Comprehensive Overview
https://ieeexplore.ieee.org/document/6918520/
Briefly, the difference between this Multi Objective Deep Q Network and
a naive Deep Q Network is that this one uses one Q network for approximating
each of the objectives. And a weighted sum of those Q values are used for
decision making.
The loss is the summation of the losses of each Q network.
"""
def __init__(self, objective_weight, **kwargs):
"""Creates the model function.
Args:
objective_weight: np.array with shape [num_objectives, 1]. The weight
vector for the objectives.
**kwargs: arguments for the DeepQNetworks class.
"""
# Normalize the sum to 1.
self.objective_weight = objective_weight / np.sum(objective_weight)
self.num_objectives = objective_weight.shape[0]
super(MultiObjectiveDeepQNetwork, self).__init__(**kwargs)
def _build_graph(self):
"""Builds the computational graph.
Input placeholders created:
observations: shape = [batch_size, hparams.fingerprint_length].
The input of the Q function.
head: shape = [1].
The index of the head chosen for decision.
objective_weight: shape = [num_objectives, 1].
objective_weight is the weight to scalarize the objective vector:
reward = sum (objective_weight_i * objective_i)
state_t: shape = [batch_size, hparams.fingerprint_length].
The state at time step t.
state_tp1: a list of tensors,
each has shape = [num_actions, hparams.fingerprint_length].
Note that the num_actions can be different for each tensor.
The state at time step t+1.
done_mask: shape = [batch_size, 1]
Whether state_tp1 is the terminal state.
reward_t: shape = [batch_size, num_objectives]
the reward at time step t.
error weight: shape = [batch_size, 1]
weight for the loss.
Instance attributes created:
q_values: List of Tensors of [batch_size, 1]. The q values for the
observations.
td_error: List of Tensor of [batch_size, 1]. The TD error.
weighted_error: List of Tensor of [batch_size, 1]. The TD error weighted
by importance sampling weight.
q_fn_vars: List of tf.Variables. The variables of q_fn when computing
the q_values of state_t
q_fn_vars: List of tf.Variables. The variables of q_fn when computing
the q_values of state_tp1
"""
batch_size, _ = self.input_shape
with tf.variable_scope(self.scope, reuse=self.reuse):
self._build_input_placeholder()
self.reward_t = tf.placeholder(
tf.float32, (batch_size, self.num_objectives), name='reward_t')
# objective_weight is the weight to scalarize the objective vector:
# reward = sum (objective_weight_i * objective_i)
self.objective_weight_input = tf.placeholder(
tf.float32, [self.num_objectives, 1], name='objective_weight')
# split reward for each q network
rewards_list = tf.split(self.reward_t, self.num_objectives, axis=1)
q_values_list = []
self.td_error = []
self.weighted_error = 0
self.q_fn_vars = []
self.q_tp1_vars = []
# build a Q network for each objective
for obj_idx in range(self.num_objectives):
with tf.variable_scope('objective_%i' % obj_idx):
(q_values, td_error, weighted_error,
q_fn_vars, q_tp1_vars) = self._build_single_q_network(
self.observations, self.head, self.state_t, self.state_tp1,
self.done_mask, rewards_list[obj_idx], self.error_weight)
q_values_list.append(tf.expand_dims(q_values, 1))
# td error is for summary only.
# weighted error is the optimization goal.
self.td_error.append(td_error)
self.weighted_error += weighted_error / self.num_objectives
self.q_fn_vars += q_fn_vars
self.q_tp1_vars += q_tp1_vars
q_values = tf.concat(q_values_list, axis=1)
# action is the one that leads to the maximum weighted reward.
self.action = tf.argmax(
tf.matmul(q_values, self.objective_weight_input), axis=0)
def _build_summary_ops(self):
"""Creates the summary operations.
Input placeholders created:
smiles: the smiles string.
rewards: the rewards.
weighted_reward: the weighted sum of the rewards.
Instance attributes created:
error_summary: the operation to log the summary of error.
episode_summary: the operation to log the smiles string and reward.
"""
with tf.variable_scope(self.scope, reuse=self.reuse):
with tf.name_scope('summaries'):
# The td_error here is the difference between q_t and q_t_target.
# Without abs(), the summary of td_error is actually underestimated.
error_summaries = [
tf.summary.scalar('td_error_%i' % i,
tf.reduce_mean(tf.abs(self.td_error[i])))
for i in range(self.num_objectives)
]
self.error_summary = tf.summary.merge(error_summaries)
self.smiles = tf.placeholder(tf.string, [], 'summary_smiles')
self.rewards = [
tf.placeholder(tf.float32, [], 'summary_reward_obj_%i' % i)
for i in range(self.num_objectives)
]
# Weighted sum of the rewards.
self.weighted_reward = tf.placeholder(tf.float32, [],
'summary_reward_sum')
smiles_summary = tf.summary.text('SMILES', self.smiles)
reward_summaries = [
tf.summary.scalar('reward_obj_%i' % i, self.rewards[i])
for i in range(self.num_objectives)
]
        reward_summaries.append(
            tf.summary.scalar('sum_reward', self.weighted_reward))
self.episode_summary = tf.summary.merge([smiles_summary] +
reward_summaries)
def log_result(self, smiles, reward):
"""Summarizes the SMILES string and reward at the end of an episode.
Args:
smiles: String. The SMILES string.
reward: List of Float. The rewards for each objective.
Returns:
the summary protobuf.
"""
feed_dict = {
self.smiles: smiles,
}
for i, reward_value in enumerate(reward):
feed_dict[self.rewards[i]] = reward_value
# calculated the weighted sum of the rewards.
feed_dict[self.weighted_reward] = np.asscalar(
np.array([reward]).dot(self.objective_weight))
return tf.get_default_session().run(
self.episode_summary, feed_dict=feed_dict)
def _run_action_op(self, observations, head):
"""Function that runs the op calculating an action given the observations.
Args:
observations: np.array. shape = [num_actions, fingerprint_length].
        Observations that can be fed into the Q network.
head: Integer. The output index to use.
Returns:
Integer. which action to be performed.
"""
return np.asscalar(tf.get_default_session().run(
self.action,
feed_dict={
self.observations: observations,
self.objective_weight_input: self.objective_weight,
self.head: head
}))
def multi_layer_model(inputs, hparams, reuse=None):
"""Multi-layer model for q learning.
Args:
inputs: Tensor. The input.
hparams: tf.HParameters. The hyper-parameters.
reuse: Boolean. Whether the parameters should be reused.
Returns:
Tensor. shape = [batch_size, hparams.num_bootstrap_heads]. The output.
"""
output = inputs
for i, units in enumerate(hparams.dense_layers):
output = tf.layers.dense(output, units, name='dense_%i' % i, reuse=reuse)
output = getattr(tf.nn, hparams.activation)(output)
if hparams.batch_norm:
output = tf.layers.batch_normalization(
output, fused=True, name='bn_%i' % i, reuse=reuse)
if hparams.num_bootstrap_heads:
output_dim = hparams.num_bootstrap_heads
else:
output_dim = 1
output = tf.layers.dense(output, output_dim, name='final', reuse=reuse)
return output
def get_hparams(**kwargs):
"""Get the hyperparameters for the model from a json object.
Args:
**kwargs: Dict of parameter overrides.
Possible keyword arguments:
atom_types: Dict. The possible atom types in the molecule.
max_steps_per_episode: Integer. The maximum number of steps for one episode.
allow_removal: Boolean. Whether to allow removal of a bond.
allow_no_modification: Boolean. If true, the valid action set will include
doing nothing to the current molecule, i.e., the current molecule itself
will be added to the action set.
replay_buffer_size: Integer. The size of the replay buffer.
learning_rate: Float. Learning rate.
learning_rate_decay_steps: Integer. The number of steps between each
learning rate decay.
learning_rate_decay_rate: Float. The rate of learning rate decay.
num_episodes: Integer. Number of episodes to run.
batch_size: Integer. The batch size.
learning_frequency: Integer. The number of steps between each training
operation.
update_frequency: Integer. The number of steps between each update of the
target Q network
grad_clipping: Integer. maximum value of the gradient norm.
gamma: Float. The discount factor for the reward.
double_q: Boolean. Whether to used double Q learning.
See https://arxiv.org/abs/1509.06461 for detail.
bootstrap: Integer. The number of bootstrap heads. See
https://arxiv.org/abs/1703.07608 for detail.
prioritized: Boolean. Whether to use prioritized replay. See
https://arxiv.org/abs/1511.05952 for detail.
prioritized_alpha: Float. The parameter alpha in the prioritized replay.
prioritized_beta: Float. The parameter beta in the prioritized replay.
prioritized_epsilon: Float. The parameter epsilon in the prioritized replay.
fingerprint_radius: Integer. The radius of the Morgan fingerprint.
fingerprint_length: Integer. The length of the Morgan fingerprint.
dense_layers: List of integers. The hidden units in the dense layers.
activation: String. The activation function to use.
optimizer: String. The optimizer to use.
batch_norm: Boolean. Whether to use batch normalization.
save_frequency: Integer. The number of episodes between each saving.
Returns:
A HParams object containing all the hyperparameters.
"""
hparams = contrib_training.HParams(
atom_types=['C', 'O', 'N'],
max_steps_per_episode=40,
allow_removal=True,
allow_no_modification=True,
allow_bonds_between_rings=False,
allowed_ring_sizes=[3, 4, 5, 6],
replay_buffer_size=1000000,
learning_rate=1e-4,
learning_rate_decay_steps=10000,
learning_rate_decay_rate=0.8,
num_episodes=5000,
batch_size=64,
learning_frequency=4,
update_frequency=20,
grad_clipping=10.0,
gamma=0.9,
double_q=True,
num_bootstrap_heads=12,
prioritized=False,
prioritized_alpha=0.6,
prioritized_beta=0.4,
prioritized_epsilon=1e-6,
fingerprint_radius=3,
fingerprint_length=2048,
dense_layers=[1024, 512, 128, 32],
activation='relu',
optimizer='Adam',
batch_norm=False,
save_frequency=1000,
max_num_checkpoints=100,
discount_factor=0.7)
return hparams.override_from_dict(kwargs)
def get_fingerprint(smiles, hparams):
"""Get Morgan Fingerprint of a specific SMILES string.
Args:
smiles: String. The SMILES string of the molecule.
hparams: tf.contrib.training.HParams. Hyper parameters.
Returns:
np.array. shape = [hparams.fingerprint_length]. The Morgan fingerprint.
"""
if smiles is None:
return np.zeros((hparams.fingerprint_length,))
molecule = Chem.MolFromSmiles(smiles)
if molecule is None:
return np.zeros((hparams.fingerprint_length,))
fingerprint = AllChem.GetMorganFingerprintAsBitVect(
molecule, hparams.fingerprint_radius, hparams.fingerprint_length)
arr = np.zeros((1,))
# ConvertToNumpyArray takes ~ 0.19 ms, while
# np.asarray takes ~ 4.69 ms
DataStructs.ConvertToNumpyArray(fingerprint, arr)
return arr
def get_fingerprint_with_steps_left(smiles, steps_left, hparams):
"""Get Morgan Fingerprint of a SMILES string with number of steps left.
  If the maximum number of steps that can be taken in an MDP is fixed, the MDP
  is then a time-heterogeneous one. Therefore a time dependent policy is needed
for optimal performance.
Args:
smiles: String. The SMILES string of the molecule.
steps_left: Integer. The number of steps left in the environment.
hparams: tf.contrib.training.HParams. Hyper parameters.
Returns:
np.array. shape = [hparams.fingerprint_length + 1]. The fingerprint.
"""
fingerprint = get_fingerprint(smiles, hparams)
return np.append(fingerprint, steps_left)
| apache-2.0 | -6,469,962,716,477,765,000 | 38.388021 | 89 | 0.654909 | false | 3.731343 | false | false | false |
stuarteberg/lazyflow | lazyflow/operators/opCache.py | 1 | 7767 | ###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
from abc import abstractmethod, ABCMeta
#lazyflow
from lazyflow.operators.cacheMemoryManager import CacheMemoryManager
class Cache(object):
"""
Interface for objects that act as caches. This is a mixin, use as
>>> class MyCachingOperator(Cache, Operator):
... pass
This interface is designed for operators that hold values but can neither
be queried for their memory usage nor be cleaned up. All operators that
have non-negligible amounts of memory allocated internally *must* implement
this interface. However, most operators that need to implement this
interface *should* probably implement an extended interface (see below).
This interface can still be useful for several purposes:
* tell the user about memory consuming objects in general (e.g. in an
environment like ilastik)
* automated statistics and tests
Almost all caches will want to call self.registerWithMemoryManager()
to be handled by the cache memory manager thread.
WARNING: If you plan to do time consuming operations in your
__init__, be sure to make all cache API methods threadsafe. A cache
cleanup could occur while the cache is still under construction!
"""
__metaclass__ = ABCMeta
def registerWithMemoryManager(self):
manager = CacheMemoryManager()
if self.parent is None or not isinstance(self.parent, Cache):
manager.addFirstClassCache(self)
else:
manager.addCache(self)
def generateReport(self, memInfoNode):
rs = []
for child in self.children:
if not isinstance(child, Cache):
continue
r = MemInfoNode()
child.generateReport(r)
rs.append(r)
memInfoNode.children = rs
memInfoNode.type = type(self)
memInfoNode.id = id(self)
memInfoNode.name = self.name
class ObservableCache(Cache):
"""
Interface for caches that can report their usage
This interface is intended for caches that can be measured, but for
which no (easy) cleanup method is known, or which do not want to
be cleaned up by the cache memory manager.
"""
@abstractmethod
def usedMemory(self):
"""
get used memory in bytes of this cache and all observable children
"""
total = 0
for child in self.children:
if isinstance(child, ObservableCache):
total += child.usedMemory()
        return total
@abstractmethod
def fractionOfUsedMemoryDirty(self):
"""
get fraction of used memory that is in a dirty state
Dirty memory is memory that has been allocated, but cannot be used
anymore. It is ok to always return 0 if there is no dirtiness
management inside the cache. The returned value must lie in the
range [0, 1].
"""
return 0.0
def generateReport(self, memInfoNode):
super(ObservableCache, self).generateReport(memInfoNode)
memInfoNode.usedMemory = self.usedMemory()
memInfoNode.fractionOfUsedMemoryDirty =\
self.fractionOfUsedMemoryDirty()
class ManagedCache(ObservableCache):
"""
Interface for caches that can report their usage and can be cleaned up
"""
_last_access_time = 0.0
@abstractmethod
def lastAccessTime(self):
"""
get the timestamp of the last access (python timestamp)
In general, time.time() should be used here. Don't be afraid to use the
default implementation, i.e. fill the attribute _last_access_time.
"""
return self._last_access_time
@abstractmethod
def freeMemory(self):
"""
free all memory cached by this operator and its children
The result of `freeMemory()` should be compatible with
`usedMemory()`, i.e.
>>> a = cache.usedMemory()
>>> d = cache.freeMemory()
>>> a - d == cache.usedMemory()
True
@return amount of bytes freed (if applicable)
"""
raise NotImplementedError(
"No default implementation for freeMemory()")
@abstractmethod
def freeDirtyMemory(self):
"""
free all memory cached by this operator and its children that
is marked as dirty
This should not delete any non-dirty memory
@return amount of bytes freed (if applicable)
"""
raise NotImplementedError(
"No default implementation for freeDirtyMemory()")
def generateReport(self, memInfoNode):
super(ManagedCache, self).generateReport(memInfoNode)
memInfoNode.lastAccessTime = self.lastAccessTime()
class ManagedBlockedCache(ManagedCache):
"""
Interface for caches that can be managed in more detail
"""
def lastAccessTime(self):
"""
get the timestamp of the last access (python timestamp)
The default method is to use the maximum of the block timestamps.
"""
t = map(lambda x: x[1], self.getBlockAccessTimes())
if not t:
return 0.0
else:
return max(t)
@abstractmethod
def getBlockAccessTimes(self):
"""
get a list of block ids and their time stamps
"""
raise NotImplementedError(
"No default implementation for getBlockAccessTimes()")
@abstractmethod
def freeBlock(self, block_id):
"""
free memory in a specific block
The block_id argument must have been in the result of a call to
getBlockAccessTimes. When all blocks returned by getBlockAccessTimes()
are freed, the cache should be empty.
@return amount of bytes freed (if applicable)
"""
raise NotImplementedError(
"No default implementation for freeBlock()")
class MemInfoNode:
"""
aggregation of cache status indicators
"""
# type
type = None
# object id
id = None
# used memory in bytes
usedMemory = None
# data type of single cache elements (if applicable)
dtype = None
# a region of interest this cache is assigned to
# (mostly useful for wrapped caches as in OpBlockedArrayCache)
roi = None
# fraction of used memory that is dirty
fractionOfUsedMemoryDirty = None
# python timestamp of last access
lastAccessTime = None
# operator name
name = None
# additional info set by cache implementation
info = None
# reports for all of this operators children that are of type
# OpObservableCache
children = None
def __init__(self):
self.children = list()
| lgpl-3.0 | -3,512,828,103,017,726,500 | 30.445344 | 79 | 0.642462 | false | 4.693051 | false | false | false |
tvshowtime/tvshowtime-plex-scrobbler | plex_tvst_scrobbler/tvst.py | 1 | 3813 | import urllib2
import urllib
import urlparse
import xml.etree.ElementTree
import re
from htmlentitydefs import name2codepoint
import hashlib
import sys
import logging
import time
import os
import json
class Tvst(object):
CLIENT_ID = 'va0D2CEfSPNNlLoYMYYT'
CLIENT_SECRET = 'RF51gSEZBJAbLXmEUCZ8thJAwJPAyQSafCQCyqOt'
USER_AGENT = 'plex-tvst-scrobbler'
def __init__(self, cfg):
self.logger = logging.getLogger(__name__)
self.cfg = cfg
def get_session(self):
if os.path.exists(self.cfg.get('plex-tvst-scrobbler', 'session')):
sessfp = open(self.cfg.get('plex-tvst-scrobbler', 'session'), 'r')
session = sessfp.read().strip()
sessfp.close()
return session
def _do_tvst_post(self, url, data):
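        """POST the given url-encoded payload to the TVShow Time API and return
        the decoded JSON response."""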
f = urllib2.Request(url)
f.add_header('User-Agent', self.USER_AGENT)
try:
res = urllib2.urlopen(f, data)
return json.load(res)
except urllib2.URLError, e:
self.logger.error('Unable to submit post data {url} - {error}'.format(
url=url, error=e))
raise
def _get_auth_infos(self):
args = {
'client_id': self.CLIENT_ID
}
url = urlparse.urlunparse(('https',
'api.tvshowtime.com',
'/v1/oauth/device/code', '', '', ''))
res = self._do_tvst_post(url, urllib.urlencode(args))
return res
def _get_access_token(self, code):
args = {
'client_id': self.CLIENT_ID,
'client_secret': self.CLIENT_SECRET,
'code': code,
}
url = urlparse.urlunparse(('https',
'api.tvshowtime.com',
'/v1/oauth/access_token', '', '', ''))
res = self._do_tvst_post(url, urllib.urlencode(args))
return res
def scrobble(self, show_id, season_number, number):
session = self.get_session()
self.logger.info(u'submitting {show_id} - S{season_number}E{number} to tvshowtime.com.'.format(
show_id=show_id, season_number=season_number.zfill(2), number=number.zfill(2)))
args = {
'access_token': session,
'show_id': show_id,
'season_number': season_number.zfill(2),
'number': number.zfill(2)
}
url = urlparse.urlunparse(('https',
'api.tvshowtime.com',
'/v1/checkin', '', '', ''))
try:
res = self._do_tvst_post(url, urllib.urlencode(args))
except:
return False
return True
def tvst_auth(self):
print '== Requesting tvshowtime.com auth =='
auth_infos = self._get_auth_infos()
accepted = 'n'
print '\nPlease do the following to authorize the scrobbler:\n\n1/ Connect on {auth_url}\n2/ Enter the code: {code}'.format(
auth_url=auth_infos['verification_url'], code=auth_infos['user_code'])
while accepted.lower() == 'n':
print
accepted = raw_input('Have you authorized me [y/N] :')
try:
access_token_infos = self._get_access_token(auth_infos['device_code'])
except urllib2.HTTPError, e:
self.logger.error('Unable to send authorization request {error}'.format(error=e))
return False
if access_token_infos['result'] != 'OK':
print access_token_infos['message']
return
token = access_token_infos['access_token']
fp = open(self.cfg.get('plex-tvst-scrobbler', 'session'), 'w')
fp.write(token)
fp.close()
self.logger.info('TVShow Time authorization successful.')
| mit | -2,433,716,835,912,450,600 | 29.504 | 132 | 0.547076 | false | 3.738235 | false | false | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricker_v2-2-1/functions/bricksdict/getters.py | 1 | 2804 | # Copyright (C) 2020 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
# NONE!
# Blender imports
import bpy
# Module imports
from ..common import *
from ..general import *
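# Resolves the material name for a brick according to the material type setting:
# a per-brick custom override, the custom material, the most frequent source material,
# or a seeded random pick from the available brick materials.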
def get_material_name(bricksdict, key, size, zstep, material_type, mat_shell_depth=1, custom_mat=None, random_mat_seed=1000, brick_mats=None):
mat = None
if bricksdict[key]["custom_mat_name"] and is_mat_shell_val(bricksdict[key]["val"], mat_shell_depth):
mat = bpy.data.materials.get(bricksdict[key]["mat_name"])
elif material_type == "CUSTOM":
mat = custom_mat
elif material_type == "SOURCE":
mat_name = get_most_frequent_mat_name(bricksdict, key, size, zstep, mat_shell_depth)
# get the material for that mat_name
mat = bpy.data.materials.get(mat_name)
elif material_type == "RANDOM" and brick_mats is not None and len(brick_mats) > 0:
if len(brick_mats) > 1:
rand_state = np.random.RandomState(0)
rand_state.seed(random_mat_seed + int(str(hash(key))[-9:]))
rand_idx = rand_state.randint(0, len(brick_mats))
else:
rand_idx = 0
mat_name = brick_mats[rand_idx]
mat = bpy.data.materials.get(mat_name)
mat_name = "" if mat is None else mat.name
return mat_name
def get_most_frequent_mat_name(bricksdict, key, size, zstep, mat_shell_depth):
# initialize vars
highest_val = 0
mats_L = []
mat_name = ""
# get most frequent material in brick size
keys_in_brick = get_keys_in_brick(bricksdict, size, zstep, key=key)
for key0 in keys_in_brick:
cur_brick_d = bricksdict[key0]
if cur_brick_d["val"] >= highest_val:
highest_val = cur_brick_d["val"]
mat_name = cur_brick_d["mat_name"]
if is_mat_shell_val(cur_brick_d["val"], mat_shell_depth) and mat_name:
mats_L.append(mat_name)
# if multiple shell materials, use the most frequent one
if len(mats_L) > 1:
mat_name = most_common(mats_L)
return mat_name
def is_mat_shell_val(val, mat_shell_depth=1):
return (1 - val) * 100 < mat_shell_depth
| gpl-3.0 | 2,340,659,080,343,046,700 | 37.410959 | 142 | 0.659058 | false | 3.294947 | false | false | false |
nevion/clcommons | tests/kernels.py | 1 | 2204 | import pyopencl as cl
import pyopencl.array as clarray
from time import time
import numpy as np
import os
from median_of_medians import base_path
from common import *
from numpy import uint32, int32
common_lib_path = base_path
#ctx = cl.create_some_context()
platform = cl.get_platforms()[0]
devices = [device for device in platform.get_devices() if device.type == cl.device_type.GPU]
device = devices[0]  # first GPU; used below for device-level queries such as max_compute_units
queue_properties = cl.command_queue_properties.PROFILING_ENABLE | cl.command_queue_properties.OUT_OF_ORDER_EXEC_MODE_ENABLE
ctx = cl.Context(devices)
queues = [cl.CommandQueue(ctx, device, properties=queue_properties) for device in devices]
#multicontext
#ctxs = [cl.Context(device) for device in devices]
#queues = [cl.CommandQueue(ctx, device, properties=queue_properties) for ctx, device in zip(ctxs, devices)]
queue = queues[0]
computeUnits = device.max_compute_units
device_wg_size = min([wavefront_wg_size(device) for device in devices])
default_wg_size = device_wg_size
is_amd_platform = all([is_device_amd(device) for device in devices])
is_nvidia_platform = all([is_device_nvidia(device) for device in devices])
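# Prepends vendor-specific OpenCL build flags (architecture define, wavefront size,
# wavefront-sized workgroup promise, debug switches) to the kernel's compile flags.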
def cl_opt_decorate(kop, CL_FLAGS, max_wg_size_used = None):
if is_amd_platform:
CL_FLAGS2 = '-D AMD_ARCH -D DEVICE_WAVEFRONT_SIZE={wavefront_size} '.format(wavefront_size=device_wg_size)
if max_wg_size_used is not None and np.prod(max_wg_size_used, dtype=np.uint32) <= device_wg_size:
CL_FLAGS2 = CL_FLAGS2 + '-D PROMISE_WG_IS_WAVEFRONT '
CL_FLAGS = CL_FLAGS2 + CL_FLAGS
elif is_nvidia_platform:
CL_FLAGS2 = '-D NVIDIA_ARCH -D DEVICE_WAVEFRONT_SIZE={wavefront_size} '.format(wavefront_size=device_wg_size)
#if max_wg_size_used is not None and np.prod(max_wg_size_used, dtype=np.uint32) <= device_wg_size:
# CL_FLAGS2 = CL_FLAGS2 + '-D PROMISE_WG_IS_WAVEFRONT '
#causes segfault in NvCliCompileBitcode - seems like internal compiler error
CL_FLAGS = CL_FLAGS2 + CL_FLAGS
if kop.debug == 2:
CL_FLAGS = '-D DEBUG -g -cl-opt-disable '+CL_FLAGS
elif kop.debug:
CL_FLAGS = '-D DEBUG '+CL_FLAGS
return CL_FLAGS
def green(image):
return image[:, :, 1].copy()
| mit | -7,111,820,551,035,825,000 | 43.979592 | 123 | 0.703721 | false | 3.035813 | false | false | false |
cajus/qpid-cpp-store-debian | tests/persistence.py | 1 | 25530 | # Copyright (c) 2007, 2008 Red Hat, Inc.
#
# This file is part of the Qpid async store library msgstore.so.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
#
# The GNU Lesser General Public License is available in the file COPYING.
import sys, re, traceback, socket
from getopt import getopt, GetoptError
from qpid.connection import Connection
from qpid.util import connect
from qpid.datatypes import Message, RangedSet
from qpid.queue import Empty
from qpid.session import SessionException
from qpid.testlib import TestBase010
from time import sleep
class PersistenceTest(TestBase010):
XA_RBROLLBACK = 1
XA_RBTIMEOUT = 2
XA_OK = 0
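    # Builds a persistent message (delivery_mode=2), routing kwargs into delivery
    # properties, message properties and an optional body.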
def createMessage(self, **kwargs):
session = self.session
dp = {}
dp['delivery_mode'] = 2
mp = {}
for k, v in kwargs.iteritems():
if k in ['routing_key', 'delivery_mode']: dp[k] = v
if k in ['message_id', 'correlation_id', 'application_headers']: mp[k] = v
args = []
args.append(session.delivery_properties(**dp))
if len(mp):
args.append(session.message_properties(**mp))
if kwargs.has_key('body'): args.append(kwargs['body'])
return Message(*args)
def phase1(self):
session = self.session
session.queue_declare(queue="queue-a", durable=True)
session.queue_declare(queue="queue-b", durable=True)
session.exchange_bind(queue="queue-a", exchange="amq.direct", binding_key="a")
session.exchange_bind(queue="queue-b", exchange="amq.direct", binding_key="b")
session.message_transfer(destination="amq.direct",
message=self.createMessage(routing_key="a", correlation_id="Msg0001", body="A_Message1"))
session.message_transfer(destination="amq.direct",
message=self.createMessage(routing_key="b", correlation_id="Msg0002", body="B_Message1"))
# session.queue_declare(queue="lvq-test", durable=True, arguments={"qpid.last_value_queue":True})
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B1"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A1"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A2"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B2"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B3"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C1"))
def phase2(self):
session = self.session
#check queues exists
session.queue_declare(queue="queue-a", durable=True, passive=True)
session.queue_declare(queue="queue-b", durable=True, passive=True)
#check they are still bound to amq.direct correctly
responses = []
responses.append(session.exchange_bound(queue="queue-a", exchange="amq.direct", binding_key="a"))
responses.append(session.exchange_bound(queue="queue-b", exchange="amq.direct", binding_key="b"))
for r in responses:
self.assert_(not r.exchange_not_found)
self.assert_(not r.queue_not_found)
self.assert_(not r.key_not_matched)
#check expected messages are there
self.assertMessageOnQueue("queue-a", "Msg0001", "A_Message1")
self.assertMessageOnQueue("queue-b", "Msg0002", "B_Message1")
self.assertEmptyQueue("queue-a")
self.assertEmptyQueue("queue-b")
session.queue_declare(queue="queue-c", durable=True)
#send a message to a topic such that it reaches all queues
session.exchange_bind(queue="queue-a", exchange="amq.topic", binding_key="abc")
session.exchange_bind(queue="queue-b", exchange="amq.topic", binding_key="abc")
session.exchange_bind(queue="queue-c", exchange="amq.topic", binding_key="abc")
session.message_transfer(destination="amq.topic",
message=self.createMessage(routing_key="abc", correlation_id="Msg0003", body="AB_Message2"))
        # #check LVQ exists and has expected messages:
# session.queue_declare(queue="lvq-test", durable=True, passive=True)
# session.message_subscribe(destination="lvq", queue="lvq-test")
# lvq = session.incoming("lvq")
# lvq.start()
# accepted = RangedSet()
# for m in ["A2", "B3", "C1"]:
# msg = lvq.get(timeout=1)
# self.assertEquals(m, msg.body)
# accepted.add(msg.id)
# try:
# extra = lvq.get(timeout=1)
# self.fail("lvq-test not empty, contains: " + extra.body)
# except Empty: None
# #publish some more messages while subscriber is active (no replacement):
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C2"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C3"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A3"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A4"))
# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C4"))
# #check that accepting replaced messages is safe
# session.message_accept(accepted)
def phase3(self):
session = self.session
# #lvq recovery validation
# session.queue_declare(queue="lvq-test", durable=True, passive=True)
# session.message_subscribe(destination="lvq", queue="lvq-test")
# lvq = session.incoming("lvq")
# lvq.start()
# accepted = RangedSet()
# lvq.start()
# for m in ["C4", "A4"]:
# msg = lvq.get(timeout=1)
# self.assertEquals(m, msg.body)
# accepted.add(msg.id)
# session.message_accept(accepted)
# try:
# extra = lvq.get(timeout=1)
# self.fail("lvq-test not empty, contains: " + extra.body)
# except Empty: None
# session.message_cancel(destination="lvq")
# session.queue_delete(queue="lvq-test")
#check queues exists
session.queue_declare(queue="queue-a", durable=True, passive=True)
session.queue_declare(queue="queue-b", durable=True, passive=True)
session.queue_declare(queue="queue-c", durable=True, passive=True)
session.tx_select()
#check expected messages are there
self.assertMessageOnQueue("queue-a", "Msg0003", "AB_Message2")
self.assertMessageOnQueue("queue-b", "Msg0003", "AB_Message2")
self.assertMessageOnQueue("queue-c", "Msg0003", "AB_Message2")
self.assertEmptyQueue("queue-a")
self.assertEmptyQueue("queue-b")
self.assertEmptyQueue("queue-c")
#note: default bindings must be restored for this to work
session.message_transfer(message=self.createMessage(
routing_key="queue-a", correlation_id="Msg0004", body="A_Message3"))
session.message_transfer(message=self.createMessage(
routing_key="queue-a", correlation_id="Msg0005", body="A_Message4"))
session.message_transfer(message=self.createMessage(
routing_key="queue-a", correlation_id="Msg0006", body="A_Message5"))
session.tx_commit()
#delete a queue
session.queue_delete(queue="queue-c")
session.message_subscribe(destination="ctag", queue="queue-a", accept_mode=0)
session.message_flow(destination="ctag", unit=0, value=0xFFFFFFFF)
session.message_flow(destination="ctag", unit=1, value=0xFFFFFFFF)
included = session.incoming("ctag")
msg1 = included.get(timeout=1)
self.assertExpectedContent(msg1, "Msg0004", "A_Message3")
msg2 = included.get(timeout=1)
self.assertExpectedContent(msg2, "Msg0005", "A_Message4")
msg3 = included.get(timeout=1)
self.assertExpectedContent(msg3, "Msg0006", "A_Message5")
self.ack(msg1, msg2, msg3)
session.message_transfer(destination="amq.direct", message=self.createMessage(
routing_key="queue-b", correlation_id="Msg0007", body="B_Message3"))
session.tx_rollback()
def phase4(self):
session = self.session
#check queues exists
session.queue_declare(queue="queue-a", durable=True, passive=True)
session.queue_declare(queue="queue-b", durable=True, passive=True)
self.assertMessageOnQueue("queue-a", "Msg0004", "A_Message3")
self.assertMessageOnQueue("queue-a", "Msg0005", "A_Message4")
self.assertMessageOnQueue("queue-a", "Msg0006", "A_Message5")
self.assertEmptyQueue("queue-a")
self.assertEmptyQueue("queue-b")
#check this queue doesn't exist
try:
session.queue_declare(queue="queue-c", durable=True, passive=True)
raise Exception("Expected queue-c to have been deleted")
except SessionException, e:
self.assertEquals(404, e.args[0].error_code)
def phase5(self):
session = self.session
queues = ["queue-a1", "queue-a2", "queue-b1", "queue-b2", "queue-c1", "queue-c2", "queue-d1", "queue-d2"]
for q in queues:
session.queue_declare(queue=q, durable=True)
session.queue_purge(queue=q)
session.message_transfer(message=self.createMessage(
routing_key="queue-a1", correlation_id="MsgA", body="MessageA"))
session.message_transfer(message=self.createMessage(
routing_key="queue-b1", correlation_id="MsgB", body="MessageB"))
session.message_transfer(message=self.createMessage(
routing_key="queue-c1", correlation_id="MsgC", body="MessageC"))
session.message_transfer(message=self.createMessage(
routing_key="queue-d1", correlation_id="MsgD", body="MessageD"))
session.dtx_select()
txa = self.xid('a')
txb = self.xid('b')
txc = self.xid('c')
txd = self.xid('d')
self.txswap("queue-a1", "queue-a2", txa)
self.txswap("queue-b1", "queue-b2", txb)
self.txswap("queue-c1", "queue-c2", txc)
self.txswap("queue-d1", "queue-d2", txd)
#no queue should have any messages accessible
for q in queues:
self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
self.assertEqual(self.XA_OK, session.dtx_commit(xid=txa, one_phase=True).status)
self.assertEqual(self.XA_OK, session.dtx_rollback(xid=txb).status)
self.assertEqual(self.XA_OK, session.dtx_prepare(xid=txc).status)
self.assertEqual(self.XA_OK, session.dtx_prepare(xid=txd).status)
#further checks
not_empty = ["queue-a2", "queue-b1"]
for q in queues:
if q in not_empty:
self.assertEqual(1, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
else:
self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
def phase6(self):
session = self.session
        #check prepared transactions are reported correctly by recover
txc = self.xid('c')
txd = self.xid('d')
xids = session.dtx_recover().in_doubt
ids = [x.global_id for x in xids] #TODO: come up with nicer way to test these
if txc.global_id not in ids:
self.fail("Recovered xids not as expected. missing: %s" % (txc))
if txd.global_id not in ids:
self.fail("Recovered xids not as expected. missing: %s" % (txd))
self.assertEqual(2, len(xids))
queues = ["queue-a1", "queue-a2", "queue-b1", "queue-b2", "queue-c1", "queue-c2", "queue-d1", "queue-d2"]
not_empty = ["queue-a2", "queue-b1"]
#re-check
not_empty = ["queue-a2", "queue-b1"]
for q in queues:
if q in not_empty:
self.assertEqual(1, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
else:
self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
#complete the prepared transactions
self.assertEqual(self.XA_OK, session.dtx_commit(xid=txc).status)
self.assertEqual(self.XA_OK, session.dtx_rollback(xid=txd).status)
not_empty.append("queue-c2")
not_empty.append("queue-d1")
for q in queues:
if q in not_empty:
self.assertEqual(1, session.queue_query(queue=q).message_count)
else:
self.assertEqual(0, session.queue_query(queue=q).message_count)
def phase7(self):
session = self.session
session.synchronous = False
# check xids from phase 6 are gone
txc = self.xid('c')
txd = self.xid('d')
xids = session.dtx_recover().in_doubt
ids = [x.global_id for x in xids] #TODO: come up with nicer way to test these
if txc.global_id in ids:
self.fail("Xid still present : %s" % (txc))
if txd.global_id in ids:
self.fail("Xid still present : %s" % (txc))
self.assertEqual(0, len(xids))
#test deletion of queue after publish
#create queue
session.queue_declare(queue = "q", auto_delete=True, durable=True)
#send message
for i in range(1, 10):
session.message_transfer(message=self.createMessage(routing_key = "q", body = "my-message"))
session.synchronous = True
#explicitly delete queue
session.queue_delete(queue = "q")
#test acking of message from auto-deleted queue
#create queue
session.queue_declare(queue = "q", auto_delete=True, durable=True)
#send message
session.message_transfer(message=self.createMessage(routing_key = "q", body = "my-message"))
#create consumer
session.message_subscribe(queue = "q", destination = "a", accept_mode=0, acquire_mode=0)
session.message_flow(unit = 1, value = 0xFFFFFFFF, destination = "a")
session.message_flow(unit = 0, value = 10, destination = "a")
queue = session.incoming("a")
#consume the message, cancel subscription (triggering auto-delete), then ack it
msg = queue.get(timeout = 5)
session.message_cancel(destination = "a")
self.ack(msg)
#test implicit deletion of bindings when queue is deleted
session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
session.message_transfer(destination= "amq.topic", message=self.createMessage(routing_key = "xyz", body = "my-message"))
session.queue_delete(queue = "durable-subscriber-queue")
#test unbind:
#create a series of bindings to a queue
session.queue_declare(queue = "binding-test-queue", durable=True)
session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="abc")
session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="pqr")
session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="xyz")
session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="a", arguments={"x-match":"all", "p":"a"})
session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="b", arguments={"x-match":"all", "p":"b"})
session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="c", arguments={"x-match":"all", "p":"c"})
#then restart broker...
def phase8(self):
session = self.session
#continue testing unbind:
#send messages to the queue via each of the bindings
for k in ["abc", "pqr", "xyz"]:
data = "first %s" % (k)
session.message_transfer(destination= "amq.direct", message=self.createMessage(routing_key=k, body=data))
for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
data = "first %s" % (a["p"])
session.message_transfer(destination="amq.match", message=self.createMessage(application_headers=a, body=data))
#unbind some bindings (using final 0-10 semantics)
session.exchange_unbind(exchange="amq.direct", queue="binding-test-queue", binding_key="pqr")
session.exchange_unbind(exchange="amq.match", queue="binding-test-queue", binding_key="b")
#send messages again
for k in ["abc", "pqr", "xyz"]:
data = "second %s" % (k)
session.message_transfer(destination= "amq.direct", message=self.createMessage(routing_key=k, body=data))
for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
data = "second %s" % (a["p"])
session.message_transfer(destination="amq.match", message=self.createMessage(application_headers=a, body=data))
#check that only the correct messages are received
expected = []
for k in ["abc", "pqr", "xyz"]:
expected.append("first %s" % (k))
for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
expected.append("first %s" % (a["p"]))
for k in ["abc", "xyz"]:
expected.append("second %s" % (k))
for a in [{"p":"a"}, {"p":"c"}]:
expected.append("second %s" % (a["p"]))
session.message_subscribe(queue = "binding-test-queue", destination = "binding-test")
session.message_flow(unit = 1, value = 0xFFFFFFFF, destination = "binding-test")
session.message_flow(unit = 0, value = 10, destination = "binding-test")
queue = session.incoming("binding-test")
while len(expected):
msg = queue.get(timeout=1)
if msg.body not in expected:
self.fail("Missing message: %s" % msg.body)
expected.remove(msg.body)
try:
msg = queue.get(timeout=1)
self.fail("Got extra message: %s" % msg.body)
except Empty: pass
session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
session.message_transfer(destination= "amq.topic", message=self.createMessage(routing_key = "xyz", body = "my-message"))
session.queue_delete(queue = "durable-subscriber-queue")
def xid(self, txid, branchqual = ''):
return self.session.xid(format=0, global_id=txid, branch_id=branchqual)
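    # Moves a single message from src to dest inside the distributed transaction tx:
    # consume it, re-publish with the same correlation id, then acknowledge.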
def txswap(self, src, dest, tx):
self.assertEqual(self.XA_OK, self.session.dtx_start(xid=tx).status)
self.session.message_subscribe(destination="temp-swap", queue=src, accept_mode=0)
self.session.message_flow(destination="temp-swap", unit=0, value=1)
self.session.message_flow(destination="temp-swap", unit=1, value=0xFFFFFFFF)
msg = self.session.incoming("temp-swap").get(timeout=1)
self.session.message_cancel(destination="temp-swap")
self.session.message_transfer(message=self.createMessage(routing_key=dest, correlation_id=self.getProperty(msg, 'correlation_id'),
body=msg.body))
self.ack(msg)
self.assertEqual(self.XA_OK, self.session.dtx_end(xid=tx).status)
def assertEmptyQueue(self, name):
self.assertEqual(0, self.session.queue_query(queue=name).message_count)
def assertConnectionException(self, expectedCode, message):
self.assertEqual("connection", message.method.klass.name)
self.assertEqual("close", message.method.name)
self.assertEqual(expectedCode, message.reply_code)
def assertExpectedMethod(self, reply, klass, method):
self.assertEqual(klass, reply.method.klass.name)
self.assertEqual(method, reply.method.name)
def assertExpectedContent(self, msg, id, body):
self.assertEqual(id, self.getProperty(msg, 'correlation_id'))
self.assertEqual(body, msg.body)
return msg
def getProperty(self, msg, name):
for h in msg.headers:
if hasattr(h, name): return getattr(h, name)
return None
def ack(self, *msgs):
session = self.session
set = RangedSet()
for m in msgs:
set.add(m.id)
#TODO: tidy up completion
session.receiver._completed.add(m.id)
session.message_accept(set)
session.channel.session_completed(session.receiver._completed)
def assertExpectedGetResult(self, id, body):
        return self.assertExpectedContent(self.session.incoming("incoming-gets").get(timeout=1), id, body)
def assertEqual(self, expected, actual, msg=''):
if expected != actual: raise Exception("%s expected: %s actual: %s" % (msg, expected, actual))
def assertMessageOnQueue(self, queue, id, body):
self.session.message_subscribe(destination="incoming-gets", queue=queue, accept_mode=0)
self.session.message_flow(destination="incoming-gets", unit=0, value=1)
self.session.message_flow(destination="incoming-gets", unit=1, value=0xFFFFFFFF)
msg = self.session.incoming("incoming-gets").get(timeout=1)
self.assertExpectedContent(msg, id, body)
self.ack(msg)
self.session.message_cancel(destination="incoming-gets")
def __init__(self):
TestBase010.__init__(self, "run")
self.setBroker("localhost")
self.errata = []
def connect(self):
""" Connects to the broker """
self.conn = Connection(connect(self.host, self.port))
self.conn.start(timeout=10)
self.session = self.conn.session("test-session", timeout=10)
def run(self, args=sys.argv[1:]):
try:
opts, extra = getopt(args, "r:s:e:b:p:h", ["retry=", "spec=", "errata=", "broker=", "phase=", "help"])
except GetoptError, e:
self._die(str(e))
phase = 0
retry = 0;
for opt, value in opts:
if opt in ("-h", "--help"): self._die()
if opt in ("-s", "--spec"): self.spec = value
if opt in ("-e", "--errata"): self.errata.append(value)
if opt in ("-b", "--broker"): self.setBroker(value)
if opt in ("-p", "--phase"): phase = int(value)
if opt in ("-r", "--retry"): retry = int(value)
if not phase: self._die("please specify the phase to run")
phase = "phase%d" % phase
self.connect()
try:
getattr(self, phase)()
print phase, "succeeded"
res = True;
except Exception, e:
print phase, "failed: ", e
traceback.print_exc()
res = False
if not self.session.error(): self.session.close(timeout=10)
self.conn.close(timeout=10)
# Crude fix to wait for thread in client to exit after return from session_close()
# Reduces occurrences of "Unhandled exception in thread" messages after each test
import time
time.sleep(1)
return res
def setBroker(self, broker):
rex = re.compile(r"""
# [ <user> [ / <password> ] @] <host> [ :<port> ]
^ (?: ([^/]*) (?: / ([^@]*) )? @)? ([^:]+) (?: :([0-9]+))?$""", re.X)
match = rex.match(broker)
if not match: self._die("'%s' is not a valid broker" % (broker))
self.user, self.password, self.host, self.port = match.groups()
self.port = int(default(self.port, 5672))
self.user = default(self.user, "guest")
self.password = default(self.password, "guest")
def _die(self, message = None):
if message: print message
print """
Options:
-h/--help : this message
-s/--spec <spec.xml> : file containing amqp XML spec
-p/--phase : test phase to run
-b/--broker [<user>[/<password>]@]<host>[:<port>] : broker to connect to
"""
sys.exit(1)
def default(value, default):
if (value == None): return default
else: return value
if __name__ == "__main__":
test = PersistenceTest()
if not test.run(): sys.exit(1)
| lgpl-2.1 | 8,354,213,556,659,311,000 | 43.322917 | 138 | 0.620564 | false | 3.582655 | true | false | false |
cameronbwhite/GithubRemote | GithubRemote/Gui/AddAccountWizard.py | 1 | 8889 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2013, Cameron White
from .tools import waiting_effects
from github import Github
from github.GithubException import BadCredentialsException, \
TwoFactorException, GithubException
from github.Authorization import Authorization
from PyQt4.QtCore import QRegExp
from PyQt4.QtGui import QWizardPage, QWizard, QRadioButton, QLineEdit, \
QRegExpValidator, QVBoxLayout, QLabel, QFormLayout, QValidator
class GithubCredentialsWizardPage(QWizardPage):
def __init__(self, parent=None):
super(GithubCredentialsWizardPage, self).__init__(
parent,
title="Credentials",
subTitle="Enter your username/password or token")
# Radio Buttons
self.userPassRadioButton = QRadioButton()
self.userPassRadioButton.toggled.connect(self.changeMode)
self.userPassRadioButton.toggled.connect(self.completeChanged.emit)
self.tokenRadioButton = QRadioButton()
self.tokenRadioButton.toggled.connect(self.changeMode)
self.tokenRadioButton.toggled.connect(self.completeChanged.emit)
# LineEdits
# usernameEdit
self.usernameEdit = QLineEdit(
textChanged=self.completeChanged.emit)
# Username may only contain alphanumeric characters or dash
# and cannot begin with a dash
self.usernameEdit.setValidator(
QRegExpValidator(QRegExp('[A-Za-z\d]+[A-Za-z\d-]+')))
# passwordEdit
self.passwordEdit = QLineEdit(
textChanged=self.completeChanged.emit)
self.passwordEdit.setValidator(
QRegExpValidator(QRegExp('.+')))
self.passwordEdit.setEchoMode(QLineEdit.Password)
# tokenEdit
self.tokenEdit = QLineEdit(
textChanged=self.completeChanged.emit)
# token may only contain alphanumeric characters
self.tokenEdit.setValidator(
QRegExpValidator(QRegExp('[A-Za-z\d]+')))
self.tokenEdit.setEchoMode(QLineEdit.Password)
# Form
form = QFormLayout()
form.addRow("<b>username/password</b>", self.userPassRadioButton)
form.addRow("username: ", self.usernameEdit)
form.addRow("password: ", self.passwordEdit)
form.addRow("<b>token</b>", self.tokenRadioButton)
form.addRow("token: ", self.tokenEdit)
# Layout
self.mainLayout = QVBoxLayout()
self.mainLayout.addLayout(form)
self.setLayout(self.mainLayout)
# Fields
self.registerField("username", self.usernameEdit)
self.registerField("password", self.passwordEdit)
self.registerField("token", self.tokenEdit)
self.userPassRadioButton.toggle()
self.require_2fa = False
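    # Enable only the line edits that belong to the selected authentication mode.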
def changeMode(self):
if self.userPassRadioButton.isChecked():
self.usernameEdit.setEnabled(True)
self.passwordEdit.setEnabled(True)
self.tokenEdit.setEnabled(False)
elif self.tokenRadioButton.isChecked():
self.usernameEdit.setEnabled(False)
self.passwordEdit.setEnabled(False)
self.tokenEdit.setEnabled(True)
def nextId(self):
if self.require_2fa:
return 2 # TODO remove magic number
else:
return 3 # TODO remove magic number
def isComplete(self):
if self.userPassRadioButton.isChecked():
usernameValidator = self.usernameEdit.validator()
usernameText = self.usernameEdit.text()
usernameState = usernameValidator.validate(usernameText, 0)[0]
passwordValidator = self.passwordEdit.validator()
passwordText = self.passwordEdit.text()
passwordState = passwordValidator.validate(passwordText, 0)[0]
if usernameState == QValidator.Acceptable and \
passwordState == QValidator.Acceptable:
return True
elif self.tokenRadioButton.isChecked():
tokenValidator = self.tokenEdit.validator()
tokenText = self.tokenEdit.text()
tokenState = tokenValidator.validate(tokenText, 0)[0]
if tokenState == QValidator.Acceptable:
return True
return False
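    # Verifies the credentials against GitHub: username/password creates a 'repo'-scoped
    # authorization (a TwoFactorException routes the wizard to the 2FA page), while a
    # token is validated by fetching the account login.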
@waiting_effects
def validatePage(self):
# TODO - clean this up
if self.userPassRadioButton.isChecked():
username = str(self.field('username').toString())
password = str(self.field('password').toString())
try:
g = Github(username, password)
user = g.get_user()
authentication = user.create_authorization(scopes=['repo'], note='test')
except TwoFactorException:
self.require_2fa = True
return True
except GithubException:
self.require_2fa = False
return False
self.setField('token', str(authentication.token))
self.require_2fa = False
return True
elif self.tokenRadioButton.isChecked():
token = str(self.field('token').toString())
try:
self.setField('username', Github(token).get_user().login)
except BadCredentialsException:
return False
else:
self.require_2fa = False
return True
else:
self.require_2fa = False
return False
class AccountTypeWizardPage(QWizardPage):
def __init__(self, parent=None):
super(AccountTypeWizardPage, self).__init__(
parent,
title="Select Account Type",
subTitle="Select the type of account to create")
# Radio Buttons
self.githubRadioButton = QRadioButton("Github account")
self.githubRadioButton.toggle()
# Layout
self.mainLayout = QVBoxLayout()
self.mainLayout.addWidget(self.githubRadioButton)
self.setLayout(self.mainLayout)
def nextId(self):
if self.githubRadioButton.isChecked():
return 1 # TODO remove magic number
class Github2FAWizardPage(QWizardPage):
def __init__(self, parent=None):
super(Github2FAWizardPage, self).__init__(
parent,
title="Two-Factor Authentication",
subTitle="Enter required authentication code")
# LineEdits
self.codeEdit = QLineEdit()
# codeEdit may only contain 1 or more digits
self.codeEdit.setValidator(QRegExpValidator(QRegExp(r'[\d]+')))
# Form
self.form = QFormLayout()
self.form.addRow("Code: ", self.codeEdit)
# Layout
self.setLayout(self.form)
# Fields
self.registerField('2fa_code*', self.codeEdit)
def nextId(self):
return 3 # TODO remove magic number
@waiting_effects
def validatePage(self):
username = str(self.field('username').toString())
password = str(self.field('password').toString())
code = str(self.field('2fa_code').toString())
try: # to use 2fa code
g = Github(username, password)
user = g.get_user()
authentication = user.create_authorization(
scopes=['repo'], note='test', onetime_password=code)
except GithubException:
self.wizard().back() # start over TODO make sure this works
return False
self.setField('token', str(authentication.token))
return True
class UserSummaryWizardPage(QWizardPage):
def __init__(self, parent=None):
super(UserSummaryWizardPage, self).__init__(
parent,
title="Summary",
subTitle="Summary of new user account")
# labels
self.usernameLabel = QLabel()
self.tokenLabel = QLabel()
# form
self.form = QFormLayout()
self.form.addRow("username: ", self.usernameLabel)
self.form.addRow("token: ", self.tokenLabel)
# layout
self.setLayout(self.form)
def initializePage(self):
self.usernameLabel.setText(self.field('username').toString())
self.tokenLabel.setText(self.field('token').toString())
class AddAccountWizard(QWizard):
def __init__(self, parent=None):
super(AddAccountWizard, self).__init__(
parent,
windowTitle="Sign In")
# TODO - remove magic numbers
self.setPage(0, AccountTypeWizardPage())
self.setPage(1, GithubCredentialsWizardPage())
self.setPage(2, Github2FAWizardPage())
self.setPage(3, UserSummaryWizardPage())
| bsd-3-clause | -8,045,317,347,130,864,000 | 32.670455 | 88 | 0.602655 | false | 4.473578 | false | false | false |
diamond-org/flask-diamond | setup.py | 1 | 1607 | # -*- coding: utf-8 -*-
# Flask-Diamond (c) Ian Dennis Miller
import re
import os
import codecs
from setuptools import setup, find_packages
def read(*rnames):
return codecs.open(os.path.join(os.path.dirname(__file__), *rnames), 'r', 'utf-8').read()
def grep(attrname):
pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
strval, = re.findall(pattern, read('flask_diamond/__meta__.py'))
return strval
setup(
version=grep('__version__'),
name='Flask-Diamond',
description=(
"Flask-Diamond is a batteries-included Flask framework, "
"sortof like Django but radically decomposable. "
"Flask-Diamond offers some opinions about "
"data-centric Internet applications and systems."
),
packages=find_packages(),
scripts=[
"bin/flask-diamond",
],
long_description=read('Readme.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Flask",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Internet :: WWW/HTTP",
],
include_package_data=True,
keywords='',
author=grep('__author__'),
author_email=grep('__email__'),
url=grep('__url__'),
install_requires=read('requirements.txt'),
license='MIT',
zip_safe=False,
)
| mit | 7,841,562,046,073,927,000 | 28.218182 | 93 | 0.599253 | false | 3.754673 | false | false | false |
simock85/semantica-rt-py | semantica_rt_py/views.py | 1 | 1741 | from cornice import Service
from colander import MappingSchema, SchemaNode, String
import facebook
from semantica_rt_py.persistence_config import mongodb
FB_APP_ID = '156235004569304'
class AddPageSchema(MappingSchema):
user_token = SchemaNode(String(), location='body', type='str')
page_id = SchemaNode(String(), location='body', type='str')
pages = Service(name='pages', path='/pages', description="Pages service", renderer='bson')
@pages.post(schema=AddPageSchema)
def post_pages(request):
page_id = request.validated['page_id']
token = request.validated['user_token']
graph = facebook.GraphAPI(token)
try:
graph.put_object(page_id, 'tabs', app_id=FB_APP_ID)
except Exception as e:
request.errors.add(None, 'facebook', e.message)
return {}
page_ = mongodb.pages.find_and_modify(query={'page_id': page_id},
update={
'$set': {'page_id': page_id,
'user_token': token}},
upsert=True)
return page_
class GetUpdatesSchema(MappingSchema):
page_id = SchemaNode(String(), location="querystring", type='str', required=False)
updates = Service(name='updates', path='/updates', description='Updates service', renderer='bson')
@updates.get(schema=GetUpdatesSchema)
def get_updates(request):
q = {'already_sent': 0}
if request.validated['page_id']:
q.update({'page_id': request.validated['page_id']})
    # fetch the pending updates before flagging them as sent, otherwise the query no
    # longer matches and a cursor (not the documents) would be returned to the caller
    updates = list(mongodb.updates.find(q))
    mongodb.updates.update(q, {'$set': {'already_sent': 1}}, multi=True)
    for update in updates:
        update['_id'] = str(update['_id'])
    return updates
| mit | 7,849,079,915,277,534,000 | 32.480769 | 98 | 0.610569 | false | 3.809628 | false | false | false |
Mercy-Nekesa/sokoapp | sokoapp/utils/admin.py | 1 | 1954 | from django.contrib import admin
from django.contrib.contenttypes import generic
from models import Attribute, BaseModel
from django.utils.translation import ugettext_lazy as _
class MetaInline(generic.GenericTabularInline):
model = Attribute
extra = 0
class BaseAdmin(admin.ModelAdmin):
"""
def get_readonly_fields(self, request, obj=None):
fs = super(BaseAdmin, self).get_readonly_fields(request, obj)
fs += ('created_by', 'last_updated_by',)
return fs
def get_fieldsets(self, request, obj=None):
fs = super(BaseAdmin, self).get_fieldsets(request, obj)
fs[0][1]['fields'].remove('created_by')
fs[0][1]['fields'].remove('last_updated_by')
fs.extend([(_('Other informations'), {'fields':['created_by','last_updated_by'], 'classes':['collapse']})])
return fs
def changelist_view(self, request, extra_context=None):
if request.user.has_perm('%s.can_view_deleted' % self.model._meta.app_label):
if not "deleted_flag" in self.list_filter:
self.list_filter += ("deleted_flag",)
return super(BaseAdmin, self).changelist_view(request, extra_context)
def queryset(self, request):
return super(BaseAdmin, self).queryset(request).exclude(deleted_flag=True)
"""
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.last_updated_by = request.user
obj.save()
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
for instance in instances:
if isinstance(instance, BaseModel): #Check if it is the correct type of inline
if not instance.created_by_id:
instance.created_by = request.user
instance.last_updated_by = request.user
instance.save()
| mit | -8,367,726,409,982,237,000 | 37.313725 | 115 | 0.622825 | false | 4.070833 | false | false | false |
fikipollo/paintomics3 | PaintomicsServer/src/servlets/DataManagementServlet.py | 1 | 20501 | #***************************************************************
# This file is part of Paintomics v3
#
# Paintomics is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# Paintomics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paintomics. If not, see <http://www.gnu.org/licenses/>.
#
# More info http://bioinfo.cipf.es/paintomics
# Technical contact [email protected]
#**************************************************************
import os, shutil
import logging
import logging.config
from flask import send_from_directory
from src.conf.serverconf import CLIENT_TMP_DIR
from src.classes.File import File
from src.common.UserSessionManager import UserSessionManager
from src.common.DAO.FileDAO import FileDAO
from src.common.DAO.JobDAO import JobDAO
from src.common.ServerErrorManager import handleException
def dataManagementUploadFile(request, response, DESTINATION_DIR, isReference=False):
#VARIABLE DECLARATION
fileInstance = None
daoInstance = None
try:
#****************************************************************
# Step 0.CHECK IF VALID USER SESSION
#****************************************************************
logging.info("STEP0 - CHECK IF VALID USER....")
userID = request.cookies.get('userID')
userName = request.cookies.get('userName')
sessionToken = request.cookies.get('sessionToken')
UserSessionManager().isValidUser(userID, sessionToken)
#ONLY ADMIN USER (id=0) CAN UPLOAD NEW INBUILT GTF FILES
if(isReference and UserSessionManager().isValidAdminUser(userID, userName, sessionToken)):
userID="-1"
#****************************************************************
#1. SAVE THE UPLOADED FILE TO THE USER DIRECTORY AND TO THE DATABASE
#****************************************************************
logging.info("STEP1 - FILE UPLOADING REQUEST RECEIVED")
formFields = request.form
uploadedFiles = request.files
if not isReference:
DESTINATION_DIR = DESTINATION_DIR + userID + "/inputData/"
else:
userID="-1"
DESTINATION_DIR = DESTINATION_DIR + "GTF/"
logging.info("STEP1 - READING FILES....")
fields = {}
for field in formFields.keys():
if formFields[field] == "undefined":
continue
fields[field] = formFields[field]
if isReference and formFields.get("fileName", None) != None:
registerFile(userID, formFields.get("fileName"), fields, DESTINATION_DIR)
else:
for uploadedFileName in uploadedFiles.keys():
if (uploadedFileName is not None):
#GET THE FILE OBJECT
uploadedFile = request.files.get(uploadedFileName)
uploadedFileName = uploadedFile.filename
saveFile(userID, uploadedFileName, fields, uploadedFile, DESTINATION_DIR)
response.setContent({"success": True})
except Exception as ex:
handleException(response, ex, __file__ , "dataManagementUploadFile")
finally:
if(daoInstance != None):
daoInstance.closeConnection()
return response
def dataManagementGetMyFiles(request, response, DESTINATION_DIR, MAX_CLIENT_SPACE, isReference=False):
#VARIABLE DECLARATION
fileInstance = None
fileInstances = []
daoInstance = None
try:
#****************************************************************
# Step 0.CHECK IF VALID USER SESSION
#****************************************************************
logging.info("STEP0 - CHECK IF VALID USER....")
userID = request.cookies.get('userID')
sessionToken = request.cookies.get('sessionToken')
UserSessionManager().isValidUser(userID, sessionToken)
if not isReference:
DESTINATION_DIR += userID
else:
userID="-1"
DESTINATION_DIR += "GTF/"
#****************************************************************
# Step 1.GET THE LIST OF FILES
#****************************************************************
logging.info("STEP1 - GET MY FILE LIST REQUEST RECEIVED")
daoInstance = FileDAO()
matchedFiles = daoInstance.findAll(otherParams={"userID":userID})
logging.info("STEP1 - GET MY FILE LIST REQUEST RECEIVED...DONE")
#****************************************************************
# Step 2.CALCULATE USED SPACE
#****************************************************************
logging.info("STEP2 - GET THE CURRENT USED SPACE...")
dataSummary = {"usedSpace" : dir_total_size(DESTINATION_DIR), "availableSpace": MAX_CLIENT_SPACE}
logging.info("STEP2 - GET THE CURRENT USED SPACE...DONE")
response.setContent({"success": True, "fileList" : matchedFiles, "dataSummary" : dataSummary })
except Exception as ex:
handleException(response, ex, __file__ , "dataManagementGetMyFiles")
finally:
if(daoInstance != None):
daoInstance.closeConnection()
return response
def dataManagementDeleteFile(request, response, DESTINATION_DIR, MAX_CLIENT_SPACE, isReference=False, fileName=None):
#VARIABLE DECLARATION
daoInstance = None
try:
#****************************************************************
# Step 0.CHECK IF VALID USER SESSION
#****************************************************************
logging.info("STEP0 - CHECK IF VALID USER....")
userID = request.cookies.get('userID')
userName = request.cookies.get('userName')
sessionToken = request.cookies.get('sessionToken')
if (userID is None):
response.setContent({"success": False,
"errorMessage": "Log in required</br>Sorry but the feature you are requesting is only available to registered accounts."})
else:
UserSessionManager().isValidUser(userID, sessionToken)
#ONLY ADMIN USER (id=0) CAN UPLOAD NEW INBUILT GTF FILES
if(isReference and UserSessionManager().isValidAdminUser(userID, userName, sessionToken)):
userID="-1"
if not isReference:
DESTINATION_DIR += userID + "/inputData/"
else:
userID="-1"
DESTINATION_DIR += "GTF/"
#****************************************************************
# Step 1. GET THE LIST OF JOB IDs
#****************************************************************
if fileName == None:
fileName = request.form.get("fileName")
files = fileName.split(",")
#****************************************************************
# Step 2. DELETE EACH FILE
#****************************************************************
daoInstance = FileDAO()
for fileName in files:
#****************************************************************
# Step 2.1.DELETE THE GIVEN FILE FROM DATABASE
#****************************************************************
logging.info("STEP1 - REMOVING " + fileName + " FROM DATABASE...")
daoInstance.remove(fileName, otherParams={"userID":userID})
logging.info("STEP1 - REMOVING " + fileName + " FROM DATABASE...DONE")
#****************************************************************
# Step 2.2.DELETE THE GIVEN FILE FROM DIRECTORY
#****************************************************************
logging.info("STEP2 - REMOVING " + fileName + " FROM USER DIRECTORY...")
if os.path.isfile(DESTINATION_DIR + fileName):
os.remove(DESTINATION_DIR + fileName)
logging.info("STEP2 - REMOVING " + fileName + " FROM USER DIRECTORY...DONE")
else:
logging.info("STEP2 - REMOVING " + fileName + " FROM USER DIRECTORY...FILE NOT FOUND")
response.setContent({"success": True })
except Exception as ex:
handleException(response, ex, __file__ , "dataManagementDeleteFile")
finally:
if(daoInstance != None):
daoInstance.closeConnection()
return response
def dataManagementGetMyJobs(request, response):
#VARIABLE DECLARATION
jobInstance = None
jobInstances = []
daoInstance = None
try:
#****************************************************************
# Step 0.CHECK IF VALID USER SESSION
#****************************************************************
logging.info("STEP0 - CHECK IF VALID USER....")
userID = request.cookies.get('userID')
sessionToken = request.cookies.get('sessionToken')
UserSessionManager().isValidUser(userID, sessionToken)
if (userID is None):
response.setContent({"success": False,
"errorMessage": "Log in required</br>Sorry but the feature you are requesting is only available to registered accounts."})
else:
#****************************************************************
# Step 2.GET THE LIST OF JOBS FOR GIVEN USER
#****************************************************************
logging.info("STEP1 - GET MY JOB LIST REQUEST RECEIVED")
daoInstance = JobDAO()
matchedFiles = daoInstance.findAll(otherParams={"userID":userID})
logging.info("STEP1 - GET MY JOB LIST REQUEST RECEIVED...DONE")
response.setContent({"success": True, "jobList" : matchedFiles})
except Exception as ex:
handleException(response, ex, __file__ , "dataManagementGetMyJobs")
finally:
if(daoInstance != None):
daoInstance.closeConnection()
return response
def dataManagementDeleteJob(request, response):
#VARIABLE DECLARATION
daoInstance = None
try:
#****************************************************************
# Step 0.CHECK IF VALID USER SESSION
#****************************************************************
logging.info("STEP0 - CHECK IF VALID USER....")
userID = request.cookies.get('userID')
sessionToken = request.cookies.get('sessionToken')
UserSessionManager().isValidUser(userID, sessionToken)
#****************************************************************
# Step 1. GET THE LIST OF JOB IDs
#****************************************************************.
jobID = request.form.get("jobID")
jobs = jobID.split(",")
#****************************************************************
# Step 2. DELETE EACH JOB
#****************************************************************.
daoInstance = JobDAO()
userDirID = userID if userID is not None else "nologin"
userDir = CLIENT_TMP_DIR + userDirID + "/jobsData/"
tmpDir = CLIENT_TMP_DIR + userDirID + "/tmp/"
for jobID in jobs:
#****************************************************************
# Step 2a. DELETE GIVEN JOB FROM DATABASE
#****************************************************************
logging.info("STEP1 - REMOVING " + jobID + " FROM DATABASE...")
daoInstance.remove(jobID, otherParams={"userID":userID})
logging.info("STEP1 - REMOVING " + jobID + " FROM DATABASE...DONE")
#****************************************************************
# Step 2b. DELETE GIVEN JOB FROM USER DIRECTORY
#****************************************************************
logging.info("STEP2 - REMOVING " + userDir + jobID + " FROM USER DIRECTORY...")
if os.path.isdir(userDir + jobID):
shutil.rmtree(userDir + jobID)
logging.info("STEP2 - REMOVING " + userDir + jobID + " FROM USER DIRECTORY...DONE")
else:
logging.info("STEP2 - REMOVING " + userDir + jobID + " FROM USER DIRECTORY...FILE NOT FOUND")
logging.info("STEP2 - REMOVING TEMPORAL DIR " + tmpDir + jobID + " FROM USER DIRECTORY...")
if os.path.isdir(tmpDir + jobID):
shutil.rmtree(tmpDir + jobID)
logging.info("STEP2 - REMOVING TEMPORAL DIR " + tmpDir + jobID + " FROM USER DIRECTORY...")
else:
logging.info("STEP2 - REMOVING TEMPORAL DIR " + tmpDir + jobID + " FROM USER DIRECTORY...FILE NOT FOUND")
response.setContent({"success": True })
except Exception as ex:
handleException(response, ex,__file__ , "dataManagementDeleteJob")
finally:
if(daoInstance != None):
daoInstance.closeConnection()
return response
def dataManagementDownloadFile(request, response):
try:
#****************************************************************
# Step 0.CHECK IF VALID USER SESSION
#****************************************************************
logging.info("STEP0 - CHECK IF VALID USER....")
userID = request.cookies.get('userID')
sessionToken = request.cookies.get('sessionToken')
UserSessionManager().isValidUser(userID, sessionToken)
#****************************************************************
# Step 1.READ PARAMS
#****************************************************************
fileName = request.args.get("fileName", "")
fileType =request.args.get("fileType", "")
jobID =request.args.get("jobID", "")
serve =(request.args.get("serve", "").lower() == "true")
offset =int(request.args.get("offset", 0))
#send_from_directory(self.FILES_SETTINGS.ROOT_DIRECTORY + 'public_html', filename)
#****************************************************************
# Step 2.GENERATE THE PATH TO FILE
#****************************************************************
logging.info("STEP1 - GET FILE REQUEST RECEIVED")
userDirID = userID if userID is not None else "nologin"
if fileType=="job_result":
userDir = "/jobsData/" + jobID + "/output/"
elif fileType=="input":
userDir = "/inputData/"
else:
userDir = "/tmp/" + jobID
userDir = CLIENT_TMP_DIR + userDirID + userDir
file_path = "{path}/{file}".format(path=userDir, file=fileName)
if os.path.isfile(file_path):
#IF THE REQUEST WANTS THE FILE IN A STREAM
if serve == True:
#TODO: HACER ESTO<- http://flask.pocoo.org/docs/0.10/patterns/streaming/
def generate():
with open(file_path) as f:
lines = f.readlines()
first = min(len(lines), offset)
last = min(len(lines), offset + 51)
lines = lines[first:last]
for row in lines:
yield row.rstrip() + "\n"
f.close()
from flask import Response
return Response(generate(), mimetype='text/plain')
#response.imetype='text/plain')
else:
return send_from_directory(userDir, fileName, as_attachment=True, attachment_filename=fileName)
else:
response.setContent({"success": False, "errorMessage": "File not found.</br>Sorry but it looks like the requested file was removed from system."})
return response
except Exception as ex:
handleException(response, ex, __file__ , "dataManagementDownloadFile")
return response
#****************************************************************
# FILES MANIPULATION
#****************************************************************
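# Stores an uploaded file under the user's data directory, adding a numeric suffix if the
# name is already taken, and registers the file in the database.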
def saveFile(userID, uploadedFileName, options, uploadedFile, DESTINATION_DIR):
#1. CREATE THE USER DATA DIRECTORY IF NOT EXISTS
if(not os.path.isdir(DESTINATION_DIR)):
os.makedirs(DESTINATION_DIR)
# Make sure to replace invalid chars to avoid problems
uploadedFileName = ''.join('_' if ch in [':', '!', '[', ']', ';'] else ch for ch in uploadedFileName)
#TODO: CHECK IF ENOUGH SPACE
#SAVE THE FILE TO USER's DIRECTORY
file_path = "{path}/{file}".format(path=DESTINATION_DIR, file=uploadedFileName)
    #CHECK IF FILENAME ALREADY EXISTS -> IF SO, ADD SUFFIX
    fileExtension=uploadedFileName.rsplit(".", 1)
originalName = fileExtension[0]
if(len(fileExtension)>1):
fileExtension= "." + fileExtension[1]
else:
fileExtension= ""
iteration = 1
while(os.path.isfile(file_path)):
uploadedFileName = originalName + str(iteration) + fileExtension
file_path = "{path}/{file}".format(path=DESTINATION_DIR, file=uploadedFileName)
iteration=iteration+1
logging.info("\tSAVING " + uploadedFile.filename + " AS " + uploadedFileName + "...")
uploadedFile.save(file_path)
logging.info("\tSAVING " + uploadedFile.filename + " AS " + uploadedFileName + "...DONE")
#REGISTER FILE IN DATABASE
registerFile(userID, uploadedFileName, options, DESTINATION_DIR)
return uploadedFileName
def copyFile(userID, fileName, options, origin, destination):
file_path = "{path}/{file}".format(path=destination, file=fileName)
    #CHECK IF FILENAME ALREADY EXISTS -> IF SO, ADD SUFFIX
    fileExtension=fileName.rsplit(".", 1)
originalName = fileExtension[0]
if(len(fileExtension)>1):
fileExtension= "." + fileExtension[1]
else:
fileExtension=""
iteration = 1
while(os.path.isfile(file_path)):
fileName = originalName + str(iteration) + fileExtension
file_path = "{path}/{file}".format(path=destination, file=fileName)
iteration=iteration+1
logging.info("\tCOPYING " + originalName + fileExtension + " AS " + fileName + "...")
shutil.copy(origin + originalName + fileExtension, destination + fileName)
logging.info("\tCOPYING " + originalName + fileExtension + " AS " + fileName + "...DONE")
#REGISTER FILE IN DATABASE
registerFile(userID, fileName, options, destination)
return fileName
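# Records the file's metadata (data/omic type, description, size, submission date) in the
# database for the given user, replacing any previous entry with the same file name.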
def registerFile(userID, fileName, options, location):
# Do not register the file in the database
if (str(userID) == 'None'):
return None
logging.info("\tREGISTERING " + fileName + " INTO DATABASE...")
fileInstance = File("")
fileInstance.setFileName(fileName)
fileInstance.setDataType(options.get("dataType"))
fileInstance.setOmicType(options.get("omicType"))
fileInstance.setDescription(options.get("description", ""))
options.pop("dataType", None)
options.pop("omicType", None)
options.pop("description", None)
if bool(options): #NOT EMPTY
fileInstance.otherFields = options
file_path = "{path}/{file}".format(path=location, file=fileName)
fileInstance.setSize(os.stat(file_path).st_size)
import time
fileInstance.setSubmissionDate(time.strftime("%d/%m/%Y %H:%M"))
daoInstance = FileDAO()
daoInstance.remove(fileName, otherParams={"userID": userID})
daoInstance.insert(fileInstance, otherParams={"userID":userID})
logging.info("\tREGISTERING " + fileName + " INTO DATABASE...DONE")
if(daoInstance != None):
daoInstance.closeConnection()
return fileName
def dir_total_size(source):
total_size = 0
for item in os.listdir(source):
itempath = os.path.join(source, item)
if os.path.isfile(itempath):
total_size += os.path.getsize(itempath)
elif os.path.isdir(itempath):
#TODO:ignore tmp dir
total_size += dir_total_size(itempath)
return total_size
| gpl-3.0 | 4,098,553,121,931,361,300 | 42.993562 | 158 | 0.524657 | false | 4.657201 | false | false | false |
suzuki-shunsuke/generator-ss-ansible-playbook | generators/app/templates/dynamic_inventories/env.py | 1 | 2345 | #!/usr/bin/env python
"""
environment variable inventory source
If the environment variable is undefined, the variable is also undefined.
The group level environment variables are only supported, and the host level environment variables are not supported.
The environment variable name must be uppercase.
## Examples
The environment variable "FOO" is assigned to the variable "foo".
```yaml
env_vars:
- foo
```
The environment variable "FOO" is assigned to the variable "bar".
```yaml
env_vars:
bar: foo
```
"""
import argparse
import json
import os
import sys
import yaml
def main():
parser = get_parser()
args = parser.parse_args()
if "ENV" not in os.environ:
sys.stdout.write("[ERROR] The environment variable 'ENV' is required.\n")
sys.exit(1)
env = os.environ["ENV"]
if args.list:
do_list(env)
if args.host:
do_host(env, args.host)
def do_host(env, hostname):
ret = {}
json.dump(ret, sys.stdout)
def do_list(env):
ret = {}
with open("inventories/{}.yml".format(env)) as r:
groups = [("all", yaml.load(r)["all"])]
while groups:
group_name, group = groups.pop()
node, children = parse_group(group)
ret[group_name] = node
for name, child in children.items():
groups.append((name, child))
json.dump(ret, sys.stdout)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true")
parser.add_argument("--host")
return parser
def parse_group(group):
env_vars = group.get("env_vars", {})
ev = {}
if env_vars is None:
pass
elif isinstance(env_vars, list):
for e in env_vars:
k = e.upper()
if k in os.environ:
ev[e] = os.environ[k]
elif isinstance(env_vars, dict):
for k,v in env_vars.items():
env_name = v.upper()
if env_name in os.environ:
ev[k] = os.environ[env_name]
children = group.get("children", {})
hostvars = group.get("hosts", {})
if hostvars is None:
hostvars = {}
ret = {
"hosts": hostvars.keys() if isinstance(hostvars, dict) else hostvars,
"vars": ev,
"children": children.keys()
}
return ret, children
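# Sketch of parse_group (hypothetical data): with FOO=1 exported, a group such as
#   {"hosts": {"web1": None}, "env_vars": ["foo"], "children": {}}
# is parsed into roughly
#   ({"hosts": ["web1"], "vars": {"foo": "1"}, "children": []}, {})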
if __name__ == "__main__":
main()
| mit | 3,792,367,338,160,111,000 | 22.217822 | 117 | 0.598721 | false | 3.646967 | false | false | false |
okfse/froide | froide/tests/live/test_request.py | 1 | 18721 | import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase
from django.contrib.auth import get_user_model
from django.core import mail
from selenium.webdriver.support.wait import WebDriverWait
from froide.foirequest.tests import factories
from froide.foirequest.models import FoiRequest
from froide.publicbody.models import PublicBody
User = get_user_model()
def get_selenium():
driver = getattr(settings, 'TEST_SELENIUM_DRIVER', 'firefox')
if driver == 'chrome':
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
return ChromeDriver()
elif driver == 'phantomjs':
from selenium.webdriver import PhantomJS
return PhantomJS()
else:
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
return FirefoxDriver()
class JavaScriptException(Exception):
pass
class CheckJSErrors(object):
def __init__(self, driver):
self.driver = driver
def __enter__(self):
self.driver.execute_script('''
window.onerror=function(msg){
$('body').attr('jserror', msg);
};
''')
def __exit__(self, exc_type, exc_value, traceback):
body = self.driver.find_elements_by_xpath('//body[@jserror]')
if body:
msg = body[0].get_attribute('jserror')
raise JavaScriptException(msg)
class TestMakingRequest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
cls.selenium = get_selenium()
cls.selenium.implicitly_wait(3)
super(TestMakingRequest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(TestMakingRequest, cls).tearDownClass()
def scrollTo(self, id=None, klass=None):
if id is not None:
self.selenium.find_element_by_id(id).location_once_scrolled_into_view
selector = '#' + id
if klass is not None:
self.selenium.find_element_by_class_name(klass).location_once_scrolled_into_view
selector = '.' + klass
self.selenium.execute_script("window.scrollTo(0,0);$('%s').focus();" % selector)
def setUp(self):
factories.make_world()
factories.rebuild_index()
self.user = User.objects.all()[0]
self.pb = PublicBody.objects.all()[0]
def do_login(self, navigate=True):
if navigate:
self.selenium.get('%s%s' % (self.live_server_url, reverse('account-login')))
email_input = self.selenium.find_element_by_id("id_email")
email_input.send_keys(self.user.email)
password_input = self.selenium.find_element_by_id("id_password")
password_input.send_keys('froide')
self.selenium.find_element_by_xpath(
'//form//button[contains(text(), "Log In")]').click()
def test_make_not_logged_in_request(self):
self.selenium.get('%s%s' % (self.live_server_url,
reverse('foirequest-make_request')))
with CheckJSErrors(self.selenium):
search_pbs = self.selenium.find_element_by_id('id_public_body')
search_pbs.send_keys(self.pb.name)
self.selenium.find_element_by_class_name('search-public_bodies-submit').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('.search-results .search-result'))
self.selenium.find_element_by_css_selector('.search-results .search-result label').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('option-check_foi').is_displayed())
self.selenium.find_element_by_id('option-check_foi').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('continue-foicheck'))
self.selenium.find_element_by_id('continue-foicheck').click()
req_title = 'FoiRequest Number'
self.selenium.find_element_by_id('id_subject').send_keys(req_title)
self.selenium.find_element_by_id('id_body').send_keys('Documents describing something...')
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_elements_by_css_selector('#similar-requests li'))
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('review-button').is_displayed()
)
self.selenium.find_element_by_id('id_first_name')\
.send_keys('Peter')
self.selenium.find_element_by_id('id_last_name')\
.send_keys('Parker')
user_email = '[email protected]'
self.selenium.find_element_by_id('id_user_email')\
.send_keys(user_email)
self.selenium.find_element_by_id('id_terms').click()
self.selenium.find_element_by_id('review-button').click()
self.selenium.find_element_by_id('step-review')
WebDriverWait(self.selenium, 10).until(
lambda driver: 'in' in self.selenium.find_element_by_id('step-review').get_attribute('class'))
self.scrollTo(id='send-request-button')
mail.outbox = []
self.selenium.find_element_by_id('send-request-button').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('.heroine-unit'))
new_user = User.objects.get(email=user_email)
self.assertEqual(new_user.private, False)
req = FoiRequest.objects.get(user=new_user)
self.assertEqual(req.title, req_title)
self.assertEqual(req.public, True)
self.assertEqual(req.public_body, self.pb)
self.assertEqual(req.status, 'awaiting_user_confirmation')
message = mail.outbox[0]
match = re.search('http://[^/]+(/.+)', message.body)
activate_url = match.group(1)
self.selenium.get('%s%s' % (self.live_server_url, activate_url))
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('#change-password-now'))
self.assertIn('?new#change-password-now', self.selenium.current_url)
req = FoiRequest.objects.get(user=new_user)
self.assertEqual(req.status, 'awaiting_response')
def test_make_not_logged_in_request_to_public_body(self):
self.selenium.get('%s%s' % (self.live_server_url,
reverse('foirequest-make_request',
kwargs={'public_body': self.pb.slug})))
with CheckJSErrors(self.selenium):
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('option-check_foi').is_displayed())
self.selenium.find_element_by_id('option-check_foi').click()
self.selenium.find_element_by_id('continue-foicheck').click()
req_title = 'FoiRequest Number'
self.selenium.find_element_by_id('id_subject').send_keys(req_title)
self.selenium.find_element_by_id('id_body').send_keys('Documents describing something...')
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_elements_by_css_selector('#similar-requests li'))
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('review-button').is_displayed()
)
user_first_name = 'Peter'
user_last_name = 'Parker'
self.selenium.find_element_by_id('id_first_name')\
.send_keys(user_first_name)
self.selenium.find_element_by_id('id_last_name')\
.send_keys(user_last_name)
user_email = '[email protected]'
self.selenium.find_element_by_id('id_user_email')\
.send_keys(user_email)
self.selenium.find_element_by_id('id_terms').click()
self.selenium.find_element_by_id('id_public').click()
self.selenium.find_element_by_id('id_private').click()
self.selenium.find_element_by_id('review-button').click()
WebDriverWait(self.selenium, 10).until(
lambda driver: 'in' in self.selenium.find_element_by_id('step-review').get_attribute('class'))
self.scrollTo(id='send-request-button')
self.selenium.find_element_by_id('send-request-button').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('.heroine-unit'))
new_user = User.objects.get(email=user_email)
self.assertEqual(new_user.first_name, user_first_name)
self.assertEqual(new_user.last_name, user_last_name)
self.assertEqual(new_user.private, True)
req = FoiRequest.objects.get(user=new_user)
self.assertEqual(req.title, req_title)
self.assertEqual(req.public, False)
self.assertEqual(req.public_body, self.pb)
self.assertEqual(req.status, 'awaiting_user_confirmation')
def test_make_logged_in_request(self):
self.do_login()
self.selenium.get('%s%s' % (self.live_server_url,
reverse('foirequest-make_request')))
with CheckJSErrors(self.selenium):
search_pbs = self.selenium.find_element_by_id('id_public_body')
search_pbs.send_keys(self.pb.name)
self.selenium.find_element_by_class_name('search-public_bodies-submit').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('.search-results .search-result'))
self.selenium.find_element_by_css_selector('.search-results .search-result label').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('option-check_foi').is_displayed())
self.selenium.find_element_by_id('option-check_foi').click()
self.selenium.find_element_by_id('continue-foicheck').click()
req_title = 'FoiRequest Number'
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('id_body').is_displayed()
)
self.selenium.find_element_by_id('id_subject').send_keys(req_title)
self.selenium.find_element_by_id('id_body').send_keys('Documents describing something...')
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_elements_by_css_selector('#similar-requests li'))
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('review-button').is_displayed()
)
self.selenium.find_element_by_id('review-button').click()
WebDriverWait(self.selenium, 10).until(
lambda driver: 'in' in self.selenium.find_element_by_id('step-review').get_attribute('class'))
self.scrollTo(id='send-request-button')
WebDriverWait(self.selenium, 10).until(
lambda driver: self.selenium.find_element_by_id('send-request-button').is_displayed())
self.selenium.find_element_by_id('send-request-button').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('#messages'))
req = FoiRequest.objects.filter(user=self.user).order_by('-id')[0]
self.assertIn(req.get_absolute_url(), self.selenium.current_url)
self.assertEqual(req.title, req_title)
self.assertEqual(req.public, True)
self.assertEqual(req.public_body, self.pb)
self.assertEqual(req.status, 'awaiting_response')
def test_make_logged_in_request_no_pb_yet(self):
self.do_login()
self.selenium.get('%s%s' % (self.live_server_url,
reverse('foirequest-make_request')))
with CheckJSErrors(self.selenium):
self.selenium.find_element_by_id('option-emptypublicbody').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('option-check_foi').is_displayed())
self.selenium.find_element_by_id('option-check_foi').click()
self.selenium.find_element_by_id('continue-foicheck').click()
req_title = 'FoiRequest Number'
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('id_body').is_displayed()
)
self.selenium.find_element_by_id('id_subject').send_keys(req_title)
self.selenium.find_element_by_id('id_body').send_keys('Documents describing something...')
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_elements_by_css_selector('#similar-requests li'))
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('review-button').is_displayed()
)
self.selenium.find_element_by_id('review-button').click()
WebDriverWait(self.selenium, 10).until(
lambda driver: 'in' in self.selenium.find_element_by_id('step-review').get_attribute('class'))
self.scrollTo(id='send-request-button')
WebDriverWait(self.selenium, 10).until(
lambda driver: self.selenium.find_element_by_id('send-request-button').is_displayed())
self.selenium.find_element_by_id('send-request-button').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('#messages'))
req = FoiRequest.objects.filter(user=self.user).order_by('-id')[0]
self.assertIn(req.get_absolute_url(), self.selenium.current_url)
self.assertEqual(req.title, req_title)
self.assertEqual(req.public, True)
self.assertTrue(req.public_body is None)
self.assertEqual(req.status, 'publicbody_needed')
def test_make_request_logged_out_with_existing_account(self):
self.selenium.get('%s%s' % (self.live_server_url,
reverse('foirequest-make_request')))
with CheckJSErrors(self.selenium):
self.selenium.find_element_by_id('option-emptypublicbody').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('option-check_foi').is_displayed())
self.selenium.find_element_by_id('option-check_foi').click()
self.selenium.find_element_by_id('continue-foicheck').click()
req_title = 'FoiRequest Number'
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('id_body').is_displayed()
)
self.selenium.find_element_by_id('id_subject').send_keys(req_title)
self.selenium.find_element_by_id('id_body').send_keys('Documents describing something...')
user_first_name = self.user.first_name
user_last_name = self.user.last_name
self.selenium.find_element_by_id('id_first_name')\
.send_keys(user_first_name)
self.selenium.find_element_by_id('id_last_name')\
.send_keys(user_last_name)
self.selenium.find_element_by_id("id_user_email").send_keys(self.user.email)
self.selenium.find_element_by_id('id_terms').click()
self.selenium.find_element_by_id('id_public').click()
self.selenium.find_element_by_id('id_private').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_elements_by_css_selector('#similar-requests li'))
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_id('review-button').is_displayed()
)
self.selenium.find_element_by_id('review-button').click()
WebDriverWait(self.selenium, 10).until(
lambda driver: 'in' in self.selenium.find_element_by_id('step-review').get_attribute('class'))
self.scrollTo(id='send-request-button')
WebDriverWait(self.selenium, 10).until(
lambda driver: self.selenium.find_element_by_id('send-request-button').is_displayed())
self.selenium.find_element_by_id('send-request-button').click()
main_window_handle = self.selenium.current_window_handle
login_link = '//div[@class="user_data_form"]//ul[@class="errorlist"]//a'
with CheckJSErrors(self.selenium):
WebDriverWait(self.selenium, 10).until(
lambda driver: self.selenium.find_element_by_xpath(login_link)
)
self.scrollTo(klass='target-small')
WebDriverWait(self.selenium, 10).until(
lambda driver: self.selenium.find_element_by_xpath(login_link).is_displayed())
self.selenium.find_element_by_xpath(login_link).click()
popup_handle = [wh for wh in self.selenium.window_handles if wh != main_window_handle][0]
self.selenium.switch_to_window(popup_handle)
with CheckJSErrors(self.selenium):
password_input = self.selenium.find_element_by_id("id_password")
password_input.send_keys('froide')
self.selenium.find_element_by_xpath(
'//form//button[contains(text(), "Log In")]').click()
self.selenium.switch_to_window(main_window_handle)
with CheckJSErrors(self.selenium):
self.selenium.find_element_by_id('review-button').click()
WebDriverWait(self.selenium, 10).until(
lambda driver: 'in' in self.selenium.find_element_by_id('step-review').get_attribute('class'))
self.scrollTo(id='send-request-button')
WebDriverWait(self.selenium, 10).until(
lambda driver: self.selenium.find_element_by_id('send-request-button').is_displayed())
self.selenium.find_element_by_id('send-request-button').click()
req = FoiRequest.objects.filter(user=self.user).order_by('-id')[0]
self.assertIn(req.get_absolute_url(), self.selenium.current_url)
self.assertEqual(req.title, req_title)
self.assertEqual(req.public, False)
self.assertTrue(req.public_body is None)
self.assertEqual(req.status, 'publicbody_needed')
def test_collapsed_menu(self):
self.selenium.set_window_size(600, 800)
self.selenium.get('%s%s' % (self.live_server_url,
reverse('index')))
self.selenium.find_element_by_css_selector('.navbar-toggle').click()
WebDriverWait(self.selenium, 5).until(
lambda driver: driver.find_element_by_css_selector('.navbar-form').is_displayed()
)
| mit | -6,090,042,946,033,985,000 | 50.290411 | 110 | 0.629721 | false | 3.727798 | true | false | false |
r0balo/pelisalacarta | python/version-xbmc-09-plugin/core/config.py | 2 | 7245 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 [email protected]
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# Configuration parameters (XBMC)
# ------------------------------------------------------------
import os
import sys
import xbmc
import xbmcplugin
PLATFORM_NAME = "xbmc-plugin"
PLUGIN_NAME = "pelisalacarta"
def get_platform(full_version=False):
    #full_version is only useful on xbmc/kodi
ret = {
'num_version': 9.0 ,
'name_version': PLATFORM_NAME ,
'video_db': "",
'plaform': PLATFORM_NAME
}
if full_version:
return ret
else:
return PLATFORM_NAME
def is_xbmc():
return True
def get_library_support():
return True
def get_system_platform():
""" fonction: pour recuperer la platform que xbmc tourne """
platform = "unknown"
if xbmc.getCondVisibility("system.platform.linux"):
platform = "linux"
elif xbmc.getCondVisibility("system.platform.xbox"):
platform = "xbox"
elif xbmc.getCondVisibility("system.platform.windows"):
platform = "windows"
elif xbmc.getCondVisibility("system.platform.osx"):
platform = "osx"
return platform
def open_settings():
xbmcplugin.openSettings(sys.argv[0])
def get_setting(name, channel=""):
"""
    Returns the configuration value of the requested parameter.
    Returns the value of the parameter 'name' from the global configuration or from the specific
    configuration of the channel 'channel'.
    If a channel name is given, it looks in \addon_data\plugin.video.pelisalacarta\settings_channels for
    the file channel_data.json and reads the value of the parameter 'name'. If channel_data.json does not
    exist, it looks for the file channel.xml in the channels folder and creates a channel_data.json file
    before returning the requested value.
    If the parameter 'name' does not exist in channel_data.json it is looked up in the global
    configuration, and if it does not exist there either an empty str is returned.
    Parameters:
    name -- name of the parameter
    channel [optional] -- name of the channel
    Returns:
    value -- the value of the parameter 'name'
"""
# Specific channel setting
if channel:
# logger.info("config.get_setting reading channel setting '"+name+"' from channel xml")
from core import channeltools
value = channeltools.get_channel_setting(name, channel)
# logger.info("config.get_setting -> '"+repr(value)+"'")
if value is not None:
return value
else:
return ""
# Global setting
else:
# logger.info("config.get_setting reading main setting '"+name+"'")
value = xbmcplugin.getSetting(channel + name)
        # Translate Path if it starts with "special://"
if value.startswith("special://") and "librarypath" not in name:
value = xbmc.translatePath(value)
# logger.info("config.get_setting -> '"+value+"'")
return value
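# Usage sketch (parameter and channel names are illustrative):
#   quality = get_setting("quality", channel="somechannel")  # per-channel value
#   downloads = get_setting("downloadpath")                  # global addon setting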
def set_setting(name, value, channel=""):
"""
    Sets the configuration value of the given parameter.
    Sets 'value' as the value of the parameter 'name' in the global configuration or in the specific
    configuration of the channel 'channel'.
    Returns the changed value, or None if the assignment could not be completed.
    If a channel name is given, it looks in \addon_data\plugin.video.pelisalacarta\settings_channels for
    the file channel_data.json and sets the parameter 'name' to the value given by 'value'. If
    channel_data.json does not exist, it looks for the file channel.xml in the channels folder and creates
    a channel_data.json file before modifying the parameter 'name'.
    If the parameter 'name' does not exist, it is added, with its value, to the corresponding file.
    Parameters:
    name -- name of the parameter
    value -- value of the parameter
    channel [optional] -- name of the channel
    Returns:
    'value' if the value could be set, and None otherwise
"""
if channel:
from core import channeltools
return channeltools.set_channel_setting(name, value, channel)
else:
try:
xbmcplugin.setSetting(name, value)
except:
return None
return value
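# Usage sketch (values are illustrative):
#   set_setting("downloadlistpath", "special://profile/plugin_data/video/pelisalacarta/downloads/list")
#   set_setting("quality", "HD", channel="somechannel")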
def get_localized_string(code):
dev = xbmc.getLocalizedString(code)
try:
dev = dev.encode("utf-8")
except:
pass
return dev
def get_library_config_path():
value = get_setting("librarypath")
if value == "":
verify_directories_created()
value = get_setting("librarypath")
return value
def get_library_path():
return xbmc.translatePath(get_library_config_path())
def get_temp_file(filename):
return xbmc.translatePath(os.path.join("special://temp/", filename))
def get_runtime_path():
return os.getcwd()
def get_data_path():
dev = xbmc.translatePath("special://profile/plugin_data/video/pelisalacarta")
    #Create the directory if it does not exist
if not os.path.exists(dev):
os.makedirs(dev)
return dev
def get_cookie_data():
import os
ficherocookies = os.path.join(get_data_path(), 'cookies.dat')
cookiedatafile = open(ficherocookies, 'r')
cookiedata = cookiedatafile.read()
cookiedatafile.close()
return cookiedata
# Test if all the required directories are created
def verify_directories_created():
from core import logger
from core import filetools
config_paths = [["librarypath", "library"],
["downloadpath", "downloads"],
["downloadlistpath", "downloads/list"],
["settings_path", "settings_channels"]]
for path, default in config_paths:
saved_path = get_setting(path)
if not saved_path:
saved_path = "special://profile/plugin_data/video/pelisalacarta/" + default
set_setting(path, saved_path)
saved_path = xbmc.translatePath(saved_path)
if not filetools.exists(saved_path):
logger.debug("Creating %s: %s" % (path, saved_path))
filetools.mkdir(saved_path)
            # Library
if path == "librarypath":
set_setting("library_version", "v4")
| gpl-3.0 | -1,247,484,197,485,377,000 | 30.081545 | 120 | 0.645678 | false | 3.723393 | true | false | false |
amnet04/ALECMAPREADER1 | localidad.py | 1 | 3336 | import numpy as np
import re
import os
import cv2
import csv
import funcionesCV_recurrentes as cvr
import georef
from conf import RUTA_PLANTILLAS
numeros = re.compile('\D')
espacios = re.compile('\s')
with open(RUTA_PLANTILLAS+'/localidades.csv') as localidades:
reader = csv.DictReader(localidades)
err_msg = ', no es un código válido. '
err_no0 = 'El tercer caractér no corresponde con el patrón.'
err_spa = 'el código contiene espacios.'
err_tail = ' Lo corregiré automáticamente pero es conveniente que lo'
err_tail = err_tail + ' verifique en el archivo "localidades.cvs"'
LOCALIDADES = {}
for row in reader:
Localidad = dict(row)
Localidad.pop('Idlocalidad')
Localidad.pop('Comentarios')
err = '{0}'.format(row['Idlocalidad']) + err_msg
        if espacios.search(row['Idlocalidad']):
err = err + err_spa + err_tail
print(err)
row['Idlocalidad'] = ''.join(row['Idlocalidad'].split())
if numeros.match(row['Idlocalidad'][2]):
err = err + err_no0 + err_tail
print(err)
id_as_list = list(row['Idlocalidad'])
id_as_list[2] = '0'
row['Idlocalidad'] = ''.join(id_as_list)
print(row['Idlocalidad'])
LOCALIDADES[row['Idlocalidad']] = Localidad
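# Example of the automatic correction above (hypothetical ids): " AB123" triggers
# the whitespace warning and is collapsed to "AB123"; "AB-23" has a non-digit
# third character, so it is rewritten to "AB023".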
class localidad(object):
'''
    Class to handle the localities
'''
def __init__(self, template, imagen, id):
'''
        Initializes the locality object by loading an image
'''
self.template = cv2.imread(template, 0)
self.w, self.h = self.template.shape[::-1]
self.ruta_archivo = template
self.nombre_archivo = os.path.basename(template)
self.id = os.path.basename(template)[:-4]
self.nombre = LOCALIDADES[self.id]['Nombre']
self.imagen = imagen
if cvr.detectar(self.template, imagen, 400000)[0] is not None:
self.supi, self.infd, self.roi = cvr.detectar(self.template,
imagen,
400000)
else:
self.supi, self.infd, self.roi = (None, None, None)
print('No se encontraron coincidencias para {0}'.format(id))
def enmarcar(self, color):
if (self.supi is not None and
self.infd is not None and
self.roi is not None):
enmarcado = cv2.rectangle(self.imagen,
self.supi,
self.infd,
color,
1)
return(enmarcado)
def escribir_nombre(self, color, imagen=[], supi=[]):
if supi == []:
supi = self.supi
if imagen == []:
imagen = self.imagen
if supi is not None:
nombre_en_mapa = cv2.putText(imagen,
self.id,
supi,
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
color,
1
)
return(nombre_en_mapa)
| mit | 651,476,822,275,177,600 | 36.829545 | 73 | 0.496846 | false | 3.658242 | false | false | false |
gridsync/gridsync | tests/test_filter.py | 1 | 22773 | # -*- coding: utf-8 -*-
import json
import os
from collections import OrderedDict
from unittest.mock import Mock
import pytest
from gridsync import autostart_file_path, config_dir, pkgdir
from gridsync.filter import (
apply_filters,
filter_tahoe_log_message,
get_filters,
)
@pytest.fixture
def core():
c = Mock()
c.executable = "/tmp/test/tahoe.exe"
gateway = Mock()
gateway.name = "TestGrid"
gateway.newscap = "URI:NEWSCAP"
storage_settings = OrderedDict() # Because python3.5
storage_settings["v0-22222"] = {
"anonymous-storage-FURL": "pb://[email protected]:1234/5555"
}
storage_settings["v0-66666"] = {
"anonymous-storage-FURL": "pb://[email protected]:1234/9999"
}
gateway.get_settings = Mock(
return_value={
"rootcap": "URI:000:111",
"introducer": "pb://[email protected]:12345/ccc",
"storage": storage_settings,
}
)
gateway.magic_folders = OrderedDict() # Because python3.5
gateway.magic_folders["TestFolder"] = {
"collective_dircap": "URI:aaa:bbb",
"upload_dircap": "URI:ccc:ddd",
"admin_dircap": "URI:eee:fff",
"directory": "/tmp/test/TestFolder",
"member": "Alice",
}
gateway.magic_folders["CatPics"] = {
"collective_dircap": "URI:ggg:hhh",
"upload_dircap": "URI:iii:jjj",
"admin_dircap": "URI:kkk:lll",
"directory": "/tmp/test/CatPics",
"member": "Bob",
}
c.gui.main_window.gateways = [gateway]
return c
@pytest.mark.parametrize(
"pair",
[
(pkgdir, "PkgDir"),
(config_dir, "ConfigDir"),
(autostart_file_path, "AutostartFilePath"),
(os.path.expanduser("~"), "HomeDir"),
],
)
def test_get_filters_pair_in_default_filters(core, pair):
filters = get_filters(core)
assert pair in filters
@pytest.mark.parametrize(
"string",
[
pkgdir,
config_dir,
autostart_file_path,
"TestGrid",
"URI:NEWSCAP",
"URI:000:111",
"v0-22222",
"pb://[email protected]:1234/5555",
"v0-66666",
"pb://[email protected]:1234/9999",
"TestFolder",
"URI:aaa:bbb",
"URI:ccc:ddd",
"URI:eee:fff",
"/tmp/test/TestFolder",
"Alice",
"CatPics",
"URI:ggg:hhh",
"URI:iii:jjj",
"URI:kkk:lll",
"/tmp/test/CatPics",
"Bob",
os.path.expanduser("~"),
"/tmp/test/tahoe.exe",
],
)
def test_apply_filters_string_not_in_result(core, string):
filters = get_filters(core)
in_str = "Bob gave {} to Alice".format(string)
result = apply_filters(in_str, filters)
assert string not in result
@pytest.mark.parametrize(
"string,filtered",
[
(pkgdir, "PkgDir"),
(config_dir, "ConfigDir"),
(autostart_file_path, "AutostartFilePath"),
("TestGrid", "GatewayName:1"),
("URI:NEWSCAP", "Newscap:1"),
("URI:000:111", "Rootcap:1"),
("v0-22222", "StorageServerName:1:1"),
("pb://[email protected]:1234/5555", "StorageServerFurl:1:1"),
("v0-66666", "StorageServerName:1:2"),
("pb://[email protected]:1234/9999", "StorageServerFurl:1:2"),
("URI:aaa:bbb", "Folder:1:1:CollectiveDircap"),
("URI:ccc:ddd", "Folder:1:1:UploadDircap"),
("URI:eee:fff", "Folder:1:1:AdminDircap"),
("/tmp/test/TestFolder", "Folder:1:1:Directory"),
("TestFolder", "Folder:1:1:Name"),
("Alice", "Folder:1:1:Member"),
("URI:ggg:hhh", "Folder:1:2:CollectiveDircap"),
("URI:iii:jjj", "Folder:1:2:UploadDircap"),
("URI:kkk:lll", "Folder:1:2:AdminDircap"),
("/tmp/test/CatPics", "Folder:1:2:Directory"),
("CatPics", "Folder:1:2:Name"),
("Bob", "Folder:1:2:Member"),
(os.path.expanduser("~"), "HomeDir"),
("/tmp/test/tahoe.exe", "TahoeExecutablePath"),
],
)
def test_apply_filters_filtered_string_in_result(core, string, filtered):
filters = get_filters(core)
in_str = "Bob gave {} to Alice".format(string)
result = apply_filters(in_str, filters)
assert "<Filtered:{}>".format(filtered) in result
@pytest.mark.parametrize(
"msg,keys",
[
(
{
"action_type": "dirnode:add-file",
"action_status": "started",
"metadata": {
"last_downloaded_timestamp": 1554248457.597176,
"user_mtime": 1554212870.7714074,
"version": 0,
},
"name": "lolcat.jpg",
"overwrite": True,
"task_level": [4, 3, 5, 6, 1],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248457.597313,
},
["name"],
),
(
{
"action_type": "invite-to-magic-folder",
"action_status": "started",
"timestamp": 1554305616.315925,
"client_num": 0,
"nickname": "Alice\u00f8",
"task_level": [1],
"task_uuid": "c0fd93dc-01c3-48e5-a0fa-14028cb83cdc",
},
["nickname"], # XXX MemberName
),
(
{
"action_type": "join-magic-folder",
"action_status": "started",
"timestamp": 1554305611.622096,
"local_dir": "cli/MagicFolder/create-and-then-invite-join/magic",
"client_num": 0,
"task_uuid": "41282946-79da-490f-b640-9a0ae349ffb4",
"task_level": [1],
"invite_code": "URI:DIR2-RO:3x67kv2fmpz2fji4s775o72yxe:jpi2cfxsc4xjioea735g7fnqdjimkn6scpit4xumkkzk27nfm6pq+URI:DIR2:shiycttqoawwqkonizibpkx5ye:6hv4g33odqojq23g5bq22ej6if6kinytivsmx2gwhuol65fxd2za",
},
["local_dir", "invite_code"],
),
(
{
"action_type": "magic-folder-db:update-entry",
"action_status": "started",
"last_downloaded_timestamp": 1554248457.008035,
"last_downloaded_uri": "URI:CHK:452hmzwvthqbsawh6e4ua4plei:6zeihsoigv7xl7ijdmyzfa7wt5rajqhj3ppmaqgxoilt4n5srszq:1:1:201576",
"last_uploaded_uri": "URI:CHK:452hmzwvthqbsawh6e4ua4plei:6zeihsoigv7xl7ijdmyzfa7wt5rajqhj3ppmaqgxoilt4n5srszq:1:1:201576",
"pathinfo": {
"ctime_ns": 1554212870771407360,
"exists": True,
"isdir": False,
"isfile": True,
"islink": False,
"mtime_ns": 1554212870771407360,
"size": 201576,
},
"relpath": "Garfield.jpg",
"task_level": [4, 3, 4, 7, 1],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248457.573836,
"version": 0,
},
["last_downloaded_uri", "last_uploaded_uri", "relpath"],
),
(
{
"action_type": "magic-folder:add-pending",
"action_status": "started",
"relpath": "Grumpy Cat.jpg",
"task_level": [2, 2, 9, 1],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248455.404073,
},
["relpath"],
),
(
{
"action_type": "magic-folder:downloader:get-latest-file",
"task_uuid": "bb07d7f1-0af0-44ed-9bcb-e60828fcf0a3",
"task_level": [18, 5, 16, 3, 1],
"timestamp": 1554305539.486,
"name": "blam",
"action_status": "started",
},
["name"],
),
(
{
"action_type": "magic-folder:full-scan",
"action_status": "started",
"direction": "uploader",
"nickname": "Demo Grid",
"task_level": [2, 1],
"task_uuid": "1f7049fd-1530-4d12-8461-94e42655f1be",
"timestamp": 1554248626.324124,
},
["nickname"],
),
(
{
"action_type": "magic-folder:iteration",
"action_status": "started",
"direction": "uploader",
"nickname": "Demo Grid",
"task_level": [4, 1],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248455.40636,
},
["nickname"],
),
(
{
"action_type": "magic-folder:notified",
"action_status": "started",
"timestamp": 1554305907.525834,
"nickname": "client-0",
"task_uuid": "b29934a9-ec4f-44d1-b987-45a8cc0d2ba2",
"task_level": [7, 4, 2, 1],
"path": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmpP9HEA2/local_dir/bar",
"direction": "uploader",
},
["nickname", "path"],
),
(
{
"action_type": "magic-folder:process-directory",
"task_uuid": "bc637a12-9141-41de-b36e-6eccc0a65e86",
"task_level": [8, 3, 2, 7, 6],
"timestamp": 1554305529.111,
"action_status": "succeeded",
"created_directory": "subdir",
},
["created_directory"],
),
(
{
"action_type": "magic-folder:process-item",
"action_status": "started",
"item": {"relpath": "Garfield.jpg", "size": 201576},
"task_level": [13, 3, 2, 1],
"task_uuid": "d3a0e3db-3cd6-49c5-9847-7c742b6eec56",
"timestamp": 1554250168.097768,
},
["item"], # XXX dict with relpath
),
(
{
"action_type": "magic-folder:processing-loop",
"action_status": "started",
"direction": "uploader",
"nickname": "Demo Grid",
"task_level": [3, 1],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248455.406146,
},
["nickname"],
),
(
{
"action_type": "magic-folder:remove-from-pending",
"action_status": "started",
"pending": [
"Cheshire Cat.jpeg",
"Kitler.png",
"Colonel Meow.jpg",
"Waffles.jpg",
"Grumpy Cat.jpg",
"lolcat.jpg",
],
"relpath": "lolcat.jpg",
"task_level": [4, 3, 5, 3, 1],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248457.596115,
},
["pending", "relpath"], # XXX list of paths
),
(
{
"action_type": "magic-folder:rename-conflicted",
"abspath_u": "/Users/vagrant/tahoe-lafs/_trial_temp/cli/MagicFolder/write-downloaded-file/foobar",
"action_status": "started",
"timestamp": 1554305923.406739,
"replacement_path_u": "/Users/vagrant/tahoe-lafs/_trial_temp/cli/MagicFolder/write-downloaded-file/foobar.tmp",
"task_level": [7, 2, 1],
"task_uuid": "9e88518e-d2f4-4459-babc-e45e8a24034d",
},
["abspath_u", "replacement_path_u"],
),
(
{
"action_type": "magic-folder:rename-conflicted",
"task_level": [7, 2, 2],
"timestamp": 1554305923.408401,
"result": "/Users/vagrant/tahoe-lafs/_trial_temp/cli/MagicFolder/write-downloaded-file/foobar.conflict",
"action_type": "magic-folder:rename-conflicted",
"action_status": "succeeded",
"task_uuid": "9e88518e-d2f4-4459-babc-e45e8a24034d",
},
["result"],
),
(
{
"action_type": "magic-folder:rename-deleted",
"abspath_u": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmp1cdGlh/Bob-magic/file1",
"task_level": [18, 5, 17, 4, 3, 3, 1],
"timestamp": 1554305926.082758,
"action_status": "started",
"task_uuid": "14100717-85cd-41bc-bb1c-eadc418e760b",
},
["abspath_u"],
),
(
{
"action_type": "magic-folder:rename-deleted",
"task_level": [18, 5, 17, 4, 3, 3, 2],
"timestamp": 1554305926.083676,
"result": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmp1cdGlh/Bob-magic/file1",
"action_type": "magic-folder:rename-deleted",
"action_status": "succeeded",
"task_uuid": "14100717-85cd-41bc-bb1c-eadc418e760b",
},
["result"],
),
(
{
"action_type": "magic-folder:scan-remote-dmd",
"action_status": "started",
"nickname": "admin",
"task_level": [3, 2, 1],
"task_uuid": "5816398c-a658-4b59-8526-f8052f63e114",
"timestamp": 1554248455.52203,
},
["nickname"], # XXX MemberName
),
(
{
"action_type": "magic-folder:start-downloading",
"action_status": "started",
"direction": "downloader",
"nickname": "Demo Grid",
"task_level": [1],
"task_uuid": "5816398c-a658-4b59-8526-f8052f63e114",
"timestamp": 1554248455.417441,
},
["nickname"],
),
(
{
"action_type": "magic-folder:start-monitoring",
"action_status": "started",
"direction": "uploader",
"nickname": "Demo Grid",
"task_level": [1],
"task_uuid": "e03e0c60-870f-43e3-ae87-5808728ad7ee",
"timestamp": 1554248454.973468,
},
["nickname"],
),
(
{
"action_type": "magic-folder:start-uploading",
"action_status": "started",
"direction": "uploader",
"nickname": "Demo Grid",
"task_level": [1],
"task_uuid": "1f7049fd-1530-4d12-8461-94e42655f1be",
"timestamp": 1554248626.323862,
},
["nickname"],
),
(
{
"action_type": "magic-folder:stop",
"task_uuid": "18bceae8-4f93-4f96-8ccf-cc51986355c6",
"task_level": [25, 1],
"timestamp": 1554305541.345,
"nickname": "magic-folder-default",
"action_status": "started",
},
["nickname"],
),
(
{
"action_type": "magic-folder:stop-monitoring",
"action_status": "started",
"task_level": [18, 5, 12, 4, 7, 2, 1],
"timestamp": 1554305542.267,
"nickname": "client-0",
"task_uuid": "d7a30d64-992b-48ca-a0e9-84cb0d55ea37",
"direction": "uploader",
},
["nickname"],
),
(
{
"action_type": "magic-folder:write-downloaded-file",
"mtime": 1554305970.0,
"is_conflict": False,
"timestamp": 1554305970.864801,
"abspath": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmp4Rwmkc/Bob-magic/file2",
"task_level": [20, 5, 41, 4, 3, 3, 2, 1],
"size": 9,
"task_uuid": "4ac59194-cbbf-43d4-8c36-c940541b608e",
"action_status": "started",
"now": 1554305970.864769,
},
["abspath"],
),
(
{
"action_type": "notify-when-pending",
"task_level": [9, 3, 1],
"timestamp": 1554305908.530923,
"filename": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmpC02XVl/local_dir/subdir/some-file",
"action_status": "started",
"task_uuid": "19b59820-a424-4773-8fd5-e6a5f2655339",
},
["filename"],
),
(
{
"action_type": "watchdog:inotify:any-event",
"event": "FileCreatedEvent",
"action_status": "started",
"timestamp": 1554305884.723024,
"path": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmp_mEbEu/foo.bar",
"task_level": [1],
"task_uuid": "e03ccbec-f120-49a3-9264-1ae63fdb3c5e",
},
["path"],
),
],
)
def test__apply_filter_by_action_type(msg, keys):
for key in keys:
original_value = str(msg.get(key))
filtered_msg = filter_tahoe_log_message(json.dumps(msg), "1")
assert original_value not in filtered_msg
@pytest.mark.parametrize(
"msg,keys",
[
(
{
"message_type": "fni",
"task_uuid": "564c8258-e36c-4455-95f0-a8c6b1abb481",
"info": "Event('FILE_ACTION_ADDED', u'blam.tmp')",
"task_level": [1],
"timestamp": 1554305542.236,
},
["info"],
),
(
{
"message_type": "magic-folder:add-to-download-queue",
"timestamp": 1554308128.248248,
"task_level": [79, 2, 2, 2, 6],
"task_uuid": "1dbadb17-3260-46d4-9a10-6177a5309060",
"relpath": "/tmp/magic_folder_test",
},
["relpath"],
),
(
{
"message_type": "magic-folder:all-files",
"task_uuid": "3082ca20-b897-45d6-9f65-a2ed4574f2d2",
"task_level": [14, 2, 3, 2],
"timestamp": 1554305532.329,
"files": ["what1"],
},
["files"],
),
(
{
"message_type": "magic-folder:downloader:get-latest-file:collective-scan",
"task_uuid": "a331e6e8-8e07-4393-9e49-fa2e1af46fa4",
"task_level": [18, 5, 10, 2, 2],
"timestamp": 1554305538.049,
"dmds": ["Alice\u00f8", "Bob\u00f8"],
},
["dmds"],
),
(
{
"message_type": "magic-folder:item:status-change",
"relpath": "foo",
"task_level": [6, 4, 2, 2, 2, 2],
"timestamp": 1554305907.522427,
"status": "queued",
"task_uuid": "b29934a9-ec4f-44d1-b987-45a8cc0d2ba2",
},
["relpath"],
),
(
{
"message_type": "magic-folder:maybe-upload",
"relpath": "subdir/some-file",
"task_level": [10, 3, 2, 5],
"timestamp": 1554305908.534811,
"task_uuid": "19b59820-a424-4773-8fd5-e6a5f2655339",
},
["relpath"],
),
(
{
"message_type": "magic-folder:notified-object-disappeared",
"timestamp": 1554305910.549119,
"task_level": [11, 3, 2, 5],
"path": "/Users/vagrant/tahoe-lafs/_trial_temp/immutable/Test/code/clients/2g45r67f/tmp/tmpY7_3G4/local_dir/foo",
"task_uuid": "b5d0c0ee-c4d1-4765-b1ed-e7ff5f556dc5",
},
["path"],
),
(
{
"message_type": "magic-folder:remote-dmd-entry",
"pathentry": {
"ctime_ns": 1554212870771407360,
"last_downloaded_timestamp": 1554248457.008035,
"last_downloaded_uri": "URI:CHK:452hmzwvthqbsawh6e4ua4plei:6zeihsoigv7xl7ijdmyzfa7wt5rajqhj3ppmaqgxoilt4n5srszq:1:1:201576",
"last_uploaded_uri": "URI:CHK:452hmzwvthqbsawh6e4ua4plei:6zeihsoigv7xl7ijdmyzfa7wt5rajqhj3ppmaqgxoilt4n5srszq:1:1:201576",
"mtime_ns": 1554212870771407360,
"size": 201576,
"version": 0,
},
"relpath": "Garfield.jpg",
"remote_uri": "URI:CHK:452hmzwvthqbsawh6e4ua4plei:6zeihsoigv7xl7ijdmyzfa7wt5rajqhj3ppmaqgxoilt4n5srszq:1:1:201576",
"remote_version": 0,
"task_level": [3, 2, 2],
"task_uuid": "cab6c818-50d8-4759-a53a-bd0bb64a2062",
"timestamp": 1554248626.503385,
},
["pathentry", "relpath", "remote_uri"],
),
(
{
"message_type": "magic-folder:scan-batch",
"batch": ["/tmp/magic_folder_test"],
"task_level": [50, 2, 2, 3, 3],
"timestamp": 1554305971.962848,
"task_uuid": "4ac59194-cbbf-43d4-8c36-c940541b608e",
},
["batch"],
),
(
{
"message_type": "magic-folder:item:status-change",
"relpath": "Grumpy Cat.jpg",
"status": "queued",
"task_level": [2, 2, 9, 2],
"task_uuid": "c7a1ec7e-93c1-4549-b916-adc28cda73a1",
"timestamp": 1554248455.404471,
},
["relpath"],
),
(
{
"message_type": "processing",
"task_uuid": "19595202-3d20-441f-946e-d409709130d4",
"info": "Event('FILE_ACTION_MODIFIED', u'blam.tmp')",
"task_level": [1],
"timestamp": 1554305535.829,
},
["info"],
),
],
)
def test__apply_filter_by_message_type(msg, keys):
for key in keys:
original_value = str(msg.get(key))
filtered_msg = filter_tahoe_log_message(json.dumps(msg), "1")
assert original_value not in filtered_msg
| gpl-3.0 | 2,050,122,630,346,721,000 | 35.671498 | 214 | 0.472621 | false | 3.328899 | true | false | false |
gwtaylor/pyautodiff | autodiff/context.py | 2 | 55536 | import builtins
import logging
import copy
import meta
from ast import *
import types
import inspect
import numpy as np
import theano
import theano.tensor as T
import autodiff
import autodiff.utils as utils
import autodiff.functions
import collections
logger = logging.getLogger('autodiff')
# XXX FIXME This will not do - seed must be exposed.
# from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.shared_randomstreams import RandomStreams
global_randomstreams = RandomStreams(seed=12345)
# seed = np.random.randint(1, 999999))
#########################
#########################
# from numba source
import linecache
import textwrap
try:
from meta.decompiler import decompile_func
except Exception as exn:
def decompile_func(*args, **kwargs):
raise Exception("Could not import Meta -- Cannot recreate source "
"from bytecode")
def fix_ast_lineno(tree):
# NOTE: A hack to fix assertion error in debug mode due to bad lineno.
# Lineno must increase monotonically for co_lnotab,
# the "line number table" to work correctly.
# This script just set all lineno to 1 and col_offset = to 0.
# This makes it impossible to do traceback, but it is not possible
# anyway since we are dynamically changing the source code.
    for node in walk(tree):
# only ast.expr and ast.stmt and their subclass has lineno and
# col_offset.
# if isinstance(node, ast.expr) or isinstance(node, ast.stmt):
node.lineno = 1
node.col_offset = 0
return tree
## Fixme:
## This should be changed to visit the AST and fix-up where a None object
## is present as this will likely not work for all AST.
def _fix_ast(myast):
import _ast
# Remove Pass nodes from the end of the ast
while len(myast.body) > 0 and isinstance(myast.body[-1], _ast.Pass):
del myast.body[-1]
# Add a return node at the end of the ast if not present
if len(myast.body) < 1 or not isinstance(myast.body[-1], _ast.Return):
name = _ast.Name(id='None', ctx=_ast.Load(), lineno=0, col_offset=0)
myast.body.append(Return(name))
    # remove decorator_list, which sometimes confuses the ast visitor
try:
indx = myast._fields.index('decorator_list')
except ValueError:
return
else:
myast.decorator_list = []
def get_ast(func):
if func.__name__ == '<lambda>':
func_def = decompile_func(func)
if isinstance(func_def, Lambda):
func_def = FunctionDef(name='<lambda>',
args=func_def.args,
body=[Return(func_def.body)],
decorator_list=[])
assert isinstance(func_def, FunctionDef)
return func_def
try:
linecache.checkcache(inspect.getsourcefile(func))
source = inspect.getsource(func)
source_module = inspect.getmodule(func)
except IOError:
return decompile_func(func)
else:
# Split off decorators
# TODO: This is not quite correct, we can have comments or strings
# starting at column 0 and an indented function !
source = textwrap.dedent(source)
decorators = 0
# decorator can have multiple lines
while not source.lstrip().startswith('def'):
assert source
decorator, sep, source = source.partition('\n')
decorators += 1
source_file = getattr(source_module, '__file__', '<unknown file>')
module_ast = compile(source, source_file, "exec", PyCF_ONLY_AST, True)
lineoffset = func.__code__.co_firstlineno + decorators - 1
increment_lineno(module_ast, lineoffset)
assert len(module_ast.body) == 1
func_def = module_ast.body[0]
_fix_ast(func_def)
assert isinstance(func_def, FunctionDef)
# remove docstrings (really any unassigned strings)
for node in func_def.body:
if isinstance(node, Expr) and isinstance(node.value, Str):
func_def.body.remove(node)
return func_def
#########################
#########################
def get_source(ast):
if hasattr(ast, '__code__'):
ast = get_ast(ast)
    elif callable(ast):
ast = get_ast(ast.__call__)
return meta.asttools.dump_python_source(ast)
def print_ast(ast):
if hasattr(ast, '__code__'):
ast = get_ast(ast)
    elif callable(ast):
ast = get_ast(ast.__call__)
meta.asttools.print_ast(ast)
def print_source(ast):
if hasattr(ast, '__code__'):
ast = get_ast(ast)
    elif callable(ast):
ast = get_ast(ast.__call__)
meta.asttools.python_source(ast)
def simple_Call(func, args=None):
"""
Simple alias for building Call nodes that doesn't require specification of
keywords, kwargs or starargs.
"""
args = utils.as_seq(args)
call = Call(args=args,
func=func,
keywords=[],
kwargs=None,
starargs=None)
return call
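# Sketch (illustrative node values):
#   call_node = simple_Call(func=Name(id='foo', ctx=Load()), args=[Num(n=1)])
# builds the AST equivalent of ``foo(1)`` without spelling out keywords,
# kwargs or starargs.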
def isvar_ast(name):
"""
Wraps a Name node in a call to utils.isvar.
"""
isvar = simple_Call(args=utils.as_seq(name),
func=Attribute(attr='isvar',
ctx=Load(),
value=Name(ctx=Load(), id='_utils__')))
return isvar
class Context(object):
def __init__(self,
borrowable=None,
force_floatX=False,
ignore=None,
infer_updates=False,
escape_on_error=False):
self.sym_vars = dict()
self.tags = dict()
# FIXME do we need to hold on to all of these itermediates?
# ensure these id's do not get recycled by garbage collection
self._nogc = []
self._top_def = None
self.infer_updates = infer_updates
self.updates = collections.OrderedDict()
self.borrowable = [id(b) for b in utils.as_seq(borrowable)]
self.force_floatX = force_floatX
self.ignore = utils.as_seq(ignore, tuple)
self.escape_on_error = escape_on_error
self.shadowed_containers = dict()
def recompile(self, f, nested=False):
"""
Accepts a function f that operates on numerical objects and
returns a function that operates on Theano objects.
nested : bool
`recompile` resets the context and sets the 'top_node' of the
function, which helps in tracing arguments. By passing nested=True,
this reset can be bypassed. This is used, for example, when
transforming nested functions. In this case, we want to use the
same context but keep it when calling recompile.
"""
transformer = TheanoTransformer(context=self)
f_ast = get_ast(f)
if not nested:
self._top_def = f_ast
self.tags.clear()
transformed_ast = fix_missing_locations(transformer.visit(f_ast))
f_globals = f.__globals__.copy()
f_globals.update(dict(_ctx__=transformer,
_functions__=autodiff.functions,
_T__=theano.tensor,
_utils__=autodiff.utils))
if f.__closure__:
f_globals.update((v, transformer.shadow(c.cell_contents))
for v, c in
zip(f.__code__.co_freevars, f.__closure__))
for name in f.__code__.co_names:
if name in f_globals.keys():
f_globals[name] = transformer.shadow(f_globals[name])
try:
new_f = meta.decompiler.compile_func(ast_node=transformed_ast,
filename='<Context-AST>',
globals=f_globals)
except SyntaxError as err:
if "'return' with argument inside generator" in err.message:
if isinstance(transformed_ast.body[-1], Return):
transformed_ast.body.pop(-1)
new_f = meta.decompiler.compile_func(
ast_node=transformed_ast,
filename='<Context-AST>',
globals=f_globals)
else:
raise
except:
raise
# add defaults, if necessary (meta erases them and won't recompile!)
if f.__defaults__:
new_f.__defaults__ = utils.clean_int_args(*f.__defaults__)[0]
# recreate method, if necessary
if isinstance(f, types.MethodType):
new_f = types.MethodType(new_f, f.__self__)
return new_f
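    # Usage sketch (names and shapes are illustrative; the Symbolic wrappers
    # normally drive this):
    #   ctx = Context()
    #   sym_fn = ctx.recompile(lambda x: np.tanh(x).sum())
    #   out = sym_fn(np.ones(3))   # roughly: traces the call and builds a Theano graph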
def get_symbolic(self, x):
"""
Attempts to retrieve the symbolic version of x.
        If x is a numeric object (int, float, numpy array), it must have been
        traced by the context during recompiled function execution.
        If x is a string, it must have been tagged with
autodiff.functions.tag().
"""
if isinstance(x, str):
if x in self.sym_vars:
return self.sym_vars[x]
elif x in self.tags:
return self.tags[x]
else:
raise ValueError(
'Requested the symbolic variable of tag `{0}`'
', but `{0}` was not tagged.'.format(x))
elif utils.isvar(x):
return x
elif id(x) in self.sym_vars:
return self.sym_vars[id(x)]
elif isinstance(x, int) and not isinstance(x, bool) and -5 <= x <= 256:
raise ValueError(
'Small integers (-5 <= x <= 256) can not be shadowed due to '
'CPython caching. Try casting the variable as a NumPy int '
'type or array before tracing: {0}'.format(x))
elif np.asarray(x).dtype == 'object':
raise ValueError(
'Requested the symbolic variable shadowing object {0}, but '
'it was not traced because it is not compatible with any '
'Theano type.'.format(x))
else:
raise ValueError(
'Requested the symbolic variable shadowing object {0}, but '
'it was not traced because it did not appear in the '
'function.'.format(x))
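    # Sketch (illustrative): after running a recompiled function, either the
    # traced object or a tag can be used to look up its symbolic counterpart:
    #   sym_x = ctx.get_symbolic(x)          # x was traced during execution
    #   sym_loss = ctx.get_symbolic('loss')  # 'loss' was tagged in the function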
def reset(self):
self.sym_vars.clear()
self.tags.clear()
self._nogc = []
        self._top_def = None
self.shadowed_containers.clear()
class TheanoTransformer(NodeTransformer):
def __init__(self, context):
super(TheanoTransformer, self).__init__()
self.context = context
def ast_wrap(self, method_name, args):
"""
Allows Python methods to be applied to AST nodes at runtime.
`method_name` is a method of the TheanoTransformer class that accepts
Python objects as arguments.
`args` are the AST nodes representing the arguments for `method_name`
(not including `self`!).
ast_wrap returns an `ast.Call()` node which calls the method on the
specified arguments at runtime.
"""
wrapped = simple_Call(func=Attribute(attr=method_name,
ctx=Load(),
value=Name(ctx=Load(),
id='_ctx__')),
args=args)
return wrapped
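    # Sketch (illustrative): wrapping a Name node so that self.shadow() is applied
    # to its value at runtime:
    #   node = self.ast_wrap('shadow', Name(id='x', ctx=Load()))
    # compiles to source equivalent to ``_ctx__.shadow(x)``.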
# ** --------------------------------------------------------
# ** Direct Manipulation (Methods)
def shadow(self, args):
"""
        Helper function for `_shadow_inner` that calls it on a flattened version of
its argument.
"""
shadow_vars = [self._shadow_inner(x) for x in utils.flatten(args)]
new_args = utils.unflatten(args, shadow_vars)
if isinstance(new_args, (list, dict, tuple, set)):
self.context.shadowed_containers[id(new_args)] = args
# add to _nogc to ensure that the id won't be reused
self.context._nogc.append(new_args)
return new_args
def _shadow_inner(self, x):
"""
Given a numerical variable x, return an equivalent Theano shared
variable and store the relationship in self.sym_vars. Otherwise return
x.
"""
# try checking if x is ignored (will fail for NumPy arrays)
try:
if x in self.context.ignore:
return x
except:
pass
# skip Python builtins and ignored id's
if (id(x) in self.context.ignore
or x is None
or isinstance(x, (str, bool))):
return x
# skip ignored types
elif isinstance(x,
tuple(i for i in self.context.ignore if isinstance(i, type))):
return x
# transform compatible numeric values into Theano variables
elif isinstance(x, (int, float, np.number, np.ndarray)):
# take special care with small ints, because CPython caches them.
if isinstance(x, int) and -5 <= x <= 256:
x = np.int_(x)
if getattr(x, 'dtype', None) == bool:
logger.info('Note: Theano has no bool type; '
'upcasting bool to int8.')
x = x.astype('int8')
if id(x) not in self.context.sym_vars:
# store id because x will be changed if force_floatX is True
id_x = id(x)
# add to _nogc to ensure that the id won't be reused
self.context._nogc.append(x)
# check if symbolic variable should be copied or borrowed
borrow = id_x in self.context.borrowable
# cast x if requested
if self.context.force_floatX:
x = np.array(x, dtype=theano.config.floatX)
# create symbolic version
try:
sym_x = theano.shared(x, borrow=borrow)
except:
sym_x = theano.shared(x)
# store symbolic version
self.context.sym_vars[id_x] = sym_x
# return symbolic version
return sym_x
else:
return self.context.sym_vars[id(x)]
else:
return x
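    # Sketch of the shadowing behaviour (values are illustrative):
    #   shadow(np.ones(3))  -> a Theano shared variable holding the array
    #                          (copied or borrowed, depending on `borrowable`)
    #   shadow([2.0, y])    -> a new list whose elements are shadowed; the new
    #                          container is mapped to the original in
    #                          shadowed_containers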
# ==================================================
# ==================================================
#
# Runtime Modifications
#
# ==================================================
# ==================================================
@staticmethod
def handle_escape(x):
"""
Handles escaping variables
"""
def escape(x):
if isinstance(x, theano.tensor.sharedvar.SharedVariable):
return x.get_value()
elif utils.isvar(x):
try:
return x.eval()
except Exception as e:
raise ValueError(
'Could not escape {}. \nThe following error was '
'raised when trying to call eval():\n{}'.format(x, e))
else:
return x
return utils.unflatten(x, [escape(i) for i in utils.flatten(x)])
def handle_int(self, x, escape=False):
if escape:
x = self.handle_escape(x)
if utils.isvar(x) and x.ndim == 0 and 'float' in x.dtype:
return x.astype('int64')
elif np.asarray(x).ndim == 0 and np.asarray(x).dtype.kind == 'f':
return int(x)
else:
return x
def handle_assign_updates(self, args):
target, value = args
self.shadow(target)
if id(target) in self.context.sym_vars and utils.isvar(value):
target_var = self.context.sym_vars[id(target)]
self.context.updates[target_var] = value
elif (isinstance(target, T.sharedvar.SharedVariable)
and target in self.context.sym_vars.values()
and utils.isvar(value)):
self.context.updates[target] = value
return value
def handle_escaped_call(self, fn, *args, **kwargs):
esc_args = utils.unflatten(
args, [TheanoTransformer.handle_escape(a) for a in utils.flatten(args)])
esc_kwargs = utils.unflatten(
kwargs, [TheanoTransformer.handle_escape(a) for a in utils.flatten(kwargs)])
escaped_result = fn(*esc_args, **esc_kwargs)
return self.shadow(escaped_result)
def handle_subscript(self, x):
"""
Theano doesn't have a bool type, but we can track certain variables
that we know must be boolean and possibly use that information (for
advanced indexing, for example).
We also cast non-integer scalar indices to ints (they may be coerced
to floats by the force_floatX option, for example).
"""
if isinstance(x, (list, tuple)):
# check for namedtuples, which need their __new__ args expanded
if hasattr(x, '_fields'):
return type(x)(*[self._handle_subscript_inner(xi) for xi in x])
else:
return type(x)(self._handle_subscript_inner(xi) for xi in x)
else:
return self._handle_subscript_inner(x)
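    # Sketch (illustrative): an int8 vector index (e.g. the result of ``x > 0``)
    # is converted to index positions via nonzero(), so indexing behaves like
    # NumPy boolean masking; a non-integer scalar index is cast to int64.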
def _handle_subscript_inner(self, x):
if utils.isvar(x):
if x.ndim > 0 and x.dtype == 'int8':
return x.nonzero()
elif x.ndim == 0 and 'int' not in x.dtype:
return x.astype('int64')
else:
return x
else:
return x
def handle_tag(self, obj, tag):
if not isinstance(tag, str):
raise ValueError('Tag must be a string. Received: {0}'.format(tag))
if tag in self.context.tags:
logger.warning(
'{0} was tagged as {1}, but the tag {1} was already '
'assigned. Note that the new tag will overwrite '
'the old one.'.format(obj, tag))
else:
self.context.tags[tag] = obj
if utils.isvar(obj):
obj.name = tag
return obj
def handle_tag_function_arg(self, obj, tag):
"""
A version of tagging called only by visit_FunctionDef, which tags
top-level function arguments and stores the tags in sym_vars. These
tags can not be overwritten.
"""
self.context.sym_vars[tag] = obj
if utils.isvar(obj):
obj.name = tag
def handle_functions(self, func):
"""
        Given some function, return another function.
Generally used to exchange NumPy functions for Theano equivalents.
"""
# ** ======================= first handle functions defined here!
if getattr(func, '__module__', None) == __name__:
return func
if func in self.context.ignore:
return func
# ** ======================= special autodiff functions
elif func is autodiff.functions.escape:
# escapes a variable from Tensor representation
return self.handle_escape
elif func is autodiff.functions.escaped_call:
# call a function on escaped arguments without transforming the AST
return self.handle_escaped_call
elif func is autodiff.functions.tag:
# tag a variable
return self.handle_tag
elif func is autodiff.functions.shadow:
return self.shadow
# ** ======================= autodiff classes
elif isinstance(func, autodiff.symbolic.Symbolic):
return func.symfn
# ** ======================= __theano_op__
elif hasattr(func, '__theano_op__'):
return func.__theano_op__
# ** ======================= array methods (with tensor instances)
elif utils.isvar(getattr(func, '__self__', None)):
return self.handle_methods(func.__self__, func.__name__)
# ** ======================= Theano function
elif (getattr(func, '__module__', None) and
getattr(func, '__module__', '').startswith('theano')):
return func
elif isinstance(func, T.elemwise.Elemwise):
return func
# ** ======================= type/casting functions and new builtins
elif type(func) is type:
# range
if func is range:
def range_(*args):
int_args = (self.handle_int(a, escape=True) for a in args)
return func(*int_args)
return range_
# zip
elif func is zip:
def zip_(*args):
if any(utils.isvar(a) for a in args):
raise TypeError(
'Called zip() on Tensor but Tensors '
'do not support iteration. Maybe try escaping '
'the tensor?')
else:
return zip(*args)
return zip_
# casts
            elif func in (bool, np.bool_, np.bool8):
logger.info('Warning: Theano has no bool type; '
'upgrading to int8.')
def bool_(x):
return T.neq(x, 0)
return bool_
elif func.__name__ in T.basic._cast_mapping.keys():
def cast(x):
return T.cast(x, dtype=func.__name__)
return cast
elif func is float:
def float_(x):
return T.cast(x, dtype=theano.config.floatX)
return float_
elif func is int:
def int_(x):
return T.cast(x, dtype='int' + theano.config.floatX[-2:])
return int_
# enumerate
elif func is enumerate:
def enumerate_(iterable, start=0):
if utils.isvar(iterable):
raise TypeError(
'Called enumerate() on Tensor {0} but Tensors '
'do not support iteration. Maybe try escaping '
'the tensor?'.format(iterable))
else:
return enumerate(iterable, start=start)
return enumerate_
# any other builtin function (tuple, list, set, Exception)
elif func in builtins.__dict__.values():
return func
else:
def new_type(*args, **kwargs):
try:
return self.shadow(func(*args, **kwargs))
                    except Exception:
raise ValueError('Unsupported type: {0}'.format(func))
return new_type
# ** ======================= numpy functions
elif (inspect.getmodule(func) is np
or (getattr(func, '__module__', None)
and getattr(func, '__module__').startswith('numpy'))
or isinstance(func, np.ufunc)
or func in (min, max)):
# abs
if func in (np.abs, np.absolute):
return abs
# ones/zeros
# FIXME submitted a PR to Theano to make syntax more
# like Numpy; this change shouldn't be needed afterward.
elif func in (np.ones, np.zeros):
def alloc(shp, dtype=None):
if (not isinstance(shp, (list, tuple))
and not utils.isvar(shp)):
shp = [shp]
return getattr(T, func.__name__)(shp, dtype)
return alloc
# handle asarray
elif func is np.asarray:
def _asarray(x):
if not utils.isvar(x):
return np.asarray(x)
else:
return x
return _asarray
# atleast_1d
elif func is np.atleast_1d:
def _atleast_1d(x):
if x.ndim == 0:
return x.dimshuffle('x')
else:
return x
return _atleast_1d
# atleast_2d
elif func is np.atleast_2d:
def _atleast_2d(x):
if x.ndim == 0:
return x.dimshuffle('x', 'x')
elif x.ndim == 1:
return x.dimshuffle('x', 0)
else:
return x
return _atleast_2d
# atleast_3d
elif func is np.atleast_3d:
def _atleast_3d(x):
if x.ndim == 0:
return x.dimshuffle('x', 'x', 'x')
elif x.ndim == 1:
return x.dimshuffle('x', 'x', 0)
elif x.ndim == 2:
return x.dimshuffle('x', 0, 1)
else:
return x
return _atleast_3d
# reshape
elif func is np.reshape:
def _reshape(*args, **kwargs):
callargs = inspect.getcallargs(T.reshape, *args, **kwargs)
x, newshape = callargs['x'], callargs['newshape']
if isinstance(newshape, (list, tuple)):
newshape = [self.handle_int(s) for s in newshape]
else:
newshape = self.handle_int(newshape)
return T.reshape(x, newshape)
return _reshape
# vstack
elif func is np.vstack:
def _vstack(tup):
return T.vertical_stack(*tup)
return _vstack
# hstack
elif func is np.hstack:
def _hstack(tup):
return T.horizontal_stack(*tup)
return _hstack
# transpose
elif func is np.transpose:
def _transpose(a, axes=None):
if axes is not None:
axes = [self.handle_int(a, escape=True) for a in axes]
return T.transpose(x=a, axes=axes)
return _transpose
# functions taking axis as an argument -- make sure to escape it
elif func in (np.argmax,
np.argmin,
np.argsort,
np.concatenate,
np.max,
np.mean,
np.min,
np.prod,
np.std,
np.sum,
np.var):
def reduce_(*args, **kwargs):
func_name = func.__name__
if func_name == 'amax':
func_name = 'max'
elif func_name == 'amin':
func_name = 'min'
theano_func = getattr(T, func_name)
if 'axis' in kwargs:
kwargs['axis'] = self.handle_int(
kwargs['axis'], escape=True)
elif len(args) >= 2:
args = list(args)
args[1] = self.handle_int(args[1], escape=True)
# sometimes Theano uses 'a', sometimes it uses 'x'
if func not in (np.concatenate,):
np_first_arg = inspect.getargspec(func).args[0]
t_first_arg = inspect.getargspec(theano_func).args[0]
if np_first_arg in kwargs:
if np_first_arg != t_first_arg:
kwargs[t_first_arg] = kwargs.pop(np_first_arg)
return theano_func(*args, **kwargs)
return reduce_
# get equivalent Theano function
elif hasattr(T, func.__name__):
return getattr(T, func.__name__)
else:
raise ValueError(
'Autodiff unsupported function: {0}'.format(func))
# ** ======================= ignore the inspect module
elif inspect.getmodule(func) is inspect:
return func
# ** ======================= built-ins
elif '<built-in' in str(func):
# def escaped_random(*args, **kwargs):
# return self.handle_escaped_call(func, *args, **kwargs)
# return escaped_random
def handle_size(size):
if not utils.isvar(size):
if not isinstance(size, (list, tuple)):
size = [size]
size = [self.handle_int(s) for s in size]
else:
if size.ndim == 0:
size = size.dimshuffle('x')
size = size.astype('int64')
return size
# uniform random numbers (np.random.uniform)
if func is np.random.uniform:
def rand_u(low=0.0, high=1.0, size=1):
size = handle_size(size)
return global_randomstreams.uniform(low=low,
high=high,
size=size)
return rand_u
# standard uniform random numbers (np.random.random, np.random.rand)
elif func in (np.random.random, np.random.rand):
def rand_u(size):
size = handle_size(size)
return global_randomstreams.uniform(size=size)
return rand_u
# normal random numbers (np.random.normal)
elif func is np.random.normal:
def rand_n(loc=0.0, scale=1.0, size=1):
size = handle_size(size)
return global_randomstreams.normal(avg=loc,
std=scale,
size=size)
return rand_n
# standard normal random numbers (np.random.randn)
elif func is np.random.randn:
def rand_n(*size):
size = [self.handle_int(s) for s in size]
return global_randomstreams.normal(size=size)
return rand_n
# binomial random numbers (np.random.binomial)
elif func is np.random.binomial:
def rand_b(n, p, size=1):
size = handle_size(size)
return global_randomstreams.binomial(n=n, p=p, size=size)
return rand_b
# isinstance
elif func is isinstance:
def isinstance_(obj, types):
# if self.context.force_floatX:
# if int in utils.as_seq(types):
# logger.debug(
# 'You are trying to check for ints but '
# 'force_floatX is True, so the check may fail. '
# 'Consider escaping the call.')
escaped_obj = self.handle_escape(obj)
if (isinstance(escaped_obj, (np.ndarray, np.number))
and obj.ndim == 0):
escaped_obj = np.asscalar(escaped_obj)
return isinstance(escaped_obj, self.handle_escape(types))
return isinstance_
# inplace list methods
elif isinstance(
getattr(func, '__self__', None), (list, dict, set, tuple)):
def _inplace(*args):
# check if the container is shadowing a different one
if id(func.__self__) in self.context.shadowed_containers:
c = self.context.shadowed_containers[id(func.__self__)]
tmp = getattr(c, func.__name__)(*args)
if tmp is None:
return c
else:
return tmp
else:
return func(*args)
return _inplace
# anything else
else:
return func
# ** ======================= A bound method not covered yet
# elif isinstance(func, types.MethodType):
# return func
# ** ======================= Misc
elif (('ipdb' in (getattr(func, '__module__', '') or [])
or 'pdb' in (getattr(func, '__module__', '') or []))
and func.__name__ == 'set_trace'):
return func
# ** ======================= Special handling for OrderedDict views
elif func in (collections.abc.ValuesView,
collections.abc.KeysView,
collections.abc.ItemsView):
return func
# ** ======================= Anything else
else:
try:
return self.context.recompile(func, nested=True)
except Exception as err:
if self.context.escape_on_error:
logger.warning(
'Error when recompiling {0}. Calling escaped version '
'because escape_on_error is True.'.format(func))
def escapedfunc(*args, **kwargs):
return self.handle_escaped_call(func, *args, **kwargs)
return escapedfunc
else:
raise ValueError(
'Unsupported function: {}. The following error was '
'raised: {}'.format(func, err))
# ** ======================= Catchall (shouldn't be called)
raise ValueError(
'handle_functions: No case matched function {0}. Something is '
'wrong -- should not reach this point!'.format(func))
def handle_methods(self, var, method_name):
"""
This method is called whenever:
1. An array method is requested that doesn't exist for Theano
variables (like _.swapaxes()). `handle_methods` is used
to supply a replacement method. Note that in this case,
`handle_methods` is called directly.
2. A method is requested that DOES exist for Theano variables. In
this case, `handle_methods` is called by
`handle_functions` prior to calling the method.
`handle_methods` is used to supply a replacement function
that properly handles the supplied arguments (since they are
compliant with the Numpy signature, not the Theano one).
"""
# if we're not dealing with a Theano variable, nothing to do here.
if not utils.isvar(var):
return getattr(var, method_name)
# ** ======================= Reshape
# Theano's reshape requires dim to be in a collection, unlike Numpy.
if method_name == 'reshape':
def reshape(*args, **kwargs):
if 'shape' in kwargs:
args = [kwargs.pop('shape')] + list(args)
if args:
if not isinstance(args[0], (list, tuple)):
args = [args]
else:
args = ((),)
                # Theano doesn't handle () as an arg, which NumPy interprets
                # as casting length-1 vectors to scalars
if args == ((),):
if var.ndim > 1:
raise ValueError(
'Reshape with `()` as an arg can only be used '
'with vectors of length 1.')
return var[0]
else:
if args:
args = [self.handle_int(a) for a in args[0]]
if len(args) > 1:
args = [args]
return var.reshape(*args, **kwargs)
return reshape
# ** ======================= repeat
elif method_name == 'repeat':
def repeat(repeats, axis=None):
if isinstance(repeats, (list, tuple)):
repeats = [self.handle_int(r) for r in repeats]
else:
repeats = self.handle_int(repeats)
axis = self.handle_int(axis, escape=True)
return var.repeat(repeats, axis)
return repeat
# ** ======================= swapaxes
# Theano has no swapaxes method
elif method_name == 'swapaxes':
def swapaxes(*args, **kwargs):
axis1, axis2 = (int(self.handle_escape(a)) for a in args)
dims = list(range(var.ndim))
dims[axis1], dims[axis2] = dims[axis2], dims[axis1]
return var.dimshuffle(*dims)
return swapaxes
# ** ======================= astype
# Theano doesn't process numpy dtype objects or 'bool'
elif method_name == 'astype':
def astype(*args, **kwargs):
dtype = kwargs.pop('dtype', None)
if not dtype:
dtype = args[0]
if not isinstance(dtype, str):
# get numpy dtype objects like np.float32
try:
dtype = dtype.__name__
                    except Exception:
raise NotImplementedError(
'Unsupported dtype: {0}'.format(dtype))
if 'bool' in dtype:
dtype = 'int8'
logger.info('Warning: Theano has no bool type; '
'upgrading to int8.')
return var.astype(dtype)
return astype
# ** ======================= sort
elif method_name == 'sort':
def sort_(*args, **kwargs):
raise ValueError(
'Calling an array\'s `sort()` method is not supported '
'because in NumPy it is an inplace operation, but in '
'Theano it is not. Please use numpy.sort() instead.')
return sort_
# ** ======================= reductions
elif method_name in ('argmax',
'argmin',
'argsort',
'concatenate',
'max',
'mean',
'min',
'norm',
'prod',
'std',
'sum',
'var'):
def reduce_(*args, **kwargs):
method = getattr(var, method_name)
all_args = inspect.getcallargs(method, *args, **kwargs)
for k, v in list(all_args.items()):
if v is method.__self__:
all_args.pop(k)
all_args['axis'] = self.handle_escape(all_args['axis'])
if all_args['axis'] is not None:
all_args['axis'] = int(all_args['axis'])
return method(**all_args)
return reduce_
# ** ======================= anything else
# ...Otherwise, try to access the method on the Theano variable
else:
return getattr(var, method_name)
def handle_comparison(self, operator, left, right):
"""
This method is called whenever an operator is encountered with a single
        rhs comparator, since tensors do not support them properly.
"""
if utils.isvar(left) or utils.isvar(right):
return getattr(T, operator)(left, right)
elif operator == 'gt':
return left > right
elif operator == 'ge':
return left >= right
elif operator == 'lt':
return left < right
elif operator == 'le':
return left <= right
elif operator == 'eq':
return left == right
elif operator == 'neq':
return left != right
else:
# shouldn't ever reach here!
raise ValueError(
'Not sure how to handle operator: {0}'.format(operator))
# ** --------------------------------------------------------
# ** AST Manipulation (Node Visitors)
def insert_breakpoint(self, _):
import ipdb; ipdb.set_trace()
def visit_Assign_with_updates(self, node):
"""
Given an assignment, attempt to infer a symbolic update from the
target and value.
"""
load_targets = copy.deepcopy(node.targets)
value = node.value
for t in load_targets:
load_transformer.generic_visit(t)
node_with_updates = copy.deepcopy(node)
node_with_updates.value = self.ast_wrap(
'handle_assign_updates', List(
ctx=Load(),
elts=load_targets + [value]))
        body = [node_with_updates]
        # wrap this in a try because if this is the first time a variable
        # is being assigned, then load_targets will try to reference
        # a nonexistent variable!
return Try(
body=body,
handlers=[ExceptHandler(body=[node])],
finalbody=[],
orelse=[])
def visit_Assign(self, node):
"""
Applies the following transformations:
- Transform subscripts. Tensor variables do not support inplace
assignment, so subscript assigns must be changed to call the
`set_subtensor` function.
Statements of the form:
x[a:b][c] = y
Become:
if utils.isvar(x):
x = T.set_subtensor(x[a:b], T.set_subtensor(x[a:b][c], y))
else:
x[a:b][c] = y
"""
# TODO
# AugAssigns with unbounded subscripts decompile strangely and can't
# be recompiled. Specifically, they decompile as an Assign to a target
# with a value that is an AugAssign of the same target and the true
# value. To get around this, we just take the AugAssign (which appears
# to be correct) and replace the Assign with it.
# This is the syntax that creates the weird AST:
# a[:b] += c
# if isinstance(node.value, AugAssign):
# return self.visit_AugAssign(node.value)
# handle subscripted assignment for tensor variables
if isinstance(node.targets[0], Subscript):
# helper function to transform subscript into (possibly nested)
# T.set_subtensor statements
def build_subt(subscript, value):
subscript_load = Subscript(ctx=Load(),
slice=subscript.slice,
value=subscript.value)
set_subtensor = simple_Call(
args=[subscript_load, value],
func=Attribute(attr='set_subtensor',
ctx=Load(),
value=Name(ctx=Load(), id='_T__')))
if isinstance(subscript.value, Subscript):
set_subtensor = build_subt(subscript.value, set_subtensor)
return set_subtensor
# get root tensor; check for nested subscripts
tensor = node.targets[0]
while not isinstance(tensor, Name):
try:
tensor = tensor.value
except:
break
if isinstance(tensor, Name):
# transform subscript into set_subtensor
if isinstance(node.value, AugAssign):
value = BinOp(op=node.value.op,
left=node.targets[0],
right=node.value.value)
else:
value = node.value
set_subt = build_subt(subscript=node.targets[0], value=value)
# wrap set_subtensor statements in Assign to root tensor
assign_subtensor = Assign(targets=[Name(ctx=Store(),
id=tensor.id)],
value=set_subt)
# wrap assign_subtensor in If to ensure that the modification
# is only applied to tensor args
self.generic_visit(node.value)
if self.context.infer_updates:
node = self.visit_Assign_with_updates(node)
return If(test=isvar_ast(tensor),
body=[assign_subtensor],
orelse=[node])
else:
self.generic_visit(node)
else:
self.generic_visit(node)
if self.context.infer_updates:
return self.visit_Assign_with_updates(node)
else:
return node
# ==================================================
# ==================================================
#
# AST Modifications
#
# ==================================================
# ==================================================
def visit_Attribute(self, node):
"""
When dealing with an attribute, first see if the object has that
attribute and return it. If not, call the handle_methods method.
"""
self.generic_visit(node)
if isinstance(node.ctx, Store):
return node
else:
new_node = simple_Call(
args=[node.value,
Str(s=node.attr),
self.ast_wrap('handle_methods',
[node.value, Str(s=node.attr)])],
func=Name(ctx=Load(), id='getattr'))
return self.ast_wrap('shadow', new_node)
def visit_AugAssign(self, node):
"""
See documentation for self.visit_Assign() for information on
transformations applied here.
"""
#transform into assign
load_target = load_transformer.generic_visit(copy.deepcopy(node.target))
value = BinOp(op=node.op,
left=self.ast_wrap('shadow', load_target),
right=node.value)
new_node = Assign(targets=[node.target],
value=value)
return self.visit_Assign(new_node)
def visit_Call(self, node):
"""
Whenever a function is called, first pass it to the 'handle_functions'
method. This method examines the function and modifies it prior to
calling it. For example, it might replace `numpy.ones` with
`theano.ones`.
"""
self.generic_visit(node)
node.func = self.ast_wrap('handle_functions', node.func)
# the * and ** syntax won't work if an object has been shadowed...
# if node.starargs:
# node.starargs = self.ast_wrap('handle_shadow_class', node.starargs)
# if node.kwargs:
# node.kwargs = self.ast_wrap('handle_shadow_class', node.kwargs)
return node
def visit_ClassDef(self, node):
return node
def visit_Compare(self, node):
"""
Replaces comparison operators with Theano functions, if either argument
is a tensor variable. Prior to NumPy 1.8, this is required for all
comparisons where the NumPy array is on the left; thereafter it is
required only for == and !=.
Given:
x == y
Becomes:
_ctx__.handle_comparison('eq', x, y)
Which internally performs:
if utils.isvar(x) or utils.isvar(y):
T.eq(x, y)
else:
x == y
This could be done by directly replacing the literal comparison with
the `if` clause, but this wouldn't be compatible with all code. For
example, if the comparison takes place in an `if` clause, the new
(and nested) `if` clause would be illegal syntax. Wrapping the `isvar`
check in a function call means the syntax remains compatible.
"""
self.generic_visit(node)
if isinstance(node.ops[0], Eq):
theano_op = Str(s='eq')
elif isinstance(node.ops[0], NotEq):
theano_op = Str(s='neq')
elif isinstance(node.ops[0], Gt):
theano_op = Str(s='gt')
elif isinstance(node.ops[0], GtE):
theano_op = Str(s='ge')
elif isinstance(node.ops[0], Lt):
theano_op = Str(s='lt')
elif isinstance(node.ops[0], LtE):
theano_op = Str(s='le')
else:
# Is, IsNot, In, NotIn
return node
if len(node.comparators) == 1:
return self.ast_wrap('handle_comparison',
[theano_op, node.left, node.comparators[0]])
else:
return node
def visit_FunctionDef(self, node):
"""
When a function is defined, shadow each of its arguments immediately.
The AST is modified so that a function defined as:
def f(a, b=None, *c, **d):
...
is changed via this method to:
def f(a, b=None, *c, **d):
a = self.shadow(a)
b = self.shadow(b)
c = self.shadow(c)
d = self.shadow(d)
tag(a, 'a')
tag(b, 'b')
for k, v in d.items():
tag(v, k)
...
This way, any future references to these variables will access their
shadowed values. This is important because inplace modifications do
not always force the `shadow` method to get called, and so the inplace
changes might not be reflected the next (and first!) time the variable
is loaded.
"""
self.generic_visit(node)
assigns = []
tags = []
# shadow and tag args
for param in node.args.args:
assigns.append(Assign(
targets=[Name(ctx=Store(), id=param.arg)],
value=self.ast_wrap('shadow', Name(ctx=Load(), id=param.arg))))
tags.append(Expr(value=self.ast_wrap(
method_name='handle_tag_function_arg',
args=[Name(ctx=Load(), id=param.arg), Str(s=param.arg)])))
# shadow the varargs
if node.args.vararg:
if isinstance(node.args.vararg, str):
node.args.vararg = arg(annotation=None, arg=node.args.vararg)
assigns.append(Assign(
targets=[Name(ctx=Store(), id=node.args.vararg.arg)],
value=self.ast_wrap('shadow', Name(ctx=Load(),
id=node.args.vararg.arg))))
# shadow and tag the kwargs
if node.args.kwarg:
if isinstance(node.args.kwarg, str):
node.args.kwarg = arg(annotation=None, arg=node.args.kwarg)
assigns.append(Assign(
targets=[Name(ctx=Store(), id=node.args.kwarg.arg)],
value=self.ast_wrap('shadow', Name(ctx=Load(),
id=node.args.kwarg.arg))))
tags.append(For(
body=[Expr(value=self.ast_wrap(
method_name='handle_tag_function_arg',
args=[Name(ctx=Load(), id='v'),
Name(ctx=Load(), id='k')]))],
iter=simple_Call(
func=Attribute(attr='items',
ctx=Load(),
value=Name(ctx=Load(),
id=node.args.kwarg.arg))),
orelse=[],
target=Tuple(ctx=Store(), elts=[Name(ctx=Store(), id='k'),
Name(ctx=Store(), id='v')])))
if node is self.context._top_def:
node.body = assigns + tags + node.body
self.context._top_def = None
else:
node.body = assigns + node.body
return node
def visit_If(self, node):
"""
Transform this:
if <statement>:
...
else:
...
to this:
if escape(<statement>):
...
else:
...
This means that the if statement's test clause will be evaluated at
runtime. Note that this does NOT carry over to the compiled Theano
code. It just protects against the following case:
if x:
<do something>
If x is a shadowed variable, then it always resolves to True. However,
x could have a value of 0, in which case this shouldn't pass. Escaping
x resolves it when the function is called.
"""
self.generic_visit(node)
node.test = self.ast_wrap('handle_escape', node.test)
return node
def visit_Subscript(self, node):
"""
Theano does not have a bool dtype, and therefore does not support
Numpy's advanced indexing with boolean masks. For example, the
following is interpreted as requested many items at the indices 1 and
0, not as a boolean mask:
x[x > 0.5]
It is possible to replicate the boolean mask behavior in Theano with
the following construction:
x[(x > 0.5).nonzero()]
tensor.nonzero() returns a tuple of indices corresponding to the
nonzero elements. Thus, this properly selects the desired elements but
is not compatible with Numpy comparisons anywhere else.
To resolve this, if a Theano 'int8' subscript or index is requested,
it is treated as a boolean mask and wrapped in a nonzero() call.
NOTE THIS DOESN'T HANDLE ALL CASES
"""
self.generic_visit(node)
if isinstance(node.slice, Index):
node.slice = Index(value=self.ast_wrap('handle_subscript',
node.slice.value))
return node
def visit_Name(self, node):
"""
Whenever a literal variable name is loaded, call the
'shadow' method on its value.
"""
# self.generic_visit(node)
if isinstance(node.ctx, Load):
node = self.ast_wrap('shadow', node)
return node
class LoadTransformer(NodeTransformer):
def generic_visit(self, node):
node = super(LoadTransformer, self).generic_visit(node)
if hasattr(node, 'ctx'):
if isinstance(node.ctx, Store):
node.ctx = Load()
return node
load_transformer = LoadTransformer() | bsd-3-clause | -2,614,805,434,270,758,400 | 35.877158 | 88 | 0.493572 | false | 4.650477 | false | false | false |
Froskekongen/oslodatascience-rl | havakv/havakv_atari.py | 1 | 24577 | import numpy as np
import gym
from keras.layers import Conv2D, Dense, Input, Flatten
from keras.models import Model, load_model
from keras.optimizers import RMSprop
from keras.utils.np_utils import to_categorical
import keras.backend as K
from common import LogPong
from skimage.color import rgb2gray
from skimage.transform import resize
class Game(object):
'''Class for playing an atari game.'''
def __init__(self, gameName, agent, render=False, logfile=None):
self.gameName = gameName
self.agent = agent
self.render = render
self.logfile = logfile
self.logger = LogPong(self.logfile) if self.logfile is not None else None
def _resetEpisode(self):
self.rewardSum = 0
self.episode += 1
observation = self.env.reset()
return observation
def play(self):
'''Play the game'''
self.setupGame()
while True:
self.step()
def setupGame(self):
self.env = gym.make(self.gameName)
self.episode = 0 # becomes 1 when we start
self.observation = self._resetEpisode()
def step(self):
'''Step one frame in game.
Need to run setupGame before we can step.
'''
if self.render: self.env.render()
action = self.agent.drawAction(self.observation)
# step the environment and get new measurements
self.observation, reward, done, info = self.env.step(action)
self.rewardSum += reward
self.agent.update(reward, done, info)
if done: # an episode has finished
print('ep %d: reward total was %f.' % (self.episode, self.rewardSum))
if self.logger is not None:
self.logger.log(self.episode, self.rewardSum) # log progress
self.observation = self._resetEpisode()
# class _GameSingleForMultiple(Game):
# '''This class is similar to the Game class, but it used for playing multiple games.
# It is created to be used with MultiGames.
# '''
# def step(self):
# self.observation, reward, done, info = self.env.step(action)
# self.rewardSum += reward
# def step(self):
# '''Step one frame in game.
# Need to run setupGame before we can step.
# '''
# raise NotImplementedError
# action = self.agent.drawAction(self.observation)
# # step the environment and get new measurements
# self.observation, reward, done, info = self.env.step(action)
# self.rewardSum += reward
# self.agent.update(reward, done, info)
# if done: # an episode has finished
# print('ep %d: reward total was %f.' % (self.episode, self.rewardSum))
# if self.logger is not None:
# self.logger.log(self.episode, self.rewardSum) # log progress
# self.observation = self._resetEpisode()
# class MultiGames(Game):
# '''Play multiple games with a single agent.'''
# def __init__(self, gameName, nbReplicates, agent, render=False, logfile=None):
# super().__init__(gameName, agent, render, logfile)
# self.nbReplicates = nbReplicates
# def setuptGame(self):
# raise NotImplementedError('This function is not used for multiple games')
# def setupGames(self):
# self.envs = [gym.make(self.gameName) for _ in range(nbReplicates)]
# class GameReplicates(object):
# '''Play multiple replicates of the same game but NOT parallelized.
# nbReplicates: Number of replicates.
# gameName: The name of the game.
# agents: A MultipleAgents object holding all agents.
# logfile:
# '''
# def __init__(self, nbReplicates, gameName, agents, logfile=None):
# self.nbReplicates = nbReplicates
# self.gameName = gameName
# self.agents = agents
# self.logfile = logfile
# self.logger = LogPong(self.logfile) if self.logfile is not None else None
# def setupGames(self):
# # Only one game are used for logging
# self.games = [Game(self.gameName, self.agents.mainAgent, False, self.logfile)]
# for agent in self.agents.workerAgents:
# self.games.append(Game(self.gameName, agent, False, None))
# for game in self.games:
# game.setupGame()
# def step(self):
# '''Step through all games.'''
# for game in self.games:
# if not game.agent.done: game.step()
# def play(self):
# '''Play all games.'''
# self.setupGames()
# while True:
# self.step()
# # if all games are done
# if False not in [game.agent.done for game in self.games]:
# self.agents.updateAgents()
# class MultipleAgents(object):
# ''''Does nothing, but can possibly be used for distributed agents...
# The first agent will be used for updating the model, and the model will be sent
# to the others.
# '''
# def __init__(self, nbReplicates, agentClass, **kwargsAgent):
# self.nbReplicates = nbReplicates
# self.agentClass = agentClass
# self.kwargsAgent = kwargsAgent
# raise NotImplementedError('Does nothing, but can possibly be used for distributed agents...')
# @property
# def workerAgents(self):
# return self.agents[1:]
# @property
# def mainAgent(self):
# return self.agents[0]
# def setupAgents(self):
# self.agents = [self.agentClass(**self.kwargsAgent) for _ in self.nbReplicates]
# self.mainAgent.setupModel()
# self.distributeModelToWorkers()
# def updateAgents(self):
# '''Update the model in the agents.'''
# self.collectExperiences()
# self.updateModelMainAgent()
# self.distributeModelToWorkers()
# self.resetExperiences()
# raise NotImplementedError('Need to reset agents!!!!!!!!')
    #         raise NotImplementedError('Need to take into account that we store last observation in observation list, making it longer.')
# raise NotImplementedError('Set rewards if game not done')
# def resetExperiences():
# '''Reset experiences of the agents'''
# raise NotImplementedError
# def collectExperiences(self):
# for agent in self.workerAgents:
# self.mainAgent.appendExperiences(agent.getExperiences())
# def updateModelMainAgent(self):
# '''Perform the update of the model in the mainAgent.'''
# self.mainAgent.updateModel()
# def distributeModelToWorkers(self):
# '''Send the main model to the worker agents.'''
# for agent in self.workerAgents:
# agent.model = self.mainAgent.model
class Agent(object):
'''Abstract class for an agent.
An Agent should implement:
- model: typically a keras model object.
- update: update agent after every response (handle response form env. and call updataModel method).
- preprocess: preprocess observation from environment. Called by drawAction.
- policy: give an action based on predictions.
- updateModel: update the model object.
'''
model = NotImplemented # object for holding the model.
def __init__(self):
self.resetMemory()
def update(self, reward, done, info):
'''Is called to receive the feedback from the environment.
It has three tasks:
- store relevant feedback
- update model if appropriate
- handle end of game (e.g. reset some states)
'''
raise NotImplementedError
def preprocess(self, observation):
'''Preprocess observation, and typically store in states list'''
raise NotImplementedError
def policy(self, pred):
'''Returns an action based on given predictions.'''
raise NotImplementedError
def updateModel(self):
'''Should do all work with updating weights.'''
raise NotImplementedError
def setupModel(self):
'''Function for setting up the self.model object'''
raise NotImplementedError
def resetExperiences(self):
'''Resetting agent after updating the model.'''
raise NotImplementedError
def resetMemory(self):
'''Resets actions, states, and rewards.'''
self.actions = []
        self.states = []
self.rewards = []
def currentState(self):
'''Returns the latest state.'''
return self.states[-1]
def drawAction(self, observation):
'''Draw an action based on the new observation.'''
self.preprocess(observation)
pred = self.predict(self.currentState())
action = self.policy(pred)
self.actions.append(action)
return action
def predict(self, states):
'''Returns predictions based on give states.'''
return self.model.predict(states)
    def getExperiences(self):
'''Should return all experiences.
Useful when we have multiple worker agents.
'''
return [self.actions, self.states, self.rewards]
def appendExperiences(self, experiences):
'''Should append experiences from getExperiences().
Useful when we have multiple worker agents.
'''
self.actions, self.states, self.rewards = experiences
class StandardAtari(Agent):
'''Abstract class for the standard atari models
Includes:
- preprocessing of atari images.
- keras model.
'''
D = 84 # Scaled images are 84x84.
nbImgInState = 4 # We pass the last 4 images as a state.
def preprocess(self, observation):
'''Preprocess observation, and typically store in states list'''
observation = self.preprocessImage(observation)
newState = np.zeros((1, self.D, self.D, self.nbImgInState))
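        # Maintain a rolling window of the last nbImgInState frames: shift the
        # previous frames one channel to the left and put the new frame last.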
if len(self.states) != 0:
newState[..., :-1] = self.currentState()[..., 1:]
newState[..., -1] = observation
self.states.append(newState)
def preprocessImage(self, img):
'''Compute luminance (grayscale in range [0, 1]) and resize to (D, D).'''
img = rgb2gray(img) # compute luminance 210x160
img = resize(img, (self.D, self.D), mode='constant') # resize image
return img
def setupModel(self):
'''Not Implemented (Just a suggestion for structure):
Set up the standard DeepMind convnet in Keras.
modelInputShape = (self.D, self.D, self.nbImgInState)
self.model = self.deepMindAtariNet(self.nbClasses, modelInputShape, True)
model.compile(...)
'''
raise NotImplementedError
@staticmethod
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
'''Set up the 3 conv layer keras model.
        nbClasses: Number of outputs.
inputShape: The input shape without the batch size.
includeTop: If you only want the whole net, or just the convolutions.
'''
inp = Input(shape=inputShape)
x = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu', border_mode='same', name='conv1')(inp)
x = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu', border_mode='same', name='conv2')(x)
x = Conv2D(64, 3, 3, activation='relu', border_mode='same', name='conv3')(x)
if includeTop:
x = Flatten(name='flatten')(x)
x = Dense(512, activation='relu', name='dense1')(x)
out = Dense(nbClasses, activation='softmax', name='output')(x)
else:
out = x
model = Model(inp, out)
return model
class A2C_OneGame(StandardAtari):
    '''Almost like the A3C agent, but with only one game played.
nbClasses: Number of action classes.
nbSteps: Number of steps before updating the agent.
actionSpace: Allowed actions (passed to atari).
'''
gamma = 0.99 # discount factor for reward
mseBeta = 0.5 # Weighting of value mse loss.
entropyBeta = 0.1 # Weighting of entropy loss.
learningRate = 1e-4
decayRate = 0.99 # decay factor for RMSProp leaky sum of grad^2
def __init__(self, nbClasses, nbSteps, actionSpace, modelFileName, resume=False, setupModel=True):
super().__init__()
self.nbClasses = nbClasses
self.nbSteps = nbSteps
self.actionSpace = actionSpace
self.modelFileName = modelFileName
self.resume = resume
if setupModel:
self.setupModel()
self._makeActionClassMapping()
self.episode = 0
self.stepNumber = 0 # iterates every frame
def resetMemory(self):
'''Resets actions, states, rewards, and predicted values.'''
super().resetMemory()
self.valuePreds = []
def _makeActionClassMapping(self):
self.action2Class = {action: i for i, action in enumerate(self.actionSpace)}
self.class2Action = {i: action for i, action in enumerate(self.actionSpace)}
def setupModel(self):
'''Setup models:
self.actionModel is the action predictions.
self.valueModel is the prediction of the value function.
self.model is the model with both outputs
'''
if self.resume:
self.model = load_model(self.modelFileName)
# Need the other models as well...
return
inputShape = (self.D, self.D, self.nbImgInState)
model = self.deepMindAtariNet(self.nbClasses, inputShape, includeTop=False)
inp = Input(shape=inputShape)
x = model(inp)
x = Flatten()(x)
x = Dense(512, activation='relu', name='dense1')(x)
action = Dense(self.nbClasses, activation='softmax', name='action')(x)
self.actionModel = Model(inp, action)
# Should we compile model?
value = Dense(1, activation='linear', name='value')(x)
self.valueModel = Model(inp, value)
# Should we compile model?
self.model = Model(inp, [action, value])
# loss = {'action': 'categorical_crossentropy', 'value': 'mse'}
# loss = {'action': categoricalCrossentropyWithWeights, 'value': 'mse'}
actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
loss_weights = {'action': 1, 'value': self.mseBeta}
optim = RMSprop(self.learningRate, self.decayRate)
self.model.compile(optim, loss) # Need to make it possible to set other optimizers
def drawAction(self, observation):
        '''Draw an action based on the new observation.'''
self.preprocess(observation)
actionPred, valuePred = self.predict(self.currentState())
self.valuePreds.append(valuePred)
action = self.policy(actionPred)
self.actions.append(action)
return action
def policy(self, pred):
sampleClass = np.random.choice(range(self.nbClasses), 1, p=pred[0])[0]
action = self.class2Action[sampleClass]
return action
def update(self, reward, done, info):
self.rewards.append(reward)
self.stepNumber += 1
if (self.stepNumber == self.nbSteps) or done:
if len(self.states) == 1 + len(self.actions):
self.states = self.states[1:] # The first element is from last update
if not done:
self.rewards[-1] = self.valuePreds[-1]
self.updateModel()
self.resetExperiences()
self.stepNumber = 0
# prevState = self.currentState()
# self.resetMemory()
# self.states.append(prevState) # Store last state (if not done)
if done:
self.episode += 1
# self.resetMemory()
if self.episode % 10 == 0:
self.model.save(self.modelFileName)
def resetExperiences(self, done=False):
'''Resetting agent after updating the model.
done: If game has passed done=True.
'''
if done:
self.resetMemory()
else:
prevState = self.currentState()
self.resetMemory()
self.states.append(prevState) # Store last state (if not done)
def updateModel(self):
rewards = np.vstack(self.rewards)
discountedRewards = self._discountRewards(rewards)
X = np.vstack(self.states)
fakeLabels = [self.action2Class[action] for action in self.actions]
Y = np.vstack(fakeLabels)
valuePreds = np.vstack(self.valuePreds)
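        # Advantage estimate: discounted returns minus the critic's value
        # predictions, used as per-sample weights for the policy loss.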
actionValues = discountedRewards - valuePreds
Y = responseWithSampleWeights(Y, actionValues, self.nbClasses)
self.model.train_on_batch(X, [Y, discountedRewards])
def _discountRewards(self, r):
"""Take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * self.gamma + r[t]
discounted_r[t] = running_add
return discounted_r
    def getExperiences(self):
'''Should return all experiences.
Useful when we have multiple worker agents.
'''
return [self.actions, self.states, self.rewards, self.valuePreds]
def appendExperiences(self, experiences):
'''Should append experiences from getExperiences().
Useful when we have multiple worker agents.
'''
self.actions, self.states, self.rewards, self.valuePreds = experiences
class A3C_SingleWorker(A2C_OneGame):
'''Like the A3C, but it does not update the model.
It only plays the game.
'''
    done = False
def update(self, reward, done, info):
self.rewards.append(reward)
self.stepNumber += 1
if (self.stepNumber == self.nbSteps) or done:
self.done = True
def responseWithSampleWeights(y, sampleWeights, nbClasses):
'''Function for making labels ytrueWithWeights passed to
categoricalCrossentropyWithWeights(ytrueWithWeights, ypred).
y: Vector with zero-indexed classes.
sampleWeights: vector of sample weights.
nbClasses: number of classes.
    returns: One-hot matrix of y with the sample weights appended as the last column.
'''
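    # Illustrative example: responseWithSampleWeights(np.array([0, 1]),
    # np.array([0.5, -1.0]), 2) returns [[1., 0., 0.5], [0., 1., -1.]].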
n = len(y)
Y = np.zeros((n, nbClasses + 1))
Y[:, :-1] = to_categorical(y, nbClasses)
Y[:, -1] = sampleWeights.flatten()
return Y
def categoricalCrossentropyWithWeights(ytrueWithWeights, ypred):
'''Like regular categorical cross entropy, but with sample weights for every row.
ytrueWithWeights is a matrix where the first columns are one hot encoder for the
classes, while the last column contains the sample weights.
'''
return K.categorical_crossentropy(ypred, ytrueWithWeights[:, :-1]) * ytrueWithWeights[:, -1]
def entropyLoss(ypred):
'''Entropy loss.
Loss = - sum(pred * log(pred))
'''
return K.categorical_crossentropy(ypred, ypred)
def makeActionAndEntropyLossA3C(beta):
'''The part of the A3C loss function concerned with the actions,
i.e. action loss and entropy loss.
Here we return the loss function than can be passed to Keras.
beta: Weighting of entropy.
'''
def loss(ytrueWithWeights, ypred):
'''Action and entropy loss for the A3C algorithm.
ytrueWithWeights: A matrix where the first columns are one hot encoder for the
classes, while the last column contains the sample weights.
ypred: Predictions.
'''
policyLoss = categoricalCrossentropyWithWeights(ytrueWithWeights, ypred)
entropy = entropyLoss(ypred)
        return policyLoss - beta * entropy  # subtract: entropy is positive and minimal at probabilities of 0 or 1, so this penalizes overly confident policies
return loss
class KarpathyPolicyPong(Agent):
'''Karpathy dense policy network.'''
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-3
gamma = 0.99 # discount factor for reward
decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
D = 80 * 80 # input dimensionality: 80x80 grid
def __init__(self, modelFileName, resume=False):
super().__init__()
self.modelFileName = modelFileName
self.resume = resume
self.prev_x = None
self.episode = 0
self.setupModel()
def policy(self, pred):
'''Returns an action based on given predictions.'''
action = 2 if np.random.uniform() < pred else 3 # roll the dice!
return action
def update(self, reward, done, info):
'''See update func in Agent class'''
self.rewards.append(reward)
if done:
self.episode += 1
self.prev_x = None
if self.episode % self.batch_size == 0:
self.updateModel()
def updateModel(self):
'''Should do all work with updating weights.'''
print('Updating weights...')
# stack together all inputs, actions, and rewards for this episode
epx = np.vstack(self.states)
fakeLabels = [1 if action == 2 else 0 for action in self.actions]
epy = np.vstack(fakeLabels)
epr = np.vstack(self.rewards)
self.resetMemory()
# compute the discounted reward backwards through time
discounted_epr = self._discountRewards(epr)
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
# update our model weights (all in one batch)
self.model.train_on_batch(epx, epy, sample_weight=discounted_epr.reshape((-1,)))
if self.episode % (self.batch_size * 3) == 0:
self.model.save(self.modelFileName)
def _discountRewards(self, r):
""" take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * self.gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def setupModel(self):
"""Make keras model"""
if self.resume:
self.model = load_model(self.modelFileName)
else:
inp = Input(shape=(self.D,))
h = Dense(self.H, activation='relu')(inp)
out = Dense(1, activation='sigmoid')(h)
self.model = Model(inp, out)
optim = RMSprop(self.learning_rate, self.decay_rate)
self.model.compile(optim, 'binary_crossentropy')
@staticmethod
def _preprocess_image(I):
'''Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector'''
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel()
def preprocess(self, observation):
'''Proprocess observation. And store in states list'''
cur_x = self._preprocess_image(observation)
x = cur_x - self.prev_x if self.prev_x is not None else np.zeros(self.D)
self.prev_x = cur_x
x = x.reshape((1, -1))
self.states.append(x)
#--------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------
def test():
render = False
filename = 'test.h5'
resume = False
# filename = 'pong_gym_keras_mlp_full_batch.h5'
# resume = True
# render = True
gym.undo_logger_setup() # Stop gym logging
agent = KarpathyPolicyPong(filename, resume=resume)
game = Game('Pong-v0', agent, render=render, logfile='test.log')
game.play()
def testA2C():
render = False
filename = 'testA2C.h5'
resume = False
# resume = True
# render = True
gym.undo_logger_setup() # Stop gym logging
actionSpace = [2, 3]
agent = A2C_OneGame(2, 1024, actionSpace, filename, resume=resume)
game = Game('Pong-v0', agent, render=render, logfile='test.log')
game.play()
if __name__ == '__main__':
# test()
testA2C()
| mit | 3,123,711,620,279,126,500 | 35.68209 | 134 | 0.616308 | false | 3.968513 | false | false | false |
benvanwerkhoven/kernel_tuner | examples/cuda/zeromeanfilter.py | 1 | 1578 | #!/usr/bin/env python
from collections import OrderedDict
import numpy
from kernel_tuner import tune_kernel, run_kernel
def tune_zeromean():
with open('zeromeanfilter.cu', 'r') as f:
kernel_string = f.read()
height = numpy.int32(4391)
width = numpy.int32(3539)
image = numpy.random.randn(height*width).astype(numpy.float32)
tune_vertical(kernel_string, image, height, width)
tune_horizontal(kernel_string, image, height, width)
def tune_vertical(kernel_string, image, height, width):
args = [height, width, image]
#only one row of thread-blocks is to be created
problem_size = (width, 1)
grid_div_x = ["block_size_x"]
grid_div_y = []
tune_params = OrderedDict()
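    # Search space: block_size_x takes multiples of 32 up to 256 and
    # block_size_y powers of two up to 32; tune_kernel benchmarks these
    # configurations.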
tune_params["block_size_x"] = [32*i for i in range(1,9)]
tune_params["block_size_y"] = [2**i for i in range(6)]
return tune_kernel("computeMeanVertically", kernel_string, problem_size, args, tune_params,
grid_div_y=grid_div_y, grid_div_x=grid_div_x)
def tune_horizontal(kernel_string, image, height, width):
args = [height, width, image]
#use only one column of thread blocks
problem_size = (1, height)
grid_div_x = []
grid_div_y = ["block_size_y"]
tune_params = OrderedDict()
tune_params["block_size_x"] = [32*i for i in range(1,9)]
tune_params["block_size_y"] = [2**i for i in range(6)]
return tune_kernel("computeMeanHorizontally", kernel_string, problem_size, args, tune_params,
grid_div_y=grid_div_y, grid_div_x=grid_div_x)
if __name__ == "__main__":
tune_zeromean()
| apache-2.0 | 5,507,747,072,151,338,000 | 27.178571 | 97 | 0.652091 | false | 3.017208 | false | false | false |
dangra/scrapy | scrapy/pqueues.py | 3 | 6791 | import hashlib
import logging
from scrapy.utils.misc import create_instance
logger = logging.getLogger(__name__)
def _path_safe(text):
"""
Return a filesystem-safe version of a string ``text``
>>> _path_safe('simple.org').startswith('simple.org')
True
>>> _path_safe('dash-underscore_.org').startswith('dash-underscore_.org')
True
>>> _path_safe('some@symbol?').startswith('some_symbol_')
True
"""
pathable_slot = "".join([c if c.isalnum() or c in '-._' else '_'
for c in text])
    # as we replace some letters we can get collisions for different slots,
    # so we add a unique part
unique_slot = hashlib.md5(text.encode('utf8')).hexdigest()
return '-'.join([pathable_slot, unique_slot])
class ScrapyPriorityQueue:
"""A priority queue implemented using multiple internal queues (typically,
FIFO queues). It uses one internal queue for each priority value. The internal
queue must implement the following methods:
* push(obj)
* pop()
* close()
* __len__()
``__init__`` method of ScrapyPriorityQueue receives a downstream_queue_cls
argument, which is a class used to instantiate a new (internal) queue when
a new priority is allocated.
Only integer priorities should be used. Lower numbers are higher
priorities.
startprios is a sequence of priorities to start with. If the queue was
previously closed leaving some priority buckets non-empty, those priorities
should be passed in startprios.
"""
@classmethod
def from_crawler(cls, crawler, downstream_queue_cls, key, startprios=()):
return cls(crawler, downstream_queue_cls, key, startprios)
def __init__(self, crawler, downstream_queue_cls, key, startprios=()):
self.crawler = crawler
self.downstream_queue_cls = downstream_queue_cls
self.key = key
self.queues = {}
self.curprio = None
self.init_prios(startprios)
def init_prios(self, startprios):
if not startprios:
return
for priority in startprios:
self.queues[priority] = self.qfactory(priority)
self.curprio = min(startprios)
def qfactory(self, key):
return create_instance(self.downstream_queue_cls,
None,
self.crawler,
self.key + '/' + str(key))
def priority(self, request):
return -request.priority
def push(self, request):
priority = self.priority(request)
if priority not in self.queues:
self.queues[priority] = self.qfactory(priority)
q = self.queues[priority]
q.push(request) # this may fail (eg. serialization error)
if self.curprio is None or priority < self.curprio:
self.curprio = priority
def pop(self):
if self.curprio is None:
return
q = self.queues[self.curprio]
m = q.pop()
if not q:
del self.queues[self.curprio]
q.close()
prios = [p for p, q in self.queues.items() if q]
self.curprio = min(prios) if prios else None
return m
def close(self):
active = []
for p, q in self.queues.items():
active.append(p)
q.close()
return active
def __len__(self):
return sum(len(x) for x in self.queues.values()) if self.queues else 0
class DownloaderInterface:
def __init__(self, crawler):
self.downloader = crawler.engine.downloader
def stats(self, possible_slots):
return [(self._active_downloads(slot), slot)
for slot in possible_slots]
def get_slot_key(self, request):
return self.downloader._get_slot_key(request, None)
def _active_downloads(self, slot):
""" Return a number of requests in a Downloader for a given slot """
if slot not in self.downloader.slots:
return 0
return len(self.downloader.slots[slot].active)
class DownloaderAwarePriorityQueue:
""" PriorityQueue which takes Downloader activity into account:
domains (slots) with the least amount of active downloads are dequeued
first.
"""
@classmethod
def from_crawler(cls, crawler, downstream_queue_cls, key, startprios=()):
return cls(crawler, downstream_queue_cls, key, startprios)
def __init__(self, crawler, downstream_queue_cls, key, slot_startprios=()):
if crawler.settings.getint('CONCURRENT_REQUESTS_PER_IP') != 0:
raise ValueError(f'"{self.__class__}" does not support CONCURRENT_REQUESTS_PER_IP')
if slot_startprios and not isinstance(slot_startprios, dict):
raise ValueError("DownloaderAwarePriorityQueue accepts "
"``slot_startprios`` as a dict; "
f"{slot_startprios.__class__!r} instance "
"is passed. Most likely, it means the state is"
"created by an incompatible priority queue. "
"Only a crawl started with the same priority "
"queue class can be resumed.")
self._downloader_interface = DownloaderInterface(crawler)
self.downstream_queue_cls = downstream_queue_cls
self.key = key
self.crawler = crawler
self.pqueues = {} # slot -> priority queue
for slot, startprios in (slot_startprios or {}).items():
self.pqueues[slot] = self.pqfactory(slot, startprios)
def pqfactory(self, slot, startprios=()):
return ScrapyPriorityQueue(self.crawler,
self.downstream_queue_cls,
self.key + '/' + _path_safe(slot),
startprios)
def pop(self):
stats = self._downloader_interface.stats(self.pqueues)
if not stats:
return
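        # stats is a list of (active_download_count, slot) pairs, so min()
        # selects the slot with the fewest requests currently downloading.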
slot = min(stats)[1]
queue = self.pqueues[slot]
request = queue.pop()
if len(queue) == 0:
del self.pqueues[slot]
return request
def push(self, request):
slot = self._downloader_interface.get_slot_key(request)
if slot not in self.pqueues:
self.pqueues[slot] = self.pqfactory(slot)
queue = self.pqueues[slot]
queue.push(request)
def close(self):
active = {slot: queue.close()
for slot, queue in self.pqueues.items()}
self.pqueues.clear()
return active
def __len__(self):
return sum(len(x) for x in self.pqueues.values()) if self.pqueues else 0
def __contains__(self, slot):
return slot in self.pqueues
| bsd-3-clause | 4,832,771,251,509,741,000 | 32.955 | 95 | 0.594316 | false | 4.14338 | false | false | false |
brownnrl/moneyguru | core/gui/mass_edition_panel.py | 2 | 6043 | # Copyright 2019 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import weakref
from datetime import date
from core.util import allsame, flatten
from ..model.currency import Currencies
from .base import GUIPanel
from .completable_edit import CompletableEdit
from .selectable_list import LinkedSelectableList
from .text_field import TextField
class MassEditTextField(TextField):
def __init__(self, panel, fieldname):
TextField.__init__(self)
self._panel = panel
self._attrname = '_' + fieldname
self._enabledname = fieldname + '_enabled'
def _update(self, newvalue):
setattr(self._panel, self._attrname, newvalue)
setattr(self._panel, self._enabledname, True)
self._panel.view.refresh()
class MassEditDateField(MassEditTextField):
def _parse(self, text):
return self._panel.app.parse_date(text)
def _format(self, value):
return self._panel.app.format_date(value)
class MassEditAmountField(MassEditTextField):
def _parse(self, text):
return self._panel.document.parse_amount(text)
def _format(self, value):
return self._panel.document.format_amount(value)
class MassEditionPanel(GUIPanel):
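    # Edits several transactions at once. Every field starts disabled and is
    # only written back in _save() after the user changes it, because each
    # field setter flips the corresponding *_enabled flag.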
def __init__(self, mainwindow):
GUIPanel.__init__(self, mainwindow)
self_proxy = weakref.proxy(self)
self.date_field = MassEditDateField(self_proxy, 'date')
self.description_field = MassEditTextField(self_proxy, 'description')
self.payee_field = MassEditTextField(self_proxy, 'payee')
self.checkno_field = MassEditTextField(self_proxy, 'checkno')
self.from_field = MassEditTextField(self_proxy, 'from')
self.to_field = MassEditTextField(self_proxy, 'to')
self.amount_field = MassEditAmountField(self_proxy, 'amount')
self.completable_edit = CompletableEdit(mainwindow)
def setfunc(index):
try:
currency = Currencies.code_at_index(index)
except IndexError:
currency = None
if currency != self_proxy.currency:
self_proxy.currency = currency
self_proxy.currency_enabled = currency is not None
self_proxy.view.refresh()
self.currency = None
self.currency_list = LinkedSelectableList(
items=Currencies.display_list(), setfunc=setfunc)
self._init_checkboxes()
# --- Private
def _init_checkboxes(self):
self.date_enabled = False
self.description_enabled = False
self.payee_enabled = False
self.checkno_enabled = False
self.from_enabled = False
self.to_enabled = False
self.amount_enabled = False
self.currency_enabled = False
# --- Override
def _load(self, transactions):
assert len(transactions) >= 2
self.can_change_accounts = all(len(t.splits) == 2 for t in transactions)
self.can_change_amount = all(t.can_set_amount for t in transactions)
self.date_field.value = date.today()
self.description_field.text = ''
self.payee_field.text = ''
self.checkno_field.text = ''
self.from_field.text = ''
self.to_field.text = ''
self.amount_field.value = 0
self.currency = None
first = transactions[0]
if allsame(t.date for t in transactions):
self.date_field.value = first.date
if allsame(t.description for t in transactions):
self.description_field.text = first.description
if allsame(t.payee for t in transactions):
self.payee_field.text = first.payee
if allsame(t.checkno for t in transactions):
self.checkno_field.text = first.checkno
splits = flatten(t.splits for t in transactions)
splits = [s for s in splits if s.amount]
if splits and allsame(s.amount.currency_code for s in splits):
self.currency = splits[0].amount.currency_code
else:
self.currency = self.document.default_currency
try:
self.currency_list.select(Currencies.index(self.currency))
except IndexError:
pass
if self.can_change_accounts:
def get_from(t):
s1, s2 = t.splits
return s1 if s1.amount <= 0 else s2
def get_to(t):
s1, s2 = t.splits
return s2 if s1.amount <= 0 else s1
def get_name(split):
return split.account.name if split.account is not None else ''
if allsame(get_name(get_from(t)) for t in transactions):
self.from_field.text = get_name(get_from(first))
if allsame(get_name(get_to(t)) for t in transactions):
self.to_field.text = get_name(get_to(first))
if self.can_change_amount:
if allsame(t.amount for t in transactions):
self.amount_field.value = first.amount
self._init_checkboxes()
def _save(self):
transactions = self.mainwindow.selected_transactions
kw = {}
if self.date_enabled:
kw['date'] = self.date_field.value
if self.description_enabled:
kw['description'] = self.description_field.text
if self.payee_enabled:
kw['payee'] = self.payee_field.text
if self.checkno_enabled:
kw['checkno'] = self.checkno_field.text
if self.from_enabled:
kw['from_'] = self.from_field.text
if self.to_enabled:
kw['to'] = self.to_field.text
if self.amount_enabled:
kw['amount'] = self.amount_field.value
if self.currency_enabled:
kw['currency'] = self.currency
if kw:
self.document.change_transactions(transactions, **kw)
self.mainwindow.revalidate()
| gpl-3.0 | 6,142,993,101,749,881,000 | 36.76875 | 89 | 0.616747 | false | 3.868758 | false | false | false |
chennan47/osf.io | osf_tests/test_files.py | 6 | 2140 | import pytest
from addons.osfstorage import settings as osfstorage_settings
from osf.models import BaseFileNode, Folder, File
from osf_tests.factories import (
UserFactory,
ProjectFactory,
)
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
return UserFactory()
@pytest.fixture()
def project(user):
return ProjectFactory(creator=user)
@pytest.fixture()
def create_test_file(fake):
# TODO: Copied from api_tests/utils.py. DRY this up.
def _create_test_file(node, user=None, filename=None, create_guid=True):
filename = filename or fake.file_name()
user = user or node.creator
osfstorage = node.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file(filename)
if create_guid:
test_file.get_guid(create=True)
test_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
return test_file
return _create_test_file
def test_active_manager_does_not_return_trashed_file_nodes(project, create_test_file):
create_test_file(node=project)
deleted_file = create_test_file(node=project)
deleted_file.delete(user=project.creator, save=True)
# root folder + file + deleted_file = 3 BaseFileNodes
assert BaseFileNode.objects.filter(node=project).count() == 3
# root folder + file = 2 BaseFileNodes
assert BaseFileNode.active.filter(node=project).count() == 2
def test_folder_update_calls_folder_update_method(project, create_test_file):
file = create_test_file(node=project)
parent_folder = file.parent
# the folder update method should be the Folder.update method
assert parent_folder.__class__.update == Folder.update
# the folder update method should not be the File update method
assert parent_folder.__class__.update != File.update
# the file update method should be the File update method
assert file.__class__.update == File.update
| apache-2.0 | -3,613,884,070,172,181,500 | 32.968254 | 86 | 0.676636 | false | 3.689655 | true | false | false |
felixfan/PyHLA | prepareAAalignment/preALN.py | 1 | 1545 | #!/usr/bin/env python
import re
def oneline(infile, outfile):
pattern = re.compile(r'^(\w+)(\*){1}(\d+)(\:?)')
f = open(infile)
geno = {}
ref = ''
flag = False
for i in f:
i = i.strip()
if i and pattern.search(i):
fs = i.split()
if fs[0] not in geno:
geno[fs[0]]=''
for j in range(1, len(fs)):
geno[fs[0]] += fs[j]
if not flag: # the first allele is reference
ref = fs[0]
flag = True
f.close()
f = open(outfile, 'w')
keys = sorted(geno.keys())
for a in keys:
if a == ref:
f.write(a)
f.write('\t')
f.write(geno[a])
f.write('\n')
else:
f.write(a)
f.write('\t')
tmp = geno[a]
for k in range(len(tmp)):
if tmp[k] == '*':
f.write('*')
elif tmp[k] == '-':
f.write(geno[ref][k])
else:
f.write(tmp[k])
if len(geno[ref]) > len(tmp):
for k in range(len(tmp),len(geno[ref])):
f.write('*')
f.write('\n')
def catfiles(fileList, outfile):
fw = open(outfile, 'w')
for f in fileList:
fr = open(f)
for r in fr:
fw.write(r)
fr.close()
fw.close()
if __name__ == '__main__':
infiles = ['A_prot.txt', 'B_prot.txt', 'C_prot.txt', 'DMA_prot.txt', 'DMB_prot.txt', 'DOA_prot.txt', 'DOB_prot.txt','DPA_prot.txt', 'DPB_prot.txt', 'DQA_prot.txt', 'DQB_prot.txt', 'DRA_prot.txt','DRB_prot.txt']
outfiles = ['A.aln', 'B.aln', 'C.aln', 'DMA.aln','DMB.aln','DOA.aln','DOB.aln','DPA.aln', 'DPB.aln', 'DQA.aln', 'DQB.aln', 'DRA.aln','DRB.aln']
for i in range(0, len(infiles)):
oneline(infiles[i], outfiles[i])
catfiles(outfiles, "aa.aln.txt")
| gpl-2.0 | -966,945,801,158,386,200 | 23.539683 | 211 | 0.54822 | false | 2.188385 | false | false | false |
RethinkRobotics/intera_sdk | intera_examples/scripts/camera_display.py | 1 | 4324 | #! /usr/bin/env python
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import cv2
from cv_bridge import CvBridge, CvBridgeError
import rospy
import intera_interface
def show_image_callback(img_data, callback_args):
    """The callback function to show the image using CvBridge and cv2.
    """
    (edge_detection, window_name) = callback_args
bridge = CvBridge()
try:
cv_image = bridge.imgmsg_to_cv2(img_data, "bgr8")
except CvBridgeError as err:
rospy.logerr(err)
return
if edge_detection == True:
gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# customize the second and the third argument, minVal and maxVal
# in function cv2.Canny if needed
get_edge = cv2.Canny(blurred, 10, 100)
cv_image = np.hstack([get_edge])
edge_str = "(Edge Detection)" if edge_detection else ''
cv_win_name = ' '.join([window_name, edge_str])
cv2.namedWindow(cv_win_name, 0)
# refresh the image on the screen
cv2.imshow(cv_win_name, cv_image)
cv2.waitKey(3)
def main():
"""Camera Display Example
Cognex Hand Camera Ranges
- exposure: [0.01-100]
- gain: [0-255]
Head Camera Ranges:
- exposure: [0-100], -1 for auto-exposure
- gain: [0-79], -1 for auto-gain
"""
rp = intera_interface.RobotParams()
valid_cameras = rp.get_camera_names()
if not valid_cameras:
rp.log_message(("Cannot detect any camera_config"
" parameters on this robot. Exiting."), "ERROR")
return
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__)
parser.add_argument(
'-c', '--camera', type=str, default="head_camera",
choices=valid_cameras, help='Setup Camera Name for Camera Display')
parser.add_argument(
'-r', '--raw', action='store_true',
help='Specify use of the raw image (unrectified) topic')
parser.add_argument(
'-e', '--edge', action='store_true',
help='Streaming the Canny edge detection image')
parser.add_argument(
'-g', '--gain', type=int,
help='Set gain for camera (-1 = auto)')
parser.add_argument(
'-x', '--exposure', type=float,
help='Set exposure for camera (-1 = auto)')
args = parser.parse_args(rospy.myargv()[1:])
print("Initializing node... ")
rospy.init_node('camera_display', anonymous=True)
cameras = intera_interface.Cameras()
if not cameras.verify_camera_exists(args.camera):
rospy.logerr("Could not detect the specified camera, exiting the example.")
return
rospy.loginfo("Opening camera '{0}'...".format(args.camera))
cameras.start_streaming(args.camera)
rectify_image = not args.raw
use_canny_edge = args.edge
cameras.set_callback(args.camera, show_image_callback,
rectify_image=rectify_image, callback_args=(use_canny_edge, args.camera))
# optionally set gain and exposure parameters
if args.gain is not None:
if cameras.set_gain(args.camera, args.gain):
rospy.loginfo("Gain set to: {0}".format(cameras.get_gain(args.camera)))
if args.exposure is not None:
if cameras.set_exposure(args.camera, args.exposure):
rospy.loginfo("Exposure set to: {0}".format(cameras.get_exposure(args.camera)))
def clean_shutdown():
print("Shutting down camera_display node.")
cv2.destroyAllWindows()
rospy.on_shutdown(clean_shutdown)
rospy.loginfo("Camera_display node running. Ctrl-c to quit")
rospy.spin()
if __name__ == '__main__':
main()
| apache-2.0 | 4,880,555,437,848,059,000 | 35.957265 | 91 | 0.65148 | false | 3.552999 | false | false | false |
armando-migliaccio/tempest | tempest/test.py | 1 | 12158 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import os
import time
import fixtures
import nose.plugins.attrib
import testresources
import testtools
from tempest import clients
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# All the successful HTTP status codes from RFC 2616
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
def attr(*args, **kwargs):
"""A decorator which applies the nose and testtools attr decorator
    This decorator applies the nose attr decorator as well as
    the testtools.testcase.attr if it is in the list of attributes
to testtools we want to apply.
"""
def decorator(f):
if 'type' in kwargs and isinstance(kwargs['type'], str):
f = testtools.testcase.attr(kwargs['type'])(f)
if kwargs['type'] == 'smoke':
f = testtools.testcase.attr('gate')(f)
elif 'type' in kwargs and isinstance(kwargs['type'], list):
for attr in kwargs['type']:
f = testtools.testcase.attr(attr)(f)
if attr == 'smoke':
f = testtools.testcase.attr('gate')(f)
return nose.plugins.attrib.attr(*args, **kwargs)(f)
return decorator
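# Illustrative usage (not part of the original module; the test method name
# below is a placeholder):
#
#   @attr(type='smoke')
#   def test_list_servers(self):
#       ...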
def services(*args, **kwargs):
"""A decorator used to set an attr for each service used in a test case
This decorator applies a testtools attr for each service that gets
exercised by a test case.
"""
valid_service_list = ['compute', 'image', 'volume', 'orchestration',
'network', 'identity', 'object', 'dashboard']
def decorator(f):
for service in args:
if service not in valid_service_list:
raise exceptions.InvalidServiceTag('%s is not a valid service'
% service)
attr(type=list(args))(f)
return f
return decorator
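# Illustrative usage (not part of the original module; any names from
# valid_service_list above may be passed):
#
#   @services('compute', 'network')
#   def test_attach_interface(self):
#       ...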
def stresstest(*args, **kwargs):
"""Add stress test decorator
For all functions with this decorator a attr stress will be
set automatically.
@param class_setup_per: allowed values are application, process, action
``application``: once in the stress job lifetime
``process``: once in the worker process lifetime
``action``: on each action
@param allow_inheritance: allows inheritance of this attribute
"""
def decorator(f):
if 'class_setup_per' in kwargs:
setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
else:
setattr(f, "st_class_setup_per", 'process')
if 'allow_inheritance' in kwargs:
setattr(f, "st_allow_inheritance", kwargs['allow_inheritance'])
else:
setattr(f, "st_allow_inheritance", False)
attr(type='stress')(f)
return f
return decorator
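# Illustrative usage (not part of the original module; the argument values
# are the ones documented in the docstring above):
#
#   @stresstest(class_setup_per='process', allow_inheritance=True)
#   def test_under_load(self):
#       ...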
def skip_because(*args, **kwargs):
"""A decorator useful to skip tests hitting known bugs
@param bug: bug number causing the test to skip
@param condition: optional condition to be True for the skip to have place
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*func_args, **func_kwargs):
if "bug" in kwargs:
if "condition" not in kwargs or kwargs["condition"] is True:
msg = "Skipped until Bug: %s is resolved." % kwargs["bug"]
raise testtools.TestCase.skipException(msg)
return f(*func_args, **func_kwargs)
return wrapper
return decorator
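# Illustrative usage (not part of the original module; the bug number is a
# placeholder):
#
#   @skip_because(bug="123456")
#   def test_known_broken_behaviour(self):
#       ...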
def requires_ext(*args, **kwargs):
"""A decorator to skip tests if an extension is not enabled
@param extension
@param service
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
if not is_extension_enabled(kwargs['extension'],
kwargs['service']):
msg = "Skipped because %s extension: %s is not enabled" % (
kwargs['service'], kwargs['extension'])
raise testtools.TestCase.skipException(msg)
return func(*func_args, **func_kwargs)
return wrapper
return decorator
def is_extension_enabled(extension_name, service):
"""A function that will check the list of enabled extensions from config
"""
configs = config.TempestConfig()
config_dict = {
'compute': configs.compute_feature_enabled.api_extensions,
'compute_v3': configs.compute_feature_enabled.api_v3_extensions,
'volume': configs.volume_feature_enabled.api_extensions,
'network': configs.network_feature_enabled.api_extensions,
}
if config_dict[service][0] == 'all':
return True
if extension_name in config_dict[service]:
return True
return False
# there is a mis-match between nose and testtools for older pythons.
# testtools will set skipException to be either
# unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
# exception, depending on what it can find. Python <2.7 doesn't have
# unittest.case.SkipTest; so if unittest2 is not installed it falls
# back to the internal class.
#
# The current nose skip plugin will decide to raise either
# unittest.case.SkipTest or its own internal exception; it does not
# look for unittest2 or the internal unittest exception. Thus we must
# monkey-patch testtools.TestCase.skipException to be the exception
# the nose skip plugin expects.
#
# However, with the switch to testr nose may not be available, so we
# require you to opt-in to this fix with an environment variable.
#
# This is temporary until upstream nose starts looking for unittest2
# as testtools does; we can then remove this and ensure unittest2 is
# available for older pythons; then nose and testtools will agree
# unittest2.case.SkipTest is the one-true skip test exception.
#
# https://review.openstack.org/#/c/33056
# https://github.com/nose-devs/nose/pull/699
if 'TEMPEST_PY26_NOSE_COMPAT' in os.environ:
try:
import unittest.case.SkipTest
# convince pep8 we're using the import...
if unittest.case.SkipTest:
pass
raise RuntimeError("You have unittest.case.SkipTest; "
"no need to override")
except ImportError:
LOG.info("Overriding skipException to nose SkipTest")
testtools.TestCase.skipException = nose.plugins.skip.SkipTest
at_exit_set = set()
def validate_tearDownClass():
if at_exit_set:
        raise RuntimeError("tearDownClass does not call the super's "
                           "tearDownClass in these classes: "
                           + str(at_exit_set) + "\n"
                           "If you see this exception along with another "
                           "exception, please do not report this one! "
                           "If you are changing tempest code, make sure you "
                           "are calling the super class's tearDownClass!")
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.TestCase,
testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
config = config.TempestConfig()
setUpClassCalled = False
@classmethod
def setUpClass(cls):
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
@classmethod
def tearDownClass(cls):
at_exit_set.discard(cls)
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
def setUp(self):
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
            raise RuntimeError("setUpClass does not call the super's "
                               "setUpClass in the "
+ self.__class__.__name__)
at_exit_set.add(self.__class__)
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
os.environ.get('OS_LOG_CAPTURE') != '0'):
log_format = '%(asctime)-15s %(message)s'
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=log_format,
level=None))
@classmethod
def get_client_manager(cls):
"""
Returns an Openstack client manager
"""
cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
if (cls.config.compute.allow_tenant_isolation or
force_tenant_isolation):
creds = cls.isolated_creds.get_primary_creds()
username, tenant_name, password = creds
os = clients.Manager(username=username,
password=password,
tenant_name=tenant_name,
interface=cls._interface)
else:
os = clients.Manager(interface=cls._interface)
return os
@classmethod
def clear_isolated_creds(cls):
"""
Clears isolated creds if set
"""
if getattr(cls, 'isolated_creds'):
cls.isolated_creds.clear_isolated_creds()
@classmethod
def _get_identity_admin_client(cls):
"""
Returns an instance of the Identity Admin API client
"""
os = clients.AdminManager(interface=cls._interface)
admin_client = os.identity_client
return admin_client
@classmethod
def _get_client_args(cls):
return (
cls.config,
cls.config.identity.admin_username,
cls.config.identity.admin_password,
cls.config.identity.uri
)
def call_until_true(func, duration, sleep_for):
"""
Call the given function until it returns True (and return True) or
until the specified duration (in seconds) elapses (and return
False).
:param func: A zero argument callable that returns True on success.
:param duration: The number of seconds for which to attempt a
successful call of the function.
:param sleep_for: The number of seconds to sleep after an unsuccessful
invocation of the function.
"""
now = time.time()
timeout = now + duration
while now < timeout:
if func():
return True
LOG.debug("Sleeping for %d seconds", sleep_for)
time.sleep(sleep_for)
now = time.time()
return False
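# Illustrative usage (not part of the original module; the polled helper is
# hypothetical):
#
#   def _resource_ready():
#       return get_resource_status() == 'ACTIVE'
#
#   call_until_true(_resource_ready, 60, 2)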
| apache-2.0 | 4,834,938,428,252,851,000 | 35.731118 | 78 | 0.620415 | false | 4.398698 | true | false | false |
vileopratama/vitech | src/addons/vitech_backend_theme/models/res_config_settings.py | 1 | 1776 | # -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api
from lxml import etree
from openerp.addons.base.res.res_config import \
res_config_settings
class ResConfigSettings(res_config_settings):
@api.model
def fields_view_get(self, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
ret_val = super(ResConfigSettings, self).fields_view_get(
view_id=view_id,
view_type=view_type,
context=context,
toolbar=toolbar,
submenu=submenu,
)
page_name = ret_val['name']
doc = etree.XML(ret_val['arch'])
queries = []
if page_name == 'account settings':
queries += [
"//div[field[@name='module_account_reports' and \
@widget='upgrade_boolean']]",
"//div[field[@name='module_account_reports_followup' and \
@widget='upgrade_boolean']]",
"//div[field[@name='module_account_batch_deposit' and \
@widget='upgrade_boolean']]",
]
queries += [
"//div[div[field[@widget='upgrade_boolean']]] \
/preceding-sibling::label[1]",
"//div[div[field[@widget='upgrade_boolean']]]",
"//div[field[@widget='upgrade_boolean']] \
/preceding-sibling::label[1]",
"//div[field[@widget='upgrade_boolean']]",
"//field[@widget='upgrade_boolean']",
]
for query in queries:
for item in doc.xpath(query):
item.getparent().remove(item)
ret_val['arch'] = etree.tostring(doc)
return ret_val
| mit | 6,657,915,532,517,825,000 | 32.509434 | 74 | 0.530968 | false | 3.991011 | false | false | false |
Phasip/mc3p | mc3p/parsing.py | 1 | 9776 | # This source file is part of mc3p, the Minecraft Protocol Parsing Proxy.
#
# Copyright (C) 2011 Matthew J. McGill
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import struct, logging, inspect
logger = logging.getLogger('parsing')
class Parsem(object):
"""Parser/emitter."""
def __init__(self,parser,emitter):
setattr(self,'parse',parser)
setattr(self,'emit',emitter)
def parse_byte(stream):
return struct.unpack_from(">b",stream.read(1))[0]
def emit_byte(b):
return struct.pack(">b",b)
def defmsg(msgtype, name, pairs):
"""Build a Parsem for a message out of (name,Parsem) pairs."""
def parse(stream):
msg = {'msgtype': msgtype}
for (name,parsem) in pairs:
msg[name] = parsem.parse(stream)
return msg
def emit(msg):
return ''.join([emit_unsigned_byte(msgtype),
''.join([parsem.emit(msg[name]) for (name,parsem) in pairs])])
return Parsem(parse,emit)
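# Illustrative example of building a message Parsem (the message id and field
# layout below are made up for demonstration, not taken from the real
# protocol):
#
#   MC_example_msg = defmsg(0x04, "example", [
#       ('time', MC_long),
#       ('text', MC_string),
#   ])
#   # MC_example_msg.parse(stream) -> {'msgtype': 4, 'time': ..., 'text': ...}
#   # MC_example_msg.emit(msg)     -> packed byte string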
MC_byte = Parsem(parse_byte,emit_byte)
def parse_unsigned_byte(stream):
return struct.unpack(">B",stream.read(1))[0]
def emit_unsigned_byte(b):
return struct.pack(">B",b)
MC_unsigned_byte = Parsem(parse_unsigned_byte, emit_unsigned_byte)
def parse_short(stream):
return struct.unpack_from(">h",stream.read(2))[0]
def emit_short(s):
return struct.pack(">h",s)
MC_short = Parsem(parse_short, emit_short)
def parse_int(stream):
return struct.unpack_from(">i",stream.read(4))[0]
def emit_int(i):
return struct.pack(">i",i)
MC_int = Parsem(parse_int, emit_int)
def parse_long(stream):
return struct.unpack_from(">q",stream.read(8))[0]
def emit_long(l):
return struct.pack(">q",l)
MC_long = Parsem(parse_long, emit_long)
def parse_float(stream):
return struct.unpack_from(">f",stream.read(4))[0]
def emit_float(f):
return struct.pack(">f",f)
MC_float = Parsem(parse_float, emit_float)
def parse_double(stream):
return struct.unpack_from(">d",stream.read(8))[0]
def emit_double(d):
return struct.pack(">d",d)
MC_double = Parsem(parse_double, emit_double)
def parse_string(stream):
n = parse_short(stream)
if n == 0:
return unicode("", encoding="utf-16-be")
return unicode(stream.read(2*n), encoding="utf-16-be")
def emit_string(s):
return ''.join([emit_short(len(s)), s.encode("utf-16-be")])
MC_string = Parsem(parse_string, emit_string)
def parse_string8(stream):
n = parse_short(stream)
if n == 0:
return ''
return stream.read(n)
def emit_string8(s):
return ''.join([emit_short(len(s)),s])
MC_string8 = Parsem(parse_string8, emit_string8)
def parse_bool(stream):
b = struct.unpack_from(">B",stream.read(1))[0]
if b==0:
return False
else:
return True
def emit_bool(b):
if b:
return emit_unsigned_byte(1)
else:
return emit_unsigned_byte(0)
MC_bool = Parsem(parse_bool, emit_bool)
def parse_metadata(stream):
data=[]
type = parse_unsigned_byte(stream)
while (type != 127):
type = type >> 5
if type == 0:
data.append(parse_byte(stream))
elif type == 1:
data.append(parse_short(stream))
elif type == 2:
data.append(parse_int(stream))
elif type == 3:
data.append(parse_float(stream))
elif type == 4:
data.append(parse_string(stream))
elif type == 5:
data.append(parse_short(stream))
data.append(parse_byte(stream))
data.append(parse_short(stream))
else:
logger.error(repr(stream.buf[:parse.i]))
raise Exception("Unknown metadata type %d" % type)
type = parse_byte(stream)
return data
MC_metadata = Parsem(parse_metadata,None) #Todo! Make a metadata emit!
def parse_inventory(stream):
n = parse_short(stream)
inv = { "count": n }
inv["slots"] = [parse_slot_update(stream) for i in xrange(0,n)]
return inv
def emit_inventory(inv):
slotstr = ''.join([emit_slot_update(slot) for slot in inv['slots']])
return ''.join([emit_short(inv['count']),slotstr])
MC_inventory = Parsem(parse_inventory,emit_inventory)
def parse_slot_update(stream):
id = parse_short(stream)
if id == -1:
return None
return { "item_id": id, "count": parse_byte(stream), "uses": parse_short(stream) }
def emit_slot_update(update):
if not update:
return emit_short(-1)
return ''.join([emit_short(update['item_id']), emit_byte(update['count']), emit_short(update['uses'])])
MC_slot_update = Parsem(parse_slot_update, emit_slot_update)
SLOT_UPDATE_2_ITEM_IDS = set([
0x15A, #Fishing rod
0x167, #Shears
#TOOLS
#sword, shovel, pickaxe, axe, hoe
0x10C, 0x10D, 0x10E, 0x10F, 0x122, #WOOD
0x110, 0x111, 0x112, 0x113, 0x123, #STONE
0x10B, 0x100, 0x101, 0x102, 0x124, #IRON
0x114, 0x115, 0x116, 0x117, 0x125, #DIAMOND
0x11B, 0x11C, 0x11D, 0x11E, 0x126, #GOLD
#ARMOUR
#helmet, chestplate, leggings, boots
0x12A, 0x12B, 0x12C, 0x12D, #LEATHER
0x12E, 0x12F, 0x130, 0x131, #CHAIN
0x132, 0x133, 0x134, 0x135, #IRON
0x136, 0x137, 0x138, 0x139, #DIAMOND
0x13A, 0x13B, 0x13C, 0x14D #GOLD
])
def parse_slot_update2(stream):
r = parse_slot_update(stream)
if r is not None and r['item_id'] in SLOT_UPDATE_2_ITEM_IDS:
n = parse_short(stream)
r['nbt_size'] = n
if n > 0:
r['nbt_data'] = stream.read(n)
else:
r['nbt_data'] = None
return r
def emit_slot_update2(update):
if not update:
return emit_short(-1)
s = emit_slot_update(update)
if update['item_id'] in SLOT_UPDATE_2_ITEM_IDS:
size = update['nbt_size']
        s = ''.join([s, emit_short(size)])
        if size > 0:
            data = update['nbt_data']
            s = ''.join([s, data])
return s
MC_slot_update2 = Parsem(parse_slot_update2, emit_slot_update2)
def parse_inventory2(stream):
n = parse_short(stream)
inv = { "count": n }
inv["slots"] = [parse_slot_update2(stream) for i in xrange(0,n)]
return inv
def emit_inventory2(inv):
slotstr = ''.join([emit_slot_update2(slot) for slot in inv['slots']])
return ''.join([emit_short(inv['count']),slotstr])
MC_inventory2 = Parsem(parse_inventory2,emit_inventory2)
def parse_chunk(stream):
n = parse_int(stream)
return { 'size': n, 'data': stream.read(n) }
def emit_chunk(ch):
return ''.join([emit_int(ch['size']), ch['data']])
MC_chunk = Parsem(parse_chunk, emit_chunk)
def parse_multi_block_change(stream):
n = parse_short(stream)
return {'coord_array': [parse_short(stream) for j in xrange(0,n)],
'type_array': [parse_byte(stream) for j in xrange(0,n)],
'metadata_array': [parse_byte(stream) for j in xrange(0,n)]}
def emit_multi_block_change(changes):
return ''.join([emit_short(len(changes['coord_array'])),
''.join([emit_short(x) for x in changes['coord_array']]),
''.join([emit_byte(x) for x in changes['type_array']]),
''.join([emit_byte(x) for x in changes['metadata_array']])])
MC_multi_block_change = Parsem(parse_multi_block_change, emit_multi_block_change)
def parse_explosion_records(stream):
n = parse_int(stream)
return { 'count': n,
'data': [(parse_byte(stream),parse_byte(stream),parse_byte(stream))
for i in xrange(0,n)]}
def emit_explosion_records(msg):
    return ''.join([emit_int(msg['count']),
                    ''.join([emit_byte(rec[0]) + emit_byte(rec[1]) + emit_byte(rec[2])
                             for rec in msg['data']])])
MC_explosion_records = Parsem(parse_explosion_records, emit_explosion_records)
def parse_vehicle_data(stream):
x = parse_int(stream)
data = { 'unknown1': x }
if x > 0:
data['unknown2'] = parse_short(stream)
data['unknown3'] = parse_short(stream)
data['unknown4'] = parse_short(stream)
return data
def emit_vehicle_data(data):
x = data['unknown1']
    s = emit_int(x)
    if x > 0:
        s = ''.join([s, emit_short(data['unknown2']), emit_short(data['unknown3']), emit_short(data['unknown4'])])
    return s
MC_vehicle_data = Parsem(parse_vehicle_data, emit_vehicle_data)
def parse_item_data(stream):
n = parse_unsigned_byte(stream)
if n == 0:
return ''
return stream.read(n)
def emit_item_data(s):
assert len(s) < 265
return ''.join([emit_unsigned_byte(len(s)),s])
MC_item_data = Parsem(parse_item_data, emit_item_data)
def parse_fireball_data(stream):
data = {}
data['thrower_id'] = parse_int(stream)
if data['thrower_id'] > 0:
data['u1'] = parse_short(stream)
data['u2'] = parse_short(stream)
data['u3'] = parse_short(stream)
return data
def emit_fireball_data(data):
    s = emit_int(data['thrower_id'])
    if data['thrower_id'] > 0:
        s = ''.join([s, emit_short(data['u1']),
                     emit_short(data['u2']),
                     emit_short(data['u3'])])
    return s
MC_fireball_data = Parsem(parse_fireball_data, emit_fireball_data)
| gpl-2.0 | 1,744,634,293,767,756,500 | 28.714286 | 112 | 0.618249 | false | 3.081967 | false | false | false |
merantix/picasso | picasso/examples/keras-vgg16/model.py | 1 | 2147 | ###############################################################################
# Copyright (c) 2017 Merantix GmbH
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ryan Henderson - initial API and implementation and/or initial
# documentation
# Josh Chen - refactor and class config
###############################################################################
from keras.applications import imagenet_utils
import numpy as np
from PIL import Image
from picasso.models.keras import KerasModel
VGG16_DIM = (224, 224, 3)
class KerasVGG16Model(KerasModel):
def preprocess(self, raw_inputs):
"""
Args:
raw_inputs (list of Images): a list of PIL Image objects
Returns:
array (float32): num images * height * width * num channels
"""
image_arrays = []
for raw_im in raw_inputs:
im = raw_im.resize(VGG16_DIM[:2], Image.ANTIALIAS)
im = im.convert('RGB')
arr = np.array(im).astype('float32')
image_arrays.append(arr)
all_raw_inputs = np.array(image_arrays)
return imagenet_utils.preprocess_input(all_raw_inputs)
def decode_prob(self, class_probabilities):
r = imagenet_utils.decode_predictions(class_probabilities,
top=self.top_probs)
results = [
[{'code': entry[0],
'name': entry[1],
'prob': '{:.3f}'.format(entry[2])}
for entry in row]
for row in r
]
classes = imagenet_utils.CLASS_INDEX
class_keys = list(classes.keys())
class_values = list(classes.values())
for result in results:
for entry in result:
entry['index'] = int(
class_keys[class_values.index([entry['code'],
entry['name']])])
return results
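# Illustrative usage sketch (not part of the original project; it assumes the
# KerasModel base class allows argument-free construction and 'cat.jpg' is a
# hypothetical local image):
#
#   model = KerasVGG16Model()
#   batch = model.preprocess([Image.open('cat.jpg')])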
| epl-1.0 | 2,342,119,976,921,186,300 | 34.783333 | 79 | 0.538426 | false | 4.276892 | false | false | false |
jimsrc/seatos | mixed/src/mcflag2/mixed.py | 2 | 4722 | #!/usr/bin/env ipython
import os
from pylab import *
from numpy import *
import matplotlib.patches as patches
import matplotlib.transforms as transforms
import console_colors as ccl
import numpy as np
#------------------------------
nbins = 50 # (check by eye) bins per normalized time unit
MCwant = '2' # '2', '2.2H'
WangFlag = 'NaN' #'NaN' #'90' #'130'
CorrShift = True
#dTdays = 0.1 # check by eye
if CorrShift==True:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
#------------------------------
# NOTE: check "N-mcs" and "N-sheaths" by eye!
# varname, range-for-plot, label, N-mcs, N-sheaths
VARstf = []
VARstf += [['B', [5., 19.], 'B [nT]', 63, 57]]
VARstf += [['V', [380., 600.], 'Vsw [km/s]', 59, 57]]
VARstf += [['rmsBoB', [0.015, 0.14], 'rms($\hat B$/|B|) [1]', 63, 57]]
VARstf += [['beta', [0.1, 10.], '$\\beta$ [1]', 52, 50]]
VARstf += [['Pcc', [3., 19.], 'proton density [#/cc]', 52, 50]]
VARstf += [['Temp', [1e4, 3e5], 'Temp [K]', 53, 50]]
VARstf += [['AlphaRatio', [0.02, 0.09], 'alpha ratio [K]', 45, 19]]
nvars = len(VARstf)
dir_figs = '../plots/%s/MCflag%s/' % (prexShift, MCwant)
try:
os.system('mkdir -p %s' % dir_figs)
except:
    print ccl.On+ " ---> already exists: %s" % dir_figs + ccl.W
print ccl.On+" generating figures in: %s"%dir_figs + ccl.W
fgap=0.2 # tolerated gap fraction chosen for plotting
#------------------------------
for i in range(nvars):
varname = VARstf[i][0]
ylims = VARstf[i][1]
ylabel = VARstf[i][2]
Nmc = VARstf[i][3]
Nsh = VARstf[i][4]
fname_sh = '../../../sheaths/ascii/MCflag%s/%s/MCflag%s_2before.4after_Wang%s_fgap%1.1f_%s.txt' % (MCwant, prexShift, MCwant, WangFlag, fgap, varname)
fname_mc = '../../../mcs/ascii/MCflag%s/%s/MCflag%s_2before.4after_Wang%s_fgap%1.1f_%s.txt' % (MCwant, prexShift, MCwant, WangFlag, fgap, varname)
varsh = loadtxt(fname_sh, unpack=True)
varmc = loadtxt(fname_mc, unpack=True)
cond_sh = varsh[0]<1.0
cond_mc = varmc[0]>0.0
#------ sheath
t_sh = varsh[0][cond_sh]
var_med_sh = varsh[1][cond_sh]
var_avr_sh = varsh[2][cond_sh]
var_std_sh = varsh[3][cond_sh]
var_n_sh = varsh[4][cond_sh]
#------ mc
t_mc = varmc[0][cond_mc]*3. + 1.0
var_med_mc = varmc[1][cond_mc]
var_avr_mc = varmc[2][cond_mc]
var_std_mc = varmc[3][cond_mc]
var_n_mc = varmc[4][cond_mc]
#---------------------------------------------------
fig = figure(1, figsize=(11, 5.5))
ax = fig.add_subplot(111)
ax.plot(t_sh, var_avr_sh, '-o', alpha=.9, c='black', markeredgecolor='none', label='average', markersize=5)
ax.plot(t_mc, var_avr_mc, '-o', alpha=.9, c='black', markeredgecolor='none', markersize=5)
    # error bands for the sheath
inf = var_avr_sh-var_std_sh/sqrt(var_n_sh)
sup = var_avr_sh+var_std_sh/sqrt(var_n_sh)
ax.fill_between(t_sh, inf, sup, facecolor='gray', alpha=0.5)
    # error bands for the MC
inf = var_avr_mc - var_std_mc/sqrt(var_n_mc)
sup = var_avr_mc + var_std_mc/sqrt(var_n_mc)
ax.fill_between(t_mc, inf, sup, facecolor='gray', alpha=0.5)
    # shade the sheath window
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
    # shade the MC window
rect1 = patches.Rectangle((1., 0.), width=3.0, height=1,
transform=trans, color='blue',
alpha=0.2)
ax.add_patch(rect1)
ax.plot(t_sh, var_med_sh, '-o', markersize=5 ,alpha=.8, c='red', markeredgecolor='none', label='median')
ax.plot(t_mc, var_med_mc, '-o', markersize=5 ,alpha=.8, c='red', markeredgecolor='none')
ax.grid()
ax.set_ylim(ylims);
ax.set_xlim(-2., 7.)
ax.legend(loc='upper right')
ax.set_xlabel('mixed time scale [1]')
ax.set_ylabel(ylabel)
TITLE = '# of MCs: %d \n\
# of sheaths: %d \n\
%dbins per time unit \n\
MCflag: %s \n\
WangFlag: %s' % (Nmc, Nsh, nbins, MCwant, WangFlag)
ax.set_title(TITLE)
if varname=='beta':
ax.set_yscale('log')
#show()
fname_fig = '%s/MCflag%s_2before.4after_Wang%s_fgap%1.1f_%s' % (dir_figs, MCwant, WangFlag, fgap, varname)
savefig('%s.png'%fname_fig, dpi=200, format='png', bbox_inches='tight')
    print ccl.Rn + " ---> generated: " + fname_fig + ccl.W
#savefig('%s.pdf'%fname_fig, dpi=200, format='pdf', bbox_inches='tight')
#savefig('%s.eps'%fname_fig, dpi=200, format='eps', bbox_inches='tight')
close()
| mit | 2,932,413,093,864,362,500 | 40.06087 | 154 | 0.557814 | false | 2.560738 | false | false | false |
moschlar/SAUCE | sauce/controllers/crc/selectors.py | 1 | 3164 | '''
@since: 2015-01-07
@author: moschlar
'''
import sqlalchemy.types as sqlat
import tw2.core as twc
import tw2.bootstrap.forms as twb
import tw2.jqplugins.chosen.widgets as twjc
import sprox.widgets.tw2widgets.widgets as sw
from sprox.sa.widgetselector import SAWidgetSelector
from sprox.sa.validatorselector import SAValidatorSelector, Email
from sauce.widgets.widgets import (LargeMixin, SmallMixin, AdvancedWysihtml5,
MediumTextField, SmallTextField, CalendarDateTimePicker)
from sauce.widgets.validators import AdvancedWysihtml5BleachValidator
class ChosenPropertyMultipleSelectField(LargeMixin, twjc.ChosenMultipleSelectField, sw.PropertyMultipleSelectField):
search_contains = True
def _validate(self, value, state=None):
value = super(ChosenPropertyMultipleSelectField, self)._validate(value, state)
if self.required and not value:
raise twc.ValidationError('Please select at least one value')
else:
return value
class ChosenPropertySingleSelectField(SmallMixin, twjc.ChosenSingleSelectField, sw.PropertySingleSelectField):
search_contains = True
class MyWidgetSelector(SAWidgetSelector):
'''Custom WidgetSelector for SAUCE
Primarily uses fields from tw2.bootstrap.forms and tw2.jqplugins.chosen.
'''
text_field_limit = 256
default_multiple_select_field_widget_type = ChosenPropertyMultipleSelectField
default_single_select_field_widget_type = ChosenPropertySingleSelectField
default_name_based_widgets = {
'name': MediumTextField,
'subject': MediumTextField,
'_url': MediumTextField,
'user_name': MediumTextField,
'email_address': MediumTextField,
'_display_name': MediumTextField,
'description': AdvancedWysihtml5,
'message': AdvancedWysihtml5,
}
def __init__(self, *args, **kwargs):
self.default_widgets.update({
sqlat.String: MediumTextField,
sqlat.Integer: SmallTextField,
sqlat.Numeric: SmallTextField,
sqlat.DateTime: CalendarDateTimePicker,
sqlat.Date: twb.CalendarDatePicker,
sqlat.Time: twb.CalendarTimePicker,
sqlat.Binary: twb.FileField,
sqlat.BLOB: twb.FileField,
sqlat.PickleType: MediumTextField,
sqlat.Enum: twjc.ChosenSingleSelectField,
})
super(MyWidgetSelector, self).__init__(*args, **kwargs)
def select(self, field):
widget = super(MyWidgetSelector, self).select(field)
if (issubclass(widget, sw.TextArea)
and hasattr(field.type, 'length')
and (field.type.length is None or field.type.length < self.text_field_limit)):
widget = MediumTextField
return widget
class MyValidatorSelector(SAValidatorSelector):
_name_based_validators = {
'email_address': Email,
'description': AdvancedWysihtml5BleachValidator,
'message': AdvancedWysihtml5BleachValidator,
}
# def select(self, field):
# print 'MyValidatorSelector', 'select', field
# return super(MyValidatorSelector, self).select(field)
| agpl-3.0 | 3,641,152,664,509,198,000 | 33.769231 | 116 | 0.697535 | false | 3.940224 | false | false | false |
neohanju/GarbageDumping | EventEncoder/posetrack_generate_trajectories.py | 1 | 7755 | import os
import csv
import json
import glob
import progressbar
from collections import OrderedDict
from utils import intersection_over_union
kHaanjuHome = '/home/neohanju/Workspace/dataset'
kJMHome = 'C:/Users/JM/Desktop/Data/ETRIrelated/BMVC'
kCurrentHome = kJMHome
kPosetrackCSVAnnotationBasePath = os.path.join(kCurrentHome, 'posetrack/annotations/csv')
kCOCOKeypointsBasePath = os.path.join(kCurrentHome, 'posetrack/keypoints_COCO')
def load_posetrack_csv_annotation(anno_path):
with open(anno_path, 'r') as csvfile:
reader = csv.reader(csvfile)
keys = next(reader)
dict_list = [OrderedDict(zip(keys, row)) for row in reader]
return {'setname': os.path.basename(anno_path).split('.')[0],
'annotations': dict_list}
def load_posetrack_cvs_annotation_all(anno_base_path=kPosetrackCSVAnnotationBasePath):
file_paths = glob.glob(os.path.join(anno_base_path, '*.csv'))
file_paths.sort()
print('>> Read posetrack annotations')
dict_list = []
for i in progressbar.progressbar(range(len(file_paths))):
dict_list.append(load_posetrack_csv_annotation(file_paths[i]))
return dict_list
def load_coco_keypoints(keypoints_dir):
dir_name = os.path.basename(keypoints_dir)
file_paths = glob.glob(os.path.join(keypoints_dir, '*.json'))
file_paths.sort()
detections = []
for file_path in file_paths:
cur_frame_dict = {'frameNumber': os.path.basename(file_path).split('_')[0],
'keypoints': []}
with open(file_path, 'r') as json_file:
json_data = json.loads(json_file.read())
for people_info in json_data['people']:
cur_frame_dict['keypoints'].append(people_info['pose_keypoints_2d'])
detections.append(cur_frame_dict)
return {'setname': '_'.join(dir_name.split('_')[0:-1]),
'detections': detections}
def load_coco_keypoints_all(keypoints_base_dir=kCOCOKeypointsBasePath):
parent_dir_name_list = next(os.walk(keypoints_base_dir))[1]
parent_dir_name_list.sort()
path_list = []
for parent_dir in parent_dir_name_list:
child_dir_name_list = next(os.walk(os.path.join(keypoints_base_dir, parent_dir)))[1]
path_list += [os.path.join(keypoints_base_dir, parent_dir, current_dir) for current_dir in child_dir_name_list]
print('>> Read keypoints from COCO model')
dict_list = [load_coco_keypoints(path_list[i]) for i in progressbar.progressbar(range(len(path_list)))]
return dict_list
def is_keypoints_in_bbox(keypoints, bbox):
# keypoints; [x0, y0, confidence_0, ..., x18, y18, confidence_18]
# bbox: [xmin, ymin, xmax, ymax]
[xmin, ymin, xmax, ymax] = bbox
point_check_list = [1, 2, 5]
for check_idx in point_check_list:
if xmin > keypoints[3 * check_idx] or xmax < keypoints[3 * check_idx]:
return False
if ymin > keypoints[3 * check_idx + 1] or ymax < keypoints[3 * check_idx + 1]:
return False
return True
def get_trajectories(posetrack_annotation, coco_keypoint):
assert (posetrack_annotation['setname'] == coco_keypoint['setname'])
# for allocation
max_track_id = 0
for cur_anno in posetrack_annotation['annotations']:
if max_track_id < int(cur_anno['track_id']):
max_track_id = int(cur_anno['track_id'])
# clustering with track ID and set bounding box
anno_with_ID = [[] for _ in range(max_track_id + 1)]
for cur_anno in posetrack_annotation['annotations']:
x0_idx = list(cur_anno.keys()).index("x0")
keypoints = list(cur_anno.items())[x0_idx:x0_idx+15*3] # list of tuples like [('x0', '213'), ...]
xs = [float(point[1]) for point in keypoints[0::3] if float(point[1]) != 0]
ys = [float(point[1]) for point in keypoints[1::3] if float(point[1]) != 0]
cur_anno['bbox'] = [min(xs), min(ys), max(xs), max(ys)]
anno_with_ID[int(cur_anno['track_id'])].append(cur_anno)
# calculate bounding box of coco model's keypoints
for frame_info in coco_keypoint['detections']:
frame_info['bbox'] = []
for keypoints in frame_info['keypoints']:
xs, ys = [], []
for p in range(0, len(keypoints), 3):
if 0 == keypoints[p + 2]:
continue
xs.append(keypoints[p])
ys.append(keypoints[p + 1])
frame_info['bbox'].append([min(xs), min(ys), max(xs), max(ys)])
result_trajectories = []
for person in anno_with_ID:
coco_idx = 0
cur_trajectory = []
for pose in person:
# {bbox, frameNumber, head_x1, head_y1, head_x2, head_y2, track_id, x0, y0, is_visible_0 ... x14, y14, is_visible_14}
# find concurrent coco keypoints
while coco_idx < len(coco_keypoint['detections']):
if int(coco_keypoint['detections'][coco_idx]['frameNumber']) < int(pose['frameNumber']):
coco_idx += 1
else:
break
if int(coco_keypoint['detections'][coco_idx]['frameNumber']) > int(pose['frameNumber']):
# there is no concurrent keypoint
continue
# current_coco_detections = []
# while coco_idx < len(coco_keypoint['detections']):
# if int(coco_keypoint['detections'][coco_idx]['frameNumber']) == int(pose['frameNumber']):
# current_coco_detections.append(coco_keypoint['detections'][coco_idx])
# coco_idx += 1
# else:
# break
# find matching keypoint among concurrent keypoints
# criterion: largest I.O.U.(intersection over union)
# but, neck and shoulders of max I.O.U. must be included by annotation box
detection = coco_keypoint['detections'][coco_idx]
if 0 == len(detection['keypoints']):
continue
bbox_iou = [intersection_over_union(pose['bbox'], detection['bbox'][i])
for i, keypoints in enumerate(detection['keypoints'])]
max_iou_pos = bbox_iou.index(max(bbox_iou))
if is_keypoints_in_bbox(detection['keypoints'][max_iou_pos], pose['bbox']):
cur_trajectory.append(
[int(pose['track_id']), 1, int(pose['frameNumber'])] + detection['keypoints'][max_iou_pos] + [0])
result_trajectories.append(cur_trajectory)
return result_trajectories
def save_trajectories(save_path, trajectories):
with open(save_path, 'w') as txtfile:
for trajectory in trajectories:
for pose in trajectory:
txtfile.write(' '.join(map(lambda x: str(x), pose)) + '\n')
def save_trajectories_from_all(save_base_path,
posetrack_anno_base_path=kPosetrackCSVAnnotationBasePath,
coco_keypoints_base_path=kCOCOKeypointsBasePath):
posetrack_annos = load_posetrack_cvs_annotation_all(posetrack_anno_base_path)
coco_keypoints = load_coco_keypoints_all(coco_keypoints_base_path)
for posetrack_annotation in posetrack_annos:
left_coco_keypoints = []
for coco_keypoint in coco_keypoints:
if posetrack_annotation['setname'] == coco_keypoint['setname']:
save_trajectories(os.path.join(save_base_path, posetrack_annotation['setname'] + '.txt'),
get_trajectories(posetrack_annotation, coco_keypoint))
else:
left_coco_keypoints.append(coco_keypoint)
coco_keypoints = left_coco_keypoints
if "__main__" == __name__:
save_trajectories_from_all(kCOCOKeypointsBasePath)
# ()()
# ('') HAANJU.YOO
| bsd-2-clause | 3,583,916,945,920,374,300 | 40.031746 | 129 | 0.608511 | false | 3.336919 | false | false | false |
bikashgupta11/javarobot | src/main/resources/jython/Lib/robot/utils/utf8reader.py | 8 | 1792 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codecs import BOM_UTF8
from .robottypes import is_string
class Utf8Reader(object):
def __init__(self, path_or_file):
if is_string(path_or_file):
self._file = open(path_or_file, 'rb')
self._close = True
else:
self._file = path_or_file
self._close = False
# IronPython handles BOM incorrectly if file not opened in binary mode:
# https://ironpython.codeplex.com/workitem/34655
if hasattr(self._file, 'mode') and self._file.mode != 'rb':
raise ValueError('Only files in binary mode accepted.')
def __enter__(self):
return self
def __exit__(self, *exc_info):
if self._close:
self._file.close()
def read(self):
return self._decode(self._file.read())
def readlines(self):
for index, line in enumerate(self._file.readlines()):
yield self._decode(line, remove_bom=index == 0)
def _decode(self, content, remove_bom=True):
if remove_bom and content.startswith(BOM_UTF8):
content = content[len(BOM_UTF8):]
return content.decode('UTF-8')
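# Illustrative usage (not part of the original module; the file name is a
# placeholder):
#
#   with Utf8Reader('data.robot') as reader:
#       content = reader.read()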
| gpl-3.0 | 1,502,093,724,916,351,200 | 33.461538 | 79 | 0.646763 | false | 3.87041 | false | false | false |
maxiimou/imapclient | imapclient/test/test_init.py | 2 | 2775 | # Copyright (c) 2014, Menno Smits
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
from mock import patch, sentinel
from imapclient.imapclient import IMAPClient
from imapclient.test.util import unittest
class TestInit(unittest.TestCase):
def setUp(self):
self.patcher = patch('imapclient.imapclient.imaplib')
self.imaplib = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_plain(self):
self.imaplib.IMAP4.return_value = sentinel.IMAP4
imap = IMAPClient('1.2.3.4')
self.assertEqual(imap._imap, sentinel.IMAP4)
self.imaplib.IMAP4.assert_called_with('1.2.3.4', 143)
self.assertEqual(imap.host, '1.2.3.4')
self.assertEqual(imap.port, 143)
self.assertEqual(imap.ssl, False)
self.assertEqual(imap.stream, False)
def test_SSL(self):
self.imaplib.IMAP4_SSL.return_value = sentinel.IMAP4_SSL
imap = IMAPClient('1.2.3.4', ssl=True)
self.assertEqual(imap._imap, sentinel.IMAP4_SSL)
self.imaplib.IMAP4_SSL.assert_called_with('1.2.3.4', 993)
self.assertEqual(imap.host, '1.2.3.4')
self.assertEqual(imap.port, 993)
self.assertEqual(imap.ssl, True)
self.assertEqual(imap.stream, False)
def test_SSL_kwargs(self):
self.imaplib.IMAP4_SSL.return_value = sentinel.IMAP4_SSL
imap = IMAPClient('1.2.3.4', ssl=True, keyfile='key.pem',
certfile='cert.pem')
self.assertEqual(imap._imap, sentinel.IMAP4_SSL)
self.imaplib.IMAP4_SSL.assert_called_with('1.2.3.4', 993,
keyfile='key.pem', certfile='cert.pem')
self.assertEqual(imap.ssl, True)
self.assertEqual(imap.stream, False)
imap = IMAPClient('1.2.3.4', ssl=True, ssl_context=sentinel.context)
self.imaplib.IMAP4_SSL.assert_called_with('1.2.3.4', 993,
ssl_context=sentinel.context)
self.assertEqual(imap.ssl, True)
self.assertEqual(imap.stream, False)
def test_stream(self):
self.imaplib.IMAP4_stream.return_value = sentinel.IMAP4_stream
imap = IMAPClient('command', stream=True)
self.assertEqual(imap._imap, sentinel.IMAP4_stream)
self.imaplib.IMAP4_stream.assert_called_with('command')
self.assertEqual(imap.host, 'command')
self.assertEqual(imap.port, None)
self.assertEqual(imap.ssl, False)
self.assertEqual(imap.stream, True)
def test_ssl_and_stream_is_error(self):
self.assertRaises(ValueError, IMAPClient, 'command', ssl=True, stream=True)
def test_stream_and_port_is_error(self):
self.assertRaises(ValueError, IMAPClient, 'command', stream=True, port=123)
| bsd-3-clause | 6,007,561,218,790,899,000 | 34.126582 | 83 | 0.654054 | false | 3.315412 | true | false | false |
lucaskotres/DTS_Charts | E3_DTS/sqlheatmap.py | 1 | 1374 | import datetime
import sys
begin = datetime.datetime.now()
import pyodbc
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
dataini = "'"+str(sys.argv[1])
horaini = str(sys.argv[2])+"'"
datafim = "'"+str(sys.argv[3])
horafim = str(sys.argv[4])+"'"
cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=.\SQLExpress;DATABASE=DTS_Teste;UID=sa;PWD=Elipse21')
cursor = cnxn.cursor()
string = ' '
for i in np.arange(501)[1:501]:
if i == 500:
string = string + 'T' + str(i)
else:
string = string + 'T' + str(i) + ','
querystring = 'SELECT'+string+' FROM Hist_SupportTest1 WHERE E3TimeStamp >= CAST('+str(dataini)+' '+str(horaini)+' AS datetime) AND E3TimeStamp <= CAST('+str(datafim)+' '+str(horafim)+' AS datetime) '
print querystring
cursor.execute(querystring)
rows = cursor.fetchall()
intensity = []
for item in rows:
intensity.append(item)
#convert intensity (list of lists) to a numpy array for plotting
intensity = np.array(intensity)
x = np.arange(500)
y = np.arange(len(rows))
#setup the 2D grid with Numpy
x, y = np.meshgrid(x, y)
print np.shape(intensity)
#now just plug the data into pcolormesh, it's that easy!
plt.pcolormesh(x, y, intensity)
plt.colorbar() #need a colorbar to show the intensity scale
plt.savefig('heatmap.png')
end = datetime.datetime.now()
print 'tempo:', end - begin
plt.show()
| mit | 6,285,247,639,477,630,000 | 21.9 | 200 | 0.684134 | false | 2.904863 | false | false | false |
andrei1089/canola-picasa | service/liblocation.py | 1 | 7950 | # Python wrapper to the Maemo 4.0 "Chinook" liblocation.
# Wrapper version 0.1.
#
# Copyright 2008 by Robert W. Brewer < rwb123 at gmail dot com >
# Licensed under GNU LGPL v3.
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# Please see <http://www.gnu.org/licenses/> for a copy of the
# GNU Lesser General Public License.
########################################
# For a documentation overview of liblocation please see:
# http://maemo.org/development/documentation/how-tos/4-x/maemo_connectivity_guide.html#Location
#########################################
import gobject
import ctypes as C
from types import MethodType
########################################
# constants
########################################
(STATUS_NO_FIX,
STATUS_FIX,
STATUS_DGPS_FIX) = range(3)
(MODE_NOT_SEEN,
MODE_NO_FIX,
MODE_2D,
MODE_3D) = range(4)
NONE_SET = 0
ALTITUDE_SET = 1<<0
SPEED_SET = 1<<1
TRACK_SET = 1<<2
CLIMB_SET = 1<<3
LATLONG_SET = 1<<4
TIME_SET = 1<<5
########################################
# ctypes structure definitions
########################################
class GTypeInstance(C.Structure):
_fields_ = [('g_class', C.c_ulong)]
class GObject(C.Structure):
_fields_ = [('g_type_instance', GTypeInstance),
('ref_count', C.c_uint),
('qdata', C.c_void_p)]
class GPtrArray(C.Structure):
_fields_ = [('pdata', C.c_void_p),
('len', C.c_uint)]
class LocationGPSDeviceSatellite(C.Structure):
_fields_ = [('prn', C.c_int),
('elevation', C.c_int),
('azimuth', C.c_int),
('signal_strength', C.c_int),
('in_use', C.c_int)]
class LocationGPSDeviceFix(C.Structure):
_fields_ = [('mode', C.c_int),
('fields', C.c_uint),
('time', C.c_double),
('ept', C.c_double),
('latitude', C.c_double),
('longitude', C.c_double),
('eph', C.c_double),
('altitude', C.c_double),
('epv', C.c_double),
('track', C.c_double),
('epd', C.c_double),
('speed', C.c_double),
('eps', C.c_double),
('climb', C.c_double),
('epc', C.c_double),
# private, not used yet
('pitch', C.c_double),
('roll', C.c_double),
('dip', C.c_double)]
class CLocationGPSDevice(C.Structure):
_fields_ = [('parent', GObject),
('online', C.c_int),
('status', C.c_int),
('Cfix', C.POINTER(LocationGPSDeviceFix)),
('satellites_in_view', C.c_int),
('satellites_in_use', C.c_int),
('Csatellites', C.POINTER(GPtrArray))] # of LocationGPSDeviceSatellite
def sv_iter(self):
if not self.Csatellites:
return
gar = self.Csatellites.contents
sv_ptr_ptr = C.cast(gar.pdata,
C.POINTER(C.POINTER(LocationGPSDeviceSatellite)))
for i in range(gar.len):
yield sv_ptr_ptr[i].contents
def __getattr__(self, name):
try:
            return C.Structure.__getattr__(self, name)
except AttributeError:
if name == 'fix':
if self.Cfix:
return self.Cfix.contents
else:
return None
if name == 'satellites':
return self.sv_iter()
raise AttributeError
class CLocationGPSDControl(C.Structure):
_fields_ = [('parent', GObject),
('can_control', C.c_int)]
################################################
# gobject C->Python boilerplate from pygtk FAQ
################################################
# this boilerplate can convert a memory address
# into a proper python gobject.
class _PyGObject_Functions(C.Structure):
_fields_ = [
('register_class',
C.PYFUNCTYPE(C.c_void_p, C.c_char_p,
C.c_int, C.py_object,
C.py_object)),
('register_wrapper',
C.PYFUNCTYPE(C.c_void_p, C.py_object)),
('register_sinkfunc',
C.PYFUNCTYPE(C.py_object, C.c_void_p)),
('lookupclass',
C.PYFUNCTYPE(C.py_object, C.c_int)),
('newgobj',
C.PYFUNCTYPE(C.py_object, C.c_void_p)),
]
class PyGObjectCPAI(object):
def __init__(self):
addr = C.pythonapi.PyCObject_AsVoidPtr(
C.py_object(gobject._PyGObject_API))
self._api = _PyGObject_Functions.from_address(addr)
def pygobject_new(self, addr):
return self._api.newgobj(addr)
# call like this:
# Cgobject = PyGObjectCPAI()
# Cgobject.pygobject_new(memory_address)
# to get memory address from a gobject:
# address = hash(obj)
###################################
# pythonized functions
###################################
def gps_device_get_type():
return loc_gps_type()
def gps_device_get_new():
def struct(self):
ptr = C.cast(C.c_void_p(hash(self)),
C.POINTER(CLocationGPSDevice))
return ptr.contents
# create C gobject for gps device
cgps_dev = gobj_new(gps_device_get_type(), None)
# wrap in python gobject
pyobj = Cgobject.pygobject_new(cgps_dev)
# add a struct() method to hide the ctypes stuff.
setattr(pyobj, 'struct', MethodType(struct, pyobj, pyobj.__class__))
return pyobj
def gps_device_reset_last_known(gpsdevice):
libloc.location_gps_device_reset_last_known(C.c_void_p(hash(gpsdevice)))
def gps_device_start(gpsdevice):
libloc.location_gps_device_start(C.c_void_p(hash(gpsdevice)))
def gps_device_stop(gpsdevice):
libloc.location_gps_device_stop(C.c_void_p(hash(gpsdevice)))
def gpsd_control_get_default():
def struct(self):
ptr = C.cast(C.c_void_p(hash(self)),
C.POINTER(CLocationGPSDControl))
return ptr.contents
gpsd_control_ptr = loc_gpsd_control()
# wrap in python object
pyobj = Cgobject.pygobject_new(gpsd_control_ptr)
# add a struct() method to hide the ctypes stuff.
setattr(pyobj, 'struct', MethodType(struct, pyobj, pyobj.__class__))
return pyobj
def gpsd_control_start(gpsdcontrol):
libloc.location_gpsd_control_start(C.c_void_p(hash(gpsdcontrol)))
def gpsd_control_stop(gpsdcontrol):
libloc.location_gpsd_control_stop(C.c_void_p(hash(gpsdcontrol)))
def gpsd_control_request_status(gpsdcontrol):
libloc.location_gpsd_control_request_status(C.c_void_p(hash(gpsdcontrol)))
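# Illustrative usage of this wrapper (not part of the original module): a
# minimal start/stop sequence using only the helpers defined above. A running
# GLib/GTK main loop and signal handling are assumed and not shown here.
#
#   control = gpsd_control_get_default()
#   device = gps_device_get_new()
#   gpsd_control_start(control)
#   ...  # read device.struct().fix / device.struct().satellites in callbacks
#   gpsd_control_stop(control)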
########################################
# initialize library
########################################
# load C libraries
libloc = C.CDLL('liblocation.so.0')
libgobject = C.CDLL('libgobject-2.0.so.0')
Cgobject = PyGObjectCPAI()
# inform ctypes of necessary function prototype information
loc_gps_type = libloc.location_gps_device_get_type
loc_gps_type.restype = C.c_ulong
gobj_new = libgobject.g_object_new
gobj_new.restype = C.c_void_p
loc_gpsd_control = libloc.location_gpsd_control_get_default
loc_gpsd_control.restype = C.POINTER(CLocationGPSDControl)
libloc.location_distance_between.argtypes = [C.c_double,
C.c_double,
C.c_double,
C.c_double]
libloc.location_distance_between.restype = C.c_double
| gpl-3.0 | 1,680,566,774,803,956,500 | 26.894737 | 95 | 0.553836 | false | 3.491436 | false | false | false |
happy5214/manitae | manitae/core/managers/ManitaeLogger.py | 1 | 1469 | # Copyright (C) 2012 Alexander Jones
#
# This file is part of Manitae.
#
# Manitae is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Manitae is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Manitae. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtCore
class ManitaeLogger(QtCore.QObject):
send_entry = QtCore.pyqtSignal(str)
def __init__(self):
super(ManitaeLogger, self).__init__()
def append_notice(self, notice):
temp_string = "<p style=\"margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; white-space:pre-wrap\">" + notice + "</p><br/>\n";
self.send_entry.emit(temp_string)
def append_warning(self, warning):
tempString = "<p style=\"margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; white-space:pre-wrap; color:#c00000\">" + warning + "</p><br/>\n";
self.send_entry.emit(tempString);
| gpl-3.0 | -6,643,320,039,052,473,000 | 43.515152 | 206 | 0.686181 | false | 3.361556 | false | false | false |
commial/miasm | example/jitter/unpack_upx.py | 1 | 3555 | from __future__ import print_function
import os
import logging
from pdb import pm
from miasm.loader import pe
from miasm.analysis.sandbox import Sandbox_Win_x86_32
from miasm.os_dep.common import get_win_str_a
# User defined methods
def kernel32_GetProcAddress(jitter):
"""Hook on GetProcAddress to note where UPX stores import pointers"""
ret_ad, args = jitter.func_args_stdcall(["libbase", "fname"])
# When the function is called, EBX is a pointer to the destination buffer
dst_ad = jitter.cpu.EBX
logging.error('EBX ' + hex(dst_ad))
# Handle ordinal imports
fname = (args.fname if args.fname < 0x10000
else get_win_str_a(jitter, args.fname))
logging.error(fname)
# Get the generated address of the library, and store it in memory to
# dst_ad
ad = sb.libs.lib_get_add_func(args.libbase, fname, dst_ad)
# Add a breakpoint in case of a call on the resolved function
# NOTE: never happens in UPX, just for skeleton
jitter.handle_function(ad)
jitter.func_ret_stdcall(ret_ad, ad)
parser = Sandbox_Win_x86_32.parser(description="Generic UPX unpacker")
parser.add_argument("filename", help="PE Filename")
parser.add_argument('-v', "--verbose",
help="verbose mode", action="store_true")
parser.add_argument("--graph",
help="Export the CFG graph in graph.dot",
action="store_true")
options = parser.parse_args()
options.load_hdr = True
sb = Sandbox_Win_x86_32(options.filename, options, globals(),
parse_reloc=False)
if options.verbose is True:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
if options.verbose is True:
print(sb.jitter.vm)
# Ensure there is one and only one leaf block (for OEP discovery)
mdis = sb.machine.dis_engine(sb.jitter.bs)
mdis.dont_dis_nulstart_bloc = True
asmcfg = mdis.dis_multiblock(sb.entry_point)
leaves = list(asmcfg.get_bad_blocks())
assert(len(leaves) == 1)
l = leaves.pop()
logging.info(l)
end_offset = mdis.loc_db.get_location_offset(l.loc_key)
logging.info('final offset')
logging.info(hex(end_offset))
# Export CFG graph (dot format)
if options.graph is True:
open("graph.dot", "w").write(asmcfg.dot())
if options.verbose is True:
print(sb.jitter.vm)
def update_binary(jitter):
sb.pe.Opthdr.AddressOfEntryPoint = sb.pe.virt2rva(jitter.pc)
logging.info('updating binary')
for s in sb.pe.SHList:
sdata = sb.jitter.vm.get_mem(sb.pe.rva2virt(s.addr), s.rawsize)
sb.pe.rva.set(s.addr, sdata)
# Stop execution
jitter.run = False
return False
# Set callbacks
sb.jitter.add_breakpoint(end_offset, update_binary)
# Run
sb.run()
# Rebuild PE
# Alternative solution: miasm.jitter.loader.pe.vm2pe(sb.jitter, out_fname,
# libs=sb.libs, e_orig=sb.pe)
new_dll = []
sb.pe.SHList.align_sections(0x1000, 0x1000)
logging.info(repr(sb.pe.SHList))
sb.pe.DirRes = pe.DirRes(sb.pe)
sb.pe.DirImport.impdesc = None
logging.info(repr(sb.pe.DirImport.impdesc))
new_dll = sb.libs.gen_new_lib(sb.pe)
logging.info(new_dll)
sb.pe.DirImport.impdesc = []
sb.pe.DirImport.add_dlldesc(new_dll)
s_myimp = sb.pe.SHList.add_section(name="myimp", rawsize=len(sb.pe.DirImport))
logging.info(repr(sb.pe.SHList))
sb.pe.DirImport.set_rva(s_myimp.addr)
# XXXX TODO
sb.pe.NThdr.optentries[pe.DIRECTORY_ENTRY_DELAY_IMPORT].rva = 0
bname, fname = os.path.split(options.filename)
fname = os.path.join(bname, fname.replace('.', '_'))
open(fname + '_unupx.bin', 'wb').write(bytes(sb.pe))
| gpl-2.0 | 2,860,956,189,516,582,000 | 28.380165 | 78 | 0.695359 | false | 2.911548 | false | false | false |
2947721120/sagacious-capsicum | src/ggrc/services/search.py | 3 | 1821 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import json
from flask import request, current_app
from ggrc.fulltext import get_indexer
from .common import DateTimeEncoder
from .util import url_for
def search():
terms = request.args.get('q')
    if not terms:
return current_app.make_response((
'Query parameter "q" specifying search terms must be provided.',
400,
[('Content-Type', 'text/plain')],
))
should_group_by_type = request.args.get('group_by_type')
if should_group_by_type is not None and \
should_group_by_type.lower() == 'true':
return group_by_type_search(terms)
return basic_search(terms)
def do_search(terms, list_for_type):
indexer = get_indexer()
results = indexer.search(terms)
for result in results:
id = result.key
model_type = result.type
entries_list = list_for_type(model_type)
entries_list.append({
'id': id,
'type': model_type,
'href': url_for(model_type, id=id),
})
def make_search_result(entries):
return current_app.make_response((
json.dumps({ 'results': {
'selfLink': request.url,
'entries': entries,
}
}, cls=DateTimeEncoder),
200,
[('Content-Type', 'application/json')],
))
def basic_search(terms):
entries = []
list_for_type = lambda t: entries
do_search(terms, list_for_type)
return make_search_result(entries)
def group_by_type_search(terms):
entries = {}
list_for_type = \
lambda t: entries[t] if t in entries else entries.setdefault(t, [])
do_search(terms, list_for_type)
return make_search_result(entries)
| apache-2.0 | 7,198,063,677,188,458,000 | 28.852459 | 78 | 0.667765 | false | 3.372222 | false | false | false |
samupl/django-admin-autoregister | django_autoregister/apps.py | 1 | 1864 | from django.apps import AppConfig
from django.contrib import admin
from django.db import models
class AutoDisplayAdmin(admin.ModelAdmin):
list_links_fields = ('CharField',
'IntegerField',
'AutoField',
'DateField',
'DateTimeField',
'SlugField',
'BigIntegerField',
'EmailField',
'BooleanField',
'DecimalField',
'FloatField',
'IPAddressField',
'GenericIPAddressField',
'NullBooleanField',
'PositiveIntegerField',
'PositiveSmallIntegerField',
                         'URLField',
'TimeField',)
list_display_fields = list_links_fields + ('ForeignKey', )
def __init__(self, *args, **kwargs):
admin.ModelAdmin.__init__(self, *args, **kwargs)
self.list_display = []
self.list_display_links = []
for field in args[0]._meta.fields:
if field.get_internal_type() in self.list_display_fields:
self.list_display.append(field.name)
if len(self.list_display_links) < 2 and field.get_internal_type() in self.list_links_fields:
self.list_display_links.append(field.name)
class DjangoAutoRegisterConfig(AppConfig):
name = 'django_autoregister'
verbose_name = 'Django model auto registration plugin'
def ready(self):
all_models = models.get_models()
registered_models = admin.site._registry
for model in all_models:
if model in registered_models:
continue
admin.site.register(model, AutoDisplayAdmin) | bsd-3-clause | 7,853,113,908,200,380,000 | 36.3 | 104 | 0.517167 | false | 5.221289 | false | false | false |
liuzheng712/jumpserver | apps/assets/utils.py | 1 | 1465 | # ~*~ coding: utf-8 ~*~
#
import os
import paramiko
from paramiko.ssh_exception import SSHException
from common.utils import get_object_or_none
from .models import Asset, SystemUser, Label
def get_assets_by_id_list(id_list):
return Asset.objects.filter(id__in=id_list).filter(is_active=True)
def get_system_users_by_id_list(id_list):
return SystemUser.objects.filter(id__in=id_list)
def get_assets_by_fullname_list(hostname_list):
return Asset.get_queryset_by_fullname_list(hostname_list)
def get_system_user_by_name(name):
system_user = get_object_or_none(SystemUser, name=name)
return system_user
def get_system_user_by_id(id):
system_user = get_object_or_none(SystemUser, id=id)
return system_user
class LabelFilter:
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
query_keys = self.request.query_params.keys()
all_label_keys = Label.objects.values_list('name', flat=True)
valid_keys = set(all_label_keys) & set(query_keys)
labels_query = {}
for key in valid_keys:
labels_query[key] = self.request.query_params.get(key)
conditions = []
for k, v in labels_query.items():
query = {'labels__name': k, 'labels__value': v}
conditions.append(query)
if conditions:
for kwargs in conditions:
queryset = queryset.filter(**kwargs)
return queryset
| gpl-2.0 | -3,100,966,215,337,615,400 | 27.72549 | 70 | 0.659386 | false | 3.430913 | false | false | false |
SystemsBioinformatics/cbmpy | cbmpy/CBTools.py | 1 | 62009 | """
CBMPy: CBTools module
=====================
PySCeS Constraint Based Modelling (http://cbmpy.sourceforge.net)
Copyright (C) 2009-2018 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: [email protected]
Last edit: $Author: bgoli $ ($Id: CBTools.py 710 2020-04-27 14:22:34Z bgoli $)
"""
# gets rid of "invalid variable name" info
# pylint: disable=C0103
# gets rid of "line to long" info
# pylint: disable=C0301
# use with caution: gets rid of module xxx has no member errors (run once enabled)
# pylint: disable=E1101
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
# from __future__ import unicode_literals
import os
import time
import re
import pprint
import gzip
import zipfile
try:
import pickle
except ImportError:
import cPickle as pickle
cDir = os.path.dirname(os.path.abspath(os.sys.argv[0]))
import numpy
from . import CBModel
from .CBCommon import (
HAVE_PYPARSING,
checkChemFormula,
pp_chemicalFormula,
extractGeneIdsFromString,
)
from .CBCommon import processSpeciesChargeChemFormulaAnnot, pyparsing
_PPR_ = pprint.PrettyPrinter()
from .CBConfig import __CBCONFIG__ as __CBCONFIG__
__DEBUG__ = __CBCONFIG__['DEBUG']
__version__ = __CBCONFIG__['VERSION']
def createTempFileName():
"""
Return a temporary filename
"""
return str(time.time()).split('.')[0]
# TODO compress
def storeObj(obj, filename, compress=False):
"""
Stores a Python *obj* as a serialised binary object in *filename*.dat
- *obj* a python object
- *filename* the base filename
     - *compress* [False] use gzip compression (not yet implemented)
"""
if filename[-4:] != '.dat':
filename = filename + '.dat'
    F = open(filename, 'wb')
pickle.dump(obj, F, protocol=2)
print('Object serialised as {}'.format(filename))
F.close()
def loadObj(filename):
"""
Loads a serialised Python pickle from *filename*.dat returns the Python object(s)
"""
if filename[-4:] != '.dat':
filename = filename + '.dat'
assert os.path.exists(filename), '\nFile \"{}\" does not exist'.format(filename)
    F = open(filename, 'rb')
obj = pickle.load(F)
F.close()
return obj
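# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Minimal round-trip for storeObj()/loadObj(); the filename and payload below
# are arbitrary placeholders.
def _example_store_and_load_obj():
    payload = {'objective': 0.8739, 'status': 'LPS_OPT'}
    storeObj(payload, 'fba_result')    # writes fba_result.dat
    restored = loadObj('fba_result')   # reads fba_result.dat back
    assert restored == payload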
def deSerialize(s):
"""
Deserializes a serialised object contained in a string
"""
return pickle.loads(s)
def deSerializeFromDisk(filename):
"""
Loads a serialised Python pickle from *filename* returns the Python object(s)
"""
assert os.path.exists(filename), '\nFile \"{}\" does not exist'.format(filename)
    F = open(filename, 'rb')
obj = pickle.load(F)
F.close()
return obj
def addStoichToFBAModel(fm):
"""
Build stoichiometry: this method has been refactored into the model class - cmod.buildStoichMatrix()
"""
fm.buildStoichMatrix()
def addSinkReaction(fbam, species, lb=0.0, ub=1000.0):
"""
    Adds a sink reaction that consumes a model *species* so that X -->
- *fbam* an fba model object
- *species* a valid species name
- *lb* lower flux bound [default = 0.0]
- *ub* upper flux bound [default = 1000.0]
"""
assert species in fbam.getSpeciesIds(), '\n%s is not a valid species' % species
if lb < 0.0:
reversible = True
else:
reversible = False
Rname = species + '_sink'
R = CBModel.Reaction(
Rname, name='%s sink reaction' % species, reversible=reversible
)
Su = CBModel.Reagent(Rname + species, species, -1.0)
R.addReagent(Su)
R.is_exchange = True
clb = CBModel.FluxBound(Rname + '_lb', Rname, 'greaterEqual', lb)
cub = CBModel.FluxBound(Rname + '_ub', Rname, 'lessEqual', ub)
fbam.addReaction(R, create_default_bounds=False)
fbam.addFluxBound(clb)
fbam.addFluxBound(cub)
print(
'\n***\nCreated new reaction {} with bounds ({} : {})\n***\n'.format(
Rname, lb, ub
)
)
# TODO: check this
def addSourceReaction(fbam, species, lb=0.0, ub=1000.0):
"""
    Adds a source reaction that produces a model *species* so that --> X
- *fbam* an fba model object
- *species* a valid species name
- *lb* lower flux bound [default = 0.0]
- *ub* upper flux bound [default = 1000.0]
    Note reversibility is determined by the lower bound, default 0 = irreversible. If
negative then reversible.
"""
assert species in fbam.getSpeciesIds(), '\n%s is not a valid species' % species
if lb < 0.0:
reversible = True
else:
reversible = False
Rname = species + '_src'
R = CBModel.Reaction(
Rname, name='%s source reaction' % species, reversible=reversible
)
Su = CBModel.Reagent(Rname + species, species, 1.0)
R.addReagent(Su)
R.is_exchange = True
clb = CBModel.FluxBound(Rname + '_lb', Rname, 'greaterEqual', lb)
cub = CBModel.FluxBound(Rname + '_ub', Rname, 'lessEqual', ub)
fbam.addReaction(R, create_default_bounds=False)
fbam.addFluxBound(clb)
fbam.addFluxBound(cub)
print(
'\n***\nCreated new reaction {} with bounds ({} : {})\n***\n'.format(
Rname, lb, ub
)
)
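# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Adds a sink and a source reaction to an assumed CBMPy model `cmod`; the
# species ids used below are placeholders.
def _example_add_sink_and_source(cmod):
    addSinkReaction(cmod, 'M_atp_c', lb=0.0, ub=1000.0)   # M_atp_c -->
    addSourceReaction(cmod, 'M_glc_e', lb=0.0, ub=10.0)   # --> M_glc_e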
def findDeadEndMetabolites(fbam):
"""
    Finds dead-end (single reaction) metabolites (rows in N with a single entry), returns a list of (metabolite, reaction) ids
"""
fbam.buildStoichMatrix()
orphaned_list = []
for rr in range(fbam.N.array.shape[0]):
if (fbam.N.array[rr, :] != 0.0).sum() == 1:
if __DEBUG__:
print(fbam.N.array[rr, :])
if __DEBUG__:
print(fbam.N.row[rr])
for c in range(fbam.N.array.shape[1]):
if fbam.N.array[rr, c] != 0.0:
orphaned_list.append((fbam.N.row[rr], fbam.N.col[c]))
return orphaned_list
def findDeadEndReactions(fbam):
"""
Finds dead-end (single substrate/product) reactions (cols in N with a single entry), returns a list of (metabolite, reaction) ids
"""
fbam.buildStoichMatrix()
orphaned_list = []
for cc in range(fbam.N.array.shape[1]):
if (fbam.N.array[:, cc] != 0.0).sum() == 1:
if __DEBUG__:
print(fbam.N.array[:, cc])
if __DEBUG__:
print(fbam.N.col[cc])
for r in range(fbam.N.array.shape[0]):
if fbam.N.array[r, cc] != 0.0:
orphaned_list.append((fbam.N.row[r], fbam.N.col[cc]))
return orphaned_list
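# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Both dead-end scans return (species_id, reaction_id) pairs; `cmod` is an
# assumed, already instantiated CBMPy model.
def _example_report_dead_ends(cmod):
    for sid, rid in findDeadEndMetabolites(cmod):
        print('Metabolite {} only occurs in reaction {}'.format(sid, rid))
    for sid, rid in findDeadEndReactions(cmod):
        print('Reaction {} has the single reagent {}'.format(rid, sid))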
def setSpeciesPropertiesFromAnnotations(
fbam, overwriteCharge=False, overwriteChemFormula=False
):
"""
This will attempt to set the model Species properties from the annotation. With the default options
it will only replace missing data. With ChemicalFormula this is easy to detect however charge may
have an "unknown value" of 0. Setting the optional values to true will replace any existing value
with any valid annotation.
- *overwriteChemFormula* [default=False]
- *overwriteCharge* [default=False]
"""
for s_ in fbam.species:
try:
processSpeciesChargeChemFormulaAnnot(
s_,
getFromName=False,
overwriteCharge=overwriteCharge,
overwriteChemFormula=overwriteChemFormula,
)
except Exception:
print(
'processSpeciesChargeChemFormulaAnnot failed for species with id: {}'.format(
s_.getId()
)
)
def fixReversibility(fbam, auto_correct=False):
"""
Set fluxbound lower bound from reactions reversibility information.
- *fbam* and FBAModel instance
- *auto_correct* (default=False) if True automatically sets lower bound to zero if required, otherwise prints a warning if false.
"""
for c in fbam.flux_bounds:
R = c.reaction
# print R
O = c.operation
# print O
V = c.value
# print V
R_obj = fbam.reactions[fbam.getReactionIds().index(c.reaction)]
RE = R_obj.reversible
# print RE
if O in ['greater', 'greaterEqual']:
if not RE and float(V) < 0.0:
print(
'Warning {} is not reversible and lower bound is {}.'.format(R, V)
)
if auto_correct:
print('Resetting {} lower bound ({}) to zero'.format(R, V))
c.value = 0.0
else:
print(
'Reaction ({}) reversible={} inconsistent with fluxbound lower bound ({}) run with auto_correct=True to reset lower bound.'.format(
R, RE, V
)
)
time.sleep(1)
def splitReversibleReactions(fba, selected_reactions=None):
"""
    Split a (set of) reversible reactions into pairs of irreversible reactions, returns a copy of the original model
R1: A = B
R1f: A -> B
R1r: B -> A
- *fba* an instantiated CBMPy model object
- *selected_reactions* if a reversible reaction id is in here split it
"""
if selected_reactions is None:
selected_reactions = []
M = fba.clone()
if len(selected_reactions) == 0:
selected_reactions = M.getReversibleReactionIds()
for r_ in M.getReversibleReactionIds():
if r_ in selected_reactions:
splitSingleReversibleReaction(M, r_)
else:
pass
return M
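# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# splitReversibleReactions() operates on a clone, so the input model `cmod`
# (assumed) is left untouched; 'R_PGI' is a placeholder reaction id.
def _example_split_reversibles(cmod):
    m_split = splitReversibleReactions(cmod, selected_reactions=['R_PGI'])
    print(m_split.getReactionIds(substring='R_PGI'))   # expect R_PGI_fwd / R_PGI_rev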
def splitSingleReversibleReaction(fba, rid, fwd_id=None, rev_id=None):
"""
Split a single reversible reaction into two irreversible reactions, returns the original reversible reaction and bounds
while deleting them from model.
R1: A = B
R1_fwd: A -> B
R1_rev: B -> A
- *fba* an instantiated CBMPy model object
- *rid* a valid reaction id
- *fwd_id* [default=None] the new forward reaction id, defaults to rid_fwd
- *rev_id* [default=None] the new forward reaction id, defaults to rid_rev
"""
R = fba.getReaction(rid)
assert R != None
print('Reversible reaction splitter is processing: {}'.format(rid))
Rf = R.clone()
Rb = R.clone()
Rf.setAnnotation('cbmpy_split_fwd', rid)
Rb.setAnnotation('cbmpy_split_rev', rid)
RB = fba.getReactionBounds(rid)
LB = UB = EB = None
if RB[1] != None and RB[2] != None:
assert (
RB[1] <= RB[2]
), 'ERROR: For reaction splitting ({}): LowerBound ({}) must be LessEqual to UpperBound ({})'.format(
rid, round(RB[1], 6), round(RB[2], 6)
)
if RB[1] != None:
LB = fba.getFluxBoundByReactionID(rid, 'lower')
if RB[2] != None:
UB = fba.getFluxBoundByReactionID(rid, 'upper')
if RB[3] != None:
EB = fba.getFluxBoundByReactionID(rid, 'equality')
fba.deleteReactionAndBounds(rid)
if fwd_id is None:
fwd_id = Rf.getId() + '_fwd'
Rf.setPid(fwd_id)
if rev_id is None:
rev_id = Rb.getId() + '_rev'
Rb.setPid(rev_id)
Rf.reversible = False
Rb.reversible = False
for rr_ in Rf.reagents:
rr_.setPid(rr_.getId() + '_fwd')
for rr_ in Rb.reagents:
rr_.setCoefficient(-1.0 * rr_.getCoefficient())
rr_.setPid(rr_.getId() + '_rev')
fba.addReaction(Rf, create_default_bounds=False)
fba.addReaction(Rb, create_default_bounds=False)
if EB != None:
fba.createReactionLowerBound(Rf.getId(), EB.getValue())
fba.createReactionUpperBound(Rf.getId(), EB.getValue())
fba.createReactionLowerBound(Rb.getId(), EB.getValue())
fba.createReactionUpperBound(Rb.getId(), EB.getValue())
elif LB != None and UB != None:
if LB.getValue() <= 0.0 and UB.getValue() >= 0.0:
fba.createReactionLowerBound(Rf.getId(), 0.0)
fba.createReactionUpperBound(Rf.getId(), UB.getValue())
fba.createReactionLowerBound(Rb.getId(), 0.0)
fba.createReactionUpperBound(Rb.getId(), abs(LB.getValue()))
elif LB.getValue() > 0.0 and UB.getValue() > 0.0:
fba.createReactionLowerBound(Rf.getId(), LB.getValue())
fba.createReactionUpperBound(Rf.getId(), UB.getValue())
fba.createReactionLowerBound(Rb.getId(), 0.0)
fba.createReactionUpperBound(Rb.getId(), 0.0)
if LB.getValue() < 0.0 and UB.getValue() < 0.0:
fba.createReactionLowerBound(Rf.getId(), 0.0)
fba.createReactionUpperBound(Rf.getId(), 0.0)
fba.createReactionLowerBound(Rb.getId(), abs(UB.getValue()))
fba.createReactionUpperBound(Rb.getId(), abs(LB.getValue()))
elif LB != None and UB is None:
if LB.getValue() > 0:
fba.createReactionLowerBound(Rf.getId(), LB.getValue())
fba.createReactionUpperBound(Rf.getId(), float('inf'))
fba.createReactionLowerBound(Rb.getId(), 0.0)
fba.createReactionUpperBound(Rb.getId(), 0.0)
else:
fba.createReactionLowerBound(Rf.getId(), 0.0)
fba.createReactionUpperBound(Rf.getId(), float('inf'))
fba.createReactionLowerBound(Rb.getId(), 0.0)
fba.createReactionUpperBound(Rb.getId(), abs(LB.getValue()))
elif LB is None and UB != None:
if UB.getValue() >= 0:
fba.createReactionLowerBound(Rf.getId(), 0.0)
fba.createReactionUpperBound(Rf.getId(), UB.getValue())
fba.createReactionLowerBound(Rb.getId(), 0.0)
fba.createReactionUpperBound(Rb.getId(), float('inf'))
else:
fba.createReactionLowerBound(Rf.getId(), 0.0)
fba.createReactionUpperBound(Rf.getId(), 0.0)
fba.createReactionLowerBound(Rb.getId(), abs(UB.getValue()))
fba.createReactionUpperBound(Rb.getId(), float('inf'))
else:
fba.createReactionLowerBound(Rf.getId(), 0.0)
fba.createReactionUpperBound(Rf.getId(), float('inf'))
fba.createReactionLowerBound(Rb.getId(), 0.0)
fba.createReactionUpperBound(Rb.getId(), float('inf'))
return (R, LB, UB, EB)
def exportLabelledArray(arr, fname, names=None, sep=',', fmt='%f'):
"""
Write a 2D array type object to file
    - *arr* an array-like object
    - *names* [default=None] the list of row names
    - *fname* the output filename
    - *sep* [default=','] the column separator
    - *fmt* [default='%f'] the output number format
"""
if names != None:
assert arr.shape[0] == len(names), '\n ... rows must equal number of names!'
    F = open(fname, 'w')
cntr = 0
for r in range(arr.shape[0]):
if names != None:
F.write(('%s' + sep) % names[r])
for c in range(arr.shape[1]):
if c < arr.shape[1] - 1:
F.write((fmt + sep) % arr[r, c])
else:
F.write((fmt + '\n') % arr[r, c])
cntr += 1
if cntr >= 250:
F.flush()
cntr = 1
F.write('\n')
F.flush()
F.close()
print('exported to {}'.format(fname))
def exportLabelledArrayWithHeader(
arr, fname, names=None, header=None, sep=',', fmt='%f'
):
"""
Export an array with row names and header
    - *arr* an array-like object
    - *names* [default=None] the list of row names
    - *header* [default=None] the list of column names
    - *fname* the output filename
    - *sep* [default=','] the column separator
    - *fmt* [default='%f'] the output number format
"""
if names != None:
assert arr.shape[0] == len(names), '\n ... rows must equal number of names!'
if header != None:
assert arr.shape[1] == len(
header
), '\n ... cols must equal number of header names!'
    F = open(fname, 'w')
cntr = 0
if header != None:
if names != None:
hstr = ' ' + sep
else:
hstr = ''
for h in header:
hstr += str(h) + sep
hstr = hstr[:-1] + '\n'
F.write(hstr)
del hstr
for r in range(arr.shape[0]):
if names != None:
F.write(('%s' + sep) % names[r])
for c in range(arr.shape[1]):
if c < arr.shape[1] - 1:
F.write((fmt + sep) % arr[r, c])
else:
F.write((fmt + '\n') % arr[r, c])
cntr += 1
if cntr >= 250:
F.flush()
cntr = 1
F.write('\n')
F.flush()
F.close()
print('exported to {}'.format(fname))
def exportLabelledLinkedList(
arr, fname, names=None, sep=',', fmt='%s', appendlist=False
):
"""
Write a 2D linked list [[...],[...],[...],[...]] and optionally a list of row labels to file:
- *arr* the linked list
- *fname* the output filename
- *names* [default=None] the list of row names
- *sep* [default=','] the column separator
- *fmt* [default='%s'] the output number format
- *appendlist* [default=False] if True append the array to *fname* otherwise create a new file
"""
if names != None:
assert len(arr) == len(names), '\n ... rows must equal number of names!'
if not appendlist:
        F = open(fname, 'w')
    else:
        F = open(fname, 'a')
cntr = 0
for r in range(len(arr)):
if names != None:
F.write(('%s' + sep) % names[r])
col_l = len(arr[0])
for c in range(col_l):
if c < col_l - 1:
if arr[r][c] == 0.0:
F.write('0.0' + sep)
else:
try:
F.write((fmt + sep) % arr[r][c])
except UnicodeEncodeError:
F.write((fmt + sep) % 'uError')
else:
if arr[r][c] == 0.0:
F.write('0.0\n')
else:
try:
F.write((fmt + '\n') % arr[r][c])
except UnicodeEncodeError:
F.write((fmt + '\n') % 'uError')
cntr += 1
if cntr >= 250:
F.flush()
cntr = 1
# F.write('\n')
F.flush()
F.close()
del arr
if not appendlist:
print('exported to {}'.format(fname))
def exportLabelledArrayWithHeader2CSV(arr, fname, names=None, header=None):
"""
Export an array with row names and header to fname.csv
    - *arr* an array-like object
- *fname* the output filename
- *names* [default=None] the list of row names
- *header* [default=None] the list of column names
"""
fname += '.csv'
exportLabelledArrayWithHeader(arr, fname, names, header, sep=',', fmt='%f')
def exportLabelledArray2CSV(arr, fname, names=None):
"""
Export an array with row names to fname.csv
    - *arr* an array-like object
- *fname* the output filename
- *names* [default=None] the list of row names
"""
fname += '.csv'
exportLabelledArray(arr, fname, names, sep=',', fmt='%f')
def exportArray2CSV(arr, fname):
"""
Export an array to fname.csv
    - *arr* an array-like object
    - *fname* the output filename
"""
fname += '.csv'
exportLabelledArray(arr, fname, None, sep=',', fmt='%f')
def exportLabelledArrayWithHeader2TXT(arr, fname, names=None, header=None):
"""
Export an array with row names and header to fname.txt
    - *arr* an array-like object
- *names* the list of row names
- *header* the list of column names
- *fname* the output filename
"""
fname += '.txt'
exportLabelledArrayWithHeader(arr, fname, names, header, sep='\t', fmt='%f')
def exportLabelledArray2TXT(arr, fname, names=None):
"""
Export an array with row names to fname.txt
    - *arr* an array-like object
- *names* [default=None] the list of row names
- *fname* the output filename
"""
fname += '.txt'
exportLabelledArray(arr, fname, names, sep='\t', fmt='%f')
def exportArray2TXT(arr, fname):
"""
Export an array to fname.txt
    - *arr* an array-like object
    - *fname* the output filename
"""
fname += '.txt'
exportLabelledArray(arr, fname, None, sep='\t', fmt='%f')
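# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# The *2CSV/*2TXT helpers are thin wrappers around exportLabelledArray(); a
# minimal example with a 2x2 array and made-up row/column labels.
def _example_export_array():
    arr = numpy.array([[1.0, 0.5], [0.25, 2.0]])
    exportLabelledArray2CSV(arr, 'demo_rows', names=['R1', 'R2'])
    exportLabelledArrayWithHeader2CSV(
        arr, 'demo_full', names=['R1', 'R2'], header=['flux_min', 'flux_max']
    )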
def stringReplace(fbamod, old, new, target):
"""
This is alpha stuff, target can be:
- 'species'
- 'reactions'
- 'constraints'
- 'objectives'
- 'all'
"""
print('stringReplace is relatively new and UNTESTED')
fbamod.id = fbamod.id.replace(old, new)
if target == 'species' or target == 'all':
for s in fbamod.species:
s.id = s.id.replace(old, new)
if target == 'reactions' or target == 'all':
for s in fbamod.reactions:
s.id = s.id.replace(old, new)
for r in s.reagents:
r.id = r.id.replace(old, new)
if target == 'constraints' or target == 'all':
for s in fbamod.flux_bounds:
s.id = s.id.replace(old, new)
s.reaction = s.reaction.replace(old, new)
if target == 'objectives' or target == 'all':
for s in fbamod.objectives:
s.id = s.id.replace(old, new)
for f in s.fluxObjectives:
f.id = f.id.replace(old, new)
f.reaction = f.reaction.replace(old, new)
return fbamod
def getBoundsDict(fbamod, substring=None):
"""
    Return a dictionary of reactions and their bounds: {rid: {'lb': ..., 'ub': ..., 'eq': ...}}
"""
rBdic = {}
for r in fbamod.getReactionIds(substring=substring):
name, lb, ub, eq = fbamod.getReactionBounds(r)
rBdic.update({name: {'lb': lb, 'ub': ub, 'eq': eq}})
return rBdic
def getExchBoundsDict(fbamod):
"""
Return a dictionary of all exchange reactions (as determined by the is_exchange attribute of Reaction)
- *fbamod* a CBMPy model
"""
rBdic = {}
for r in fbamod.getReactionIds(substring=None):
name, lb, ub, eq = fbamod.getReactionBounds(r)
rBdic.update({name: {'lb': lb, 'ub': ub, 'eq': eq}})
for r in fbamod.reactions:
if not r.is_exchange:
rBdic.pop(r.getId())
return rBdic
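# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Both helpers return {rid: {'lb': ..., 'ub': ..., 'eq': ...}}; `cmod` is an
# assumed CBMPy model and 'R_EX_' a typical (but not guaranteed) exchange prefix.
def _example_inspect_bounds(cmod):
    medium_like = getBoundsDict(cmod, substring='R_EX_')
    for rid, b in getExchBoundsDict(cmod).items():
        if b['lb'] < 0.0:
            print('{} allows uptake (lb = {})'.format(rid, b['lb']))
    return medium_like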
def processBiGGchemFormula(fba):
"""
Disambiguates the overloaded BiGG name NAME_CHEMFORMULA into
- *species.name* NAME
- *species.chemFormula* CHEMFORMULA
"""
for s in fba.species:
# print s.name
tmp = s.name
tmp2 = tmp.split('_')
if len(tmp2) >= 2:
CF = tmp2.pop(-1)
NM = ''
for se in tmp2:
NM += '%s_' % se
NM = NM[:-1]
# NM = tmp.replace('_%s' % CF, '')
else:
NM = s.name
CF = ''
if __DEBUG__:
print(NM, CF)
del tmp, tmp2
if s.chemFormula in ['', None, ' '] and CF != '':
s.chemFormula = CF.strip()
s.name = NM.strip()
def processBiGGannotationNote(fba, annotation_key='note'):
"""
Parse the HTML formatted reaction information stored in the BiGG notes field.
This function is being deprecated and replaced by `CBTools.processSBMLAnnotationNotes()`
- requires an *annotation_key* which contains a BiGG HTML fragment
"""
print(
'\nDeprecation warning:\nCBTools.processBiGGannotationNote() is being replaced with CBTools.processSBMLAnnotationNotes'
)
html_p = re.compile("<html:p>.*?</html:p>")
for r in fba.reactions:
new_ann = {}
if annotation_key in r.annotation:
hPs = re.findall(html_p, r.annotation.pop(annotation_key))
if __DEBUG__:
print(hPs)
for p in hPs:
ps = (
p.replace('<html:p>', '')
.replace('</html:p>', '')
.replace('<', '<')
.replace('>', '>')
.split(':', 1)
)
if len(ps) == 2:
new_ann.update({ps[0].strip(): ps[1].strip()})
r.annotation.update(new_ann)
if __DEBUG__:
print(r.annotation)
def processSBMLAnnotationNotes(fba, annotation_key='note', level=3):
"""
Parse the HTML formatted reaction information stored in the SBML notes field currently
processes BiGG and PySCeSCBM style annotations it looks for the the annotation indexed
with the *annotation_key*
- *annotation_key* [default='note'] which contains a HTML/XHTML fragment in BiGG/PySCeSCBM format (ignored in L3)
"""
# if hasattr(fba, '_SBML_LEVEL_') and fba._SBML_LEVEL_ != None:
# print('\n==================================\nINFO \"CBTools.processSBMLAnnotationNotes()\":\n')
# print('This function is now called automatically\nduring model load and can be ignored.')
# print('==================================\n')
# return
html_p = re.compile("<p>.*?</p>")
html_span = re.compile("<span>.*?</span>")
html_bigg_p = re.compile("<html:p>.*?</html:p>")
for r in fba.reactions:
if level >= 3 or annotation_key in r.annotation:
new_ann = {}
notes = ''
if level >= 3:
notes = r.getNotes()
else:
notes = r.annotation.pop(annotation_key)
if '<span xmlns="http://www.w3.org/1999/xhtml">' in notes:
hPs = re.findall(html_p, notes.replace('\n', ''))
if __DEBUG__:
print(hPs)
for p in hPs:
ps = re.findall(html_span, p)
ps = [
p.replace('<span>', '')
.replace('</span>', '')
.replace('<', '<')
.replace('>', '>')
.strip()
for p in ps
]
if len(ps) == 2 and ps[0] not in r.annotation:
new_ann.update({ps[0]: ps[1]})
else:
hPs = re.findall(html_bigg_p, notes)
if len(hPs) > 0:
if __DEBUG__:
print(hPs)
for p in hPs:
ps = (
p.replace('<html:p>', '')
.replace('</html:p>', '')
.replace('<', '<')
.replace('>', '>')
.split(':', 1)
)
if len(ps) == 2 and ps[0].strip() not in r.annotation:
new_ann.update({ps[0].strip(): ps[1].strip()})
else:
hPs = re.findall(html_p, notes)
if __DEBUG__:
print(hPs)
for p in hPs:
ps = (
p.replace('<p>', '')
.replace('</p>', '')
.replace('<', '<')
.replace('>', '>')
.split(':', 1)
)
if len(ps) == 2 and ps[0].strip() not in r.annotation:
new_ann.update({ps[0].strip(): ps[1].strip()})
r.annotation.update(new_ann)
if __DEBUG__:
print(r.annotation)
for s in fba.species:
if level >= 3 or annotation_key in s.annotation:
notes = ''
if level >= 3:
notes = s.getNotes()
else:
notes = s.annotation.pop(annotation_key)
new_ann = {}
if '<span xmlns="http://www.w3.org/1999/xhtml">' in notes:
hPs = re.findall(html_p, notes.replace('\n', ''))
if __DEBUG__:
print(hPs)
for p in hPs:
ps = re.findall(html_span, p)
ps = [
p.replace('<span>', '')
.replace('</span>', '')
.replace('<', '<')
.replace('>', '>')
.strip()
for p in ps
]
if len(ps) == 2 and ps[0].strip() not in s.annotation:
new_ann.update({ps[0]: ps[1]})
else:
hPs = re.findall(html_bigg_p, notes)
if len(hPs) > 0:
if __DEBUG__:
print(hPs)
for p in hPs:
ps = (
p.replace('<html:p>', '')
.replace('</html:p>', '')
.replace('<', '<')
.replace('>', '>')
.split(':', 1)
)
if len(ps) == 2 and ps[0].strip() not in s.annotation:
new_ann.update({ps[0].strip(): ps[1].strip()})
else:
hPs = re.findall(html_p, notes)
if __DEBUG__:
print(hPs)
for p in hPs:
ps = (
p.replace('<p>', '')
.replace('</p>', '')
.replace('<', '<')
.replace('>', '>')
.split(':', 1)
)
if len(ps) == 2 and ps[0].strip() not in s.annotation:
new_ann.update({ps[0].strip(): ps[1].strip()})
s.annotation.update(new_ann)
if 'chemFormula' in s.annotation and (
s.chemFormula is None or s.chemFormula == ''
):
s.chemFormula = s.annotation.pop('chemFormula')
if __DEBUG__:
print(s.annotation)
elif 'FORMULA' in s.annotation and (
s.chemFormula is None or s.chemFormula == ''
):
s.chemFormula = s.annotation.pop('FORMULA')
if s.chemFormula != '' and not checkChemFormula(s.chemFormula):
s.chemFormula = ''
if (
(s.charge is None or s.charge == '' or s.charge == 0)
and 'charge' in s.annotation
and s.annotation['charge'] != ''
):
chrg = s.annotation.pop('charge')
try:
s.charge = int(chrg)
except ValueError:
s.charge = None
print(
'Invalid charge: {} defined for species {}'.format(chrg, s.getId())
)
if __DEBUG__:
print(s.annotation)
elif (
(s.charge is None or s.charge == '' or s.charge == 0)
and 'CHARGE' in s.annotation
and s.annotation['CHARGE'] != ''
):
chrg = s.annotation.pop('CHARGE')
try:
s.charge = int(chrg)
except ValueError:
print(
'Invalid charge: {} defined for species {}'.format(chrg, s.getId())
)
s.charge = None
if __DEBUG__:
print(s.annotation)
def processExchangeReactions(fba, key):
"""
Extract exchange reactions from model using *key* and return:
- a dictionary of all exchange reactions without *medium* reactions
- a dictionary of *medium* exchange reactions (negative lower bound)
"""
# extract all exchange bounds
if key is None:
fexDic = getExchBoundsDict(fba)
else:
fexDic = getBoundsDict(fba, substring=key)
# extract the medium (exchange fluxes that allow uptake)
MediumAll = []
Medium = []
for r in fexDic:
if fexDic[r]['lb'] < 0.0:
MediumAll.append((r, fexDic[r]['lb'], fexDic[r]['ub']))
Medium.append(r)
if __DEBUG__:
print(r, fexDic[r])
# remove medium from bounds dictionary and place in medium dict
mediumDic = {}
for m in Medium:
mediumDic.update({m: fexDic.pop(m)})
if __DEBUG__:
print('\nMedium')
for m in MediumAll:
print(m)
print('mediumDic')
print(mediumDic)
print('\nr in fexDic')
for r in mediumDic:
print(r, r in fexDic)
return fexDic, mediumDic
def generateInputScanReports(
fba, exDict, mediumDict, optimal_growth_rates, wDir, tag=''
):
modName = fba.sourcefile
modName += tag
rnames = fba.getReactionNames()
rid = fba.getReactionIds()
    F = open(os.path.join(wDir, '%s.medium.csv' % modName), 'w')
F.write('J, lb, ub\n')
for r in mediumDict:
RN = rnames[rid.index(r)]
F.write('%s, %s, %s, %s\n' % (r, mediumDict[r]['lb'], mediumDict[r]['ub'], RN))
F.write('\n')
F.close()
    F = open(os.path.join(wDir, '%s.exchange.csv' % modName), 'w')
F.write('J, lb, ub\n')
for r in exDict:
RN = rnames[rid.index(r)]
F.write('%s, %s, %s, %s\n' % (r, exDict[r]['lb'], exDict[r]['ub'], RN))
F.write('\n')
F.close()
    F = open(os.path.join(wDir, '%s.optima.csv' % modName), 'w')
F.write('J, lb, ub, optimum, "name"\n')
if __DEBUG__:
print(rnames)
print(rid)
for r in optimal_growth_rates:
RN = rnames[rid.index(r)]
F.write(
'%s, %s, %s, %s, "%s"\n'
% (r, exDict[r]['lb'], exDict[r]['ub'], optimal_growth_rates[r], RN)
)
F.write('\n')
F.close()
def getAllReactionsAssociatedWithGene(
fba, gene, gene_annotation_key='GENE ASSOCIATION'
):
out = []
for r in fba.reactions:
GA = None
if gene_annotation_key in r.annotation:
GA = gene_annotation_key
elif 'GENE ASSOCIATION' in r.annotation:
GA = 'GENE ASSOCIATION'
elif 'GENE_ASSOCIATION' in r.annotation:
GA = 'GENE_ASSOCIATION'
if GA != None:
if gene in r.annotation[GA]:
out.append(r.getId())
return out
def scanForReactionDuplicates(f, ignore_coefficients=False):
"""
    This method uses a brute-force approach to find reactions with matching
    stoichiometry.
"""
duplicates = []
for r in f.reactions:
Rref = r.getSpeciesIds()
Rref.sort()
refspecies = ''
for s in Rref:
refspecies += '%s:' % s
refspecies = refspecies[:-1]
for r2 in f.reactions:
Rtest = r2.getSpeciesIds()
Rtest.sort()
if Rref == Rtest and r.id != r2.id:
if not ignore_coefficients:
go = True
for rgid in Rtest:
if float(r.getReagentWithSpeciesRef(rgid).coefficient) != float(
r2.getReagentWithSpeciesRef(rgid).coefficient
):
go = False
break
if go:
dup = [
r.id,
r2.id,
]
dup.sort()
dup = dup + [
refspecies,
f.getReaction(dup[0]).getName(),
f.getReaction(dup[1]).getName(),
]
if dup not in duplicates:
duplicates.append(dup)
else:
dup = [
r.id,
r2.id,
]
dup.sort()
dup = dup + [
refspecies,
f.getReaction(dup[0]).getName(),
f.getReaction(dup[1]).getName(),
]
if dup not in duplicates:
duplicates.append(dup)
for d in duplicates:
print(d)
print('\nFound %s pairs of duplicate reactions' % len(duplicates))
return duplicates
def countedPause(Tsec):
    print('\nPausing ... ', end=' ')
for t in range(Tsec, -1, -1):
print('\b\b\b{}'.format(t), end=" ")
time.sleep(1)
print('\b\b{}'.format('done.'))
def addGenesFromAnnotations(fba, annotation_key='GENE ASSOCIATION', gene_pattern=None):
"""
    THIS METHOD IS DEPRECATED, PLEASE USE cmod.createGeneAssociationsFromAnnotations()
Add genes to the model using the definitions stored in the annotation key
- *fba* and fba object
- *annotation_key* the annotation dictionary key that holds the gene association for the protein/enzyme
- *gene_pattern* deprecated, not needed anymore
"""
print(
'\nWARNING: CBTools.addGenesFromAnnotations IS DEPRECATED PLEASE USE cmod.createGeneAssociationsFromAnnotations()\n'
)
fba.createGeneAssociationsFromAnnotations(
annotation_key=annotation_key, replace_existing=True
)
def getModelGenesPerReaction(
fba, gene_pattern=None, gene_annotation_key='GENE ASSOCIATION'
):
'''
Parse a BiGG style gene annotation string using default gene_pattern='(\(\W*\w*\W*\))' or
(<any non-alphanum><any alphanum><any non-alphanum>)
Old eColi specific pattern '(b\w*\W)'
It is advisable to use the model methods directly rather than this function
'''
react_gene = {}
# gene_re = re.compile(gene_pattern)
for r in fba.reactions:
GA = None
# print r.annotation
if gene_annotation_key in r.annotation:
GA = gene_annotation_key
elif 'GENE ASSOCIATION' in r.annotation:
GA = 'GENE ASSOCIATION'
elif 'GENE_ASSOCIATION' in r.annotation:
GA = 'GENE_ASSOCIATION'
elif 'gene_association' in r.annotation:
GA = 'gene_association'
elif 'gene association' in r.annotation:
GA = 'gene association'
if GA != None:
genes = extractGeneIdsFromString(r.annotation[GA])
# genes = re.findall(gene_re, r.annotation[GA])
# genes = [g.replace('(','').replace(')','').strip() for g in genes]
# print r.annotation['GENE ASSOCIATION']
if len(genes) == 0:
# print '\n'
# print 'GA:', r.annotation['GENE ASSOCIATION']
# print r.getId(), genes
# raw_input('x')
genes = None
# print r.getId(), genes
# raw_input()
react_gene.update({r.getId(): genes})
else:
react_gene.update({r.getId(): None})
return react_gene
def getReactionsPerGene(react_gene):
gene_react = {}
no_gene = []
for R in react_gene:
if react_gene[R] is None:
print('Reaction {} has no gene associated with it'.format(R))
no_gene.append(R)
else:
for G in react_gene[R]:
## GK = G.replace('(','').replace(')','').strip()
if G in gene_react:
print('Updating gene {} with reaction {}'.format(G, R))
gene_react[G].append(R)
else:
print('Adding gene {} to gene_react'.format(G))
gene_react.update({G: [R]})
genes = list(gene_react)
return gene_react, genes, no_gene
def removeFixedSpeciesReactions(f):
"""
This function is a hack that removes reactions which only have boundary species as reactants
and products. These are typically gene associations encoded in the Manchester style and there
is probably a better way of working around this problem ...
- *f* an instantiated fba model object
"""
c_react = []
for rea in f.reactions:
lsa = numpy.array(
[f.getSpecies(r.species_ref).is_boundary for r in rea.reagents]
)
if lsa.all():
c_react.append(rea.getId())
for r in c_react:
f.deleteReactionAndBounds(r)
def addFluxAsActiveObjective(f, reaction_id, osense, coefficient=1):
"""
Adds a flux as an active objective function
- *reaction_id* a string containing a reaction id
- *osense* objective sense must be **maximize** or **minimize**
    - *coefficient* the objective function coefficient [default=1]
"""
osense = osense.lower()
if osense == 'max':
osense = 'maximize'
if osense == 'min':
osense = 'minimize'
if osense in ['maximise', 'minimise']:
osense = osense.replace('se', 'ze')
assert osense in ['maximize', 'minimize'], (
"\nosense must be ['maximize', 'minimize'] not %s" % osense
)
assert reaction_id in [r.getId() for r in f.reactions], (
        '\n%s is not a valid reaction' % reaction_id
)
n_obj = CBModel.Objective(reaction_id + '_objf', osense)
f.addObjective(n_obj, active=True)
n_flux_obj = CBModel.FluxObjective(
reaction_id + '_fluxobj', reaction_id, coefficient
)
n_obj.addFluxObjective(n_flux_obj)
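# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Registers a new active objective that maximizes a placeholder biomass
# reaction on an assumed CBMPy model `cmod`.
def _example_set_biomass_objective(cmod):
    addFluxAsActiveObjective(cmod, 'R_BIOMASS', 'maximize', coefficient=1)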
def checkReactionBalanceElemental(f, Rid=None, zero_tol=1.0e-12):
"""
Check if the reaction is balanced using the chemical formula
- *f* the FBA object
- *Rid* [default = None] the reaction to check, defaults to all
- *zero_tol* [default=1.0e-12] the floating point zero used for elemental balancing
This function is derived from the code found here: http://pyparsing.wikispaces.com/file/view/chemicalFormulas.py
"""
assert HAVE_PYPARSING, '\nPyParsing needs to be installed for this method'
if Rid is None:
Rid = f.getReactionIds()
elif isinstance(Rid, list):
pass
else:
Rid = [Rid]
ROUT = {}
RCHARGE = {}
for rid in Rid:
R = f.getReaction(rid)
reagents = []
netcharge = None
for rr in R.reagents:
CF = f.getSpecies(rr.species_ref).chemFormula
chrg = f.getSpecies(rr.species_ref).charge
if CF not in [None, '']:
# print rid, rr.getId(), CF
try:
CFP = pp_chemicalFormula.parseString(CF)
R2 = [(r[0], int(r[1])) for r in CFP]
# print R2
# note this uses a net stoichiometry approach with signed coefficients
reagents.append([rr.species_ref, rr.coefficient, CF, R2])
except pyparsing.ParseException:
print('Invalid Chemical Formula ({}): {}'.format(rid, CF))
reagents.append([rr.species_ref, rr.coefficient, CF, None])
else:
# note this uses a net stoichiometry approach with signed coefficients
reagents.append([rr.species_ref, rr.coefficient, CF, None])
if chrg not in [None, '']:
if netcharge is None:
netcharge = float(chrg) * rr.coefficient
else:
netcharge += float(chrg) * rr.coefficient
# if after all this we still do not have a charge make it all zero
RCHARGE[rid] = netcharge
ROUT[rid] = reagents
Rres = {}
for R in ROUT:
Ed = {}
for rr in ROUT[R]:
if rr[3] != None:
for s in rr[3]:
if s[0] in Ed:
Ed.update({s[0]: Ed[s[0]] + rr[1] * s[1]})
else:
Ed.update({s[0]: rr[1] * s[1]})
else:
pass # print('Invalid or no chemical formula defined for reagent: {}'.format(rr[0]))
if len(Ed) > 0:
CBAL = True
EBAL = True
else:
CBAL = False
EBAL = False
for e in Ed:
if abs(Ed[e]) >= zero_tol:
EBAL = False
if RCHARGE[R] is None or abs(RCHARGE[R]) >= zero_tol:
CBAL = False
Rres.update(
{
R: {
'id': R,
'charge_balanced': CBAL,
'element_balanced': EBAL,
'elements': Ed.copy(),
'charge': RCHARGE[R],
'stuff': ROUT[R],
}
}
)
if CBAL and EBAL:
f.getReaction(R).is_balanced = True
else:
f.getReaction(R).is_balanced = False
return Rres
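# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Reports reactions that fail the elemental/charge balance check; `cmod` is an
# assumed CBMPy model whose species carry chemFormula/charge data.
def _example_list_unbalanced(cmod):
    for rid, res in checkReactionBalanceElemental(cmod).items():
        if not (res['charge_balanced'] and res['element_balanced']):
            print('{}: elements={} net_charge={}'.format(rid, res['elements'], res['charge']))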
def scanForUnbalancedReactions(f, output='all'):
"""
Scan a model for unbalanced reactions, returns a tuple of dictionaries balanced and unbalanced:
- *f* an FBA model instance
- *output* [default='all'] can be one of ['all','charge','element']
- *charge* return all charge **un** balanced reactions
- *element* return all element **un** balanced reactions
"""
bcheck = checkReactionBalanceElemental(f)
badD = bcheck.copy()
out = {}
all_balanced = {}
charge_balanced = {}
element_balanced = {}
for b in bcheck:
if bcheck[b]['charge_balanced'] and bcheck[b]['element_balanced']:
all_balanced.update({b: badD.pop(b)})
elif bcheck[b]['charge_balanced']:
charge_balanced.update({b: badD.pop(b)})
elif bcheck[b]['element_balanced']:
element_balanced.update({b: badD.pop(b)})
if output == 'charge':
out.update(element_balanced)
elif output == 'element':
out.update(charge_balanced)
else:
out.update(element_balanced)
out.update(charge_balanced)
print(len(bcheck), len(badD))
return out
def createZipArchive(zipname, files, move=False, compression='normal'):
"""
Create a zip archive which contains one or more files
- *zipname* the name of the zip archive to create (fully qualified)
- *files* either a valid filename or a list of filenames (fully qualified)
- *move* [default=False] attempt to delete input files after zip-archive creation
- *compression* [default='normal'] normal zip compression, set as None for no compression only store files (zlib not required)
"""
if compression is None:
compression = zipfile.ZIP_STORED
else:
compression = zipfile.ZIP_DEFLATED
zf = zipfile.ZipFile(zipname, mode='w', compression=compression)
    if not isinstance(files, (list, tuple)):
        files = [files]
for f_ in files:
assert os.path.exists(f_), 'ERROR: file \"{}\" does not exist'.format(f_)
for f_ in files:
zf.write(f_, arcname=os.path.split(f_)[-1])
zf.close()
if move:
for f_ in files:
try:
os.remove(f_)
except Exception as ex:
print(ex)
print(
'\nINFO: {} input file(s) moved to archive \"{}\".'.format(
len(files), zipname
)
)
else:
print('\nINFO: zip-archive \"{}\" created.'.format(zipname))
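# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Bundles two hypothetical result files into an archive and removes the
# originals afterwards.
def _example_zip_results():
    createZipArchive('results.zip', ['model.xml', 'fluxes.csv'], move=True)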
def checkExchangeReactions(fba, autocorrect=True):
"""
Scan all reactions for exchange reactions (reactions containing a boundary species), return a list of
inconsistent reactions or correct automatically.
- *fba* a CBMPy model
- *autocorrect* [default=True] correctly set the "is_exchange" attribute on a reaction
"""
badR = []
for r_ in fba.reactions:
has_fixed = False
if True in [fba.getSpecies(rr_.species_ref).is_boundary for rr_ in r_.reagents]:
has_fixed = True
if r_.is_exchange and not has_fixed:
print(
'WARNING: reaction {} is labelled as an exchange reaction but has no fixed reagents.'.format(
r_.getId()
)
)
if autocorrect:
print('INFO: is_exchange reaction attribute corrected')
r_.is_exchange = has_fixed
badR.append(r_.getId())
elif not r_.is_exchange and has_fixed:
print(
'WARNING: reaction {} is not labelled as an exchange reaction but contains a fixed reagent.'.format(
r_.getId()
)
)
if autocorrect:
print('INFO: is_exchange reaction attribute corrected')
r_.is_exchange = has_fixed
badR.append(r_.getId())
return badR
def checkIds(fba, items='all'):
"""
Checks the id's of the specified model attributes to see if the name is legal and if there are duplicates.
Returns a list of items with errors.
- *fba* a CBMPy model instance
- *items* [default='all'] 'all' means 'species,reactions,flux_bounds,objectives' of which one or more can be specified
"""
if items == 'all':
items = [
a.strip() for a in 'species,reactions,flux_bounds,objectives'.split(',')
]
else:
items = [a.strip() for a in items.split(',')]
for i_ in range(len(items) - 1, -1, -1):
if not hasattr(fba, items[i_]):
print(
'ERROR: bad descriptor \"{}\" removing from input list'.format(
items.pop(i_)
)
)
output = {}
iddump = []
for i_ in items:
output[i_] = []
ITEMS = fba.__getattribute__(i_)
for I_ in ITEMS:
Id = I_.getId()
if Id in iddump:
print('INFO: duplicate \"{}\" id: {}'.format(i_, Id))
output[i_].append(I_)
else:
iddump.append(Id)
if i_ == 'reactions':
if 'reagents' not in output:
output['reagents'] = []
for rr_ in I_.reagents:
rrid = rr_.getId()
if rrid in iddump:
print('INFO: duplicate \"reagent\" id: {}'.format(rrid))
if rr_ not in output['reagents']:
output['reagents'].append(rr_)
else:
iddump.append(rrid)
if i_ == 'objectives':
if 'fluxObjectives' not in output:
output['fluxObjectives'] = []
for fo_ in I_.fluxObjectives:
foid = fo_.getId()
if foid in iddump:
print('INFO: duplicate \"fluxObjective\" id: {}'.format(foid))
if fo_ not in output['fluxObjectives']:
output['fluxObjectives'].append(fo_)
else:
iddump.append(foid)
if len(output) == 0:
print(
'\nWARNING: no valid object descriptors found, please check your function call!'
)
return output
def checkFluxBoundConsistency(fba):
"""
    Check flux bound consistency: scans for multiply defined bounds, bounds without a reaction, bounds that are inconsistent
    with each other and with reaction reversibility. Returns a dictionary of bounds/reactions where errors occur.
"""
dupIDs = checkIds(fba, items='flux_bounds')['flux_bounds']
if len(dupIDs) > 0:
print('\nERROR: {} duplicate flux_bound Id\'s detected!'.format(len(dupIDs)))
LB = {}
UB = {}
EB = {}
eMB = {'lower': {}, 'upper': {}, 'equality': {}}
noreaction = []
for fb in fba.flux_bounds:
raw_type = fb.is_bound
get_type = fb.getType()
RID = fb.getReactionId()
if raw_type != get_type:
print(
'WARNING: incorrect bound type for operation: \"{}\" old \"{}\" --> \"{}\"'.format(
fb.operation, raw_type, get_type
)
)
if get_type == 'lower':
if RID in LB:
print(
'ERROR multiple LOWER bounds defined for reaction: \"{}\"'.format(
RID
)
)
if RID in eMB['lower']:
eMB['lower'][RID].append(fb)
else:
eMB['lower'][RID] = [fb]
LB[RID] = fb
if get_type == 'upper':
if RID in UB:
print(
'ERROR multiple UPPER bounds defined for reaction: \"{}\"'.format(
RID
)
)
if RID in eMB['upper']:
eMB['upper'][RID].append(fb)
else:
eMB['upper'][RID] = [fb]
UB[RID] = fb
if get_type == 'equality':
if RID in EB:
print(
'ERROR multiple EQUAL bounds defined for reaction: \"{}\"'.format(
RID
)
)
if RID in eMB['equality']:
eMB['equality'][RID].append(fb)
else:
eMB['equality'][RID] = [fb]
EB[RID] = fb
if fba.getReaction(RID) is None:
noreaction.append(fb)
for mb_ in list(eMB['lower']):
if len(eMB['lower'][mb_]) == 1:
eMB['lower'].pop(mb_)
for mb_ in list(eMB['upper']):
if len(eMB['upper'][mb_]) == 1:
eMB['upper'].pop(mb_)
for mb_ in list(eMB['equality']):
if len(eMB['equality'][mb_]) == 1:
eMB['equality'].pop(mb_)
undefined = {'no_upper': [], 'no_lower': [], 'no_upper_lower': []}
for r_ in fba.getReactionIds():
LBdef = True
UBdef = True
if r_ not in EB:
if r_ not in LB:
LBdef = False
if r_ not in UB:
UBdef = False
if not LBdef and not UBdef:
print('WARNING: No bounds defined for reaction: \"{}\"'.format(r_))
undefined['no_upper_lower'].append(r_)
else:
if not LBdef:
print('WARNING: No LOWER BOUND defined for reaction: \"{}\"'.format(r_))
undefined['no_lower'].append(r_)
if not UBdef:
print('WARNING: No UPPER BOUND defined for reaction: \"{}\"'.format(r_))
undefined['no_upper'].append(r_)
errors = {
'eq+lb': [],
'eq+ub': [],
'duplicate_ids': dupIDs,
'multiple_defines': eMB,
'lb>ub': [],
'undefined': undefined,
'rev_contradict': [],
'no_reaction': noreaction,
}
for k_ in EB:
if k_ in LB:
errors['eq+lb'].append((EB[k_], LB[k_]))
if k_ in UB:
errors['eq+ub'].append((EB[k_], UB[k_]))
checked = []
for k_ in LB:
if k_ in UB and k_ not in checked:
if not LB[k_].getValue() <= UB[k_].getValue():
print(
'ERROR: Reaction {} has lower bound ({}) larger than upper bound ({})'.format(
k_, LB[k_].getValue(), UB[k_].getValue()
)
)
errors['lb>ub'].append((LB[k_], UB[k_]))
checked.append(k_)
assR = fba.getReaction(LB[k_].getReactionId())
if assR != None:
if not assR.reversible:
if LB[k_].getValue() < 0.0:
print(
'ERROR: Reaction {} is marked as irreversible but has a negative lower bound ({})'.format(
assR.getId(), LB[k_].getValue()
)
)
errors['rev_contradict'].append(assR)
del assR
for k_ in UB:
if k_ in LB and k_ not in checked:
if not LB[k_].getValue() <= UB[k_].getValue():
print(
'ERROR: Reaction {} has lower bound ({}) larger than upper bound ({})'.format(
k_, LB[k_].getValue(), UB[k_].getValue()
)
)
errors['lb>ub'].append((LB[k_], UB[k_]))
checked.append(k_)
return errors
def roundOffWithSense(val, osense='max', tol=1e-8):
"""
    Round off a value in a way that takes into consideration the sense of the operation that generated it
- *val* the value
- *osense* [default='max'] the sense
- *tol* [default=1e-8] the tolerance of the roundoff factor
"""
if osense.lower() in ['min', 'minimize', 'minimise']:
val = numpy.ceil(val / tol) * tol
else:
val = numpy.floor(val / tol) * tol
return val
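# --- Illustrative examples (editor addition) --------------------------------
# A maximization result is rounded down and a minimization result is rounded
# up, so the reported optimum never overstates the true value, e.g.
#   roundOffWithSense(0.123456789, 'max', tol=1e-6)  -> 0.123456
#   roundOffWithSense(0.123456789, 'min', tol=1e-6)  -> 0.123457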
def mergeGroups(m, groups, new_id, new_name='', auto_delete=False):
"""
Merge a list of groups into a new group. Note, annotations are not merged!
- *m* the model containing the source groups
- *groups* a list of groups
- *new_id* the new, merged, group id
- *new_name* [default=''] the new group name, the default behaviour is to merge the old names
- *auto_delete* [default=False] delete the source groups
"""
if type(groups) == list and len(groups) > 1:
badgid = []
m_gids = m.getGroupIds()
for gnew in groups:
if gnew not in m_gids:
badgid.append(gnew)
if len(badgid) > 0:
print('ERROR: groups contains invalid group ids: {}'.format(str(badgid)))
return False
else:
print('ERROR: groups must be a list with more than one element.')
return False
if m.getGroup(new_id) is not None:
print('ERROR: new_id {} already exists'.format(new_id))
return False
m.createGroup(new_id)
gnew = m.getGroup(new_id)
make_name = False
if new_name == '':
make_name = True
for gid in groups:
gobj = m.getGroup(gid)
if make_name:
new_name = '{}+{}'.format(new_name, gobj.getName())
for gm in gobj.members:
if gm not in gnew.members:
gnew.addMember(gm)
if auto_delete:
m.deleteGroup(gid)
if make_name:
new_name = new_name[1:]
gnew.setName(new_name)
return True
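# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Merges two hypothetical subsystem groups of an assumed CBMPy model `cmod`
# into one group and deletes the originals.
def _example_merge_groups(cmod):
    ok = mergeGroups(cmod, ['glycolysis', 'ppp'], 'central_carbon',
                     new_name='Central carbon metabolism', auto_delete=True)
    if ok:
        print(cmod.getGroup('central_carbon').getName())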
def merge2Models(m1, m2, ignore=None, ignore_duplicate_ids=False):
"""
Merge 2 models, this method does a raw merge of model 2 into model 1 without any model checking.
Component id's in ignore are ignored in both models and the first objective of model 1 is arbitrarily
set as active. Compartments are also merged and a new "OuterMerge" compartment is also created.
In all cases duplicate id's are tracked and ignored, essentially using the object id encountered first -
usually that of model 1. Duplicate checking can be disabled by setting the *ignore_duplicate_ids* flag.
- *m1* model 1
- *m2* model 2
- *ignore* [[]] do not merge these id's
- *ignore_duplicate_ids* [False] default behaviour that can be enabled
In development: merging genes and gpr's.
"""
if ignore is None:
ignore = []
out = CBModel.Model(m1.getId() + m2.getId())
out.setName(m1.getName() + m2.getName())
out.createCompartment('OuterMerge', size=1.0, dimensions=3)
idstore = []
for x_ in m1.compartments + m2.compartments:
sid = x_.getId()
if sid not in ignore:
if ignore_duplicate_ids or sid not in idstore:
idstore.append(sid)
out.addCompartment(x_.clone())
for s_ in m1.species + m2.species:
sid = s_.getId()
if sid not in ignore:
if ignore_duplicate_ids or sid not in idstore:
idstore.append(sid)
out.addSpecies(s_.clone())
else:
print('Skipping duplicate id: \"{}\"'.format(sid))
else:
print('Skipping ignored id: \"{}\"'.format(sid))
for r_ in m1.reactions + m2.reactions:
sid = r_.getId()
if r_.getId() not in ignore:
if ignore_duplicate_ids or sid not in idstore:
idstore.append(sid)
out.addReaction(r_.clone(), create_default_bounds=False)
else:
print('Skipping duplicate id: \"{}\"'.format(sid))
else:
print('Skipping ignored id: \"{}\"'.format(sid))
for f_ in m1.flux_bounds + m2.flux_bounds:
sid = f_.getId()
if f_.getId() not in ignore:
if ignore_duplicate_ids or sid not in idstore:
idstore.append(sid)
out.addFluxBound(f_.clone())
else:
print('Skipping duplicate id: \"{}\"'.format(sid))
else:
print('Skipping ignored id: \"{}\"'.format(sid))
GO = True
for o_ in m1.objectives + m2.objectives:
sid = o_.getId()
if o_.getId() not in ignore:
if ignore_duplicate_ids or sid not in idstore:
idstore.append(sid)
if GO:
out.addObjective(o_.clone(), active=True)
GO = False
else:
out.addObjective(o_.clone(), active=False)
else:
print('Skipping duplicate id: \"{}\"'.format(sid))
else:
print('Skipping ignored id: \"{}\"'.format(sid))
print('\nAdded {} components to merged model'.format(len(idstore)))
idstore = []
return out
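# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# Raw merge of two assumed CBMPy models, skipping one shared boundary species id.
def _example_merge_two_models(m1, m2):
    merged = merge2Models(m1, m2, ignore=['M_h2o_b'], ignore_duplicate_ids=False)
    print('{} reactions, {} species after merge'.format(len(merged.reactions), len(merged.species)))
    return merged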
| gpl-3.0 | 1,549,951,368,185,540,000 | 32.737214 | 155 | 0.52723 | false | 3.738635 | false | false | false |
pnisarg/ABSA | src/ats_lexicon_detect.py | 1 | 3827 | # -*- coding: utf-8 -*-
import sys
import pickle
import json
import ast
import io
from ate_rule_learn import write_json
CONST_POS_SCORE = "POS_SCORE"
CONST_NEG_SCORE = "NEG_SCORE"
hindi_word_net = sys.argv[3]
word2Synset = pickle.load(open( hindi_word_net + "/WordSynsetDict.pk"))
synonyms = pickle.load(open(hindi_word_net + "/SynsetWords.pk"))
# Returns the synonyms of the word from the Hindi WordNet
def get_synonyms(word):
output = []
syn_map_list = []
if word2Synset.has_key(word):
synsets = word2Synset[word]
for pos in synsets.keys():
for synset in synsets[pos]:
if synonyms.has_key(synset):
synDict = synonyms[synset]
syn_map_list.append(synDict)
for syn_map in syn_map_list:
for word_synoyms_list in syn_map.values():
output.extend(word_synoyms_list)
return output
# Loads the aspect-term extraction output from a JSON file
def load_terms_output(file_path):
with open(file_path) as terms_output_file:
        terms_output = json.load(terms_output_file)
        terms_output = ast.literal_eval(json.dumps(terms_output, ensure_ascii=False, encoding='utf8'))
        terms_output_file.close()
    return terms_output
# Creates a lexicon of words from the Hindi SWN.
# This includes the positive and negative polarity scores for each word
# from the SWN
def generate_lexicon(hindi_swn_dir):
lexicon = {}
swn_file = hindi_swn_dir + "/HSWN_WN.txt"
with io.open(swn_file, 'r', encoding='utf8') as f:
for line in iter(f):
line = line.rstrip()
if line:
data = line.split()
pos_score = float(data[2])
neg_score = float(data[3])
words = data[4]
words = words.split(',')
for word in words:
word_map = {}
word_map[CONST_POS_SCORE] = pos_score
word_map[CONST_NEG_SCORE] = neg_score
lexicon[word] = word_map
return lexicon
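# Minimal sketch of the lexicon this builds (values illustrative): each HSWN_WN.txt
# line is assumed to be whitespace-separated with the positive score in data[2],
# the negative score in data[3] and a comma-separated word list in data[4], e.g.
#   a  00001740  0.25  0.125  word1,word2
# which yields lexicon['word1'] == {'POS_SCORE': 0.25, 'NEG_SCORE': 0.125}.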
# Return the effective score of a word or its synonym from lexicon.
# Effective score is the difference between its positive polarity and
# negative polarity.
def get_score(word, swn_lexicon):
score = 0
word_synoyms = []
word = word.decode("utf-8")
if word not in swn_lexicon:
word_synoyms = get_synonyms(word)
else:
word_synoyms.append(word)
for word in word_synoyms:
if word in swn_lexicon:
pos_score = swn_lexicon[word][CONST_POS_SCORE]
neg_score = swn_lexicon[word][CONST_NEG_SCORE]
score = pos_score - neg_score
break
return score
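# Worked example with a hypothetical lexicon entry:
#   swn_lexicon = {u'WORD': {'POS_SCORE': 0.625, 'NEG_SCORE': 0.125}}
# get_score('WORD', swn_lexicon) would return 0.625 - 0.125 = 0.5. If the word is
# missing, its Hindi WordNet synonyms are tried and the first one present in the
# lexicon is scored instead; if none match, the score stays 0.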
# Detects polarity of aspect terms from Hindi SWN Lexicon
def detect_polarity(terms_output, swn_lexicon):
terms_polarity_output = {}
for sent_id, sent_map in terms_output.iteritems():
sent_polarity = {}
for aspect_term, quality_words in sent_map.iteritems():
polarity = "neu"
score = 0
for quality_word in quality_words:
score += get_score(quality_word, swn_lexicon)
if score > 0:
polarity = "pos"
elif score < 0:
polarity = "neg"
sent_polarity[aspect_term] = polarity
terms_polarity_output[sent_id] = sent_polarity
return terms_polarity_output
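# Sketch of the expected shapes (keys and words are illustrative): terms_output maps
# sentence ids to {aspect_term: [quality words]}, e.g. {'1': {'khana': ['accha']}},
# and the returned map keeps the same sentence ids with each aspect term resolved to
# 'pos', 'neg' or 'neu' according to the summed SentiWordNet score of its quality words.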
# Main function
if __name__ == '__main__':
# loading aspect terms rule based output
terms_output = load_terms_output(sys.argv[1])
# generating Hindi Sentiwordnet lexicon
hindi_swn_dir = sys.argv[2]
swn_lexicon = generate_lexicon(hindi_swn_dir)
# detecting polarity
terms_polarity_output = detect_polarity(terms_output, swn_lexicon)
# writing output to file
write_json(terms_polarity_output, sys.argv[4]) | mit | -6,930,557,273,389,374,000 | 30.121951 | 98 | 0.608832 | false | 3.365875 | false | false | false |
JustinWingChungHui/MyFamilyRoot | facial_recognition/tests/test_resize_tags.py | 2 | 2983 | from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from gallery.models import Image, Gallery, Tag
from family_tree.models import Family, Person
from message_queue.models import Queue, Message
from facial_recognition.resize_tags import resize_tags
import os
import shutil
import threading
@override_settings(SSLIFY_DISABLE=True,
MEDIA_ROOT=settings.MEDIA_ROOT_TEST,
MEDIA_URL=settings.MEDIA_URL_TEST,
AWS_STORAGE_BUCKET_NAME=settings.AWS_STORAGE_BUCKET_NAME_TEST,
FACE_RECOG_TRAIN_TEMP_DIR = settings.FACE_RECOG_TRAIN_TEST_DIR)
class ResizeTagsTestCase(TestCase): # pragma: no cover
def setUp(self):
'''
Need to create a family and a gallery
'''
self.family = Family()
self.family.save()
self.gallery = Gallery.objects.create(title="test_gallery", family_id=self.family.id)
self.test_image = os.path.join(settings.BASE_DIR, 'facial_recognition/tests/test_image_woman.jpg')
self.test_image_destination = ''.join([settings.MEDIA_ROOT, 'galleries/', str(self.family.id), '/', str(self.gallery.id), '/test_image.jpg'])
self.test_image_s3_key = ''.join(['galleries/', str(self.family.id), '/', str(self.gallery.id), '/test_image.jpg'])
directory = ''.join([settings.MEDIA_ROOT, 'galleries/', str(self.family.id), '/', str(self.gallery.id)])
if not os.path.exists(directory):
os.makedirs(directory)
#Copy test image to media area
shutil.copy2(self.test_image, self.test_image_destination)
self.image = Image(gallery=self.gallery, family=self.family, original_image=''.join(['galleries/', str(self.family.id), '/', str(self.gallery.id), '/test_image.jpg']))
self.image.save()
self.image.upload_files_to_s3()
self.person = Person(name='Wallace', gender='M', email='[email protected]', family_id=self.family.id, language='en')
self.person.save()
self.tag = Tag.objects.create(image_id=self.image.id, x1=0.3, y1=0.2, x2=0.5, y2=0.4, person_id=self.person.id)
def tearDown(self):
self.image.delete_local_image_files()
threading.Thread(target=self.image.delete_remote_image_files).start()
try:
os.remove(self.test_image_destination)
        except OSError:
pass
def test_tag_resizes(self):
# Create a message to resize tag
resize_tag_queue_id = Queue.objects.get(name='resize_tag').id
message = Message.objects.create(queue_id=resize_tag_queue_id, integer_data = self.tag.id)
resize_tags([message])
resized_tag = Tag.objects.get(pk=self.tag.id)
self.assertTrue(abs(0.2875 - resized_tag.x1) < 0.001)
self.assertTrue(abs(0.1951 - resized_tag.y1) < 0.001)
self.assertTrue(abs(0.5575 - resized_tag.x2) < 0.001)
self.assertTrue(abs(0.3959 - resized_tag.y2) < 0.001)
| gpl-2.0 | -879,425,692,876,667,400 | 37.24359 | 175 | 0.656386 | false | 3.292494 | true | false | false |
akar43/lsm | tensorboard_logging.py | 1 | 3182 | import logging
from StringIO import StringIO
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
class TensorboardLogger(object):
"""Logging in tensorboard without tensorflow ops.
Adapted from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514"""
def __init__(self, writer=None, log_dir=None):
"""Creates a summary writer logging to log_dir."""
self.logger = logging.getLogger('mview3d.' + __name__)
if writer is not None:
self.writer = writer
else:
if log_dir is not None:
self.writer = tf.summary.FileWriter(log_dir, flush_secs=30)
else:
self.logger.error(
'At least one of writer or log_dir has to be not None')
self.writer = None
def log_scalar(self, tag, value, step):
"""Log a scalar variable.
        Parameters
        ----------
        tag : basestring
            Name of the scalar
        value : float
            Value of the scalar to record
step : int
training iteration
"""
summary = tf.Summary(
value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
self.writer.flush()
def log_images(self, tag, images, step):
"""Logs a list of images."""
im_summaries = []
for nr, img in enumerate(images):
# Write the image to a string
s = StringIO()
plt.imsave(s, img, format='png')
# Create an Image object
img_sum = tf.Summary.Image(
encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
im_summaries.append(
tf.Summary.Value(tag='%s/%d' % (tag, nr), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=im_summaries)
self.writer.add_summary(summary, step)
self.writer.flush()
def log_histogram(self, tag, values, step, bins=1000):
"""Logs the histogram of a list/vector of values."""
# Create histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill fields of histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
        # The proto requires as many bucket limits as bins; the first bucket runs from -DBL_MAX to bin_edges[1]
# See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
# Thus, we drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
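# Minimal usage sketch (log directory, tags and values are illustrative only):
#   tb = TensorboardLogger(log_dir='/tmp/tb_logs')
#   for step in range(100):
#       tb.log_scalar('train/loss', 1.0 / (step + 1), step)
#   tb.log_histogram('train/weights', np.random.randn(1000), step=99)
#   tb.log_images('val/examples', [np.random.rand(32, 32, 3)], step=99)
# The summaries are built directly as protobufs, so nothing here adds ops to the
# TensorFlow graph; the events simply appear in TensorBoard under log_dir.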
| mit | -5,417,984,844,522,912,000 | 33.215054 | 110 | 0.580138 | false | 3.997487 | false | false | false |
hyperspy/hyperspyUI | hyperspyui/tests/conftest.py | 1 | 1572 |
import os
import tempfile
import shutil
from hyperspyui.version import __version__
from hyperspyui.__main__ import get_splash
import pytest
from qtpy import QtCore
# QtWebEngineWidgets must be imported before a QCoreApplication instance
# is created (used in eelsdb plugin)
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets
QCoreApplication = QtCore.QCoreApplication
QSettings = QtCore.QSettings
QCoreApplication.setApplicationName("HyperSpyUI-tests")
QCoreApplication.setOrganizationName("Hyperspy")
QCoreApplication.setApplicationVersion(__version__)
QSettings.setDefaultFormat(QSettings.IniFormat)
_tmpdirpath = ''
def pytest_configure(config):
global _tmpdirpath
_tmpdirpath = tempfile.mkdtemp()
userpath = os.path.join(_tmpdirpath, 'user')
syspath = os.path.join(_tmpdirpath, 'sys')
os.mkdir(userpath)
os.mkdir(syspath)
QSettings.setPath(QSettings.IniFormat,
QSettings.UserScope, userpath)
QSettings.setPath(QSettings.IniFormat,
QSettings.SystemScope, syspath)
settings = QSettings()
settings.setValue(
'plugins/Version selector/check_for_updates_on_start', False)
def pytest_unconfigure(config):
shutil.rmtree(_tmpdirpath)
@pytest.fixture(scope='session')
def mainwindow(qapp):
from hyperspyui.mainwindow import MainWindow
window = MainWindow(get_splash(), argv=[])
yield window
qapp.processEvents()
window.close()
window.deleteLater()
del window
qapp.processEvents()
| gpl-3.0 | -5,874,988,546,532,899,000 | 23.952381 | 72 | 0.734097 | false | 3.707547 | false | false | false |
vogelsgesang/checkmate | checkmate/management/commands/analyze.py | 3 | 15899 | # -*- coding: utf-8 -*-
"""
This file is part of checkmate, a meta code checker written in Python.
Copyright (C) 2015 Andreas Dewes, QuantifiedCode UG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
from base import BaseCommand
from collections import defaultdict
import sys
import os
import random
import os.path
import copy
import json
import time
import pprint
import hashlib
import logging
logger = logging.getLogger(__name__)
from checkmate.management.helpers import filter_filenames_by_checkignore
from checkmate.lib.code import CodeEnvironment
def diff_objects(objects_a,objects_b,key,comparator,with_unchanged = False):
"""
Returns a "diff" between two lists of objects.
:param key: The key that identifies objects with identical location in each set,
such as files with the same path or code objects with the same URL.
    :param comparator: Comparison function that decides whether two objects are identical.
"""
objects_a_by_key = dict([(key(obj),obj) for obj in objects_a if key(obj)])
objects_b_by_key = dict([(key(obj),obj) for obj in objects_b if key(obj)])
added_objects = [obj for key,obj in objects_b_by_key.items() if key not in objects_a_by_key]
deleted_objects = [obj for key,obj in objects_a_by_key.items() if key not in objects_b_by_key]
joint_keys = [key for key in objects_a_by_key if key in objects_b_by_key]
modified_objects = [objects_b_by_key[key] for key in joint_keys if
comparator(objects_a_by_key[key],objects_b_by_key[key]) != 0
]
result = {
'added' : added_objects,
'deleted' : deleted_objects,
'modified' : modified_objects,
}
if with_unchanged:
unchanged_objects = [objects_b_by_key[key]
for key in joint_keys
if not objects_b_by_key[key] in modified_objects]
result['unchanged'] = unchanged_objects
return result
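# Minimal sketch of how diff_objects is used (the dicts below are illustrative,
# not real file revisions). Items are paired by key and the comparator only has
# to return non-zero for pairs that should count as modified:
#   a = [{'path': 'a.py', 'h': 1}, {'path': 'b.py', 'h': 2}]
#   b = [{'path': 'b.py', 'h': 3}, {'path': 'c.py', 'h': 4}]
#   diff_objects(a, b, key=lambda o: o['path'],
#                comparator=lambda x, y: y['h'] - x['h'])
#   # -> added: the 'c.py' object, deleted: the 'a.py' object, modified: the 'b.py' object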
class Command(BaseCommand):
def diff_snapshots(self,code_environment,snapshot_a,snapshot_b):
diff = {'snapshot_a' : snapshot_a,'snapshot_b' : snapshot_b,'project' : self.project}
def code_object_key(code_object):
key = os.path.join(code_object.module_url,code_object.tree_url)
return key
def code_object_comparator(code_object_a,code_object_b):
return code_object_b.hash-code_object_a.hash
def file_revision_key(file_revision):
return file_revision.path
def file_revision_comparator(file_revision_a,file_revision_b):
res = 0 if file_revision_a.fr_pk == file_revision_b.fr_pk else -1
return res
def issue_key(issue):
try:
return issue.file_revision.path+":"+issue.analyzer+\
":"+issue.code+":"+issue.fingerprint
except AttributeError:
return issue.file_revision.path+":"+issue.analyzer+":"+issue.code
def issue_comparator(issue_a,issue_b):
if issue_key(issue_a) == issue_key(issue_b):
return 0
return -1
file_revisions_a = snapshot_a.get_file_revisions(self.backend)
file_revisions_b = snapshot_b.get_file_revisions(self.backend)
diff['file_revisions'] = diff_objects(file_revisions_a,
file_revisions_b,
file_revision_key,
file_revision_comparator)
#We just generate code objects and issues
#for the modified file revisions, to save time when diffing.
logger.info("Generating list of modified file revisions...")
modified_file_revisions_by_path = {}
for fr_type in ('modified','added','deleted'):
for fr in diff['file_revisions'][fr_type]:
if not fr.path in modified_file_revisions_by_path:
modified_file_revisions_by_path[fr.path] = fr
logger.info("Generating list of modified issues...")
modified_file_revisions_a = [fr for fr in file_revisions_a
if fr.path in modified_file_revisions_by_path]
modified_file_revisions_b = [fr for fr in file_revisions_b
if fr.path in modified_file_revisions_by_path]
issues_a = self.backend.filter(self.project.Issue,
{'project.pk' : self.project.pk,
'file_revision.pk' : {'$in' : [fr.pk
for fr in modified_file_revisions_a]}
})
issues_b = self.backend.filter(self.project.Issue,
{'project.pk' : self.project.pk,
'file_revision.pk' : {'$in' : [fr.pk
for fr in modified_file_revisions_b]}
})
logger.info("Diffing issues (%d in A, %d in B)" % (len(issues_a),len(issues_b)))
diff['issues'] = diff_objects(issues_a,issues_b,issue_key,issue_comparator)
logger.info("Diffing summary...")
diff['summary'] = code_environment.diff_summaries(snapshot_a,snapshot_b)
diff['summary']['issues'] = {}
diff['summary']['file_revisions'] = {}
logger.info("Summarizing diffed file revisions and issues...")
for key in ('added','modified','deleted'):
diff['summary']['file_revisions'][key] = len(diff['file_revisions'][key])
diff['summary']['issues'][key] = code_environment.summarize_issues(diff['issues'][key])
#Add summary to snapshot_b, so that it can be displayed without fetching the diff object
return diff
def run(self):
settings = self.project.get_settings(self.backend)
if 'ignore' in settings:
checkignore = settings['ignore']
else:
checkignore = []
checkignore_filter = lambda filenames : filter_filenames_by_checkignore(filenames,
checkignore)
logger.info("Getting file revisions...")
file_revisions = self.project.get_disk_file_revisions(file_filters = [checkignore_filter],
path_filters = [checkignore_filter])
logger.info("%d file revisions" % len(file_revisions))
snapshot = self.project.DiskSnapshot({'created_at' : time.time()})
try:
code_environment = CodeEnvironment(file_revisions,
settings = settings)
self.analyze_snapshot(snapshot,
code_environment,
save_if_empty = False)
except KeyboardInterrupt:
raise
def generate_diffs(self,code_environment,snapshot_pairs):
diffs = []
logger.info("Generating diffs beween %d snapshot pairs..." % len(snapshot_pairs))
for snapshot_a,snapshot_b in snapshot_pairs:
logger.info("Generating a diff between snapshots %s and %s" % (snapshot_a.pk,
snapshot_b.pk))
diff = self.diff_snapshots(code_environment,snapshot_a,snapshot_b)
diffs.append(diff)
return diffs
def fingerprint_issues(self,file_revision,issues):
content = file_revision.get_file_content()
lines = content.split("\n")
for issue in issues:
lines = "\n".join([line for loc in issue.location
for line in lines[loc[0][0]:loc[1][0]]])
sha = hashlib.sha1()
sha.update(lines)
issue.fingerprint = sha.hexdigest()
def annotate_file_revisions(self,snapshot,file_revisions):
"""
We convert various items in the file revision to documents,
so that we can easily search and retrieve them...
"""
annotations = defaultdict(list)
def group_issues_by_code(issues):
"""
We group the issues by code to avoid generating 100s of issues per file...
"""
issues_for_code = {}
for issue in issues:
if not issue['code'] in issues_for_code:
issues_for_code[issue['code']] = copy.deepcopy(issue)
code_issue = issues_for_code[issue['code']]
if 'location' in code_issue:
del code_issue['location']
if 'data' in code_issue:
del code_issue['data']
code_issue['occurences'] = []
code_issue = issues_for_code[issue['code']]
issue_data = {}
for key in ('location','data'):
if key in issue:
issue_data[key] = issue[key]
code_issue['occurences'].append(issue_data)
return issues_for_code.values()
for file_revision in file_revisions:
for analyzer_name,results in file_revision.results.items():
if 'issues' in results:
if len(results['issues']) > 1000:
results['issues'] = [
{
'code' : 'TooManyIssues',
'data' : {
'analyzer' : analyzer_name,
'count' : len(results['issues'])
},
'occurences' : []
}
]
documents = []
grouped_issues = group_issues_by_code(results['issues'])
for issue in grouped_issues:
document = self.project.Issue(issue)
document.project = self.project
document.file_revision = file_revision
document.analyzer = analyzer_name
documents.append(document)
annotations['issues'].extend(documents)
del results['issues']
return annotations
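    # Sketch of what group_issues_by_code produces (codes and fields illustrative):
    # many raw analyzer issues such as
    #   {'code': 'W0611', 'location': [[[3, 0], [3, 10]]]}
    # collapse into one document per code of the form
    #   {'code': 'W0611', 'occurences': [{'location': ...}, {'location': ...}]}
    # so a file with hundreds of hits for the same check still yields a single
    # Issue document per (file revision, analyzer, code) triple.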
def analyze_snapshot(self,snapshot,code_environment,save_if_empty = False):
logger.info("Analyzing snapshot...")
file_revisions = code_environment.file_revisions
file_revisions_by_pk = dict([(fr.fr_pk,fr) for fr in file_revisions])
filtered_file_revisions = code_environment.filter_file_revisions(file_revisions)
filtered_file_revisions_by_pk = dict([(fr.fr_pk,fr) for fr in filtered_file_revisions])
excluded_file_revisions = [file_revisions_by_pk[pk]
for pk in file_revisions_by_pk.keys()
if not pk in filtered_file_revisions_by_pk
]
logger.info("Excluding %d file revisions" % len(excluded_file_revisions))
file_revisions = filtered_file_revisions
file_revisions_by_pk = filtered_file_revisions_by_pk
max_file_revisions = 1000
if len(file_revisions) > max_file_revisions:
if not 'snapshot_issues' in snapshot:
snapshot.snapshot_issues = []
snapshot.snapshot_issues.append({
'code' : 'TooManyFileRevisions',
'data' : {
'count' : len(file_revisions),
'limit' : max_file_revisions
}
})
logger.warning("Too many file revisions (%d) in snapshot, truncating at %d" %
(len(file_revisions),max_file_revisions))
file_revisions_by_pk = dict(sorted(file_revisions_by_pk.items(),
key = lambda x:x[0])[:max_file_revisions])
file_revisions = file_revisions_by_pk.values()
existing_file_revisions = list(self.backend.filter(snapshot.FileRevision,{
'project.pk' : self.project.pk,
'fr_pk' : {'$in' : file_revisions_by_pk.keys()}
}))
existing_file_revisions_by_pk = dict([(fr.fr_pk,fr) for fr in existing_file_revisions])
new_file_revisions = [file_revision for file_revision in file_revisions
if not file_revision.fr_pk in existing_file_revisions_by_pk]
file_revisions_dict = {}
for file_revision in existing_file_revisions+new_file_revisions:
file_revisions_dict[file_revision.path] = file_revision
logger.info("Analyzing %d new file revisions (%d are already analyzed)" % (
len(new_file_revisions),
len(existing_file_revisions)
))
i = 0
snapshot_issues = list(self.backend.filter(self.project.Issue,
{'file_revision.pk' : {'$in' : [fr.pk
for fr in existing_file_revisions] }
}))
logger.info("Found %d existing issues..." % len(snapshot_issues))
#We set the project information in the snapshot.
snapshot.project = self.project
snapshot.file_revisions = [fr.pk for fr in file_revisions_dict.values()]
code_environment.env['snapshot'] = snapshot
try:
while i < len(new_file_revisions):
j = i+10 if i+10 < len(new_file_revisions) else len(new_file_revisions)
logger.info("Analyzing and saving: %d - %d (%d remaining)" %
(i, j, len(new_file_revisions) - i ))
file_revisions_slice = new_file_revisions[i:j]
analyzed_file_revisions = code_environment.analyze_file_revisions(file_revisions_slice)
logger.info("Annotating and saving file revisions...")
annotations = self.annotate_file_revisions(snapshot,analyzed_file_revisions)
if 'issues' in annotations:
snapshot_issues.extend(annotations['issues'])
for file_revision in analyzed_file_revisions:
self.backend.save(file_revision)
self.backend.commit()
for issue in annotations['issues']:
self.backend.save(issue)
self.backend.commit()
i+=10
logger.info("Summarizing file revisions...")
snapshot.summary = code_environment.summarize(file_revisions_dict.values())
logger.info("Summarizing issues...")
snapshot.issues_summary = code_environment.summarize_issues(snapshot_issues)
finally:
del code_environment.env['snapshot']
snapshot.analyzed = True
logger.info("Saving snapshot...")
self.backend.save(snapshot)
self.backend.commit()
logger.info("Done analyzing snapshot %s" % snapshot.pk)
return snapshot
| agpl-3.0 | 7,005,460,754,880,088,000 | 40.189119 | 103 | 0.551418 | false | 4.469778 | false | false | false |