metadata (dict) | text (string, lengths 60–3.49M)
---|---
{
"source": "116pythonZS/twisted",
"score": 2
} |
#### File: twisted/python/util.py
```python
from __future__ import division, absolute_import, print_function
import os, sys, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
try:
from os import setgroups, getgroups
except ImportError:
setgroups = getgroups = None
from functools import wraps
from twisted.python.compat import _PY3, unicode
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# For backwards compatibility, some things import this, so just link it
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.util",
"OrderedDict")
class InsensitiveDict:
    """
    Dictionary that has case-insensitive keys.
    Normally keys are retained in their original form when queried with
    .keys() or .items(). If initialized with preserve=0, keys are both
    looked up in lowercase and returned in lowercase by .keys() and .items().
    Modified recipe at
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
    contributed by <NAME>.
    """
def __init__(self, dict=None, preserve=1):
"""
Create an empty dictionary, or update from 'dict'.
"""
self.data = {}
self.preserve=preserve
if dict:
self.update(dict)
def __delitem__(self, key):
k=self._lowerOrReturn(key)
del self.data[k]
def _lowerOrReturn(self, key):
if isinstance(key, bytes) or isinstance(key, unicode):
return key.lower()
else:
return key
def __getitem__(self, key):
"""
Retrieve the value associated with 'key' (in any case).
"""
k = self._lowerOrReturn(key)
return self.data[k][1]
def __setitem__(self, key, value):
"""
        Associate 'value' with 'key'. If 'key' already exists, but in a
        different case, it will be replaced.
"""
k = self._lowerOrReturn(key)
self.data[k] = (key, value)
def has_key(self, key):
"""
Case insensitive test whether 'key' exists.
"""
k = self._lowerOrReturn(key)
return k in self.data
__contains__ = has_key
def _doPreserve(self, key):
if not self.preserve and (isinstance(key, bytes)
or isinstance(key, unicode)):
return key.lower()
else:
return key
def keys(self):
"""
List of keys in their original case.
"""
return list(self.iterkeys())
def values(self):
"""
List of values.
"""
return list(self.itervalues())
def items(self):
"""
List of (key,value) pairs.
"""
return list(self.iteritems())
def get(self, key, default=None):
"""
Retrieve value associated with 'key' or return default value
if 'key' doesn't exist.
"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
"""
If 'key' doesn't exist, associate it with the 'default' value.
Return value associated with 'key'.
"""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""
Copy (key,value) pairs from 'dict'.
"""
for k,v in dict.items():
self[k] = v
def __repr__(self):
"""
String representation of the dictionary.
"""
items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
return "InsensitiveDict({%s})" % items
def iterkeys(self):
for v in self.data.values():
yield self._doPreserve(v[0])
def itervalues(self):
for v in self.data.values():
yield v[1]
def iteritems(self):
for (k, v) in self.data.values():
yield self._doPreserve(k), v
def popitem(self):
i=self.items()[0]
del self[i[0]]
return i
def clear(self):
for k in self.keys():
del self[k]
def copy(self):
return InsensitiveDict(self, self.preserve)
def __len__(self):
return len(self.data)
def __eq__(self, other):
for k,v in self.items():
if not (k in other) or not (other[k]==v):
return 0
return len(self)==len(other)
def uniquify(lst):
"""
Make the elements of a list unique by inserting them into a dictionary.
This must not change the order of the input lst.
"""
dct = {}
result = []
for k in lst:
if k not in dct:
result.append(k)
dct[k] = 1
return result
def padTo(n, seq, default=None):
"""
Pads a sequence out to n elements,
filling in with a default value if it is not long enough.
If the input sequence is longer than n, raises ValueError.
Details, details:
This returns a new list; it does not extend the original sequence.
The new list contains the values of the original sequence, not copies.
"""
if len(seq) > n:
raise ValueError("%d elements is more than %d." % (len(seq), n))
blank = [default] * n
blank[:len(seq)] = list(seq)
return blank
def getPluginDirs():
warnings.warn(
"twisted.python.util.getPluginDirs is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
import twisted
systemPlugins = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(twisted.__file__))), 'plugins')
userPlugins = os.path.expanduser("~/TwistedPlugins")
confPlugins = os.path.expanduser("~/.twisted")
allPlugins = filter(os.path.isdir, [systemPlugins, userPlugins, confPlugins])
return allPlugins
def addPluginDir():
warnings.warn(
"twisted.python.util.addPluginDir is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
sys.path.extend(getPluginDirs())
def sibpath(path, sibling):
"""
Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
"""
return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
def _getpass(prompt):
"""
Helper to turn IOErrors into KeyboardInterrupts.
"""
import getpass
try:
return getpass.getpass(prompt)
except IOError as e:
if e.errno == errno.EINTR:
raise KeyboardInterrupt
raise
except EOFError:
raise KeyboardInterrupt
def getPassword(prompt = 'Password: ', confirm = 0, forceTTY = 0,
confirmPrompt = 'Confirm password: ',
mismatchMessage = "Passwords don't match."):
"""
Obtain a password by prompting or from stdin.
If stdin is a terminal, prompt for a new password, and confirm (if
C{confirm} is true) by asking again to make sure the user typed the same
thing, as keystrokes will not be echoed.
If stdin is not a terminal, and C{forceTTY} is not true, read in a line
and use it as the password, less the trailing newline, if any. If
C{forceTTY} is true, attempt to open a tty and prompt for the password
using it. Raise a RuntimeError if this is not possible.
@returns: C{str}
"""
isaTTY = hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
old = None
try:
if not isaTTY:
if forceTTY:
try:
old = sys.stdin, sys.stdout
sys.stdin = sys.stdout = open('/dev/tty', 'r+')
except:
raise RuntimeError("Cannot obtain a TTY")
else:
password = sys.stdin.readline()
                if password.endswith('\n'):
                    password = password[:-1]
return password
while 1:
try1 = _getpass(prompt)
if not confirm:
return try1
try2 = _getpass(confirmPrompt)
if try1 == try2:
return try1
else:
sys.stderr.write(mismatchMessage + "\n")
finally:
if old:
sys.stdin.close()
sys.stdin, sys.stdout = old
def println(*a):
sys.stdout.write(' '.join(map(str, a))+'\n')
# XXX
# This does not belong here
# But where does it belong?
def str_xor(s, b):
return ''.join([chr(ord(c) ^ b) for c in s])
def makeStatBar(width, maxPosition, doneChar = '=', undoneChar = '-', currentChar = '>'):
"""
Creates a function that will return a string representing a progress bar.
"""
aValue = width / float(maxPosition)
def statBar(position, force = 0, last = ['']):
assert len(last) == 1, "Don't mess with the last parameter."
done = int(aValue * position)
toDo = width - done - 2
result = "[%s%s%s]" % (doneChar * done, currentChar, undoneChar * toDo)
if force:
last[0] = result
return result
if result == last[0]:
return ''
last[0] = result
return result
statBar.__doc__ = """statBar(position, force = 0) -> '[%s%s%s]'-style progress bar
returned string is %d characters long, and the range goes from 0..%d.
The 'position' argument is where the '%s' will be drawn. If force is false,
'' will be returned instead if the resulting progress bar is identical to the
previously returned progress bar.
""" % (doneChar * 3, currentChar, undoneChar * 3, width, maxPosition, currentChar)
return statBar
def spewer(frame, s, ignored):
"""
A trace function for sys.settrace that prints every function or method call.
"""
from twisted.python import reflect
if 'self' in frame.f_locals:
se = frame.f_locals['self']
if hasattr(se, '__class__'):
k = reflect.qual(se.__class__)
else:
k = reflect.qual(type(se))
print('method %s of %s at %s' % (
frame.f_code.co_name, k, id(se)))
else:
print('function %s in %s, line %s' % (
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno))
def searchupwards(start, files=[], dirs=[]):
"""
Walk upwards from start, looking for a directory containing
all files and directories given as arguments::
>>> searchupwards('.', ['foo.txt'], ['bar', 'bam'])
If not found, return None
"""
start=os.path.abspath(start)
parents=start.split(os.sep)
exists=os.path.exists; join=os.sep.join; isdir=os.path.isdir
while len(parents):
candidate=join(parents)+os.sep
allpresent=1
for f in files:
if not exists("%s%s" % (candidate, f)):
allpresent=0
break
if allpresent:
for d in dirs:
if not isdir("%s%s" % (candidate, d)):
allpresent=0
break
if allpresent: return candidate
parents.pop(-1)
return None
class LineLog:
"""
A limited-size line-based log, useful for logging line-based
protocols such as SMTP.
When the log fills up, old entries drop off the end.
"""
def __init__(self, size=10):
"""
Create a new log, with size lines of storage (default 10).
A log size of 0 (or less) means an infinite log.
"""
if size < 0:
size = 0
self.log = [None]*size
self.size = size
def append(self,line):
if self.size:
self.log[:-1] = self.log[1:]
self.log[-1] = line
else:
self.log.append(line)
def str(self):
return '\n'.join(filter(None,self.log))
def __getitem__(self, item):
return filter(None,self.log)[item]
def clear(self):
"""
Empty the log
"""
self.log = [None]*self.size
def raises(exception, f, *args, **kwargs):
"""
Determine whether the given call raises the given exception.
"""
try:
f(*args, **kwargs)
except exception:
return 1
return 0
class IntervalDifferential(object):
"""
Given a list of intervals, generate the amount of time to sleep between
"instants".
For example, given 7, 11 and 13, the three (infinite) sequences::
7 14 21 28 35 ...
11 22 33 44 ...
13 26 39 52 ...
will be generated, merged, and used to produce::
(7, 0) (4, 1) (2, 2) (1, 0) (7, 0) (1, 1) (4, 2) (2, 0) (5, 1) (2, 0)
New intervals may be added or removed as iteration proceeds using the
proper methods.
"""
def __init__(self, intervals, default=60):
"""
        @type intervals: C{list} of C{int}, C{long}, or C{float}
@param intervals: The intervals between instants.
@type default: C{int}, C{long}, or C{float}
@param default: The duration to generate if the intervals list
becomes empty.
"""
self.intervals = intervals[:]
self.default = default
def __iter__(self):
return _IntervalDifferentialIterator(self.intervals, self.default)
class _IntervalDifferentialIterator(object):
def __init__(self, i, d):
self.intervals = [[e, e, n] for (e, n) in zip(i, range(len(i)))]
self.default = d
self.last = 0
def __next__(self):
if not self.intervals:
return (self.default, None)
last, index = self.intervals[0][0], self.intervals[0][2]
self.intervals[0][0] += self.intervals[0][1]
self.intervals.sort()
result = last - self.last
self.last = last
return result, index
# Iterators on Python 2 use next(), not __next__()
next = __next__
def addInterval(self, i):
if self.intervals:
delay = self.intervals[0][0] - self.intervals[0][1]
self.intervals.append([delay + i, i, len(self.intervals)])
self.intervals.sort()
else:
self.intervals.append([i, i, 0])
def removeInterval(self, interval):
for i in range(len(self.intervals)):
if self.intervals[i][1] == interval:
index = self.intervals[i][2]
del self.intervals[i]
for i in self.intervals:
if i[2] > index:
i[2] -= 1
return
raise ValueError("Specified interval not in IntervalDifferential")
class FancyStrMixin:
"""
Mixin providing a flexible implementation of C{__str__}.
C{__str__} output will begin with the name of the class, or the contents
of the attribute C{fancybasename} if it is set.
The body of C{__str__} can be controlled by overriding C{showAttributes} in
a subclass. Set C{showAttributes} to a sequence of strings naming
attributes, or sequences of C{(attributeName, callable)}, or sequences of
C{(attributeName, displayName, formatCharacter)}. In the second case, the
callable is passed the value of the attribute and its return value used in
the output of C{__str__}. In the final case, the attribute is looked up
using C{attributeName}, but the output uses C{displayName} instead, and
renders the value of the attribute using C{formatCharacter}, e.g. C{"%.3f"}
might be used for a float.
"""
# Override in subclasses:
showAttributes = ()
def __str__(self):
r = ['<', (hasattr(self, 'fancybasename') and self.fancybasename)
or self.__class__.__name__]
for attr in self.showAttributes:
if isinstance(attr, str):
r.append(' %s=%r' % (attr, getattr(self, attr)))
elif len(attr) == 2:
r.append((' %s=' % (attr[0],)) + attr[1](getattr(self, attr[0])))
else:
r.append((' %s=' + attr[2]) % (attr[1], getattr(self, attr[0])))
r.append('>')
return ''.join(r)
__repr__ = __str__
class FancyEqMixin:
"""
Mixin that implements C{__eq__} and C{__ne__}.
Comparison is done using the list of attributes defined in
C{compareAttributes}.
"""
compareAttributes = ()
def __eq__(self, other):
if not self.compareAttributes:
return self is other
if isinstance(self, other.__class__):
return (
[getattr(self, name) for name in self.compareAttributes] ==
[getattr(other, name) for name in self.compareAttributes])
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
try:
# initgroups is available in Python 2.7+ on UNIX-likes
from os import initgroups as _initgroups
except ImportError:
_initgroups = None
if _initgroups is None:
def initgroups(uid, primaryGid):
"""
Do nothing.
        Underlying platform support required to manipulate groups is missing.
"""
else:
def initgroups(uid, primaryGid):
"""
Initializes the group access list.
This uses the stdlib support which calls initgroups(3) under the hood.
If the given user is a member of more than C{NGROUPS}, arbitrary
groups will be silently discarded to bring the number below that
limit.
@type uid: C{int}
@param uid: The UID for which to look up group information.
@type primaryGid: C{int} or L{None}
@param primaryGid: If provided, an additional GID to include when
setting the groups.
"""
return _initgroups(pwd.getpwuid(uid)[0], primaryGid)
def switchUID(uid, gid, euid=False):
"""
Attempts to switch the uid/euid and gid/egid for the current process.
If C{uid} is the same value as L{os.getuid} (or L{os.geteuid}),
this function will issue a L{UserWarning} and not raise an exception.
@type uid: C{int} or L{None}
@param uid: the UID (or EUID) to switch the current process to. This
parameter will be ignored if the value is L{None}.
@type gid: C{int} or L{None}
@param gid: the GID (or EGID) to switch the current process to. This
parameter will be ignored if the value is L{None}.
@type euid: C{bool}
@param euid: if True, set only effective user-id rather than real user-id.
(This option has no effect unless the process is running
as root, in which case it means not to shed all
privileges, retaining the option to regain privileges
in cases such as spawning processes. Use with caution.)
"""
if euid:
setuid = os.seteuid
setgid = os.setegid
getuid = os.geteuid
else:
setuid = os.setuid
setgid = os.setgid
getuid = os.getuid
if gid is not None:
setgid(gid)
if uid is not None:
if uid == getuid():
uidText = (euid and "euid" or "uid")
actionText = "tried to drop privileges and set%s %s" % (uidText, uid)
problemText = "%s is already %s" % (uidText, getuid())
warnings.warn("%s but %s; should we be root? Continuing."
% (actionText, problemText))
else:
initgroups(uid, gid)
setuid(uid)
class SubclassableCStringIO(object):
"""
A wrapper around cStringIO to allow for subclassing.
"""
__csio = None
def __init__(self, *a, **kw):
from cStringIO import StringIO
self.__csio = StringIO(*a, **kw)
def __iter__(self):
return self.__csio.__iter__()
def next(self):
return self.__csio.next()
def close(self):
return self.__csio.close()
def isatty(self):
return self.__csio.isatty()
def seek(self, pos, mode=0):
return self.__csio.seek(pos, mode)
def tell(self):
return self.__csio.tell()
def read(self, n=-1):
return self.__csio.read(n)
def readline(self, length=None):
return self.__csio.readline(length)
def readlines(self, sizehint=0):
return self.__csio.readlines(sizehint)
def truncate(self, size=None):
return self.__csio.truncate(size)
def write(self, s):
return self.__csio.write(s)
def writelines(self, list):
return self.__csio.writelines(list)
def flush(self):
return self.__csio.flush()
def getvalue(self):
return self.__csio.getvalue()
def untilConcludes(f, *a, **kw):
"""
Call C{f} with the given arguments, handling C{EINTR} by retrying.
@param f: A function to call.
@param *a: Positional arguments to pass to C{f}.
@param **kw: Keyword arguments to pass to C{f}.
@return: Whatever C{f} returns.
@raise: Whatever C{f} raises, except for C{IOError} or C{OSError} with
C{errno} set to C{EINTR}.
"""
while True:
try:
return f(*a, **kw)
except (IOError, OSError) as e:
if e.args[0] == errno.EINTR:
continue
raise
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
pass
try:
g.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
g.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
try:
g.__module__ = f.__module__
except TypeError:
pass
return g
def nameToLabel(mname):
"""
Convert a string like a variable name into a slightly more human-friendly
string with spaces and capitalized letters.
@type mname: C{str}
@param mname: The name to convert to a label. This must be a string
which could be used as a Python identifier. Strings which do not take
this form will result in unpredictable behavior.
@rtype: C{str}
"""
labelList = []
word = ''
lastWasUpper = False
for letter in mname:
if letter.isupper() == lastWasUpper:
# Continuing a word.
word += letter
else:
# breaking a word OR beginning a word
if lastWasUpper:
# could be either
if len(word) == 1:
# keep going
word += letter
else:
# acronym
# we're processing the lowercase letter after the acronym-then-capital
lastWord = word[:-1]
firstLetter = word[-1]
labelList.append(lastWord)
word = firstLetter + letter
else:
# definitely breaking: lower to upper
labelList.append(word)
word = letter
lastWasUpper = letter.isupper()
if labelList:
labelList[0] = labelList[0].capitalize()
else:
return mname.capitalize()
labelList.append(word)
return ' '.join(labelList)
def uidFromString(uidString):
"""
Convert a user identifier, as a string, into an integer UID.
    @type uidString: C{str}
    @param uidString: A string giving the base-ten representation of a UID or the
name of a user which can be converted to a UID via L{pwd.getpwnam}.
@rtype: C{int}
@return: The integer UID corresponding to the given string.
@raise ValueError: If the user name is supplied and L{pwd} is not
available.
"""
try:
return int(uidString)
except ValueError:
if pwd is None:
raise
return pwd.getpwnam(uidString)[2]
def gidFromString(gidString):
"""
Convert a group identifier, as a string, into an integer GID.
    @type gidString: C{str}
    @param gidString: A string giving the base-ten representation of a GID or the
name of a group which can be converted to a GID via L{grp.getgrnam}.
@rtype: C{int}
@return: The integer GID corresponding to the given string.
@raise ValueError: If the group name is supplied and L{grp} is not
available.
"""
try:
return int(gidString)
except ValueError:
if grp is None:
raise
return grp.getgrnam(gidString)[2]
def runAsEffectiveUser(euid, egid, function, *args, **kwargs):
"""
Run the given function wrapped with seteuid/setegid calls.
    This will try to minimize the number of seteuid/setegid calls, comparing
    current and wanted permissions.
    @param euid: effective UID used to call the function.
    @type euid: C{int}
    @param egid: effective GID used to call the function.
    @type egid: C{int}
    @param function: the function to run with the given permissions.
    @type function: any callable
    @param *args: arguments passed to C{function}
    @param **kwargs: keyword arguments passed to C{function}
"""
uid, gid = os.geteuid(), os.getegid()
if uid == euid and gid == egid:
return function(*args, **kwargs)
else:
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(egid)
if euid != 0 and (euid != uid or gid != egid):
os.seteuid(euid)
try:
return function(*args, **kwargs)
finally:
if euid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(gid)
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(uid)
def runWithWarningsSuppressed(suppressedWarnings, f, *args, **kwargs):
"""
Run C{f(*args, **kwargs)}, but with some warnings suppressed.
Unlike L{twisted.internet.utils.runWithWarningsSuppressed}, it has no
special support for L{twisted.internet.defer.Deferred}.
@param suppressedWarnings: A list of arguments to pass to filterwarnings.
Must be a sequence of 2-tuples (args, kwargs).
@param f: A callable.
@param args: Arguments for C{f}.
@param kwargs: Keyword arguments for C{f}
@return: The result of C{f(*args, **kwargs)}.
"""
with warnings.catch_warnings():
for a, kw in suppressedWarnings:
warnings.filterwarnings(*a, **kw)
return f(*args, **kwargs)
def _replaceIf(condition, alternative):
"""
If C{condition}, replace this function with C{alternative}.
@param condition: A L{bool} which says whether this should be replaced.
@param alternative: An alternative function that will be swapped in instead
of the original, if C{condition} is truthy.
@return: A decorator.
"""
def decorator(func):
if condition is True:
call = alternative
elif condition is False:
call = func
else:
raise ValueError(("condition argument to _replaceIf requires a "
"bool, not {}").format(repr(condition)))
@wraps(func)
def wrapped(*args, **kwargs):
return call(*args, **kwargs)
return wrapped
return decorator
__all__ = [
"uniquify", "padTo", "getPluginDirs", "addPluginDir", "sibpath",
"getPassword", "println", "makeStatBar", "OrderedDict",
"InsensitiveDict", "spewer", "searchupwards", "LineLog",
"raises", "IntervalDifferential", "FancyStrMixin", "FancyEqMixin",
"switchUID", "SubclassableCStringIO", "mergeFunctionMetadata",
"nameToLabel", "uidFromString", "gidFromString", "runAsEffectiveUser",
"untilConcludes", "runWithWarningsSuppressed",
]
if _PY3:
__notported__ = ["SubclassableCStringIO", "makeStatBar"]
for name in __all__[:]:
if name in __notported__:
__all__.remove(name)
del globals()[name]
del name, __notported__
``` |
{
"source": "1170300521/FPN_Pytorch",
"score": 2
} |
#### File: model/rpn/anchor_target_layer_fpn.py
```python
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from model.utils.config import cfg
from .generate_anchors import generate_anchors, generate_anchors_all_pyramids
from .bbox_transform import clip_boxes, bbox_overlaps_batch, bbox_transform_batch
try:
long # Python 2
except NameError:
long = int # Python 3
import pdb
DEBUG = False
class _AnchorTargetLayer_FPN(nn.Module):
"""
Assign anchors to ground-truth targets. Produces anchor classification
labels and bounding-box regression targets.
"""
def __init__(self, feat_stride, scales, ratios):
super(_AnchorTargetLayer_FPN, self).__init__()
self._anchor_ratios = ratios
self._feat_stride = feat_stride
self._fpn_scales = np.array(cfg.FPN_ANCHOR_SCALES)
self._fpn_feature_strides = np.array(cfg.FPN_FEAT_STRIDES)
self._fpn_anchor_stride = cfg.FPN_ANCHOR_STRIDE
# allow boxes to sit over the edge by a small amount
self._allowed_border = 0 # default is 0
def forward(self, input):
# Algorithm:
#
# for each (H, W) location i
# generate 9 anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the 9 anchors
# filter out-of-image anchors
scores = input[0]
gt_boxes = input[1]
im_info = input[2]
num_boxes = input[3]
feat_shapes = input[4]
# NOTE: need to change
# height, width = scores.size(2), scores.size(3)
height, width = 0, 0
batch_size = gt_boxes.size(0)
anchors = torch.from_numpy(generate_anchors_all_pyramids(self._fpn_scales, self._anchor_ratios,
feat_shapes, self._fpn_feature_strides, self._fpn_anchor_stride)).type_as(scores)
total_anchors = anchors.size(0)
keep = ((anchors[:, 0] >= -self._allowed_border) &
(anchors[:, 1] >= -self._allowed_border) &
(anchors[:, 2] < long(im_info[0][1]) + self._allowed_border) &
(anchors[:, 3] < long(im_info[0][0]) + self._allowed_border))
inds_inside = torch.nonzero(keep).view(-1)
# keep only inside anchors
anchors = anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is dont care
labels = gt_boxes.new(batch_size, inds_inside.size(0)).fill_(-1)
bbox_inside_weights = gt_boxes.new(batch_size, inds_inside.size(0)).zero_()
bbox_outside_weights = gt_boxes.new(batch_size, inds_inside.size(0)).zero_()
overlaps = bbox_overlaps_batch(anchors, gt_boxes)
max_overlaps, argmax_overlaps = torch.max(overlaps, 2)
gt_max_overlaps, _ = torch.max(overlaps, 1)
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
gt_max_overlaps[gt_max_overlaps==0] = 1e-5
keep = torch.sum(overlaps.eq(gt_max_overlaps.view(batch_size,1,-1).expand_as(overlaps)), 2)
if torch.sum(keep) > 0:
labels[keep>0] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
sum_fg = torch.sum((labels == 1).int(), 1)
sum_bg = torch.sum((labels == 0).int(), 1)
for i in range(batch_size):
# subsample positive labels if we have too many
if sum_fg[i] > num_fg:
fg_inds = torch.nonzero(labels[i] == 1).view(-1)
                # torch.randperm seems to have a bug in the multi-GPU setting that causes a segfault.
# See https://github.com/pytorch/pytorch/issues/1868 for more details.
# use numpy instead.
#rand_num = torch.randperm(fg_inds.size(0)).type_as(gt_boxes).long()
rand_num = torch.from_numpy(np.random.permutation(fg_inds.size(0))).type_as(gt_boxes).long()
disable_inds = fg_inds[rand_num[:fg_inds.size(0)-num_fg]]
labels[i][disable_inds] = -1
num_bg = cfg.TRAIN.RPN_BATCHSIZE - sum_fg[i]
# subsample negative labels if we have too many
if sum_bg[i] > num_bg:
bg_inds = torch.nonzero(labels[i] == 0).view(-1)
#rand_num = torch.randperm(bg_inds.size(0)).type_as(gt_boxes).long()
rand_num = torch.from_numpy(np.random.permutation(bg_inds.size(0))).type_as(gt_boxes).long()
disable_inds = bg_inds[rand_num[:bg_inds.size(0)-num_bg]]
labels[i][disable_inds] = -1
offset = torch.arange(0, batch_size)*gt_boxes.size(1)
argmax_overlaps = argmax_overlaps + offset.view(batch_size, 1).type_as(argmax_overlaps)
bbox_targets = _compute_targets_batch(anchors, gt_boxes.view(-1,5)[argmax_overlaps.view(-1), :].view(batch_size, -1, 5))
# use a single value instead of 4 values for easy index.
bbox_inside_weights[labels==1] = cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS[0]
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
num_examples = torch.sum(labels[i] >= 0).float()
positive_weights = 1.0 / num_examples
negative_weights = 1.0 / num_examples
#print(positive_weights)
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
            # positive_weights / negative_weights must be defined on this branch
            # too; the following mirrors the standard Faster R-CNN weighting
            # scheme (an assumed reconstruction, as the original left it blank).
            positive_weights = cfg.TRAIN.RPN_POSITIVE_WEIGHT / torch.sum(labels == 1).float()
            negative_weights = (1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) / torch.sum(labels == 0).float()
bbox_outside_weights[labels == 1] = positive_weights
bbox_outside_weights[labels == 0] = negative_weights
labels = _unmap(labels, total_anchors, inds_inside, batch_size, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, batch_size, fill=0)
bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, batch_size, fill=0)
bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, batch_size, fill=0)
outputs = []
# labels = labels.view(batch_size, height, width, A).permute(0,3,1,2).contiguous()
# labels = labels.view(batch_size, 1, A * height, width)
outputs.append(labels)
# bbox_targets = bbox_targets.view(batch_size, height, width, A*4).permute(0,3,1,2).contiguous()
outputs.append(bbox_targets)
# anchors_count = bbox_inside_weights.size(1)
# bbox_inside_weights = bbox_inside_weights.view(batch_size,anchors_count,1).expand(batch_size, anchors_count, 4)
# bbox_inside_weights = bbox_inside_weights.contiguous().view(batch_size, height, width, 4*A)\
# .permute(0,3,1,2).contiguous()
outputs.append(bbox_inside_weights)
# bbox_outside_weights = bbox_outside_weights.view(batch_size,anchors_count,1).expand(batch_size, anchors_count, 4)
# bbox_outside_weights = bbox_outside_weights.contiguous().view(batch_size, height, width, 4*A)\
# .permute(0,3,1,2).contiguous()
outputs.append(bbox_outside_weights)
return outputs
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _unmap(data, count, inds, batch_size, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 2:
ret = torch.Tensor(batch_size, count).fill_(fill).type_as(data)
ret[:, inds] = data
else:
ret = torch.Tensor(batch_size, count, data.size(2)).fill_(fill).type_as(data)
ret[:, inds,:] = data
return ret
def _compute_targets_batch(ex_rois, gt_rois):
"""Compute bounding-box regression targets for an image."""
return bbox_transform_batch(ex_rois, gt_rois[:, :, :4])
``` |
{
"source": "1170300521/RCCF",
"score": 2
} |
#### File: 1170300521/RCCF/test_ref.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
from models.networks.pose_dla_dcn import get_ref_net as get_dla_ref_net
def main(opt):
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
Dataset = get_dataset(opt.dataset, opt.task)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
#train_dataset = Dataset(opt, 'train')
val_dataset = Dataset(opt, 'val')
print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Creating model...')
model = get_dla_ref_net(num_layers=opt.arch.split("_")[-1], heads=opt.heads, vocab_size=val_dataset.vocab_size, head_conv=opt.head_conv, use_aux=opt.use_aux)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
if opt.load_model != '':
model, optimizer, start_epoch, best = load_model(
model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
print("BEST ACC: ", best)
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
_, preds = trainer.val(0, val_loader)
val_loader.dataset.run_eval_ref(preds)
if __name__ == '__main__':
opt = opts().parse()
main(opt)
``` |
{
"source": "1170300521/Real-time-Global-Inference-Network",
"score": 2
} |
#### File: Real-time-Global-Inference-Network/data/prepare_c01_flickr_splits.py
```python
from typing import Dict, List, Any
from ds_prep_utils import Cft, ID, BaseCSVPrepare, DF
from dataclasses import dataclass
from yacs.config import CfgNode as CN
from pathlib import Path
import json
import pandas as pd
import spacy
from tqdm import tqdm
from collections import Counter
import numpy as np
import copy
import pickle
nlp = spacy.load("en_core_web_sm")
np.random.seed(5)
class FlickrUnseenWordsCSVPrepare(BaseCSVPrepare):
def after_init(self):
self.flickr_ann_file = self.ds_root.parent / 'all_annot_new.json'
self.flickr_ann = None
self.load_annotations()
def load_annotations(self):
if self.flickr_ann is None:
self.flickr_ann = json.load(open(self.flickr_ann_file))
return pd.DataFrame(self.flickr_ann)
def get_annotations(self):
return
def get_query_word_list(self):
self.query_word_lemma_file = self.ds_root / 'query_word_lemma_counter.json'
if not self.query_word_lemma_file.exists():
query_word_list = []
for ind, grnd_dict in enumerate(tqdm(self.flickr_ann)):
queries = grnd_dict['query']
for query in queries:
tmp_query = nlp(query)
query_word_list += [t.lemma_ for t in tmp_query]
query_word_counter = Counter(query_word_list)
json.dump(query_word_counter, open(
self.query_word_lemma_file, 'w'))
return Counter(json.load(open(self.query_word_lemma_file)))
def create_exclude_include_list(self):
self.exclude_include_list_file = self.ds_root / 'inc_exc_word_list.json'
if not self.exclude_include_list_file.exists():
self.load_annotations()
queries_lemma_count = self.get_query_word_list()
# create include list
qmost_common = queries_lemma_count.most_common(500)
include_list = [q[0] for q in qmost_common]
# exclude list
remaining_list = [
r for r in queries_lemma_count if r not in set(include_list)]
to_include_prob = 0.7
num_to_incl = int(to_include_prob * len(remaining_list))
id_list = np.random.permutation(len(remaining_list))
to_include = id_list[:num_to_incl]
to_exclude = id_list[num_to_incl:]
include_list += [remaining_list[t] for t in to_include]
exclude_list = [remaining_list[t] for t in to_exclude]
out_dict = {'exclude_list': exclude_list,
'include_list': include_list}
json.dump(out_dict, self.exclude_include_list_file.open('w'))
return json.load(self.exclude_include_list_file.open('r'))
def get_trn_val_test_ids(self, output_annot=None):
inc_excl_lists = self.create_exclude_include_list()
incl_set = inc_excl_lists['include_list']
excl_set = inc_excl_lists['exclude_list']
test_ids_file = self.ds_root / 'test_ids.pkl'
new_output_annot_file = self.ds_root / 'test_output_annot.pkl'
if not test_ids_file.exists():
test_ids = []
new_output_annot = []
for ind, grnd_dict in enumerate(tqdm(self.flickr_ann)):
queries = grnd_dict['query']
qs_to_use = []
for query in queries:
tmp_query = nlp(query)
last_idx = -1
qu = tmp_query[last_idx]
while not len(qu.text) > 1:
print('why', qu.text)
try:
last_idx -= 1
qu = tmp_query[last_idx]
except IndexError:
print('noope')
break
if not (qu.lemma_ in incl_set):
assert qu.lemma_ in excl_set
qs_to_use.append(query)
if len(qs_to_use) > 0:
qs_to_use = list(set(qs_to_use))
grnd_dict1 = copy.deepcopy(grnd_dict)
grnd_dict1['query'] = qs_to_use
grnd_dict1['split_type'] = 'test'
new_output_annot.append(grnd_dict1)
test_ids.append(grnd_dict1['img_id'])
pickle.dump(test_ids, test_ids_file.open('wb'))
pickle.dump(new_output_annot, new_output_annot_file.open('wb'))
test_ids = pickle.load(test_ids_file.open('rb'))
new_output_annot = pickle.load(new_output_annot_file.open('rb'))
flickr_df = pd.DataFrame(self.flickr_ann)
all_ids = set(list(flickr_df.img_id))
trn_val_ids = list(all_ids - set(test_ids))
to_include_prob = 0.8
num_to_incl = int(to_include_prob * len(trn_val_ids))
id_list = np.random.permutation(len(trn_val_ids))
trids = id_list[:num_to_incl]
vlids = id_list[num_to_incl:]
trn_ids = [trn_val_ids[trid] for trid in trids]
val_ids = [trn_val_ids[vlid] for vlid in vlids]
for ind, grnd_dict in enumerate(tqdm(self.flickr_ann)):
if grnd_dict['img_id'] in trn_val_ids:
queries = grnd_dict['query']
# if not all([nlp(q)[-1].lemma_ in incl_set for q in queries]):
# continue
new_output_annot.append(grnd_dict)
return trn_ids, val_ids, test_ids, pd.DataFrame(new_output_annot)
def save_annot_to_format(self):
"""
Saves the annotations to the following csv format
img_name,x1,y1,x2,y2,query(ies)
"""
output_annot = self.load_annotations()
trn_ids, val_ids, test_ids, output_annot = self.get_trn_val_test_ids(
output_annot)
output_annot = output_annot[['img_id', 'bbox', 'query']]
trn_df = self.get_df_from_ids(
trn_ids, output_annot, split_type='train')
trn_df.to_csv(self.csv_root / 'train.csv', index=False, header=True)
val_df = self.get_df_from_ids(val_ids, output_annot)
val_df.to_csv(self.csv_root / 'val.csv', index=False, header=True)
if test_ids is not None:
test_df = self.get_df_from_ids(test_ids, output_annot)
test_df.to_csv(self.csv_root / 'test.csv',
index=False, header=True)
if __name__ == '__main__':
ds_cfg = json.load(open('./data/ds_prep_config.json'))
fl0 = FlickrUnseenWordsCSVPrepare(ds_cfg['flickr_unseen_words'])
# fl0.create_exclude_include_list()
fl0.save_annot_to_format()
``` |
{
"source": "1170300521/Sound-of-Pixels",
"score": 3
} |
#### File: Sound-of-Pixels/scripts/preprocess_videos.py
```python
import os
import os.path as osp
import cv2
import moviepy.editor as mp
video_root = "./data/video"
audio_root = "./data/audio"
frame_root = "./data/frames"
def get_frames(instr, video, fps=8):
"""
extract frames from video
"""
cap = cv2.VideoCapture(osp.join(video_root, instr, video))
i = 0
count = 1
img_path = osp.join(frame_root, instr, video)
if not osp.exists(img_path):
os.makedirs(img_path)
else:
cv2.destroyAllWindows()
return
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if i >= fps - 1:
img_name = (6 - len(str(count)))*'0' + str(count) + '.jpg'
cv2.imwrite(osp.join(img_path, img_name), frame)
count += 1
i = 0
continue
i += 1
cap.release()
cv2.destroyAllWindows()
def get_audio(instr, video, hz=11025):
"""
Extract audio from video
"""
audio_path = osp.join(audio_root, instr)
if not osp.exists(audio_path):
os.makedirs(audio_path)
audio_name = video.split(".")[0] + ".mp3"
if osp.isfile(osp.join(audio_path,audio_name)):
return
clip = mp.VideoFileClip(osp.join(video_root, instr, video))
clip.audio.write_audiofile(osp.join(audio_path, audio_name))
def main():
for instr in os.listdir(video_root):
for video in os.listdir(osp.join(video_root,instr)):
get_frames(instr, video)
get_audio(instr, video)
print("Complete " + instr)
print("Complete datset preprocessing!")
if __name__ == "__main__":
main()
``` |
{
"source": "1170300521/StyleGAN-nada",
"score": 2
} |
#### File: ZSSGAN/criteria/clip_loss.py
```python
import pickle
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import numpy as np
import os
import clip
from PIL import Image
from sklearn.decomposition import PCA
from ZSSGAN.utils.text_templates import imagenet_templates, part_templates, imagenet_templates_small
class DirectionLoss(torch.nn.Module):
def __init__(self, loss_type='mse'):
super(DirectionLoss, self).__init__()
self.loss_type = loss_type
self.loss_func = {
'mse': torch.nn.MSELoss,
'cosine': torch.nn.CosineSimilarity,
'mae': torch.nn.L1Loss
}[loss_type]()
def forward(self, x, y):
if self.loss_type == "cosine":
return 1. - self.loss_func(x, y)
return self.loss_func(x, y)
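# Illustrative note: with loss_type 'cosine', DirectionLoss returns
# 1 - cosine_similarity(x, y), so identical directions give zero loss; 'mse'
# and 'mae' defer to the underlying torch losses.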
class CLIPLoss(torch.nn.Module):
def __init__(self, device, lambda_direction=1., lambda_patch=0., lambda_global=0., \
lambda_manifold=0., lambda_texture=0., patch_loss_type='mae', \
direction_loss_type='cosine', clip_model='ViT-B/32', args=None):
super(CLIPLoss, self).__init__()
self.device = device
self.args = args
self.model_name = clip_model
self.model, clip_preprocess = clip.load(clip_model, device=self.device)
# self.model.requires_grad_(False)
self.clip_preprocess = clip_preprocess
self.preprocess = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0])] + # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].
clip_preprocess.transforms[:2] + # to match CLIP input scale assumptions
clip_preprocess.transforms[4:]) # + skip convert PIL to tensor
self.target_direction = None
self.patch_text_directions = None
self.patch_loss = DirectionLoss(patch_loss_type)
self.direction_loss = DirectionLoss(direction_loss_type)
self.patch_direction_loss = torch.nn.CosineSimilarity(dim=2)
self.lambda_global = lambda_global
self.lambda_patch = lambda_patch
self.lambda_direction = lambda_direction
self.lambda_manifold = lambda_manifold
self.lambda_texture = lambda_texture
self.alpha = args.alpha
self.src_text_features = None
self.target_text_features = None
self.angle_loss = torch.nn.L1Loss()
self.id_loss = DirectionLoss('cosine')
# self.model_cnn, preprocess_cnn = clip.load("RN50", device=self.device)
# self.preprocess_cnn = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0])] + # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].
# preprocess_cnn.transforms[:2] + # to match CLIP input scale assumptions
# preprocess_cnn.transforms[4:]) # + skip convert PIL to tensor
self.texture_loss = torch.nn.MSELoss()
self.pca_components = None
self.condition = None
self.pca_threshold = None
self.clip_mean = None
self.pca = self.get_pca()
def get_pca(self):
orig_sample_path = os.path.join('../weights/clip_mean/', f"{self.args.dataset}_{self.model_name[-2::]}_samples.pkl")
with open(orig_sample_path, 'rb') as f:
X = pickle.load(f)
X = np.array(X)
self.samples = X
self.clip_mean = torch.from_numpy(np.mean(X, axis=0)).float().to(self.device)
# Define a pca and train it
pca = PCA(n_components=self.args.pca_dim)
pca.fit(X)
        # Get the standard deviation of the samples and set a threshold for each dimension
# threshold = np.sqrt(pca.explained_variance_) * self.alpha
# self.pca_threshold = torch.from_numpy(threshold).float().to(self.device)
# self.pca_components = torch.from_numpy(pca.components_).float().to(self.device)
return pca
def tokenize(self, strings: list):
return clip.tokenize(strings).to(self.device)
def encode_text(self, tokens: list) -> torch.Tensor:
return self.model.encode_text(tokens)
def encode_images(self, images: torch.Tensor) -> torch.Tensor:
images = self.preprocess(images).to(self.device)
return self.model.encode_image(images)
def encode_images_with_cnn(self, images: torch.Tensor) -> torch.Tensor:
images = self.preprocess_cnn(images).to(self.device)
return self.model_cnn.encode_image(images)
def distance_with_templates(self, img: torch.Tensor, class_str: str, templates=imagenet_templates) -> torch.Tensor:
text_features = self.get_text_features(class_str, templates)
image_features = self.get_image_features(img)
similarity = image_features @ text_features.T
return 1. - similarity
def get_text_features(self, class_str: str, templates=imagenet_templates, norm: bool = True) -> torch.Tensor:
template_text = self.compose_text_with_templates(class_str, templates)
tokens = clip.tokenize(template_text).to(self.device)
text_features = self.encode_text(tokens).detach()
if norm:
text_features /= text_features.norm(dim=-1, keepdim=True)
return text_features
def get_image_features(self, img: torch.Tensor, norm: bool = True) -> torch.Tensor:
image_features = self.encode_images(img)
if norm:
image_features /= image_features.clone().norm(dim=-1, keepdim=True)
return image_features
def get_similar_img(self, tgt_vec):
tgt = tgt_vec[0].cpu().numpy()
sim = np.dot(self.samples, tgt)
orders = np.argsort(sim)[::-1]
print("Orders: {}, Similarities: {}".format(orders[0:20], sim[orders[0:20]]))
src = self.samples[orders[0:1]]
src = src * sim[orders[0:1], None]
src = torch.from_numpy(src).to(tgt_vec.device, dtype=tgt_vec.dtype).mean(axis=0, keepdim=True)
# src /= src.norm(dim=-1, keepdim=True)
return src
def supress_normal_features(self, vec, is_target=False):
        '''
        Suppress normal features of the given vector based on the original StyleGAN.
        Params:
            vec: the vector to be suppressed
        '''
if self.args.supress == 0:
return vec
elif self.args.supress == 1:
if self.condition is None or isinstance(self.condition, np.ndarray):
self.condition = torch.from_numpy(self.condition).unsqueeze(0).float().to(vec.device)
print("The number of style and special attrs: ", self.condition.sum())
return vec
elif self.args.supress == 2:
if self.clip_mean is not None:
vec = vec - self.clip_mean
vec_pca = vec @ self.pca_components.t()
if self.condition is None:
self.condition = (vec_pca[0].abs() > self.pca_threshold).unsqueeze(0).float()
return vec_pca * self.condition if is_target else vec_pca
else:
raise RuntimeError(f"The choice {self.args.supress} is illegal! Please choose it among 0, 1, 2.")
def keep_normal_features(self, vec):
'''
Keep normal features of the given vector based on original StyleGAN
'''
if self.args.supress == 0:
return vec * 0
elif self.args.supress == 1:
return vec * (1 - self.condition)
elif self.args.supress == 2:
if self.clip_mean is not None:
vec = vec - self.clip_mean
vec_pca = vec @ self.pca_components.t()
# return vec_pca * (1 - self.condition)
return vec_pca
else:
raise RuntimeError(f"The choice {self.args.supress} is illegal! Please choose it among 0, 1, 2.")
def get_pca_features(self, vec):
'''
Convert CLIP features to PCA features
'''
if self.clip_mean is None:
return vec
vec = vec - self.clip_mean
return vec @ self.pca_components.t()
def compute_text_direction(self, source_class: str, target_class: str) -> torch.Tensor:
source_features = self.clip_mean
target_features = self.get_text_features(target_class)
# Supress normal features and keep special features in the text feature
# target_features = self.supress_normal_features(target_features, is_target=True)
# source_features = self.supress_normal_features(source_features, is_target=True)
# source_features = 0
text_direction = (target_features - source_features).mean(axis=0, keepdim=True)
# text_direction = target_features.mean(axis=0, keepdim=True)
text_direction /= text_direction.norm(dim=-1, keepdim=True)
return text_direction
def get_raw_img_features(self, imgs: str):
pre_i = self.clip_preprocess(Image.open(imgs)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(pre_i)
encoding /= encoding.norm(dim=-1, keepdim=True)
return encoding
def compute_img2img_direction(self, source_images: torch.Tensor, target_images: list) -> torch.Tensor:
with torch.no_grad():
target_encodings = []
for target_img in target_images:
preprocessed = self.clip_preprocess(Image.open(target_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
target_encodings.append(encoding)
target_encoding = torch.cat(target_encodings, axis=0)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
target_encoding = target_encoding.mean(dim=0, keepdim=True)
# src_encoding = self.get_image_features(source_images)
# src_encoding = src_encoding.mean(dim=0, keepdim=True)
src_encoding = self.clip_mean
# src_encoding = self.supress_normal_features(src_encoding, is_target=True)
direction = target_encoding - src_encoding
direction /= direction.norm(dim=-1, keepdim=True)
return direction
def compute_corresponding_img2img_direction(self, source_images: list, target_images: list) -> torch.Tensor:
with torch.no_grad():
target_encodings = []
for target_img in target_images:
preprocessed = self.clip_preprocess(Image.open(target_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
target_encodings.append(encoding)
target_encoding = torch.cat(target_encodings, axis=0)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
target_encoding = target_encoding.mean(dim=0, keepdim=True)
source_encodings = []
for source_img in source_images:
preprocessed = self.clip_preprocess(Image.open(source_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
source_encodings.append(encoding)
source_encoding = torch.cat(source_encodings, axis=0)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
source_encoding = source_encoding.mean(dim=0, keepdim=True)
direction = target_encoding - source_encoding
direction /= direction.norm(dim=-1, keepdim=True)
return direction
def set_text_features(self, source_class: str, target_class: str) -> None:
source_features = self.get_text_features(source_class).mean(axis=0, keepdim=True)
self.src_text_features = source_features / source_features.norm(dim=-1, keepdim=True)
target_features = self.get_text_features(target_class).mean(axis=0, keepdim=True)
self.target_text_features = target_features / target_features.norm(dim=-1, keepdim=True)
def clip_angle_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.src_text_features is None:
self.set_text_features(source_class, target_class)
cos_text_angle = self.target_text_features @ self.src_text_features.T
text_angle = torch.acos(cos_text_angle)
src_img_features = self.get_image_features(src_img).unsqueeze(2)
target_img_features = self.get_image_features(target_img).unsqueeze(1)
cos_img_angle = torch.clamp(target_img_features @ src_img_features, min=-1.0, max=1.0)
img_angle = torch.acos(cos_img_angle)
text_angle = text_angle.unsqueeze(0).repeat(img_angle.size()[0], 1, 1)
cos_text_angle = cos_text_angle.unsqueeze(0).repeat(img_angle.size()[0], 1, 1)
return self.angle_loss(cos_img_angle, cos_text_angle)
def compose_text_with_templates(self, text: str, templates=imagenet_templates) -> list:
return [template.format(text) for template in templates]
def clip_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.target_direction is None:
self.target_direction = self.compute_text_direction(source_class, target_class)
src_encoding = self.get_image_features(src_img)
# src_encoding = self.supress_normal_features(src_encoding, is_target=True)
target_encoding = self.get_image_features(target_img)
# target_encoding = self.supress_normal_features(target_encoding, is_target=True)
edit_direction = (target_encoding - src_encoding)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
return self.direction_loss(edit_direction, self.target_direction).mean()
def pca_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.target_direction is None:
self.target_direction = self.compute_text_direction(source_class, target_class)
# if self.args.use_mean:
# src_encoding = self.clip_mean
# else:
src_encoding = self.get_image_features(src_img)
src_encoding = self.get_pca_features(src_encoding)
target_encoding = self.get_image_features(target_img)
target_encoding = self.get_pca_features(target_encoding)
edit_direction = (target_encoding - src_encoding)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
return self.direction_loss(edit_direction, self.target_direction).mean()
def global_clip_loss(self, img: torch.Tensor, text) -> torch.Tensor:
if not isinstance(text, list):
text = [text]
tokens = clip.tokenize(text).to(self.device)
image = self.preprocess(img)
logits_per_image, _ = self.model(image, tokens)
return (1. - logits_per_image / 100).mean()
def adaptive_global_clip_loss(self, img: torch.Tensor, text) -> torch.Tensor:
if self.alpha == 0:
return self.global_clip_loss(img, text)
text_features = self.get_text_features(text, templates=['{}'])
img_features = self.get_image_features(img)
text_features = text_features - self.pca_mean.unsqueeze(0)
text_features = text_features @ self.pca_cov.t()
img_features = img_features - self.pca_mean.unsqueeze(0)
img_features = img_features @ self.pca_cov.t()
logits_per_img = img_features @ text_features.t()
return (1. - logits_per_img).mean()
def random_patch_centers(self, img_shape, num_patches, size):
batch_size, channels, height, width = img_shape
half_size = size // 2
patch_centers = np.concatenate([np.random.randint(half_size, width - half_size, size=(batch_size * num_patches, 1)),
np.random.randint(half_size, height - half_size, size=(batch_size * num_patches, 1))], axis=1)
return patch_centers
def generate_patches(self, img: torch.Tensor, patch_centers, size):
batch_size = img.shape[0]
num_patches = len(patch_centers) // batch_size
half_size = size // 2
patches = []
for batch_idx in range(batch_size):
for patch_idx in range(num_patches):
center_x = patch_centers[batch_idx * num_patches + patch_idx][0]
center_y = patch_centers[batch_idx * num_patches + patch_idx][1]
patch = img[batch_idx:batch_idx+1, :, center_y - half_size:center_y + half_size, center_x - half_size:center_x + half_size]
patches.append(patch)
patches = torch.cat(patches, axis=0)
return patches
def patch_scores(self, img: torch.Tensor, class_str: str, patch_centers, patch_size: int) -> torch.Tensor:
parts = self.compose_text_with_templates(class_str, part_templates)
tokens = clip.tokenize(parts).to(self.device)
text_features = self.encode_text(tokens).detach()
patches = self.generate_patches(img, patch_centers, patch_size)
image_features = self.get_image_features(patches)
similarity = image_features @ text_features.T
return similarity
def clip_patch_similarity(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
patch_size = 196 #TODO remove magic number
patch_centers = self.random_patch_centers(src_img.shape, 4, patch_size) #TODO remove magic number
src_scores = self.patch_scores(src_img, source_class, patch_centers, patch_size)
target_scores = self.patch_scores(target_img, target_class, patch_centers, patch_size)
return self.patch_loss(src_scores, target_scores)
def patch_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.patch_text_directions is None:
src_part_classes = self.compose_text_with_templates(source_class, part_templates)
target_part_classes = self.compose_text_with_templates(target_class, part_templates)
parts_classes = list(zip(src_part_classes, target_part_classes))
self.patch_text_directions = torch.cat([self.compute_text_direction(pair[0], pair[1]) for pair in parts_classes], dim=0)
patch_size = 510 # TODO remove magic numbers
patch_centers = self.random_patch_centers(src_img.shape, 1, patch_size)
patches = self.generate_patches(src_img, patch_centers, patch_size)
src_features = self.get_image_features(patches)
patches = self.generate_patches(target_img, patch_centers, patch_size)
target_features = self.get_image_features(patches)
edit_direction = (target_features - src_features)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
cosine_dists = 1. - self.patch_direction_loss(edit_direction.unsqueeze(1), self.patch_text_directions.unsqueeze(0))
patch_class_scores = cosine_dists * (edit_direction @ self.patch_text_directions.T).softmax(dim=-1)
return patch_class_scores.mean()
def cnn_feature_loss(self, src_img: torch.Tensor, target_img: torch.Tensor) -> torch.Tensor:
src_features = self.encode_images_with_cnn(src_img)
target_features = self.encode_images_with_cnn(target_img)
return self.texture_loss(src_features, target_features)
def forward(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str, texture_image: torch.Tensor = None):
clip_loss = 0.0
if self.lambda_global:
clip_loss += self.lambda_global * self.global_clip_loss(target_img, [f"a {target_class}"])
if self.lambda_patch:
clip_loss += self.lambda_patch * self.patch_directional_loss(src_img, source_class, target_img, target_class)
if self.lambda_direction:
clip_loss += self.lambda_direction * self.clip_directional_loss(src_img, source_class, target_img, target_class)
if self.lambda_manifold:
clip_loss += self.lambda_manifold * self.clip_angle_loss(src_img, source_class, target_img, target_class)
# if self.lambda_texture and (texture_image is not None):
if self.lambda_texture:
# clip_loss += self.lambda_texture * self.cnn_feature_loss(texture_image, target_img)
clip_loss += self.lambda_texture * self.cnn_feature_loss(src_img, target_img)
return clip_loss
```
#### File: ZSSGAN/criteria/psp_loss.py
```python
import os
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import numpy as np
from ZSSGAN.criteria.clip_loss import DirectionLoss
from ZSSGAN.model.psp import pSp
from utils.svm import train_boundary
from sklearn import svm
def adjust_sigmoid(x, beta=1):
return torch.sigmoid(beta * x)
class PSPLoss(torch.nn.Module):
def __init__(self, device, args=None):
super(PSPLoss, self).__init__()
self.device = device
self.args = args
# Moving Average Coefficient
self.beta = 0.02
self.source_mean, self.svm_source = self.get_source_mean()
self.target_mean = self.source_mean
self.target_set = []
self.svm_target = []
self.target_pos = 0
self.model = pSp(self.args.psp_path, device, output_size=args.size, has_decoder=False)
self.model.to(device)
self.psp_preprocess = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0]), # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].]
transforms.Resize((256, 256)),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
self.target_direction = self.get_target_direction()
self.direction_loss = DirectionLoss('cosine')
self.iter_diff = []
self.iter_mean = []
self.iter_sim = []
self.svm_C = 1
def get_source_mean(self):
source_path = "/home/ybyb/CODE/StyleGAN-nada/results/invert/A_gen_w.npy"
source_codes = np.load(source_path)
unmasked_num = 18
if self.args.num_mask_last > 0:
unmasked_num = 18 - self.args.num_mask_last
unmasked_num = max(unmasked_num, 1)
source_codes = source_codes.reshape((-1, 18, 512))[:, 0:unmasked_num]
source_codes = torch.from_numpy(source_codes).to(self.device).float().view(-1, unmasked_num*512)
return source_codes.mean(dim=0, keepdim=True), source_codes.cpu().numpy()
def get_target_direction(self, normalize=True):
# delta_w_path = os.path.join(self.args.output_dir, 'w_delta.npy')
delta_w_path = os.path.join(self.args.output_dir, f"{self.args.delta_w_type}_w.npy")
if os.path.exists(delta_w_path):
delta_w = np.load(delta_w_path)
else:
delta_w = np.ones((18, 512))
unmasked_num = 18
if self.args.num_mask_last > 0:
unmasked_num = 18 - self.args.num_mask_last
unmasked_num = max(unmasked_num, 1)
delta_w = delta_w[0: unmasked_num]
delta_w = torch.from_numpy(delta_w).to(self.device).float().flatten()
num_channel = len(delta_w)
order = delta_w.abs().argsort()
chosen_order = order[0:int(self.args.psp_alpha * num_channel)]
# chosen_order = order[-int(self.args.psp_alpha * num_channel)::] # Choose most important channels
self.cond = torch.zeros(num_channel).to(self.device)
self.cond[chosen_order] = 1
self.cond = self.cond.unsqueeze(0)
print(f"supress_num / overall = {self.cond.sum().item()} / {unmasked_num * 512}")
if normalize:
delta_w /= delta_w.clone().norm(dim=-1, keepdim=True)
return delta_w.unsqueeze(0)
def get_image_features(self, images, norm=False):
images = self.psp_preprocess(images)
encodings, invert_img = self.model(images)
# encodings = encodings[:, -1:]
encodings = encodings.view(images.size(0), -1)
# TODO: different from clip encodings, normalize may be harmful
if norm:
encodings /= encodings.clone().norm(dim=-1, keepdim=True)
return encodings, invert_img
def get_conditional_mask(self):
if self.args.psp_loss_type == "multi_stage":
return self.cond, None
elif self.args.psp_loss_type == "dynamic":
if self.args.delta_w_type == 'mean':
delta_w = self.target_mean - self.source_mean
else:
delta_w = self.args.svm_boundary.detach()[:, 0:-1]
else:
raise RuntimeError(f"No psp loss whose type is {self.psp_loss_type} !")
delta_w = delta_w.flatten()
num_channel = len(delta_w)
order = delta_w.abs().argsort()
chosen_order = order[0:int(self.args.psp_alpha * num_channel)]
# chosen_order = order[-int(self.args.psp_alpha * num_channel)::] # Choose most important channels
cond = torch.zeros(num_channel).to(self.device)
cond[chosen_order] = 1
cond = cond.unsqueeze(0)
delta_w = delta_w.unsqueeze(0)
return cond, delta_w
def update_target_set(self, vec):
if len(self.target_set) < self.args.sliding_window_size:
self.target_set.append(vec.mean(0).detach())
else:
self.target_set[self.target_pos] = vec.mean(0).detach()
self.target_pos = (self.target_pos + 1) % self.args.sliding_window_size
def multi_stage_loss(self, target_encodings, source_encodings):
if self.cond is not None:
target_encodings = self.cond * target_encodings
source_encodings = self.cond * source_encodings
return F.l1_loss(target_encodings, source_encodings)
def constrained_loss(self, cond):
return torch.abs(cond.mean(1)-self.args.psp_alpha).mean()
def prepare_svm_samples(self, target_encodings, source_encodings):
pos_vecs = target_encodings.detach().cpu().numpy()
neg_vecs = source_encodings.detach().cpu().numpy()
labels = np.concatenate([np.ones(len(pos_vecs)), np.zeros(len(neg_vecs))], axis=0)
vecs = np.concatenate([pos_vecs, neg_vecs], axis=0)
return vecs, labels
def get_svm_boundary(self, tgt_num, src_num=1000):
pos_vecs = self.svm_target[-tgt_num:]
neg_ids = np.arange(len(self.svm_source))
np.random.shuffle(neg_ids)
neg_vecs = self.svm_source[neg_ids[0:src_num]]
labels = np.concatenate([np.ones(len(pos_vecs)), np.zeros(len(neg_vecs))], axis=0)
clf = svm.SVC(kernel='linear')
classifier = clf.fit(np.concatenate([pos_vecs, neg_vecs], axis=0), labels)
return classifier.coef_
def cosine_similarity(self, vec1, vec2):
vec1 = vec1.flatten()
vec2 = vec2.flatten()
vec1 = vec1 / vec1.norm()
vec2 = vec2 / vec2.norm()
return (vec1 * vec2).sum()
def update_w(self, target_encodings, source_encodings, iters=0):
if self.args.delta_w_type == 'mean':
# Compute new mean direction of target domain
# TODO: Option 1: Moving Average
# self.target_mean = self.beta * target_encodings.mean(0, keepdim=True).detach() + \
# (1 - self.beta) * self.target_mean
# TODO: Option 2: Sliding Window
self.update_target_set(target_encodings)
self.target_mean = torch.stack(self.target_set).mean(0, keepdim=True)
# Get the editing direction
delta_w = self.target_mean - self.source_mean
elif self.args.delta_w_type == 'svm':
if iters > 0:
self.svm_target.extend(target_encodings.detach().cpu().numpy())
# if len(self.svm_target) >= self.args.batch * self.args.sliding_window_size and \
# self.args.svm_boundary.sum() == 0:
# svm_a = train_boundary(np.array(self.svm_target), np.array(self.svm_source)[0:10000], split_ratio=1)
# svm_a = torch.from_numpy(svm_a).float().to(self.device).view(1, -1)
# svm_a.requires_grad = True
# self.args.svm_boundary = svm_a
delta_w = self.args.svm_boundary.detach()[:, 0:-1]
# TODO: Optimize SVM by sklearn every iteration
# if len(self.svm_target) >= self.args.batch * self.args.sliding_window_size and \
# iters % 10 == 0:
# delta_w = self.get_svm_boundary(tgt_num=self.args.batch * self.args.sliding_window_size, \
# src_num=5000)
# delta_w = torch.from_numpy(delta_w).float().view(1, -1).to(self.device)
# delta_w = delta_w / delta_w.norm()
# self.args.svm_boundary = delta_w
# delta_w = self.args.svm_boundary
return delta_w
def svm_loss(self, target_encodings, source_encodings):
# See target as pos and source as neg, and only update delta_w
batch = len(source_encodings)
source_encodings = -torch.cat([source_encodings.detach(), \
torch.ones(batch, 1, device=self.device)], dim=-1)
target_encodings = torch.cat([target_encodings.detach(), \
torch.ones(batch, 1, device=self.device)], dim=-1)
samples = torch.cat([target_encodings, source_encodings], dim=0).t()
w = self.args.svm_boundary
loss = 1/2 * (w @ w.t()).sum() + self.svm_C * (F.relu(1 - w @ samples)).sum()
return loss
def dynamic_loss(self, target_encodings, source_encodings, delta_w):
# Get the conditional vector to mask special enough channels
delta_w = delta_w.flatten()
num_channel = len(delta_w)
order = delta_w.abs().argsort()
chosen_order = order[0:int(self.args.psp_alpha * num_channel)]
# chosen_order = order[-int(self.args.psp_alpha * num_channel)::] # Choose most important channels
cond = torch.zeros(num_channel).to(self.device)
cond[chosen_order] = 1
cond = cond.unsqueeze(0)
# Get masked encodings
target_encodings = cond * target_encodings
source_encodings = cond * source_encodings
# Update the mean direction of target domain and difference
self.iter_diff.append(torch.abs(cond - self.cond).sum().cpu().item() / len(delta_w))
self.iter_mean.append(cond.mean().cpu().item())
self.iter_sim.append(self.cosine_similarity(delta_w, self.target_direction).sum().cpu().item())
loss = F.l1_loss(target_encodings, source_encodings)
# if self.args.lambda_constrain > 0:
# loss += self.constrained_loss(cond)
return loss
def forward(self, target_imgs, source_imgs, iters=0, return_codes=False):
target_encodings, _ = self.get_image_features(target_imgs)
source_encodings, _ = self.get_image_features(source_imgs)
# Mask w+ codes controlling style and fine details
if self.args.num_mask_last > 0:
keep_num = (18 - self.args.num_mask_last) * 512
target_encodings = target_encodings[:, 0:keep_num]
source_encodings = source_encodings[:, 0:keep_num]
if self.args.psp_loss_type == "multi_stage":
# edit_direction = target_encodings - source_encodings
# theta = (edit_direction.clone() * self.target_direction).sum(dim=-1, keepdim=True)
# return F.l1_loss(edit_direction, theta * self.target_direction)
loss = self.multi_stage_loss(target_encodings, source_encodings)
elif self.args.psp_loss_type == "dynamic":
delta_w = self.update_w(target_encodings, source_encodings, iters=iters)
regular_weight = max(0, \
(iters - self.args.sliding_window_size) / (self.args.iter - self.args.sliding_window_size))
loss = regular_weight * self.dynamic_loss(target_encodings, source_encodings, delta_w=delta_w)
else:
raise RuntimeError(f"No psp loss whose type is {self.psp_loss_type} !")
if return_codes:
return loss, [target_encodings.detach(), source_encodings.detach()]
else:
return loss
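# --- Illustrative sketch (not part of the original file) ---
# A minimal, self-contained example of the channel-masking rule used in
# get_target_direction / dynamic_loss above: the psp_alpha fraction of w+
# channels with the *smallest* |delta_w| is kept (mask = 1) and the rest is
# zeroed, so the loss only constrains the channels that change least between
# the source and target domains. Sizes here are assumptions for illustration.
def _illustrate_channel_mask(psp_alpha=0.5, num_channel=10):
    delta_w = torch.randn(num_channel)
    order = delta_w.abs().argsort()                 # channels sorted by ascending |delta_w|
    chosen = order[0:int(psp_alpha * num_channel)]  # keep the least-changed fraction
    cond = torch.zeros(num_channel)
    cond[chosen] = 1                                # 1 -> channel is constrained by the L1 term
    return cond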
```
#### File: ZSSGAN/model/psp.py
```python
from argparse import Namespace
import matplotlib
matplotlib.use('Agg')
import math
import torch
from torch import nn
from ZSSGAN.model.encoders import psp_encoders
from ZSSGAN.model.sg2_model import Generator
def get_keys(d, name):
if 'state_dict' in d:
d = d['state_dict']
d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
return d_filt
class pSp(nn.Module):
def __init__(self, checkpoint_path, device, output_size=1024, has_decoder=False):
super(pSp, self).__init__()
self.opts = self.set_opts(checkpoint_path)
self.has_decoder = has_decoder
self.device = device
# compute number of style inputs based on the output resolution
self.opts.n_styles = int(math.log(output_size, 2)) * 2 - 2
# Define architecture
self.encoder = self.set_encoder()
if self.has_decoder:
self.decoder = Generator(output_size, 512, 8)
# Load weights if needed
self.load_weights()
def set_encoder(self):
if self.opts.encoder_type == 'GradualStyleEncoder':
encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoW':
encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts)
elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoWPlus':
encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoWPlus(50, 'ir_se', self.opts)
else:
raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type))
return encoder
def load_weights(self):
if self.opts.checkpoint_path is not None:
print('Loading pSp from checkpoint: {}'.format(self.opts.checkpoint_path))
ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)
if self.has_decoder:
self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True)
self.__load_latent_avg(ckpt)
else:
raise RuntimeError(f"There isn't psp encoder in {self.opts.checkpoint_path}")
def forward(self, x, randomize_noise=True):
codes = self.encoder(x)
# normalize with respect to the center of an average face
# if self.opts.start_from_latent_avg:
# if self.opts.learn_in_w:
# codes = codes + self.latent_avg.repeat(codes.shape[0], 1)
# else:
codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)
if self.has_decoder:
images, result_latent = self.decoder([codes],
input_is_latent=True,
randomize_noise=randomize_noise,
return_latents=True)
else:
result_latent = codes
# if resize:
# images = self.face_pool(images)
if self.has_decoder:
return result_latent, images
else:
return result_latent, None
def set_opts(self, opts_path):
opts = torch.load(opts_path, map_location='cpu')['opts']
opts['checkpoint_path'] = opts_path
opts = Namespace(**opts)
return opts
def __load_latent_avg(self, ckpt, repeat=None):
if 'latent_avg' in ckpt:
self.latent_avg = ckpt['latent_avg'].to(self.device)
if repeat is not None:
self.latent_avg = self.latent_avg.repeat(repeat, 1)
else:
self.latent_avg = None
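# --- Illustrative note (not part of the original file) ---
# The number of w+ style vectors follows from the output resolution:
# n_styles = 2 * log2(output_size) - 2, e.g. 18 for a 1024px StyleGAN2.
# A quick sanity-check helper, assuming only the formula used in __init__ above:
def _n_styles(output_size):
    return int(math.log(output_size, 2)) * 2 - 2
# _n_styles(1024) == 18, _n_styles(256) == 14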
``` |
{
"source": "1170300802/Advanced_Algorithms",
"score": 4
} |
#### File: Advanced_Algorithms/Finite_Domain_Sorting/bucket_sort.py
```python
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def sort(inputList, mappingFactor, bucketCount):
buckets = [[] for i in range(bucketCount)]
outputList = [0 for i in inputList]
for e in inputList:
buckets[int(e*mappingFactor)].append(e)
for i in range(bucketCount):
buckets[i].sort()
index = 0
for l in buckets:
for e in l:
outputList[index] = e
index += 1
return outputList
def bucket_sort(inputList):
uniqueValues = len(set(inputList))
return sort(inputList, (uniqueValues-1)/float(max(inputList)), uniqueValues)
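# --- Illustrative usage (not part of the original file) ---
# bucket_sort assumes non-negative numeric input; the mapping factor spreads the
# values over one bucket per distinct value.
if __name__ == '__main__':
    sample = [29, 25, 3, 49, 9, 37, 21, 43]
    print(bucket_sort(sample))  # expected: [3, 9, 21, 25, 29, 37, 43, 49]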
```
#### File: Advanced_Algorithms/General_Sorting/mergesort.py
```python
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import random
import time
def merge (l1, l2):
i = 0
j = 0
newList = []
while i < len(l1) and j < len(l2):
if l1[i] < l2[j]:
newList.append(l1[i])
i = i+1
else:
newList.append(l2[j])
j = j+1
while i < len(l1):
newList.append(l1[i])
i = i+1
while j < len(l2):
newList.append(l2[j])
j = j+1
return newList
def mergeSort (l):
if len(l) <= 1:
return l
    firstHalf = mergeSort(l[:len(l) // 2])
    secondHalf = mergeSort(l[len(l) // 2:])
return merge(firstHalf, secondHalf)
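# --- Illustrative note (not part of the original file) ---
# merge() combines two already-sorted lists in a single linear pass, e.g.
#   merge([1, 4, 7], [2, 3, 9]) -> [1, 2, 3, 4, 7, 9]
# mergeSort() splits the input in half, sorts both halves recursively and then
# merges them, giving O(n log n) comparisons overall.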
# Some tests
if __name__ == '__main__':
size = 1000000
    unsortedList = list(range(size))
    #random.shuffle(unsortedList)
    #unsortedList.reverse()
    t0 = time.time()
    res = mergeSort(unsortedList)
    print(time.time() - t0)
#print res
``` |
{
"source": "1170301027/Advanced_Algorithm",
"score": 3
} |
#### File: src/ApproximateAlgorithm/SetCoverage.py
```python
import pulp
import random
import time
import matplotlib.pyplot as plt
'''
Set cover problem:
• Input: a finite set X and a family F of subsets of X, with X = ∪_{S∈F} S
• Output: C ⊆ F such that
  (1) X = ∪_{S∈C} S
  (2) C is the smallest sub-family satisfying (1), i.e. |C| is minimal.
'''
# Generate experiment data
class generate():
    def __init__(self):
        self.X = []  # the finite set X
        self.F = []  # the family of subsets
    def generateData(self, size):
        X = set(list(range(size)))  # 0~n-1
        self.X = X
        S = [random.randint(0, size - 1) for i in range(20)]  # randomly pick 20 elements of X for S0
        S = set(S)
        self.F.append(list(S))
        # union_s is the union of the subsets chosen so far; rest is X - union_s
union_s = S
rest = X - union_s
while len(rest) >= 20:
n = random.randint(1, 20)
x = random.randint(1, n)
S = set(random.sample(rest, x))
S.update(random.sample(union_s, n - x))
            union_s.update(S)  # update the set of already-covered elements
rest = X - union_s
self.F.append(list(S))
        # fewer than 20 elements left: add the remainder directly
if len(rest) > 0:
self.F.append(list(rest))
        # generate |F|-y additional random subsets
y = len(self.F)
for i in range(size - y):
n = random.randint(1, 20)
S = random.sample(X, n)
self.F.append(list(S))
for i in range(len(self.F)):
self.F[i] = set(self.F[i])
class set_coverage:
"""Set coverage"""
def __init__(self,):
"""Constructor for set_coverage"""
def greedy(self, X, F) -> list:
print(F)
U = set(X)
C = []
while U:
            # Greedy strategy: each round, add the set that covers the most still-uncovered elements of U
S = max(F,key=(lambda x:len(U.intersection(x))))
U -= S
C.append(S)
return C
def liner_programming(self, X:list, F) -> list:
        # Weighted set cover via LP relaxation and rounding
# xs = {0,1}
X = list(X)
        A = []  # coefficient matrix
for i in range(len(X)):
row = []
            for j in range(len(F)):  # 1 if element X[i] belongs to F[j]
if X[i] in F[j]:
row.append(1)
else:
row.append(0)
A.append(row)
        f = max([sum(r) for r in A])  # maximum frequency of any element of X across F
t = 1 / f
        # Build the linear program
        prob = pulp.LpProblem("Linear minimize problem", pulp.LpMinimize)
        # Add the variables
        ingredient_vars = pulp.LpVariable.dicts("x", X, lowBound=0, upBound=1, cat="Continuous")
        # Add the objective function
        prob += pulp.lpSum([1 * ingredient_vars[i] for i in X])
        # Add the covering constraints
for i in range(len(X)):
prob += pulp.lpSum([A[i][j] * ingredient_vars[j] for j in range(len(F))]) >= 1
prob.solve()
prob = prob.variables()
        # Sort the LP variables back into their original order
        prob = sorted(prob, key=lambda x: int(x.name[2:]))
        # Round using the threshold t = 1/f
C = [set(f) for i, f in enumerate(F) if prob[i].varValue > t]
return C
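# --- Illustrative demo (not part of the original file) ---
# A tiny hand-made instance showing the greedy approximation: it repeatedly
# picks the subset that covers the most still-uncovered elements.
def _demo_greedy_cover():
    X = {1, 2, 3, 4, 5}
    F = [{1, 2, 3}, {2, 4}, {3, 4}, {4, 5}]
    C = set_coverage().greedy(X, F)
    # Greedy first picks {1, 2, 3} (covers 3 new elements), then {4, 5},
    # so C == [{1, 2, 3}, {4, 5}] here, which is also optimal.
    return C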
if __name__ == "__main__":
time_lp = []
time_greedy = []
def write(time1:list, time2:list, filename='a.csv'):
with open(filename, "w+") as f:
for i in range(len(time1)):
f.write(str(time1[i]) + ',' + str(time2[i]) + "\n")
def read(time1, time2, filename='a.csv'):
for line in open(filename,"r"):
time1.append(float(line.split(",")[0]))
time2.append(float(line.split(",")[1]))
def draw(x,time1, time2):
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('结果')
plt.xlabel("数据量/个") # xlabel、ylabel:分别设置X、Y轴的标题文字。
plt.ylabel("算法运行时间/ms")
plt.plot(x, time1, color='red', label='线性规划')
plt.plot(x, time2, color='blue', label='贪心')
for a,b,c in zip(x,time1,time2):
plt.text(a, b, "%f" % b , ha='center', va='bottom', fontsize=12)
plt.text(a, c, "%f" % c, ha='center', va='top', fontsize=12)
plt.legend()
plt.show()
datasize = [100, 1000, 5000]
    # Read the previous timings from file
read(time_lp, time_greedy)
draw(datasize, time_lp, time_greedy)
time_lp.clear()
time_greedy.clear()
for i in datasize:
g = generate()
g.generateData(i)
s = set_coverage()
time1 = time.time()
s.liner_programming(g.X, g.F)
time2 = time.time()
time_lp.append((time2 - time1)*1000)
print("线性规划:"+str(time_lp[-1]))
time1 = time.time()
s.greedy(g.X, g.F)
time2 = time.time()
time_greedy.append((time2 - time1) * 1000)
print("贪心:"+str(time_greedy[-1]))
print(time_lp)
print(time_greedy)
write(time_lp, time_greedy)
draw(datasize,time_lp, time_greedy)
# print (s.LP(g.rawData, g.data))
```
#### File: src/SearchAlgorithm/A_star.py
```python
from math import sqrt
from tkinter import *
tk = Tk()
class Point:
def __init__(self, father, cur, g, h):
"""
point information
:param father: 父节点
:param cur: (cur_x,cur_y)
:param g: 从起始到n的实际代价
:param h: 从n到目标节点的预估代价
:param f: 实际代价
"""
self.father = father # 根据父节点找路径
self.cur = cur # current_position [x,y]
self.g = g
self.h = h
self.f = self.g + self.h
    def __str__(self) -> str:
        if self.father is not None:
            return "cur:" + str(self.cur) + ",father:(" + str(self.father.cur[0]) + "," + str(
                self.father.cur[1]) + ") ,f:" + str(self.f) + ",h:" + str(self.h)
        return "cur:" + str(self.cur) + ",f:" + str(self.f) + ",h:" + str(self.h)
class A_star:
def __init__(self, points):
        self.points = points  # the map grid
    '''Compute h: Euclidean distance'''
    def cal_h(self, point_1, point_2):
        return sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2)
    '''
    Get the expandable neighbour nodes for the next step.
    gray: cannot be expanded
    white: +1
    blue: +2
    yellow: +4
    diagonal move: +sqrt(2)
    '''
def next_step(self, cur_pos, target, open, close):
        # Current coordinates and accumulated cost
        g = cur_pos.g
        cur = cur_pos.cur
        for i in range(max(cur[0] - 1, 0), min(len(self.points), cur[0] + 2)):  # scan the 8 cells around the current point
            for j in range(max(cur[1] - 1, 0), min(len(self.points[0]), cur[1] + 2)):
                # Do not expand the current node itself
if i == cur[0] and j == cur[1]:
continue
                # Obstacles (gray) cannot be expanded
if self.points[i][j] == "g":
continue
if (i, j) in close:
continue
newg = g
                # Accumulate the cost of the path walked so far
                # A diagonal move costs sqrt(2)
if abs(i - cur[0]) == 1 and abs(j - cur[1]) == 1:
newg += sqrt(2)
else:
newg += 1
                # Desert (yellow)
                if self.points[i][j] == "y":
                    newg += 4
                # River (blue)
elif self.points[i][j] == "b":
newg += 2
next_point = Point(cur_pos, (i, j), newg, self.cal_h(target, [i, j]))
                descend = open.get((i, j))  # check whether (i, j) is already in the open dict
                if not descend:  # not seen yet: record this node
                    open[(i, j)] = next_point
                elif descend.g > next_point.g:  # seen before: keep the cheaper path
open.pop((i, j))
open[(i, j)] = next_point
    '''Pick the expandable node with the lowest f'''
def select_best(self, open, close):
res, min_f = None, float('inf') # infinity
for point, info in open.items():
if info.f < min_f:
res, min_f = point, info.f
close.add(res)
return open.pop(res)
    '''Helper: check whether two points are identical'''
def equal(self, cur, target): # [x,y]
if cur[0] == target[0] and cur[1] == target[1]:
return True
return False
    '''One-way (single-direction) path finding'''
    def one_way(self, start, target):
        open = {}  # stores {(x, y): Point}
open[(start[0], start[1])] = Point(None, start, 0, self.cal_h(target, start))
close = set()
while True:
cur_point = self.select_best(open, close)
            # Expand the neighbours of the current node
            self.next_step(cur_point, target, open, close)
            # Check whether the target has been reached
if self.equal(cur_point.cur, target):
path = []
print("\ng h f:")
print(cur_point.g, cur_point.h, cur_point.f)
print("total g :"+str(cur_point.g))
                # Backtrack the path (listed from target back to start)
while cur_point:
path.append((cur_point.cur[0], cur_point.cur[1]))
cur_point = cur_point.father
# print(close)
print(path)
return path
    '''Bidirectional path finding'''
    def two_way(self, start, target):
        def find(start_open, target_open):  # check whether the two search frontiers meet
for key in start_open.keys():
if key in target_open:
endCur, startCur = start_open[key], target_open[key]
return startCur, endCur
return None, None
start_open = {}
start_open[(start[0], start[1])] = Point(None, start, 0, self.cal_h(start, target))
start_close = set()
target_open = {}
target_open[(target[0], target[1])] = Point(None, target, 0, self.cal_h(start, target))
target_close = set()
while True:
            # Expand from the start side
cur_start = self.select_best(start_open, start_close)
cur1, cur2 = find(start_open, target_open)
if cur1 and cur2:
break
self.next_step(cur_start, target, start_open, start_close)
            # Expand from the target side
cur_target = self.select_best(target_open, target_close)
# print(cur_target.cur)
cur1, cur2 = find(target_open, start_open)
if cur1 and cur2:
break
self.next_step(cur_target, start, target_open, target_close)
        # Backtrack both half-paths (each listed from the meeting point outwards)
print("\ng h f:")
if cur1: print(cur1.g, cur1.h, cur1.f)
if cur2: print(cur2.g, cur2.h, cur2.f)
# print("total g :" + str(cur1.father.g + cur2.g))
print("total g :" + str(cur1.g + cur2.g))
path1 = []
while cur1:
path1.append([cur1.cur[0], cur1.cur[1]])
cur1 = cur1.father
path2 = []
while cur2:
path2.append([cur2.cur[0], cur2.cur[1]])
cur2 = cur2.father
print(path1)
print(path2)
if path1[-1] != start:
path1,path2 = path2,path1
return path1, path2
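# --- Illustrative demo (not part of the original file) ---
# A 3x3 all-white grid: one_way should walk the diagonal from (0, 0) to (2, 2)
# at a total cost of 2*sqrt(2). The tkinter drawing below is not needed for this.
def _demo_tiny_grid():
    grid = [["w", "w", "w"],
            ["w", "w", "w"],
            ["w", "w", "w"]]
    path = A_star(grid).one_way([0, 0], [2, 2])
    return path  # backtracked, so target first: [(2, 2), (1, 1), (0, 0)]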
if __name__ == "__main__":
    # path1 / path2 are the paths found from start and from target respectively; this can draw either the one-way or the two-way result
def draw(points, path1, path2, width, height):
        # Draw the map
for i in range(len(points)):
for j in range(len(points[0])):
if points[i][j] == "g":
Button(tk, bg="gray", width=width, height=height).grid(row=i, column=j, sticky=W + E + N + S)
elif points[i][j] == "y":
Button(tk, bg="yellow", width=width, height=height).grid(row=i, column=j, sticky=W + E + N + S)
elif points[i][j] == "b":
Button(tk, bg="blue", width=width, height=height).grid(row=i, column=j, sticky=W + E + N + S)
else:
Button(tk, bg="white", width=width, height=height).grid(row=i, column=j, sticky=W + E + N + S)
        # Colour the path that starts from the start cell
for i in range(len(path1)):
if i == len(path1) - 1:
Button(tk, bg="Violet", width=width, height=height, text="Start").grid(row=path1[i][0],
column=path1[i][1],
sticky=W + E + N + S)
else:
Button(tk, bg="Violet", width=width, height=height).grid(row=path1[i][0], column=path1[i][1],
sticky=W + E + N + S)
        # One-way case: only one path to draw
if len(path2) == 0:
Button(tk, bg="Violet", width=width, height=height, text="Target").grid(row=path1[0][0], column=path1[0][1],
sticky=W + E + N + S)
return
for i in range(len(path2)):
if i == len(path2) - 1:
# print(res2[i][0], res2[i][1])
Button(tk, bg="Green", width=width, height=height, text="Target").grid(row=path2[i][0],
column=path2[i][1],
sticky=W + E + N + S)
else:
Button(tk, bg="Green", width=width, height=height).grid(row=path2[i][0], column=path2[i][1],
sticky=W + E + N + S)
    # y = yellow desert, b = blue stream, g = gray obstacle, w = white ordinary cell
problem1 = [["w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "g", "g", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w"]]
problem2 = [
["w", "w", "w", "g", "w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y"],
["w", "w", "w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "y", "y", "y", "y", "y", "y", "y", "y", "y", "b", "y", "y", "y", "y", "y"],
["g", "g", "g", "g", "g", "g", "w", "g", "g", "g", "g", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "y", "y", "y", "y", "y", "y", "y", "b", "y", "y", "y", "y", "y", "y"],
["w", "w", "w", "w", "w", "w", "w", "w", "g", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "y", "y", "y", "y", "y", "y", "b", "y", "y", "y", "y", "w", "w", "w"],
["w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "y", "y", "y", "y", "y", "y", "y", "b", "y", "y", "w", "w", "w", "w"],
["w", "w", "w", "w", "w", "w", "w", "g", "g", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "y", "y", "y", "y", "y", "y", "b", "b", "w", "w", "w", "w", "w"],
["w", "w", "g", "g", "g", "g", "g", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "y", "y", "y", "y", "y", "y", "b", "b", "w", "w", "w", "w", "w"],
["w", "w", "g", "w", "w", "g", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "y", "y", "y", "y", "b", "b", "b", "g", "w", "w", "w"],
["w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "b", "b", "b", "b", "w", "w", "w", "w"],
["w", "w", "w", "w", "w", "g", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "b", "b", "b", "w", "g", "w", "w", "w"],
["w", "w", "g", "w", "w", "g", "w", "g", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "g", "g", "g",
"w", "w", "w", "w", "w", "w", "g", "w", "w", "w", "b", "b", "w", "b", "b", "w", "w", "w"],
["w", "w", "g", "g", "g", "g", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "g", "g", "g",
"w", "w", "w", "w", "w", "w", "w", "w", "w", "g", "b", "w", "b", "b", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "g", "g", "g",
"w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "b", "b", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "w", "g", "g", "w", "g", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "w", "w", "g", "b", "b", "b", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "w", "w", "w", "b", "b", "b", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "g", "g", "g", "g", "g", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "g", "g", "w", "w", "w", "w", "w", "b", "b", "b", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "g", "g", "w", "w", "w", "w", "w", "b", "b", "b", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "w", "w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "w", "b", "b", "b", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "w", "b", "b", "b", "w", "w", "w", "w", "w", "w", "w", "w"],
["w", "w", "w", "g", "w", "w", "w", "g", "w", "w", "w", "w", "g", "w", "w", "w", "w", "w", "w", "w", "w", "w",
"w", "w", "w", "w", "w", "w", "b", "b", "b", "w", "w", "w", "w", "w", "w", "w", "w", "w"]]
def test1():
        # The top-left cell is (0, 0); the x axis grows downwards and the y axis grows rightwards
start1 = [4, 1]
target1 = [5, 11]
s = A_star(problem1)
path1, path2 = s.one_way(start1, target1), []
path1, path2 = s.two_way(start1, target1)
draw(problem1, path1, path2, width=7, height=2)
def test2():
        # The top-left cell is (0, 0); the x axis grows downwards and the y axis grows rightwards
start2 = [10, 4]
target2 = [0, 35]
s = A_star(problem2)
path1, path2 = s.one_way(start2, target2), []
path1, path2 = s.two_way(start2, target2)
draw(problem2, path1, path2, width=3, height=1)
test1()
# test2()
mainloop()
```
#### File: src/SortAlgorithm/QuickSort.py
```python
import random
import time
import matplotlib.pyplot as plt
class quickSort:
"""a class to implement quick sort algorithm."""
def __init__(self, ):
"""Constructor for quickSort"""
def quick_sort(self, A, p, r):
def rand_partition(A, p, r):
i = random.randint(p, r)
swap(A, i, r)
x = A[r]
i = p - 1
for j in range(p, r):
if A[j] <= x:
i += 1
swap(A, i, j)
swap(A, i + 1, r)
return i + 1
def swap(A, i, j):
A[i], A[j] = A[j], A[i]
if p < r:
try:
# print("p, r: ", p, r)
q = rand_partition(A, p, r)
self.quick_sort(A, p, q - 1)
self.quick_sort(A, q + 1, r)
except Exception as e:
print(e)
def quick_sort_three_way_division(self, A, l, r):
def rand_partition(A, l, r):
v = random.randint(l, r) # pivot
# v = l
swap(A, v, r) # r is index of pivot
x = A[r] # value
            # record the boundaries of the less-than and greater-than regions
            # partition [l, r-1]; the pivot is parked at index r
lt = l
gt = r - 1
i = l
            while i <= gt:
if A[i] < x: # less than , swap lt , i
swap(A, lt, i)
lt += 1
i += 1
elif A[i] == x: # equal to , next i
i += 1
else: # greater than, swap gt , i
swap(A, i, gt)
gt -= 1
            swap(A, gt + 1, r)  # place the pivot right after the "equal" block
            return lt, gt + 1
def swap(A, i, j):
A[i], A[j] = A[j], A[i]
if l < r:
try:
# print("l, r: ", l, r)
q = rand_partition(A, l, r)
lt, gt = q[0], q[1]
                self.quick_sort_three_way_division(A, l, lt - 1)
                self.quick_sort_three_way_division(A, gt + 1, r)
except Exception as e:
print(e)
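# --- Illustrative demo (not part of the original file) ---
# Both variants sort in place; the three-way partition is intended to help on
# inputs with many duplicate keys by grouping elements equal to the pivot.
def _demo_quick_sort():
    data = [5, 3, 5, 1, 5, 2, 5, 4, 5]
    quickSort().quick_sort(data, 0, len(data) - 1)
    return data  # [1, 2, 3, 4, 5, 5, 5, 5, 5]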
if __name__ == '__main__':
def generate(count, limit=None):
if limit == None: limit = count
nums = random.sample(range(0, count), count - limit)
nums += [nums[random.randint(0, len(nums) - 1)]] * limit
return nums
def write_to_file(list, filepath="a.csv"):
with open(filepath, "w") as f:
for i in list:
f.write(str(i) + "\n")
def read_from_file(filepath="b.csv"):
result = []
for line in open(filepath):
result.append(float(line))
return result
    # Result files: a.csv = plain quicksort, b.csv = three-way quicksort, c.csv = the built-in sort ("system call" curve) run on my own machine.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('快排结果')
plt.ylabel('运行时间/s')
plt.xlabel('重复元素(i% * 100 * n) -> i/num')
plt.plot(range(1, 10), read_from_file('a.csv'), color='red', label='origin')
plt.plot(range(1, 10), read_from_file('b.csv'), color='blue', label='three ways')
plt.plot(range(1, 10), read_from_file("c.csv"), color='yellow', label='system call')
plt.legend()
plt.show()
Max = 1000000
# sys.setrecursionlimit(Max)
qs = quickSort()
n = 100000
resultList = []
for i in range(1, 10):
repeat = int(n * 10 * 0.01 * i)
nums = generate(n, repeat)
start = time.time()
# qs.quick_sort(nums, 0, len(nums) - 1)
# qs.quick_sort_three_way_division(nums, 0, len(nums) - 1)
nums.sort()
end = time.time()
print(nums)
timespan = (float)(end - start)
# print("start: " + str(start) + ", end: "+ str(end) + ", timespan: " +str(timespan))
resultList.append(timespan)
print(resultList)
# write_to_file(resultList, "a.csv")
# write_to_file(resultList, "b.csv")
write_to_file(resultList, "c.csv")
``` |
{
"source": "1170301027/ConvexOpt",
"score": 3
} |
#### File: ConvexOpt/Lab1/Newton.py
```python
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
def f(x, y):
return (1 - x) ** 2 + 100 * (y - x * x) ** 2
def H(x, y):
return np.matrix([[1200 * x * x - 400 * y + 2, -400 * x],
[-400 * x, 200]])
def grad(x, y):
return np.matrix([[2 * x - 2 + 400 * x * (x * x - y)],
[200 * (y - x * x)]])
def delta_newton(x, y):
lambda_ = 1.0
delta = lambda_ * H(x, y).I * grad(x, y)
return delta
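# --- Illustrative check (not part of the original file) ---
# Sanity-check the analytic gradient of the Rosenbrock function above against
# central finite differences; the two should agree to roughly 1e-5 at a test point.
def _check_grad(x0=-0.3, y0=0.4, eps=1e-6):
    num_gx = (f(x0 + eps, y0) - f(x0 - eps, y0)) / (2 * eps)
    num_gy = (f(x0, y0 + eps) - f(x0, y0 - eps)) / (2 * eps)
    ana = grad(x0, y0)
    return abs(num_gx - ana[0, 0]), abs(num_gy - ana[1, 0])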
# ----- Draw the contour plot -----
# number of sample points
n = 256
# define x, y
x = np.linspace(-1, 1.1, n)
y = np.linspace(-1, 1.1, n)
# build the mesh grid
X, Y = np.meshgrid(x, y)
plt.figure()
# fill the contour colours; the integer argument is the number of levels
plt.contourf(X, Y, f(X, Y), 5, alpha=0, cmap=plt.cm.hot)
# draw the contour lines
C = plt.contour(X, Y, f(X, Y), 8, locator=ticker.LogLocator(), colors='black', linewidths=0.01)
# label the contour lines
plt.clabel(C, inline=True, fontsize=10)
# ---------------------
x = np.matrix([[-0.3],
[0.4]])
tol = 0.00001
xv = [x[0, 0]]
yv = [x[1, 0]]
plt.text(x[0, 0], x[1, 0],"start")
start=time.time()
for t in range(6000):
delta = delta_newton(x[0, 0], x[1, 0])
if abs(delta[0, 0]) < tol and abs(delta[1, 0]) < tol:
break
x = x - delta
xv.append(x[0, 0])
yv.append(x[1, 0])
end = time.time()
print("iteration:" + str(t))
print(xv)
print(yv)
print("耗时:"+str(end-start))
plt.plot(xv, yv, label='track')
# plt.plot(xv, yv, label='track', marker='o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Newton\'s Method for Rosenbrock Function')
plt.legend()
plt.show()
```
#### File: ConvexOpt/Lab2/main.py
```python
import os, sys
current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(current_dir)
from pre import point, example, drawResult
from constrainedOptimization import ALM, ADMM
if __name__ == '__main__':
epsilon = 0.01
loss_function, start = example(rho=1.0), point(-2, -2)
y_t = 0
def testALM():
points = ALM(loss_function, start, y_t=y_t, epsilon=epsilon)
for i in points:
print(str(i))
drawResult(loss_function, points, 'ALM', epsilon)
def testADMM():
points = ADMM(loss_function, start, y_t=y_t, epsilon=epsilon)
for i in points:
print(str(i))
drawResult(loss_function, points, 'ADMM', epsilon)
testALM()
# testADMM()
```
#### File: 1170301027/ConvexOpt/Quasi-Newton_BFGS.py
```python
from numpy.linalg import linalg
'''Quasi-Newton method: BFGS'''
# coding:UTF-8
from numpy import *
from numpy.ma import shape
import matplotlib.pyplot as plt
# fun
def fun(x):
# return 100 * (x[0, 0] ** 2 - x[1, 0]) ** 2 + (x[0, 0] - 1) ** 2
x1 = x[0,0]
x2 = x[1,0]
return x1 ** 2 + 4 * x2 ** 2 - 4 * x1 - 8 * x2
# gfun
def gfun(x):
result = zeros((2, 1))
x1 = x[0, 0]
x2 = x[1, 0]
result[0, 0] = 2 * x1 - 4
result[1, 0] = 8 * x2 - 8
return result
def bfgs(fun, gfun, x0, precision=0.01):
result = []
maxk = 500
rho = 0.55
sigma = 0.4
m = shape(x0)[0]
Bk = eye(m)
k = 0
while (k < maxk):
        gk = mat(gfun(x0))  # compute the gradient
print(gk)
if gk[1][0] ** 2 + gk[0][0] ** 2 < precision ** 2: break
dk = mat(-linalg.solve(Bk, gk))
m = 0
mk = 0
while (m < 20):
newf = fun(x0 + rho ** m * dk)
oldf = fun(x0)
if (newf < oldf + sigma * (rho ** m) * (gk.T * dk)[0, 0]):
mk = m
break
m = m + 1
        # BFGS update of the approximate Hessian
x = x0 + rho ** mk * dk
sk = x - x0
yk = gfun(x) - gk
if (yk.T * sk > 0):
Bk = Bk - (Bk * sk * sk.T * Bk) / (sk.T * Bk * sk) + (yk * yk.T) / (yk.T * sk)
k = k + 1
x0 = x
result.append(fun(x0))
return result
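# --- Illustrative note (not part of the original file) ---
# The test objective f(x1, x2) = x1^2 + 4*x2^2 - 4*x1 - 8*x2 has its minimum at
# (x1, x2) = (2, 1) with f = -8, so the function values collected in `result`
# should decrease towards -8. The update
#   B_{k+1} = B_k - (B_k s s^T B_k) / (s^T B_k s) + (y y^T) / (y^T s)
# is the standard BFGS approximation of the Hessian, applied only when y^T s > 0.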
if __name__ == '__main__':
    x0 = mat([[0], [0]])  # starting point as a column matrix
result = bfgs(fun, gfun, x0, precision=0.01)
n = len(result)
x = arange(0, n, 1)
y = result
print(y)
# for i,j in x,y:
# plt.plot(i,j,"r*")
plt.plot(x,y)
plt.title("BFGS")
plt.show()
``` |
{
"source": "1173710105/Text2JSON",
"score": 3
} |
#### File: Text2JSON/entity_named_recog/hour_entity_recognition.py
```python
import sys
from os.path import dirname, abspath
path = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(path)
import datetime
import re
from datetime import date, timedelta
from Text2JSON.entity_named_recog.day_entity_recognition import day_absolute_comfirm, day_relative_comfirm
from Text2JSON.entity_named_recog.entity_utils import *
def hour_recon(line, placeholders_list: dict):
line = hour_relative_recognition(line, placeholders_list)
line = hour_absolute_recognition(line, placeholders_list)
return line
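# --- Illustrative usage (not part of the original file) ---
# hour_recon rewrites hour expressions in a sentence into generated placeholder
# tokens and records the parsed datetimes in the dict. This sketch assumes the
# day_* and entity_utils helpers imported above resolve; the output varies with
# the current date and with the random placeholder strings.
def _demo_hour_recon():
    placeholders = {}
    # "明天上午10点开会" = "meeting tomorrow at 10 am"
    rewritten = hour_recon('明天上午10点开会', placeholders)
    return rewritten, placeholders  # placeholder -> (quoted time text, [datetime])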
def hour_absolute_recognition(line, placeholders_list: dict):
time_map = {'零': '0', '一': '1', '二': '2', '三': '3', '四': '4',
'五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '十': '10', '两': '2'}
now = date.today()
    # Arabic digits, morning (上午/早上/早)
date_all_morning_1 = re.findall(r"(上午\d{1,2}点)", line)
date_all_morning_2 = re.findall(r"(早上\d{1,2}点)", line)
date_all_morning_3 = re.findall(r"(早\d{1,2}点)", line)
for data in date_all_morning_1 + date_all_morning_2 + date_all_morning_3:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(上午)', '', data)
hour_minute = re.sub(r'(早上)', '', hour_minute)
hour_minute = re.sub(r'(早)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour_minute[:hour_minute.index(':')])
try:
# 进一天
if hour == 24:
hour = 0
target_time = datetime.datetime(year, month, day, hour)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Arabic digits, afternoon / dusk / night
date_all_afternoon = re.findall(r"(下午\d{1,2}点)", line)
date_all_nightfall = re.findall(r"([傍]?晚\d{1,2}点)", line)
date_all_night = re.findall(r"(晚上\d{1,2}点)", line)
for data in date_all_afternoon + date_all_nightfall + date_all_night:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(下午)', '', data)
hour_minute = re.sub(r'(晚上)', '', hour_minute)
hour_minute = re.sub(r'([傍]?晚)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour_minute[:hour_minute.index(':')])
try:
# 进一天
if hour == 24:
hour = 0
target_time = datetime.datetime(year, month, day, hour)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
# 加时间
if hour <= 12:
target_time = target_time + timedelta(hours=12)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Arabic digits, noon
date_all_1 = re.findall(r"(中午[01]?[123]点)", line)
date_all_2 = re.findall(r"(正午[01]?[123]点)", line)
for data in date_all_1 + date_all_2:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(中午)', '', data)
hour_minute = re.sub(r'(正午)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour_minute[:hour_minute.index(':')])
try:
# 进一天
if hour == 24:
hour = 0
target_time = datetime.datetime(year, month, day, hour)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
# 加时间
if hour <= 2:
target_time = target_time + timedelta(hours=12)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Arabic digits, early morning (凌晨)
date_all_early = re.findall(r"(凌晨[0]?[123456]点)", line)
for data in date_all_early:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(凌晨)', '', data)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour_minute[:hour_minute.index(':')])
try:
# 进一天
if hour == 24:
hour = 0
target_time = datetime.datetime(year, month, day, hour)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Arabic digits, no period-of-day prefix
date_all = re.findall(r"(\d{1,2}点)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = data.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour_minute[:hour_minute.index(':')])
try:
# 进一天
if hour == 24:
hour = 0
target_time = datetime.datetime(year, month, day, hour)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese numerals, morning
date_all_morning_1 = re.findall(r"(上午[二]?[十]?[零两一二三四五六七八九十]点)", line)
date_all_morning_2 = re.findall(r"(早上[二]?[十]?[零两一二三四五六七八九十]点)", line)
date_all_morning_3 = re.findall(r"(早[二]?[十]?[零两一二三四五六七八九十]点)", line)
for data in date_all_morning_1 + date_all_morning_2 + date_all_morning_3:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(上午)', '', data)
hour_minute = re.sub(r'(早上)', '', hour_minute)
hour_minute = re.sub(r'(早)', '', hour_minute)
hour_minute = re.sub(r'(分)', '', hour_minute)
hour_minute = re.sub(r'(钟)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = hour_minute[:hour_minute.index(':')]
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
try:
# 进一天
if hour == 24:
target_time = datetime.datetime(year, month, day, 0)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese numerals, afternoon / dusk / night
date_all_afternoon = re.findall(r"(下午[二]?[十]?[两一二三四五六七八九十]点)", line)
date_all_nightfall = re.findall(r"([傍]?晚[二]?[十]?[两一二三四五六七八九十]点)", line)
date_all_night = re.findall(r"(晚上[二]?[十]?[两一二三四五六七八九十]点)", line)
for data in date_all_afternoon + date_all_nightfall + date_all_night:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(下午)', '', data)
hour_minute = re.sub(r'(晚上)', '', hour_minute)
hour_minute = re.sub(r'([傍]?晚)', '', hour_minute)
hour_minute = re.sub(r'(分)', '', hour_minute)
hour_minute = re.sub(r'(钟)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = hour_minute[:hour_minute.index(':')]
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
try:
# 进一天
if hour == 24:
target_time = datetime.datetime(year, month, day, 0)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
# 加时间
if hour <= 12:
target_time = target_time + timedelta(hours=12)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese numerals, noon (11-13 o'clock)
date_all_1 = re.findall(r"(中午十[一二三]点)", line)
date_all_2 = re.findall(r"(正午十[一二三]点)", line)
for data in date_all_1 + date_all_2:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(中午)', '', data)
hour_minute = re.sub(r'(正午)', '', hour_minute)
hour_minute = re.sub(r'(分)', '', hour_minute)
hour_minute = re.sub(r'(钟)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = hour_minute[:hour_minute.index(':')]
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
try:
# 进一天
if hour == 24:
target_time = datetime.datetime(year, month, day, 0)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese numerals, noon ("一点" -> 1 pm)
date_all_1 = re.findall(r"(中午一点)", line)
date_all_2 = re.findall(r"(正午一点)", line)
for data in date_all_1 + date_all_2:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
hour = 13
try:
# 进一天
if hour == 24:
target_time = datetime.datetime(year, month, day, 0)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese numerals, early morning
date_all_early = re.findall(r"(凌晨[二]?[十]?[一两二三四五六七八九十]点)", line)
for data in date_all_early:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(凌晨)', '', data)
hour_minute = re.sub(r'(分)', '', hour_minute)
hour_minute = re.sub(r'(钟)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(time_map[hour_minute[:hour_minute.index(':')]])
try:
# 进一天
if hour == 24:
target_time = datetime.datetime(year, month, day, 0)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese numerals, no period-of-day prefix
date_all = re.findall(r"([二]?[十]?[两一二三四五六七八九十]点)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
hour_minute = re.sub(r'(分)', '', data)
hour_minute = re.sub(r'(钟)', '', hour_minute)
hour_minute = hour_minute.replace('点', ':')
# 确定年月日
handle_line, year, month, day = day_relative_comfirm(first_line, placeholders_list)
if year is None:
handle_line, year, month, day = day_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = hour_minute[:hour_minute.index(':')]
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
try:
# 进一天
if hour == 24:
target_time = datetime.datetime(year, month, day, 0)
target_time = target_time + timedelta(days=1)
else:
target_time = datetime.datetime(year, month, day, hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
return line
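# --- Illustrative helper (not part of the original file) ---
# A self-contained version of the Chinese-numeral parsing repeated above, not
# used by the functions in this module: digits before '十' contribute tens and
# digits after it contribute units, so '十' -> 10, '十一' -> 11, '二十三' -> 23.
def _cn_hour_to_int(hour):
    digits = {'零': 0, '一': 1, '二': 2, '三': 3, '四': 4,
              '五': 5, '六': 6, '七': 7, '八': 8, '九': 9, '两': 2}
    if '十' not in hour:
        return digits[hour]
    pre, post = hour[:hour.index('十')], hour[hour.index('十') + 1:]
    return (digits[pre] if pre else 1) * 10 + (digits[post] if post else 0)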
def hour_relative_recognition(line, placeholders_list: dict):
time_map = {'一': '1', '二': '2', '三': '3', '四': '4',
'五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '两': '2'}
    # "半" (half an hour) is handled separately below, as a 30-minute offset
now = datetime.datetime.now()
    # Chinese numerals, "N hours ago" (0-99)
date_all = re.findall(r"([二三四五六七八九]?[十]?[零一二三四五六七八九十半][个]?小时前)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
hour = re.sub(r"[个]?小时前", '', data)
try:
# 解析小时
if len(hour) > 1:
if '十' not in hour:
raise ValueError
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
target_time = now - timedelta(hours=hour)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # Arabic digits, "N hours ago"
date_all = re.findall(r"(\d{1,2}[个]?小时前)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
data = re.sub(r"[个]?小时前", '', data)
hours_delta = int(data)
target_time = now - timedelta(hours=hours_delta)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # Chinese numerals, "N hours later" (0-99)
date_all = re.findall(r"([二三四五六七八九]?[十]?[零一二三四五六七八九十半][个]?小时后)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
data = re.sub(r"[个]?小时后", '', data)
hour = data
try:
# 解析小时
if len(hour) > 1:
if '十' not in hour:
raise ValueError
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
target_time = now + timedelta(hours=hour)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # Arabic digits, "N hours later"
date_all = re.findall(r"(\d{1,2}[个]?小时后)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
data = re.sub(r"[个]?小时后", '', data)
hours_delta = int(data)
target_time = now + timedelta(hours=hours_delta)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # "Half an hour ago"
date_all = re.findall(r"(半[个]?小时前)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
minute_delta = 30
target_time = now - timedelta(minutes=minute_delta)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # "Half an hour later"
date_all = re.findall(r"(半[个]?小时后)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
minute_delta = 30
target_time = now + timedelta(minutes=minute_delta)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
return line
def hour_absolute_comfirm(line, placeholders_list: dict):
time_map = {'零': '0', '一': '1', '二': '2', '三': '3', '四': '4',
'五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '十': '10', '两': '2'}
connect_char = ['-', '~', '到', '至']
now = date.today()
    # Arabic digits, morning ("上午"/"早上")
date_all_morning_1 = re.findall(r"(上午\d{1,2}[点:])", line)
date_all_morning_2 = re.findall(r"(早上\d{1,2}[点:])", line)
for data in date_all_morning_1 + date_all_morning_2:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(上午)', '', data)
hour = re.sub(r'(早上)', '', hour)
hour = re.sub(r'([点:])', '', hour)
        # Resolve the year, month and day
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
hour = int(hour)
return line, year, month, day, hour
    # Arabic digits, afternoon/evening ("下午"/"傍晚"/"晚上")
date_all_afternoon = re.findall(r"(下午\d{1,2}[点:])", line)
date_all_nightfall = re.findall(r"(傍晚\d{1,2}[点:])", line)
date_all_night = re.findall(r"(晚上\d{1,2}[点:])", line)
for data in date_all_afternoon + date_all_nightfall + date_all_night:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(下午)', '', data)
hour = re.sub(r'(傍晚)', '', hour)
hour = re.sub(r'(晚上)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
        # Resolve the hour and minutes
hour = int(hour)
if hour <= 12:
hour += 12
return line, year, month, day, hour
    # Arabic digits, noon ("中午"/"正午")
date_all_1 = re.findall(r"(中午[01]?[123][点:])", line)
date_all_2 = re.findall(r"(正午[01]?[123][点:])", line)
for data in date_all_1 + date_all_2:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(中午)', '', data)
hour = re.sub(r'(正午)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour)
return line, year, month, day, hour
    # Arabic digits, early morning ("凌晨")
date_all_early = re.findall(r"(凌晨[0]?[123456][点:])", line)
for data in date_all_early:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(凌晨)', '', data)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour)
return line, year, month, day, hour
    # Arabic digits, no period-of-day prefix
date_all = re.findall(r"(\d{1,2}[点:])", line)
for data in date_all:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'([点:])', '', data)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 确定小时 和 分钟
hour = int(hour)
return line, year, month, day, hour
    # Chinese numerals, morning ("上午"/"早上")
date_all_morning_1 = re.findall(r"(上午[二]?[十]?[零两一二三四五六七八九十][点:])", line)
date_all_morning_2 = re.findall(r"(早上[二]?[十]?[零两一二三四五六七八九十][点:])", line)
for data in date_all_morning_1 + date_all_morning_2:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(上午)', '', data)
hour = re.sub(r'(早上)', '', hour)
hour = re.sub(r'(分)', '', hour)
hour = re.sub(r'(钟)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
        # Parse the hour value
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
return line, year, month, day, hour
    # Chinese numerals, afternoon/evening ("下午"/"傍晚"/"晚上")
date_all_afternoon = re.findall(r"(下午[二]?[十]?[两一二三四五六七八九十][点:])", line)
date_all_nightfall = re.findall(r"(傍晚[二]?[十]?[两一二三四五六七八九十][点:])", line)
date_all_night = re.findall(r"(晚上[二]?[十]?[两一二三四五六七八九十][点:])", line)
for data in date_all_afternoon + date_all_nightfall + date_all_night:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(下午)', '', data)
hour = re.sub(r'(傍晚)', '', hour)
hour = re.sub(r'(晚上)', '', hour)
hour = re.sub(r'(分)', '', hour)
hour = re.sub(r'(钟)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
if hour <= 12:
hour += 12
return line, year, month, day, hour
    # Chinese numerals, noon ("中午"/"正午" + 十一/十二/十三)
date_all_1 = re.findall(r"(中午十[一二三][点:])", line)
date_all_2 = re.findall(r"(正午十[一二三][点:])", line)
for data in date_all_1 + date_all_2:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(中午)', '', data)
hour = re.sub(r'(正午)', '', hour)
hour = re.sub(r'(分)', '', hour)
hour = re.sub(r'(钟)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
return line, year, month, day, hour
    # Chinese numerals, noon ("中午一点"/"正午一点" -> 13:00)
date_all_1 = re.findall(r"(中午一[点:])", line)
date_all_2 = re.findall(r"(正午一[点:])", line)
for data in date_all_1 + date_all_2:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
hour = 13
return line, year, month, day, hour
    # Chinese numerals, early morning ("凌晨")
date_all_early = re.findall(r"(凌晨[二]?[十]?[一两二三四五六七八九十][点:])", line)
for data in date_all_early:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(凌晨)', '', data)
hour = re.sub(r'(分)', '', hour)
hour = re.sub(r'(钟)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
return line, year, month, day, hour
    # Chinese numerals, no period-of-day prefix
date_all = re.findall(r"([二]?[十]?[两一二三四五六七八九十][点:])", line)
for data in date_all:
index = line.rindex(data)
if index + len(data) == len(line):
line = line[:index]
hour = re.sub(r'(分)', '', data)
hour = re.sub(r'(钟)', '', hour)
hour = re.sub(r'([点:])', '', hour)
# 确定年月日
line, year, month, day = day_relative_comfirm(line, placeholders_list)
if year is None:
line, year, month, day = day_absolute_comfirm(line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
# 解析小时
if len(hour) > 1:
if '十' not in hour:
hour = 25
else:
pre_hour = hour[0:hour.index('十')]
post_hour = hour[hour.index('十') + 1:]
# 10
if len(pre_hour) == 0 and len(post_hour) == 0:
hour = 10
# 11 ~ 19
elif len(pre_hour) == 0 and len(post_hour) != 0:
hour = 10 + int(time_map[post_hour])
# 20, 30
elif len(pre_hour) != 0 and len(post_hour) == 0:
hour = int(time_map[pre_hour]) * 10
else:
# 21 ~ 29
hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour])
else:
hour = int(time_map[hour])
return line, year, month, day, hour
if len(line) >= 16 and line[-1] in connect_char:
holder = line[-16:-1]
if holder in placeholders_list.keys():
_, time_list = placeholders_list[holder]
if len(time_list) == 1 and time_list[0] is not None:
return line[:-1] + '到', time_list[0].year, time_list[0].month, time_list[0].day, time_list[0].hour
return line, None, None, None, None
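# The "十"-based numeral parsing above is duplicated in every branch of this module.
# Below is a minimal sketch of a shared helper that could replace those blocks; the name
# chinese_numeral_to_int is an assumption (no such helper exists elsewhere in this project),
# and callers would still have to handle ValueError and the am/pm adjustments themselves.
def chinese_numeral_to_int(text, time_map):
    """Convert a Chinese numeral between 1 and 99 (e.g. '二十三') to an int."""
    if len(text) == 1:
        return int(time_map[text])
    if '十' not in text:
        raise ValueError(text)
    pre, post = text.split('十', 1)
    pre_value = int(time_map[pre]) if pre else 1     # '十三' -> 1 * 10 + 3 = 13
    post_value = int(time_map[post]) if post else 0  # '二十' -> 2 * 10 + 0 = 20
    return pre_value * 10 + post_value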
```
#### File: Text2JSON/entity_named_recog/minute_entity_recognition.py
```python
import sys
from os.path import dirname, abspath
path = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(path)
import datetime
import re
from datetime import timedelta
from Text2JSON.entity_named_recog.hour_entity_recognition import hour_absolute_comfirm
from Text2JSON.entity_named_recog.entity_utils import *
def minute_recon(line, placeholders_list: dict):
line = minute_relative_recognition(line, placeholders_list)
line = minute_absolute_recognition(line, placeholders_list)
return line
def minute_absolute_recognition(line, placeholders_list: dict):
now = datetime.datetime.now()
time_map = {'零': '0', '一': '1', '二': '2', '三': '3', '四': '4',
'五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '十': '10', '两': '2', '半':'30'}
    # Arabic-digit minutes (after "点"/":")
date_all = re.findall(r"([:点]\d{1,2}[分]?[钟]?)", line)
for data in date_all:
data = data[1:]
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
minute = re.sub(r'([分]?[钟]?)', '', data)
# 确定年月日
handle_line, year, month, day, hour = hour_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
hour = now.hour if hour is None else hour
# 确定分钟
minute = int(minute)
try:
if minute > 60:
raise ValueError
if hour > 24:
raise ValueError
target_time = datetime.datetime(year, month, day, 0, 0)
target_time = target_time + timedelta(minutes=minute)
target_time = target_time + timedelta(hours=hour)
except ValueError:
            # If it cannot be parsed, treat it as an error and skip it in later passes
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
    # Chinese-numeral minutes (after "点"/":")
date_all = re.findall(r"([:点][二三四五六七八九]?[十]?[零一二三四五六七八九十半][分]?[钟]?)", line)
for data in date_all:
data = data[1:]
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
minute = re.sub(r'([分]?[钟]?)', '', data)
# 确定年月日
handle_line, year, month, day, hour = hour_absolute_comfirm(first_line, placeholders_list)
year = now.year if year is None else year
month = now.month if month is None else month
day = now.day if day is None else day
hour = now.hour if hour is None else hour
        # Parse the minute value
if len(minute) > 1:
if '十' not in minute:
                # error handling: force an out-of-range value
minute = 61
else:
pre_minute = minute[0:minute.index('十')]
post_minute = minute[minute.index('十') + 1:]
# 10
if len(pre_minute) == 0 and len(post_minute) == 0:
minute = 10
# 11 ~ 19
elif len(pre_minute) == 0 and len(post_minute) != 0:
minute = 10 + int(time_map[post_minute])
# 20, 30
elif len(pre_minute) != 0 and len(post_minute) == 0:
minute = int(time_map[pre_minute]) * 10
else:
# 21 ~ 24
minute = int(time_map[pre_minute]) * 10 + int(time_map[post_minute])
else:
minute = int(time_map[minute])
# 确定分钟
try:
if minute > 60:
raise ValueError
if hour > 24:
raise ValueError
target_time = datetime.datetime(year, month, day, 0, 0)
target_time = target_time + timedelta(minutes=minute)
target_time = target_time + timedelta(hours=hour)
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
assert target_time is not None, '逻辑错误'
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = handle_line + placeholder + sec_line
return line
def minute_relative_recognition(line, placeholders_list: dict):
time_map = {'一': '1', '二': '2', '三': '3', '四': '4',
'五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '两': '2', '十': '10'}
    # "半" (half) is handled separately, in the minute logic
now = datetime.datetime.now()
    # Chinese numerals followed by "分钟前" (minutes ago), 1-99
date_all = re.findall(r"([二三四五六七八九]?[十]?[一两二三四五六七八九十]分钟前)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
minute = re.sub(r"分钟前", '', data)
try:
# 解析分钟
if len(minute) > 1:
if '十' not in minute:
# 错误处理
raise ValueError
else:
pre_minute = minute[0:minute.index('十')]
post_minute = minute[minute.index('十') + 1:]
# 10
if len(pre_minute) == 0 and len(post_minute) == 0:
minute = 10
# 11 ~ 19
elif len(pre_minute) == 0 and len(post_minute) != 0:
minute = 10 + int(time_map[post_minute])
# 20, 30
elif len(pre_minute) != 0 and len(post_minute) == 0:
minute = int(time_map[pre_minute]) * 10
else:
# 21 ~ 24
minute = int(time_map[pre_minute]) * 10 + int(time_map[post_minute])
else:
minute = int(time_map[minute])
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
target_time = now - timedelta(minutes=minute)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # Arabic digits followed by "分钟前" (minutes ago)
date_all = re.findall(r"(\d{1,2}分钟前)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
data = re.sub(r"分钟前", '', data)
minute_delta = int(data)
target_time = now - timedelta(minutes=minute_delta)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # Chinese numerals followed by "分钟后" (minutes later), 10-99
date_all = re.findall(r"([二三四五六七八九]?[十]?[一两二三四五六七八九十]分钟后)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
minute = re.sub(r"分钟后", '', data)
try:
# 解析分钟
if len(minute) > 1:
if '十' not in minute:
raise ValueError
else:
pre_minute = minute[0:minute.index('十')]
post_minute = minute[minute.index('十') + 1:]
# 10
if len(pre_minute) == 0 and len(post_minute) == 0:
minute = 10
# 11 ~ 19
elif len(pre_minute) == 0 and len(post_minute) != 0:
minute = 10 + int(time_map[post_minute])
# 20, 30
elif len(pre_minute) != 0 and len(post_minute) == 0:
minute = int(time_map[pre_minute]) * 10
else:
# 21 ~ 24
minute = int(time_map[pre_minute]) * 10 + int(time_map[post_minute])
else:
minute = int(time_map[minute])
except ValueError:
# 识别不出来就认为是错误,后续不再识别
time_str = data
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [None])
line = first_line + placeholder + sec_line
continue
target_time = now + timedelta(minutes=minute)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
    # Arabic digits followed by "分钟后" (minutes later)
date_all = re.findall(r"(\d{1,2}分钟后)", line)
for data in date_all:
index = line.index(data)
first_line = line[0: index]
sec_line = line[index + len(data):]
# 计算时差,获取目标时间
data = re.sub(r"分钟后", '', data)
minute_delta = int(data)
target_time = now + timedelta(minutes=minute_delta)
time_str = '“' + time_to_str(target_time) + "”"
placeholder = random_str()
placeholders_list[placeholder] = (time_str, [target_time])
line = first_line + placeholder + sec_line
return line
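# A minimal usage sketch (assumption: meant only for a quick manual check; the exact
# placeholder keys and formatted times depend on random_str() and the current time).
if __name__ == '__main__':
    sample_placeholders = {}
    sample_line = minute_recon('十五分钟后提醒我开会', sample_placeholders)  # "remind me of the meeting in fifteen minutes"
    print(sample_line)          # the time expression is replaced by a placeholder token
    print(sample_placeholders)  # placeholder -> (formatted time string, [target datetime])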
```
#### File: Text2JSON/featurizer/schema_featurizer.py
```python
import sys
from os.path import dirname, abspath
path = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(path)
import numpy as np
import torch.utils.data as torch_data
from Text2JSON.featurizer.schema_parser import schema_parsed
from Text2JSON.featurizer.Input_SQL import SQLExample
from collections import defaultdict
stats = defaultdict(int)
class SchemaInputFeature(object):
def __init__(self, qid, question, schema_id, relative_schema_id):
self.qid = qid
self.question = question
self.schema_id = schema_id
self.relative_schema_id = relative_schema_id
self.tokens = []
        # used for training and testing
self.input_ids = []
self.input_mask = []
self.segment_ids = []
self.schema_rel = []
self.schema_mask = []
        # used for testing only
self.schema_sec_rel = []
def to_dict(self):
return {
'qid': self.qid,
'question': self.question,
'schema_id': self.schema_id,
'relative_schema_id': self.relative_schema_id,
'tokens': self.tokens,
'input_ids': self.input_ids,
'input_mask': self.input_mask,
'segment_ids': self.segment_ids,
'table_rel': self.schema_rel,
'mask': self.schema_mask
}
class HydraSchemaFeaturizer(object):
"""
    Feature extractor: turns one training example into an InputFeature
"""
def __init__(self, config, tokenizer):
self.config = config
        # set the tokenizer according to the model being used
self.tokenizer = tokenizer
def get_input_schema_feature(self, example: SQLExample, schema: schema_parsed, config, positive=True, negative=True,
note=False):
max_query_length = int(config["max_query_length"])
max_column_length = int(config["max_column_length"])
max_total_length = max_column_length + max_query_length
        # one feature corresponds to one training example and holds several <column, question> encodings
input_feature = SchemaInputFeature(
example.qid,
example.question,
example.table_id,
example.table_id_relative,
)
        # record the word/subword mapping
tokens = []
        word_to_subword_base = []  # word -> subword mapping, stored as (start_index, end_index)
        subword_to_word_base = []  # subword -> word mapping, e.g. (0, 1, 1, 2, 2, 2)
for i, query_token in enumerate(example.tokens):
sub_tokens = self.tokenizer.tokenize(query_token)
cur_pos = len(tokens)
if len(sub_tokens) > 0:
word_to_subword_base += [(cur_pos, cur_pos + len(sub_tokens))]
tokens.extend(sub_tokens)
subword_to_word_base.extend([i] * len(sub_tokens))
        # positive loads only positive examples, negative only negative ones; if both are True, load everything
        # iterate over every column of the table and build a <column, question> encoding for each
        # everything related to this table is treated as a positive example here
rel_table = [example.table_id]
        # whether to extend the input with the note fields
if note:
feature_type = 'feature_schema'
else:
feature_type = 'feature_schema_without_note'
for key, value in schema.extend_schema.items():
# 只要正例
if positive and not negative:
if key not in rel_table:
continue
# 只要负例
elif not positive and negative:
if key in rel_table:
continue
# 正例和负例都要
elif positive and negative:
                pass
table = schema.extend_schema[key]
for table_column_feature in table[feature_type]:
                # encode: the input is type, table, column, table_note, column_note, space, token
                # the output is [CLS]type, table, column, table_note, column_note, space[SEP]question[SEP]
assert len(
table_column_feature) <= max_column_length, "{0} URL中参数特征长度过大,请缩小参数参数特征或将配置文件中的" \
"max_column_length增大".format(table['url'])
assert len(''.join(
tokens)) <= max_query_length, '问句: {0} 输入问句长度大于最大字符长度,请将配置文件中的max_query_length' \
'增大或者缩短输入问句长度'.format(example.question)
tokenize_result = self.tokenizer.encode_plus(
table_column_feature,
tokens,
max_length=max_total_length,
truncation_strategy="longest_first",
pad_to_max_length=True,
)
                input_ids = tokenize_result["input_ids"]  # input tokens mapped to ids through the vocabulary
                segment_ids = tokenize_result["token_type_ids"]  # marks column vs question positions, e.g. [0,0,1,1]
                input_mask = tokenize_result["attention_mask"]  # marks the positions occupied by column + question
                new_tokens = self.tokenizer.convert_ids_to_tokens(input_ids)  # encoded sequence [CLS]column[SEP]question[SEP]
assert len(input_ids) == max_total_length
assert len(input_mask) == max_total_length
assert len(segment_ids) == max_total_length
input_feature.tokens.append(new_tokens)
input_feature.input_ids.append(input_ids)
input_feature.input_mask.append(input_mask)
input_feature.segment_ids.append(segment_ids)
return input_feature
def get_predict_input_schema_feature(self, example: SQLExample, schema: schema_parsed, config, note=True):
max_query_length = int(config["max_query_length"])
max_column_length = int(config["max_column_length"])
max_total_length = max_column_length + max_query_length
# 一条feature包含一条train,包含多个<>column<>question<>编码
input_feature = SchemaInputFeature(
example.qid,
example.question,
example.table_id,
example.table_id_relative
)
# 记录映射关系
tokens = []
word_to_subword_base = [] # 词 到 子词 的映射关系,形式(start_index, end_index)
subword_to_word_base = [] # 子词 到 词 的映射关系,形式(0,1,1,2,2,2)
for i, query_token in enumerate(example.tokens):
sub_tokens = self.tokenizer.tokenize(query_token)
cur_pos = len(tokens)
if len(sub_tokens) > 0:
word_to_subword_base += [(cur_pos, cur_pos + len(sub_tokens))]
tokens.extend(sub_tokens)
subword_to_word_base.extend([i] * len(sub_tokens))
# 是否需要对输入进行拓展
if note:
feature_type = 'feature_schema'
else:
feature_type = 'feature_schema_without_note'
for key, value in schema.extend_schema.items():
table = schema.extend_schema[key]
for table_column_feature in table[feature_type]:
# 编码,输入为type, table, column, table_note, column_note, space, token
# 输出[cls]type, table, column, table_note, column_note, space[SEP]question[SEP]
assert len(table_column_feature) <= max_column_length, "列特征长度过小,请将配置文件中的max_column_length增大"
assert len(''.join(tokens)) <= max_query_length, "输入问句长度大于最大字符长度,请将配置文件中的max_query_length" \
"增大或者缩短输入问句长度"
tokenize_result = self.tokenizer.encode_plus(
table_column_feature,
tokens,
max_length=max_total_length,
truncation_strategy="longest_first",
pad_to_max_length=True,
)
input_ids = tokenize_result["input_ids"] # 将输入tokens, 通过词表转换成id
segment_ids = tokenize_result["token_type_ids"] # 标识column和question位置[0,0,1,1]
input_mask = tokenize_result["attention_mask"] # 标识column+question位置编码,也就是整个输入所占的编码
new_tokens = self.tokenizer.convert_ids_to_tokens(input_ids) # 获取编码后的序列[cls]column[SEP]question[SEP]
assert len(input_ids) == max_total_length, 'input_ids, 错误'
assert len(input_mask) == max_total_length, 'input_mask, 错误'
assert len(segment_ids) == max_total_length, 'segment_ids, 错误'
input_feature.tokens.append(new_tokens)
input_feature.input_ids.append(input_ids)
input_feature.input_mask.append(input_mask)
input_feature.segment_ids.append(segment_ids)
return input_feature
def fill_label_schema_feature_split(self, example: SQLExample, input_feature: SchemaInputFeature,
schema: schema_parsed, positive=True, negative=True):
        # positives only, used for training
if positive and not negative:
global_table_id = 0
        # negatives only, used for training
elif not positive and negative:
global_table_id = None
        # both positives and negatives, used for testing
elif positive and negative:
global_table_id = schema.table_local_to_global[str(example.table_id)]
else:
assert False, 'Positive 和 Negative 不能全为负'
# set schema number
schema_num = len(input_feature.input_ids) # 表的数量
# set schema relative
input_feature.schema_rel = [0] * schema_num
if global_table_id is not None:
input_feature.schema_rel[global_table_id] = 1
# set mask
input_feature.schema_mask = [example.seq] * schema_num
return True
# 加载数据
def load_schema_data(self, data_paths, config, schema, include_label=False, positive=True, negative=True,
note=False):
'''
        data_paths: paths to the data files
        config: model configuration
        include_label: whether the data carries labels
'''
        # wrap the inputs into model_inputs
        # each entry is one <column, question> feature
model_inputs = {k: [] for k in ["input_ids", "input_mask", "segment_ids"]}
# 拓展
if include_label:
for k in ['schema_rel', 'schema_mask']:
model_inputs[k] = []
# 记录输入每一个question所对应的特征序列
pos = []
# 多条<>column<>question<>特征
input_features = []
# 支持多个数据集
for data_path in data_paths.split("|"):
cnt = 0
for line in open(data_path, encoding="utf8"):
                # load one sample: a line holds one question, which may map to two SQLs; split it into two samples
                # positive and negative samples are labelled together
for index, example in enumerate(SQLExample.load_from_json(line)):
                    # drop data that is not validated while carrying an sql_label
if not example.valid and include_label == True:
continue
                    # build the input feature: an object wrapping several <col, question> encodings
input_feature = self.get_input_schema_feature(example, schema, config, positive, negative, note)
                    # if sql_label is included, the label features must be filled in
if include_label:
success = self.fill_label_schema_feature_split(example, input_feature, schema, positive,
negative)
if not success:
continue
input_features.append(input_feature) # 总特征
# 当前模型有多少个特征
cur_start = len(model_inputs["input_ids"])
# 当前样本输入有多少个输入特征
cur_sample_num = len(input_feature.input_ids)
# 标记当前输入模型对应特征的开始和结束位置
pos.append((cur_start, cur_start + cur_sample_num))
model_inputs["input_ids"].extend(input_feature.input_ids)
model_inputs["input_mask"].extend(input_feature.input_mask)
model_inputs["segment_ids"].extend(input_feature.segment_ids)
if include_label:
model_inputs['schema_rel'].extend(input_feature.schema_rel)
model_inputs['schema_mask'].extend(input_feature.schema_mask)
else:
                        # if include_label is False this is prediction-time data, so one feature is enough
                        # because the same feature can be predicted twice, once for SQL1 and once for SQL2
break
cnt += 1
if cnt % 5000 == 0:
print('featuring:', cnt)
'''
        input_features : the data features, effectively the labels
        model_inputs : the model inputs
        pos : mapping from one sample to its <column, question> feature range
'''
return input_features, model_inputs, pos
# 加载数据
def load_schema_predict_data(self, example: SQLExample, config, schema, note=True):
'''
        data_paths: paths to the data files
        config: model configuration
        include_label: whether the data carries labels
'''
# 将input,封装到model_input 中
# 每一条数据是<>col<>question<>的特征
model_inputs = {k: [] for k in ["input_ids", "input_mask", "segment_ids"]}
# 记录输入每一个question所对应的特征序列
pos = []
# 多条<>column<>question<>特征
input_features = []
# 获取输入特征,返回一个对象,里面封装了多个<col,question>,<col,question>
input_feature = self.get_predict_input_schema_feature(example, schema, config, note)
input_features.append(input_feature) # 总特征
# 当前模型有多少个特征
cur_start = len(model_inputs["input_ids"])
# 当前样本输入有多少个输入特征
cur_sample_num = len(input_feature.input_ids)
# 标记当前输入模型对应特征的开始和结束位置
pos.append((cur_start, cur_start + cur_sample_num))
model_inputs["input_ids"].extend(input_feature.input_ids)
model_inputs["input_mask"].extend(input_feature.input_mask)
model_inputs["segment_ids"].extend(input_feature.segment_ids)
'''
        input_features : the data features, effectively the labels
        model_inputs : the model inputs
        pos : mapping from one sample to its <column, question> feature range
'''
return input_features, model_inputs, pos
class SchemaDataset(torch_data.Dataset):
def __init__(self, config, featurizer: HydraSchemaFeaturizer, schema):
self.config = config
self.featurizer = featurizer
self.schema = schema
self.input_features = None
self.model_inputs = None
self.pos = None
def loads(self, data_paths, include_label=False, positive=True, negative=True, note=False):
self.input_features, self.model_inputs, self.pos = self.featurizer.load_schema_data(data_paths,
self.config,
self.schema,
include_label,
positive,
negative,
note)
self.trans_data_type()
print("file {0} loaded".format(data_paths))
def load_for_predict(self, example, note=True):
self.input_features, self.model_inputs, self.pos = self.featurizer.load_schema_predict_data(example,
self.config,
self.schema,
note)
self.trans_data_type()
# 转换数据类型
def trans_data_type(self):
for k in self.model_inputs:
self.model_inputs[k] = np.array(self.model_inputs[k], dtype=np.int64)
# 这两个函数是dataloader的关键
def __len__(self):
return self.model_inputs["input_ids"].shape[0]
def __getitem__(self, idx):
return {k: v[idx] for k, v in self.model_inputs.items()}
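    # A minimal sketch of how this dataset is typically consumed; the method name and the
    # batch size below are illustrative assumptions, not values used elsewhere in the project.
    def as_dataloader(self, batch_size=32):
        """Wrap the featurized inputs in a DataLoader for batched inference."""
        return torch_data.DataLoader(self, batch_size=batch_size, shuffle=False)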
if __name__ == "__main__":
    pass
```
#### File: Text2JSON/predict/prediction.py
```python
import sys
from os.path import dirname, abspath
path = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(path)
import json
import numpy as np
import os
from Text2JSON.model.base_model import BaseModel
from Text2JSON.featurizer.schema_featurizer import SchemaDataset
from Text2JSON.featurizer.column_featurizer import ColumnDataset
from Text2JSON.featurizer.Input_SQL import SQLExample
from Text2JSON.train.utils import dump_model, load_model
op_map = {'': 0, 'eq': 1, 'not': 2, 'gt': 3, 'lt': 4, 'gte': 5, 'lte': 6, 'like': 7,
'isnull': 8, 'notnull': 9, 'btw': 10, 'in': 11}
agg_map = {'': 0, 'eq': 1, 'not': 2, 'gt': 3, 'lt': 4, 'gte': 5, 'lte': 6, 'like': 7,
'isnull': 8, 'notnull': 9, 'btw': 10, 'in': 11}
agg_map_reverse = {0: '', 1: 'eq', 2: 'not', 3: 'gt', 4: 'lt', 5: 'gte', 6: 'lte', 7: 'like', 8: 'isnull', 9: 'notnull',
10: 'btw', 11: 'in'}
op_map_reverse = {0: '', 1: 'eq', 2: 'not', 3: 'gt', 4: 'lt', 5: 'gte', 6: 'lte', 7: 'like', 8: 'isnull', 9: 'notnull',
10: 'btw', 11: 'in'}
class request_template(object):
def __init__(self, method, url, params):
self.method = method
self.url = url
self.params = params
def to_dict(self):
return {
'method': self.method,
'url': self.url,
'params': self.params
}
class GenJSON(object):
def __init__(self, config, schema_data, column_data, schema, model1, model2):
self.config = config
self.schema_data = schema_data
self.column_data = column_data
self.schema = schema
self.model1 = model1
self.model2 = model2
def gen_json(self, question, first_json=1, sec_json=1, qid=0, size=50):
self.config['schema_evaluator_batch_size'] = size
self.config['column_evaluator_batch_size'] = size
example = self.question_pre_handle(question, qid)
self.schema_data.load_for_predict(example, True)
schema_model_outputs = self.model1.dataset_inference(self.schema_data)
# SQL1 relative
first_table_relevant_prob = np.exp(schema_model_outputs[0]['schema_first_sim'][:, 0])
first_table_id_prob_exp = sorted(enumerate(first_table_relevant_prob), key=lambda x: x[1], reverse=True)
first_table_rel = []
for table_index, table_pro in first_table_id_prob_exp:
if table_pro >= float(self.config['schema_threshold']):
first_table_rel.append(table_index)
if len(first_table_rel) == int(self.config['schema_remain']):
break
# SQL2 relative
sec_table_relevant_prob = np.exp(schema_model_outputs[0]['schema_sec_sim'][:, 0])
sec_table_id_prob_exp = sorted(enumerate(sec_table_relevant_prob), key=lambda x: x[1], reverse=True)
sec_table_rel = []
for table_index, table_pro in sec_table_id_prob_exp:
if table_pro >= float(self.config['schema_threshold']):
sec_table_rel.append(table_index)
            if len(sec_table_rel) == int(self.config['schema_remain']):  # stop once enough second-SQL candidates are kept
break
result = None
        # no pruning for now
if len(first_table_rel) == 0:
result = {'qid': qid, 'question': question, 'query': []}
        # two SQL statements
else:
# zip SQL1 and SQL2
example.first_schema_id = [self.schema.table_global_to_local[index] for index in first_table_rel]
example.sec_schema_id = [self.schema.table_global_to_local[index] for index in sec_table_rel]
self.column_data.load_for_predict(example, True)
            # used to mark the relation between the two statements
# model output SQL
column_model_outputs = self.model2.dataset_inference(self.column_data)
for sql1_index, first_schema_id in enumerate(first_table_rel):
first_relation, sec_relation = None, None
if sql1_index == int(first_json):
break
first_schema_id = self.schema.table_global_to_local[first_schema_id]
input_feature = self.column_data.input_features[sql1_index]
model_output = column_model_outputs[sql1_index]
first_request, first_relation = self.sql_parser(self.model2, first_schema_id, input_feature,
model_output, 0)
if len(sec_table_rel) == 0:
result = {'qid': qid, 'question': question, 'query': [first_request.to_dict()]}
continue
for sql2_index, sec_schema_id in enumerate(sec_table_rel):
if sql2_index == int(sec_json):
break
sec_schema_id = self.schema.table_global_to_local[sec_schema_id]
input_feature = self.column_data.input_features[len(first_table_rel) + sql2_index]
model_output = column_model_outputs[len(first_table_rel) + sql2_index]
sec_request, sec_relation = self.sql_parser(self.model2, sec_schema_id, input_feature, model_output,
1)
                    # pruning
if first_request.url == sec_request.url:
# 只要第一句
result = {'qid': qid, 'question': question, 'query': [first_request.to_dict()]}
elif first_relation == 0 and sec_relation == 0:
# 只要第一句
result = {'qid': qid, 'question': question, 'query': [first_request.to_dict()]}
else:
# 两句都要
result = {'qid': qid, 'question': question,
'query': [first_request.to_dict(), sec_request.to_dict()]}
return result
def question_pre_handle(self, line, qid):
"""
qid
question
table_id
tokens
word_to_char_start
"""
example = SQLExample()
example.qid = qid
example.table_id = None
line = line.replace(' ', '')
tokens = [char for char in line]
example.tokens = tokens
example.question = line
space_list = [i for i, x in enumerate(' '.join(tokens)) if x == ' ']
word_to_char_start = [0]
word_to_char_start.extend([space_list[index] + 1 for index in range(0, len(space_list) - 1)])
example.word_to_char_start = word_to_char_start
return example
def generate_json(self, model: BaseModel, input_feature, model_output, sql_seq=0):
"""
predict sql on dataset
        dataset: the data to predict on
        model_outputs: the model outputs for that data
"""
stop_word_list = ['【', '】', '[', ']', '"', "'", '‘', '’', '“', ',', '.', '。', ' ', '”']
sel, agg, where, _, conditions, relationship = model. \
parse_output_with_threshold(input_feature, model_output, sql_seq, float(self.config['column_threshold']))
conditions_with_value_texts = []
for wc in where:
_, op, span_list = conditions[wc]
            # use subword_to_word to get the start_index / end_index of the corresponding tokens
value_span_text = []
for se in span_list:
# 获取是第几个词
start_index = se[0]
end_index = se[1]
if start_index >= len(input_feature.subword_to_word[wc]):
start_index = len(input_feature.subword_to_word[wc]) - 1
if end_index >= len(input_feature.subword_to_word[wc]):
end_index = len(input_feature.subword_to_word[wc]) - 1
word_start = input_feature.subword_to_word[wc][start_index]
word_end = input_feature.subword_to_word[wc][end_index] + 1
span = ''.join(input_feature.tokens_common[word_start:word_end]).rstrip()
for stop_word in stop_word_list:
if stop_word in span:
                        span = span.replace(stop_word, '')  # str.replace returns a new string
value_span_text.append(span)
conditions_with_value_texts.append((wc, op, value_span_text))
        # package agg, select and conditions into the SQL
return sel, agg, conditions_with_value_texts, relationship
def parse_params(self, schema_id, sel, agg, conditions_with_value_texts):
        # walk through the SQL statement
params = []
select_params = []
where_params = []
        # pruning could be done here: check whether a select column also appears in where, or vice versa
for index, sel_col_index in enumerate(sel):
col_name_value = self.schema.extend_schema[schema_id]['header'][sel_col_index]
if '&' not in col_name_value:
                # skip placeholder columns
continue
col_name = col_name_value.split('&')[0]
col_value = col_name_value.split('&')[1]
agg_name = agg_map_reverse[agg[index][1]]
if agg_name == 'in' or agg_name == 'btw':
col_value = '"' + col_value + '"'
select_params.append((col_name, agg_name, col_value))
for where_col_index, op_index, value_list in conditions_with_value_texts:
col_name = self.schema.extend_schema[schema_id]['header'][where_col_index]
op_name = op_map_reverse[op_index]
if len(value_list) >= 2:
for value_index, value in enumerate(value_list):
value = value_list[value_index]
value = value.replace('”', '')
value = value.replace('"', '')
value_list[value_index] = '"' + str(value) + '"'
value = value_list
else:
value = value_list[0]
value = value.replace('”', '')
value = value.replace('"', '')
value = ['"' + str(value) + '"']
where_params.append((col_name, op_name, value))
        # merge entries that share the same column name
select_name_agg = {}
select_name_value = {}
for col_name, agg_name, col_value in select_params:
if col_name not in select_name_agg.keys():
select_name_agg[col_name] = [agg_name]
else:
select_name_agg[col_name].append(agg_name)
if col_name not in select_name_value.keys():
select_name_value[col_name] = [col_value]
else:
select_name_value[col_name].append(col_value)
        # prune the aggregations: keep the most likely one
for key in select_name_agg.keys():
if len(select_name_agg[key]) == 1:
continue
else:
agg_name_num = {}
for agg_name in select_name_agg[key]:
if agg_name not in agg_name_num.keys():
agg_name_num[agg_name] = 1
else:
agg_name_num[agg_name] += 1
if len(agg_name_num.keys()) == 1:
continue
find_max = -1
max_agg_name = None
for agg_name in agg_name_num.keys():
if agg_name_num[agg_name] > find_max:
find_max = agg_name_num[agg_name]
max_agg_name = agg_name
                # heuristic introduced here: multiple 'eq' values collapse into an 'in'
if max_agg_name == 'eq' and len(select_name_value[key]) > 1:
select_name_agg[key] = ['in']
else:
select_name_agg[key] = [max_agg_name]
params = []
        # render into the JSON parameter format
for col_name in select_name_agg.keys():
params.append({'name': col_name, 'option': select_name_agg[col_name][0],
'value': str(select_name_value[col_name]) if len(select_name_value[col_name]) > 1
or select_name_agg[col_name][0] == 'in'
else str(select_name_value[col_name][0])})
for col_name, op_name, value in where_params:
if op_name != 'in' and len(value) == 1:
value = value[0].replace('"', '')
params.append({'name': col_name, 'option': op_name, 'value': str(value)})
return params
def get_url_method(self, table_id):
url = self.schema.extend_schema[table_id]['url']
method = self.schema.url_method[url]
return url, method
def sql_parser(self, model: BaseModel, schema_id, input_feature, model_output, sql_seq):
sel, agg, conditions_with_value_texts, relationship = self.generate_json(model, input_feature, model_output,
sql_seq)
# parsing params
params = self.parse_params(schema_id, sel, agg, conditions_with_value_texts)
# get url and method
url, method = self.get_url_method(schema_id)
# request
return request_template(method, url, params), self.first_or_sec(relationship)
def first_or_sec(self, relationship):
if len(relationship) == 0:
return 0
first = 0
sec = 0
for index, rel in relationship:
if rel == 0:
first += 1
else:
sec += 1
return 0 if first > sec else 1
def load_test_data(data_path):
question_list = []
for line in open(data_path, encoding="utf8"):
data = json.loads(line)
question_list.append((data['qid'], data['question'].replace(' ', '')))
return question_list
class Predict(object):
def __init__(self):
# 设置参数
self.config = None
# 词编码器
self.tokenizer = None
# schema特征提取器
self.schema_featurizer = None
# column特征提取器
self.column_featurizer = None
# 加载数据表
self.schema = None
# schema模型
self.model1 = None
# column模型
self.model2 = None
# 数据加载器
self.schema_data = None
self.column_data = None
# 预测器
self.prediction = None
def set_params(self, config, tokenizer, schema_featurizer, column_featurizer, schema,
schema_model, column_model):
# 设置参数
self.config = config
# 词编码器
self.tokenizer = tokenizer
# schema特征提取器
self.schema_featurizer = schema_featurizer
# column特征提取器
self.column_featurizer = column_featurizer
# 加载数据表
self.schema = schema
# 选择模型, 加载模型
self.model1 = schema_model
self.model2 = column_model
# 数据加载器
self.schema_data = SchemaDataset(self.config, self.schema_featurizer, self.schema)
self.column_data = ColumnDataset(self.config, self.column_featurizer, self.schema)
# 预测器
self.prediction = GenJSON(self.config, self.schema_data, self.column_data, self.schema, self.model1,
self.model2)
def load_to_gpu(self):
self.model1.to_gpu()
self.model2.to_gpu()
self.config['has_cuda'] = True
def load_to_cpu(self):
self.model1.to_cpu()
self.model2.to_cpu()
self.config['has_cuda'] = False
def load_model(self, model_path):
model = load_model(model_path)
# 设置参数
self.config = model.config
# 词编码器
self.tokenizer = model.tokenizer
# schema特征提取器
self.schema_featurizer = model.schema_featurizer
# column特征提取器
self.column_featurizer = model.column_featurizer
# 加载数据表
self.schema = model.schema
# 选择模型, 加载模型
self.model1 = model.model1
self.model2 = model.model2
# 数据加载器
self.schema_data = model.schema_data
self.column_data = model.column_data
# 预测器
self.prediction = model.prediction
self.config['has_cuda'] = False
def dump_model(self, model_path):
dump_model(self, model_path)
print('Client model saved in path {0}'.format(model_path))
def predict(self, question, first_sql=1, sec_sql=1, qid=0, size=50):
return self.prediction.gen_json(question, first_sql, sec_sql, qid, size)
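# A minimal usage sketch (assumptions: 'client_model.pkl' and the question are placeholders;
# load_model expects a model previously saved with dump_model).
if __name__ == '__main__':
    client = Predict()
    client.load_model('client_model.pkl')
    print(client.predict('明天下午三点的会议', qid=1))  # "the meeting at 3 pm tomorrow"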
```
#### File: Text2JSON/train/evaluator.py
```python
import sys
from os.path import dirname, abspath
path = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(path)
from Text2JSON.train import utils
from Text2JSON.predict.prediction import Predict
import datetime
def f_one(label_file, predict_file):
stop_word_list = ['【', '】', '[', ']', '"', "'", '‘', '’', '“', ',', '.', '。', ' ', '”']
label_data = utils.load_jsonl_by_key('qid', label_file)
predit_data = utils.load_jsonl_by_key('qid', predict_file)
sum_method_recall, sum_url_recall, sum_params_name_recall, \
sum_params_op_recall, sum_params_value_recall, sum_rel_recall = 0., 0., 0., 0., 0., 0.
sum_method_acc, sum_url_acc, sum_params_name_acc, \
sum_params_op_acc, sum_params_value_acc, sum_rel_acc = 0., 0., 0., 0., 0., 0.
for qid in label_data.keys():
label = label_data[qid]
predict = predit_data[qid]
label_query = label['query']
predict_query = predict['query']
# predict parsing
predict_params_name = {}
predict_params_op = {}
predict_params_value = {}
for query in predict_query:
url = query['url']
name_to_option_dict = {}
name_to_value_dict = {}
predict_params_name[url] = []
predict_params_op[url] = name_to_option_dict
predict_params_value[url] = name_to_value_dict
for params in query['params']:
name = params['name']
option = params['option']
value = str(params['value'])
for i in stop_word_list:
value = value.replace(i, '')
value = value.split(',')
predict_params_name[url].append(name)
predict_params_op[url][name] = option
predict_params_value[url][name] = value
# label parsing
label_params_name = {}
label_params_op = {}
label_params_value = {}
for query in label_query:
url = query['url']
name_to_option_dict = {}
name_to_value_dict = {}
label_params_name[url] = []
label_params_op[url] = name_to_option_dict
label_params_value[url] = name_to_value_dict
if 'params' not in query.keys():
continue
for params in query['params']:
name = params['name']
option = params['option']
value = str(params['value'])
for i in stop_word_list:
value = value.replace(i, '')
value = value.split(',')
label_params_name[url].append(name)
label_params_op[url][name] = option
label_params_value[url][name] = value
rel_recall = 0.
rel_acc = 0.
# eval structure message
if len(label_query) == len(predict_query):
rel_acc = 1
rel_recall = 1
if len(label_query) > len(predict_query):
rel_recall = (float(len(predict_query)) + 0.0001) / (len(label_query) + 0.0001)
if len(predict_query) == 0:
rel_acc = 0
else:
rel_acc = 1
if len(predict_query) > len(label_query):
rel_acc = (float(len(label_query)) + 0.0001) / (len(predict_query) + 0.0001)
rel_recall = 1
# eval url and method
url_num = 0
label_url = label_params_name.keys()
predict_url = predict_params_name.keys()
for url in label_url:
if url in predict_url:
url_num += 1
url_acc = (url_num + 0.0001) / (float(len(predict_url)) + 0.0001)
url_recall = (url_num + 0.0001) / (float(len(label_url)) + 0.0001)
method_acc = url_acc
method_recall = url_recall
# eval params_name recall
params_name_recall = 0
for url, label_name_list in label_params_name.items():
sub_name_num = 0
if url not in predict_params_name.keys():
continue
predict_name_list = predict_params_name[url]
for param in label_name_list:
if param in predict_name_list:
sub_name_num += 1
params_name_recall += (sub_name_num + 0.0001) / (float(len(label_name_list)) + 0.0001)
params_name_recall = (params_name_recall + 0.0001) / (float(len(label_params_name.keys())) + 0.0001)
# params acc
params_name_acc = 0
for url, predict_name_list in predict_params_name.items():
sub_name_num = 0
if url not in label_params_name.keys():
continue
label_name_list = label_params_name[url]
for param in predict_name_list:
if param in label_name_list:
sub_name_num += 1
params_name_acc += (sub_name_num + 0.0001) / (float(len(predict_name_list)) + 0.0001)
params_name_acc = (params_name_acc + 0.0001) / (float(len(predict_params_name.keys())) + 0.0001)
# option recall
params_op_recall = 0
for url, label_option_dict in label_params_op.items():
sub_op_num = 0
if url not in predict_params_op.keys():
continue
predict_option_dict = predict_params_op[url]
for key, op in label_option_dict.items():
if key in predict_option_dict.keys():
if op == predict_option_dict[key]:
sub_op_num += 1
params_op_recall += (sub_op_num + 0.0001) / (float(len(label_option_dict.keys())) + 0.0001)
params_op_recall = (params_op_recall + 0.0001) / (float(len(label_params_op.keys())) + 0.0001)
# option acc
params_op_acc = 0
for url, predict_option_dict in predict_params_op.items():
sub_op_num = 0
if url not in label_params_name.keys():
continue
label_option_dict = label_params_op[url]
for key, op in predict_option_dict.items():
if key in label_option_dict.keys():
if op == label_option_dict[key]:
sub_op_num += 1
params_op_acc += (sub_op_num + 0.0001) / (float(len(predict_option_dict.keys())) + 0.0001)
params_op_acc = (params_op_acc + 0.0001) / (float(len(predict_params_op.keys())) + 0.0001)
# value recall
params_value_recall = 0
for url, label_value_dict in label_params_value.items():
if url not in predict_params_value.keys():
continue
predict_value_dict = predict_params_value[url]
sec_layer_recall = 0
for key, value_list in label_value_dict.items():
if key not in predict_value_dict:
continue
value_num = 0
for value in value_list:
if value in predict_value_dict[key]:
value_num += 1
sec_layer_recall += (value_num + 0.0001) / (float(len(value_list) + 0.0001))
params_value_recall += (sec_layer_recall + 0.0001) / (float(len(label_value_dict.keys()) + 0.0001))
params_value_recall = (params_value_recall + 0.0001) / (float(len(label_params_value.keys()) + 0.0001))
# value acc
params_value_acc = 0
for url, predict_value_dict in predict_params_value.items():
if url not in label_params_value.keys():
continue
label_value_dict = label_params_value[url]
sec_layer_acc = 0
for key, value_list in predict_value_dict.items():
if key not in label_value_dict:
continue
value_num = 0
for value in value_list:
if value in label_value_dict[key]:
value_num += 1
sec_layer_acc += (value_num + 0.0001) / (float(len(value_list) + 0.0001))
params_value_acc += (sec_layer_acc + 0.0001) / (float(len(predict_value_dict.keys()) + 0.0001))
params_value_acc = (params_value_acc + 0.0001) / (float(len(predict_params_value.keys()) + 0.0001))
sum_method_recall += method_recall
sum_url_recall += url_recall
sum_params_name_recall += params_name_recall
sum_params_op_recall += params_op_recall
sum_params_value_recall += params_value_recall
sum_rel_recall += rel_recall
sum_method_acc += method_acc
sum_url_acc += url_acc
sum_params_name_acc += params_name_acc
sum_params_op_acc += params_op_acc
sum_params_value_acc += params_value_acc
sum_rel_acc += rel_acc
cnt = len(label_data.keys())
sum_method_recall = sum_method_recall / cnt
sum_url_recall = sum_url_recall / cnt
sum_params_name_recall = sum_params_name_recall / cnt
sum_params_op_recall = sum_params_op_recall / cnt
sum_params_value_recall = sum_params_value_recall / cnt
sum_rel_recall = sum_rel_recall / cnt
sum_method_acc = sum_method_acc / cnt
sum_url_acc = sum_url_acc / cnt
sum_params_name_acc = sum_params_name_acc / cnt
sum_params_op_acc = sum_params_op_acc / cnt
sum_params_value_acc = sum_params_value_acc / cnt
sum_rel_acc = sum_rel_acc / cnt
sum_recall = (sum_method_recall + sum_url_recall + sum_params_name_recall
+ sum_params_op_recall + sum_params_value_recall + sum_rel_recall) / 6.
sum_acc = (sum_method_acc + sum_url_acc + sum_params_name_acc + sum_params_op_acc
+ sum_params_value_acc + sum_rel_acc) / 6.
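    # F1 below is the harmonic mean of the averaged recall and accuracy computed above;
    # the 0.0001 terms used throughout are smoothing constants that avoid division by zero.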
f_one = (2 * sum_recall * sum_acc) / (sum_recall + sum_acc)
result_str = 'F1:{0:.4f}, Recall:{1:.4f}, ACC:{2:.4f}'.format(f_one, sum_recall, sum_acc) + '\n' + \
'-------------------------------------------------------------------------------------' + '\n' + \
'Recall' + '\n' + \
'Method:{0:.4f}, URL:{1:.4f}, Name:{2:.4f}, Option:{3:.4f}, Value:{4:.4f}, Rel:{5:.4f}'. \
format(sum_url_recall, sum_url_recall, sum_params_name_recall, sum_params_op_recall,
sum_params_value_recall, sum_rel_recall) + '\n' + \
'-------------------------------------------------------------------------------------' + '\n' + \
'Accuracy' + '\n' + \
'Method:{0:.4f}, URL:{1:.4f}, Name:{2:.4f}, Option:{3:.4f}, Value:{4:.4f}, Rel:{5:.4f}'. \
format(sum_url_acc, sum_url_acc, sum_params_name_acc, sum_params_op_acc, sum_params_value_acc,
sum_rel_acc) + '\n'
print('F1:{0:.4f}, Recall:{1:.4f}, ACC:{2:.4f}'.format(f_one, sum_recall, sum_acc))
print('----------------------------------------------------------------------------------')
print('Recall')
print('Method:{0:.4f}, URL:{1:.4f}, Name:{2:.4f}, Option:{3:.4f}, Value:{4:.4f}, Rel:{5:.4f}'.format(sum_url_recall,
sum_url_recall,
sum_params_name_recall,
sum_params_op_recall,
sum_params_value_recall,
sum_rel_recall))
print('----------------------------------------------------------------------------------')
print('Accuracy')
print('Method:{0:.4f}, URL:{1:.4f}, Name:{2:.4f}, Option:{3:.4f}, Value:{4:.4f}, Rel:{5:.4f}'.
format(sum_url_acc, sum_url_acc, sum_params_name_acc, sum_params_op_acc, sum_params_value_acc, sum_rel_acc))
return result_str
def bleu_one(label_file, predict_file):
stop_word_list = ['【', '】', '[', ']', '"', "'", '‘', '’', '“', ',', '.', '。', ' ', '”']
label_data = utils.load_jsonl_by_key('qid', label_file)
predit_data = utils.load_jsonl_by_key('qid', predict_file)
sum_bleu = 0
for qid in label_data.keys():
label = label_data[qid]
predict = predit_data[qid]
label_query = label['query']
predict_query = predict['query']
# predict parsing
predict_list = []
for query in predict_query:
url = query['url']
method = query['method']
predict_list.append(url)
predict_list.append(method)
for params in query['params']:
name = params['name']
option = params['option']
value = str(params['value'])
for i in stop_word_list:
value = value.replace(i, '')
value = value.split(',')
predict_list.append(name)
predict_list.append(option)
predict_list.extend(value)
# label parsing
label_list = []
for query in label_query:
url = query['url']
method = query['method']
label_list.append(url)
label_list.append(method)
if 'params' not in query.keys():
continue
for params in query['params']:
name = params['name']
option = params['option']
value = str(params['value'])
for i in stop_word_list:
value = value.replace(i, '')
value = value.split(',')
label_list.append(name)
label_list.append(option)
label_list.extend(value)
bleu = 0
for token in predict_list:
if token in label_list:
bleu += 1
sum_bleu += float(bleu) / len(predict_list)
sum_bleu = sum_bleu / len(predit_data.keys())
result_str = 'BLEU-1:{0:.4f}'.format(sum_bleu) + \
'\n----------------------------------------------------------------------'
print('BLEU-1:{0:.4f}'.format(sum_bleu))
return result_str
def eval_bleu1_f1(model: Predict, label_path, output_path):
"""
    param: model  the client-side Predict model
label_path:
"""
predict_list = []
for data in utils.load_jsonl(label_path):
qid = data['qid']
question = data['question']
predict_list.append(model.predict(question, qid=qid))
utils.dump_jsonl(predict_list, output_path)
a_result = bleu_one(label_path, output_path)
b_result = f_one(label_path, output_path)
return a_result, b_result
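# A minimal usage sketch (assumption: both .jsonl paths are placeholders for files that
# already exist; each line must be a JSON object carrying 'qid' and 'query' fields).
if __name__ == '__main__':
    print(bleu_one('test_label.jsonl', 'test_predict.jsonl'))
    print(f_one('test_label.jsonl', 'test_predict.jsonl'))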
``` |
{
"source": "1175592624/Py-Packing",
"score": 3
} |
#### File: 1175592624/Py-Packing/Py_Packing.py
```python
import os
import shutil
import tkinter
from tkinter import filedialog
def main():
def Deletefolder(Foldername):
shutil.rmtree(Folderpath + f'/{Foldername}')
Installer = os.popen('pip install PyInstaller')
p=Installer.read()
    if p.find('completed successfully.', -30, -1) != -1 \
            or p.find('Requirement already satisfied', 0, 50) != -1:
root = tkinter.Tk()
root.withdraw()
Filepath = filedialog.askopenfilename(
title='选择您想打包的Py文件',
filetypes=[('Python文件','.py')])
(Folderpath,Filename) = os.path.split(Filepath)
os.system('cd '+ Folderpath + '&& pyinstaller -F ' + Filename)
Savefilepath = str(filedialog.asksaveasfilename(
title='请选择exe文件存储路径',
filetypes=[('可执行程序','.exe')],
initialfile=Filename.replace('py','exe')))
(Savepath,Savename) = os.path.split(Savefilepath)
Filepath_new = Folderpath + '/dist/' + Filename.replace('py','exe')
        # move the built exe straight to the path chosen in the save dialog
        shutil.move(Filepath_new, Savefilepath)
        os.remove(Filepath.replace('.py', '.spec'))
Deletefolder('dist')
Deletefolder('build')
Deletefolder('__pycache__')
if __name__ == '__main__':
main()
``` |
{
"source": "1179069501/-",
"score": 2
} |
#### File: demo/classdemo/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from django.views.generic.base import View
class RegisterView(View):
def get(self,request):
print('RegisterView get')
return HttpResponse('RegisterView get')
def set(self,request):
print('RegisterView set')
return HttpResponse('RegisterView set')
```
#### File: demo/cookdemo/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def setcookfunc(request):
response = HttpResponse('setcookfunc')
    # response.set_cookie('key', 'value', max_age)
response.set_cookie('itcast','python',max_age=3600*24*14)
return response
def getcookfunc(request):
    '''Read the cookie value'''
value = request.COOKIES.get('itcast')
print(value)
return HttpResponse('value')
```
#### File: demo/users/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render, redirect
# Create your views here.
from django.urls import reverse
def haha(request):
print("haha函数")
return HttpResponse('haha函数')
def index(request):
"""
    index view
    :param request: request object carrying the request information
    :return: response object
"""
print('index函数')
# a = 1/0
return HttpResponse("index函数")
def say(request):
'''say'''
print('say')
url = reverse('uu:index')
print(url)
return redirect(url)
def sayhello(request):
"""sayhello"""
print('sayhello')
return HttpResponse('sayhello')
``` |
{
"source": "11813147ak/MusicPi",
"score": 3
} |
#### File: MusicPi/test/test_init.py
```python
from unittest import TestCase
import os
from nose.tools import with_setup, raises
fifo = "/tmp/mplayercontrol.fifo"
class FifoTestCase(TestCase):
def setUp(self):
print 'before test'
def tearDown(self):
print 'after test'
def fifo_test(self):
exist = os.path.exists(fifo)
assert exist
``` |
{
"source": "119068489/game_server",
"score": 2
} |
#### File: 119068489/game_server/build_windows_server.py
```python
import os
import shutil
import subprocess
import sys
import hashlib
import socket
import time
import build_linux_yaml_template as template
NOW_TEXT = time.strftime("%Y-%m-%d_%H_%M_%S%z", time.localtime())
# Sub-game types must be represented by exactly 2 digits -- no more, no less.
DIR_LIST = ["hall","login","backstage","shop","statistics"]
DIR_NAME = "linux_server_" + NOW_TEXT  # temporary packaging directory, removed after packaging
EXECUTE_NAME = "chat_server.exe"  # name of the packaged executable
NEED_HASH_DIR_LIST = [  # TODO: stop maintaining this list by hand; recurse every directory and hash each *.go found
"execute",
"hall",
"login",
"shop",
"backstage",
"mongo_init",
"easygo",
"for_game",
"pb",
"deleter",
]
FINGERPRINT_FILE_NAME = DIR_NAME + "/fingerprint_{now}.txt".format(now=NOW_TEXT)
# CONFIG_DIR_NAME = "config_package"
DEFAULT_HOST = '192.168.50.27'
DEFAULT_DB='192.168.50.27'
# Check whether there are modified-but-uncommitted or new *.go files
def has_change_or_new_file():
code, s = subprocess.getstatusoutput("git status")
if code != 0:
raise Exception(s)
return ".go" in s
def build(dir_name):
os.chdir("execute")
print("准备编译可执行文件 {} ...".format(EXECUTE_NAME))
#text = "set CGO_ENABLED=0&&set GOOS=linux&&set GOARCH=amd64&&go build -o ../{dir_name}/{exe_name}".format(dir_name=dir_name, exe_name=EXECUTE_NAME)
text = "go build -o ../{dir_name}/{exe_name}".format(dir_name=dir_name, exe_name=EXECUTE_NAME)
    code, s = subprocess.getstatusoutput(text)  # must run in the same thread, otherwise it does not work
if code != 0:
raise Exception(s)
print("编译成功")
os.chdir(".." )
def deal_yaml_json_py_etc(dir_name, dir_list, is_full, group, protocol, host, db):  # assemble the linux package directory
if not is_full:
return
os.chdir("execute")
os.chdir("../%s"%dir_name)
    # Copy the helper scripts into the current (package) directory
shutil.copy("../start.py", "./")
shutil.copy("../stop_by_pid_file.py", "./")
shutil.copy("../stop_by_grep.py", "./")
shutil.copy("../backup.py", "./")
shutil.copy("../deploy.py", "./")
content = template.TEMPLATE_SHARE.format(group=group,center="127.0.0.1",db="127.0.0.1")
with open("./config_share.yaml", "w", encoding="utf-8") as f:
f.write(content)
    content = template.TEMPLATE_HALL_SECRET  # written as-is, no .format(group=group, host=host) needed
with open("./config_hall_secret.yaml", "w", encoding="utf-8") as f:
f.write(content)
# os.mkdir(CONFIG_DIR_NAME)
# os.chdir(CONFIG_DIR_NAME)
# os.system('xcopy "../../cheat" "cheat" /s /e /i /y')
# os.system('xcopy "../../config" "config" /s /e /i /y')
# os.chdir("../")
    for dir in dir_list:  # copy the config files into each server sub-directory
os.mkdir(dir)
print("创建 %s\t子目录,并生成了 yaml 配置文件进去 "%(dir,))
os.chdir(dir)
if dir == "hall":
content = template.TEMPLATE_HALL.format(group=group, host=host)
with open("./config_hall.yaml", "w", encoding="utf-8") as f:
f.write(content)
elif dir == "login":
content = template.TEMPLATE_LOGIN.format(group=group, host=host)
with open("./config_login.yaml", "w", encoding="utf-8") as f:
f.write(content)
elif dir == "shop":
content = template.TEMPLATE_SHOP.format(group=group, host=host)
with open("./config_shop.yaml", "w", encoding="utf-8") as f:
f.write(content)
elif dir == "backstage":
content = template.TEMPLATE_BACKSTAGE.format(group=group, host=host)
with open("./config_backstage.yaml", "w", encoding="utf-8") as f:
f.write(content)
shutil.copy("../../backstage/version.json", "./")
shutil.copy("../../backstage/tfserver.json", "./")
elif dir == "statistics":
content = template.TEMPLATE_STATISTICS.format(group=group, host=host)
with open("./config_statistics.yaml", "w", encoding="utf-8") as f:
f.write(content)
else:
raise Exception("未知的目录 "+ dir)
os.mkdir("logs")
os.chdir("../")
os.chdir("../")
def package_zip(dir_name, is_full):  # compress the package directory into a zip file
print("开始压缩 %s 目录,耗时较长,耐心等候 ...... " %(dir_name,))
if is_full:
t = "full"
else:
t = "execute"
name = "%s_%s.zip" %(dir_name, t)
text = "7z.exe -tZip a %s ./%s -mx9"%(name, dir_name)
code, s = subprocess.getstatusoutput(text)
if code != 0:
text = "安装7z压缩软件了吗???设置7z的环境变量了吗???"
raise Exception(text + s)
print("压缩 OK,包名是 "+name)
def remove_dir(dir_name):  # delete the packaging directory
if os.path.exists(dir_name):
print("删除临时打包目录 "+ dir_name)
shutil.rmtree(dir_name)
def hash_file(file_name):  # compute the MD5 digest of a file
if not os.path.isfile(file_name):
return
myhash = hashlib.md5()
with open(file_name,'rb') as f:
while True:
b = f.read(8096)
if not b:
break
myhash.update(b)
return myhash.hexdigest()
def hash_all_file(dir_name):  # hash every file found under the given directory
lst = []
for (root, dirs, files) in os.walk(dir_name):
_ = dirs
for file_name in files:
s1 = hash_file(root+"\\"+file_name)
s2 = "%s\\%s: %s\n" % (root,file_name, s1)
lst.append(s2)
return "".join(lst)
def gen_fingerprint_file(fingerprint_file, need_hash_dir_list, branch_name):  # fingerprint the *.go source files
    if os.path.exists(fingerprint_file):  # delete any existing fingerprint file and recreate it
os.remove(fingerprint_file)
with open(fingerprint_file,"a",encoding="utf8") as f:
        host_name = socket.gethostname()  # local machine name
f.write("计算机名: %s\n"%host_name)
f.write("打包时间: %s\n" % NOW_TEXT)
f.write("打包工作目录: %s\n" % os.getcwd())
f.write("打包分支名: {}\n".format(branch_name))
        # Current commit hash
code, s = subprocess.getstatusoutput("git rev-parse HEAD")
f.write("最后 Commit: %s\n" % s)
if code != 0:
raise Exception(s)
        # Golang version of the build environment
code,s = subprocess.getstatusoutput("go version")
if code != 0:
raise Exception(s)
f.write("打包机器 Golang 版本: %s" % s)
f.write("\n")
digest = hash_file("./{dir_name}/{exe_name}".format(dir_name=DIR_NAME, exe_name=EXECUTE_NAME))
f.write("可执行文件 {} MD5 值: {}\n".format(EXECUTE_NAME, digest))
f.write("\n各源代码文件 MD5 值:\n")
        for dir_name in need_hash_dir_list:  # walk every directory that needs hashing
text = hash_all_file(dir_name)
f.write(text)
print("生成各 *.go 源码文件的 hash 值成功")
def main():
code, branch_name = subprocess.getstatusoutput("git symbolic-ref --short -q HEAD")
if code != 0:
raise Exception(branch_name)
if branch_name != "master":
while True:
q = input("严重警告!!!!!! 当前分支是 {},你真的要对这个分支而不是 master 进行打包 (输入 y 或 n): ".format(branch_name))
if q == "":
continue
elif q == 'y':
break
else:
print("中止打包")
return
if has_change_or_new_file():
while True:
q = input("严重警告!!!!!! 发现有新的或是改动未提交的 go 文件,是否仍要继续打包? (输入 y 或 n): ")
if q == "":
continue
elif q == 'y':
break
else:
print("中止打包")
return
while True:
s = input("打完整包还是只打可执行文件?(输入 full 代表打完整包,输入 exe 代表打可执行文件): ")
if s == "":
continue
if s in ["full", "exe"]:
is_full = {"full":True, "exe":False}[s]
break
if is_full:
while True:
group = input("请输入服务器组,用于各监听端口的最后一位数,有效值为 0 - 9: ")
if len(group) == 1 and group.isdigit():
break
while True:
protocol = input("游戏客户端和服务器走什么协议?请输入 ws 或 wss : ")
if protocol in ("ws", "wss"):
break
host = input("请输入目标服务器的外网 IP 或域名(直接回车则是 {}): ".format(DEFAULT_HOST))
if host == "":
host = DEFAULT_HOST
db = input("请输入mongodb的IP(直接回车则是 {}): ".format(DEFAULT_DB))
if db == "":
db = DEFAULT_DB
while True:
is_all = input("打包服务器all表示全部[login 、hall、backstage、shop、statistics]其中一个): ")
if is_all == "all" or is_all in DIR_LIST:
break
while True:
s = input("是否压缩? (输入 y 或 n): ")
if s == "":
continue
if s in ["y", "n"]:
compress = {"y":True, "n":False}[s]
break
remove_dir(DIR_NAME)
os.mkdir(DIR_NAME)
build(DIR_NAME)
gen_fingerprint_file(FINGERPRINT_FILE_NAME, NEED_HASH_DIR_LIST, branch_name)
if is_full:
server_list = []
if is_all =="all":
server_list=DIR_LIST
else:
server_list=[is_all]
deal_yaml_json_py_etc(DIR_NAME, server_list, is_full, group, protocol, host,db)
if compress:
        package_zip(DIR_NAME, is_full)  # compress
        remove_dir(DIR_NAME)  # delete the temporary packaging directory
if __name__ == "__main__":
main()
```
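The comment on `NEED_HASH_DIR_LIST` above marks a planned improvement: stop maintaining the directory list by hand and simply recurse the tree, hashing every `*.go` file found. A minimal sketch of that idea, reusing the script's own `hash_file` helper and `os` import; the set of skipped directories is an assumption:
```python
def hash_all_go_files(root="."):
    """Walk the repo and hash every *.go file, skipping VCS and build output."""
    skip_dirs = {".git", "dist", "build"}  # assumption: these need no fingerprint
    lines = []
    for cur, dirs, files in os.walk(root):
        dirs[:] = [d for d in dirs if d not in skip_dirs]
        for name in sorted(files):
            if name.endswith(".go"):
                path = os.path.join(cur, name)
                lines.append("%s: %s\n" % (path, hash_file(path)))
    return "".join(lines)
```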
#### File: 119068489/game_server/stop_by_pid_file.py
```python
import sys
import os
import readline  # keeps Backspace and the arrow keys from garbling input
PID_FILE_NAME = "pid"
def get_dict_from_pid_file():  # the same function is also defined in start.py
pid_info = {}
if not os.path.exists(PID_FILE_NAME):
return pid_info
with open('./%s' % PID_FILE_NAME, 'r') as f:
s = f.read()
if s == "":
return pid_info
for line in s.split("\n"):
if line == "":
continue
exe_dir, pid = line.split(":")
pid_info[exe_dir] = int(pid)
return pid_info
def write_pid_file(pid_info):
with open('./%s' % PID_FILE_NAME, 'w') as f:
for exe_dir, pid in pid_info.iteritems():
f.write("%s:%s\n"%(exe_dir, pid))
def main():
pid_file = PID_FILE_NAME
if not os.path.exists(pid_file):
print("没有 pid 文件存在.无能为力")
return
pid_info = get_dict_from_pid_file()
need_input = True
if len(sys.argv) > 1:
dir_name = sys.argv[1]
if dir_name == "all" or dir_name in pid_info:
need_input = False
else:
print("你输入的目录 {0} 在 pid 文件中找不到".format(dir_name))
while need_input:
prompt = "请输入你想要 kill 的目录名. pid 文件中记录的有: {0} (all 表示 kill 全部): ".format(pid_info.keys())
dir_name = raw_input(prompt)
if dir_name == '':
continue
if dir_name != "all" and dir_name not in pid_info:
print("目录名 {0} 在 pid 文件中不存在".format(dir_name))
continue
break
if dir_name == "all": #关闭所有子游戏进程
for pid in pid_info.values():
os.system("kill %d"%pid)
os.system("rm -rf %s" % pid_file)
    else:  # stop a single sub-game process
pid = pid_info[dir_name]
os.system("kill %d" % pid)
del pid_info[dir_name]
        write_pid_file(pid_info)  # overwrite the pid file
if __name__ == "__main__":
main()
``` |
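The comment in `get_dict_from_pid_file` says the same function also lives in `start.py`, which implies `start.py` is the side that appends the `directory:pid` lines this script consumes. A rough sketch of what that writer could look like; the launch mechanism is an assumption, not code from the repository:
```python
# start.py (assumed counterpart): record the "<exe_dir>:<pid>" lines read above
import subprocess

PID_FILE_NAME = "pid"

def start_and_record(exe_dir, command):
    """Launch a server process in its directory and append its pid to the pid file."""
    proc = subprocess.Popen(command, cwd=exe_dir)
    with open(PID_FILE_NAME, 'a') as f:
        f.write("%s:%s\n" % (exe_dir, proc.pid))
    return proc.pid
```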
{
"source": "119253/BankingApp",
"score": 3
} |
#### File: 119253/BankingApp/main.py
```python
from tkinter import *
import os
from PIL import ImageTk, Image
# Main Screen
master = Tk()
master.title('Banking App')
# Functions
def finish_reg():
name = temp_name.get()
age = temp_age.get()
gender = temp_gender.get()
password = temp_password.get()
all_accounts = os.listdir()
if name == "" or age == "" or gender == "" or password == "":
notif.config(fg="red", text="All fields requried * ")
return
    # Membership test instead of a per-file loop: the original loop re-created
    # the account file once for every non-matching entry in the directory.
    if name in all_accounts:
        notif.config(fg="red", text="Account already exists")
        return
    new_file = open(name, "w")
    new_file.write(name + '\n')
    new_file.write(password + '\n')
    new_file.write(age + '\n')
    new_file.write(gender + '\n')
    new_file.write('0')
    new_file.close()
    notif.config(fg="green", text="Account has been created")
def register():
# Vars
global temp_name
global temp_age
global temp_gender
global temp_password
global notif
temp_name = StringVar()
temp_age = StringVar()
temp_gender = StringVar()
temp_password = StringVar()
# Register Screen
register_screen = Toplevel(master)
register_screen.title('Register')
# Labels
Label(register_screen, text="Please enter your details below to register", font=('Calibri', 12)).grid(row=0,
sticky=N,
pady=10)
Label(register_screen, text="Name", font=('Calibri', 12)).grid(row=1, sticky=W)
Label(register_screen, text="Age", font=('Calibri', 12)).grid(row=2, sticky=W)
Label(register_screen, text="Gender", font=('Calibri', 12)).grid(row=3, sticky=W)
Label(register_screen, text="Password", font=('Calibri', 12)).grid(row=4, sticky=W)
notif = Label(register_screen, font=('Calibri', 12))
notif.grid(row=6, sticky=N, pady=10)
# Entries
Entry(register_screen, textvariable=temp_name).grid(row=1, column=0)
Entry(register_screen, textvariable=temp_age).grid(row=2, column=0)
Entry(register_screen, textvariable=temp_gender).grid(row=3, column=0)
Entry(register_screen, textvariable=temp_password, show="*").grid(row=4, column=0)
# Buttons
Button(register_screen, text="Register", command=finish_reg, font=('Calibri', 12)).grid(row=5, sticky=N, pady=10)
def login_session():
global login_name
all_accounts = os.listdir()
login_name = temp_login_name.get()
    login_password = temp_login_password.get()
for name in all_accounts:
if name == login_name:
file = open(name, "r")
file_data = file.read()
file_data = file_data.split('\n')
            password = file_data[1]
# Account Dashboard
if login_password == password:
login_screen.destroy()
account_dashboard = Toplevel(master)
account_dashboard.title('Dashboard')
# Labels
Label(account_dashboard, text="Account Dashboard", font=('Calibri', 12)).grid(row=0, sticky=N, pady=10)
Label(account_dashboard, text="Welcome " + name, font=('Calibri', 12)).grid(row=1, sticky=N, pady=5)
# Buttons
Button(account_dashboard, text="Personal Details", font=('Calibri', 12), width=30,
command=personal_details).grid(row=2, sticky=N, padx=10)
Button(account_dashboard, text="Deposit", font=('Calibri', 12), width=30, command=deposit).grid(row=3,
sticky=N,
padx=10)
Button(account_dashboard, text="Withdraw", font=('Calibri', 12), width=30, command=withdraw).grid(row=4,
sticky=N,
padx=10)
Label(account_dashboard).grid(row=5, sticky=N, pady=10)
return
else:
login_notif.config(fg="red", text="Password incorrect!!")
return
login_notif.config(fg="red", text="No account found !!")
def deposit():
# Vars
global amount
global deposit_notif
global current_balance_label
amount = StringVar()
file = open(login_name, "r")
file_data = file.read()
user_details = file_data.split('\n')
details_balance = user_details[4]
# Deposit Screen
deposit_screen = Toplevel(master)
deposit_screen.title('Deposit')
# Label
Label(deposit_screen, text="Deposit", font=('Calibri', 12)).grid(row=0, sticky=N, pady=10)
current_balance_label = Label(deposit_screen, text="Current Balance : £" + details_balance, font=('Calibri', 12))
current_balance_label.grid(row=1, sticky=W)
Label(deposit_screen, text="Amount : ", font=('Calibri', 12)).grid(row=2, sticky=W)
deposit_notif = Label(deposit_screen, font=('Calibri', 12))
deposit_notif.grid(row=4, sticky=N, pady=5)
# Entry
Entry(deposit_screen, textvariable=amount).grid(row=2, column=1)
# Button
Button(deposit_screen, text="Finish", font=('Calibri', 12), command=finish_deposit).grid(row=3, sticky=W, pady=5)
def finish_deposit():
if amount.get() == "":
deposit_notif.config(text='Amount is required!', fg="red")
return
if float(amount.get()) <= 0:
deposit_notif.config(text='Negative currency is not accepted', fg='red')
return
file = open(login_name, 'r+')
file_data = file.read()
    details = file_data.split('\n')
    current_balance = details[4]
    updated_balance = float(current_balance) + float(amount.get())
    # Rewrite only the balance field; a whole-file str.replace() can also hit
    # matching digits on other lines (e.g. the age).
    details[4] = str(updated_balance)
    file.seek(0)
    file.truncate(0)
    file.write('\n'.join(details))
file.close()
current_balance_label.config(text="Current Balance : £" + str(updated_balance), fg="green")
deposit_notif.config(text='Balance Updated', fg='green')
def withdraw():
# Vars
global withdraw_amount
global withdraw_notif
global current_balance_label
withdraw_amount = StringVar()
file = open(login_name, "r")
file_data = file.read()
user_details = file_data.split('\n')
details_balance = user_details[4]
# Deposit Screen
withdraw_screen = Toplevel(master)
withdraw_screen.title('Withdraw')
# Label
Label(withdraw_screen, text="Withdraw", font=('Calibri', 12)).grid(row=0, sticky=N, pady=10)
current_balance_label = Label(withdraw_screen, text="Current Balance : £" + details_balance, font=('Calibri', 12))
current_balance_label.grid(row=1, sticky=W)
Label(withdraw_screen, text="Amount : ", font=('Calibri', 12)).grid(row=2, sticky=W)
withdraw_notif = Label(withdraw_screen, font=('Calibri', 12))
withdraw_notif.grid(row=4, sticky=N, pady=5)
# Entry
Entry(withdraw_screen, textvariable=withdraw_amount).grid(row=2, column=1)
# Button
Button(withdraw_screen, text="Finish", font=('Calibri', 12), command=finish_withdraw).grid(row=3, sticky=W, pady=5)
def finish_withdraw():
if withdraw_amount.get() == "":
withdraw_notif.config(text='Amount is required!', fg="red")
return
if float(withdraw_amount.get()) <= 0:
withdraw_notif.config(text='Negative currency is not accepted', fg='red')
return
file = open(login_name, 'r+')
file_data = file.read()
    details = file_data.split('\n')
    current_balance = details[4]
    if float(withdraw_amount.get()) > float(current_balance):
        withdraw_notif.config(text='Insufficient Funds!', fg='red')
        return
    updated_balance = float(current_balance) - float(withdraw_amount.get())
    # Rewrite only the balance field instead of a whole-file str.replace()
    details[4] = str(updated_balance)
    file.seek(0)
    file.truncate(0)
    file.write('\n'.join(details))
file.close()
current_balance_label.config(text="Current Balance : £" + str(updated_balance), fg="green")
withdraw_notif.config(text='Balance Updated', fg='green')
def personal_details():
# Vars
file = open(login_name, 'r')
file_data = file.read()
user_details = file_data.split('\n')
details_name = user_details[0]
details_age = user_details[2]
details_gender = user_details[3]
details_balance = user_details[4]
# Personal details screen
personal_details_screen = Toplevel(master)
personal_details_screen.title('Personal Details')
# Labels
Label(personal_details_screen, text="Personal Details", font=('Calibri', 12)).grid(row=0, sticky=N, pady=10)
Label(personal_details_screen, text="Name : " + details_name, font=('Calibri', 12)).grid(row=1, sticky=W)
Label(personal_details_screen, text="Age : " + details_age, font=('Calibri', 12)).grid(row=2, sticky=W)
Label(personal_details_screen, text="Gender : " + details_gender, font=('Calibri', 12)).grid(row=3, sticky=W)
Label(personal_details_screen, text="Balance :£" + details_balance, font=('Calibri', 12)).grid(row=4, sticky=W)
def login():
# Vars
global temp_login_name
global temp_login_password
global login_notif
global login_screen
temp_login_name = StringVar()
temp_login_password = StringVar()
# Login Screen
login_screen = Toplevel(master)
login_screen.title('Login')
# Labels
Label(login_screen, text="Login to your account", font=('Calibri', 12)).grid(row=0, sticky=N, pady=10)
Label(login_screen, text="Username", font=('Calibri', 12)).grid(row=1, sticky=W)
Label(login_screen, text="Password", font=('Calibri', 12)).grid(row=2, sticky=W)
login_notif = Label(login_screen, font=('Calibri', 12))
login_notif.grid(row=4, sticky=N)
# Entry
Entry(login_screen, textvariable=temp_login_name).grid(row=1, column=1, padx=5)
Entry(login_screen, textvariable=temp_login_password, show="*").grid(row=2, column=1, padx=5)
# Button
Button(login_screen, text="Login", command=login_session, width=15, font=('Calibri', 12)).grid(row=3, sticky=W,
pady=5, padx=5)
# Image import
img = Image.open('secure.png')
img = img.resize((200, 200))
img = ImageTk.PhotoImage(img)
# Labels
Label(master, text="Shipley's Banking System", font=('Calibri', 14)).grid(row=0, sticky=N, pady=10)
Label(master, text="The most secure banking system by <NAME>", font=('Calibri', 12)).grid(row=1, sticky=N)
Label(master, image=img).grid(row=2, sticky=N, pady=15)
# Buttons
Button(master, text="Register", font=('Calibri', 12), width=20, command=register).grid(row=3, sticky=N)
Button(master, text="Login", font=('Calibri', 12), width=20, command=login).grid(row=4, sticky=N, pady=10)
master.mainloop()
``` |
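Each account is a plain text file named after the user, one field per line in the order name, password, age, gender, balance, so deposits and withdrawals boil down to rewriting the fifth line. A small helper sketch of that read-modify-write step (not part of the original app) that avoids touching any other line:
```python
def update_balance(account_file, delta):
    """Add delta to the balance stored on the fifth line of the account file."""
    with open(account_file, 'r') as f:
        details = f.read().split('\n')
    new_balance = float(details[4]) + delta
    if new_balance < 0:
        raise ValueError('Insufficient funds')
    details[4] = str(new_balance)
    with open(account_file, 'w') as f:
        f.write('\n'.join(details))
    return new_balance
```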
{
"source": "1194060084/python",
"score": 3
} |
#### File: 1194060084/python/BPNN.py
```python
from keras.datasets import boston_housing
from keras import models
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd
from sklearn.model_selection import train_test_split
def file_open():
data = pd.read_csv(r'D:\ML-2020.1.4\database-master\database-master\Data base for ML prediction-new.csv')
data_list = list(data)
# drop_name_unname = 'Unnamed: 28'
# data = file_read.drop(drop_name_unname, axis=1)
drop_name_unnames = 'Unnamed: 0'
data = data.drop(drop_name_unnames, axis=1)
data_list = list(data)
y = data['Eads(H)']
x = data.drop(['Eads(H)'], axis=1, inplace=False)
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.20)
train_y = train_y.values
test_y = test_y.values
return train_x, train_y, test_x, test_y
def bulid_model(train_x, train_y, test_x, test_y):
train_x = train_x
train_y = train_y
model = keras.Sequential((
layers.Dense(64, kernel_regularizer=keras.regularizers.l2(0.001), activation='relu',
input_shape=[len(list(train_x))]),
layers.Dense(64, kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'),
layers.Dense(1)
))
optimizer = tf.keras.optimizers.Adam(0.001)
model.compile(optimizer=optimizer,
loss='mse',
metrics=['mae', 'mse']
)
return model
def map_plot(test_x,test_y,pre_y):
test_x_m = test_x
test_y_m = test_y
pre_y = pre_y
plt.plot(test_x_m,pre_y,'r-')
plt.scatter(test_x_m,test_y)
plt.show()
def guji(pre_y, test_y):
pre = pre_y
pre_pd = pd.DataFrame(pre)
test_y_g = test_y
mae = mean_squared_error(test_y_g, pre)
r2 = r2_score(test_y_g, pre)
error = round((mae+r2)/2, 2)
print('r2=', r2)
print('error=', error)
return error
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
if __name__ =="__main__":
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
train_x, train_y, test_x, test_y = file_open()
model = bulid_model(train_x, train_y, test_x, test_y)
model.summary()
model_fit = model.fit(train_x,train_y,epochs=1000,validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
pre_y = model.predict(test_x)
guji(pre_y,test_y)
loss1, mae1, mse1 = model.evaluate(test_x, test_y, verbose=2)
# test_x, test_y, pre_y = map_plot(test_x, test_y, pre_y)
``` |
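`map_plot` is written with a single feature column in mind, so with the multi-descriptor `test_x` used here the commented-out call would not give a meaningful picture. A predicted-versus-actual parity plot is one common alternative; this is a sketch of that idea, not part of the original script:
```python
import matplotlib.pyplot as plt

def parity_plot(test_y, pre_y):
    """Scatter predictions against true Eads(H) values with a y = x reference line."""
    plt.scatter(test_y, pre_y, alpha=0.7)
    lo, hi = min(test_y.min(), pre_y.min()), max(test_y.max(), pre_y.max())
    plt.plot([lo, hi], [lo, hi], 'r--')  # perfect-prediction line
    plt.xlabel('Actual Eads(H)')
    plt.ylabel('Predicted Eads(H)')
    plt.show()
```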
{
"source": "119Vik/open-kilda",
"score": 2
} |
#### File: app/examples/mininet_rest_test.py
```python
from .. import mininet_rest
import requests
import logging
from functools import partial
logger = logging.getLogger(__name__)
topo_json = {
"controllers": [
{
"host": "kilda",
"name": "floodlight",
"port": 6653
}
],
"links": [
{
"node1": "00000001",
"node2": "00000002"
},
{
"node1": "00000002",
"node2": "00000003"
},
{
"node1": "00000003",
"node2": "00000004"
},
{
"node1": "00000004",
"node2": "00000005"
},
{
"node1": "00000005",
"node2": "00000006"
},
{
"node1": "00000006",
"node2": "00000007"
}
],
"switches": [
{
"dpid": "deadbeef00000001",
"name": "00000001"
},
{
"dpid": "deadbeef00000002",
"name": "00000002"
},
{
"dpid": "deadbeef00000003",
"name": "00000003"
},
{
"dpid": "deadbeef00000004",
"name": "00000004"
},
{
"dpid": "deadbeef00000005",
"name": "00000005"
},
{
"dpid": "deadbeef00000006",
"name": "00000006"
},
{
"dpid": "deadbeef00000007",
"name": "00000007"
}
]
}
def cleanup():
result = requests.post(url="http://localhost:38080/cleanup")
print result
def create_topology():
result = requests.post(url="http://localhost:38080/topology",json=topo_json)
print result
def test_ping(src_switch, src_port, src_vlan, dst_switch, dst_port, dst_vlan):
"""setup our rules for host / switch / switch and see if we can ping"""
urlbase = "http://localhost:38080/checkpingtraffic"
args = "srcswitch={}&srcport={}&srcvlan={}&dstswitch={}&dstport={}&dstvlan={}".format(
src_switch,src_port,src_vlan,dst_switch,dst_port,dst_vlan)
url = "{}?{}".format(urlbase,args)
print ("** PING: {}".format(url))
result = requests.get(url=url)
return result
def print_ping_result(response, should_ping):
if response.status_code == 503:
if should_ping:
print "FAILURE - CAN'T PING"
else:
print "SUCCESS - NO PING"
elif response.status_code == 200:
if should_ping:
print "SUCCESS - CAN PING"
else:
print "FAILURE - CAN PING"
else:
print "ERROR - WRONG CODE"
def call_pingagle(pingable, should_ping):
result = pingable()
print_ping_result( result, should_ping) # should fail
if result.status_code == 503 and not should_ping:
return 1
elif result.status_code == 200 and should_ping:
return 1
return 0
def test_scenario(name, pingable, psetup, pclear):
total_result = 0; # add a +1 for each passed test
total_expected = 3;
# There should not be a ping beforehand
total_result += call_pingagle(pingable, should_ping=False)
# Now there should be a ping, after adding the rule that Kilda should add when running
for setup in psetup:
setup()
total_result += call_pingagle(pingable, should_ping=True)
# After removing rules, ping shouldn't work
for clear in pclear:
clear()
total_result += call_pingagle(pingable, should_ping=False)
if total_result == total_expected:
print ("\n{} ... ALL TESTS PASSED\n".format(name))
else:
print ("\n{} ... FAILURE: {} of {} passed\n".format(name, total_result, total_expected))
s2 = "00000002"
s3 = "00000003"
s4 = "00000004"
s5 = "00000005"
s6 = "00000006"
def test_single_switch_scenario():
"""
This test is similar to the Kilda single switch test(s), but without kilda issuing the rules.
The intent is to confirm that the test harness works (ie pingable works as intended), but also
serve as an example of what the rules may look like for the kilda deployment.
Examples: # flows without transit vlans and intermediate switches
| flow_id | source_switch | source_port | source_vlan | destination_switch | destination_port | destination_vlan | bandwidth |
| c1none | fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b | 1 | 0 | fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b | 2 | 0 | 10000 |
"""
pingable = partial(test_ping,s2,2,0,s4,1,0)
psetup = [ partial(mininet_rest.add_single_switch_rules, s3, 1, 2, 0, 0 ) ]
pclear = [ partial(mininet_rest.clear_single_switch_rules, s3, 1, 2) ]
test_scenario("test_single_switch_scenario", pingable, psetup, pclear)
def test_two_switch_scenario():
"""
Examples: # flows with transit vlans and without intermediate switches
| flow_id | source_switch | source_port | source_vlan | destination_switch | destination_port | destination_vlan | bandwidth |
| c2none | fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b | 1 | 0 | fdf8:f53e:61e4::18 | 2 | 0 | 10000 |
"""
pingable = partial(test_ping, s2, 2, 0, s5, 1, 0)
psetup = [
partial( mininet_rest.add_single_switch_rules, s3, 1, 2, 0, 0 ),
partial( mininet_rest.add_single_switch_rules, s4, 1, 2, 0, 0 )
]
pclear = [
partial(mininet_rest.clear_single_switch_rules, s3, 1, 2),
partial(mininet_rest.clear_single_switch_rules, s4, 1, 2)
]
test_scenario("test_two_switch_scenario", pingable, psetup, pclear)
def test_three_switch_scenario():
"""
Examples: # flows with transit vlans and intermediate switches
| flow_id | source_switch | source_port | source_vlan | destination_switch | destination_port | destination_vlan | bandwidth |
| c3none | fc00:e968:6179::de52:7100 | 1 | 0 | fdf8:f53e:61e4::18 | 2 | 0 | 10000 |
"""
pingable = partial(test_ping, s2, 2, 0, s6, 1, 0)
psetup = [
partial( mininet_rest.add_single_switch_rules, s3, 1, 2, 0, 0 ),
partial( mininet_rest.add_single_switch_rules, s4, 1, 2, 0, 0 ),
partial( mininet_rest.add_single_switch_rules, s5, 1, 2, 0, 0 )
]
pclear = [
partial(mininet_rest.clear_single_switch_rules, s3, 1, 2),
partial(mininet_rest.clear_single_switch_rules, s4, 1, 2),
partial(mininet_rest.clear_single_switch_rules, s5, 1, 2)
]
test_scenario("test_three_switch_scenario", pingable, psetup, pclear)
def main():
mininet_rest.init()
create_topology()
test_single_switch_scenario()
test_two_switch_scenario()
test_three_switch_scenario()
cleanup()
if __name__ == '__main__':
main()
```
#### File: app/examples/plan-b.py
```python
from mininet.net import Mininet
from mininet.node import OVSSwitch, Controller, RemoteController
from mininet.topo import LinearTopo
from mininet.log import setLogLevel
from mininet.cli import CLI
import subprocess
import re
class KildaSwitch( OVSSwitch ):
"Add the OpenFlow13 Protocol"
def __init__(self,name,**params):
params['protocols'] = 'OpenFlow13'
OVSSwitch.__init__(self, name, **params)
setLogLevel( 'info' )
gateway = '127.0.0.1'
netstat = subprocess.check_output(['netstat', '-rn']).split('\n')
for line in netstat:
if line.startswith('0.0.0.0'):
gateway = re.split('\s+', line)[1]
break
print "gateway=", gateway
c0 = RemoteController( 'c0', ip=gateway, port=6653 )
topo = LinearTopo( k=2, n=2 )
net = Mininet( controller=c0, topo=topo, switch=KildaSwitch, build=False )
net.build()
net.start()
CLI( net )
net.stop()
```
#### File: app/examples/plan-d.py
```python
from mininet.net import Mininet
from mininet.node import OVSSwitch, Controller, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI
import subprocess
import re
class KildaSwitch( OVSSwitch ):
"Add the OpenFlow13 Protocol"
def __init__(self,name,**params):
params['protocols'] = 'OpenFlow13'
OVSSwitch.__init__(self, name, **params)
setLogLevel( 'info' )
gateway = '127.0.0.1'
netstat = subprocess.check_output(['netstat', '-rn']).split('\n')
for line in netstat:
if line.startswith('0.0.0.0'):
gateway = re.split('\s+', line)[1]
break
print "gateway=", gateway
net = Mininet( controller=RemoteController, switch=KildaSwitch, build=False )
info( "*** Creating (Remote) controllers\n" )
c0 = net.addController( 'c0', ip=gateway, port=6653)
info( "*** Creating switches\n" )
s1 = net.addSwitch( 's1' )
s2 = net.addSwitch( 's2' )
info( "*** Creating hosts\n" )
hosts1 = [ net.addHost( 'h%ds1' % n ) for n in ( 1, 2 ) ]
hosts2 = [ net.addHost( 'h%ds2' % n ) for n in ( 1, 2 ) ]
info( "*** Creating links\n" )
for h in hosts1:
net.addLink( h, s1 )
for h in hosts2:
net.addLink( h, s2 )
net.addLink( s1, s2 )
info( "*** Starting network\n" )
net.configHosts()
# c0.start()
# s1.start( [c0] )
# s2.start( [c0] )
net.start()
p = subprocess.Popen(["ovs-ofctl","-O","OpenFlow13","add-flow","s1",
"idle_timeout=0,priority=1000,in_port=1,actions=output:2"],
stdout=subprocess.PIPE)
p = subprocess.Popen(["ovs-ofctl","-O","OpenFlow13","add-flow","s1",
"idle_timeout=0,priority=1000,in_port=2,actions=output:1"],
stdout=subprocess.PIPE)
p.wait()
result = hosts1[0].cmd( 'ping -c1 %s' % (hosts1[1].IP()) )
lines = result.split("\n")
if "1 packets received" in lines[3]:
print "CONNECTION BETWEEN ", hosts1[0].IP(), "and", hosts1[1].IP()
else:
print "NO CONNECTION BETWEEN ", hosts1[0].IP(), "and", hosts1[1].IP()
result = hosts1[0].cmd( 'ping -c1 %s' % (hosts2[0].IP()) )
lines = result.split("\n")
if "1 packets received" in lines[3]:
print "CONNECTION BETWEEN ", hosts1[0].IP(), "and", hosts2[0].IP()
else:
print "NO CONNECTION BETWEEN ", hosts1[0].IP(), "and", hosts2[0].IP()
#info( "loss=" + loss )
info( "*** Running CLI\n" )
CLI( net )
info( "*** Stopping network\n" )
net.stop()
```
#### File: kilda/mininet/flow_tool.py
```python
from bottle import run, get, response, request, post, error, install
import ctypes
import multiprocessing
import os
import scapy.all as s
import socket
import logging
import json
from logging.config import dictConfig
from functools import wraps
logger = logging.getLogger()
def log_to_logger(fn):
'''
Wrap a Bottle request so that a log line is emitted after it's handled.
(This decorator can be extended to take the desired logger as a param.)
'''
@wraps(fn)
def _log_to_logger(*args, **kwargs):
actual_response = fn(*args, **kwargs)
logger.info('%s %s %s %s' % (request.remote_addr,
request.method,
request.url,
response.status))
return actual_response
return _log_to_logger
install(log_to_logger)
number_of_packets = 1000
expected_delta = 500
of_ctl = "ovs-ofctl -O openflow13"
def required_parameters(*pars):
def _hatch(__):
def _hatchet():
for _ in pars:
if request.query.get(_) is None:
response.status = 500
return "%s: %s must be specified\n" % (request.path, _)
return __(dict([(_, request.query.get(_)) for _ in pars]))
return _hatchet
return _hatch
def respond(status, ok_message, fail_message):
if status:
response.status = 200
return ok_message
response.status = 503
return fail_message
@error(404)
def not_found(error):
return "Thank you, Mario! but our princess is in another castle!\n"
@post('/set_link_state')
@required_parameters("switch", "port", "newstate")
def link_state_changer(p):
iface = "%s-eth%s" % (p['switch'], p['port'])
newstate = iface, p['newstate']
result = os.system("ifconfig %s %s" % newstate)
return respond(result == 0,
"Successfully put link %s in state %s\n" % newstate,
"Failed to put link %s in state %s\n" % newstate)
@get('/checkflowtraffic')
@required_parameters("srcswitch", "dstswitch", "srcport", "dstport", "srcvlan",
"dstvlan")
def check_traffic(p):
def traffic_sender(linkid, vlanid):
payload = s.Ether()/s.Dot1Q(vlan=int(vlanid))/s.IP()/s.ICMP()
s.sendp(payload, iface=linkid, count=number_of_packets)
def traffic_listener(traffic_goes_through, vlanid, link):
# NOTE: sniff() takes optional filter argument which is supposed to
# contain BPF string. This filter is then supposed to be applied to
# captured packets in a manner similar to other traffic capture tools.
# However in case sniff() fails to use filtering it apparently just
# returns any packet instead of failing. It appears that running
# scapy in a container with insufficient (i.e. any other set than full
# set) privileges results exactly in this behavior. lfilter argument
# apparently makes things even worse since sniff appears to loose
# packets when lfilter is used.
# That is why an approach with a delta of packets and sniff timeout
# is used now. It appears to be the most reliable way to test traffic
# through flow.
result = s.sniff(timeout=5, iface=link)
received = sum(1 for _ in result if _.haslayer(s.ICMP))
if number_of_packets - received < expected_delta:
traffic_goes_through.value = True
traffic_goes_through = multiprocessing.Value(ctypes.c_bool, False)
sender = multiprocessing.Process(
target=traffic_sender,
args=("%s-eth%s" % (p['srcswitch'], p['srcport']), p['srcvlan']))
checker = multiprocessing.Process(
target=traffic_listener,
args=(traffic_goes_through, p['dstvlan'],
"%s-eth%s" % (p['dstswitch'], p['dstport'])))
checker.start(), sender.start(), sender.join(5), checker.join(7)
return respond(traffic_goes_through.value,
"Traffic seems to go through\n",
"Traffic does not seem to go through\n")
@post("/knockoutswitch")
@required_parameters("switch")
def switch_knock_out(p):
result = os.system("ovs-vsctl del-controller %s" % p['switch'])
return respond(result == 0,
"Switch %s is successfully knocked out\n" % p['switch'],
"Failed to knock out switch %s\n" % p['switch'])
@post("/reviveswitch")
@required_parameters("switch", "controller")
def switch_revive(p):
params = p['controller'].split(":", 3)
ip = socket.gethostbyname(params[1])
controller = params[0] + ":" + ip + ":" + params[2]
result = os.system("ovs-vsctl set-controller %s %s" %
(p['switch'], controller))
return respond(result == 0,
"Switch %s is successfully revived\n" % p['switch'],
"Failed to revive switch %s\n" % p['switch'])
@post("/cutlink")
@required_parameters("switch", "port")
def cut_link(p):
sppair = (p['switch'], p['port'])
result = os.system("ovs-ofctl add-flow %s priority=65500,in_port=%s,"
"action=drop -O openflow13" % sppair)
return respond(result == 0,
"Link to switch %s port %s is successfully cut\n" % sppair,
"Failed to cut link to switch %s port %s\n" % sppair)
@post("/restorelink")
@required_parameters("switch", "port")
def restore_link(p):
sppair = (p['switch'], p['port'])
result = os.system("ovs-ofctl del-flows %s -O openflow13 \"priority=65500"
",in_port=%s\" --strict" % (p['switch'], p['port']))
return respond(result == 0,
"Link to switch %s port %s is restored\n" % sppair,
"Failed to restore link to switch %s port %s\n" % sppair)
def port_mod(switch, port, action):
return os.system("%s mod-port %s %s %s" % (of_ctl, switch, port, action))
@post("/port/down")
@required_parameters("switch", "port")
def port_down(p):
result = port_mod(p['switch'], p['port'], 'down')
return respond(result == 0,
"Switch %s port %s down\n" % (p['switch'], p['port']),
"Fail switch %s port %s down\n" % (p['switch'], p['port']))
@post("/port/up")
@required_parameters("switch", "port")
def port_up(p):
result = port_mod(p['switch'], p['port'], 'up')
return respond(result == 0,
"Switch %s port %s up\n" % (p['switch'], p['port']),
"Fail switch %s port %s up\n" % (p['switch'], p['port']))
@post("/send_malformed_packet")
def send_malformed_packet():
    # This packet creates an ISL between de:ad:be:ef:00:00:00:02 and
# fdf8:f53e:61e4::18
data = '\x02\x07\x04\xbe\xef\x00\x00\x00\x02\x04\x03\x02\x00\x01\x06\x02' \
'\x00x\xfe\x0c\x00&\xe1\x00\xde\xad\xbe\xef\x00\x00\x00\x02\xfe' \
'\x0c\x00&\xe1\x01\x00\x00\x01_\xb6\x8c\xacG\xfe\x08\x00&\xe1\x02' \
'\x00\x00\x00\x00\x00\x00'
payload = (s.Ether(dst="00:26:e1:ff:ff:ff") /
s.IP(dst="192.168.0.255") /
s.UDP(dport=61231, sport=61231) /
data)
try:
s.sendp(payload, iface="00000001-eth1")
return "ok"
except Exception as ex:
response.status = 500
return "can't send malformed packet {}".format(ex)
def main():
with open("/app/log.json", "r") as fd:
logging.config.dictConfig(json.load(fd))
run(host='0.0.0.0', port=17191, debug=True)
```
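`required_parameters` is a small decorator factory: it verifies that every named query parameter is present, answers 500 with an explanatory message when one is missing, and otherwise calls the handler with a dict of the collected values. A usage sketch against a locally running instance (the service binds to port 17191 in `main`); host and switch naming follow the examples above but are assumptions here:
```python
import requests

BASE = 'http://localhost:17191'  # assumption: flow_tool running locally on its default port

# Missing 'newstate' -> the decorator answers 500 with an explanatory message
r = requests.post(BASE + '/set_link_state', params={'switch': '00000001', 'port': '1'})
print(r.status_code, r.text)

# All required parameters present -> the handler runs and respond() sets 200 or 503
r = requests.post(BASE + '/set_link_state',
                  params={'switch': '00000001', 'port': '1', 'newstate': 'up'})
print(r.status_code, r.text)
```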
#### File: probe/command/dump_state.py
```python
import logging
import pprint
import json
from collections import OrderedDict
import click
from datetime import datetime
import prettytable.prettytable as prettytable
from prettytable import PrettyTable
from kilda.probe.entity.message import create_dump_state
from kilda.probe.messaging import send_with_context
from kilda.probe.messaging import receive_with_context_async
LOG = logging.getLogger(__name__)
def print_flow(flow, border):
table = PrettyTable(['Property', 'Forward', 'Reverse'], border=border,
valign='m')
for k, v in flow['forward'].items():
if k == 'flowpath':
table.add_row(['flowpath:latency_ns', v['latency_ns'],
flow['reverse'][k]['latency_ns']])
else:
table.add_row([k, v, flow['reverse'][k]])
table.add_row(['path', print_path(flow['forward'], border),
print_path(flow['reverse'], border)])
print(table)
def print_path(flow, border):
path = flow['flowpath']['path']
keys = ['switch_id', 'port_no', 'segment_latency', 'seq_id']
table = PrettyTable(keys, border=border, vrules=prettytable.NONE,
hrules=prettytable.HEADER,
padding_width=0)
for p in path:
table.add_row([p.get(x, None) for x in keys])
return table
def print_isls_tower(isls, border):
table = PrettyTable(['Isl'], border=border)
for isl in isls:
child_table = PrettyTable(['Property', 'Value'], border=border)
for k, v in isl.items():
if k == 'path':
for p in v:
for kk, vv in p.items():
child_table.add_row(
['path:{}:{}'.format(p['seq_id'], kk), vv])
else:
child_table.add_row([k, v])
table.add_row([child_table])
print(table)
def print_isls(isls, border):
if not isls:
return
columns = set()
raw = []
for isl in isls:
d = isl.copy()
if 'path' in d:
for p in d['path']:
for kk, vv in p.items():
d['p{}:{}'.format(p['seq_id'], kk)] = vv
raw.append(d)
columns.update(d.keys())
columns -= {'id', 'path', 'message_type', 'p0:segment_latency',
'created_in_cache', 'updated_in_cache', 'clazz'}
sorted_columns = ['id'] + sorted(list(columns)) + ['created_in_cache',
'updated_in_cache']
sorted_columns_with_names = OrderedDict(
zip(sorted_columns, sorted_columns))
sorted_columns_with_names.update({'available_bandwidth': 'av/bw',
'created_in_cache': 'created',
'updated_in_cache': 'updated',
'latency_ns': 'lat'})
table = PrettyTable(sorted_columns_with_names.values(),
border=border,
sortby='id',
vrules=prettytable.FRAME,
hrules=prettytable.FRAME)
convert_timefied_to_human(raw)
for d in raw:
table.add_row([d.get(x, '-') for x in sorted_columns_with_names.keys()])
print(table)
def convert_timefied_to_human(data):
for r in data:
for time_field in ['created_in_cache', 'updated_in_cache']:
if time_field in r:
r[time_field] = datetime.utcfromtimestamp(r[time_field])
def print_switches(switches, border):
if not switches:
return
columns = set(switches[0].keys())
columns -= {'switch_id', 'created_in_cache', 'updated_in_cache'}
sorted_columns = ['switch_id'] + sorted(columns) + ['created_in_cache',
'updated_in_cache']
sorted_columns_with_names = OrderedDict(
zip(sorted_columns, sorted_columns))
sorted_columns_with_names.update({'created_in_cache': 'created',
'updated_in_cache': 'updated'})
table = PrettyTable(sorted_columns_with_names.values(),
border=border,
sortby='switch_id',
vrules=prettytable.FRAME,
hrules=prettytable.FRAME)
convert_timefied_to_human(switches)
for s in switches:
table.add_row([s[x] for x in sorted_columns_with_names.keys()])
print(table)
def print_flows_from_payload(payload, border):
flows = payload['state']['flow']['flows']
if flows:
print('+----------')
print('| Flows')
for flow in flows:
print_flow(flow, border)
def cache_bolt_print_table(payload, border):
print_flows_from_payload(payload, border)
isls = payload['state']['network']['isls']
if isls:
print('+----------')
print('| Isls')
print_isls(isls, border)
switches = payload['state']['network']['switches']
if switches:
print('+----------')
print('| Switches')
print_switches(switches, border)
def crud_bolt_print_table(payload, border):
print_flows_from_payload(payload, border)
def print_table(records, border):
for record in records:
data = json.loads(record.value)
payload = data['payload']
LOG.debug(pprint.pformat(data))
table = PrettyTable(['Topology', 'Component', 'Task ID'],
border=border)
table.add_row(
[payload['topology'], payload['component'], payload['task_id']])
print(table)
clazz = payload['state']['clazz']
if clazz == 'org.openkilda.messaging.ctrl.state.CacheBoltState':
cache_bolt_print_table(payload, border)
elif clazz == 'org.openkilda.messaging.ctrl.state.CrudBoltState':
crud_bolt_print_table(payload, border)
else:
print(pprint.pformat(payload['state']))
print('\n')
@click.command(name='dump-state')
@click.argument('destination')
@click.option('--border/--no-border', default=True)
@click.option('--table', 'output_type', flag_value='table', default=True)
@click.option('--json', 'output_type', flag_value='json')
@click.pass_obj
def dump_state_command(ctx, destination, border, output_type):
message = create_dump_state(ctx.correlation_id, destination=destination)
LOG.debug('command = {}'.format(message.serialize()))
with receive_with_context_async(ctx) as records:
send_with_context(ctx, message.serialize())
if output_type == 'table':
print_table(records, border)
elif output_type == 'json':
for record in records:
data = json.loads(record.value)
print(pprint.pformat(data))
```
#### File: kilda/probe/main.py
```python
import logging
import time
import socket
import click
from kilda.probe.command.list import list_command
from kilda.probe.command.monitor import monitor_command, bolt_command
from kilda.probe.command.dump_state import dump_state_command
from kilda.probe.command.switch_port_status import switch_port_status_command
LOG = logging.getLogger(__name__)
def init_logger(level):
if level <= logging.DEBUG:
logging.basicConfig(level=level,
format='%(asctime)s - %(name)s - %(levelname)s | '
'%(message)s')
else:
logging.basicConfig(level=level,
format='%(asctime)s | %(message)s')
logging.getLogger('kafka').setLevel(logging.ERROR)
logging.getLogger('neo4j').setLevel(logging.ERROR)
logging.getLogger('httpstream').setLevel(logging.ERROR)
def generate_correlation_id():
return 'probe-{}-{}'.format(socket.gethostname(),
int(round(time.time() * 1000)))
class Context(object):
def __init__(self):
self._debug = False
self._correlation_id = None
self._kafka_bootstrap_servers = None
self._kafka_topic = None
self._timeout = None
self._fl_host = None
self._neo4j_host = None
self._neo4j_user = None
self._neo4j_pass = None
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, value):
self._debug = value
@property
def correlation_id(self):
return self._correlation_id
@correlation_id.setter
def correlation_id(self, value):
self._correlation_id = value
@property
def kafka_bootstrap_servers(self):
return self._kafka_bootstrap_servers
@kafka_bootstrap_servers.setter
def kafka_bootstrap_servers(self, value):
self._kafka_bootstrap_servers = value
@property
def kafka_topic(self):
return self._kafka_topic
@kafka_topic.setter
def kafka_topic(self, value):
self._kafka_topic = value
@property
def fl_host(self):
return self._fl_host
@fl_host.setter
def fl_host(self, value):
self._fl_host = value
@property
def neo4j_host(self):
return self._neo4j_host
@neo4j_host.setter
def neo4j_host(self, value):
self._neo4j_host = value
@property
def neo4j_user(self):
return self._neo4j_user
@neo4j_user.setter
def neo4j_user(self, value):
self._neo4j_user = value
@property
def neo4j_pass(self):
return self._neo4j_pass
@neo4j_pass.setter
def neo4j_pass(self, value):
self._neo4j_pass = value
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
@click.group()
@click.option('--debug/--no-debug', default=False, envvar='DEBUG')
@click.option('--correlation-id', default=generate_correlation_id())
@click.option('--kafka-bootstrap-servers', default='localhost',
envvar='KAFKA_BOOTSTRAP_SERVERS')
@click.option('--kafka-topic', default='kilda.ctrl', envvar='KAFKA_TOPIC')
@click.option('--fl-host', default='http://localhost:8180', envvar='FL')
@click.option('--neo4j-host', default='localhost',
envvar='NEO4G_HOST')
@click.option('--neo4j-user', default='neo4j', envvar='NEO4G_USER')
@click.option('--neo4j-pass', default='<PASSWORD>', envvar='NEO4G_PASS')
@click.option('--timeout', default=2)
@click.pass_obj
def cli(ctx, debug, correlation_id, kafka_bootstrap_servers, kafka_topic,
fl_host, neo4j_host, neo4j_user, neo4j_pass, timeout):
init_logger(logging.DEBUG if debug else logging.INFO)
ctx.debug = debug
ctx.correlation_id = correlation_id
LOG.debug('correlation_id = %s', correlation_id)
ctx.kafka_bootstrap_servers = kafka_bootstrap_servers
ctx.kafka_topic = kafka_topic
ctx.timeout = timeout
ctx.fl_host = fl_host
ctx.neo4j_host = neo4j_host
ctx.neo4j_user = neo4j_user
    ctx.neo4j_pass = neo4j_pass
cli.add_command(list_command)
cli.add_command(monitor_command)
cli.add_command(bolt_command)
cli.add_command(dump_state_command)
cli.add_command(switch_port_status_command)
def main():
cli(obj=Context())
```
#### File: kilda/probe/messaging.py
```python
import kafka
import json
import pprint
import logging
import gevent
import sys
import time
from contextlib import contextmanager
LOG = logging.getLogger(__name__)
def send_with_context(context, message):
send(context.kafka_bootstrap_servers, context.kafka_topic, message)
def send(bootstrap_servers, topic, message):
producer = kafka.KafkaProducer(bootstrap_servers=bootstrap_servers)
future = producer.send(topic, message)
future.get(timeout=60)
@contextmanager
def receive_with_context_async(context):
records = []
def collector(record):
try:
data = json.loads(record.value)
if (data['correlation_id'] == context.correlation_id and
data['destination'] == 'CTRL_CLIENT'):
LOG.debug('New message in topic:\n%s', pprint.pformat(data))
records.append(record)
except Exception:
LOG.exception('error on %s', record)
progress_green_thread = gevent.spawn(progress)
offset = get_last_offset_with_context(context)
green_thread = gevent.spawn(receive_with_context, context, collector,
offset)
yield records
green_thread.join(context.timeout)
green_thread.kill()
progress_green_thread.kill()
sys.stdout.write("\r")
sys.stdout.flush()
def receive_with_context(context, callback, offset=None):
receive(context.kafka_bootstrap_servers, context.kafka_topic, callback,
offset)
def receive(bootstrap_servers, topic, callback, offset):
consumer = kafka.KafkaConsumer(bootstrap_servers=bootstrap_servers,
enable_auto_commit=False)
partition = kafka.TopicPartition(topic, 0)
consumer.assign([partition])
if offset is not None:
consumer.seek(partition, offset)
for msg in consumer:
callback(msg)
def get_last_offset_with_context(context):
consumer = kafka.KafkaConsumer(
bootstrap_servers=context.kafka_bootstrap_servers,
enable_auto_commit=False)
partition = kafka.TopicPartition(context.kafka_topic, 0)
consumer.assign([partition])
pos = consumer.position(partition)
consumer.close(autocommit=False)
return pos
def progress():
while True:
sys.stderr.write('.')
sys.stderr.flush()
time.sleep(0.5)
```
#### File: tests/smoke-tests/stress_test.py
```python
from kafka import KafkaProducer
from itertools import izip
bootstrap_servers = 'kafka.pendev:9092'
topic = 'kilda.topo.eng'
MT_INFO = "org.openkilda.messaging.info.InfoMessage"
MT_SWITCH = "org.openkilda.messaging.info.event.SwitchInfoData"
producer = KafkaProducer(bootstrap_servers=bootstrap_servers)
def generate_swith_name(n):
i = iter(hex(n)[2:].zfill(16))
return ':'.join([''.join(a) for a in izip(i, i)])
x = xrange(10000)
for n in x:
switch = generate_swith_name(n)
producer.send(topic, b'{"clazz": "%s", "timestamp": 23478952134, '
b'"destination":"TOPOLOGY_ENGINE", "payload": '
b'{"clazz": "%s", '
b'"switch_id": "%s",'
b' "state": "ADDED", '
b'"address":"%s", '
b'"hostname":"hostname", '
b'"description":"description", '
b'"controller":"controller"}}' % (MT_INFO, MT_SWITCH, switch, switch))
producer.send(topic, b'{"clazz": "%s", "timestamp": 23478952134, "destination":"STOP"}' % (MT_INFO))
producer.flush()
```
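`generate_swith_name` zero-pads the hex form of the counter to 16 characters and regroups it into colon-separated byte pairs, which is the switch-id format embedded in the JSON payload. A quick worked example, spelled for Python 3 since `izip`/`xrange` above are Python 2:
```python
def generate_switch_name(n):
    i = iter(hex(n)[2:].zfill(16))
    return ':'.join(a + b for a, b in zip(i, i))

assert generate_switch_name(2) == '00:00:00:00:00:00:00:02'
assert generate_switch_name(255) == '00:00:00:00:00:00:00:ff'
```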
#### File: app/app/login.py
```python
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for, Response
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
from app import application
from app import models, utils, db
import sys, os
login_manager = LoginManager()
login_manager.init_app(application)
login_manager.login_view = "login"
@application.route('/')
@login_required
def index():
user = models.Users.query.filter(models.Users.username == 'admin').first()
return render_template('index.html', username=user.username)
@application.route("/login", methods=["GET", "POST"])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
otp = request.form['twofactor']
hashed_password = utils.hash_password(password)
db_user = models.Users.query.filter(models.Users.username == username).first()
try:
otp_result = utils.check_otp(otp, db_user.twofactor)
otp_result = True
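            # NOTE: this reassignment discards the check_otp() result, effectively disabling two-factor verification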
except:
return render_template('login.html')
if db_user and otp_result and hashed_password == str(db_user.password):
login_user(db_user)
return redirect(url_for('index'))
else:
return render_template('login.html')
else:
return render_template('login.html')
@application.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@login_manager.user_loader
def user_loader(username):
try:
user = models.Users.query.filter(models.Users.username == username).first()
if user:
return user
return None
except Exception as e:
return e
``` |
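`user_loader` looks the user up by username, which only works if `Users.get_id()` returns the username, because Flask-Login hands whatever `get_id()` returned back to the loader. A sketch of a model consistent with that contract; the column definitions are assumptions, not the project's actual schema:
```python
# app/models.py (assumed shape, consistent with the loader above)
from flask_login import UserMixin

from app import db

class Users(UserMixin, db.Model):
    username = db.Column(db.String(64), primary_key=True)
    password = db.Column(db.String(128))
    twofactor = db.Column(db.String(32))

    def get_id(self):
        # Flask-Login stores this value in the session and passes it to user_loader
        return self.username
```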
{
"source": "11a66k/python_training",
"score": 2
} |
#### File: python_training/test/test_delete_group.py
```python
from model.group import Group
import random
def test_delete_first(app, db, chech_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.group.delete_group_by_id(group.id)
new_groups = db.get_group_list()
assert len(old_groups) - 1 == app.group.count()
old_groups.remove(group)
assert old_groups == new_groups
if chech_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
``` |
{
"source": "11ajith/Beginner-python-automation-scripts",
"score": 4
} |
#### File: 11ajith/Beginner-python-automation-scripts/url_shortener.py
```python
import requests
from urllib import parse
def url_shortener(url):
url = url.strip()
if not parse.urlparse(url).scheme:
url = 'http://' + url
query_url = f'http://tinyurl.com/api-create.php?url={url}'
return requests.get(query_url).text
print(url_shortener("enter the url to be shortened"))
``` |
{
"source": "11aparna91/LeetCodesPython",
"score": 3
} |
#### File: 11aparna91/LeetCodesPython/add-binary.py
```python
class Solution:
def addBinary(self, a: str, b: str) -> str:
l1=list(a)
l2=list(b)
carry=0
res=""
sum=0
while(l1 or l2 or carry):
if l1:
carry = carry + int(l1.pop())
if l2:
carry = carry + int(l2.pop())
out=carry%2
res= res + str(out)
carry=carry//2
return res[::-1]
```
#### File: 11aparna91/LeetCodesPython/add-to-array-form-of-integer.py
```python
class Solution:
def addToArrayForm(self, num: List[int], k: int) -> List[int]:
str1=""
for i in num:
str1 = str1 + str(i)
print(str1)
str1= int(str1)
str1= str1 + k
str1= str(str1)
output=[]
for i in str1:
output.append(i)
return output
```
#### File: 11aparna91/LeetCodesPython/binary-search.py
```python
class Solution:
def search(self, nums: List[int], target: int) -> int:
self.nums, self.target = nums, target
return self.helper(0, len(nums)-1)
def helper(self, low: int, high: int) -> int:
if low > high: return -1
mid = (high + low) // 2
if self.target == self.nums[mid]: return mid
elif self.target < self.nums[mid]: return self.helper(low, mid-1)
else: return self.helper(mid+1, high)
############################## Solution 2 Time complexity= O(log N) Space Complexity= O(1)#####################
class Solution:
def search(self, nums: List[int], target: int) -> int:
left=0
right=len(nums)-1
while(left<=right):
mid= (left+right)//2
if nums[mid] == target:
return mid
if nums[mid] < target:
left=mid+1
if nums[mid] > target:
right=mid-1
return -1
```
#### File: 11aparna91/LeetCodesPython/check-if-every-row-and-column-contains-all-numbers.py
```python
import math
class Solution:
def checkValid(self, matrix: List[List[int]]) -> bool:
leng= len(matrix)
for i in range(leng):
num=1
while(num<=leng):
if num in matrix[i]:
num=num+1
else:
return False
if leng==1:
if len(matrix[0])==1:
return True
else:
return False
else:
sum_i= math.floor(leng*(leng+1)/2)
print(sum_i)
cnt=0
for i in range(leng):
total=0
total_col=0
for j in range(leng):
total=total + matrix[i][j]
total_col= total_col + matrix[j][i]
if (total==sum_i and total_col==sum_i):
cnt=cnt+1
else:
return False
print(cnt,leng)
if cnt==leng:
return True
else:
return False
############################################################################# Another Solution with constant space##############################################
##########################################you can use sum of elements to check along with no repeating elements anywhere to avoid the edge case#################
class Solution:
def checkValid(self, matrix: List[List[int]]) -> bool:
n = len(matrix)
s = (n*(n+1))//2
for row in range(n):
if n>1 and matrix[row][0]==matrix[row][1]:
return False
if sum(matrix[row])!=s:
return False
for column in range(n):
ss=0
for row in range(n):
ss+=matrix[row][column]
if ss!=s:
return False
return True
```
#### File: 11aparna91/LeetCodesPython/Even_number_of_digits.py
```python
class Solution:
def findNumbers(self, nums: List[int]) -> int:
count=0
lennum=len(nums)
for i in range(lennum):
even=0
number=nums[i]
while(number!=0):
rem=number%10
even+=1
number=int(number/10)
if even%2==0:
count+=1
return count
```
#### File: 11aparna91/LeetCodesPython/find-peak-element.py
```python
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
left=0
right= len(nums)-1
while left< right:
mid= left + (right-left)//2
if nums[mid+1]>nums[mid]:
left= mid+1
else:
right= mid
return left
```
#### File: 11aparna91/LeetCodesPython/maximum-product-of-two-elements-in-an-array.py
```python
import heapq
class Solution:
def maxProduct(self, nums: List[int]) -> int:
arr=[]
arr=heapq.nlargest(2,nums)
result=1
for i in arr:
result = result * (i-1)
return result
###### Second Solution #######
import heapq
class Solution:
def maxProduct(self, nums: List[int]) -> int:
arr=heapq.nlargest(2,nums)
return (arr[0]-1) * (arr[1]-1)
############################################ 3rd Solution #########
class Solution:
def maxProduct(self, nums: List[int]) -> int:
first=second=0
for n in nums:
if n>first:
second=first
first=n
else:
second=max(second,n)
return (first-1) * (second-1)
```
#### File: 11aparna91/LeetCodesPython/merge-two-sorted-lists.py
```python
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
if None in (l1,l2):
return l1 or l2
if(l1.val < l2.val):
l1.next=self.mergeTwoLists(l1.next,l2)
return l1
else:
l2.next=self.mergeTwoLists(l1,l2.next)
return l2
```
#### File: 11aparna91/LeetCodesPython/pascals-triangle.py
```python
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
arr=[]
for i in range (0,numRows,1):
array=[]
for j in range(0,i+1,1):
if j==0 or j==i:
array.append(1)
else:
array.append(arr[i-1][j-1] + arr[i-1][j])
arr.append(array)
return arr
```
#### File: 11aparna91/LeetCodesPython/plus-one.py
```python
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
str1=""
for i in range(len(digits)):
str1= str1 + str(digits[i])
str1=int(str1)
str1= str1+1
str1=str(str1)
output=[]
for i in range(len(str1)):
output.append(str1[i])
return output
```
#### File: 11aparna91/LeetCodesPython/Plus_One.py
```python
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
total=0
multiply=1
for i in range(len(digits)-1,-1,-1):
d= digits[i]
multiply=multiply*1
total= total + (d*multiply)
multiply=multiply*10
total= total + 1
res = [int(x) for x in str(total)]
return res
```
#### File: 11aparna91/LeetCodesPython/remove-duplicates-from-sorted-list.py
```python
class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
cur=head
while cur:
while cur.next and cur.next.val==cur.val:
cur.next=cur.next.next
cur=cur.next
return head
```
#### File: 11aparna91/LeetCodesPython/repeated-dna-sequences.py
```python
class Solution:
def findRepeatedDnaSequences(self, s: str) -> List[str]:
n=len(s)
l=10
all_str=set()
output= set()
for i in range(n-l+1):
temp= s[i:i+l]
if temp in all_str:
output.add(temp)
all_str.add(temp)
        return list(output)
```
#### File: 11aparna91/LeetCodesPython/sqrtx.py
```python
class Solution:
def mySqrt(self, x: int) -> int:
low=0
high=x
middle= (low+high)//2
while(high>middle and middle>low):
square=middle*middle
if (square==x):
return int(middle)
if (square>x):
high=middle
else:
low=middle
middle=(low+high)//2
if x<high*high: #check this condition for x=8 or x=1
return int(low)
else:
return int(high)
```
#### File: 11aparna91/LeetCodesPython/symmetric-tree.py
```python
class Solution:
def isSymmetric(self, root: Optional[TreeNode]) -> bool:
if root is not None:
return self.Symmetric(root.left, root.right)
else:
return True
def Symmetric(self, left, right):
if left is not None and right is not None:
if left.val==right.val:
return self.Symmetric(left.left, right.right) and self.Symmetric(left.right, right.left)
else:
return False
if left is None and right is None:
return True
return False
```
#### File: 11aparna91/LeetCodesPython/valid-anagram.py
```python
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
list1=list(s)
dict1={}
for i in list1:
if i in dict1:
dict1[i]+=1
else:
dict1[i]=1
print(dict1)
list2=list(t)
dict2={}
for i in list2:
if i in dict2:
dict2[i]+=1
else:
dict2[i]=1
print(dict2)
if dict1==dict2:
return True
else:
return False
```
#### File: 11aparna91/LeetCodesPython/Valid_Parentheses.py
```python
class Solution:
def isValid(self, s: str) -> bool:
Open_Brackets=['(','{','[']
Closed_Brackets=[')','}',']']
Stack=[]
for i in s:
if(i in Open_Brackets):
Stack.append(i)
else:
pos=Closed_Brackets.index(i)
if(len(Stack)>0 and Open_Brackets[pos]==Stack[len(Stack)-1]):
Stack.pop()
else:
return False
if (len(Stack)==0):
return True
else:
return False
```
#### File: 11aparna91/LeetCodesPython/word-search.py
```python
class Solution:
def exist(self, board, word):
if not word:
return True
if not board:
return False
m=len(board)
n=len(board[0])
w=len(word)-1
def dfs(i,j,k):
if (i<0) or (i>=m) or (j<0) or (j>=n):
return False
if board[i][j] != word[k]:
return False
if board[i][j]=='#':
return False
if k==w:
return True
tmp = board[i][j]
board[i][j]='#'
k +=1
for x,y in ((+1,0),(-1,0),(0,-1),(0,+1)):
if dfs(i+x,j+y,k):
return True
board[i][j]=tmp
return False
for i in range(m):
for j in range(n):
if dfs(i,j,0):
return True
return False
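if __name__ == "__main__":
    # Editorial usage example (not part of the original solution), using the
    # classic LeetCode board for this problem:
    board = [["A", "B", "C", "E"],
             ["S", "F", "C", "S"],
             ["A", "D", "E", "E"]]
    print(Solution().exist(board, "ABCCED"))  # expected: True
    print(Solution().exist(board, "ABCB"))    # expected: False (cells cannot be reused)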
``` |
{
"source": "11bio/examples",
"score": 3
} |
#### File: examples/python/example01.py
```python
import json
import requests
import ssl
import sys
from config import config
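# Editorial note: the imported config object is assumed to expose API_URL,
# API_KEY and API_ACCOUNT, which are used throughout main() below.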
def print_json(parsed_json):
"""PRint JSON"""
print(json.dumps(parsed_json, indent=4))
def check_response(res):
"""Checks response for errors.
Terminates application in case of error.
Returns json data otherwise.
"""
try:
data = res.json()
except Exception as ex:
print(ex)
data = None
    if res.status_code != 200 or not data or 'error' in data:
        message = data.get('error') if data else None
        print('Error! Status: %s; Message: %s' % (res.status_code, message))
if data:
print('Response: ')
print_json(data)
sys.exit(1)
return data
def main():
headers = {
'Authorization': 'Bearer ' + config.API_KEY,
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
# GET currencies
print('\nGet Currencies: ')
res = requests.get(url = config.API_URL + '/api/v1/currencies', headers = headers)
res_data = check_response(res)
print_json(res_data)
# GET symbols
print('\nGet Symbols: ')
res = requests.get(url = config.API_URL + '/api/v1/symbols', headers = headers)
res_data = check_response(res)
print_json(res_data)
# GET quotes
print('\nGet Quotes: ')
res = requests.get(url = config.API_URL + '/api/v1/quotes?symbols=EUR/USD,USD/JPY', headers = headers)
res_quotes = check_response(res)
print_json(res_quotes)
# GET accounts
print('\nGet Accounts: ')
res = requests.get(url = config.API_URL + '/api/v1/accounts', headers = headers)
res_data = check_response(res)
print_json(res_data)
# GET orders
print('\nGet Orders: ')
res = requests.get(url = config.API_URL + '/api/v1/orders', headers = headers)
res_data = check_response(res)
print_json(res_data)
# GET positions
print('\nGet Positions: ')
res = requests.get(url = config.API_URL + '/api/v1/positions', headers = headers)
res_data = check_response(res)
print_json(res_data)
# GET closed_positions
print('\nGet Closed Positions: ')
    res = requests.get(url = config.API_URL + '/api/v1/closed_positions', headers = headers)
res_data = check_response(res)
print_json(res_data)
# Create Limit Entry
print('\nCreate Limit: ')
params = {
'account_id': config.API_ACCOUNT,
'order_type': 'LIMIT',
'symbol': 'EUR/USD',
'side': 'BUY',
'quantity': 10000,
'price': res_quotes['quote_snapshot'][0]['offer']-.0050,
'stop_loss_price': res_quotes['quote_snapshot'][0]['offer']-.010,
'take_profit_price': res_quotes['quote_snapshot'][0]['offer'],
'client_order_id': 'order-limit_entry-buy-' + config.API_ACCOUNT
}
res = requests.post(url = config.API_URL + '/api/v1/orders', headers = headers, data = params)
res_data = check_response(res)
print_json(res_data)
limit_order_id = None
if ('order' in res_data) and ('order_id' in res_data['order']):
limit_order_id = res_data['order']['order_id']
print('Limit Order ID: %s' % limit_order_id)
take_profit_order_id = None
print_json(res_data)
if ('linked_orders' in res_data) and (len(res_data['linked_orders']) > 0) and ('order_id' in res_data['linked_orders'][0]):
take_profit_order_id = res_data['linked_orders'][0]['order_id']
print('TP Order ID: %s' % take_profit_order_id)
# Change Entry Price
print('\nChange Limit Price: ')
params = {
'order_id': limit_order_id,
'price':res_quotes['quote_snapshot'][0]['offer']-.0040
}
res = requests.patch(url = config.API_URL + '/api/v1/orders', headers = headers, data = params)
res_data = check_response(res)
print_json(res_data)
# Delete Limit from Entry
print('\nDelete TP for Limit: ')
params = {
'order_id': take_profit_order_id
}
res = requests.delete(url = config.API_URL + '/api/v1/orders', headers = headers, data = params)
res_data = check_response(res)
print_json(res_data)
# Delete Limit Entry
print('\nDelete Limit: ')
params = {
'order_id': limit_order_id
}
res = requests.delete(url = config.API_URL + '/api/v1/orders', headers = headers, data = params)
res_data = check_response(res)
print_json(res_data)
# Create Market order
print('\nCreate Market Order: ')
params = {
'account_id': config.API_ACCOUNT,
'order_type': 'MARKET',
'symbol': 'EUR/USD',
'side': 'BUY',
'quantity': 10000,
'price': 1.1280,
'client_order_id': 'order-market-buy-' + config.API_ACCOUNT
}
res = requests.post(url = config.API_URL + '/api/v1/orders', headers = headers, data = params)
res_data = check_response(res)
print_json(res_data)
position_id = None
if ('positions' in res_data) and (len(res_data['positions']) > 0) and ('position_id' in res_data['positions'][0]):
position_id = res_data['positions'][0]['position_id']
print('Position ID: %s' % position_id)
# Create closing Market order
print('\nCreate Closing Market Order: ')
params = {
'account_id': config.API_ACCOUNT,
'order_type': 'MARKET',
'position_id': position_id,
'symbol': 'EUR/USD',
'side': 'SELL',
'quantity': 10000,
'price': 1.1280,
'client_order_id': 'order-market-sell-' + config.API_ACCOUNT
}
res = requests.post(url = config.API_URL + '/api/v1/orders', headers = headers, data = params)
res_data = check_response(res)
print_json(res_data)
# Successfully Finished
return 0
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "11bit/fuzzy-import",
"score": 2
} |
#### File: fuzzy-import/tools/exports_parser.py
```python
import re
import itertools
from FuzzyImports.lark import Lark, Transformer, Tree
from FuzzyImports.lark.lexer import Token
l = Lark('''
start: NEWLINE? (exports | other)+
exports: EXPORT DEFAULT? FUNCTION (_WS+ NAME)? "(" -> export_function
| EXPORT DEFAULT? CLASS NAME -> export_class
| EXPORT DEFAULT expression SEP? -> export_default_expression
| EXPORT VAR var_decl "=" -> export_var
| EXPORT TYPE var_decl "=" -> export_type
?var_decl: NAME -> var_name
| obj_decl_group
obj_decl_group: "{" obj_decl ("," obj_decl)* ","? "}"
obj_decl: NAME -> var_name
| obj_decl_pair
obj_decl_pair: NAME ":" obj_decl_pair_right
?obj_decl_pair_right: NAME -> var_name
| obj_decl_group
expression : any+
// | NAME SEP -> export_default_name
?any: /./
other: /./+ SEP?
EXPORT: SEP WS* "export" _WS+
DEFAULT: "default" _WS+
FUNCTION: "function"
CLASS.2: "class" _WS+
VAR: ("const" | "let" | "var") _WS+
TYPE: "type"
_WS: WS | NEWLINE
SEP: (";" | NEWLINE)+
// Insert grammar from common.g here so we can still use zipped sublime package
// ----- content from common.g ---------
WS: /[ \\t\\f\\r\\n]/+
WS_INLINE: (" "|/\\t/)+
DIGIT: "0".."9"
LCASE_LETTER: "a".."z"
UCASE_LETTER: "A".."Z"
LETTER: UCASE_LETTER | LCASE_LETTER
WORD: LETTER+
NAME: ("_"|LETTER) ("_"|LETTER|DIGIT)*
CR : /\\r/
LF : /\\n/
NEWLINE: (CR? LF)+
// ----- end grammar from common.g ------
%ignore WS
''', start='start', parser="lalr", lexer="contextual")
def flatten(names):
return list(itertools.chain.from_iterable(names))
class CodeToExports(Transformer):
def __init__(self):
self.exports = []
@staticmethod
def isDefault(arr):
return len([x for x in arr if x.type == 'DEFAULT']) == 1
def start(self, _):
return self.exports
def export_default_function(self, s):
names = [x for x in s if x.type == 'NAME']
assert len(names) == 1, 'export_default_function parse error: should be only one NAME'
self.exports.append(dict(
name="export default function",
value=names[0].value
))
def export_default_name(self, s):
names = [x for x in s if x.type == 'NAME']
assert len(names) == 1, 'export_default_name parse error: should be only one NAME'
self.exports.append(dict(
name="export named variable",
isDefault=True,
value=names[0]
))
def export_function(self, s):
names = [x for x in s if x.type == 'NAME']
assert len(names) <= 1, 'export_function parse error: should be no more than one NAME'
func_name = names[0].value if len(names) == 1 else 'Anonymous Function'
self.exports.append(dict(
name="export function",
isDefault=CodeToExports.isDefault(s),
value=func_name
))
def export_class(self, s):
names = [x for x in s if x.type == 'NAME']
assert len(names) == 1, 'export_function parse error: should be only one NAME'
self.exports.append(dict(
name="export class",
isDefault=CodeToExports.isDefault(s),
value=names[0].value
))
def export_default_expression(self, s):
expression_tree = [x for x in s if isinstance(x, Tree) and x.data == 'expression'][0]
expression = ''.join(expression_tree.children)
if re.match('^[^!@#%^&*()+-=<>/\\\\"\',.]*$', expression) is not None:
name = expression
else:
name = ''
self.exports.append(dict(
name="export default expression" if name == '' else "export named variable",
isDefault=True,
value=name
))
def export_var(self, s):
names = [x for x in s if isinstance(x, list)]
names = flatten(names)
for name in names:
self.exports.append(dict(
name="export const",
isDefault=False,
value=name
))
def obj_decl_group(self, s):
print(s)
names = []
for token in s:
if isinstance(token, Token):
names.append(token)
else:
names += token
return names
def obj_decl(self, s):
return flatten(s)
def obj_decl_pair(self, s):
return s[1]
def var_name(self, s):
return [s[0]]
def parse(file):
with open(file, errors="replace") as f:
return parse_source(f.read())
def parse_source(src):
tree = l.parse(";" + src)
# print(tree.pretty())
return CodeToExports().transform(tree)
```
#### File: fuzzy-import/tools/file_utils.py
```python
from os import path
def is_js(file):
ext = path.splitext(file)[1]
return ext.lower() in ['.js', '.jsx']
def get_relative_file_dir(file, startFile, no_extension=False, no_index=False):
file_dir = path.dirname(file)
if file_dir.startswith(startFile):
res = "./" + path.relpath(file, startFile)
else:
res = path.relpath(file, startFile)
if no_index and path.splitext(path.basename(file))[0] == 'index':
return path.dirname(res)
return path.splitext(res)[0] if no_extension else res
def kebab_to_camel(name):
return ''.join((
part if index == 0 else part[0].upper() + part[1:]
for index, part in enumerate(name.split('-'))
))
def guess_import_name(file):
file_name = path.splitext(path.basename(file))[0]
print(file_name)
print(kebab_to_camel(file_name))
return kebab_to_camel(file_name)
``` |
{
"source": "11bluetree/weblogic-deploy-tooling",
"score": 2
} |
#### File: main/python/encrypt.py
```python
import os
import sys
from java.io import IOException
from java.lang import IllegalArgumentException
from java.lang import String, System
from oracle.weblogic.deploy.encrypt import EncryptionException
from oracle.weblogic.deploy.util import CLAException
from oracle.weblogic.deploy.util import FileUtils
from oracle.weblogic.deploy.util import TranslateException
from oracle.weblogic.deploy.util import VariableException
from oracle.weblogic.deploy.util import WebLogicDeployToolingVersion
sys.path.append(os.path.dirname(os.path.realpath(sys.argv[0])))
# imports from local packages start here
from wlsdeploy.aliases.aliases import Aliases
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.exception.expection_types import ExceptionType
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.tool.encrypt import encryption_utils
from wlsdeploy.tool.util.alias_helper import AliasHelper
from wlsdeploy.util import cla_helper
from wlsdeploy.util import getcreds
from wlsdeploy.util import variables as variable_helper
from wlsdeploy.util.cla_utils import CommandLineArgUtil
from wlsdeploy.util.model_context import ModelContext
from wlsdeploy.util.model_translator import FileToPython
from wlsdeploy.util.model_translator import PythonToFile
_program_name = 'encryptModel'
_class_name = 'encrypt'
__logger = PlatformLogger('wlsdeploy.encrypt')
__required_arguments = [
CommandLineArgUtil.ORACLE_HOME_SWITCH,
]
__optional_arguments = [
CommandLineArgUtil.DOMAIN_TYPE_SWITCH,
CommandLineArgUtil.MODEL_FILE_SWITCH,
CommandLineArgUtil.VARIABLE_FILE_SWITCH,
CommandLineArgUtil.PASSPHRASE_SWITCH,
CommandLineArgUtil.ENCRYPT_MANUAL_SWITCH,
CommandLineArgUtil.ONE_PASS_SWITCH
]
def __process_args(args):
"""
Process the command-line arguments and prompt the user for any missing information
:param args: the command-line arguments list
:raises CLAException: if an error occurs while validating and processing the command-line arguments
"""
_method_name = '__process_args'
cla_util = CommandLineArgUtil(_program_name, __required_arguments, __optional_arguments)
required_arg_map, optional_arg_map = cla_util.process_args(args)
cla_helper.verify_required_args_present(_program_name, __required_arguments, required_arg_map)
__validate_mode_args(optional_arg_map)
__process_passphrase_arg(optional_arg_map)
#
# Prompt for the password to encrypt if the -manual switch was specified
#
if CommandLineArgUtil.ENCRYPT_MANUAL_SWITCH in optional_arg_map and \
CommandLineArgUtil.ONE_PASS_SWITCH not in optional_arg_map:
try:
pwd = getcreds.getpass('<PASSWORD>')
except IOException, ioe:
ex = exception_helper.create_encryption_exception('WLSDPLY-04201', ioe.getLocalizedMessage(), error=ioe)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
optional_arg_map[CommandLineArgUtil.ONE_PASS_SWITCH] = String(pwd)
combined_arg_map = optional_arg_map.copy()
combined_arg_map.update(required_arg_map)
model_context = ModelContext(_program_name, combined_arg_map)
return model_context
def __validate_mode_args(optional_arg_map):
"""
Verify that either the model_file or the manual switch was specified.
:param optional_arg_map: the optional arguments map
:raises CLAException: if the arguments are not valid
"""
_method_name = '__validate_mode_args'
if CommandLineArgUtil.MODEL_FILE_SWITCH in optional_arg_map:
model_file_name = optional_arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH]
try:
FileUtils.validateExistingFile(model_file_name)
except IllegalArgumentException, iae:
ex = exception_helper.create_cla_exception('WLSDPLY-20006', _program_name, model_file_name,
iae.getLocalizedMessage(), error=iae)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
elif CommandLineArgUtil.ENCRYPT_MANUAL_SWITCH not in optional_arg_map:
ex = exception_helper.create_cla_exception('WLSDPLY-04202', _program_name, CommandLineArgUtil.MODEL_FILE_SWITCH,
CommandLineArgUtil.ENCRYPT_MANUAL_SWITCH)
ex.setExitCode(CommandLineArgUtil.USAGE_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
return
def __process_passphrase_arg(optional_arg_map):
"""
Prompt for the passphrase.
:param optional_arg_map: the optional arguments map
:raises CLAException: if an error occurs reading the passphrase input from the user
"""
_method_name = '__process_passphrase_arg'
if CommandLineArgUtil.PASSPHRASE_SWITCH not in optional_arg_map:
got_matching_passphrases = False
while not got_matching_passphrases:
try:
passphrase = getcreds.getpass('<PASSWORD>')
passphrase2 = getcreds.getpass('<PASSWORD>')
except IOException, ioe:
ex = exception_helper.create_encryption_exception('WLSDPLY-04205', ioe.getLocalizedMessage(), error=ioe)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
if passphrase == passphrase2:
got_matching_passphrases = True
optional_arg_map[CommandLineArgUtil.PASSPHRASE_SWITCH] = String(passphrase)
else:
# if it is script mode do not prompt again
if System.console() is None:
ex = exception_helper.create_cla_exception('WLSDPLY-04213')
ex.setExitCode(CommandLineArgUtil.PROG_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
return
def __encrypt_model_and_variables(model_context):
"""
Encrypt the model and variables file, if provided.
:param model_context: the model context object containing the processed command-line arguments
:return: the exit code that should be used to exit the program
"""
_method_name = '__encrypt_model_and_variables'
model_file = model_context.get_model_file()
try:
model = FileToPython(model_file, True).parse()
except TranslateException, te:
__logger.severe('WLSDPLY-04206', _program_name, model_file, te.getLocalizedMessage(), error=te,
class_name=_class_name, method_name=_method_name)
return CommandLineArgUtil.PROG_ERROR_EXIT_CODE
variable_file = model_context.get_variable_file()
variables = None
if variable_file is not None:
try:
variables = variable_helper.load_variables(variable_file)
except VariableException, ve:
__logger.severe('WLSDPLY-04207', _program_name, variable_file, ve.getLocalizedMessage(), error=ve,
class_name=_class_name, method_name=_method_name)
return CommandLineArgUtil.PROG_ERROR_EXIT_CODE
aliases = Aliases(model_context, wlst_mode=WlstModes.OFFLINE)
alias_helper = AliasHelper(aliases, __logger, ExceptionType.ENCRYPTION)
try:
passphrase = model_context.get_encryption_passphrase()
model_change_count, variable_change_count = \
encryption_utils.encrypt_model_dictionary(passphrase, model, alias_helper, variables)
except EncryptionException, ee:
__logger.severe('WLSDPLY-04208', _program_name, ee.getLocalizedMessage(), error=ee,
class_name=_class_name, method_name=_method_name)
return CommandLineArgUtil.PROG_ERROR_EXIT_CODE
if variable_change_count > 0:
try:
variable_helper.write_variables(_program_name, variables, variable_file)
__logger.info('WLSDPLY-04209', _program_name, variable_change_count, variable_file,
class_name=_class_name, method_name=_method_name)
except VariableException, ve:
__logger.severe('WLSDPLY-20007', _program_name, variable_file, ve.getLocalizedMessage(), error=ve,
class_name=_class_name, method_name=_method_name)
return CommandLineArgUtil.PROG_ERROR_EXIT_CODE
if model_change_count > 0:
try:
model_writer = PythonToFile(model)
model_writer.write_to_file(model_file)
__logger.info('WLSDPLY-04210', _program_name, model_change_count, model_file,
class_name=_class_name, method_name=_method_name)
except TranslateException, te:
__logger.severe('WLSDPLY-04211', _program_name, model_file, te.getLocalizedMessage(), error=te,
class_name=_class_name, method_name=_method_name)
return CommandLineArgUtil.PROG_ERROR_EXIT_CODE
return CommandLineArgUtil.PROG_OK_EXIT_CODE
# Factored out for unit testing...
def _process_request(args):
"""
Performs the work for the encryptModel tool.
:param args: the command-line arguments list
:return: the exit code that should be used to exit the program
"""
_method_name = '_process_request'
__logger.entering(args[0], class_name=_class_name, method_name=_method_name)
try:
model_context = __process_args(args)
except CLAException, ex:
exit_code = ex.getExitCode()
if exit_code != CommandLineArgUtil.HELP_EXIT_CODE:
__logger.severe('WLSDPLY-20008', _program_name, ex.getLocalizedMessage(), error=ex,
class_name=_class_name, method_name=_method_name)
return exit_code
if model_context.is_encryption_manual():
try:
passphrase = model_context.get_encryption_passphrase()
encrypted_password = encryption_utils.encrypt_one_password(passphrase, model_context.get_encrypt_one_pass())
print ""
print encrypted_password
exit_code = CommandLineArgUtil.PROG_OK_EXIT_CODE
except EncryptionException, ee:
exit_code = CommandLineArgUtil.PROG_ERROR_EXIT_CODE
__logger.severe('WLSDPLY-04212', _program_name, ee.getLocalizedMessage(), error=ee,
class_name=_class_name, method_name=_method_name)
else:
exit_code = __encrypt_model_and_variables(model_context)
__logger.exiting(class_name=_class_name, method_name=_method_name, result=exit_code)
return exit_code
def main(args):
"""
The main entry point for the encryptModel tool.
:param args:
:return:
"""
_method_name = 'main'
__logger.entering(args[0], class_name=_class_name, method_name=_method_name)
for index, arg in enumerate(args):
__logger.finer('sys.argv[{0}] = {1}', str(index), str(arg), class_name=_class_name, method_name=_method_name)
exit_code = _process_request(args)
__logger.exiting(class_name=_class_name, method_name=_method_name, result=exit_code)
sys.exit(exit_code)
if __name__ == '__main__' or __name__ == 'main':
WebLogicDeployToolingVersion.logVersionInfo(_program_name)
main(sys.argv)
```
#### File: tool/create/rcudbinfo_helper.py
```python
from wlsdeploy.aliases import alias_utils
from wlsdeploy.aliases.model_constants import ATP_ADMIN_USER
from wlsdeploy.aliases.model_constants import ATP_DEFAULT_TABLESPACE
from wlsdeploy.aliases.model_constants import ATP_TEMPORARY_TABLESPACE
from wlsdeploy.aliases.model_constants import ATP_TNS_ENTRY
from wlsdeploy.aliases.model_constants import DRIVER_PARAMS_KEYSTOREPWD_PROPERTY
from wlsdeploy.aliases.model_constants import DRIVER_PARAMS_NET_TNS_ADMIN
from wlsdeploy.aliases.model_constants import DRIVER_PARAMS_TRUSTSTOREPWD_PROPERTY
from wlsdeploy.aliases.model_constants import RCU_ADMIN_PASSWORD
from wlsdeploy.aliases.model_constants import RCU_DB_CONN
from wlsdeploy.aliases.model_constants import RCU_PREFIX
from wlsdeploy.aliases.model_constants import RCU_SCHEMA_PASSWORD
from wlsdeploy.aliases.model_constants import RCU_VARIABLES
from wlsdeploy.aliases.model_constants import USE_ATP
class RcuDbInfo(object):
"""
Accesses the fields of the domainInfo/RCUDbInfo section of the model.
Decrypts fields if the model was encrypted.
Returns default values for some unspecified fields.
"""
def __init__(self, alias_helper, rcu_properties_map):
self.alias_helper = alias_helper
self.rcu_properties_map = rcu_properties_map
def get_atp_tns_admin(self):
return self.rcu_properties_map[DRIVER_PARAMS_NET_TNS_ADMIN]
def get_atp_entry(self):
return self.rcu_properties_map[ATP_TNS_ENTRY]
def get_rcu_prefix(self):
return self.rcu_properties_map[RCU_PREFIX]
def get_rcu_schema_password(self):
password = self.rcu_properties_map[RCU_SCHEMA_PASSWORD]
return self.alias_helper.decrypt_password(password)
def get_keystore_password(self):
password = self.rcu_properties_map[DRIVER_PARAMS_KEYSTOREPWD_PROPERTY]
return self.alias_helper.decrypt_password(password)
def get_truststore_password(self):
password = self.rcu_properties_map[DRIVER_PARAMS_TRUSTSTOREPWD_PROPERTY]
return self.alias_helper.decrypt_password(password)
def get_admin_password(self):
password = self.rcu_properties_map[RCU_ADMIN_PASSWORD]
return self.alias_helper.decrypt_password(password)
def get_rcu_regular_db_conn(self):
return self.rcu_properties_map[RCU_DB_CONN]
def get_atp_default_tablespace(self):
if ATP_DEFAULT_TABLESPACE in self.rcu_properties_map:
return self.rcu_properties_map[ATP_DEFAULT_TABLESPACE]
else:
return 'DATA'
def get_atp_temporary_tablespace(self):
if ATP_TEMPORARY_TABLESPACE in self.rcu_properties_map:
return self.rcu_properties_map[ATP_TEMPORARY_TABLESPACE]
else:
return 'TEMP'
def get_atp_admin_user(self):
if ATP_ADMIN_USER in self.rcu_properties_map:
return self.rcu_properties_map[ATP_ADMIN_USER]
else:
return 'admin'
def get_rcu_variables(self):
if RCU_VARIABLES in self.rcu_properties_map:
return self.rcu_properties_map[RCU_VARIABLES]
else:
return None
    # has_tns_admin is used to find the extract location if it has already been extracted by the user.
    # It is an optional field, so it is insufficient to determine whether the domain uses ATP.
def has_tns_admin(self):
return DRIVER_PARAMS_NET_TNS_ADMIN in self.rcu_properties_map
def has_atpdbinfo(self):
return self.is_use_atp()
def is_regular_db(self):
is_regular = 0
if not self.is_use_atp():
is_regular = 1
if RCU_DB_CONN in self.rcu_properties_map:
is_regular = 1
return is_regular
def is_use_atp(self):
"""
Determine if the RCU DB info uses the ATP database.
The model should allow all the values allowed by boolean alias model elements.
The default when not specified is False.
:return: True if the model value is present and indicates true, False otherwise
"""
if USE_ATP in self.rcu_properties_map:
model_value = self.rcu_properties_map[USE_ATP]
value = alias_utils.convert_to_type('boolean', model_value)
return value == 'true'
return False
```
#### File: tool/discover/domain_info_discoverer.py
```python
import glob
import os
from java.io import File
from oracle.weblogic.deploy.util import WLSDeployArchiveIOException
from oracle.weblogic.deploy.util import FileUtils
from wlsdeploy.aliases import alias_constants
from wlsdeploy.aliases import model_constants
from wlsdeploy.aliases.location_context import LocationContext
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.tool.discover import discoverer
from wlsdeploy.tool.discover.discoverer import Discoverer
from wlsdeploy.tool.util.variable_injector import STANDARD_PASSWORD_INJECTOR
from wlsdeploy.util import path_utils
_class_name = 'DomainInfoDiscoverer'
_logger = PlatformLogger(discoverer.get_discover_logger_name())
class DomainInfoDiscoverer(Discoverer):
"""
Discover extra information about the domain. This information is not what is stored in domain
configuration files, but extra information that is required for the completeness of the domain.
"""
def __init__(self, model_context, domain_info_dictionary, base_location,
wlst_mode=WlstModes.OFFLINE, aliases=None, variable_injector=None):
Discoverer.__init__(self, model_context, base_location, wlst_mode, aliases, variable_injector)
self._dictionary = domain_info_dictionary
def discover(self):
"""
Discover the domain extra info resources. This information goes into a section of the model
that does not contain the WLST mbean information that describes the weblogic domain.
:return: dictionary containing the domain extra info
"""
_method_name = 'discover'
_logger.entering(class_name=_class_name, method_name=_method_name)
self.add_admin_credentials()
model_top_folder_name, result = self.get_domain_libs()
discoverer.add_to_model_if_not_empty(self._dictionary, model_top_folder_name, result)
model_top_folder_name, result = self.get_user_env_scripts()
discoverer.add_to_model_if_not_empty(self._dictionary, model_top_folder_name, result)
_logger.exiting(class_name=_class_name, method_name=_method_name)
return self._dictionary
def add_admin_credentials(self):
injector = self._get_variable_injector()
self._dictionary[model_constants.ADMIN_USERNAME] = alias_constants.PASSWORD_TOKEN
self._dictionary[model_constants.ADMIN_PASSWORD] = alias_constants.PASSWORD_TOKEN
if injector is not None:
location = LocationContext()
injector.custom_injection(self._dictionary, model_constants.ADMIN_USERNAME, location,
STANDARD_PASSWORD_INJECTOR)
injector.custom_injection(self._dictionary, model_constants.ADMIN_PASSWORD, location,
STANDARD_PASSWORD_INJECTOR)
def get_domain_libs(self):
"""
Add the java archive files stored in the domain lib into the archive file. Add the information for each
domain library to the domain info dictionary.
:raise DiscoverException: an unexpected exception occurred writing a jar file to the archive file
"""
_method_name = 'get_domain_libs'
_logger.entering(class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
domain_lib = self._convert_path('lib')
entries = []
if os.path.isdir(domain_lib):
_logger.finer('WLSDPLY-06420', domain_lib, class_name=_class_name, method_name=_method_name)
for entry in os.listdir(domain_lib):
entry_path = os.path.join(domain_lib, entry)
if path_utils.is_jar_file(entry_path):
try:
updated_name = archive_file.addDomainLibLibrary(File(entry_path))
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06421', entry,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
entries.append(updated_name)
_logger.finer('WLSDPLY-06422', entry, updated_name, class_name=_class_name,
method_name=_method_name)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=entries)
return model_constants.DOMAIN_LIBRARIES, entries
def get_user_env_scripts(self):
"""
Look for the user overrides scripts run in setDomainEnv in the domain bin directory
        :raise: DiscoverException: an unexpected exception occurred writing a script file to the archive file
"""
_method_name = 'get_user_env_scripts'
_logger.entering(class_name=_class_name, method_name=_method_name)
archive_file = self._model_context.get_archive_file()
domain_bin = self._convert_path('bin')
entries = []
if os.path.isdir(domain_bin):
search_directory = FileUtils.fixupFileSeparatorsForJython(os.path.join(domain_bin, "setUserOverrides*.*"))
_logger.finer('WLSDPLY-06425', search_directory, class_name=_class_name, method_name=_method_name)
file_list = glob.glob(search_directory)
if file_list:
_logger.finer('WLSDPLY-06423', domain_bin, class_name=_class_name, method_name=_method_name)
for entry in file_list:
try:
updated_name = archive_file.addDomainBinScript(File(entry))
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06426', entry,
wioe.getLocalizedMessage())
_logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
entries.append(updated_name)
_logger.finer('WLSDPLY-06424', entry, updated_name, class_name=_class_name,
method_name=_method_name)
_logger.exiting(class_name=_class_name, method_name=_method_name, result=entries)
return model_constants.DOMAIN_SCRIPTS, entries
```
#### File: tool/util/string_output_stream.py
```python
from java.io import OutputStream, ByteArrayOutputStream
from java.lang import String
"""
This class redirects stdout into a string so that WLST output can be captured.
"""
class StringOutputStream(OutputStream):
def __init__(self):
self.stream = ByteArrayOutputStream()
def write(self,b,off,len):
self.stream.write(b,off,len)
def get_string(self):
output = String(self.stream.toByteArray())
if self.stream is not None:
self.stream.close()
return output
```
#### File: wlsdeploy/util/model.py
```python
import os
import pprint
import oracle.weblogic.deploy.util.PyOrderedDict as OrderedDict
from wlsdeploy.aliases.model_constants import KNOWN_TOPLEVEL_MODEL_SECTIONS
from wlsdeploy.aliases.model_constants import KUBERNETES
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.util.weblogic_helper import WebLogicHelper
class Model(object):
"""
    Represents a WDT model and provides access to its top-level sections
    (domainInfo, topology, resources, appDeployments, kubernetes).
"""
_class_name = 'Model'
def __init__(self, model_dictionary=None, wls_version=None):
self._logger = PlatformLogger('wlsdeploy.model')
self._wls_helper = WebLogicHelper(wls_version)
self._topology = OrderedDict()
self._resources = OrderedDict()
self._deployments = OrderedDict()
self._domain_info = OrderedDict()
self._kubernetes = OrderedDict()
if model_dictionary is not None:
if 'topology' in model_dictionary:
self._topology = model_dictionary['topology']
if 'resources' in model_dictionary:
self._resources = model_dictionary['resources']
if 'appDeployments' in model_dictionary:
self._deployments = model_dictionary['appDeployments']
if 'domainInfo' in model_dictionary:
self._domain_info = model_dictionary['domainInfo']
if KUBERNETES in model_dictionary:
self._kubernetes = model_dictionary[KUBERNETES]
return
def get_model_resources(self):
"""
Get the resources section of the model.
:return: the resources dictionary
"""
return self._resources
def get_model_app_deployments(self):
"""
Get the appDeployments section of the model.
:return: the appDeployments dictionary
"""
return self._deployments
def get_model_topology(self):
"""
Get the topology section of the model.
:return: the topology dictionary
"""
return self._topology
def get_model_domain_info(self):
"""
Get the domainInfo section of the model.
:return: the domainInfo dictionary
"""
return self._domain_info
def get_model_kubernetes(self):
"""
Get the kubernetes section of the model.
:return: the kubernetes dictionary
"""
return self._kubernetes
def get_model(self):
"""
Get the model.
:return: the model dictionary
"""
model = OrderedDict()
if len(self._domain_info):
model['domainInfo'] = self._domain_info
if len(self._topology) > 0:
model['topology'] = self._topology
if len(self._resources) > 0:
model['resources'] = self._resources
if len(self._deployments) > 0:
model['appDeployments'] = self._deployments
return model
def log_model(self, level, message, method_name, class_name='Model'):
"""
Log the model.
:param level: the level to log at
:param message: the message to log
:param method_name: the method requesting the logging of the model
:param class_name: the class requesting the logging of the model
"""
self._logger.log(level, '{0} for WebLogic {1} is:', message, self._wls_helper.wl_version,
method_name=method_name, class_name=class_name)
self._logger.log(level, '"domainInfo": {0}', pprint.pformat(self._domain_info),
method_name=method_name, class_name=class_name)
self._logger.log(level, '"topology": {0}', self._topology,
method_name=method_name, class_name=class_name)
self._logger.log(level, '"resources": {0}', pprint.pformat(self._resources),
method_name=method_name, class_name=class_name)
self._logger.log(level, '"appDeployments": {0}', pprint.pformat(self._deployments),
method_name=method_name, class_name=class_name)
return
def get_model_resources_key():
"""
Get the model resources element key
:return: the model resources element key
"""
return 'resources'
def get_model_deployments_key():
"""
Get the model appDeployments element key
:return: the model appDeployments element key
"""
return 'appDeployments'
def get_model_topology_key():
"""
Get the model topology element key
:return: the model topology element key
"""
return 'topology'
def get_model_domain_info_key():
"""
Get the model domainInfo element key
:return: the model domainInfo element key
"""
return 'domainInfo'
def get_model_kubernetes_key():
"""
Get the model kubernetes element key
:return: the model kubernetes element key
"""
return KUBERNETES
def get_model_top_level_keys():
"""
Get the known top-level model element keys.
:return: a list of the known top-level model element keys
"""
return list(KNOWN_TOPLEVEL_MODEL_SECTIONS)
```
#### File: wlsdeploy/yaml/yaml_translator.py
```python
import re
import java.io.FileNotFoundException as JFileNotFoundException
import java.io.FileOutputStream as JFileOutputStream
import java.io.IOException as JIOException
import java.io.PrintWriter as JPrintWriter
import java.lang.IllegalArgumentException as JIllegalArgumentException
import oracle.weblogic.deploy.util.FileUtils as JFileUtils
import oracle.weblogic.deploy.yaml.YamlStreamTranslator as JYamlStreamTranslator
import oracle.weblogic.deploy.yaml.YamlTranslator as JYamlTranslator
from wlsdeploy.exception import exception_helper
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.yaml.dictionary_list import DictionaryList
class YamlToPython(object):
"""
A class that translates a YAML file into a python dictionary.
"""
_class_name = 'YamlToPython'
def __init__(self, file_name, use_ordering=False):
_method_name = '__init__'
self._file_name = file_name
self._use_ordering = use_ordering
self._logger = PlatformLogger('wlsdeploy.yaml')
try:
self._translator = JYamlTranslator(self._file_name, self._use_ordering)
except JIllegalArgumentException, iae:
yaml_ex = \
exception_helper.create_yaml_exception('WLSDPLY-18008', file_name, iae.getLocalizedMessage(), error=iae)
self._logger.throwing(class_name=self._class_name, method_name=_method_name, error=yaml_ex)
raise yaml_ex
return
def parse(self):
"""
Parse the Yaml content from the file and convert it to a Python dictionary.
:return: the Python dictionary
:raises: YamlException: if an error occurs while parsing the Yaml or converting it to the dictionary
"""
_method_name = 'parse'
self._logger.entering(class_name=self._class_name, method_name=_method_name)
# throws YamlException with details, nothing we can really add here...
result_dict = self._translator.parse()
# don't log the model on exit, it may contain passwords
self._logger.exiting(class_name=self._class_name, method_name=_method_name)
return result_dict
class YamlStreamToPython(object):
"""
A class that translates a YAML input stream into a python dictionary.
"""
_class_name = 'YamlStreamToPython'
def __init__(self, file_name, input_stream, use_ordering=False):
_method_name = '__init__'
self._file_name = file_name
self._use_ordering = use_ordering
self._logger = PlatformLogger('wlsdeploy.yaml')
try:
self._translator = JYamlStreamTranslator(self._file_name, input_stream, self._use_ordering)
except JIllegalArgumentException, iae:
yaml_ex = \
exception_helper.create_yaml_exception('WLSDPLY-18008', file_name, iae.getLocalizedMessage(), error=iae)
self._logger.throwing(class_name=self._class_name, method_name=_method_name, error=yaml_ex)
raise yaml_ex
return
def parse(self):
"""
Parse the Yaml content from the input stream and convert it to a Python dictionary.
:return: the Python dictionary
:raises: YamlException: if an error occurs while parsing the Yaml or converting it to the dictionary
"""
_method_name = 'parse'
self._logger.entering(class_name=self._class_name, method_name=_method_name)
# throws YamlException with details, nothing we can really add here...
result_dict = self._translator.parse()
self._logger.exiting(class_name=self._class_name, method_name=_method_name, result=result_dict)
return result_dict
class PythonToYaml(object):
"""
A class that converts a Python dictionary into Yaml and writes the output to a file.
"""
_class_name = 'PythonToYaml'
# 4 spaces
_indent_unit = ' '
_requires_quotes_chars_regex = '[:{}\[\],&*#?|<>=!%@`-]'
def __init__(self, dictionary):
# Fix error handling for None
self._dictionary = dictionary
self._logger = PlatformLogger('wlsdeploy.yaml')
return
def write_to_yaml_file(self, file_name):
"""
Convert the Python dictionary to Yaml and write it to the specified file.
:param file_name: the file name to which to write the Yaml output
:return: The canonical java.io.File object for the Yaml File
:raises: YamlException: if an error occurs while converting the dictionary to Yaml or writing to the file
"""
_method_name = 'writeToYamlFile'
self._logger.entering(file_name, class_name=self._class_name, method_name=_method_name)
try:
yaml_file = JFileUtils.validateWritableFile(file_name)
except JIllegalArgumentException, iae:
yaml_ex = exception_helper.create_yaml_exception('WLSDPLY-18009', file_name,
iae.getLocalizedMessage(), error=iae)
self._logger.throwing(class_name=self._class_name, method_name=_method_name, error=yaml_ex)
raise yaml_ex
fos = None
writer = None
try:
fos = JFileOutputStream(yaml_file, False)
writer = JPrintWriter(fos, True)
self._write_dictionary_to_yaml_file(self._dictionary, writer)
except JFileNotFoundException, fnfe:
yaml_ex = exception_helper.create_yaml_exception('WLSDPLY-18010', file_name,
fnfe.getLocalizedMessage(), error=fnfe)
self._logger.throwing(class_name=self._class_name, method_name=_method_name, error=yaml_ex)
self._close_streams(fos, writer)
raise yaml_ex
except JIOException, ioe:
yaml_ex = exception_helper.create_yaml_exception('WLSDPLY-18011', file_name,
ioe.getLocalizedMessage(), error=ioe)
self._logger.throwing(class_name=self._class_name, method_name=_method_name, error=yaml_ex)
self._close_streams(fos, writer)
raise yaml_ex
self._close_streams(fos, writer)
self._logger.exiting(class_name=self._class_name, method_name=_method_name, result=yaml_file)
return yaml_file
def _write_dictionary_to_yaml_file(self, dictionary, writer, indent=''):
"""
Do the actual heavy lifting of converting a dictionary and writing it to the file. This method is
called recursively when a value of the dictionary entry is itself a dictionary.
:param dictionary: the Python dictionary to convert
:param writer: the java.io.PrintWriter for the output file
:param indent: the amount of indent to use (based on the level of recursion)
:raises: IOException: if an error occurs while writing the output
"""
if dictionary is None:
return
for key, value in dictionary.iteritems():
quoted_key = self._quotify_string(key)
if isinstance(value, DictionaryList):
writer.println(indent + quoted_key + ':')
self._write_dictionary_list_to_yaml_file(value, writer, indent)
elif isinstance(value, dict):
writer.println(indent + quoted_key + ':')
self._write_dictionary_to_yaml_file(value, writer, indent + self._indent_unit)
else:
writer.println(indent + quoted_key + ': ' + self._get_value_string(value))
return
def _write_dictionary_list_to_yaml_file(self, dictionary_list, writer, indent=''):
"""
Dictionary list is a special case for YAML. The result should look like:
items:
- key1: value1
key2: value2
key3:
subkey1: value1
subkey2: value2
:param dictionary: the Python dictionary to convert
:param writer: the java.io.PrintWriter for the output file
:param indent: the amount of indent to use (based on the level of recursion)
:raises: IOException: if an error occurs while writing the output
"""
if dictionary_list is None:
return
for dictionary in dictionary_list:
first = True
for key, value in dictionary.items():
quoted_key = self._quotify_string(key)
this_indent = indent + self._indent_unit
if first:
this_indent = indent + "- "
if isinstance(value, dict):
writer.println(this_indent + quoted_key + ':')
self._write_dictionary_to_yaml_file(value, writer, this_indent + self._indent_unit)
else:
writer.println(this_indent + quoted_key + ': ' + self._get_value_string(value))
first = False
return
def _get_value_string(self, value):
"""
Convert the Python value into the proper Yaml value
:param value: the Python value
:return: the Yaml value
"""
if value is None:
result = 'null'
elif type(value) is int or type(value) is long or type(value) is float:
result = str(value)
elif type(value) is list:
new_value = '['
for element in value:
new_value += ' ' + self._get_value_string(element) + ','
if len(new_value) > 1:
new_value = new_value[:-1]
new_value += ' ]'
result = str(new_value)
else:
result = self._quotify_string(str(value))
return result
def _close_streams(self, fos, writer):
"""
Method used to simplify closing output streams since WLST Jython does not support finally blocks...
:param fos: the output stream
:param writer: the print writer
"""
_method_name = '_close_streams'
# closing the writer also closes the fos...
if writer is not None:
writer.close()
elif fos is not None:
try:
fos.close()
except JIOException, ioe:
self._logger.fine('WLSDPLY-18012', ioe, ioe.getLocalizedMessage(),
class_name=self._class_name, method_name=_method_name)
return
def _quotify_string(self, text):
"""
Insert quotes around the string value if it contains Yaml special characters that require it,
or if the string is zero length.
:param text: the input string
:return: the quoted string, or the original string if no quoting was required
"""
if bool(re.search(self._requires_quotes_chars_regex, text)):
result = '\'' + _quote_embedded_quotes(text) + '\''
elif len(text) == 0:
result = '\'\''
else:
result = _quote_embedded_quotes(text)
return result
def _quote_embedded_quotes(text):
"""
Replace any embedded quotes with two quotes.
:param text: the text to quote
:return: the quoted text
"""
result = text
if '\'' in text:
result = result.replace('\'', '\'\'')
if '"' in text:
result = result.replace('"', '""')
return result
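# Editorial illustration of the quoting rules above (not part of the original file):
#   'plainValue'  -> plainValue        (no special characters, returned unchanged)
#   'key: value'  -> 'key: value'      (':' matches the special-character regex)
#   ''            -> ''                (empty strings are always quoted)
#   "it's"        -> it''s             (embedded quotes are doubled)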
``` |
{
"source": "11BP11/inverse_problems_GAN",
"score": 2
} |
#### File: 11BP11/inverse_problems_GAN/main.py
```python
import os
import sys
import scipy.misc
import numpy as np
import tensorflow as tf
import argparse
from datetime import datetime
import model
import utils
def main(args):
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if not os.path.exists(args.sample_dir):
os.makedirs(args.sample_dir)
if not os.path.exists('samples_progress'):
os.makedirs('samples_progress')
for i in range(8):
if not os.path.exists('samples_progress/part{:1d}'.format(i+1)):
os.makedirs('samples_progress/part{:1d}'.format(i+1))
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
with open(args.settings_file_name,"a") as settings_file:
for key, val in sorted(vars(args).items()):
settings_file.write(key + ": " + str(val) + "\n")
with open(args.progress_file_name,"a") as prog_file:
prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") + "Started\n")
with tf.Session(config=run_config) as sess:
dcgan = model.DCGAN(sess, args)
if args.train:
dcgan.train()
with open(args.progress_file_name,'a') as prog_file:
prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") + "Finished training.\n")
else:
if not dcgan.load(args.checkpoint_dir)[0]:
raise Exception("[!] Train a model first, then run test mode")
# Below is codes for visualization
if args.vis_type == 0:
vis_options = [6,7,9,10]
for option in vis_options:
print("Visualizing option %s" % option)
OPTION = option
#utils.visualize(sess, dcgan, args, OPTION)
utils.visualize(sess, dcgan, OPTION, save_input = True)
else:
OPTION = args.vis_type
utils.visualize(sess, dcgan, OPTION)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--nrof_epochs", type=int,
help="Epochs to train [8]", default=8)
parser.add_argument("--learning_rate", type=float,
help="Learning rate of for adam [0.0002]", default=0.0002)
parser.add_argument("--beta1", type=float,
help="Momentum term of adam [0.5]", default=0.5)
parser.add_argument("--train_size", type=int,
help="Number of train images to be used. If None, uses all. [None]", default=None)
parser.add_argument("--batch_size", type=int,
help="The size of batch images [64]", default=64)
parser.add_argument("--input_height", type=int,
help="The size of image to use (will be center cropped). [108]", default=108)
parser.add_argument("--input_width", type=int,
help="The size of image to use (will be center cropped). If None, same value as input_height [None]", default=None)
parser.add_argument("--output_height", type=int,
help="The size of the output images to produce [64]", default=64)
parser.add_argument("--output_width", type=int,
help="The size of the output images to produce. If None, same value as output_height [None]", default=None)
parser.add_argument("--dataset_name", type=str,
help="The name of dataset [celebA, mnist, lsun]", default="celebA")
parser.add_argument("--input_fname_pattern", type=str,
help="Glob pattern of filename of input images [*]", default="*.jpg")
parser.add_argument("--sample_dir", type=str,
help="Directory name to save the image samples [samples]", default="samples")
parser.add_argument("--checkpoint_dir", type=str,
help="Directory name to save the checkpoints [checkpoint]", default="checkpoint")
parser.add_argument("--train",
help="True for training, False for testing [False]", action='store_true')
parser.add_argument("--crop",
help="True for training, False for testing [False]", action='store_true')
parser.add_argument("--vis_type", type=int,
help="Visualization option; 0=all. [0]", default=0)
parser.add_argument("--lambda_loss", type=float,
help="Coefficient of additional loss. [10.]", default=10.)
parser.add_argument("--z_dim", type=int,
help="Dimension of the random input. [100]", default=100)
parser.add_argument("--g_feature_dim", type=int,
help="Dimension of the bottleneck layer. [100]", default=100)
parser.add_argument("--max_reach", type=int,
help="Parameter for mask creation. [12]", default=12)
parser.add_argument("--data_dir", type=str,
help="Directory name to load data. [data]", default="../../../data")
parser.add_argument('--settings_file_name', type=str,
help='Name (path) of the settings file.', default='settings.txt')
parser.add_argument('--progress_file_name', type=str,
help='Name (path) of the progress file.', default='progress.txt')
parser.add_argument('--problem_name', type=str,
help='Name (path) of the problem python file.', default='problems.problem')
parser.add_argument('--save_freq', type=int,
help='How often picuteres are saved.', default=100)
# Output Args
args = parser.parse_args(argv)
# Change some defaults
if args.dataset_name == "mnist":
args.input_height = 28
args.output_height = 28
if args.dataset_name == "cifar10":
args.input_height = 32
args.output_height = 32
if args.input_width is None:
args.input_width = args.input_height
if args.output_width is None:
args.output_width = args.output_height
options = vars(args)
with open(args.settings_file_name,"w") as settings_file:
settings_file.write("\n" + " ".join(sys.argv) + "\n\n")
return args
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args)
```
#### File: 11BP11/inverse_problems_GAN/model.py
```python
from __future__ import division
import os
import time
import math
import glob
import tensorflow as tf
import numpy as np
import importlib
import pickle
from datetime import datetime
import ops
import utils
class DCGAN(object):
def __init__(self, sess, args,
batch_size=64, nrof_samples = 64,
gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3):
"""
Args:
sess: TensorFlow session
args: Values for most things
batch_size: The size of batch. Should be specified before training.
nrof_samples: The number of samples. Should be specified before training.
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.args = args
self.args.batch_size = batch_size
self.args.nrof_samples = nrof_samples
self.args.gf_dim = gf_dim
self.args.df_dim = df_dim
self.args.gfc_dim = gfc_dim
self.args.dfc_dim = dfc_dim
# Import Problem Modul:
self.Problem = importlib.import_module(args.problem_name)
self.prepare_dataset()
self.args.grayscale = (self.args.c_dim == 1)
self.args.g_input_dim = [self.args.output_height, self.args.output_width,self.args.c_dim]
self.build_model()
def build_model(self):
if self.args.crop:
image_dims = [self.args.output_height, self.args.output_width, self.args.c_dim]
else:
image_dims = [self.args.input_height, self.args.input_width, self.args.c_dim]
self.d_inputs = tf.placeholder(
tf.float32, [self.args.batch_size] + image_dims, name='real_images')
self.z = tf.placeholder(
tf.float32, [None, self.args.z_dim], name='z')
self.g_inputs = tf.placeholder(
tf.float32, [self.args.batch_size] + self.args.g_input_dim, name='g_inputs')
#self.z_sum = ops.histogram_summary("z", self.z)
self.g_tform_info = self.Problem.g_tform_info_placeholder
self.G = self.generator(self.z, self.g_inputs)
self.D, self.D_logits = self.discriminator(self.d_inputs, reuse=False)
self.sampler = self.generator(self.z, self.g_inputs, sampler=True)
self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)
#self.d_sum = ops.histogram_summary("d", self.D)
#self.d__sum = ops.histogram_summary("d_", self.D_)
#self.G_sum = ops.image_summary("G", self.G)
def sigmoid_cross_entropy_with_logits(x, y):
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
self.G_tformed = self.Problem.transform_tf(self.G, self.g_tform_info)
self.sampler_tformed = self.Problem.transform_tf(self.sampler, self.g_tform_info)
self.d_loss_real = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D))) # = -log(sigmoid( D_logits ))
self.d_loss_fake = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_))) # = -log(1 - sigmoid( D_logits_ ))
# Normalising (should hold): errD <= log(4) ~ 1.39 (= error for random guessing)
self.d_loss = (self.d_loss_real + self.d_loss_fake) / np.log(4)
self.g_disc_loss = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_))) # = -log(sigmoid( D_logits_ ))
self.g_disc_loss = (self.g_disc_loss - np.log(2)) / np.log(4)
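    # Editorial note: with D = 0.5 everywhere (a discriminator guessing at random),
    # each cross-entropy term above equals log(2), so d_loss_real + d_loss_fake = log(4);
    # dividing by log(4) scales that baseline to 1. Likewise g_disc_loss equals log(2)
    # at D_ = 0.5, so subtracting log(2) and dividing by log(4) centres it at 0.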
self.g_prob_loss = tf.reduce_mean(self.Problem.problem_loss(self.g_inputs, self.G_tformed))
assert self.g_prob_loss.dtype == tf.float32
self.g_loss = self.g_disc_loss + self.args.lambda_loss * self.g_prob_loss
#self.d_loss_real_sum = ops.scalar_summary("d_loss_real", self.d_loss_real)
#self.d_loss_fake_sum = ops.scalar_summary("d_loss_fake", self.d_loss_fake)
#self.g_loss_sum = ops.scalar_summary("g_loss", self.g_loss)
#self.d_loss_sum = ops.scalar_summary("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
def train(self):
d_optim = tf.train.AdamOptimizer(self.args.learning_rate, beta1=self.args.beta1) \
.minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(self.args.learning_rate, beta1=self.args.beta1) \
.minimize(self.g_loss, var_list=self.g_vars)
self.sess.run(tf.global_variables_initializer(), feed_dict=None)
#self.g_sum = ops.merge_summary([self.z_sum, self.d__sum,
# self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])
#self.d_sum = ops.merge_summary(
# [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
#self.writer = ops.SummaryWriter("./logs", self.sess.graph)
sample_z = np.random.uniform(-1, 1, size=(self.args.nrof_samples , self.args.z_dim))
sample_images = utils.get_img(self, 0, self.args.nrof_samples, self.args.dataset_name, test=False)
test_images = utils.get_img(self, 0, self.args.nrof_samples, self.args.dataset_name, test=True)
sample_tform_info = self.Problem.create_tform_info(self.args)
test_tform_info = self.Problem.create_tform_info(self.args)
sample_g_inputs = self.Problem.transform(sample_images,sample_tform_info)
test_g_inputs = self.Problem.transform(test_images,test_tform_info)
    utils.save_images(sample_images, 'samples/original_images_s.png')
    utils.save_images(test_images, 'samples/original_images_t.png')
    utils.save_images(self.Problem.safe_format(sample_g_inputs), 'samples/original_inputs_s.png')
    utils.save_images(self.Problem.safe_format(test_g_inputs), 'samples/original_inputs_t.png')
sample_dict={self.z: sample_z, #For generator and discriminator
self.g_inputs: sample_g_inputs,
self.d_inputs: sample_images,
self.g_tform_info: sample_tform_info}
test_dict={self.z: sample_z,
self.g_inputs: test_g_inputs,
self.d_inputs: test_images,
self.g_tform_info: test_tform_info}
#Set up for visualizing difference from z value
z_range = utils.get_z_range(self.args.z_dim, self.args.batch_size)
nrof_batches = self.args.train_size // self.args.batch_size
counter = 0
start_epoch_nr = 0
start_batch_nr = 0
start_time = time.time()
could_load, checkpoint_counter = self.load(self.args.checkpoint_dir)
if could_load:
counter = checkpoint_counter
start_epoch_nr = checkpoint_counter // nrof_batches
start_batch_nr = checkpoint_counter % nrof_batches
print(" [*] Load SUCCESS (counter: " + str(counter) + ")")
with open(self.args.progress_file_name,"a") as prog_file:
prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") + \
"Loaded checkpoint " + str(counter) + "\n")
else:
print(" [!] Load failed...")
with open(self.args.progress_file_name,"w") as prog_file:
prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") + \
"No checkpoint found, starting training from scratch.\n")
for epoch0 in range(self.args.nrof_epochs):
epoch = start_epoch_nr + epoch0
with open(self.args.progress_file_name,'a') as prog_file:
prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") + \
"Started training epoch " + str(epoch) + "\n")
for idx0 in range(start_batch_nr,nrof_batches):
idx = idx0+1
batch_images = utils.get_img(self, idx0*self.args.batch_size,
self.args.batch_size, self.args.dataset_name, test=False)
batch_tform_info = self.Problem.create_tform_info(self.args)
batch_g_inputs = self.Problem.transform(batch_images, batch_tform_info)
batch_z = np.random.uniform(-1, 1, [self.args.batch_size, self.args.z_dim]).astype(np.float32)
D_dict = {self.d_inputs: batch_images,
self.z: batch_z,
self.g_inputs: batch_g_inputs}
G_dict = {self.z: batch_z,
self.g_inputs: batch_g_inputs,
self.g_tform_info: batch_tform_info}
# Update D network
_, err_D, err_G, err_G_disc, err_G_prob = \
self.sess.run([d_optim, self.d_loss, self.g_loss,self.g_disc_loss,self.g_prob_loss],
feed_dict={**D_dict,**G_dict})
#self.writer.add_summary(summary_str, counter)
# Update G network
self.sess.run(g_optim, feed_dict=G_dict)
#self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
self.sess.run(g_optim, feed_dict=G_dict)
#self.writer.add_summary(summary_str, counter)
counter += 1
time_str = time.strftime("%H:%M:%S",time.gmtime(time.time() - start_time))
print("Epoch: [{:2d}] [{:4d}/{:4d}] time: {}, d_loss: {:6.4f}, g_loss: {:6.4f}" \
.format(epoch, idx, nrof_batches, time_str, err_D, err_G))
#Should hold: errD <= log(4) ~ 1.39 (= error for random guessing)
if np.mod(counter, self.args.save_freq) == 0:
print("g_loss: {:.8f} (D) + {:g} * {:.8f} (problem) = {:.8f}".\
format(err_G_disc, self.args.lambda_loss, err_G_prob, err_G))
samples, samples_tformed, d_loss, g_loss = self.sess.run(
[self.sampler, self.sampler_tformed, self.d_loss, self.g_loss], feed_dict=sample_dict)
utils.save_images(samples, '{}/train_{:02d}_{:04d}_samples_s.png'.format(self.args.sample_dir, epoch, idx))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
test_samples, test_samples_tformed, test_d_loss, test_g_loss = self.sess.run(
[self.sampler, self.sampler_tformed, self.d_loss, self.g_loss], feed_dict=test_dict)
utils.save_images(test_samples,'{}/train_{:02d}_{:04d}_samples_t.png'.format(self.args.sample_dir, epoch, idx))
print("[Test] d_loss: %.8f, g_loss: %.8f" % (test_d_loss, test_g_loss))
with open(self.args.progress_file_name,'a') as prog_file:
out_str = "Epoch: [{:2d}] [{:4d}/{:4d}] ".format(epoch, idx, nrof_batches) + \
"\td_loss: {:6.4f} ".format(err_D) + \
"\tg_loss: {:6.4f} = {:.8f} (D) + {:g} * {:.8f} (problem)" \
.format(err_G, err_G_disc, self.args.lambda_loss, err_G_prob)
prog_file.write(out_str + "\n")
if np.mod(counter, 5*self.args.save_freq) == 0:
self.save(self.args.checkpoint_dir, counter)
utils.save_multiple(2,[samples, sample_images], 'train_{:02d}_{:04d}_comp'.format(epoch, idx))
save_pics = [sample_images, self.Problem.safe_format(sample_g_inputs), \
samples, self.Problem.safe_format(samples_tformed)]
utils.save_multiple(4, save_pics, 'train_{:02d}_{:04d}_ovw_s'.format(epoch, idx))
utils.save_multiple(2,[test_samples, test_images], 'train_{:02d}_{:04d}_comp_test'.format(epoch, idx))
save_pics = [test_images, self.Problem.safe_format(test_g_inputs), \
test_samples, self.Problem.safe_format(test_samples_tformed)]
utils.save_multiple(4, save_pics, 'train_{:02d}_{:04d}_ovw_t'.format(epoch, idx))
print("Checkpoint!")
#Visualize change with z:
print("visualizing for different z values ...")
for i in range(2):
input_idx = np.random.randint(self.args.batch_size)
vis_z_g_inputs = np.repeat([test_g_inputs[input_idx]],self.args.batch_size,axis=0)
vis_z_images = np.repeat([test_images[input_idx]],self.args.batch_size,axis=0)
vis_z_tform_info = np.repeat([test_tform_info[input_idx]],self.args.batch_size,axis=0)
vis_z_dict={self.z: z_range,
self.g_inputs: vis_z_g_inputs,
self.d_inputs: vis_z_images,
self.g_tform_info: vis_z_tform_info}
vis_z_samples = self.sess.run(self.sampler, feed_dict=vis_z_dict)
vis_z_merged = self.Problem.merge(vis_z_samples, vis_z_g_inputs, vis_z_tform_info)
utils.save_images(vis_z_merged, '{}/train_{:02d}_{:04d}_vis_z_{:01d}.png'.format(self.args.sample_dir, epoch, idx, input_idx))
print("Mean Standard deviation: " + str(np.mean(np.std(samples, axis=0))))
with open(self.args.progress_file_name,'a') as prog_file:
prog_file.write("\tMean Standard deviation: " + \
str(np.mean(np.std(samples, axis=0))) + "\n")
#Visualize at the end of every epoch
if epoch0<8:
for i in range(8):
for j in range(8):
pic_idx = 8*i + j
utils.save_images(test_samples[pic_idx:pic_idx+1:],
'samples_progress/part{:01d}/pic{:02d}_epoch{:02d}.jpg'.format(i+1, pic_idx, epoch))
start_batch_nr = 0
#save a final checkpoint
self.save(self.args.checkpoint_dir, counter)
with open(self.args.progress_file_name,'a') as prog_file:
prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") + "Finished training." + "\n")
def discriminator(self, image, y=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
h0 = ops.lrelu(ops.conv2d(image, self.args.df_dim, name='d_h0_conv'))
h1 = ops.lrelu(ops.bn_layer(ops.conv2d(h0, self.args.df_dim*2, name='d_h1_conv'), name="d_bn1"))
h2 = ops.lrelu(ops.bn_layer(ops.conv2d(h1, self.args.df_dim*4, name='d_h2_conv'), name="d_bn2"))
h3 = ops.lrelu(ops.bn_layer(ops.conv2d(h2, self.args.df_dim*8, name='d_h3_conv'), name="d_bn3"))
h4 = ops.linear(tf.reshape(h3, [self.args.batch_size, -1]), 1, 'd_h4_lin')
return tf.nn.sigmoid(h4), h4
def generator(self, z, g_inputs, y=None, sampler=False):
with tf.variable_scope("generator") as scope:
if sampler:
scope.reuse_variables()
do_train = not sampler
bs = self.args.batch_size
conv_out_size_same = lambda h, w, stride: [ int(math.ceil(s/stride)) for s in [h,w] ]
s_h, s_w = self.args.output_height, self.args.output_width
s_h2, s_w2 = conv_out_size_same(s_h, s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, s_w8, 2)
# *** First layers: g_inputs => g_flat *** #
gi = ops.lrelu(ops.conv2d(g_inputs, self.args.df_dim, name='g_gi0_conv'))
for idx in range(1,4):
conv = ops.conv2d(gi, self.args.df_dim*(2**idx), name="g_gi" + str(idx) + "_conv")
gi = ops.lrelu(ops.bn_layer(conv,train=do_train, name="gi" + str(idx) + "_bn"))
gi_flat = ops.linear(tf.reshape(gi, [bs, -1]), self.args.g_feature_dim, 'g_gi4_lin')
# *** Map gi_flat to [-1,1] to be more similar to z: *** #
gi_flat = tf.nn.tanh(gi_flat)
# *** Layers from flat (z and gi_flat) to full size: *** #
z0 = ops.concat( [gi_flat, z], -1 )
gd0 = ops.linear( z0, self.args.gf_dim*8*s_h16*s_w16, 'g_h0_lin')
gd0 = tf.reshape( gd0, [bs, s_h16, s_w16, self.args.gf_dim * 8])
gd0 = tf.nn.relu(ops.bn_layer(gd0, train=do_train, name="g_bn0"))
gd = gd0
s = [None, s_h8,s_h4,s_h2,s_h]
m = [None, 4,2,2,2]
for idx in range(1,5):
deconv = ops.deconv2d(gd,[bs,s[idx],s[idx],self.args.gf_dim*m[idx]],name="g_h"+str(idx))
gd = tf.nn.relu(ops.bn_layer(deconv, train=do_train, name="g_bn"+str(idx)))
gd4 = ops.concat( [ gd, g_inputs], -1)
# *** 2 Layers to merge gd and g_inputs: *** #
gd5 = ops.deconv2d(gd4, [bs, s_h, s_w, self.args.gf_dim], k_h = 1, k_w = 1, d_h=1, d_w=1, name='g_h5')
gd5 = tf.nn.relu(gd5)
gd6 = ops.deconv2d(gd5, [bs, s_h, s_w, self.args.c_dim], k_h = 1, k_w = 1, d_h=1, d_w=1, name='g_h6')
return tf.nn.sigmoid(gd6)
def prepare_dataset(self):
#if "jpg" in self.input_fname_pattern or "png" in self.input_fname_pattern:
if self.args.dataset_name == "celebA":
data_paths = glob.glob(os.path.join(self.args.data_dir, self.args.dataset_name, self.args.input_fname_pattern))
imreadImg = utils.imread(data_paths[0])
if len(imreadImg.shape) >= 3: #check if image is a non-grayscale image by checking channel number
self.args.c_dim = imreadImg.shape[-1]
else:
self.args.c_dim = 1
train_size = len(data_paths)-1000
if self.args.train_size is not None:
self.args.train_size = min(self.args.train_size,train_size)
else:
self.args.train_size = train_size
self.data_paths = data_paths[1000:1000+self.args.train_size]
self.data_paths_val = data_paths[:1000]
#elif "ubyte" in self.input_fname_pattern
elif self.args.dataset_name == "mnist" or self.args.dataset_name == "cifar10":
if self.args.dataset_name == "mnist":
self.data_X, self.data_X_val = self.load_mnist()
elif self.args.dataset_name == "cifar10":
#data = self.load_cifar10_batch(1)
#self.data_X_val, self.data_X = data[:100], data[100:]
self.data_X = self.load_cifar10_batch(1)
for i in range(2,6):
self.data_X = np.concatenate((self.data_X, self.load_cifar10_batch(i)), axis=0)
self.data_X_val = self.load_cifar10_batch("test")
if self.args.train_size is not None:
self.data_X = self.data_X[:self.args.train_size]
self.args.train_size = len(self.data_X)
self.args.c_dim = self.data_X[0].shape[-1]
def load_mnist(self):
data_dir = os.path.join(self.args.data_dir, self.args.dataset_name)
        # IDX image files start with a 16-byte header (magic number, image count,
        # rows, cols); open in binary mode so np.fromfile reads the raw bytes.
        with open(os.path.join(data_dir,'train-images-idx3-ubyte'), 'rb') as train_file:
            loaded = np.fromfile(file=train_file,dtype=np.uint8)
            trX = loaded[16:].reshape((60000,28,28,1)).astype(np.float)
        with open(os.path.join(data_dir,'t10k-images-idx3-ubyte'), 'rb') as test_file:
            loaded = np.fromfile(file=test_file,dtype=np.uint8)
            teX = loaded[16:].reshape((10000,28,28,1)).astype(np.float)
#X = np.concatenate((trX, teX), axis=0)
seed = 547
np.random.seed(seed)
#np.random.shuffle(X)
np.random.shuffle(trX)
np.random.shuffle(teX)
return trX/255., teX/255.
def load_cifar10_batch(self, batch_id):
data_dir = os.path.join(self.args.data_dir, self.args.dataset_name, "cifar-10-batches-py")
batch_name = "test_batch" if batch_id == "test" else "data_batch_" + str(batch_id)
with open(os.path.join(data_dir,batch_name), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
features = np.array(features).astype(np.float)
labels = batch['labels']
labels = np.array(labels)
good_labels = [0,1,5,7,8] #plane, car, dog, horse, ship
features = features[np.isin(labels, good_labels)]
return features/255.
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.args.dataset_name, self.args.batch_size,
self.args.output_height, self.args.output_width)
def save(self, checkpoint_dir, step):
model_name = "DCGAN.model"
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            counter = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
```
#### File: inverse_problems_GAN/problems/upsampling.py
```python
import tensorflow as tf
import numpy as np
from problems.problem import *
name = "upsample pictures"
g_tf_info_placeholder = tf.placeholder(tf.float32, [None], name='g_transform_info')
def problem_loss(x_tformed, g_tformed):
return tf.reduce_mean(tf.abs(x_tformed-g_tformed),[1,2,3])
def transform_tf(x, g_tf_info): #use assign?
h, w = x.shape[1:3]
assert h % 8 == 0 and w % 8 == 0
h0, w0 = h//8, w//8
avg_line_list = []
for i in range(8):
avg_list = []
for j in range(8):
region = x[:,i*h0:(i+1)*h0,j*w0:(j+1)*w0,:]
            avg_list += [tf.reduce_mean(region,axis=[1,2])]*int(w0)
        avg_line_list += [tf.stack(avg_list,axis=1)]*int(h0)
output = tf.stack(avg_line_list,axis=1)
return output
def transform(x, g_tf_info):
output = np.zeros_like(x, dtype=np.float32)
h, w = output.shape[1:3]
assert h % 8 == 0 and w % 8 == 0
h0, w0 = h//8, w//8
for i in range(8):
for j in range(8):
region = x[:,i*h0:(i+1)*h0,j*w0:(j+1)*w0,:]
avg = np.mean(region,axis=(1,2), keepdims=True)
            avg_region = np.tile(avg,[1,h0,w0,1])
output[:,i*h0:(i+1)*h0,j*w0:(j+1)*w0,:] = avg_region
return output
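# Note: both transform_tf and transform produce a block-averaged "mosaic" of x,
# where each cell of an 8x8 grid is replaced by its mean; this block-averaged
# image acts as the low-resolution measurement for the upsampling inverse problem.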
def create_tform_info(args):
return [0]*args.batch_size
def safe_format(tformed):
return tformed
def merge(g_output, x_tformed, g_tform_info):
merged = np.copy(g_output)
for itr in range(3): #make result such that transform(result) ~ x_tformed.
merged = g_output - transform(g_output, None) + x_tformed
merged = np.clip(merged,0,1)
return merged
``` |
{
"source": "11BP11/Semantic_Inpainting_using_a_GAN",
"score": 2
} |
#### File: 11BP11/Semantic_Inpainting_using_a_GAN/main.py
```python
import os
import sys
import scipy.misc
import numpy as np
from model import DCGAN
from utils import pp, visualize, to_json, show_all_variables
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
#Adam default, TensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("input_height", 108, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", None, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", 64, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("crop", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_integer("generate_test_images", 100, "Number of images to generate during test. [100]")
flags.DEFINE_integer("vis_type", 0, "Visualization option; 0=all. [0]")
flags.DEFINE_integer("img_height", None, "Height of img given to G. If None, 4*floor(output_height/10) [None]")
flags.DEFINE_boolean("use_labels", False, "Whether to use labels. Only for mnist [False]")
flags.DEFINE_float("lambda_loss", 100., "Coefficient of L1-loss. [100.]")
flags.DEFINE_boolean("split_data", False, "Split the data between Gen and Disc, and further between train and test. [False]")
flags.DEFINE_boolean("gen_use_img", False, "True for the generator using the input picture (img) as output. [False]")
flags.DEFINE_boolean("use_border", False, "True for using the top row throughout the generator. [False]")
flags.DEFINE_integer("z_dim", 100, "Dimension of the random input. [100]")
flags.DEFINE_boolean("drop_discriminator", False, "If True ignores the D and uses loss function instead. [False]")
FLAGS = flags.FLAGS
def main(_):
pp.pprint(flags.FLAGS.__flags)
if FLAGS.input_width is None:
FLAGS.input_width = FLAGS.input_height
if FLAGS.output_width is None:
FLAGS.output_width = FLAGS.output_height
    if FLAGS.img_height is None:
        FLAGS.img_height = 4*int(FLAGS.output_height / 10)
        print("No img_height supplied. img_height = %s" % FLAGS.img_height)
    if FLAGS.split_data:
        FLAGS.epoch = 2*FLAGS.epoch
        print("Number of epochs doubled to make it more comparable to the version without split data")
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
#if not os.path.exists('samples_progress'):
# os.makedirs('samples_progress')
for i in range(8):
if not os.path.exists('samples_progress/part{:1d}'.format(i+1)):
os.makedirs('samples_progress/part{:1d}'.format(i+1))
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
with open('settings.txt', "w") as f:
f.write("\n" + " ".join(sys.argv) + "\n\n")
print("FLAGS values:")
for key, val in flags.FLAGS.__flags.items():
print(str([key, val]))
f.write(str([key, val])+"\n")
print()
with tf.Session(config=run_config) as sess:
if FLAGS.dataset == 'mnist':
dcgan = DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
y_dim=10,
z_dim=FLAGS.z_dim,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
img_height=FLAGS.img_height,
use_labels=FLAGS.use_labels,
lambda_loss=FLAGS.lambda_loss,
split_data=FLAGS.split_data,
gen_use_img=FLAGS.gen_use_img,
drop_discriminator=FLAGS.drop_discriminator,
use_border=FLAGS.use_border)
else:
dcgan = DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
z_dim=FLAGS.z_dim,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
img_height=FLAGS.img_height,
lambda_loss=FLAGS.lambda_loss,
split_data=FLAGS.split_data,
gen_use_img=FLAGS.gen_use_img,
drop_discriminator=FLAGS.drop_discriminator,
use_border=FLAGS.use_border)
show_all_variables()
if FLAGS.train:
dcgan.train(FLAGS)
else:
if not dcgan.load(FLAGS.checkpoint_dir)[0]:
raise Exception("[!] Train a model first, then run test mode")
# Below is codes for visualization
#OPTION = 1
if FLAGS.vis_type == 0:
vis_options = [6,7,9,10]
for option in vis_options:
print("Visualizing option %s" % option)
OPTION = option
visualize(sess, dcgan, FLAGS, OPTION)
else:
OPTION = FLAGS.vis_type
visualize(sess, dcgan, FLAGS, OPTION)
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "11chankun/debaised-analysis",
"score": 3
} |
#### File: intents/util/test_insert_as_column.py
```python
import sys
sys.path.append(".")
import pandas
import time
import randstr, random
from util import aspects, enums, insert_as_column
def test_1():
"""
This test tests the list_index_slicing_passed function of the
insert_as_column module.
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
# selecting only first 20 rows of the table
table = table.head(20)
# bool_list contains True/False for each row that passes slicing condition
bool_list = insert_as_column.list_index_slicing_passed(table, \
[('Age', enums.Filters.LESS_THAN, 51)])
print(bool_list)
expected_bool_list = '[True, False, True, True, False, False, False, False, False, False, True, False, False, True, True, True, False, True, True, False]'
assert(expected_bool_list == str(bool_list))
def test_2():
"""
This test tests the list_index_in_topk function of the
insert_as_column module.
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
# selecting only first 20 rows of the table
table = table.head(20)
    # bool_list contains True/False for whether each row will appear in the top-k
bool_list = insert_as_column.list_index_in_topk(table, 'Age', [], True,
5,
slices=[('Age', enums.Filters.LESS_THAN, 51)])
print(bool_list)
expected_bool_list = '[False, False, True, False, False, False, False, False, False, False, True, False, False, False, True, True, False, False, True, False]'
assert(expected_bool_list == str(bool_list))
print(test_1.__doc__)
test_1()
print(test_2.__doc__)
test_2()
print('Test cases completed')
``` |
{
"source": "11craft/immercv",
"score": 2
} |
#### File: immercv/immercv/context_processors.py
```python
from django.conf import settings
from django.contrib.sites.models import Site
def analytics(request):
return {
'PIWIK_SERVER': settings.PIWIK_SERVER,
'PIWIK_SITE_ID': settings.PIWIK_SITE_ID,
}
def caching(request):
return {
'cache_timeout': 0 if request.user.is_authenticated() else 300,
}
def disqus(request):
return {
'DISQUS_SITE_ID': settings.DISQUS_SITE_ID,
}
def site(request):
return {
'site': Site.objects.get_current(),
}
```
#### File: immercv/cvgraph/sitemaps.py
```python
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from immercv.cvgraph.models import CV
class CvsSitemap(Sitemap):
changefreq = 'weekly'
priority = 0.5
def items(self):
return CV.nodes.all()
def location(self, obj):
return reverse('cvgraph:cv_detail', kwargs=dict(
id=obj._id,
slug=slugify(obj.name),
))
``` |
{
"source": "11craft/pelican-ipythonnb",
"score": 2
} |
#### File: 11craft/pelican-ipythonnb/ipynb.py
```python
from __future__ import unicode_literals
import os
import json
import logging
import markdown
try:
# Py3k
from html.parser import HTMLParser
except ImportError:
# Py2.7
from HTMLParser import HTMLParser
from pelican import signals
from pelican.readers import MarkdownReader, HTMLReader, BaseReader
import IPython
from IPython.config import Config
from IPython.nbconvert.exporters import HTMLExporter
try:
    from IPython.nbconvert.filters.highlight import _pygment_highlight as _pygments_highlight
except ImportError:
# IPython < 2.0
from IPython.nbconvert.filters.highlight import _pygments_highlight
try:
from bs4 import BeautifulSoup
except:
BeautifulSoup = None
from pygments.formatters import HtmlFormatter
logger = logging.getLogger(__name__)
CUSTOM_CSS = '''
<style type="text/css">
/* General text, input and output cells */
div.cell {
border: none;
}
.inner_cell {
width: 100%
}
.text_cell .prompt {
display: none;
}
div.cell {
margin: 0;
padding: 0;
}
div.input_area {
border: none;
background: none;
margin-left: 6px;
}
div.output_subarea {
padding: 0;
}
pre.ipynb {
padding: 5px 5px 5px 10px;
}
/* DataFrame */
table.dataframe {
font-family: Arial, sans-serif;
font-size: 13px;
line-height: 20px;
}
table.dataframe th, td {
padding: 4px;
text-align: left;
}
</style>
'''
LATEX_CUSTOM_SCRIPT = '''
<script type="text/javascript">if (!document.getElementById('mathjaxscript_pelican_#%@#$@#')) {
var mathjaxscript = document.createElement('script');
mathjaxscript.id = 'mathjaxscript_pelican_#%@#$@#';
mathjaxscript.type = 'text/javascript';
mathjaxscript.src = '//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML';
mathjaxscript[(window.opera ? "innerHTML" : "text")] =
"MathJax.Hub.Config({" +
" config: ['MMLorHTML.js']," +
" TeX: { extensions: ['AMSmath.js','AMSsymbols.js','noErrors.js','noUndefined.js'], equationNumbers: { autoNumber: 'AMS' } }," +
" jax: ['input/TeX','input/MathML','output/HTML-CSS']," +
" extensions: ['tex2jax.js','mml2jax.js','MathMenu.js','MathZoom.js']," +
" displayAlign: 'center'," +
" displayIndent: '0em'," +
" showMathMenu: true," +
" tex2jax: { " +
" inlineMath: [ ['$','$'] ], " +
" displayMath: [ ['$$','$$'] ]," +
" processEscapes: true," +
" preview: 'TeX'," +
" }, " +
" 'HTML-CSS': { " +
" styles: { '.MathJax_Display, .MathJax .mo, .MathJax .mi, .MathJax .mn': {color: 'black ! important'} }" +
" } " +
"}); ";
(document.body || document.getElementsByTagName('head')[0]).appendChild(mathjaxscript);
}
</script>
'''
def register():
signals.initialized.connect(add_reader)
def add_reader(arg):
arg.settings['READERS']['ipynb'] = IPythonNB
class IPythonNB(BaseReader):
enabled = True
file_extensions = ['ipynb']
def read(self, filepath):
        logger.info('Reading notebook %s', filepath)
metadata = {}
# Files
filedir = os.path.dirname(filepath)
filename = os.path.basename(filepath)
metadata_filename = filename.split('.')[0] + '.ipynb-meta'
metadata_filepath = os.path.join(filedir, metadata_filename)
# Load metadata
if os.path.exists(metadata_filepath):
# Metadata is on a external file, process using Pelican MD Reader
md_reader = MarkdownReader(self.settings)
_content, metadata = md_reader.read(metadata_filepath)
else:
# Load metadata from ipython notebook file
ipynb_file = open(filepath)
metadata = json.load(ipynb_file)['metadata']
# Fix metadata to pelican standards
            for key, value in list(metadata.items()):  # copy items, since keys are replaced below
del metadata[key]
key = key.lower()
metadata[key] = self.process_metadata(key, value)
metadata['ipython'] = True
# Convert ipython notebook to html
config = Config({'CSSHTMLHeaderTransformer': {'enabled': True,
'highlight_class': '.highlight-ipynb'}})
exporter = HTMLExporter(config=config, template_file='basic',
filters={'highlight2html': custom_highlighter})
content, info = exporter.from_filename(filepath)
if BeautifulSoup:
soup = BeautifulSoup(content)
for i in soup.findAll("div", {"class" : "input"}):
if i.findChildren()[1].find(text='#ignore') is not None:
i.extract()
else:
soup = content
# Process using Pelican HTMLReader
content = '<body>{0}</body>'.format(soup) # So Pelican HTMLReader works
parser = MyHTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
body = parser.body
summary = parser.summary
metadata['summary'] = summary
# Remove some CSS styles, so it doesn't break the themes.
def filter_tags(style_text):
style_list = style_text.split('\n')
exclude = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'a', 'ul', 'ol', 'li',
'.rendered_html', '@media', '.navbar', 'nav.navbar', '.navbar-text',
'code', 'pre', 'div.text_cell_render']
style_list = [i for i in style_list if len(list(filter(i.startswith, exclude))) == 0]
ans = '\n'.join(style_list)
return '<style type=\"text/css\">{0}</style>'.format(ans)
css = '\n'.join(filter_tags(css) for css in info['inlining']['css'])
css = CUSTOM_CSS + css
body = css + body + LATEX_CUSTOM_SCRIPT
return body, metadata
class MyHTMLParser(HTMLReader._HTMLParser):
"""
Custom Pelican `HTMLReader._HTMLParser` to create the summary of the content
based on settings['SUMMARY_MAX_LENGTH'].
    Summary generation stops as soon as it finds a div containing IPython notebook code cells.
    This is needed in order to generate valid HTML for the summary;
    a simple string split would break the HTML and cause errors in the theme.
    The downside is that the summary length is not exactly as specified: it stops at
    completed div/p/li/etc. tags.
"""
def __init__(self, settings, filename):
HTMLReader._HTMLParser.__init__(self, settings, filename)
self.wordcount = 0
self.summary = None
self.stop_tags = [('div', ('class', 'input')), ('div', ('class', 'output'))]
if 'IPYNB_STOP_SUMMARY_TAGS' in self.settings.keys():
self.stop_tags = self.settings['IPYNB_STOP_SUMMARY_TAGS']
if 'IPYNB_EXTEND_STOP_SUMMARY_TAGS' in self.settings.keys():
self.stop_tags.extend(self.settings['IPYNB_EXTEND_STOP_SUMMARY_TAGS'])
def handle_starttag(self, tag, attrs):
HTMLReader._HTMLParser.handle_starttag(self, tag, attrs)
if self.wordcount < self.settings['SUMMARY_MAX_LENGTH']:
mask = [stoptag[0] == tag and (stoptag[1] is None or stoptag[1] in attrs) for stoptag in self.stop_tags]
if any(mask):
self.summary = self._data_buffer
self.wordcount = self.settings['SUMMARY_MAX_LENGTH']
def handle_endtag(self, tag):
HTMLReader._HTMLParser.handle_endtag(self, tag)
if self.wordcount < self.settings['SUMMARY_MAX_LENGTH']:
self.wordcount = len(strip_tags(self._data_buffer).split(' '))
if self.wordcount >= self.settings['SUMMARY_MAX_LENGTH']:
self.summary = self._data_buffer
def strip_tags(html):
s = HTMLTagStripper()
s.feed(html)
return s.get_data()
class HTMLTagStripper(HTMLParser):
"""
Custom HTML Parser to strip HTML tags
Useful for summary creation
"""
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, html):
self.fed.append(html)
def get_data(self):
return ''.join(self.fed)
def custom_highlighter(source, language='ipython', metadata=None):
"""
    Prefixes the Pygments syntax highlighting classes with `highlight-ipynb`
    so it doesn't clash with the theme's own Pygments styles.
    It modifies both CSS prefixes and HTML tags.
"""
if not language:
language = 'ipython'
formatter = HtmlFormatter(cssclass='highlight-ipynb')
output = _pygments_highlight(source, formatter, language, metadata)
output = output.replace('<pre>', '<pre class="ipynb">')
return output
``` |
{
"source": "11Domino/netmiko",
"score": 2
} |
#### File: netmiko/centec/centec_os.py
```python
from netmiko.cisco_base_connection import CiscoBaseConnection
import time
import re
class CentecOSBase(CiscoBaseConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
self.disable_paging()
self.set_terminal_width(command="terminal length 0")
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def config_mode(self, config_command="configure terminal", pattern=""):
"""
Enter into configuration mode on remote device.
Centec IOS devices abbreviate the prompt at 20 chars in config mode
"""
if not pattern:
pattern = re.escape(self.base_prompt[:16])
return super().config_mode(config_command=config_command, pattern=pattern)
def save_config(self, cmd="write", confirm=False, confirm_response=""):
"""Save config: write"""
return super().save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
class CentecOSSSH(CentecOSBase):
pass
class CentecOSTelnet(CentecOSBase):
pass
``` |
{
"source": "11Firefox11/CleverTemplate",
"score": 3
} |
#### File: CleverTemplate/modules/ManageConfigs.py
```python
from .CtExceptions import *
import os, json, pathlib
class CleverConfig:
ConfigFile = "ct-config.json"
parameterobj = {
"text": {"type": str, "defval": "None"},
"number": {"type": float, "defval": 1},
"boolean": {"type": bool, "defval": True},
"list":{"type":list, "defval":[1,2,3]}
}
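    # A ct-config.json is expected to map template file names to their parameters,
    # where each parameter is a list of [type name, optional default value].
    # Illustrative sketch only (the file and parameter names below are hypothetical):
    #
    #   {
    #       "index.html": {
    #           "title": ["text", "My page"],
    #           "columns": ["number", 2]
    #       }
    #   }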
def __init__(self, path):
self.configpath = path
self.skippedfiles = []
self.skippedparameters = {}
def ConfigParameters(self, forcedefparval=True, checkparams=None):
data = self.validfilesdict
output = {}
for file in data:
            if checkparams is None or (isinstance(checkparams, dict) and file in checkparams):
params = {}
skips = {}
for param in data[file]:
if checkparams == None:
if param != None and param not in params:
paramoptions = data[file][param]
try:
params[param] = CleverConfig.check_parameteroptions(paramoptions, forcedefparval)
except Exception as e:
skips[param] = str(e)
else:
if file in checkparams and param in checkparams[file]:
currentparam = data[file][param]
currtype = CleverConfig.parameterobj[currentparam[0]]['type']
try:
currentcustomparam = currtype(checkparams[file][param])
except:
currentcustomparam = checkparams[file][param]
try:
if currtype == type(currentcustomparam) and currentcustomparam != "":
if currtype == float:
if float(currentcustomparam).is_integer():
currentcustomparam = int(currentcustomparam)
params[param] = currentcustomparam
elif len(currentparam) > 1 and CleverConfig.parameterobj[currentparam[0]]['type'] == type(currentparam[1]):
params[param] = currentparam[1]
elif forcedefparval:
                                    params[param] = CleverConfig.parameterobj[currentparam[0]]['defval']
else:
raise CustomParameterValue(currentcustomparam, currentparam[0], "custom parameter value", "does not match with the actual config file parameter type: ")
except Exception as e:
skips[param] = str(e)
if params != {}:
output[file] = params
if skips != {}:
self.skippedparameters[file] = skips
return output
@staticmethod
def check_parameteroptions(options, forcedefval=True):
if len(options) > 0 and type(options) == list:
if options[0] in CleverConfig.parameterobj:
paramtype = CleverConfig.parameterobj[options[0]]['type']
if len(options) > 1 and type(options[1]) != paramtype or len(options) == 1:
if forcedefval:
currdefval = CleverConfig.parameterobj[options[0]]['defval']
if len(options) > 1:
options[1] = currdefval
else:
options.append(currdefval)
else:
raise ParameterOptions(options, message=f"parameter does not have default value")
else:
raise ParameterOptions(options, message=f"'{options[0]}' is not a valid parameter type")
else:
raise ParameterOptions(options, message="parameter options can't be empty")
return options
@property
def validfilesdict(self):
data = self.configcontent
outputdata = {}
maindir = pathlib.Path(self.configpath).parent
for file in data:
fullfile = os.path.join(maindir, file)
if os.path.isfile(fullfile):
outputdata[fullfile] = data[file]
else:
self.skippedfiles.append(fullfile)
if outputdata != {}:
return outputdata
else:
raise ConfigFileSyntaxError(self.ConfigFile, message="can't find any existing files to use in the directory, bad syntax")
@property
def configcontent(self):
return json.load(open(self.configpath))
@property
def configpath(self):
canbepaths = [self.path, os.path.join(self.path, CleverConfig.ConfigFile)]
for canbepath in canbepaths:
if os.path.isfile(canbepath):
return os.path.abspath(canbepath)
raise PathMustBe(self.path, "path", mustbetype=f"directory that contains the {CleverConfig.ConfigFile} file, or must point directly on the file")
@configpath.setter
def configpath(self, path):
self.path = path
@classmethod
def set_configfile(cls, filename):
cls.ConfigFile = filename
``` |
{
"source": "11harveyj/qa-scrapers",
"score": 3
} |
#### File: yahoo-answers/discretizer/main.py
```python
import sys
import os
from discretizer import Discretizer
def main():
program_name = os.path.basename(sys.argv[0])
#Database name
db_files = {'yahoo': 'no_date_database.pdl'}
try:
db_names = sys.argv[1]
except IndexError:
raise Exception('No db name. Please, re-run as {0} dbname.pdl'.format(program_name))
if db_names == 'all':
discretizer = Discretizer(db_names, db_files)
else:
try:
discretizer = Discretizer(db_names, {db_names: db_files.get(db_names)})
except KeyError:
raise Exception('Invalid db name {0}. Please, check the name and re-run.'.format(db_names))
discretizer.load_db(check=False, fix=False, save_to_file=False)
corpus = discretizer.build_corpus()
stems = discretizer.build_stems(corpus)
stemmed_vocabulary = discretizer.build_vocabulary(stems)
distib_matrix = discretizer.build_distribution_matrix(stems)
# grouping
threads = discretizer.load_threads()
# discretization and sorting
threads = discretizer.compute_features(threads, stemmed_vocabulary, distib_matrix)
discretizer.save_csv(threads)
if __name__ == "__main__":
sys.exit(main())
"""db = Base('dotnet-v1.pydb', save_to_file=False)
db.open()
#recs = [r for r in db if r('type') == 'question' and r('answers') > 0]
rec = (db("type") == 'question') & (db("answers") > 0)
print len(rec)"""
```
#### File: yahooscraper/yahooscraper/pipelines.py
```python
from pydblite import Base
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
import codecs
import datetime
class DBPipeline(object):
def __init__(self):
#Creating log file
filename = "session_log.txt"
self.log_target = codecs.open(filename, 'a+', encoding='utf-8')
self.log_target.truncate()
self.log_target.write("***New session started at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" +"\n")
#Creating database for items
self.db = Base('QuestionThreadExtracted.pdl')
self.db.create('uid', 'type', 'author', 'title', 'text', 'date_time',
'tags', 'views', 'answers', 'resolve', 'upvotes', 'url', mode="open")
#Some data for the log file
self.number_of_questions = 0
self.number_of_answers = 0
self.last_id=0
dispatcher.connect(self.spider_closed, signals.spider_closed)
def process_item(self, item, spider):
self.db.insert(uid=item['uid'],
type=item['type'],
author=item['author'],
title=item['title'],
text=item['text'],
date_time=item['date_time'],
tags=item['tags'],
views=item['views'],
answers=item['answers'],
resolve=item['resolve'],
upvotes=item['upvotes'],
url=item['url']
)
#Count questions and answers
if "question" in item['type']:
self.number_of_questions+=1
if self.last_id<item['uid']:
self.last_id=item['uid']
else:
self.number_of_answers+=1
self.db.commit()
return item
def spider_closed(self, spider):
        self.log_target.write("Questions found: "+ str(self.number_of_questions) + "\n")
        self.log_target.write("Answers found: "+ str(self.number_of_answers) + "\n")
self.log_target.write("Last UID: "+str(self.last_id) + "\n" + "\n")
self.log_target.write("***Session End at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" +"\n")
self.log_target.close()
```
#### File: yahooscraper/spiders/YahooScraper.py
```python
import datetime
import html2text
from scrapy.selector import HtmlXPathSelector
from selenium.common.exceptions import NoSuchElementException
import scrapy
from pydblite import Base
from ..items import YahooItem
import sys
import parsedatetime as pdt
# This class holds the elements related to a question thread:
# its URL and the question's insertion date
class UrlDate():
def __init__(self, url, date):
self.url = url
self.date = date
class YahooScraper(scrapy.Spider):
# This is the start uid related to question thread
uid = 0
url_to_scrape = []
# Name of this spider
name = "yahoo"
allowed_domains = ["yahoo.com"]
start_urls = ["https://answers.yahoo.com/dir/index/discover"]
BASE_URL = 'https://answers.yahoo.com/question'
def __init__(self, database_name=None):
print ("Opening " + database_name)
db_r = Base(database_name)
# Choose the DB of the Question Thread URL
db_r.create('url', 'date', mode="open")
# Check if the DB is empty or new
if len(db_r)==0:
print ("ERROR: Database not found or empty")
sys.exit()
else:
print ("Database elements: " + str(len(db_r)))
for r in db_r:
self.url_to_scrape.append(UrlDate(r["url"], r["date"]))
# Making a SET of the Database in order to delete duplicate URLS
self.url_to_scrape = {x.url: x for x in self.url_to_scrape}.values()
print ("Database elements after set operation: " + str(len(db_r)))
def parse(self, response):
# Send scrapy scrape request for any question thread
print ("Start the scraping process from the URL database...")
for any_url in self.url_to_scrape:
yield scrapy.Request(any_url.url, callback=self.parse_page)
def parse_page(self, response):
# Time tools
c = pdt.Constants()
p = pdt.Calendar(c)
f = '%Y-%m-%d %H:%M:%S'
now = datetime.datetime.now()
# Start to scraping a single question
#Checking question category
try:
hxs = HtmlXPathSelector(response)
category = hxs.xpath(
'(//a[contains(@class,"Clr-b")])[2]').extract()
h = html2text.HTML2Text()
h.ignore_links = True
category_text = h.handle(category[0])
url_category = str(category_text).strip()
except IndexError:
            print(str(self.uid) + " Warning: this URL is no longer available...")
url_category = "Error"
# increment id
# copy id and use uid_copy in order to preserve from concurrent request
self.uid = self.uid + 1
uid_copy = self.uid
# Print current uid any 100 times
if self.uid % 100 == 0:
print (str(self.uid))
# Initialize scrapy item
item = YahooItem()
# Read in the date field associated to URL if info data are present
for istance in self.url_to_scrape:
if response.url == istance.url:
if istance.date == "not available":
item['date_time'] = "not available"
break
else:
data_format = p.parseDT(str(
str(istance.date).replace("\xc2\xb7", "").strip()))
item['date_time'] = data_format[0].strftime(f)
break
item['type'] = "question"
item['uid'] = uid_copy
item['url'] = response.url
item['tags'] = "N/A"
item['views'] = 0
item['upvotes'] = 0
text_to_gain = hxs.xpath('//h1').extract()
# Take title of the question
item['title'] = (
html2text.html2text(text_to_gain[0]).encode("utf8").strip())
# Take text from the question
full_text_answer = hxs.xpath(
'//span[contains(@class,"ya-q-full-text Ol-n")]').extract()
if full_text_answer:
item['text'] = html2text.html2text(full_text_answer[0]).encode(
'utf-8', 'ignore')
else:
text_to_gain = hxs.xpath(
'//span[contains(@class,"ya-q-text")]').extract()
if text_to_gain:
item['text'] = html2text.html2text(text_to_gain[0]).encode(
'utf-8', 'ignore')
# Take username of the questioner
text_to_gain = hxs.xpath(
'//div[contains(@id,"yq-question-detail-profile-img")]'+
'/a/img/@alt').extract()
if text_to_gain:
try:
h = html2text.HTML2Text()
h.ignore_links = True
author_string = h.handle(text_to_gain[0])
item['author'] = author_string.encode('utf-8',
'ignore').strip()
# Handle HTMLtoText except
except:
item['author'] = "anonymous"
else:
item['author'] = "anonymous"
text_to_gain = hxs.xpath(
'(//div[contains(@class,"Mend-10 Fz-13 Fw-n D-ib")])'+
'[2]/span[2]').extract()
# Read number of answers
if text_to_gain:
if " answers" in (
str(html2text.html2text(text_to_gain[0])).strip()):
item['answers'] = int(
str(html2text.html2text(text_to_gain[0])).replace(
" answers", "").strip())
else:
if " answer" in (
str(html2text.html2text(text_to_gain[0])).strip()):
item['answers'] = int(
str(html2text.html2text(text_to_gain[0])).replace(
" answer", "").strip())
else:
item['answers'] = 0
# Check if question is closed (resolve with a best answer)
text_to_gain = hxs.xpath(
'//span[contains(@class,"ya-ba-title Fw-b")]/text()').extract()
if text_to_gain:
item['resolve'] = "True"
else:
item['resolve'] = "False"
# yield item for the question istance
yield item
# Taking the best answer if present
if hxs.xpath('//div[contains(@id,"ya-best-answer")]'):
ans_uid = 1
item = YahooItem()
ans_data = hxs.xpath(
'(//div[contains(@class,"Pt-15")]/'+
'span[contains(@class, "Clr-88")])[1]').extract()
data_string = html2text.html2text(ans_data[0]).strip()
data_format = p.parseDT(str(
data_string.encode("utf8").replace("\xc2\xb7",
"").strip()))
item['date_time'] = data_format[0].strftime(f)
item['uid'] = str(str(uid_copy) + ("." + str(ans_uid)))
item['type'] = "answer"
item['resolve'] = "solution"
item['tags'] = "N/A"
item['title'] = ""
item['answers'] = 0
item['views'] = 0
best_text = hxs.xpath(
'(//span[contains(@class,"ya-q-full-text")])[1]').extract()
item['text'] = html2text.html2text(best_text[0]).encode(
'utf-8', 'ignore')
text_to_gain = hxs.xpath(
'(//a[contains(@class,"uname Clr-b")])[1]').extract()
if text_to_gain:
h = html2text.HTML2Text()
h.ignore_links = True
author_string = h.handle(text_to_gain[0])
item['author'] = str(
author_string.encode('utf-8', 'ignore').strip())
else:
item['author'] = "anonymous"
upvote_text = hxs.xpath(
'(//div[contains(@class,"D-ib Mstart-23 count")])[1]/text()').extract()
item['upvotes'] = int(
str(html2text.html2text(upvote_text[0])).strip())
item['url'] = response.url
ans_uid = ans_uid + 1
yield item
else:
ans_uid = 1
# Taking all the other answers
all_answer = hxs.xpath('//ul[contains(@id,"ya-qn-answers")]/li')
for single_answer in all_answer:
item = YahooItem()
# In this case data is always present
ans_data = single_answer.xpath(
'.//div[contains(@class,"Pt-15")]/span[contains(@class, "Clr-88")]').extract()
data_string = html2text.html2text(ans_data[0])
data_format = p.parseDT(str(
data_string.encode("utf8").replace("\xc2\xb7",
"").strip()))
item['date_time'] = data_format[0].strftime(f)
item['uid'] = str(str(uid_copy) + ("." + str(ans_uid)))
item['tags'] = "N/A"
item['title'] = ""
item['answers'] = 0
item['views'] = 0
item['type'] = "answer"
item['resolve'] = ""
text_to_gain = single_answer.xpath(
'.//a[contains(@class,"uname Clr-b")]').extract()
if text_to_gain:
h = html2text.HTML2Text()
h.ignore_links = True
author_string = h.handle(text_to_gain[0])
item['author'] = str(
author_string.encode('utf-8', 'ignore'))
else:
item['author'] = "anonymous"
                # Take the URL of the question because answers don't have their own URL ref
item['url'] = response.url
# Check if is present long text version of the answer
text_to_gain = single_answer.xpath(
'.//span[contains(@class,"ya-q-full-text")][@itemprop="text"]').extract()
if text_to_gain:
item['text'] = html2text.html2text(text_to_gain[0]).encode(
'utf-8', 'ignore')
else:
item['text'] = ""
text_to_gain = single_answer.xpath(
'.//div[contains(@class,"D-ib Mend-10 Clr-93")]/div[1]/div[1]').extract()
if text_to_gain:
item['upvotes'] = int(
str(html2text.html2text(text_to_gain[0])).strip())
else:
item['upvotes'] = 0
ans_uid = ans_uid + 1
yield item
# Checking if there are more then 10 answers
# in this case there are other answers in other page
try:
if (hxs.xpath(
'//div[contains(@id, "ya-qn-pagination")]'+
'/a[contains(@class,"Clr-bl")][last()]/@href')):
url_of_the_next_page = hxs.xpath(
'//div[contains(@id, "ya-qn-pagination")]'+
'/a[contains(@class,"Clr-bl")][last()]/@href').extract()
next_page_composed = "https://answers.yahoo.com" + \
url_of_the_next_page[0]
# Go to the next page and take more urls
# passing uid as parameter
request = scrapy.Request(next_page_composed,
meta={'ans_id': uid_copy},
callback=self.parse_other_answer_page)
request.meta['quest_id'] = uid_copy
request.meta['ult_ans_id'] = ans_uid
yield request
except NoSuchElementException:
pass
    # This method is used when a question has more than 10 answers and uses page numbers.
    # It works like the normal question parse because the page layout and XPaths are the same.
def parse_other_answer_page(self, response):
c = pdt.Constants()
p = pdt.Calendar(c)
f = '%Y-%m-%d %H:%M:%S'
hxs = HtmlXPathSelector(response)
all_answer = hxs.xpath('//ul[contains(@id,"ya-qn-answers")]/li')
current_ans_id = response.meta['ult_ans_id']
for single_answer in all_answer:
item = YahooItem()
ans_data = single_answer.xpath(
'.//div[contains(@class,"Pt-15")]/span[contains(@class, "Clr-88")]').extract()
data_string = html2text.html2text(ans_data[0])
data_format = p.parseDT(str(
data_string.encode("utf8").replace("\xc2\xb7", "").strip()))
item['date_time'] = data_format[0].strftime(f)
item['uid'] = str(
str(response.meta['quest_id']) + "." + str(current_ans_id))
item['type'] = "answer"
item['tags'] = "N/A"
item['title'] = ""
item['resolve'] = ""
item['answers'] = 0
item['views'] = 0
text_to_gain = single_answer.xpath(
'.//a[contains(@class,"uname Clr-b")]').extract()
if text_to_gain:
h = html2text.HTML2Text()
h.ignore_links = True
author_string = h.handle(text_to_gain[0])
item['author'] = str(
author_string.encode('utf-8', 'ignore').strip())
else:
item['author'] = "anonymous"
item['url'] = response.url
text_to_gain = single_answer.xpath(
'.//span[contains(@class,"ya-q-full-text")][@itemprop="text"]').extract()
if text_to_gain:
item['text'] = html2text.html2text(text_to_gain[0]).encode(
'utf-8', 'ignore')
else:
item['text'] = ""
text_to_gain = single_answer.xpath(
'.//div[contains(@class,"D-ib Mend-10 Clr-93")]/div[1]/div[1]').extract()
if text_to_gain:
item['upvotes'] = int(html2text.html2text(text_to_gain[0]))
else:
item['upvotes'] = 0
current_ans_id = current_ans_id + 1
yield item
try:
if (hxs.xpath(
'//div[contains(@id, "ya-qn-pagination")]'+
'/a[contains(@class,"Clr-bl")][last()]/@href')):
url_of_the_next_page = hxs.xpath(
'//div[contains(@id, "ya-qn-pagination")]'+
'/a[contains(@class,"Clr-bl")][last()]/@href').extract()
next_page_composed = "https://answers.yahoo.com" + \
url_of_the_next_page[0]
request = scrapy.Request(next_page_composed,
callback=self.parse_other_answer_page)
request.meta['quest_id'] = response.meta['quest_id']
request.meta['ult_ans_id'] = current_ans_id
yield request
except NoSuchElementException:
pass
``` |
{
"source": "11hifish/robust_css",
"score": 2
} |
#### File: code_v2/baselines/simple_kcss2.py
```python
import numpy as np
from tt.maxvol import maxvol
from code_v2.baselines.rank_k_svd import rank_k_svd
def simple_kcss2(A, rank):
"""
    Selects k columns C of A such that |A - C C^+ A|_F <= O(k sqrt(log k)) |A - A_k|_F
:param A: size d x n
:param rank: rank k
:return: k columns of A
"""
# stage 1: initial setup
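    # (Stage 1 builds per-column sampling scores: part1 is the normalized leverage
    # score of each column under the top-k right singular vectors, part2 is the
    # column's share of the residual mass outside the top-k subspace. Stage 2 then
    # samples about k*log(k) columns from this distribution, and stage 3 keeps k of
    # them deterministically via a maximum-volume (maxvol) subset.)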
_, n = A.shape
_, _, vh = np.linalg.svd(A)
V_k = vh[:rank].T # size n x k
part1 = np.linalg.norm(V_k, axis=1) ** 2 / (2 * rank) # size n
AVV_T = A @ V_k @ V_k.T
part2_top = np.linalg.norm(A, axis=0) ** 2 - np.linalg.norm(AVV_T, axis=0) ** 2 # size n
part2_bottom = 2 * (np.linalg.norm(A, ord='fro') ** 2 - np.linalg.norm(AVV_T, ord='fro') ** 2)
part2 = part2_top / part2_bottom
sampling = part1 + part2 # size n
# stage 2: randomized phase
c = int(rank * np.log(rank))
if c > n:
sel_idx = np.arange(n)
A_sel = A
T = V_k.T
else:
probabilities = sampling / np.sum(sampling)
sel_idx = np.random.choice(n, size=c, p=probabilities, replace=False)
entries = 1 / np.sqrt(sampling[sel_idx])
SD = np.zeros((n, c))
SD[sel_idx, np.arange(c)] = entries
A_sel = A[:, sel_idx] # d x c
T = V_k.T @ SD # k x c
# stage 3: deterministic phase
fix_rank_sel_idx = maxvol(T.T, rank) # c x k => k x k
cols = A_sel[:, fix_rank_sel_idx]
final_sel_idx = sel_idx[fix_rank_sel_idx]
return cols, final_sel_idx
def compare_Fro_norm_squared(A, cols, rank):
A_k = rank_k_svd(A, rank)
l2_error_svd = np.linalg.norm(A - A_k, ord='fro') ** 2
q, r = np.linalg.qr(cols)
l2_error_css = np.linalg.norm(A - q @ q.T @ A, ord='fro') ** 2
print('l2 error svd: {}'.format(l2_error_svd))
print('l2 error css: {}'.format(l2_error_css))
print('ratio: {}'.format(l2_error_css / l2_error_svd))
def test_simple_kcss2():
# A = np.array([[1,2,3,4,8,10],
# [5,6,7,8,3,5],
# [9,0,0,2,6,0]])
# rank = 3
A = np.random.normal(0, 10, size=(50, 100))
rank = 10
cols, sel_idx = simple_kcss2(A, rank)
# print('cols')
# print(cols)
# print('final sel idx', sel_idx)
compare_Fro_norm_squared(A, cols, rank)
if __name__ == '__main__':
test_simple_kcss2()
```
#### File: code_v2/common/generate_synthetic.py
```python
import numpy as np
def generate_synthetic_matrix(rank, n):
"""
Returns a (k + n) x (k + n) matrix.
:param rank: target rank -- k
:param n: matrix size parameter
:return: a synthetic matrix
"""
M = np.zeros((rank + n, rank + n))
M[:rank, :rank] = np.eye(rank) * (n ** (3 / 2))
M[rank:, rank:] = np.ones((n, n))
return M
def test():
rank = 2
n = 3
M = generate_synthetic_matrix(rank, n)
print(M)
if __name__ == '__main__':
test()
```
#### File: code_v2/common/kCSS12.py
```python
import numpy as np
from code_v2.common.lewis_weights import generate_OSNAP_sparse_embedding, perform_l1_lewis_weight_sampling
def regular_CSS_l12(A, sketch_size, sparsity, rank, approx_factor=25,
only_indices=True, exact=False):
# 1. Sparse Embedding Matrix
emb = generate_OSNAP_sparse_embedding(sketch_size, A.shape[0], sparsity)
SA = np.matmul(emb, A)
# 2. Lewis weights sampling
S, sel_indices = perform_l1_lewis_weight_sampling(SA.T, rank,
approx_factor, exact=exact)
if only_indices:
return None, sel_indices
else:
AS_prime = np.matmul(A, S.T)
# 3. New sparse embedding matrix for regression
R = generate_OSNAP_sparse_embedding(sketch_size, A.shape[0], sparsity)
# 4. Regression - not actually used for distributed protocol
RA = np.matmul(R, A)
RAS_prime_inv = np.linalg.pinv(np.matmul(R, AS_prime))
return np.matmul(RAS_prime_inv, RA), sel_indices
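# regular_CSS_l12 returns (right_factor_or_None, selected_column_indices); with
# only_indices=True it skips the regression step and only reports which columns
# of A were picked by the l1 Lewis-weight sampling.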
```
#### File: code_v2/common/l1_regression.py
```python
import numpy as np
import cvxpy as cp
import ray
# Parameters
MAXITER_L1 = 3000 # maximum iterations of pwSGD
########################################################################
# Slow l1 regression - Baseline using MOSEK
"""
solve_l1_regression_MOSEK - MOSEK-based l1
regression solver on a single column.
"""
@ray.remote
def solve_l1_regression_MOSEK(A, b, c_idx=None):
# print('start solving l1 regression for column {}'.format(c_idx))
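    # min_x ||Ax - b||_1 is cast as the linear program
    #   min sum(t)  subject to  -t <= Ax - b <= t,  t >= 0
    # which is exactly what the cvxpy model below encodes.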
m, n = A.shape
b = b.ravel()
x = cp.Variable(n)
t = cp.Variable(m)
# objective
objective = cp.sum(t)
    # constraints
constraints = [cp.matmul(A, x) - b <= t, -cp.matmul(A, x) + b <= t, t >= 0]
# problem
problem = cp.Problem(cp.Minimize(objective), constraints)
try:
problem.solve(solver=cp.MOSEK, verbose=False)
except:
print("MOSEK FAILED")
problem.solve(solver=cp.GLPK, verbose=False)
if problem.status in ["infeasible", "unbounded"]:
print('Problem status: {}'.format(problem.status))
return None
else:
# print('c_idx: {}, sol: {}'.format(c_idx, x.value))
return np.sum(np.abs(A @ x.value - b))
def compute_l1_error(U, A):
"""
Compute the error: min_v |UV - A|_1
:param U: Selected columns (left factor).
:param A: The original data matrix of size d x n.
:param verbose: debugging mode.
:return: l1 error.
"""
_, n = A.shape
total_error = []
    for i in range(n):
        col = A[:, i].ravel()
        # launch each column's l1 regression in parallel; .remote() returns an ObjectRef
        total_error.append(solve_l1_regression_MOSEK.remote(U, col))
    # resolve the futures, treating infeasible/unbounded columns (which return None) as zero error
    results = ray.get(total_error)
    results = np.sum(np.array([r if r is not None else 0 for r in results]))
return results
``` |
{
"source": "11joker/inception-v3-flower",
"score": 2
} |
#### File: 11joker/inception-v3-flower/bottleneck.py
```python
import glob
import os.path
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
sess = tf.InteractiveSession()
a = []
BOTTLENECK_TENSOR_SIZE = 2048
BOTTLENECK_TENSOR_NAME = r"pool_3/_reshape:0"
JPEG_DATA_TENSOR_NAME = r"DecodeJpeg/contents:0"
MODEL_DIR = r"C:/Users/25493/Documents/GitHub/inception-v3-flower"
MODEL_FILE = r"tensorflow_inception_graph.pb"
CACHE_DIR = r"C:/Users/25493/Documents/GitHub/inception-v3-flower"
INPUT_DATA = r"C:/Users/25493/Documents/GitHub/inception-v3-flower/flower_photos"
VALIDATION_PERCENTAGE = 10
TEST_PERCENTAGE = 10
LEARNING_RATE = 0.01
STEPS = 4000
BATCH = 100
def create_image_lists(testing_percentage, validation_percentage):
result = {}
sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
is_root_dir = True
for sub_dir in sub_dirs:
        if is_root_dir:
            # skip the root directory itself; only its sub-directories hold image classes
            is_root_dir = False
            continue
extensions = ["jpg","jpeg","JPG","JPEG"]
file_list = []
dir_name = os.path.basename(sub_dir)
for extension in extensions:
file_glob = os.path.join(INPUT_DATA,dir_name,"*."+extension)
file_list.extend(glob.glob(file_glob))
if not file_list:continue
label_name = dir_name.lower()
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
chance = np.random.randint(100)
if chance < validation_percentage:
validation_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
"dir": dir_name,
"training": training_images,
"testing": testing_images,
"validation": validation_images,
}
return result
def get_image_path(image_lists, image_dir, label_name, index, category):
label_lists = image_lists[label_name]
category_list = label_lists[category]
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists["dir"]
full_path = os.path.join(image_dir,sub_dir,base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, category):
return get_image_path(image_lists,CACHE_DIR,
label_name, index, category) + ".txt"
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
bottleneck_values = sess.run(bottleneck_tensor,
{image_data_tensor:image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def get_or_create_bottleneck(
sess,image_lists,label_name,index,
category, jpeg_data_tensor, bottleneck_tensor):
label_lists = image_lists[label_name]
sub_dir = label_lists["dir"]
sub_dir_path = os.path.join(CACHE_DIR, sub_dir)
if not os.path.exists(sub_dir_path):os.makedirs(sub_dir_path)
bottleneck_path = get_bottleneck_path(
image_lists, label_name, index, category)
if not os.path.exists(bottleneck_path):
image_path = get_image_path(
image_lists, INPUT_DATA, label_name, index, category)
image_data = gfile.FastGFile(image_path, "rb").read()
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, bottleneck_tensor)
bottleneck_string = ",".join(str(x) for x in bottleneck_values)
with open(bottleneck_path,"w") as bottleneck_file:
bottleneck_file.write(bottleneck_string)
else:
with open(bottleneck_path, "r") as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x.strip()) for x in bottleneck_string.split(",")]
return bottleneck_values
def get_random_cached_bottlenecks(
sess, n_classes, image_lists, how_many, category,
jpeg_data_tensor, bottleneck_tensor):
bottlenecks = []
ground_truths = []
for _ in range(how_many):
label_index = random.randrange(n_classes)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(65536)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, category,
jpeg_data_tensor, bottleneck_tensor)
ground_truth = np.zeros(n_classes, dtype = np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def get_test_bottlenecks(sess, image_lists, n_classes,
jpeg_data_tensor, bottleneck_tensor):
bottlenecks = []
ground_truths = []
label_name_list = list(image_lists.keys())
for label_index, label_name in enumerate(label_name_list):
category = "testing"
for index, unused_base_name in enumerate(
image_lists[label_name][category]):
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, index, category,
jpeg_data_tensor, bottleneck_tensor)
ground_truth = np.zeros(n_classes, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks,ground_truths
def main(_):
image_lists = create_image_lists(TEST_PERCENTAGE, VALIDATION_PERCENTAGE)
n_classes = len(image_lists.keys())
with gfile.FastGFile(os.path.join(MODEL_DIR, MODEL_FILE), "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(
graph_def,
return_elements=[BOTTLENECK_TENSOR_NAME,JPEG_DATA_TENSOR_NAME])
bottleneck_input = tf.placeholder(
tf.float32,[None, BOTTLENECK_TENSOR_SIZE],
name="BottleneckInputPlaceholder")
ground_truth_input = tf.placeholder(
tf.float32,[None, n_classes], name="GroundTruthInput")
with tf.name_scope("final_training_ops"):
weights = tf.Variable(tf.truncated_normal(
[BOTTLENECK_TENSOR_SIZE, n_classes], stddev=0.001))
biases = tf.Variable(tf.zeros([n_classes]))
logits = tf.matmul(bottleneck_input, weights) + biases
final_tensor = tf.nn.softmax(logits)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits,labels = ground_truth_input)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE)\
.minimize(cross_entropy_mean)
with tf.name_scope("evaluation"):
correct_prediction = tf.equal(tf.argmax(final_tensor, 1),
tf.argmax(ground_truth_input, 1))
evaluation_step = tf.reduce_mean(
tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
init = tf.initialize_all_variables()
sess.run(init)
for i in range(STEPS):
train_bottlenecks, train_ground_truth = \
get_random_cached_bottlenecks(
sess, n_classes, image_lists, BATCH,
"training", jpeg_data_tensor, bottleneck_tensor)
sess.run(train_step,
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
if i % 100 == 0 or i+1 == STEPS:
validation_bottlenecks, validation_ground_truth = \
get_random_cached_bottlenecks(
sess, n_classes, image_lists, BATCH,
"validation", jpeg_data_tensor, bottleneck_tensor)
validation_accuracy = sess.run(
evaluation_step, feed_dict={
bottleneck_input:validation_bottlenecks,
ground_truth_input: validation_ground_truth})
print("step %d: Validation accuracy on random sampled "
"%d example = %.1f%%" %
(i, BATCH, validation_accuracy * 100))
test_bottlenecks, test_ground_truth = get_test_bottlenecks(
sess, image_lists, n_classes, jpeg_data_tensor,
bottleneck_tensor)
test_accuracy = sess.run(evaluation_step, feed_dict={
bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print("Final test accuracy = %.1f%%" % (test_accuracy * 100))
if __name__ == "__main__":
tf.app.run()
```
{
"source": "11k/pylivetrader",
"score": 2
} |
#### File: pylivetrader/backend/base.py
```python
import abc
from abc import abstractmethod
import pandas as pd
class BaseBackend(abc.ABC):
@abstractmethod
def get_equities(self):
pass
@property
@abstractmethod
def positions(self):
pass
@property
@abstractmethod
def portfolio(self):
pass
@property
@abstractmethod
def account(self):
pass
@abstractmethod
def order(self, asset, amount, style):
pass
@abstractmethod
def batch_order(self, args):
pass
@property
@abstractmethod
def orders(self):
pass
@abstractmethod
def all_orders(self):
pass
@abstractmethod
def get_last_traded_dt(self, asset):
pass
@abstractmethod
def get_spot_value(
self,
assets,
field,
dt,
date_frequency,
quantopian_compatible=True):
pass
@abstractmethod
def get_bars(self, assets, data_frequency, bar_count=500):
pass
@property
def time_skew(self):
'''
Returns:
skew (pd.Timedelta):
Time skew between local clock and broker server clock
'''
return pd.Timedelta('0s')
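# Illustrative note (not part of pylivetrader itself): a concrete broker
# backend is expected to subclass BaseBackend and implement every abstract
# method and property above; only time_skew ships with a default. A purely
# hypothetical sketch:
#
#   class MyBrokerBackend(BaseBackend):      # class name is made up
#       def get_equities(self): return []    # ...and so on for each member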
```
{
"source": "11l-lang/_11l",
"score": 4
} |
#### File: 11l-lang/_11l/_11l.py
```python
Char = str
Byte = int
Int64 = int
UInt64 = int
UInt32 = int
BigInt = int
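# Width-named aliases, presumably so code translated from 11l can keep its
# type names; at runtime they all map to Python's built-in str/int.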
def move(obj):
return obj
def ref(obj):
return obj
from copy import copy, deepcopy
from typing import List, Tuple, NamedTuple, Dict, DefaultDict, Callable, Set, Optional, IO, TextIO, BinaryIO
from enum import IntEnum
import collections # for `defaultdict` and `deque`
# [https://stackoverflow.com/questions/4223349/python-implementation-for-next-permutation-in-stl <- google:‘next_permutation python’]
def next_permutation(a):
"""Generate the lexicographically next permutation inplace.
https://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
Return false if there is no next permutation.
"""
# Find the largest index i such that a[i] < a[i + 1]. If no such
# index exists, the permutation is the last permutation
for i in reversed(range(len(a) - 1)):
if a[i] < a[i + 1]:
break # found
else: # no break: not found
a.reverse()
return False # no next permutation
# Find the largest index j greater than i such that a[i] < a[j]
j = next(j for j in reversed(range(i + 1, len(a))) if a[i] < a[j])
# Swap the value of a[i] with that of a[j]
a[i], a[j] = a[j], a[i]
# Reverse sequence from a[i + 1] up to and including the final element a[n]
a[i + 1:] = reversed(a[i + 1:])
return True
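# Illustrative usage (not part of the original file): starting from a sorted
# list, e.g. a = [1, 2, 3], repeated calls mutate it through [1,3,2], [2,1,3],
# [2,3,1], [3,1,2], [3,2,1]; the next call returns False and leaves a == [1,2,3].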
# [https://stackoverflow.com/a/41278973/2692494 <- google:‘is_sorted python’]
def is_sorted(a):
for i in range(1, len(a)):
if a[i-1] > a[i]:
return False
return True
# [https://www.rosettacode.org/wiki/Suffixation_of_decimal_numbers#Python]
def format_float(x, precision):
return f'{x:.{precision}f}'
def format_float_exp(x, precision, width = 0):
return f'{x:{width}.{precision}e}'
# For [https://www.rosettacode.org/wiki/Peaceful_chess_queen_armies#D]
# class IVec2
#def set_str_char(s, char_index, char)
# For [https://www.rosettacode.org/wiki/Range_modifications#Python]
# def set_tuple_element(t, element_index, element_value):
# l = list(t)
# l[element_index] = element_value
# return tuple(l)
# def add_to_tuple_element(t, element_index, addendum):
# l = list(t)
# l[element_index] += addendum
# return tuple(l)
class MutTupleClass: # [-FIX mypy error: Invalid type "1.MutTuple"-]
def __getitem__(self, params):
return list
def __call__(self, *elems):
return list(elems)
MutTuple = MutTupleClass()
def hexu(n):
return hex(n)[2:].upper()
def wrap(x, min_val, max_val):
return (x - min_val) % (max_val - min_val) + min_val
# [https://www.rosettacode.org/wiki/First_perfect_square_in_base_n_with_n_unique_digits#D]
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def int_to_str_with_radix(num, base):
assert(base >= 2 and base <= 36)
cnum = abs(num)
result = ''
while True:
result += ALPHABET[cnum % base]
cnum //= base
if cnum == 0: break
if num < 0:
result += '-'
return result[::-1]
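# Illustrative examples (not in the original file):
#   int_to_str_with_radix(255, 16) == 'FF'
#   int_to_str_with_radix(-10, 2) == '-1010'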
# [https://www.rosettacode.org/wiki/MD5/Implementation#Python]
def rotl32(x, amount):
x &= 0xFFFFFFFF
return ((x<<amount) | (x>>(32-amount))) & 0xFFFFFFFF
def rotr32(x, amount):
x &= 0xFFFFFFFF
return ((x>>amount) | (x<<(32-amount))) & 0xFFFFFFFF
def Bytes(s): # if you need `b"\xAE"`, please note that `Bytes("\xAE")` will not work, use `[Byte(0xAE)]` or `bytes(b"\xAE")` instead
return bytes(s, 'ascii')
# [https://en.wikipedia.org/wiki/Product_(mathematics)#Product_of_a_sequence]
def product_of_a_seq(seq):
r = 1
for v in seq:
r *= v
return r
```
{
"source": "11l-lang/_11l_to_cpp",
"score": 3
} |
#### File: python_to_cpp/Shed Skin Examples/10.ac_encode.py
```python
import os
from typing import List
BETA0=1;BETA1=1 ## default prior distribution
M = 30 ; ONE = (1<<M) ; HALF = (1<<(M-1))
QUARTER = (1<<(M-2)) ; THREEQU = HALF+QUARTER
def clear (c,charstack : List[int]):
## print out character c, and other queued characters
a = str(c) + str(1-c)*charstack[0]
charstack[0]=0
return a
def encode (string, c0=BETA0, c1=BETA1, adaptive=1,verbose=0):
b=ONE; a=0; tot0=0;tot1=0; assert c0>0; assert c1>0
p0 : float
if adaptive==0:
p0 = c0*1.0/(c0+c1)
ans=""
charstack=[0] ## how many undecided characters remain to print
for c in string:
w=b-a
if adaptive :
cT = c0+c1
p0 = c0*1.0/cT
boundary = a + int(p0*w)
if (boundary == a):
boundary += 1; print("warningA") # these warnings mean that some of the probabilities
if (boundary == b):
boundary -= 1; print("warningB") # requested by the probabilistic model
## are so small (compared to our integers) that we had to round them up to bigger values
if (c=='1') :
a = boundary
tot1 += 1
if adaptive: c1 += 1
elif (c=='0'):
b = boundary
tot0 +=1
if adaptive: c0 += 1
#pass ## ignore other characters
while ( (a>=HALF) or (b<=HALF) ) : ## output bits
if (a>=HALF) :
ans = ans + clear(1,charstack)
a = a-HALF
b = b-HALF
else :
ans = ans + clear(0,charstack)
a *= 2 ; b *= 2
assert a<=HALF; assert b>=HALF; assert a>=0; assert b<=ONE
## if the gap a-b is getting small, rescale it
while ( (a>QUARTER) and (b<THREEQU) ):
charstack[0] += 1
a = 2*a-HALF
b = 2*b-HALF
assert a<=HALF; assert b>=HALF; assert a>=0; assert b<=ONE
# terminate
if ( (HALF-a) > (b-HALF) ) :
w = (HALF-a)
ans = ans + clear(0,charstack)
while ( w < HALF ) :
ans = ans + clear(1,charstack)
w *=2
else :
w = (b-HALF)
ans = ans + clear(1,charstack)
while ( w < HALF ) :
ans = ans + clear(0,charstack)
w *=2
return ans
def decode (string, n=10000, c0=BETA0, c1=BETA1, adaptive=1,verbose=0):
## must supply n, the number of source characters remaining.
b=ONE ; a=0 ; tot0=0;tot1=0 ; assert c0>0 ; assert c1>0
model_needs_updating = 1
p0 : float
if adaptive==0:
p0 = c0*1.0/(c0+c1)
ans=""
u=0.0 ; v=float(ONE)
boundary : int
for c in string :
if n<=0 :
break ## break out of the string-reading loop
assert n>0
## // (u,v) is the current "encoded alphabet" binary interval, and halfway is its midpoint.
## // (a,b) is the current "source alphabet" interval, and boundary is the "midpoint"
assert u>=0 ; assert v<=ONE
halfway = u + (v-u)/2
if( c == '1' ) :
u = halfway
elif ( c=='0' ):
v = halfway
## // Read bits until we can decide what the source symbol was.
## // Then emulate the encoder's computations, and tie (u,v) to tag along for the ride.
while (1): ## condition at end
firsttime = 0
if(model_needs_updating):
w = b-a
if adaptive :
cT = c0 + c1 ; p0 = c0 *1.0/cT
boundary = a + int(p0*w)
if (boundary == a):
boundary += 1; print("warningA")
if (boundary == b):
boundary -= 1; print("warningB")
model_needs_updating = 0
if ( boundary <= u ) :
ans = ans + "1"; tot1 +=1
if adaptive: c1 += 1
a = boundary ; model_needs_updating = 1 ; n-=1
elif ( boundary >= v ) :
ans = ans + "0"; tot0 +=1
if adaptive: c0 += 1
b = boundary ; model_needs_updating = 1 ; n-=1
## // every time we discover a source bit, implement exactly the
## // computations that were done by the encoder (below).
## // not enough bits have yet been read to know the decision.
## // emulate outputting of bits by the encoder, and tie (u,v) to tag along for the ride.
while ( (a>=HALF) or (b<=HALF) ) :
if (a>=HALF) :
a = a-HALF ; b = b-HALF ; u = u-HALF ; v = v-HALF
a *= 2 ; b *= 2 ; u *= 2 ; v *= 2
model_needs_updating = 1
assert a<=HALF; assert b>=HALF; assert a>=0; assert b<=ONE
## if the gap a-b is getting small, rescale it
while ( (a>QUARTER) and (b<THREEQU) ):
a = 2*a-HALF; b = 2*b-HALF ; u = 2*u-HALF ; v = 2*v-HALF
if not (n>0 and model_needs_updating) : ## this is the "while" for this "do" loop
break
return ans
def hardertest():
print("Reading the BentCoinFile")
inputfile = open( "testdata/BentCoinFile" , "r" )
outputfile = open( "tmp.zip" , "w" )
print("Compressing to tmp.zip")
s = inputfile.read()
n = len(s)
zip = encode(s, 10, 1)
outputfile.write(zip)
outputfile.close(); inputfile.close()
print("DONE compressing")
inputfile = open( "tmp.zip" , "r" )
outputfile = open( "tmp2" , "w" )
print("Uncompressing to tmp2")
unc = decode(list(inputfile.read()), n, 10, 1)
outputfile.write(unc)
outputfile.close(); inputfile.close()
print("DONE uncompressing")
print("Checking for differences...")
print(s == unc)
#os.system( "diff testdata/BentCoinFile tmp2" )
#os.system( "wc tmp.zip testdata/BentCoinFile tmp2" )
def test():
sl=["1010", "111", "00001000000000000000",\
"1", "10" , "01" , "0" ,"0000000", \
"000000000000000100000000000000000000000000000000100000000000000000011000000" ]
for s in sl:
print("encoding " + s)
n=len(s)
e = encode(s,10,1)
print("decoding " + e)
ds = decode(e,n,10,1)
print(ds)
if (ds != s) :
print(s)
print("ERR@")
else:
print("ok ---------- ")
if __name__ == '__main__':
test()
hardertest()
```
#### File: python_to_cpp/Shed Skin Examples/12.amaze.py
```python
import sys
import random
from typing import List, Tuple
class MazeReaderException(Exception):
def __init__(self, message):
pass
STDIN = 0
FILE_ = 1
SOCKET = 2
PATH = -1
START = -2
EXIT = -3
null_point = (1 << 30, 1 << 30)
class MazeReader:
def __init__(self):
self.maze_rows : List[List[int]] = []
def readStdin(self):
print('Enter a maze')
print('You can enter a maze row by row')
print()
data = input('Enter the dimension of the maze as Width X Height: ')
(w1, h1) = data.split() # XXX SS
(w, h) = (int(w1), int(h1))
for x in range(h):
row = ''
while row == '':
row = input('Enter row number %d: ' % (x+1))
rowsplit = [int(y) for y in row.split()] # XXX SS
if len(rowsplit) != w:
raise MazeReaderException('invalid size of maze row')
self.maze_rows.append(rowsplit)
def readFile(self):
fname = 'testdata/maze.txt' #raw_input('Enter maze filename: ')
try:
f = open(fname)
lines = f.readlines()
f.close()
def workaround_for_MSVC_2017(line : str): # [https://developercommunity.visualstudio.com/content/problem/565417/bug-with-operator-in-c.html]
return line.strip() != ''
lines = [ line for line in lines if workaround_for_MSVC_2017(line) ]
w = len(lines[0].split())
for line in lines:
row = [int(y) for y in line.split()]
if len(row) != w:
raise MazeReaderException('Invalid maze file - error in maze dimensions')
else:
self.maze_rows.append(row)
except:
raise MazeReaderException('read error')
def getData(self):
return self.maze_rows
def readMaze(self, source=STDIN):
if source==STDIN:
self.readStdin()
elif source == FILE_:
self.readFile()
return self.getData()
class MazeError(Exception):
def __init__(self, message):
pass
class Maze:
_rows : List[List[int]]
_height : int
_width : int
def __init__(self, rows):
self._rows = rows
self.__validate()
self.__normalize()
def __str__(self):
s = '\n'
for row in self._rows:
for item in row:
sitem : str
if item == PATH: sitem = '*'
elif item == START: sitem = 'S'
elif item == EXIT: sitem = 'E'
else: sitem = str(item)
s = s + ' ' + sitem + ' '
s = s + '\n\n'
return s
def __validate(self):
width = len(self._rows[0])
widths = [len(row) for row in self._rows]
if widths.count(width) != len(widths):
raise MazeError('Invalid maze!')
self._height = len(self._rows)
self._width = width
def __normalize(self):
for x in range(len(self._rows)):
row = self._rows[x]
row = [min(int(y), 1) for y in row] #map(lambda x: min(int(x), 1), row) # SS
self._rows[x] = row
def validatePoint(self, pt): # const
(x,y) = pt
w = self._width
h = self._height
# Don't support Pythonic negative indices
if x > w - 1 or x<0:
raise MazeError('x co-ordinate out of range!')
if y > h - 1 or y<0:
raise MazeError('y co-ordinate out of range!')
def getItem(self, x, y):
self.validatePoint((x,y))
w = self._width
h = self._height
row = self._rows[h-y-1]
return row[x]
def setItem(self, x, y, value):
h = self._height
self.validatePoint((x,y))
row = self._rows[h-y-1]
row[x] = value
def getNeighBours(self, pt):
self.validatePoint(pt)
(x,y) = pt
h = self._height
w = self._width
poss_nbors = ((x-1,y),(x-1,y+1),(x,y+1),(x+1,y+1),(x+1,y),(x+1,y-1),(x,y-1),(x-1,y-1))
nbors : List[Tuple[int, int]] = []
for xx,yy in poss_nbors:
if (xx>=0 and xx<=w-1) and (yy>=0 and yy<=h-1):
nbors.append((xx,yy))
return nbors
def getExitPoints(self, pt):
exits : List[Tuple[int, int]] = []
for xx,yy in self.getNeighBours(pt):
if self.getItem(xx,yy)==0: # SS
exits.append((xx,yy))
return exits
def calcDistance(self, pt1, pt2):
self.validatePoint(pt1)
self.validatePoint(pt2)
(x1,y1) = pt1
(x2,y2) = pt2
return pow( (pow((x1-x2), 2) + pow((y1-y2),2)), 0.5)
class MazeFactory:
def makeMaze(self, source=STDIN):
reader = MazeReader()
return Maze(reader.readMaze(source))
class MazeSolver:
maze : Maze
def __init__(self, maze):
self.maze = maze
self._start = (0,0)
self._end = (0,0)
self._current = (0,0)
self._steps = 0
self._path : List[Tuple[int, int]] = []
self._tryalternate = False
self._trynextbest = False
self._disputed = (0,0)
self._loops = 0
self._retrace = False
self._numretraces = 0
def setStartPoint(self, pt):
self.maze.validatePoint(pt)
self._start = pt
def setEndPoint(self, pt):
self.maze.validatePoint(pt)
self._end = pt
def boundaryCheck(self):
exits1 = self.maze.getExitPoints(self._start)
exits2 = self.maze.getExitPoints(self._end)
if len(exits1)==0 or len(exits2)==0:
return False
return True
def setCurrentPoint(self, point):
self._current = point
self._path.append(point)
def isSolved(self):
return (self._current == self._end)
def retracePath(self):
print('Retracing...')
self._retrace = True
path2 = self._path[:]
path2.reverse()
idx = path2.index(self._start)
self._path.extend(self._path[len(self._path)-2:idx:-1]) # [-TODO: `self._path[-2:idx:-1]`-]
self._numretraces += 1
def endlessLoop(self):
if self._loops>100:
print('Seems to be hitting an endless loop.')
return True
elif self._numretraces>8:
print('Seem to be retracing loop.')
return True
return False
def getNextPoint(self):
points = self.maze.getExitPoints(self._current)
point = self.getBestPoint(points)
while self.checkClosedLoop(point):
if self.endlessLoop():
print(self._loops)
point = null_point
break
point2 = point
if point==self._start and len(self._path)>2:
self._tryalternate = True
break
else:
point = self.getNextClosestPointNotInPath(points, point2)
if point == null_point:
self.retracePath()
self._tryalternate = True
point = self._start
break
return point
def checkClosedLoop(self, point):
l = list(range(0, len(self._path)-1, 2))
l.reverse()
for x in l:
if self._path[x] == point:
self._loops += 1
return True
return False
def getBestPoint(self, points):
point = self.getClosestPoint(points)
point2 = point
altpoint = point
if point2 in self._path:
point = self.getNextClosestPointNotInPath(points, point2)
if point == null_point:
point = point2
if self._tryalternate:
point = self.getAlternatePoint(points, altpoint)
print('Trying alternate... ', end = '')
print(self._current, end = ' ')
print(point)
self._trynextbest = False
self._tryalternate = False
self._retrace = False
return point
def sortPoints(self, points):
distances = [self.maze.calcDistance(point, self._end) for point in points]
distances2 = distances[:]
distances.sort()
points2 = [(0, 0)]*len(points) # SS
count = 0
for dist in distances:
idx = distances2.index(dist)
point = points[idx]
while point in points2:
idx = distances2.index(dist, idx+1)
point = points[idx]
points2[count] = point
count += 1
return points2
def getClosestPoint(self, points):
points2 = self.sortPoints(points)
closest = points2[0]
return closest
def getAlternatePoint(self, points, point):
points2 = points[:]
print(points2, end = ' ')
print(point)
points2.remove(point)
if len(points2):
return random.choice(points2)
return null_point
def getNextClosestPoint(self, points, point):
points2 = self.sortPoints(points)
idx = points2.index(point)
try:
return points2[idx+1]
except:
return null_point
def getNextClosestPointNotInPath(self, points, point):
point2 = self.getNextClosestPoint(points, point)
while point2 in self._path:
point2 = self.getNextClosestPoint(points, point2)
return point2
def printResult(self):
""" Print the maze showing the path """
for x,y in self._path:
self.maze.setItem(x,y,PATH)
self.maze.setItem(self._start[0], self._start[1], START)
self.maze.setItem(self._end[0], self._end[1], EXIT)
#print 'Maze with solution path'
#print self.maze
def solve(self):
#print 'Starting point is', self._start
#print 'Ending point is', self._end
# First check if both start and end are same
if self._start == self._end:
print('Start/end points are the same. Trivial maze.')
print([self._start, self._end])
return
# Check boundary conditions
if not self.boundaryCheck():
print('Either start/end point are unreachable. Maze cannot be solved.')
return
# Proper maze
#print 'Maze is a proper maze.'
# Initialize solver
self.setCurrentPoint(self._start)
unsolvable = False
while not self.isSolved():
self._steps += 1
pt = self.getNextPoint()
if pt != null_point:
self.setCurrentPoint(pt)
else:
print('Dead-lock - maze unsolvable')
unsolvable = True
break
if not unsolvable:
pass #print 'Solution path is',self._path
else:
print('Path till deadlock is ', end = '')
print(self._path)
self.printResult()
class MazeGame:
def __init__(self):
self._start = (0,0)
self._end = (0,0)
def createMaze(self) -> Maze:
raise NotImplementedError()
def getStartEndPoints(self, maze : Maze) -> None:
raise NotImplementedError()
def runGame(self):
maze = self.createMaze()
# if not maze:
# return None
#print maze
self.getStartEndPoints(maze)
#open('maze.txt','w').write(str(maze))
solver = MazeSolver(maze)
#open ('maze_pts.txt','w').write(str(self._start) + ' ' + str(self._end) + '\n')
solver.setStartPoint(self._start)
solver.setEndPoint(self._end)
solver.solve()
class FilebasedMazeGame(MazeGame):
def createMaze(self):
f = MazeFactory()
m = f.makeMaze(FILE_)
print(m)
return m
def getStartEndPoints(self, maze : Maze):
while True:
try:
#pt1 = raw_input('Enter starting point: ')
pt1 = '0 4'
(x,y) = pt1.split()
self._start = (int(x), int(y))
maze.validatePoint(self._start)
break
except:
pass
while True:
try:
pt2 = '5 4' #pt2 = raw_input('Enter ending point: ')
(x,y) = pt2.split()
self._end = (int(x), int(y))
maze.validatePoint(self._end)
break
except:
pass
if __name__ == '__main__':
game = FilebasedMazeGame()
for x in range(1):
game.runGame()
```
#### File: python_to_cpp/Shed Skin Examples/18.genetic.py
```python
import random
import math
from copy import copy
from typing import List
infiniteNeg = -1e302
class Individual:
ngenes : int
genome : List[bool]
def __init__(self, ngenes = 0):
self.ngenes = ngenes
self.genome = [random.random()<0.5 for i in range(ngenes)]
self.fitness = infiniteNeg
def bin2dec(self, inf=0, sup=0):
if sup == 0: sup = self.ngenes - 1
result = 0
for i in range(inf, sup+1):
if self.genome[i]:
result += 1 << (i-inf)
return result
def computeFitness(self):
self.fitness = self.fitnessFun(self.computeValuesGenome())
def __repr__(self):
return "".join([str(int(gene)) for gene in self.genome])
def fitnessFun(self, x):
return x + abs(math.sin(32*x))
def computeValuesGenome(self, xMin=0, xMax=math.pi):
scaleFactor = (xMax-xMin) / (1<<self.ngenes)
return self.bin2dec() * scaleFactor
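# Each genome encodes an integer in [0, 2**ngenes) which computeValuesGenome
# scales to a value in [0, pi) by default; the GA below therefore maximises
# f(x) = x + |sin(32*x)| on that range.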
class SGA:
population : List[Individual]
bestIndividual : Individual
def __init__(self):
self.popSize = 200 # Ex. 200
self.genomeSize = 16 # Ex. 16
self.generationsMax = 16 # Ex. 100
self.crossingOverProb = 0.75 # In [0,1] ex. 0.75
self.selectivePressure = 0.75 # In [0,1] ex. 0.75
self.geneMutationProb = 0.005 # Ex. 0.005
def generateRandomPop(self):
self.population = [Individual(self.genomeSize) for i in range(self.popSize)]
def computeFitnessPop(self):
for individual in self.population:
individual.computeFitness()
def mutatePop(self):
nmutations = int(round(self.popSize * self.genomeSize * self.geneMutationProb))
for i in range(nmutations):
individual = random.choice(self.population)
gene = random.randint(0, self.genomeSize-1)
individual.genome[gene] = not individual.genome[gene]
def tounamentSelectionPop(self):
pop2 : List[Individual] = []
for i in range(self.popSize):
individual1 = random.choice(self.population)
individual2 = random.choice(self.population)
if random.random() < self.selectivePressure:
if individual1.fitness > individual2.fitness:
pop2.append(individual1)
else:
pop2.append(individual2)
else:
if individual1.fitness > individual2.fitness:
pop2.append(individual2)
else:
pop2.append(individual1)
return pop2 # fixed
def crossingOverPop(self):
nCrossingOver = int(round(self.popSize * self.crossingOverProb))
for i in range(nCrossingOver):
ind1 = random.choice(self.population)
ind2 = random.choice(self.population)
crossPosition = random.randint(0, self.genomeSize-1)
for j in range(crossPosition+1):
(ind1.genome[j], ind2.genome[j]) = (ind2.genome[j], ind1.genome[j])
def showGeneration_bestIndFind(self):
fitnessTot = 0.0
bestIndividualGeneration = self.population[0]
for individual in self.population:
fitnessTot += individual.fitness
if individual.fitness > bestIndividualGeneration.fitness:
bestIndividualGeneration = individual
if self.bestIndividual.fitness < bestIndividualGeneration.fitness:
self.bestIndividual = copy(bestIndividualGeneration)
def run(self):
self.generateRandomPop()
self.bestIndividual = Individual(self.genomeSize)
for generation in range(1, self.generationsMax+1):
if generation % 300 == 0:
print('generation ' + str(generation))
self.computeFitnessPop()
self.showGeneration_bestIndFind()
self.population = self.tounamentSelectionPop()
self.mutatePop()
self.crossingOverPop()
if __name__ == '__main__':
sga = SGA()
sga.generationsMax = 3000
sga.genomeSize = 20
sga.popSize = 30
sga.geneMutationProb = 0.01
sga.run()
```
#### File: python_to_cpp/Shed Skin Examples/19.hq2x.py
```python
from typing import List
LUT16to32 = 65536*[0]
RGBtoYUV = 65536*[0]
Ymask = 0x00FF0000
Umask = 0x0000FF00
Vmask = 0x000000FF
trY = 0x00300000
trU = 0x00000700
trV = 0x00000006
class PPM:
w : int
h : int
rgb : List[int]
def __init__(self, w, h, rgb):
(self.w, self.h) = (w, h)
self.rgb = rgb
# else:
# self.rgb = [0 for i in range(w*h)]
def save(self, filename):
f = open(filename, 'w', newline = "\n")
f.write("P3\n")
f.write(str(self.w) + ' ' + str(self.h) + "\n")
f.write("255\n")
for rgb in self.rgb:
r = ((rgb >> 16) & 0xff)
g = ((rgb >> 8) & 0xff)
b = (rgb & 0xff)
f.write(str(r) + ' ' + str(g) + ' ' + str(b) + "\n")
f.close()
def loadPPM(filename):
lines = [l.strip() for l in open(filename).readlines()]
assert lines[0] == 'P3'
wh = list(map(lambda i: int(i), lines[1].split()))
w = wh[0]
h = wh[1]
assert int(lines[2]) == 255
values : List[int] = []
for line in lines[3:]:
values.extend(list(map(lambda i: int(i), line.split())))
rgb : List[int] = []
for i in range(0, len(values), 3):
r = values[i] >> 3
g = values[i+1] >> 2
b = values[i+2] >> 3
rgb.append(r << 11 | g << 5 | b)
return PPM(w, h, rgb)
def diff(w1, w2):
YUV1 = RGBtoYUV[w1]
YUV2 = RGBtoYUV[w2]
return (abs((YUV1 & Ymask) - (YUV2 & Ymask)) > trY) or \
(abs((YUV1 & Umask) - (YUV2 & Umask)) > trU) or \
(abs((YUV1 & Vmask) - (YUV2 & Vmask)) > trV)
def Interp1(c1, c2):
return (c1*3+c2) >> 2
def Interp2(c1, c2, c3):
return (c1*2+c2+c3) >> 2
def Interp6(c1, c2, c3):
return ((((c1 & 0x00FF00)*5 + (c2 & 0x00FF00)*2 + (c3 & 0x00FF00) ) & 0x0007F800) + \
(((c1 & 0xFF00FF)*5 + (c2 & 0xFF00FF)*2 + (c3 & 0xFF00FF) ) & 0x07F807F8)) >> 3
def Interp7(c1, c2, c3):
return ((((c1 & 0x00FF00)*6 + (c2 & 0x00FF00) + (c3 & 0x00FF00) ) & 0x0007F800) + \
(((c1 & 0xFF00FF)*6 + (c2 & 0xFF00FF) + (c3 & 0xFF00FF) ) & 0x07F807F8)) >> 3
def Interp9(c1, c2, c3):
return ((((c1 & 0x00FF00)*2 + ((c2 & 0x00FF00) + (c3 & 0x00FF00))*3 ) & 0x0007F800) + \
(((c1 & 0xFF00FF)*2 + ((c2 & 0xFF00FF) + (c3 & 0xFF00FF))*3 ) & 0x07F807F8)) >> 3
def Interp10(c1, c2, c3):
return ((((c1 & 0x00FF00)*14 + (c2 & 0x00FF00) + (c3 & 0x00FF00) ) & 0x000FF000) +
(((c1 & 0xFF00FF)*14 + (c2 & 0xFF00FF) + (c3 & 0xFF00FF) ) & 0x0FF00FF0)) >> 4
def PIXEL00_0(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = c[5]
def PIXEL00_10(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp1(c[5], c[1])
def PIXEL00_11(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp1(c[5], c[4])
def PIXEL00_12(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp1(c[5], c[2])
def PIXEL00_20(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp2(c[5], c[4], c[2])
def PIXEL00_21(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp2(c[5], c[1], c[2])
def PIXEL00_22(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp2(c[5], c[1], c[4])
def PIXEL00_60(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp6(c[5], c[2], c[4])
def PIXEL00_61(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp6(c[5], c[4], c[2])
def PIXEL00_70(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp7(c[5], c[4], c[2])
def PIXEL00_90(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp9(c[5], c[4], c[2])
def PIXEL00_100(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut] = Interp10(c[5], c[4], c[2])
def PIXEL01_0(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = c[5]
def PIXEL01_10(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp1(c[5], c[3])
def PIXEL01_11(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp1(c[5], c[2])
def PIXEL01_12(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp1(c[5], c[6])
def PIXEL01_20(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp2(c[5], c[2], c[6])
def PIXEL01_21(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp2(c[5], c[3], c[6])
def PIXEL01_22(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp2(c[5], c[3], c[2])
def PIXEL01_60(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp6(c[5], c[6], c[2])
def PIXEL01_61(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp6(c[5], c[2], c[6])
def PIXEL01_70(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp7(c[5], c[2], c[6])
def PIXEL01_90(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp9(c[5], c[2], c[6])
def PIXEL01_100(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+1] = Interp10(c[5], c[2], c[6])
def PIXEL10_0(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = c[5]
def PIXEL10_10(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp1(c[5], c[7])
def PIXEL10_11(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp1(c[5], c[8])
def PIXEL10_12(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp1(c[5], c[4])
def PIXEL10_20(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp2(c[5], c[8], c[4])
def PIXEL10_21(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp2(c[5], c[7], c[4])
def PIXEL10_22(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp2(c[5], c[7], c[8])
def PIXEL10_60(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp6(c[5], c[4], c[8])
def PIXEL10_61(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp6(c[5], c[8], c[4])
def PIXEL10_70(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp7(c[5], c[8], c[4])
def PIXEL10_90(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp9(c[5], c[8], c[4])
def PIXEL10_100(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL] = Interp10(c[5], c[8], c[4])
def PIXEL11_0(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = c[5]
def PIXEL11_10(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp1(c[5], c[9])
def PIXEL11_11(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp1(c[5], c[6])
def PIXEL11_12(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp1(c[5], c[8])
def PIXEL11_20(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp2(c[5], c[6], c[8])
def PIXEL11_21(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp2(c[5], c[9], c[8])
def PIXEL11_22(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp2(c[5], c[9], c[6])
def PIXEL11_60(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp6(c[5], c[8], c[6])
def PIXEL11_61(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp6(c[5], c[6], c[8])
def PIXEL11_70(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp7(c[5], c[6], c[8])
def PIXEL11_90(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp9(c[5], c[6], c[8])
def PIXEL11_100(rgb_out : List[int], pOut, BpL, c): rgb_out[pOut+BpL+1] = Interp10(c[5], c[6], c[8])
def hq2x(xres, yres, rgb):
'''
+--+--+--+
|w1|w2|w3|
+--+--+--+
|w4|w5|w6|
+--+--+--+
|w7|w8|w9|
+--+--+--+
'''
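    # For every source pixel w5, its 8 neighbours are compared in YUV space;
    # each neighbour that differs beyond the trY/trU/trV thresholds sets one
    # bit of `pattern`, which selects an interpolation case below. Every input
    # pixel expands to a 2x2 output block, hence rgb_out has 4*len(rgb)
    # entries and BpL (= 2*xres) is the output line stride.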
c = 10*[0]
w = 10*[0]
rgb_out = 4*len(rgb)*[0]
BpL = 2*xres
for j in range(yres):
prevline = -xres if j>0 else 0
nextline = xres if j<yres-1 else 0
for i in range(xres):
pos = j*xres+i
pOut = j*xres*4+2*i
w[3] = rgb[pos+prevline]
w[2] = w[3]
w[1] = w[3]
w[6] = rgb[pos]
w[5] = w[6]
w[4] = w[6]
w[9] = rgb[pos+nextline]
w[8] = w[9]
w[7] = w[9]
if i>0:
w[1] = rgb[pos+prevline-1]
w[4] = rgb[pos-1]
w[7] = rgb[pos+nextline-1]
if i<xres-1:
w[3] = rgb[pos+prevline+1]
w[6] = rgb[pos+1]
w[9] = rgb[pos+nextline+1]
pattern = 0
flag = 1
YUV1 = RGBtoYUV[w[5]]
for k in range(1,10):
if k == 5:
continue
if w[k] != w[5]:
YUV2 = RGBtoYUV[w[k]]
if (abs((YUV1 & Ymask) - (YUV2 & Ymask)) > trY ) or \
(abs((YUV1 & Umask) - (YUV2 & Umask)) > trU ) or \
(abs((YUV1 & Vmask) - (YUV2 & Vmask)) > trV ):
pattern |= flag
flag <<= 1
for k in range(1,10):
c[k] = LUT16to32[w[k]]
if pattern == 0 or pattern == 1 or pattern == 4 or pattern == 32 or pattern == 128 or pattern == 5 or pattern == 132 or pattern == 160 or pattern == 33 or pattern == 129 or pattern == 36 or pattern == 133 or pattern == 164 or pattern == 161 or pattern == 37 or pattern == 165:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 2 or pattern == 34 or pattern == 130 or pattern == 162:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 16 or pattern == 17 or pattern == 48 or pattern == 49:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 64 or pattern == 65 or pattern == 68 or pattern == 69:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 8 or pattern == 12 or pattern == 136 or pattern == 140:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 3 or pattern == 35 or pattern == 131 or pattern == 163:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 6 or pattern == 38 or pattern == 134 or pattern == 166:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 20 or pattern == 21 or pattern == 52 or pattern == 53:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 144 or pattern == 145 or pattern == 176 or pattern == 177:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 192 or pattern == 193 or pattern == 196 or pattern == 197:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 96 or pattern == 97 or pattern == 100 or pattern == 101:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 40 or pattern == 44 or pattern == 168 or pattern == 172:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 9 or pattern == 13 or pattern == 137 or pattern == 141:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 18 or pattern == 50:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 80 or pattern == 81:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 72 or pattern == 76:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 10 or pattern == 138:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 66:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 24:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 7 or pattern == 39 or pattern == 135:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 148 or pattern == 149 or pattern == 180:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 224 or pattern == 228 or pattern == 225:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 41 or pattern == 169 or pattern == 45:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 22 or pattern == 54:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 208 or pattern == 209:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 104 or pattern == 108:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 11 or pattern == 139:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 19 or pattern == 51:
if (diff(w[2], w[6])):
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_60(rgb_out, pOut, BpL, c)
PIXEL01_90(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 146 or pattern == 178:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
else:
PIXEL01_90(rgb_out, pOut, BpL, c)
PIXEL11_61(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
elif pattern == 84 or pattern == 85:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_60(rgb_out, pOut, BpL, c)
PIXEL11_90(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
elif pattern == 112 or pattern == 113:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_61(rgb_out, pOut, BpL, c)
PIXEL11_90(rgb_out, pOut, BpL, c)
elif pattern == 200 or pattern == 204:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
else:
PIXEL10_90(rgb_out, pOut, BpL, c)
PIXEL11_60(rgb_out, pOut, BpL, c)
elif pattern == 73 or pattern == 77:
if (diff(w[8], w[4])):
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_61(rgb_out, pOut, BpL, c)
PIXEL10_90(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 42 or pattern == 170:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
else:
PIXEL00_90(rgb_out, pOut, BpL, c)
PIXEL10_60(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 14 or pattern == 142:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
else:
PIXEL00_90(rgb_out, pOut, BpL, c)
PIXEL01_61(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 67:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 70:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 28:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 152:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 194:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 98:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 56:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 25:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 26 or pattern == 31:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 82 or pattern == 214:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 88 or pattern == 248:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 74 or pattern == 107:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 27:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 86:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 216:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 106:
PIXEL00_10(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 30:
PIXEL00_10(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 210:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 120:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 75:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 29:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 198:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 184:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 99:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 57:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 71:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 156:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 226:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 60:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 195:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 102:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 153:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 58:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 83:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 92:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 202:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 78:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 154:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 114:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 89:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 90:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 55 or pattern == 23:
if (diff(w[2], w[6])):
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_60(rgb_out, pOut, BpL, c)
PIXEL01_90(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 182 or pattern == 150:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
else:
PIXEL01_90(rgb_out, pOut, BpL, c)
PIXEL11_61(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
elif pattern == 213 or pattern == 212:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_60(rgb_out, pOut, BpL, c)
PIXEL11_90(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
elif pattern == 241 or pattern == 240:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_61(rgb_out, pOut, BpL, c)
PIXEL11_90(rgb_out, pOut, BpL, c)
elif pattern == 236 or pattern == 232:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
else:
PIXEL10_90(rgb_out, pOut, BpL, c)
PIXEL11_60(rgb_out, pOut, BpL, c)
elif pattern == 109 or pattern == 105:
if (diff(w[8], w[4])):
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_61(rgb_out, pOut, BpL, c)
PIXEL10_90(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 171 or pattern == 43:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
else:
PIXEL00_90(rgb_out, pOut, BpL, c)
PIXEL10_60(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 143 or pattern == 15:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
else:
PIXEL00_90(rgb_out, pOut, BpL, c)
PIXEL01_61(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 124:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 203:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 62:
PIXEL00_10(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 211:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 118:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 217:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 110:
PIXEL00_10(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 155:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 188:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 185:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 61:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 157:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 103:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 227:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 230:
PIXEL00_22(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 199:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 220:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 158:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 234:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 242:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 59:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 121:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 87:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 79:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 122:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 94:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 218:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 91:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 229:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 167:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 173:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 181:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 186:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
#elif pattern == 115: # MSVC fatal error C1061: compiler limit : blocks nested too deeply
if pattern == 115:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 93:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 206:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 205 or pattern == 201:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_10(rgb_out, pOut, BpL, c)
else:
PIXEL10_70(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 174 or pattern == 46:
if (diff(w[4], w[2])):
PIXEL00_10(rgb_out, pOut, BpL, c)
else:
PIXEL00_70(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 179 or pattern == 147:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_10(rgb_out, pOut, BpL, c)
else:
PIXEL01_70(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 117 or pattern == 116:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_10(rgb_out, pOut, BpL, c)
else:
PIXEL11_70(rgb_out, pOut, BpL, c)
elif pattern == 189:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 231:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 126:
PIXEL00_10(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 219:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 125:
if (diff(w[8], w[4])):
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_61(rgb_out, pOut, BpL, c)
PIXEL10_90(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 221:
PIXEL00_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_60(rgb_out, pOut, BpL, c)
PIXEL11_90(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
elif pattern == 207:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
else:
PIXEL00_90(rgb_out, pOut, BpL, c)
PIXEL01_61(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 238:
PIXEL00_10(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
else:
PIXEL10_90(rgb_out, pOut, BpL, c)
PIXEL11_60(rgb_out, pOut, BpL, c)
elif pattern == 190:
PIXEL00_10(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
else:
PIXEL01_90(rgb_out, pOut, BpL, c)
PIXEL11_61(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
elif pattern == 187:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
else:
PIXEL00_90(rgb_out, pOut, BpL, c)
PIXEL10_60(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 243:
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_61(rgb_out, pOut, BpL, c)
PIXEL11_90(rgb_out, pOut, BpL, c)
elif pattern == 119:
if (diff(w[2], w[6])):
PIXEL00_11(rgb_out, pOut, BpL, c)
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_60(rgb_out, pOut, BpL, c)
PIXEL01_90(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 237 or pattern == 233:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 175 or pattern == 47:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 183 or pattern == 151:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 245 or pattern == 244:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
elif pattern == 250:
PIXEL00_10(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 123:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 95:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 222:
PIXEL00_10(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 252:
PIXEL00_21(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
elif pattern == 249:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_22(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 235:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_21(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 111:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_22(rgb_out, pOut, BpL, c)
elif pattern == 63:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_21(rgb_out, pOut, BpL, c)
elif pattern == 159:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
PIXEL10_22(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 215:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
PIXEL10_21(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 246:
PIXEL00_22(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
elif pattern == 254:
PIXEL00_10(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
elif pattern == 253:
PIXEL00_12(rgb_out, pOut, BpL, c)
PIXEL01_11(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
elif pattern == 251:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
PIXEL01_10(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 239:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
PIXEL01_12(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
PIXEL11_11(rgb_out, pOut, BpL, c)
elif pattern == 127:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_20(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_20(rgb_out, pOut, BpL, c)
PIXEL11_10(rgb_out, pOut, BpL, c)
elif pattern == 191:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
PIXEL10_11(rgb_out, pOut, BpL, c)
PIXEL11_12(rgb_out, pOut, BpL, c)
elif pattern == 223:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_20(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
PIXEL10_10(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_20(rgb_out, pOut, BpL, c)
elif pattern == 247:
PIXEL00_11(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
PIXEL10_12(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
elif pattern == 255:
if (diff(w[4], w[2])):
PIXEL00_0(rgb_out, pOut, BpL, c)
else:
PIXEL00_100(rgb_out, pOut, BpL, c)
if (diff(w[2], w[6])):
PIXEL01_0(rgb_out, pOut, BpL, c)
else:
PIXEL01_100(rgb_out, pOut, BpL, c)
if (diff(w[8], w[4])):
PIXEL10_0(rgb_out, pOut, BpL, c)
else:
PIXEL10_100(rgb_out, pOut, BpL, c)
if (diff(w[6], w[8])):
PIXEL11_0(rgb_out, pOut, BpL, c)
else:
PIXEL11_100(rgb_out, pOut, BpL, c)
return rgb_out
def init_LUTs():
global LUT16to32, RGBtoYUV
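    # LUT16to32 expands each 16-bit RGB565 value into a 24-bit RGB888 value (the
    # low bits are simply left at zero).  RGBtoYUV stores an approximate integer
    # YUV encoding for every RGB565 colour; hq2x compares neighbouring pixels in
    # this YUV space when choosing an interpolation pattern.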
for i in range(65536):
LUT16to32[i] = ((i & 0xF800) << 8) | ((i & 0x07E0) << 5) | ((i & 0x001F) << 3)
for i in range(32):
for j in range(64):
for k in range(32):
r = i << 3
g = j << 2
b = k << 3
Y = (r + g + b) >> 2
u = 128 + ((r - b) >> 2)
v = 128 + ((-r + 2*g -b)>>3)
RGBtoYUV[ (i << 11) | (j << 5) | k ] = (Y<<16) | (u<<8) | v
if __name__ == '__main__':
init_LUTs()
print('scaling randam.ppm to randam2.ppm (100 times)..')
ppm = loadPPM('testdata/randam.ppm')
rgb : List[int]
for i in range(100):
rgb = hq2x(ppm.w, ppm.h, ppm.rgb)
PPM(2*ppm.w, 2*ppm.h, rgb).save('testdata/randam2.ppm')
```
#### File: python_to_cpp/Shed Skin Examples/3.sudoku3.py
```python
from typing import List, Tuple
TRIPLETS = [[0,1,2],[3,4,5],[6,7,8]]
ROW_ITER = [[(row,col) for col in range(0,9)] for row in range(0,9)]
COL_ITER = [[(row,col) for row in range(0,9)] for col in range(0,9)]
TxT_ITER = [[(row,col) for row in rows for col in cols] for rows in TRIPLETS for cols in TRIPLETS]
class soduko:
def __init__(self, start_grid) :
self.squares : List[List[List[int]]] =[ [list(range(1,10)) for col in range(0,9)] for row in range(0,9)]
if len(start_grid) != 0:
assert len(start_grid)==9, "Bad input!"
for row in range(0,9) :
self.set_row(row, start_grid[row])
self._changed=False
def copy(self) :
e : List[str] = []
soduko_copy = soduko(e) # [-TODO something-]
for row in range(0,9) :
for col in range(0,9) :
soduko_copy.squares[row][col] = self.squares[row][col][:]
soduko_copy._changed=False
return soduko_copy
def set_row(self,row, x_list) :
assert len(x_list)==9, 'not 9'
for col in range(0,9) :
x : int
try :
x = int(x_list[col])
except :
x = 0
self.set_cell(row,col,x)
def set_cell(self,row,col,x):
if self.squares[row][col] == [x] :
pass
elif x not in range(1,9+1) :
pass
else:
assert x in self.squares[row][col], "bugger2"
self.squares[row][col] = [x]
self.update_neighbours(row,col,x)
self._changed=True
class Error(Exception):
pass
def cell_exclude(self, row,col,x) :
assert x in range(1,9+1), 'inra'
if x in self.squares[row][col] :
self.squares[row][col].remove(x)
if len(self.squares[row][col]) == 0:
raise self.Error()
if len(self.squares[row][col]) == 1 :
self._changed=True
self.update_neighbours(row,col,self.squares[row][col][0])
else :
pass
return
def update_neighbours(self,set_row,set_col,x) -> None:
for row in range(0,9) :
if row != set_row :
self.cell_exclude(row,set_col,x)
for col in range(0,9) :
if col != set_col :
self.cell_exclude(set_row,col,x)
rows : List[int]
cols : List[int]
for triplet in TRIPLETS :
if set_row in triplet : rows = triplet[:]
if set_col in triplet : cols = triplet[:]
rows.remove(set_row)
cols.remove(set_col)
for row in rows :
for col in cols :
assert row != set_row or col != set_col , 'meuh'
self.cell_exclude(row,col,x)
def get_cell_digit_str(self,row,col) : # const
if len(self.squares[row][col])==1 :
return str(self.squares[row][col][0])
else :
return "0"
def __str__(self):
answer = " 123 456 789\n"
for row in range(0,9) :
answer = answer + str(row+1) + " [" + "".join([self.get_cell_digit_str(row,col).replace("0","?") for col in range(0,3)]) + "] [" + "".join([self.get_cell_digit_str(row,col).replace("0","?") for col in range(3,6)]) + "] [" + "".join([self.get_cell_digit_str(row,col).replace("0","?") for col in range(6,9)]) + "]\n"
if row+1 in [3,6] :
answer = answer + " --- --- ---\n"
return answer
def check_for_single_occurances(self):
for check_type in [ROW_ITER, COL_ITER, TxT_ITER]:
for check_list in check_type :
for x in range(1,9+1) : #1 to 9 inclusive
x_in_list : List[Tuple[int, int]] = []
for row,col in check_list :
if x in self.squares[row][col] :
x_in_list.append((row,col))
if len(x_in_list)==1 :
(row,col) = x_in_list[0] # [+TODO: (row,col) = x_in_list[0]+]
if len(self.squares[row][col]) > 1 :
self.set_cell(row,col,x)
def check_for_last_in_row_col_3x3(self):
for type_name, check_type in [("Row",ROW_ITER),("Col",COL_ITER),("3x3",TxT_ITER)]:
for check_list in check_type :
unknown_entries : List[Tuple[int, int]] = []
unassigned_values = list(range(1,9+1)) #1-9 inclusive
known_values : List[int] = []
for row,col in check_list :
if len(self.squares[row][col]) == 1 :
assert self.squares[row][col][0] not in known_values, "bugger3"
known_values.append(self.squares[row][col][0])
assert self.squares[row][col][0] in unassigned_values, "bugger4"
unassigned_values.remove(self.squares[row][col][0])
else :
unknown_entries.append((row,col))
assert len(unknown_entries) + len(known_values) == 9, 'bugger5'
assert len(unknown_entries) == len(unassigned_values), 'bugger6'
if len(unknown_entries) == 1 :
x = unassigned_values[0]
(row,col) = unknown_entries[0] # [+TODO: (row,col) = unknown_entries[0]+]
self.set_cell(row,col,x)
return
def check(self) :
self._changed=True
while self._changed:
self._changed=False
self.check_for_single_occurances()
self.check_for_last_in_row_col_3x3()
return
def one_level_supposition(self):
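        # One level of "supposition": for each unresolved cell, tentatively set every
        # remaining candidate in a copy of the grid and propagate constraints; any
        # candidate that leads to a contradiction (self.Error) is excluded from the
        # real grid, and the loop repeats while this keeps making progress.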
progress=True
while progress == True:
progress=False
for row in range(0,9) :
for col in range(0,9):
if len(self.squares[row][col]) > 1 :
bad_x : List[int] = []
for x in self.squares[row][col] :
soduko_copy = self.copy()
try:
soduko_copy.set_cell(row,col,x)
soduko_copy.check()
except self.Error as e :
bad_x.append(x)
#del soduko_copy
if len(bad_x) == 0 :
pass
elif len(bad_x) < len(self.squares[row][col]) :
for x in bad_x :
self.cell_exclude(row,col,x)
self.check()
progress=True
else :
assert False, "bugger7"
if __name__ == '__main__':
for x in range(50):
t = soduko(["800000600",
"040500100",
"070090000",
"030020007",
"600008004",
"500000090",
"000030020",
"001006050",
"004000003"])
t.check()
t.one_level_supposition()
t.check()
print(t)
```
#### File: python_to_cpp/Shed Skin Examples/5.sudoku5.py
```python
from itertools import product
from typing import Dict, List, Set, Tuple
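# exact_cover() builds the two Algorithm X structures: x maps every constraint
# column to the set of candidate rows that satisfy it, while y (built by the
# caller) maps each candidate (row, column, digit) to the four constraints it covers.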
def exact_cover(x1, y):
x = dict((j, set()) for j in x1)
for i, row in y.items():
for j in row:
x[j].add(i)
return (x, y)
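# select()/deselect() are the cover and uncover steps of Knuth's Algorithm X:
# selecting row r removes every column that r satisfies (together with all rows
# that clash with r), and deselect() restores them in reverse order when the
# solver backtracks.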
def select(x, y, r):
cols : List[Set[Tuple[int, int, int]]] = []
for j in y[r]:
for i in x[j]:
for k in y[i]:
if k != j:
x[k].remove(i)
cols.append(x.pop(j))
return cols
def solve(x, y, solution):
if not x:
yield list(solution)
else:
#c = min(x, key=lambda c: len(x[c])) # shedskin doesn't support closures!
c = min([(len(x[c]), c) for c in x])[1]
for r in list(x[c]):
solution.append(r)
cols = select(x, y, r)
for solution in solve(x, y, solution):
yield solution
deselect(x, y, r, cols)
solution.pop()
def solve_sudoku(rr, cc, grid):
"""An efficient Sudoku solver using Algorithm X."""
nn = rr * cc
x1 = ([("rc", rc) for rc in product(range(nn), range(nn))] +
[("rn", rn) for rn in product(range(nn), range(1, nn + 1))] +
[("cn", cn) for cn in product(range(nn), range(1, nn + 1))] +
[("bn", bn) for bn in product(range(nn), range(1, nn + 1))])
    y : Dict[Tuple[int, int, int], List[Tuple[str, Tuple[int, int]]]] = {}
for r, c, n in product(range(nn), range(nn), range(1, nn + 1)):
b = (r // rr) * rr + (c // cc) # Box number
y[(r, c, n)] = [
("rc", (r, c)),
("rn", (r, n)),
("cn", (c, n)),
("bn", (b, n))]
    x : Dict[Tuple[str, Tuple[int, int]], Set[Tuple[int, int, int]]]
(x, y) = exact_cover(x1, y)
for i, row in enumerate(grid):
for j, n in enumerate(row):
if n:
select(x, y, (i, j, n))
g : List[List[List[int]]] = []
for solution in solve(x, y, []):
for r, c, n in solution:
grid[r][c] = n
g.append(grid)
return g
def deselect(x, y, r, cols):
for j in reversed(y[r]):
x[j] = cols.pop()
for i in x[j]:
for k in y[i]:
if k != j:
x[k].add(i)
if __name__ == "__main__":
grid = [
[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9],
]
for solution in solve_sudoku(3, 3, grid):
print("\n".join(str(s) for s in solution))
```
#### File: python_to_cpp/Shed Skin Examples/9.yopyra.py
```python
import math
import sys
from typing import List, Optional
MAX_DIST = 1999999999.0 # 9999999999
PI_SOBRE_180 = 0.017453292
PEQUENO = 0.000000001
class Vector:
x : float
y : float
z : float
def __init__(self, vx=0.0, vy=0.0, vz=0.0):
(self.x, self.y, self.z) = (vx, vy, vz)
# def set(self, vx, vy, vz):
# self.x, self.y, self.z = vx, vy, vz
def pEscalar(self, vv):
return (self.x * vv.x + self.y * vv.y + self.z * vv.z)
def pVectorial(self, vv):
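        # Cross product; note the operand order: this returns vv x self.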
r = Vector()
r.x = vv.y*self.z - vv.z*self.y
r.y = vv.z*self.x - vv.x*self.z
r.z = vv.x*self.y - vv.y*self.x
return r
def modulo(self):
return math.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def normalizar(self):
m = self.modulo()
if m != 0.0:
self.x /= m; self.y /= m; self.z /= m
return self
def __add__(self, other):
return Vector(self.x+other.x, self.y+other.y, self.z+other.z)
def __sub__(self, other):
return Vector(self.x-other.x, self.y-other.y, self.z-other.z)
def __mul__(self, other):
return Vector(self.x*other, self.y*other, self.z*other)
# def __idiv__(self, other):
# return Vector(self.x / float(other), self.y / float(other), self.z / float(other))
# def __iadd__(self, other):
# return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
# def __repr__(self):
# return "<V: %.2f %.2f %.2f>" % (self.x, self.y, self.z) # for debugging
class Color:
r : float
g : float
b : float
def __init__(self, vr=0.0, vg=0.0, vb=0.0):
(self.r, self.g, self.b) = (vr, vg, vb)
def __add__(self, other):
return Color(self.r+other.r, self.g+other.g, self.b+other.b)
# def __iadd__(self, other):
# return Color(self.r+other.r, self.g+other.g, self.b+other.b)
def __mul__(self, other):
return Color(self.r*other, self.g*other, self.b*other)
# def __imul__(self, other):
# return Color(self.r*other, self.y*other, self.z*other)
def __str__(self):
return "%d %d %d" % (int(max(0.0, min(self.r*255.0, 255.0))),
int(max(0.0, min(self.g*255.0, 255.0))),
int(max(0.0, min(self.b*255.0, 255.0))))
# def __repr__(self):
# return "<C: %.2f %.2f %.2f>" % (self.r, self.g, self.b) # for debugging
class Luz:
posicion : Vector
color : Color
tipo : str
def __init__(self, posicion, color, tipo):
self.posicion = posicion
self.color = color
self.tipo = tipo
# def __repr__(self):
# return "<L: %s %s %s>" % (self.posicion, self.color, self.tipo) # for debugging
class Material:
color : Color
cDifuso : float
cEspecular : float
dEspecular : float
cReflexion : float
cTransmitividad : float
iRefraccion : float
def __init__(self, color, cDifuso=0.0, cEspecular=0.0, dEspecular=0.0,
cReflexion=0.0, cTransmitividad=0.0, iRefraccion=0.0):
self.color = color
self.cDifuso = cDifuso
self.cEspecular = cEspecular
self.dEspecular = dEspecular
self.cReflexion = cReflexion
self.cTransmitividad = cTransmitividad
self.iRefraccion = iRefraccion
# def __repr__(self): # for debugging
# return "<M: %r %.2f %.2f %.2f %.2f %.2f %.2f>" % (
# self.color, self.cDifuso, self.cEspecular, self.dEspecular,
# self.cReflexion, self.cTransmitividad, self.iRefraccion)
class Rayo: # &
origen : Vector
direccion : Vector
disInter : float
objInter : Optional['Cuerpo'] # &
def __init__(self, origen, direccion):
self.origen = origen
self.direccion = direccion
self.disInter = MAX_DIST
self.objInter = None
class Cuerpo:
tipo : str
material : int
def init(self, tipo, material):
self.tipo = tipo
self.material = material
def intersecta(self, r : Rayo) -> bool:
raise NotImplementedError()
def getNormal(self, punto : Vector) -> Vector:
raise NotImplementedError()
class Esfera(Cuerpo):
posicion : Vector
radio : float
def __init__(self, material, posicion, radio):
self.init('esfera', material)
self.posicion = posicion
self.radio = radio
def intersecta(self, r : Rayo):
esfera_rayo = self.posicion - r.origen
v = esfera_rayo.pEscalar(r.direccion)
if v - self.radio > r.disInter: return False
distChoque = self.radio*self.radio + v*v - esfera_rayo.x*esfera_rayo.x - \
esfera_rayo.y*esfera_rayo.y - esfera_rayo.z*esfera_rayo.z
if distChoque < 0.0: return False
distChoque = v - math.sqrt(distChoque)
if distChoque > r.disInter or distChoque < 0.0: return False
r.disInter = distChoque
r.objInter = self
return True
def getNormal(self, punto : Vector):
normal = punto - self.posicion
return normal.normalizar()
# def __repr__(self): # for debugging
# return "<S: %d %s %.2f>" % (self.material, self.posicion, self.radio)
class Plano(Cuerpo):
normal : Vector
distancia : float
def __init__(self, material, normal, distancia):
self.init('plano', material)
self.normal = normal
self.normal.normalizar()
self.distancia = distancia
def intersecta(self, r : Rayo):
v = self.normal.pEscalar(r.direccion)
if v == 0.0: return False
distChoque = -(self.normal.pEscalar(r.origen) + self.distancia) / v
        if distChoque < 0.0: return False # Negative ray direction
        if distChoque > r.disInter: return False # Not the closest intersection
r.disInter = distChoque
r.objInter = self
return True
def getNormal(self, punto : Vector):
return self.normal
# def __repr__(self): # for debugging
# return "<P: %d %s %.2f>" % (self.material, self.normal, self.distancia)
class Scene:
endline : int
posCamara : Vector
lookCamara : Vector
upCamara : Vector
anchoGrid : int
altoGrid : int
look : Vector
Vhor : Vector
Vver : Vector
Vp : Vector
def __init__(self, scene_filename):
lines = [l.split() for l in open(scene_filename).readlines() if l.strip() != '' and l.strip()[0] != "#"]
self.lObjetos : List[Cuerpo] = []
self.lLuces : List[Luz] = []
self.lMateriales : List[Material] = []
# defaults
self.imgAncho = 320
self.imgAlto = 200
self.profTrazado = 3 # bounces
        self.oversampling = 1 # 1 means there is no oversampling
self.campoVision = 60.0
self.startline = 0 # Start rendering line
self.endline = self.imgAlto - 1 # End rendering line
for line in lines:
word = line[0]
line = line[1:]
if word == "size":
self.imgAncho = int(line[0])
self.imgAlto = int(line[1])
self.endline = self.imgAlto - 1 # End rendering line
elif word == "nbounces":
self.profTrazado = int(line[0]) # n. bounces
elif word == "oversampling":
self.oversampling = int(line[0])
elif word == "vision":
self.campoVision = float(line[0])
elif word == "renderslice":
self.startline = max(0, int(line[0])) # Start rendering line
self.endline = min(self.imgAlto-1, int(line[1])) # End rendering line
elif word == "posCamara":
self.posCamara = self.parse_vector(line)
elif word == "lookCamara":
self.lookCamara = self.parse_vector(line)
elif word == "upCamara":
self.upCamara = self.parse_vector(line)
elif word == "sphere":
sph = Esfera( int(line[0]), self.parse_vector(line[1:4]), float(line[-1]) )
self.lObjetos.append(sph)
elif word == "plano":
pl = Plano( int(line[0]), self.parse_vector(line[1:4]), float(line[-1]) )
self.lObjetos.append(pl)
elif word == "light":
light = Luz(self.parse_vector(line[0:3]), self.parse_color(line[3:6]), line[-1])
self.lLuces.append(light)
elif word == "material":
mat = self.parse_material(line)
self.lMateriales.append(mat)
        # initialise the raytracer -------------------------------
self.anchoGrid = self.imgAncho * self.oversampling
self.altoGrid = self.imgAlto * self.oversampling
self.look = self.lookCamara - self.posCamara
self.Vhor = self.look.pVectorial(self.upCamara)
self.Vhor.normalizar()
self.Vver = self.look.pVectorial(self.Vhor)
self.Vver.normalizar()
fl = self.anchoGrid / (2 * math.tan((0.5 * self.campoVision) * PI_SOBRE_180))
Vp = self.look
Vp.normalizar()
Vp.x = Vp.x * fl - 0.5 * (self.anchoGrid * self.Vhor.x + self.altoGrid * self.Vver.x)
Vp.y = Vp.y * fl - 0.5 * (self.anchoGrid * self.Vhor.y + self.altoGrid * self.Vver.y)
Vp.z = Vp.z * fl - 0.5 * (self.anchoGrid * self.Vhor.z + self.altoGrid * self.Vver.z)
self.Vp = Vp
# Auxiliary methods
def parse_vector(self, line):
return Vector(float(line[0]), float(line[1]), float(line[2]))
def parse_color(self, line):
return Color(float(line[0]), float(line[1]), float(line[2]))
def parse_material(self, line):
f = [float(x) for x in line[3:]]
return Material(self.parse_color(line[0:3]), f[0], f[1], f[2], f[3], f[4], f[5])
scene_namefile = 'testdata/scene.txt'
scene = Scene(scene_namefile)
# ----------------- Compute the shadow of a ray ------------
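# The shadow factor starts at 1.0 (fully lit) and is multiplied by the
# transmissivity of every object the shadow ray intersects, so opaque occluders
# drive it towards zero.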
def calculaSombra(r : Rayo, objChoque):
    sombra = 1.0 # Initially there is no shadow
for obj in scene.lObjetos:
r.objInter = None
r.disInter = MAX_DIST
if obj.intersecta(r) and obj is not objChoque:
sombra *= scene.lMateriales[obj.material].cTransmitividad
return sombra
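# trazar() is the recursive ray-tracing step: it finds the nearest intersection,
# accumulates ambient, diffuse and specular contributions from every light, and
# then spawns reflection and refraction rays until scene.profTrazado bounces have
# been taken.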
def trazar(r : Rayo, prof) -> Color:
c = Color()
    for obj in scene.lObjetos: # Try all the objects
obj.intersecta(r)
if r.objInter is not None:
matIndex = r.objInter.material
pInterseccion = r.origen + r.direccion * r.disInter
vIncidente = pInterseccion - r.origen
vVueltaOrigen = r.direccion * -1.0
vVueltaOrigen.normalizar()
vNormal = r.objInter.getNormal(pInterseccion)
for luz in scene.lLuces:
if luz.tipo == 'ambiental':
c += luz.color
elif luz.tipo == 'puntual':
dirLuz = luz.posicion - pInterseccion
dirLuz.normalizar()
rayoLuz = Rayo(pInterseccion, dirLuz)
sombra = calculaSombra(rayoLuz, r.objInter)
NL = vNormal.pEscalar(dirLuz)
if NL > 0.0:
if scene.lMateriales[matIndex].cDifuso > 0.0: # ------- Difuso
colorDifuso = luz.color * scene.lMateriales[matIndex].cDifuso * NL
colorDifuso.r *= scene.lMateriales[matIndex].color.r * sombra
colorDifuso.g *= scene.lMateriales[matIndex].color.g * sombra
colorDifuso.b *= scene.lMateriales[matIndex].color.b * sombra
c += colorDifuso
if scene.lMateriales[matIndex].cEspecular > 0.0: # ----- Especular
rr = (vNormal * 2 * NL) - dirLuz
espec = vVueltaOrigen.pEscalar(rr)
if espec > 0.0:
espec = scene.lMateriales[matIndex].cEspecular * \
math.pow(espec, scene.lMateriales[matIndex].dEspecular)
colorEspecular = luz.color * espec * sombra
c += colorEspecular
if prof < scene.profTrazado:
if scene.lMateriales[matIndex].cReflexion > 0.0: # -------- Reflexion
t = vVueltaOrigen.pEscalar(vNormal)
if t > 0.0:
vDirRef = (vNormal * 2 * t) - vVueltaOrigen
vOffsetInter = pInterseccion + vDirRef * PEQUENO
rayoRef = Rayo(vOffsetInter, vDirRef)
c += trazar (rayoRef, prof+1.0) * scene.lMateriales[matIndex].cReflexion
if scene.lMateriales[matIndex].cTransmitividad > 0.0: # ---- Refraccion
RN = vNormal.pEscalar(vIncidente * -1.0)
n1 : float
n2 : float
vIncidente.normalizar()
if vNormal.pEscalar(vIncidente) > 0.0:
vNormal = vNormal * -1.0
RN = -RN
n1 = scene.lMateriales[matIndex].iRefraccion
n2 = 1.0
else:
n2 = scene.lMateriales[matIndex].iRefraccion
n1 = 1.0
if n1 != 0.0 and n2 != 0.0:
par_sqrt = math.sqrt(1 - (n1*n1/n2*n2)*(1-RN*RN))
vDirRefrac = vIncidente + (vNormal * RN) * (n1/n2) - (vNormal * par_sqrt)
vOffsetInter = pInterseccion + vDirRefrac * PEQUENO
rayoRefrac = Rayo(vOffsetInter, vDirRefrac)
c += trazar(rayoRefrac, prof+1.0) * scene.lMateriales[matIndex].cTransmitividad
return c
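# renderPixel() shoots oversampling * oversampling rays through each output pixel
# and averages the resulting colours.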
def renderPixel(x, y):
c = Color()
x *= scene.oversampling
y *= scene.oversampling
for i in range(scene.oversampling):
for j in range(scene.oversampling):
direc = Vector()
direc.x = x * scene.Vhor.x + y * scene.Vver.x + scene.Vp.x
direc.y = x * scene.Vhor.y + y * scene.Vver.y + scene.Vp.y
direc.z = x * scene.Vhor.z + y * scene.Vver.z + scene.Vp.z
direc.normalizar()
r = Rayo(scene.posCamara, direc)
c += trazar(r, 1.0)
y += 1
x += 1
srq_oversampling = scene.oversampling * scene.oversampling
c.r /= srq_oversampling
c.g /= srq_oversampling
c.b /= srq_oversampling
return c
if __name__ == '__main__':
print("Rendering: " + scene_namefile)
fileout = open(scene_namefile+".ppm", "w", newline = "\n")
fileout.write("P3\n")
fileout.write(str(scene.imgAncho) + ' ' + str(scene.endline - scene.startline + 1) + "\n")
fileout.write("255\n")
print("Line (from %d to %d):" % (scene.startline, scene.endline), end=' ')
for y in range(scene.startline, scene.endline+1):
for x in range(scene.imgAncho):
fileout.write(str(renderPixel(x, y)) + ' ')
fileout.write("\n")
print(y, end=' ')
sys.stdout.flush()
#fileout.close()
```
#### File: tests/python_to_cpp/syntax_highlighter_for_pqmarkup.py
```python
import python_to_11l.tokenizer
import _11l_to_cpp.tokenizer
from typing import List, Tuple
css = R'''<style>
span.keyword {color: #0000FF; font-weight: bold;}
span.identifier {color: #00009F;}
span.string-literal {color: #800000;}
span.numeric-literal {color: #008000;}
span.constant {color: #008000;}
span.comment {color: #808080;}
</style>'''
cat_to_class_python = {
python_to_11l.tokenizer.Token.Category.NAME : 'identifier',
python_to_11l.tokenizer.Token.Category.KEYWORD : 'keyword',
python_to_11l.tokenizer.Token.Category.CONSTANT : 'constant',
python_to_11l.tokenizer.Token.Category.OPERATOR_OR_DELIMITER : '',
python_to_11l.tokenizer.Token.Category.NUMERIC_LITERAL : 'numeric-literal',
python_to_11l.tokenizer.Token.Category.STRING_LITERAL : 'string-literal',
python_to_11l.tokenizer.Token.Category.INDENT : '',
python_to_11l.tokenizer.Token.Category.DEDENT : '',
python_to_11l.tokenizer.Token.Category.STATEMENT_SEPARATOR : '',
}
cat_to_class_11l = {
_11l_to_cpp.tokenizer.Token.Category.NAME : 'identifier',
_11l_to_cpp.tokenizer.Token.Category.KEYWORD : 'keyword',
_11l_to_cpp.tokenizer.Token.Category.CONSTANT : 'constant',
_11l_to_cpp.tokenizer.Token.Category.DELIMITER : '',
_11l_to_cpp.tokenizer.Token.Category.OPERATOR : '',
_11l_to_cpp.tokenizer.Token.Category.NUMERIC_LITERAL : 'numeric-literal',
_11l_to_cpp.tokenizer.Token.Category.STRING_LITERAL : 'string-literal',
_11l_to_cpp.tokenizer.Token.Category.STRING_CONCATENATOR : '', # why '' and not 'string-literal': because this is rather operator than string literal
_11l_to_cpp.tokenizer.Token.Category.SCOPE_BEGIN : '',
_11l_to_cpp.tokenizer.Token.Category.SCOPE_END : '',
_11l_to_cpp.tokenizer.Token.Category.STATEMENT_SEPARATOR : '',
}
def is_lang_supported(lang):
return lang in ('11l', 'Python')
class Error(Exception):
message : str
pos : int
def __init__(self, message, pos):
self.message = message
self.pos = pos
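# highlight() walks the token stream produced by the corresponding tokenizer,
# interleaves any comments the tokenizer reported, and wraps each token in a
# <span> whose CSS class is taken from the category tables above (plus a few
# special cases for 11l keywords and string-literal quote characters).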
def highlight(lang, source):
writepos = 0
comments : List[Tuple[int, int]] = []
res = ''
def html_escape(s):
        return s.replace('&', '&amp;').replace('<', '&lt;')
if lang == 'Python':
try:
for token in python_to_11l.tokenizer.tokenize(source, comments = comments) + [python_to_11l.tokenizer.Token(len(source), len(source), python_to_11l.tokenizer.Token.Category.STATEMENT_SEPARATOR)]:
while len(comments) and comments[0][0] < token.start:
res += html_escape(source[writepos:comments[0][0]])
writepos = comments[0][1]
res += '<span class="comment">' + html_escape(source[comments[0][0]:comments[0][1]]) + '</span>'
comments.pop(0)
res += html_escape(source[writepos:token.start])
writepos = token.end
css_class = cat_to_class_python[token.category]
if css_class != '':
res += '<span class="' + css_class + '">' + html_escape(token.value(source)) + '</span>'
else:
res += html_escape(token.value(source))
except python_to_11l.tokenizer.Error as e:
raise Error(e.message, e.pos)
else:
assert(lang == '11l')
try:
for token in _11l_to_cpp.tokenizer.tokenize(source, comments = comments) + [_11l_to_cpp.tokenizer.Token(len(source), len(source), _11l_to_cpp.tokenizer.Token.Category.STATEMENT_SEPARATOR)]:
while len(comments) and comments[0][0] < token.start:
res += html_escape(source[writepos:comments[0][0]])
writepos = comments[0][1]
res += '<span class="comment">' + html_escape(source[comments[0][0]:comments[0][1]]) + '</span>'
comments.pop(0)
res += html_escape(source[writepos:token.start])
writepos = token.end
tokstr = html_escape(token.value(source))
css_class : str
if (token.category == _11l_to_cpp.tokenizer.Token.Category.NAME and tokstr in ('V', 'П', 'var', 'перем')) \
or (token.category == _11l_to_cpp.tokenizer.Token.Category.OPERATOR and tokstr in ('C', 'С', 'in', '!C', '!С', '!in')) \
or tokstr.split('.')[0] in _11l_to_cpp.tokenizer.keywords:
css_class = 'keyword'
else:
css_class = cat_to_class_11l[token.category]
if css_class != '':
if token.category == _11l_to_cpp.tokenizer.Token.Category.STRING_LITERAL:
if tokstr[0] == "'":
apos = 1
while tokstr[apos] == "'":
apos += 1
assert(tokstr[:apos*2+1] == "'"*apos + '‘'*apos + '‘')
tokstr = '<span style="opacity: 0.25">' + tokstr[:apos*2] + '</span>' + tokstr[apos*2:]
if tokstr[-1] == "'":
apos = 1
while tokstr[-(apos+1)] == "'":
apos += 1
assert(tokstr[-(apos*2+1):] == '’' + '’'*apos + "'"*apos)
tokstr = tokstr[:-(apos*2)] + '<span style="opacity: 0.25">' + tokstr[-(apos*2):] + '</span>'
res += '<span class="' + css_class + '">' + tokstr + '</span>'
else:
res += tokstr
except _11l_to_cpp.tokenizer.Error as e:
raise Error(e.message, e.pos)
return res
``` |
{
"source": "11mariom/ansible-bender",
"score": 3
} |
#### File: ansible-bender/ansible_bender/exceptions.py
```python
class AbBuildUnsuccesful(Exception):
""" Build was not successful """
def __init__(self, msg, output):
self.msg = msg
self.output = output
def __str__(self):
return "%s" % self.msg
```
#### File: tests/integration/test_conf.py
```python
import os
import jsonschema
import pytest
from ansible_bender.utils import set_logging
from ansible_bender.conf import ImageMetadata, Build
from ansible_bender.core import PbVarsParser
from tests.spellbook import b_p_w_vars_path, basic_playbook_path, full_conf_pb_path, multiplay_path
def test_expand_pb_vars():
p = PbVarsParser(b_p_w_vars_path)
data = p.expand_pb_vars()
assert data["base_image"] == "docker.io/library/python:3-alpine"
assert data["ansible_extra_args"] == "-vvv"
playbook_dir = os.path.dirname(b_p_w_vars_path)
assert data["working_container"]["volumes"] == [f"{playbook_dir}:/src:Z"]
assert data["target_image"]["name"] == "challet"
assert data["target_image"]["labels"] == {"x": "y"}
assert data["target_image"]["environment"] == {"asd": playbook_dir}
def test_b_m_empty():
""" test that build and metadata are 'empty' when there are no vars """
p = PbVarsParser(basic_playbook_path)
b, m = p.get_build_and_metadata()
b.playbook_path = "/somewhere.yaml"
b.base_image = "fedora:29"
b.playbook_path = "/asd.yaml"
b.target_image = "lolz"
b.validate()
m.validate()
assert isinstance(b, Build)
assert isinstance(m, ImageMetadata)
assert b.cache_tasks is True
assert b.layering is True
def test_set_all_params():
""" test that we can set all the parameters """
p = PbVarsParser(full_conf_pb_path)
b, m = p.get_build_and_metadata()
b.playbook_path = "/somewhere.yaml"
b.validate()
m.validate()
assert isinstance(b, Build)
assert isinstance(m, ImageMetadata)
assert b.base_image == "mona_lisa"
assert b.layering
assert not b.cache_tasks
assert b.ansible_extra_args == "--some --args"
assert b.build_volumes == ["/c:/d"]
assert b.target_image == "funky-mona-lisa"
assert m.env_vars == {"z": "value"}
assert m.volumes == ["/a"]
assert m.working_dir == "/workshop"
assert m.labels == {"x": "y"}
assert m.annotations == {"bohemian": "rhapsody"}
assert m.cmd == "command -x -y z"
assert m.user == "leonardo"
def test_validation_err_ux():
""" Test that validation errors are useful """
p = PbVarsParser(basic_playbook_path)
b, m = p.get_build_and_metadata()
with pytest.raises(jsonschema.exceptions.ValidationError) as ex:
b.validate()
s = str(ex.value)
assert "is not of type" in s
assert "Failed validating 'type' in schema" in s
def test_multiplay(caplog):
set_logging()
p = PbVarsParser(multiplay_path)
b, m = p.get_build_and_metadata()
assert b.target_image != "nope"
assert "Variables are loaded only from the first play." == caplog.records[0].msg
assert "no bender data found in the playbook" == caplog.records[1].msg
```
#### File: tests/unit/test_ansibla.py
```python
import importlib
from functools import partial
from pathlib import Path
import pytest
from flexmock import flexmock
from ansible_bender.core import PbVarsParser
def mock_read_text(return_val=None, raise_exc=False):
if raise_exc:
def _f():
raise FileNotFoundError()
flexmock(Path, read_text=_f)
else:
flexmock(Path, read_text=lambda: return_val)
def mock_import_module(raise_exc=False):
if raise_exc:
def _f(name, package=None):
raise ModuleNotFoundError()
flexmock(importlib, import_module=_f)
else:
flexmock(importlib, import_module=lambda name: None)
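# Each parametrised case pairs a mocked Path.read_text() result ("1", "0", or a
# missing file) with a mocked import of the selinux bindings; the workaround
# check is expected to raise only when the import itself fails.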
@pytest.mark.parametrize("mock_r_t,mock_i_m,should_raise", (
(
partial(mock_read_text, "1"),
partial(mock_import_module, False),
False
),
(
partial(mock_read_text, "1"),
partial(mock_import_module, True),
True
),
(
partial(mock_read_text, "0"),
partial(mock_import_module, False),
False
),
(
partial(mock_read_text, "0"),
partial(mock_import_module, True),
True
),
(
partial(mock_read_text, None, True),
partial(mock_import_module, False),
False
),
))
def test_ansible_selinux_workaround(mock_r_t, mock_i_m, should_raise):
mock_r_t()
mock_i_m()
p = PbVarsParser("")
if should_raise:
with pytest.raises(RuntimeError) as ex:
p._check_selinux_iz_gud()
assert "libselinux" in str(ex.value)
else:
p._check_selinux_iz_gud()
``` |
{
"source": "11mariom/pastebin",
"score": 3
} |
#### File: pastebin/tests/test_hello.py
```python
import unittest
from unittest import TestCase
from pastebin import pastebin
class TestHello(TestCase):
def setUp(self):
#pastebin.app.config['TESTING'] = True
self.app = pastebin.app.test_client()
def test_hello(self):
rv = self.app.get("/")
self.assertEqual(b'hello world', rv.data)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "11michalis11/AmbulanceDecisionGame",
"score": 3
} |
#### File: AmbulanceDecisionGame/tests/test_comparisons.py
```python
import numpy as np
import pytest
from hypothesis import given, settings
from hypothesis.strategies import floats, integers
from ambulance_game.comparisons import (
get_heatmaps,
get_mean_blocking_time_from_simulation_state_probabilities,
get_mean_waiting_time_from_simulation_state_probabilities,
get_proportion_within_target_from_simulation_state_probabilities,
plot_output_comparisons,
)
NUMBER_OF_DIGITS_TO_ROUND = 8
def test_get_heatmaps_example_1():
"""
Test to ensure that the probabilities generated by the simulation and the
Markov model are as expected.
"""
sim_calculated_probs, markov_calculated_probs, diff_calculated_probs = get_heatmaps(
lambda_2=2,
lambda_1=1,
mu=2,
num_of_servers=2,
threshold=3,
system_capacity=5,
buffer_capacity=5,
seed_num=0,
runtime=100,
num_of_trials=10,
linear_positioning=False,
algebraic_function=np.linalg.solve,
)
sim_expected_probs, markov_expected_probs, diff_expected_probs = (
np.array(
[
[0.15657134, 0.23662749, 0.16391817, 0.13420543, 0.02070944, 0.0036757],
[np.nan, np.nan, np.nan, 0.08165133, 0.02249408, 0.00498913],
[np.nan, np.nan, np.nan, 0.05124684, 0.01655216, 0.00379816],
[np.nan, np.nan, np.nan, 0.03741792, 0.01048049, 0.00129502],
[np.nan, np.nan, np.nan, 0.02189239, 0.00640466, 0.00116072],
[np.nan, np.nan, np.nan, 0.01507139, 0.00871438, 0.00112376],
]
),
np.array(
[
[
0.15459909,
0.23189863,
0.17392397,
0.13044298,
0.02059626,
0.00343271,
],
[np.nan, np.nan, np.nan, 0.07723598, 0.01942191, 0.00438122],
[np.nan, np.nan, np.nan, 0.05051955, 0.01503237, 0.0039658],
[np.nan, np.nan, np.nan, 0.03475886, 0.01107021, 0.00316697],
[np.nan, np.nan, np.nan, 0.02449802, 0.0080307, 0.00239411],
[np.nan, np.nan, np.nan, 0.01746141, 0.00957775, 0.00359149],
]
),
np.array(
[
[
0.00197225,
0.00472886,
-0.0100058,
0.00376245,
0.00011318,
0.00024299,
],
[np.nan, np.nan, np.nan, 0.00441536, 0.00307217, 0.0006079],
[np.nan, np.nan, np.nan, 0.00072728, 0.00151979, -0.00016765],
[np.nan, np.nan, np.nan, 0.00265906, -0.00058972, -0.00187194],
[np.nan, np.nan, np.nan, -0.00260564, -0.00162603, -0.00123339],
[np.nan, np.nan, np.nan, -0.00239002, -0.00086337, -0.00246773],
]
),
)
assert np.allclose(sim_calculated_probs, sim_expected_probs, equal_nan=True)
assert np.allclose(markov_calculated_probs, markov_expected_probs, equal_nan=True)
assert np.allclose(diff_calculated_probs, diff_expected_probs, equal_nan=True)
def test_get_heatmaps_example_2():
"""
Test to ensure that the probabilities generated by the simulation and the
Markov model are as expected.
"""
sim_calculated_probs, markov_calculated_probs, diff_calculated_probs = get_heatmaps(
lambda_2=1.5,
lambda_1=1.5,
mu=4,
num_of_servers=1,
threshold=2,
system_capacity=6,
buffer_capacity=1,
seed_num=2,
runtime=150,
num_of_trials=5,
linear_positioning=True,
algebraic_function=np.linalg.solve,
)
sim_expected_probs, markov_expected_probs, diff_expected_probs = (
np.array(
[
[
0.31415055,
0.22936987,
0.17661768,
0.04897618,
0.01226239,
0.00191243,
0.00063125,
],
[
np.nan,
np.nan,
0.09676506,
0.06857442,
0.0296508,
0.01747934,
0.00361002,
],
]
),
np.array(
[
[
0.3236358,
0.24272685,
0.18204514,
0.04553079,
0.01141196,
0.00289688,
0.00079006,
],
[
np.nan,
np.nan,
0.09100306,
0.05686228,
0.02698544,
0.01150214,
0.00460958,
],
]
),
np.array(
[
[
-0.00948526,
-0.01335698,
-0.00542746,
0.00344539,
0.00085043,
-0.00098445,
-0.00015881,
],
[
np.nan,
np.nan,
0.005762,
0.01171214,
0.00266535,
0.0059772,
-0.00099956,
],
]
),
)
assert np.allclose(sim_calculated_probs, sim_expected_probs, equal_nan=True)
assert np.allclose(markov_calculated_probs, markov_expected_probs, equal_nan=True)
assert np.allclose(diff_calculated_probs, diff_expected_probs, equal_nan=True)
def test_get_mean_waiting_time_from_simulation_state_probabilities():
"""
Test for the mean waiting time using the Markov formula and the simulation
state probabilities
"""
mean_waiting_time = get_mean_waiting_time_from_simulation_state_probabilities(
lambda_2=0.2,
lambda_1=0.2,
mu=0.2,
num_of_servers=3,
threshold=4,
system_capacity=10,
buffer_capacity=10,
class_type=0,
seed_num=0,
runtime=2000,
num_of_trials=1,
)
assert round(mean_waiting_time, NUMBER_OF_DIGITS_TO_ROUND) == round(
1.3988142785295379, NUMBER_OF_DIGITS_TO_ROUND
)
def test_get_mean_blocking_time_from_simulation_state_probabilities():
"""
Test for the mean blocking time using the Markov formula and the simulation
state probabilities
"""
mean_blocking_time = get_mean_blocking_time_from_simulation_state_probabilities(
lambda_2=5,
lambda_1=6,
mu=2,
num_of_servers=7,
threshold=5,
system_capacity=15,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=1000,
)
assert round(mean_blocking_time, NUMBER_OF_DIGITS_TO_ROUND) == round(
0.6247616245889802, NUMBER_OF_DIGITS_TO_ROUND
)
def test_get_proportion_within_target_from_simulation_state_probabilities():
"""
Test for the proportion of customers that are within the target waiting
time using the Markov formula and the simulation state probabilities
"""
mean_proportion = get_proportion_within_target_from_simulation_state_probabilities(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=3,
threshold=7,
system_capacity=10,
buffer_capacity=5,
target=4,
class_type=0,
seed_num=0,
num_of_trials=2,
runtime=100,
)
assert round(mean_proportion, NUMBER_OF_DIGITS_TO_ROUND) == round(
0.9605868280871762, NUMBER_OF_DIGITS_TO_ROUND
)
def test_plot_output_comparisons_waiting_class_1():
"""
Test that the values to be plotted by the function for the mean waiting time
    of class 1 individuals are as expected when using:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_times_using_markov_formula,
markov_times,
simulation_times,
) = plot_output_comparisons(
lambda_1=3,
lambda_2=4,
mu=1,
num_of_servers=3,
threshold=6,
system_capacity=15,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="waiting",
class_type=0,
plot_over="mu",
max_parameter_value=5,
accuracy=5,
)
expected_range_space = [1, 2, 3, 4, 5]
expected_sim_times_using_formula = [
2.377120739790196,
0.7785480327193071,
0.21825612502962743,
0.0633853178321979,
0.02219807426322811,
]
expected_markov_times = [
2.666380625245361,
0.7505484517766888,
0.201787897652177,
0.06072282228882266,
0.024434222615639434,
]
expected_sim_times = [
[2.100498503091243],
[0.8060558886538617],
[0.24673859227916475],
[0.06673599211050996],
[0.026042424326131127],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_times_using_markov_formula, expected_sim_times_using_formula
)
assert np.allclose(markov_times, expected_markov_times)
assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_waiting_class_2():
"""
Test that the values to be plotted by the function for the mean waiting time
    of class 2 individuals are as expected when using:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_times_using_markov_formula,
markov_times,
simulation_times,
) = plot_output_comparisons(
lambda_1=3,
lambda_2=4,
mu=1,
num_of_servers=3,
threshold=6,
system_capacity=10,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="waiting",
class_type=1,
plot_over="system_capacity",
max_parameter_value=18,
accuracy=5,
)
expected_range_space = [
10,
12,
14,
16,
18,
]
expected_sim_times_using_formula = [
0.9518119232230957,
0.9314674163209273,
0.8815151220881429,
0.9520317760341209,
0.9522967196743792,
]
expected_markov_times = [
0.9996062485853283,
0.9996071004169865,
0.9996071216135696,
0.9996071221161823,
0.9996071221275438,
]
expected_sim_times = [
[0.8587675978623437],
[0.9410302653948986],
[0.6712503805879015],
[0.7596612894701423],
[0.7466921877207321],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_times_using_markov_formula, expected_sim_times_using_formula
)
assert np.allclose(markov_times, expected_markov_times)
assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_waiting_both_classes():
"""
Test that the values to be plotted by the function for the mean waiting time
    of all individuals are as expected when using:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_times_using_markov_formula,
markov_times,
simulation_times,
) = plot_output_comparisons(
lambda_1=3,
lambda_2=4,
mu=1,
num_of_servers=3,
threshold=5,
system_capacity=10,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="waiting",
class_type=None,
plot_over="threshold",
max_parameter_value=9,
accuracy=5,
)
expected_range_space = [
5,
6,
7,
8,
9,
]
expected_sim_times_using_formula = [
1.4383683274990688,
1.6172139699602939,
1.7871674638990411,
1.902900393648282,
2.0799187425189745,
]
expected_markov_times = [
1.4997317350805834,
1.6663508613218276,
1.8329697824825426,
1.999548467136932,
2.165791830248812,
]
expected_sim_times = [
[1.4595100304540891],
[1.5414680277219233],
[1.8463653589649593],
[1.9638358136060718],
[2.1872623359765617],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_times_using_markov_formula, expected_sim_times_using_formula
)
assert np.allclose(markov_times, expected_markov_times)
assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_blocking_class_1():
"""
Test to ensure an error comes up when trying to get the blocking times of
class 1 individuals
"""
with pytest.raises(Exception):
plot_output_comparisons(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=3,
threshold=5,
system_capacity=10,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="blocking",
class_type=0,
plot_over="lambda_1",
max_parameter_value=3,
accuracy=5,
)
def test_plot_output_comparisons_blocking_class_2():
"""
Test that the values to be plotted by the function for the mean blocking time
    of class 2 individuals are as expected when using:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_times_using_markov_formula,
markov_times,
simulation_times,
) = plot_output_comparisons(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=3,
threshold=5,
system_capacity=10,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="blocking",
class_type=1,
plot_over="lambda_2",
max_parameter_value=3,
accuracy=None,
)
expected_range_space = [
1,
1.5,
2,
2.5,
3,
]
expected_sim_times_using_formula = [
0.09939633736936365,
0.3428086786668058,
1.258688113496702,
1.550748270791677,
2.4490455912594884,
]
expected_markov_times = [
0.25749828422874693,
0.7336269690016299,
1.4059020459868858,
2.0166211860863115,
2.446138025813656,
]
expected_sim_times = [
[0.05675700649642476],
[0.2035750550633296],
[1.0204972927807057],
[1.4297836865197424],
[2.276273474404749],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_times_using_markov_formula, expected_sim_times_using_formula
)
assert np.allclose(markov_times, expected_markov_times)
assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_blocking_both_classes():
"""
    Test that the values to be plotted by the function for the mean blocking time
    of all individuals are as expected when using:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_times_using_markov_formula,
markov_times,
simulation_times,
) = plot_output_comparisons(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=1,
threshold=5,
system_capacity=10,
buffer_capacity=7,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="blocking",
class_type=None,
plot_over="num_of_servers",
max_parameter_value=5,
accuracy=None,
)
expected_range_space = [
1,
2,
3,
4,
5,
]
expected_sim_times_using_formula = [
30.454703888754974,
0.8000539978455747,
0.09939633736936365,
0.08297030340373893,
0.06341488800287158,
]
expected_markov_times = [
40.065612220723104,
2.820781651110878,
0.25749828422874693,
0.05700263606859959,
0.024799827726554754,
]
expected_sim_times = [
[10.427934396602263],
[0.25420006034794723],
[0.05675700649642476],
[0.08092456927729426],
[0.08979883878110877],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_times_using_markov_formula, expected_sim_times_using_formula
)
assert np.allclose(markov_times, expected_markov_times)
assert np.allclose(simulation_times, expected_sim_times)
@given(
lambda_1=floats(min_value=1, max_value=3),
lambda_2=floats(min_value=1, max_value=3),
mu=floats(min_value=1, max_value=3),
num_of_servers=integers(min_value=2, max_value=5),
threshold=integers(min_value=2, max_value=10),
system_capacity=integers(min_value=10, max_value=20),
buffer_capacity=integers(min_value=2, max_value=10),
)
@settings(max_examples=5, deadline=None)
def test_plot_output_comparisons_blocking_property(
lambda_1, lambda_2, mu, num_of_servers, threshold, system_capacity, buffer_capacity
):
"""
Test that the values to be plotted by the function for the mean blocking time
of either CLASS 2 INDIVIDUALS or ALL INDIVIDUALS are the same for all methods
used:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
These values are expected to be the same because class 1 individuals do not
have any blocking time, and thus the overall blocking time is calculated just
from class 2 individuals.
"""
(
range_space_1,
simulation_times_using_markov_formula_1,
markov_times_1,
simulation_times_1,
) = plot_output_comparisons(
lambda_1=lambda_1,
lambda_2=lambda_2,
mu=mu,
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="blocking",
class_type=1,
plot_over="buffer_capacity",
max_parameter_value=5,
accuracy=None,
)
(
range_space_2,
simulation_times_using_markov_formula_2,
markov_times_2,
simulation_times_2,
) = plot_output_comparisons(
lambda_1=lambda_1,
lambda_2=lambda_2,
mu=mu,
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
seed_num=0,
num_of_trials=1,
runtime=100,
measure_to_compare="blocking",
class_type=None,
plot_over="buffer_capacity",
max_parameter_value=5,
accuracy=None,
)
assert np.all(range_space_1 == range_space_2)
assert np.all(
simulation_times_using_markov_formula_1
== simulation_times_using_markov_formula_2
)
assert np.all(markov_times_1 == markov_times_2)
assert np.all(simulation_times_1 == simulation_times_2)
def test_plot_of_proportion_within_target_class_1():
"""
Test the values to be plotted by the function for the mean proportion of
individuals for class 1 are as expected for all methods used:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_props_using_markov_formula,
markov_props,
simulation_props,
) = plot_output_comparisons(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=3,
threshold=2,
system_capacity=20,
buffer_capacity=10,
seed_num=1,
num_of_trials=2,
runtime=100,
target=4,
class_type=0,
measure_to_compare="proportion",
accuracy=5,
plot_over="threshold",
max_parameter_value=10,
)
expected_range_space = [
2,
4,
6,
8,
10,
]
expected_sim_props_using_formula = [
0.9790136369646812,
0.9694014142792851,
0.9607171712756224,
0.9512206646084153,
0.9435197873252772,
]
expected_markov_props = [
0.9769758299950714,
0.9698065422230608,
0.9624762629273674,
0.9564778065163335,
0.9524639194726416,
]
expected_sim_props = [
[0.9615384615384616, 1.0],
[0.9504132231404959, 0.978494623655914],
[0.9338842975206612, 0.989247311827957],
[0.9090909090909091, 0.989247311827957],
[0.9008264462809917, 0.989247311827957],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_props_using_markov_formula, expected_sim_props_using_formula
)
assert np.allclose(markov_props, expected_markov_props)
assert np.allclose(simulation_props, expected_sim_props)
def test_plot_of_proportion_within_target_class_2():
"""
Test the values to be plotted by the function for the mean proportion of
individuals for class 2 are as expected for all methods used:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_props_using_markov_formula,
markov_props,
simulation_props,
) = plot_output_comparisons(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=3,
threshold=2,
system_capacity=20,
buffer_capacity=10,
seed_num=1,
num_of_trials=2,
runtime=100,
target=4,
class_type=1,
measure_to_compare="proportion",
accuracy=5,
plot_over="threshold",
max_parameter_value=10,
)
expected_range_space = [
2,
4,
6,
8,
10,
]
expected_sim_props_using_formula = [
0.9816843611112658,
0.9776764633348265,
0.9695135798097695,
0.9607212930115949,
0.9505136921747348,
]
expected_markov_props = [
0.9816843611112656,
0.9776309516976318,
0.9695851706481967,
0.96203774630283,
0.9559521606811459,
]
expected_sim_props = [
[1.0, 0.9880952380952381],
[0.978021978021978, 0.9770114942528736],
[0.967032967032967, 0.9655172413793104],
[0.9560439560439561, 0.9655172413793104],
[0.9230769230769231, 0.9655172413793104],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_props_using_markov_formula, expected_sim_props_using_formula
)
assert np.allclose(markov_props, expected_markov_props)
assert np.allclose(simulation_props, expected_sim_props)
def test_plot_of_proportion_within_target_both_classes():
"""
Test the values to be plotted by the function for the mean proportion of
individuals for both classes are as expected for all methods used:
- Markov formula and simulation state probabilities
- Markov formula and Markov state probabilities
- Simulation
"""
(
range_space,
simulation_props_using_markov_formula,
markov_props,
simulation_props,
) = plot_output_comparisons(
lambda_1=1,
lambda_2=1,
mu=1,
num_of_servers=3,
threshold=2,
system_capacity=20,
buffer_capacity=10,
seed_num=1,
num_of_trials=2,
runtime=100,
target=4,
class_type=None,
measure_to_compare="proportion",
accuracy=5,
plot_over="threshold",
max_parameter_value=10,
)
expected_range_space = [
2,
4,
6,
8,
10,
]
expected_sim_props_using_formula = [
0.9803420072819845,
0.973534925815833,
0.965115375542696,
0.955970978810005,
0.947016739750006,
]
expected_markov_props = [
0.9793015428995077,
0.9737157940379565,
0.966029525931023,
0.959257362821785,
0.9542079250880933,
]
expected_sim_props = [
[0.9786096256684492, 0.9938650306748467],
[0.9622641509433962, 0.9777777777777777],
[0.9481132075471698, 0.9777777777777777],
[0.9292452830188679, 0.9777777777777777],
[0.910377358490566, 0.9777777777777777],
]
assert np.all(range_space == expected_range_space)
assert np.allclose(
simulation_props_using_markov_formula, expected_sim_props_using_formula
)
assert np.allclose(markov_props, expected_markov_props)
assert np.allclose(simulation_props, expected_sim_props)
def test_plot_output_comparisons_invalid_measure():
"""
Test that an error is raised when an invalid measure is passed to the
function.
"""
with pytest.raises(ValueError):
plot_output_comparisons(
lambda_1=None,
lambda_2=0.1,
mu=None,
num_of_servers=None,
threshold=None,
system_capacity=None,
buffer_capacity=None,
seed_num=None,
num_of_trials=None,
runtime=None,
measure_to_compare="invalid_measure",
)
``` |
{
"source": "11michalis11/Ciw",
"score": 3
} |
#### File: ciw/dists/distributions.py
```python
from ciw.auxiliary import *
from itertools import cycle
import copy
from operator import add, mul, sub, truediv
from random import (expovariate, uniform, triangular, gammavariate,
lognormvariate, weibullvariate)
class Distribution(object):
"""
    A general distribution from which all other distributions will inherit.
"""
def __repr__(self):
return 'Distribution'
def sample(self, t=None, ind=None):
pass
def _sample(self, t=None, ind=None):
"""
        Performs validity checks before sampling.
"""
s = self.sample(t=t, ind=ind)
if (isinstance(s, float) or isinstance(s, int)) and s >= 0:
return s
else:
raise ValueError('Invalid time sampled.')
def __add__(self, dist):
"""
Add two distributions such that sampling is the sum of the samples.
"""
return CombinedDistribution(self, dist, add)
def __sub__(self, dist):
"""
Subtract two distributions such that sampling is the difference of the samples.
"""
return CombinedDistribution(self, dist, sub)
def __mul__(self, dist):
"""
Multiply two distributions such that sampling is the product of the samples.
"""
return CombinedDistribution(self, dist, mul)
def __truediv__(self, dist):
"""
Divide two distributions such that sampling is the ratio of the samples.
"""
return CombinedDistribution(self, dist, truediv)
class CombinedDistribution(Distribution):
"""
A distribution that combines the samples of two other distributions, `dist1`
and `dist2`, using `operator`.
"""
def __init__(self, dist1, dist2, operator):
self.d1 = copy.deepcopy(dist1)
self.d2 = copy.deepcopy(dist2)
self.operator = operator
def __repr__(self):
return 'CombinedDistribution'
def sample(self, t=None, ind=None):
s1 = self.d1.sample()
s2 = self.d2.sample()
return self.operator(s1, s2)
class Uniform(Distribution):
"""
The Uniform distribution.
Takes:
- `lower` the lower bound
- `upper` the upper bound
"""
def __init__(self, lower, upper):
if lower < 0.0 or upper < 0.0:
raise ValueError('Uniform distribution must sample positive numbers only.')
if upper < lower:
            raise ValueError('Uniform distribution upper bound should be >= lower bound.')
self.lower = lower
self.upper = upper
def __repr__(self):
return 'Uniform: {0}, {1}'.format(self.lower, self.upper)
def sample(self, t=None, ind=None):
return uniform(self.lower, self.upper)
class Deterministic(Distribution):
"""
The Deterministic distribution.
Takes:
- `value` the value to return
"""
def __init__(self, value):
if value < 0.0:
raise ValueError('Deterministic distribution must sample positive numbers only.')
self.value = value
def __repr__(self):
return 'Deterministic: {0}'.format(self.value)
def sample(self, t=None, ind=None):
return self.value
class Triangular(Distribution):
"""
The Triangular distribution.
Takes:
- `lower` the lower bound
- `upper` the upper bound
- `mode` the modal value
"""
def __init__(self, lower, mode, upper):
if lower < 0.0 or upper < 0.0 or mode < 0.0:
raise ValueError('Triangular distribution must sample positive numbers only.')
if not lower <= mode <= upper:
raise ValueError('Triangular distribution lower bound must be <= mode must be <= upper bound.')
self.lower = lower
self.mode = mode
self.upper = upper
def __repr__(self):
return 'Triangular: {0}, {1}, {2}'.format(self.lower, self.mode, self.upper)
def sample(self, t=None, ind=None):
return triangular(self.lower, self.upper, self.mode)
class Exponential(Distribution):
"""
The Exponential distribution.
Takes:
- `rate` the rate parameter, lambda
"""
def __init__(self, rate):
if rate <= 0.0:
raise ValueError('Exponential distribution must sample positive numbers only.')
self.rate = rate
def __repr__(self):
return 'Exponential: {0}'.format(self.rate)
def sample(self, t=None, ind=None):
return expovariate(self.rate)
class Gamma(Distribution):
"""
The Gamma distribution.
Takes:
- `shape` the shape parameter, alpha
- `scale` the scale parameter, beta
"""
def __init__(self, shape, scale):
self.shape = shape
self.scale = scale
def __repr__(self):
return 'Gamma: {0}, {1}'.format(self.shape, self.scale)
def sample(self, t=None, ind=None):
return gammavariate(self.shape, self.scale)
class Normal(Distribution):
"""
The Truncated Normal distribution.
Takes:
- `mean` the mean of the Normal, mu
- `sd` the standard deviation of the Normal, sigma
"""
def __init__(self, mean, sd):
self.mean = mean
self.sd = sd
def __repr__(self):
return 'Normal: {0}, {1}'.format(self.mean, self.sd)
def sample(self, t=None, ind=None):
return truncated_normal(self.mean, self.sd)
class Lognormal(Distribution):
"""
The Lognormal distribution.
Takes:
- `mean` the mean of the Normal, mu
- `sd` the standard deviation of the Normal, sigma
"""
def __init__(self, mean, sd):
self.mean = mean
self.sd = sd
def __repr__(self):
return 'Lognormal: {0}, {1}'.format(self.mean, self.sd)
def sample(self, t=None, ind=None):
return lognormvariate(self.mean, self.sd)
class Weibull(Distribution):
"""
The Weibull distribution.
Takes:
- `scale` the scale parameter, alpha
- `shape` the shape parameter, beta
"""
def __init__(self, scale, shape):
self.scale = scale
self.shape = shape
def __repr__(self):
return 'Weibull: {0}, {1}'.format(self.scale, self.shape)
def sample(self, t=None, ind=None):
return weibullvariate(self.scale, self.shape)
class Empirical(Distribution):
"""
The Empirical distribution.
Takes:
- `observations` the observations from which to sample
"""
def __init__(self, observations):
if any(o < 0 for o in observations):
raise ValueError('Empirical distribution must sample positive numbers only.')
self.observations = observations
def __repr__(self):
return 'Empirical'
def sample(self, t=None, ind=None):
return random_choice(self.observations)
class Sequential(Distribution):
"""
The Sequential distribution.
Takes:
- `sequence` the sequence to cycle through
"""
def __init__(self, sequence):
if any(o < 0 for o in sequence):
raise ValueError('Sequential distribution must sample positive numbers only.')
self.sequence = sequence
self.generator = cycle(self.sequence)
def __repr__(self):
return 'Sequential'
def sample(self, t=None, ind=None):
return next(self.generator)
class Pmf(Distribution):
"""
A distribution defined by a probability mass function (pmf).
Takes:
- `values` the values to sample
- `probs` the associated probabilities
"""
def __init__(self, values, probs):
if any(o < 0 for o in values):
raise ValueError('Pmf must sample positive numbers only.')
if any(p < 0 or p > 1.0 for p in probs):
raise ValueError('Pmf must have valid probabilities.')
if sum(probs) != 1.0:
raise ValueError('Pmf probabilities must sum to 1.0.')
self.values = values
self.probs = probs
def __repr__(self):
return 'Pmf'
def sample(self, t=None, ind=None):
return random_choice(self.values, self.probs)
class PhaseType(Distribution):
"""
A distribution defined by an initial vector and an absorbing Markov chain
Takes:
      - `initial_state` the initial probabilities of being in each state
      - `absorbing_matrix` the matrix representation of the absorbing Markov
chain, with the final state the absorbing state
"""
def __init__(self, initial_state, absorbing_matrix):
if any(p < 0 or p > 1.0 for p in initial_state):
raise ValueError('Initial state vector must have valid probabilities.')
if sum(initial_state) > 1.0 + 10**(-10) or sum(initial_state) < 1.0 - 10**(-10):
raise ValueError('Initial state vector probabilities must sum to 1.0.')
if any(len(absorbing_matrix) != len(row) for row in absorbing_matrix):
raise ValueError('Matrix of the absorbing Markov chain must be square.')
if len(initial_state) != len(absorbing_matrix):
raise ValueError('Initial state vector must have same number of states as absorbing Markov chain matrix.')
if any(row[j] < 0 for i, row in enumerate(absorbing_matrix) for j in range(len(absorbing_matrix)) if i != j):
raise ValueError('Transition rates must be positive.')
if not all(-(10**(-10)) < sum(row) < 10**(-10) for i, row in enumerate(absorbing_matrix)):
raise ValueError('Matrix rows must sum to 0.')
if not all(r == 0 for r in absorbing_matrix[-1]):
raise ValueError('Final state must be the absorbing state.')
if not any(row[-1] > 0 for row in absorbing_matrix):
raise ValueError('Must be possible to reach the absorbing state.')
self.initial_state = initial_state
self.states = tuple(range(len(initial_state)))
self.absorbing_matrix = absorbing_matrix
def __repr__(self):
return 'PhaseType'
def sample_transition(self, rate):
if rate <= 0.0:
return float('Inf')
return expovariate(rate)
def sample(self, t=None, ind=None):
cumulative_time = 0
current_state = random_choice(self.states, probs=self.initial_state)
while current_state != self.states[-1]:
potential_transitions = [self.sample_transition(r) for r in self.absorbing_matrix[current_state]]
time, idx = min((time, idx) for (idx, time) in enumerate(potential_transitions))
cumulative_time += time
current_state = idx
return cumulative_time
class Erlang(PhaseType):
"""
    A shortcut for the Erlang distribution, using the PhaseType distribution
Takes:
- `rate` the rate spent in each phase
- `num_phases` the number of phases in series
"""
def __init__(self, rate, num_phases):
if rate <= 0.0:
raise ValueError('Rate must be positive.')
if num_phases < 1:
raise ValueError('At least one phase is required.')
self.rate = rate
self.num_phases = num_phases
initial_state = [1] + [0] * num_phases
absorbing_matrix = [[0] * (num_phases + 1) for _ in range(num_phases + 1)]
for phase in range(num_phases):
absorbing_matrix[phase][phase] = -self.rate
absorbing_matrix[phase][phase + 1] = self.rate
super().__init__(initial_state, absorbing_matrix)
def __repr__(self):
return f'Erlang: {self.rate}, {self.num_phases}'
class HyperExponential(PhaseType):
"""
A shortcut for the HyperExponential distribution, using the PhaseType distribution
Takes:
- `rates` a vector of rates for each phase
- `probs` a probability vector for starting in each phase
"""
def __init__(self, rates, probs):
if any(r <= 0.0 for r in rates):
raise ValueError('Rates must be positive.')
initial_state = probs + [0]
num_phases = len(probs)
absorbing_matrix = [[0] * (num_phases + 1) for _ in range(num_phases + 1)]
for phase in range(num_phases):
absorbing_matrix[phase][phase] = -rates[phase]
absorbing_matrix[phase][num_phases] = rates[phase]
super().__init__(initial_state, absorbing_matrix)
def __repr__(self):
return "HyperExponential"
class HyperErlang(PhaseType):
"""
A shortcut for the HyperErlang distribution, using the PhaseType distribution
Takes:
- `rates` a vector of rates for each phase
- `probs` a probability vector for starting in each phase
- `phase_lengths` the number of sub-phases in each phase
"""
def __init__(self, rates, probs, phase_lengths):
if any(r <= 0.0 for r in rates):
raise ValueError('Rates must be positive.')
if any(n < 1 for n in phase_lengths):
raise ValueError('At least one phase is required for each sub-phase.')
initial_state = []
for p, n in zip(probs, phase_lengths):
initial_state += [p]
initial_state += [0] * (n - 1)
initial_state += [0]
num_phases = sum(phase_lengths)
absorbing_matrix = [[0] * (num_phases + 1) for _ in range(num_phases + 1)]
for i, r in enumerate(rates):
for subphase in range(phase_lengths[i]):
offset = sum(phase_lengths[:i])
absorbing_matrix[offset + subphase][offset + subphase] = -r
if subphase < phase_lengths[i] - 1:
absorbing_matrix[offset + subphase][offset + subphase + 1] = r
else:
absorbing_matrix[offset + subphase][-1] = r
super().__init__(initial_state, absorbing_matrix)
def __repr__(self):
return "HyperErlang"
class Coxian(PhaseType):
"""
    A shortcut for the Coxian distribution, using the PhaseType distribution
Takes:
- `rates` a vector of rates for each phase
- `probs` a vector of the probability of absorption at each phase
"""
def __init__(self, rates, probs):
if any(r <= 0.0 for r in rates):
raise ValueError('Rates must be positive.')
if any(p < 0 or p > 1.0 for p in probs):
raise ValueError('Probability vector must have valid probabilities.')
if probs[-1] != 1.0:
raise ValueError('The probability of going to the absorbing state from the final phase must be 1.0.')
num_phases = len(rates)
initial_state = [1] + [0] * num_phases
absorbing_matrix = [[0] * (num_phases + 1) for _ in range(num_phases + 1)]
for i, (p, r) in enumerate(zip(probs, rates)):
absorbing_matrix[i][i] = -r
absorbing_matrix[i][i + 1] = (1 - p) * r
absorbing_matrix[i][-1] = p * r
super().__init__(initial_state, absorbing_matrix)
def __repr__(self):
return "Coxian"
class NoArrivals(Distribution):
"""
A placeholder distribution if there are no arrivals.
"""
def __repr__(self):
return 'NoArrivals'
def sample(self, t=None, ind=None):
return float('Inf')
```
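The distribution classes above compose arithmetically (returning a `CombinedDistribution`), and the phase-type shortcuts such as `Erlang` are built on `PhaseType`. The following is a minimal usage sketch, assuming the package is importable as `ciw` (as the test files below do); the rates are illustrative only.
```python
import ciw

# Sample a single time from an Exponential distribution.
service = ciw.dists.Exponential(rate=2.0)
sample = service.sample()  # a non-negative float

# Adding two distributions returns a CombinedDistribution whose
# sample() draws from each component and sums the results.
total = ciw.dists.Exponential(2.0) + ciw.dists.Deterministic(0.5)
combined_sample = total.sample()

# Erlang is a PhaseType shortcut: `num_phases` Exponential(rate)
# phases in series before absorption.
erlang = ciw.dists.Erlang(rate=3.0, num_phases=4)
erlang_sample = erlang.sample()
```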
#### File: ciw/tests/test_network.py
```python
import unittest
import ciw
import copy
import random
from hypothesis import given
from hypothesis.strategies import floats, integers, lists, random_module
def example_baulking_function(n):
if n < 5:
return 0.0
return 1.0
class TestServiceCentre(unittest.TestCase):
def test_init_method(self):
number_of_servers = 2
queueing_capacity = float('inf')
class_change_matrix = [[0.2, 0.8],
[1.0, 0.0]]
schedule = None
SC = ciw.ServiceCentre(number_of_servers, queueing_capacity, class_change_matrix, schedule)
self.assertEqual(SC.number_of_servers, number_of_servers)
self.assertEqual(SC.queueing_capacity, queueing_capacity)
self.assertEqual(SC.class_change_matrix, class_change_matrix)
self.assertEqual(SC.schedule, schedule)
self.assertFalse(SC.schedule_preempt)
@given(number_of_servers=integers(min_value=1),
queueing_capacity=integers(min_value=0),
class_change_prob1=floats(min_value=0.0, max_value=1.0),
class_change_prob2=floats(min_value=0.0, max_value=1.0))
def test_init_method_h(self, number_of_servers, queueing_capacity, class_change_prob1, class_change_prob2):
class_change_matrix = [[class_change_prob1,
1 - class_change_prob1],
[class_change_prob2,
1 - class_change_prob2]]
schedule = None
SC = ciw.ServiceCentre(number_of_servers, queueing_capacity, class_change_matrix, schedule)
self.assertEqual(SC.number_of_servers, number_of_servers)
self.assertEqual(SC.queueing_capacity, queueing_capacity)
self.assertEqual(SC.class_change_matrix, class_change_matrix)
self.assertEqual(SC.schedule, schedule)
self.assertFalse(SC.schedule_preempt)
class TestCustomerClass(unittest.TestCase):
def test_init_method(self):
arrival_distributions = [ciw.dists.Uniform(4.0, 9.0),
ciw.dists.Exponential(5),
ciw.dists.Gamma(0.6, 1.2)]
service_distributions = [ciw.dists.Gamma(4.0, 9.0),
ciw.dists.Uniform(0.6, 1.2),
ciw.dists.Exponential(5)]
routing = [[.2, .6, .2], [0, 0, 0], [.5, 0, 0]]
priority_class = 2
baulking_functions = [None, None, example_baulking_function]
batching_distributions = [ciw.dists.Deterministic(1),
ciw.dists.Deterministic(1),
ciw.dists.Deterministic(1)]
reneging_time_distributions = [None, None, None]
reneging_destinations = [-1, -1, -1]
class_change_time_distributions = [None]
CC = ciw.CustomerClass(arrival_distributions, service_distributions, routing, priority_class, baulking_functions, batching_distributions, reneging_time_distributions, reneging_destinations, class_change_time_distributions)
self.assertEqual(CC.arrival_distributions, arrival_distributions)
self.assertEqual(CC.service_distributions, service_distributions)
self.assertEqual(CC.batching_distributions, batching_distributions)
self.assertEqual(CC.routing, routing)
self.assertEqual(CC.priority_class, priority_class)
self.assertEqual(CC.reneging_time_distributions, reneging_time_distributions)
self.assertEqual(CC.reneging_destinations, reneging_destinations)
self.assertEqual(CC.class_change_time_distributions, class_change_time_distributions)
# check baulking function works
self.assertEqual(CC.baulking_functions[2](0), 0.0)
self.assertEqual(CC.baulking_functions[2](1), 0.0)
self.assertEqual(CC.baulking_functions[2](2), 0.0)
self.assertEqual(CC.baulking_functions[2](3), 0.0)
self.assertEqual(CC.baulking_functions[2](4), 0.0)
self.assertEqual(CC.baulking_functions[2](5), 1.0)
self.assertEqual(CC.baulking_functions[2](6), 1.0)
self.assertEqual(CC.baulking_functions[2](7), 1.0)
self.assertEqual(CC.baulking_functions[2](8), 1.0)
class TestNetwork(unittest.TestCase):
def test_init_method(self):
number_of_servers = 2
queueing_capacity = float('inf')
schedule = None
class_change_matrix = [[0.2, 0.8],
[1.0, 0.0]]
arrival_distributions = [ciw.dists.Uniform(4.0, 9.0),
ciw.dists.Exponential(5.0),
ciw.dists.Gamma(0.6, 1.2)]
service_distributions = [ciw.dists.Gamma(4.0, 9.0),
ciw.dists.Uniform(0.6, 1.2),
ciw.dists.Exponential(5)]
routing = [[0.2, 0.6, 0.2],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0]]
priority_class = 0
batching_distributions = [ciw.dists.Deterministic(1),
ciw.dists.Deterministic(1),
ciw.dists.Deterministic(1)]
baulking_functions = [None, None, example_baulking_function]
reneging_time_distributions = [None, None, None]
reneging_destinations = [-1, -1, -1]
class_change_time_distributions = [None, None]
service_centres = [ciw.ServiceCentre(number_of_servers,
queueing_capacity,
class_change_matrix,
schedule) for i in range(3)]
customer_classes = [ciw.CustomerClass(arrival_distributions,
service_distributions,
routing,
priority_class,
baulking_functions,
batching_distributions,
reneging_time_distributions,
reneging_destinations,
class_change_time_distributions) for i in range(2)]
N = ciw.Network(service_centres, customer_classes)
self.assertEqual(N.service_centres, service_centres)
self.assertEqual(N.customer_classes, customer_classes)
self.assertEqual(N.number_of_nodes, 3)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0, 1:0})
self.assertFalse(N.service_centres[0].reneging)
self.assertFalse(N.service_centres[1].reneging)
self.assertFalse(N.service_centres[0].class_change_time)
self.assertFalse(N.service_centres[1].class_change_time)
def test_create_network_from_dictionary(self):
params = {'arrival_distributions': {'Class 0': [ciw.dists.Exponential(3.0)]},
'service_distributions': {'Class 0': [ciw.dists.Exponential(7.0)]},
'number_of_servers': [9],
'routing': {'Class 0': [[0.5]]},
'queue_capacities': [float('inf')],
'ps_thresholds': [4]}
N = ciw.create_network_from_dictionary(params)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].class_change_matrix, None)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertEqual(N.service_centres[0].ps_threshold, 4)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0})
params = {'arrival_distributions': [ciw.dists.Exponential(3.0),
ciw.dists.Uniform(0.2, 0.6)],
'service_distributions': [ciw.dists.Exponential(7.0),
ciw.dists.Deterministic(0.7)],
'number_of_servers': [[[1, 20], [4, 50]], 3],
'routing': [[0.5, 0.2],
[0.0, 0.0]],
'queue_capacities': [10, float('inf')]
}
N = ciw.create_network_from_dictionary(params)
self.assertEqual(N.number_of_nodes, 2)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, 10)
self.assertEqual(N.service_centres[0].number_of_servers, 'schedule')
self.assertEqual(N.service_centres[0].class_change_matrix, None)
self.assertEqual(N.service_centres[0].schedule, [[1, 20], [4, 50]])
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[1].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[1].number_of_servers, 3)
self.assertEqual(N.service_centres[1].class_change_matrix, None)
self.assertEqual(N.service_centres[1].schedule, None)
self.assertFalse(N.service_centres[1].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0', 'Uniform: 0.2, 0.6'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0', 'Deterministic: 0.7'])
self.assertEqual(N.customer_classes[0].routing, [[0.5, 0.2], [0.0, 0.0]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0})
params = {'arrival_distributions': {'Class 0': [ciw.dists.Exponential(3.0)],
'Class 1': [ciw.dists.Exponential(4.0)]},
'service_distributions': {'Class 0': [ciw.dists.Exponential(7.0)],
'Class 1': [ciw.dists.Uniform(0.4, 1.2)]},
'number_of_servers': [9],
'routing': {'Class 0': [[0.5]],
'Class 1': [[0.0]]},
'queue_capacities': [float('inf')],
'class_change_matrices': {'Node 1': [[0.0, 1.0],
[0.2, 0.8]]}}
N = ciw.create_network_from_dictionary(params)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].class_change_matrix, [[0.0, 1.0], [0.2, 0.8]])
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[0].ps_threshold, 1)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Uniform: 0.4, 1.2'])
self.assertEqual(N.customer_classes[1].routing, [[0.0]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0, 1:0})
params = {'arrival_distributions': {'Class 0': [ciw.dists.Exponential(3.0)],
'Class 1': [ciw.dists.Exponential(4.0)]},
'service_distributions': {'Class 0': [ciw.dists.Exponential(7.0)],
'Class 1': [ciw.dists.Uniform(0.4, 1.2)]},
'number_of_servers': [9],
'routing': {'Class 0': [[0.5]],
'Class 1': [[0.0]]},
'class_change_time_distributions': [
[None, ciw.dists.Deterministic(5)],
[ciw.dists.Deterministic(10), None]
]
}
N = ciw.create_network_from_dictionary(params)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual([str(d) for d in N.customer_classes[0].class_change_time_distributions], ['None', 'Deterministic: 5'])
self.assertEqual([str(d) for d in N.customer_classes[1].class_change_time_distributions], ['Deterministic: 10', 'None'])
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[0].ps_threshold, 1)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Uniform: 0.4, 1.2'])
self.assertEqual(N.customer_classes[1].routing, [[0.0]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0, 1:0})
params = {'arrival_distributions': {'Class 0': [ciw.dists.Exponential(3.0)],
'Class 1': [ciw.dists.Exponential(4.0)]},
'service_distributions': {'Class 0': [ciw.dists.Exponential(7.0)],
'Class 1': [ciw.dists.Uniform(0.4, 1.2)]},
'number_of_servers': [9],
'routing': {'Class 0': [[0.5]],
'Class 1': [[0.0]]},
'queue_capacities': [float('inf')],
'priority_classes': {'Class 0': 1,
'Class 1': 0}}
N = ciw.create_network_from_dictionary(params)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertEqual(N.service_centres[0].ps_threshold, 1)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Uniform: 0.4, 1.2'])
self.assertEqual(N.customer_classes[1].routing, [[0.0]])
self.assertEqual(N.customer_classes[0].priority_class, 1)
self.assertEqual(N.customer_classes[1].priority_class, 0)
self.assertEqual(N.number_of_priority_classes, 2)
self.assertEqual(N.priority_class_mapping, {0:1, 1:0})
params = {'arrival_distributions': [ciw.dists.Exponential(3.0), ciw.dists.Exponential(4.0), ciw.dists.Exponential(2.0)],
'service_distributions': [ciw.dists.Exponential(7.0), ciw.dists.Uniform(0.4, 1.2), ciw.dists.Deterministic(5.33)],
'number_of_servers': [9, 2, 4],
'routing': [[0.5, 0.0, 0.1],
[0.2, 0.1, 0.0],
[0.0, 0.0, 0.0]],
'queue_capacities': [float('inf'), float('inf'), float('inf')],
'baulking_functions': [None, None, example_baulking_function]}
N = ciw.create_network_from_dictionary(params)
self.assertEqual(N.number_of_nodes, 3)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[1].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[1].number_of_servers, 2)
self.assertEqual(N.service_centres[1].schedule, None)
self.assertFalse(N.service_centres[1].schedule_preempt)
self.assertEqual(N.service_centres[2].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[2].number_of_servers, 4)
self.assertEqual(N.service_centres[2].schedule, None)
self.assertFalse(N.service_centres[2].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0', 'Exponential: 4.0', 'Exponential: 2.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0', 'Uniform: 0.4, 1.2', 'Deterministic: 5.33'])
self.assertEqual(N.customer_classes[0].routing, [[0.5, 0.0, 0.1],
[0.2, 0.1, 0.0],
[0.0, 0.0, 0.0]])
self.assertEqual(N.customer_classes[0].baulking_functions, [None, None, example_baulking_function])
self.assertEqual(N.number_of_priority_classes, 1)
def test_create_network_from_yml(self):
N = ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params_change_class_dynamic.yml')
self.assertEqual(N.number_of_nodes, 4)
self.assertEqual(N.number_of_classes, 3)
self.assertEqual(N.service_centres[0].queueing_capacity, 20)
self.assertEqual(N.service_centres[1].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[2].queueing_capacity, 30)
self.assertEqual(N.service_centres[3].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[1].number_of_servers, 10)
self.assertEqual(N.service_centres[2].number_of_servers, 8)
self.assertEqual(N.service_centres[3].number_of_servers, 8)
self.assertEqual(N.service_centres[0].class_change_matrix, None)
self.assertEqual(N.service_centres[1].class_change_matrix, None)
self.assertEqual(N.service_centres[2].class_change_matrix, None)
self.assertEqual(N.service_centres[3].class_change_matrix, None)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertEqual(N.service_centres[1].schedule, None)
self.assertEqual(N.service_centres[2].schedule, None)
self.assertEqual(N.service_centres[3].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertFalse(N.service_centres[1].schedule_preempt)
self.assertFalse(N.service_centres[2].schedule_preempt)
self.assertFalse(N.service_centres[3].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0', 'Exponential: 7.0', 'Exponential: 4.0', 'Exponential: 1.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 2.0', 'Exponential: 3.0', 'Exponential: 6.0', 'Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[2].arrival_distributions], ['Exponential: 2.0', 'Exponential: 1.0', 'Exponential: 2.0', 'Exponential: 0.5'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0', 'Exponential: 7.0', 'Gamma: 0.4, 0.6', 'Deterministic: 0.5'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Exponential: 7.0', 'Triangular: 0.1, 0.8, 0.85', 'Exponential: 8.0', 'Exponential: 5.0'])
self.assertEqual([str(d) for d in N.customer_classes[2].service_distributions], ['Deterministic: 0.3', 'Deterministic: 0.2', 'Exponential: 8.0', 'Exponential: 9.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.1, 0.2, 0.1, 0.4], [0.2, 0.2, 0.0, 0.1], [0.0, 0.8, 0.1, 0.1], [0.4, 0.1, 0.1, 0.0]])
self.assertEqual(N.customer_classes[1].routing, [[0.6, 0.0, 0.0, 0.2], [0.1, 0.1, 0.2, 0.2], [0.9, 0.0, 0.0, 0.0], [0.2, 0.1, 0.1, 0.1]])
self.assertEqual(N.customer_classes[2].routing, [[0.0, 0.0, 0.4, 0.3], [0.1, 0.1, 0.1, 0.1], [0.1, 0.3, 0.2, 0.2], [0.0, 0.0, 0.0, 0.3]])
self.assertEqual([str(d) for d in N.customer_classes[0].class_change_time_distributions], ['None', 'Exponential: 6.0', 'Exponential: 6.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].class_change_time_distributions], ['None', 'None', 'Exponential: 6.0'])
self.assertEqual([str(d) for d in N.customer_classes[2].class_change_time_distributions], ['None', 'None', 'None'])
def test_raising_errors(self):
params = {'arrival_distributions': {'Class 0':[['Exponential', 3.0]]},
'service_distributions': {'Class 0':[['Exponential', 7.0]]},
'number_of_servers': [9],
'number_of_classes': 1,
'routing': {'Class 0': [[0.5]]},
'number_of_nodes': 1,
'queue_capacities': [float('inf')]}
params_list = [copy.deepcopy(params) for i in range(27)]
params_list[0]['number_of_classes'] = -2
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[0])
params_list[1]['number_of_nodes'] = -2
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[1])
params_list[2]['number_of_servers'] = [5, 6, 7]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[2])
params_list[3]['number_of_servers'] = [-3]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[3])
params_list[4]['number_of_servers'] = ['my_missing_schedule']
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[4])
params_list[5]['queue_capacities'] = ['Inf', 1, 2]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[5])
params_list[6]['queue_capacities'] = [-2]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[6])
params_list[7]['arrival_distributions'] = {'Class 0':[['Exponential', 3.2]],
'Class 1':[['Exponential', 2.1]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[7])
params_list[8]['arrival_distributions'] = {'Patient 0':[['Exponential', 11.5]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[8])
params_list[9]['arrival_distributions']['Class 0'] = [['Exponential', 3.1],
['Exponential', 2.4]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[9])
params_list[10]['service_distributions'] = {'Class 0':[['Exponential', 3.2]],
'Class 1':[['Exponential', 2.1]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[10])
params_list[11]['service_distributions'] = {'Patient 0':[['Exponential', 11.5]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[11])
params_list[12]['service_distributions']['Class 0'] = [['Exponential', 3.1],
['Exponential', 2.4]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[12])
params_list[13]['routing'] = {'Class 0':[[0.2]],
'Class 1':[[0.3]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[13])
params_list[14]['routing'] = {'Patient 0':[[0.5]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[14])
params_list[15]['routing']['Class 0'] = [[0.2], [0.1]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[15])
params_list[16]['routing']['Class 0'] = [[0.2, 0.1]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[16])
params_list[17]['routing']['Class 0'] = [[-0.6]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[17])
params_list[18]['routing']['Class 0'] = [[1.4]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[18])
params_list[19]['class_change_matrices'] = {'Node 1':[[0.0]],
'Node 2':[[0.0]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[19])
params_list[20]['class_change_matrices'] = {'Patient 0':[[0.0]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[20])
params_list[21]['class_change_matrices'] = {'Node 1':[[-0.4]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[21])
params_list[22]['class_change_matrices'] = {'Node 1':[[1.5]]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[22])
params_list[23]['reneging_time_distributions'] = {'Class 0': [ciw.dists.Exponential(1), ciw.dists.Exponential(1)]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[23])
params_list[24]['reneging_destinations'] = {'Class 0': [-1, -1, -1]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[24])
params_list[25]['reneging_destinations'] = {'Class 0': [7]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[25])
params_list[26]['class_change_time_distributions'] = [[None], [None, None]]
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params_list[26])
class TestImportNoMatrix(unittest.TestCase):
def test_optional_transition_matrix(self):
params = {'arrival_distributions': [ciw.dists.Exponential(1.0)],
'service_distributions': [ciw.dists.Exponential(2.0)],
'number_of_servers': [1]}
N = ciw.create_network(**params)
self.assertEqual([c.routing for c in N.customer_classes], [[[0.0]]])
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Exponential(1.0)],
'Class 1': [ciw.dists.Exponential(1.0)]},
service_distributions={'Class 0': [ciw.dists.Exponential(2.0)],
'Class 1': [ciw.dists.Exponential(1.0)]},
number_of_servers=[1]
)
self.assertEqual([c.routing for c in N.customer_classes], [[[0.0]], [[0.0]]])
params = {'arrival_distributions': [ciw.dists.Exponential(1.0), ciw.dists.Exponential(1.0)],
'service_distributions': [ciw.dists.Exponential(2.0), ciw.dists.Exponential(2.0)],
'number_of_servers': [1, 2]}
self.assertRaises(ValueError, ciw.create_network_from_dictionary, params)
class TestCreateNetworkKwargs(unittest.TestCase):
def test_network_from_kwargs(self):
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Exponential(3.0)]},
service_distributions={'Class 0': [ciw.dists.Exponential(7.0)]},
number_of_servers=[9],
routing={'Class 0': [[0.5]]},
queue_capacities=[float('inf')]
)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].class_change_matrix, None)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0})
N = ciw.create_network(
arrival_distributions=[ciw.dists.Exponential(3.0),
ciw.dists.Uniform(0.2, 0.6)],
service_distributions=[ciw.dists.Exponential(7.0),
ciw.dists.Deterministic(0.7)],
number_of_servers=[[[1, 20], [4, 50]], 3],
routing=[[0.5, 0.2],
[0.0, 0.0]],
queue_capacities=[10, float('inf')]
)
self.assertEqual(N.number_of_nodes, 2)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, 10)
self.assertEqual(N.service_centres[0].number_of_servers, 'schedule')
self.assertEqual(N.service_centres[0].class_change_matrix, None)
self.assertEqual(N.service_centres[0].schedule, [[1, 20], [4, 50]])
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[1].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[1].number_of_servers, 3)
self.assertEqual(N.service_centres[1].class_change_matrix, None)
self.assertEqual(N.service_centres[1].schedule, None)
self.assertFalse(N.service_centres[1].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0', 'Uniform: 0.2, 0.6'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0', 'Deterministic: 0.7'])
self.assertEqual(N.customer_classes[0].routing, [[0.5, 0.2], [0.0, 0.0]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0})
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Exponential(3.0)],
'Class 1': [ciw.dists.Exponential(4.0)]},
service_distributions={'Class 0': [ciw.dists.Exponential(7.0)],
'Class 1': [ciw.dists.Uniform(0.4, 1.2)]},
number_of_servers=[9],
routing={'Class 0': [[0.5]],
'Class 1': [[0.0]]},
queue_capacities=[float('inf')],
class_change_matrices={'Node 1': [[0.0, 1.0],
[0.2, 0.8]]}
)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].class_change_matrix, [[0.0, 1.0], [0.2, 0.8]])
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Uniform: 0.4, 1.2'])
self.assertEqual(N.customer_classes[1].routing, [[0.0]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0, 1:0})
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Exponential(3.0)],
'Class 1': [ciw.dists.Exponential(4.0)]},
service_distributions={'Class 0': [ciw.dists.Exponential(7.0)],
'Class 1': [ciw.dists.Uniform(0.4, 1.2)]},
number_of_servers=[9],
routing={'Class 0': [[0.5]],
'Class 1': [[0.0]]},
queue_capacities=['Inf'],
priority_classes={'Class 0': 1,
'Class 1': 0}
)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Uniform: 0.4, 1.2'])
self.assertEqual(N.customer_classes[1].routing, [[0.0]])
self.assertEqual(N.customer_classes[0].priority_class, 1)
self.assertEqual(N.customer_classes[1].priority_class, 0)
self.assertEqual(N.number_of_priority_classes, 2)
self.assertEqual(N.priority_class_mapping, {0:1, 1:0})
N = ciw.create_network(
arrival_distributions=[ciw.dists.Exponential(3.0),
ciw.dists.Exponential(4.0),
ciw.dists.Exponential(2.0)],
service_distributions=[ciw.dists.Exponential(7.0),
ciw.dists.Uniform(0.4, 1.2),
ciw.dists.Deterministic(5.33)],
number_of_servers=[9, 2, 4],
routing=[[0.5, 0.0, 0.1],
[0.2, 0.1, 0.0],
[0.0, 0.0, 0.0]],
queue_capacities=[float('inf'), float('inf'), float('inf')],
baulking_functions=[None, None, example_baulking_function]
)
self.assertEqual(N.number_of_nodes, 3)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[1].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[1].number_of_servers, 2)
self.assertEqual(N.service_centres[1].schedule, None)
self.assertFalse(N.service_centres[1].schedule_preempt)
self.assertEqual(N.service_centres[2].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[2].number_of_servers, 4)
self.assertEqual(N.service_centres[2].schedule, None)
self.assertFalse(N.service_centres[2].schedule_preempt)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0', 'Exponential: 4.0', 'Exponential: 2.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0', 'Uniform: 0.4, 1.2', 'Deterministic: 5.33'])
self.assertEqual(N.customer_classes[0].routing, [[0.5, 0.0, 0.1],
[0.2, 0.1, 0.0],
[0.0, 0.0, 0.0]])
self.assertEqual(N.customer_classes[0].baulking_functions, [None, None, example_baulking_function])
self.assertEqual(N.number_of_priority_classes, 1)
N = ciw.create_network(
arrival_distributions=[ciw.dists.Exponential(5), ciw.dists.Exponential(5)],
service_distributions=[ciw.dists.Exponential(4), ciw.dists.Exponential(3)],
number_of_servers=[2, 2],
routing=[[0.0, 1.0], [0.2, 0.2]],
reneging_time_distributions=[ciw.dists.Exponential(1), None],
reneging_destinations=[2, -1]
)
self.assertEqual(N.number_of_nodes, 2)
self.assertEqual(N.number_of_classes, 1)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 2)
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertTrue(N.service_centres[0].reneging)
self.assertEqual(N.service_centres[1].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[1].number_of_servers, 2)
self.assertEqual(N.service_centres[1].schedule, None)
self.assertFalse(N.service_centres[1].schedule_preempt)
self.assertFalse(N.service_centres[1].reneging)
self.assertEqual(str(N.customer_classes[0].reneging_time_distributions[0]), 'Exponential: 1')
self.assertEqual(N.customer_classes[0].reneging_time_distributions[1], None)
self.assertEqual(N.customer_classes[0].reneging_destinations[0], 2)
self.assertEqual(N.customer_classes[0].reneging_destinations[1], -1)
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Exponential(3.0)],
'Class 1': [ciw.dists.Exponential(4.0)]},
service_distributions={'Class 0': [ciw.dists.Exponential(7.0)],
'Class 1': [ciw.dists.Uniform(0.4, 1.2)]},
number_of_servers=[9],
routing={'Class 0': [[0.5]],
'Class 1': [[0.0]]},
class_change_time_distributions=[
[None, ciw.dists.Deterministic(5)],
[ciw.dists.Deterministic(10), None]]
)
self.assertEqual(N.number_of_nodes, 1)
self.assertEqual(N.number_of_classes, 2)
self.assertEqual(N.service_centres[0].queueing_capacity, float('inf'))
self.assertEqual(N.service_centres[0].number_of_servers, 9)
self.assertEqual([str(d) for d in N.customer_classes[0].class_change_time_distributions], ['None', 'Deterministic: 5'])
self.assertEqual([str(d) for d in N.customer_classes[1].class_change_time_distributions], ['Deterministic: 10', 'None'])
self.assertEqual(N.service_centres[0].schedule, None)
self.assertFalse(N.service_centres[0].schedule_preempt)
self.assertEqual(N.service_centres[0].ps_threshold, 1)
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Exponential: 3.0'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Exponential: 7.0'])
self.assertEqual(N.customer_classes[0].routing, [[0.5]])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 4.0'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Uniform: 0.4, 1.2'])
self.assertEqual(N.customer_classes[1].routing, [[0.0]])
self.assertEqual(N.number_of_priority_classes, 1)
self.assertEqual(N.priority_class_mapping, {0:0, 1:0})
def test_create_network_preempt_priorities(self):
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Exponential(3.0), ciw.dists.Exponential(2.0)],
'Class 1': [ciw.dists.Exponential(4.0), ciw.dists.Exponential(2.0)],
'Class 2': [ciw.dists.Exponential(3.0), ciw.dists.Exponential(1.0)]},
service_distributions={'Class 0': [ciw.dists.Exponential(7.0), ciw.dists.Exponential(5.0)],
'Class 1': [ciw.dists.Exponential(8.0), ciw.dists.Exponential(1.0)],
'Class 2': [ciw.dists.Uniform(0.4, 1.2), ciw.dists.Uniform(0.4, 1.2)]},
number_of_servers=[9, 2],
routing={'Class 0': [[0.0, 0.5], [0.1, 0.3]],
'Class 1': [[0.0, 0.5], [0.3, 0.1]],
'Class 2': [[0.1, 0.4], [0.3, 0.1]]},
priority_classes=({'Class 0': 0, 'Class 1': 1, 'Class 2': 0}, [True, False])
)
self.assertEqual(N.number_of_nodes, 2)
self.assertEqual(N.number_of_classes, 3)
self.assertEqual(N.service_centres[0].priority_preempt, True)
self.assertEqual(N.service_centres[1].priority_preempt, False)
self.assertEqual(N.customer_classes[0].priority_class, 0)
self.assertEqual(N.customer_classes[1].priority_class, 1)
self.assertEqual(N.customer_classes[2].priority_class, 0)
def test_error_no_arrivals_servers_services(self):
with self.assertRaises(ValueError):
ciw.create_network()
with self.assertRaises(ValueError):
ciw.create_network(arrival_distributions=[ciw.dists.Exponential(0.2)])
with self.assertRaises(ValueError):
ciw.create_network(service_distributions=[ciw.dists.Exponential(0.2)])
with self.assertRaises(ValueError):
ciw.create_network(number_of_servers=[1])
with self.assertRaises(ValueError):
ciw.create_network(arrival_distributions=[ciw.dists.Exponential(0.2)], number_of_servers=[1])
with self.assertRaises(ValueError):
ciw.create_network(arrival_distributions=[ciw.dists.Exponential(0.2)], service_distributions=[ciw.dists.Exponential(0.2)])
with self.assertRaises(ValueError):
ciw.create_network(service_distributions=[ciw.dists.Exponential(0.2)], number_of_servers=[1])
def test_error_extra_args(self):
params = {'arrival_distributions': [ciw.dists.Exponential(3.0)],
'service_distributions': [ciw.dists.Exponential(7.0)],
'number_of_servers': [4],
'something_else': 56
}
with self.assertRaises(TypeError):
ciw.create_network(**params)
def test_raise_error_wrong_batch_dist(self):
params = {'arrival_distributions': [ciw.dists.Exponential(3.0)],
'service_distributions': [ciw.dists.Exponential(7.0)],
'number_of_servers': [4],
'batching_distributions': [ciw.dists.Exponential(1.3)]
}
N = ciw.create_network(**params)
with self.assertRaises(ValueError):
Q = ciw.Simulation(N)
Q.simulate_until_max_time(10)
def test_read_dists_from_yml(self):
N = ciw.create_network_from_yml('ciw/tests/testing_parameters/params_dists.yml')
self.assertEqual([str(d) for d in N.customer_classes[0].arrival_distributions], ['Uniform: 1.4, 2.3', 'Deterministic: 3.0', 'Triangular: 0.5, 0.9, 1.4'])
self.assertEqual([str(d) for d in N.customer_classes[1].arrival_distributions], ['Exponential: 0.4', 'Gamma: 8.8, 9.9', 'NoArrivals'])
self.assertEqual([str(d) for d in N.customer_classes[0].service_distributions], ['Lognormal: 5.5, 3.6', 'Weibull: 5.0, 8.4', 'Distribution'])
self.assertEqual([str(d) for d in N.customer_classes[1].service_distributions], ['Exponential: 0.5', 'Pmf', 'Normal: 5.0, 0.6'])
``` |
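For context, the network objects built in these tests are consumed by `ciw.Simulation`, as the batching-distribution test above does. A minimal end-to-end sketch (the parameters are illustrative, not drawn from any specific test) might look like:
```python
import ciw

# A single node: Poisson arrivals, exponential services, three servers.
# Routing defaults to [[0.0]] (see test_optional_transition_matrix above).
N = ciw.create_network(
    arrival_distributions=[ciw.dists.Exponential(3.0)],
    service_distributions=[ciw.dists.Exponential(7.0)],
    number_of_servers=[3],
)
Q = ciw.Simulation(N)
Q.simulate_until_max_time(100)
```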
{
"source": "11michalis11/Nashpy",
"score": 4
} |
#### File: discussion/mypy/main_with_wrong_types.py
```python
from typing import Iterable
def get_mean(collection: Iterable) -> float:
"""
Obtain the average of a collection of objects.
Parameters
----------
collection : Iterable
A list of numbers
Returns
-------
float
The mean of the numbers.
"""
return sum(collection) / len(collection)
```
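As the filename suggests, the annotation above is deliberately too loose: an arbitrary `Iterable` is not guaranteed to support `len`, so mypy rejects the `len(collection)` call. A corrected sketch (my own illustration, not part of the repository) narrows the parameter to a sized type such as `Sequence`:
```python
from typing import Sequence

def get_mean(collection: Sequence[float]) -> float:
    """Obtain the average of a sized collection of numbers."""
    # Sequence supports both iteration (for sum) and len.
    return sum(collection) / len(collection)
```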
#### File: tests/unit/test_is_best_response.py
```python
import numpy as np
from nashpy.utils.is_best_response import (
is_best_response,
)
def test_is_best_response_example_1():
"""
This tests an example from the discussion documentation.
The second assert checks that the column player strategy is as expected.
"""
A = np.array(((0, -1, 1), (1, 0, -1), (-1, 1, 0)))
sigma_c = np.array((0, 1 / 2, 1 / 2))
sigma_r = np.array((0, 0, 1))
assert is_best_response(A=A, sigma_c=sigma_c, sigma_r=sigma_r) is True
assert is_best_response(A=-A.T, sigma_c=sigma_r, sigma_r=sigma_c) is False
def test_is_best_response_example_2():
"""
This tests an example from the discussion documentation.
The second assert checks that the column player strategy is as expected.
"""
A = np.array(((0, -1, 1), (1, 0, -1), (-1, 1, 0)))
sigma_c = np.array((0, 1 / 2, 1 / 2))
sigma_r = np.array((1 / 3, 1 / 3, 1 / 3))
assert is_best_response(A=A, sigma_c=sigma_c, sigma_r=sigma_r) is False
assert is_best_response(A=-A.T, sigma_c=sigma_r, sigma_r=sigma_c) is True
def test_is_best_response_example_3():
"""
This tests an example from the discussion documentation.
The second assert checks that the column player strategy is as expected.
"""
A = np.array(((0, -1, 1), (1, 0, -1), (-1, 1, 0)))
sigma_c = np.array((1 / 3, 1 / 3, 1 / 3))
sigma_r = np.array((1 / 3, 1 / 3, 1 / 3))
assert is_best_response(A=A, sigma_c=sigma_c, sigma_r=sigma_r) is True
assert is_best_response(A=-A.T, sigma_c=sigma_r, sigma_r=sigma_c) is True
``` |
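The `is_best_response` helper itself is not reproduced in this excerpt. The condition the tests exercise follows directly from the definition: `sigma_r` is a best response to `sigma_c` exactly when its expected payoff against `sigma_c` attains the maximum over all pure row strategies. The sketch below is my own illustration of that condition, not Nashpy's implementation:
```python
import numpy as np

def row_is_best_response(A, sigma_r, sigma_c):
    """Return True if sigma_r is a best response to sigma_c for payoff matrix A."""
    row_payoffs = A @ sigma_c          # expected payoff of each pure row strategy
    achieved = sigma_r @ row_payoffs   # expected payoff of the mixed strategy
    return bool(np.isclose(achieved, row_payoffs.max()))
```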
{
"source": "11michalis11/pfm",
"score": 3
} |
#### File: 07-testing/tutorial/test_absorption.py
```python
import numpy as np
import absorption
def test_long_run_state_for_known_number_of_states():
"""
    This tests `get_long_run_state` for a small example matrix.
"""
pi = np.array([1, 0, 0])
P = np.array([[1 / 2, 1 / 4, 1 / 4], [1 / 3, 1 / 3, 1 / 3], [0, 0, 1]])
pi_after_5_steps = absorption.get_long_run_state(pi=pi, k=5, P=P)
assert np.array_equal(
pi_after_5_steps, pi @ np.linalg.matrix_power(P, 5)
), "Did not get expected result for pi after 5 steps"
def test_long_run_state_when_starting_in_absorbing_state():
"""
This tests the `long_run_state` for a small example matrix.
In this test we start in the absorbing state, the state vector should not
change.
"""
pi = np.array([0, 0, 1])
P = np.array([[1 / 2, 1 / 4, 1 / 4], [1 / 3, 1 / 3, 1 / 3], [0, 0, 1]])
pi_after_5_steps = absorption.get_long_run_state(pi=pi, k=5, P=P)
assert np.array_equal(pi_after_5_steps, pi)
def test_extract_Q():
"""
This tests that the submatrix Q can be extracted from a given matrix P.
"""
P = np.array([[1 / 2, 1 / 4, 1 / 4], [1 / 3, 1 / 3, 1 / 3], [0, 0, 1]])
Q = absorption.extract_Q(P)
expected_Q = np.array([[1 / 2, 1 / 4], [1 / 3, 1 / 3]])
assert np.array_equal(
Q, expected_Q
), f"The expected Q did not match, the code obtained {Q}"
def test_compute_N():
"""
    This tests the computation of the fundamental matrix N
"""
P = np.array([[1 / 2, 1 / 4, 1 / 4], [1 / 3, 1 / 3, 1 / 3], [0, 0, 1]])
Q = absorption.extract_Q(P)
N = absorption.compute_N(Q)
expected_N = np.array([[8 / 3, 1], [4 / 3, 2]])
assert np.allclose(
N, expected_N
), f"The expected N did not match, the code obtained {N}, the expected value was {expected_N}"
def test_compute_t():
"""
This tests the computation of the number of steps until absorption t.
"""
P = np.array([[1 / 2, 1 / 4, 1 / 4], [1 / 3, 1 / 3, 1 / 3], [0, 0, 1]])
t = absorption.compute_t(P)
expected_t = np.array([11 / 3, 10 / 3])
assert np.allclose(
t, expected_t
), f"The expected t did not match, the code obtained {t}"
test_long_run_state_for_known_number_of_states()
test_long_run_state_when_starting_in_absorbing_state()
test_extract_Q()
test_compute_N()
test_compute_t()
``` |
{
"source": "11pawan11/ehealth",
"score": 3
} |
#### File: ehealth/patient/image_block.py
```python
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.preprocessing import image
import zipfile
import os
import pickle
from keras.models import model_from_json
from django.conf import settings
def training():
zip_ref = zipfile.ZipFile("check.zip", 'r')
zip_ref.extractall("check/chest_xray")
zip_ref.close()
train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
training_set = train_datagen.flow_from_directory('check/chest_xray/train',target_size = (64, 64),batch_size = 32,class_mode = 'binary')
train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
test_set = train_datagen.flow_from_directory('check/chest_xray/test',target_size = (64, 64),batch_size = 32,class_mode = 'binary')
DESIRED_ACCURACY = 0.95
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            # newer Keras releases log the metric as 'accuracy' rather than 'acc'
            logs = logs or {}
            acc = logs.get('acc', logs.get('accuracy'))
            if acc is not None and acc > DESIRED_ACCURACY:
                print("\nReached the desired accuracy so cancelling training!")
                self.model.stop_training = True
callbacks = myCallback()
cnn = tf.keras.models.Sequential()
# Convolution
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
# Pooling
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
### Adding a second convolutional layer
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
##Pooling
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Flattening
cnn.add(tf.keras.layers.Flatten())
### Step 4 - Full Connection
cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
# Output layer
cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
cnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Training the CNN on the Training set and evaluating it on the Test set
    cnn.fit(x = training_set, validation_data = test_set, epochs = 1, callbacks = [callbacks])
# serialize model to JSON
model_json = cnn.to_json()
with open("datasets/model_check.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
cnn.save_weights("datasets/model_check.h5")
print("Saved model to disk")
def predImageBlock(ob):
name = ob.file.name
fullpath = os.path.abspath(name)
test_image = image.load_img(fullpath, target_size = (64, 64 ))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
# later...
# load json and create model
json_file = open('datasets/model_check.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("datasets/model_check.h5")
result = loaded_model.predict(test_image)
print("yes"*20, result)
return result
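# Interpreting the sigmoid output above (a sketch; the positive/negative class
# mapping depends on how flow_from_directory assigned labels during training, so
# treat the threshold reading as an assumption):
#   is_positive_class = result[0][0] > 0.5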
if __name__=="__main__":
training()
# pred1()
```
#### File: ehealth/patient/models.py
```python
from django.db import models
from django.core.validators import MinLengthValidator, RegexValidator
from django.db.models import CASCADE
from django.contrib.auth.models import User
#from patient.models import PatientInfo
from django.db.models.signals import post_save
from django.db.models import signals
from django.dispatch import receiver
from doctor.models import *
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE)
name=models.CharField(max_length=20)
phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True) # validators should be a list
address=models.CharField(max_length=40)
gender=models.CharField( max_length=50,blank=True,choices=(('Female','Female'),('Male','Male'),('Other','Other')))
age=models.IntegerField(blank=True , null=True)
status=models.CharField(max_length=20,blank=True,choices=(('Single','Single'),('Married','Married')))
profile_pic=models.ImageField(blank=True,null=True)
def __str__(self):
return self.name
@receiver(post_save,sender=User)
def create_user_profile(sender,instance,created,**kwargs):
if created:
Profile.objects.create(user=instance,name=instance.username)
@receiver(post_save,sender=User)
def save_user_profile(sender,instance,**kwargs):
instance.profile.save()
# when profile is deleted user is also delete
def delete_user(sender, instance=None, **kwargs):
try:
instance.user
except User.DoesNotExist:
pass
else:
instance.user.delete()
signals.post_delete.connect(delete_user, sender=Profile)
class Feedback(models.Model):
text=models.TextField(max_length=200)
title=models.CharField(max_length=50)
picture=models.ImageField()
date=models.DateTimeField(auto_now_add=True)
uploaded_by=models.ForeignKey(Profile,on_delete=models.CASCADE)
def __str__(self):
return self.title
class WhoPredictDisease(models.Model):
predict_by=models.ForeignKey(Profile,on_delete=models.CASCADE)
predicted_disease=models.CharField(max_length=30)
def __str__(self):
return self.predicted_disease
class Disease1(models.Model):
name=models.CharField(max_length=200)
doctor=models.ForeignKey(DoctorInfo,on_delete=models.CASCADE,null=True)
def __str__(self):
return self.name
# Create your models here.
class Heart(models.Model):
age=models.IntegerField()
sex=models.IntegerField()
cp=models.IntegerField()
trestbps=models.IntegerField()
chol=models.IntegerField()
fbs=models.IntegerField()
restecg=models.IntegerField()
thalach=models.IntegerField()
exang=models.IntegerField()
oldpeak=models.IntegerField()
slope=models.IntegerField()
ca =models.IntegerField()
thal=models.IntegerField()
def to_dict(self):
return{
'age':self.age,
'sex':self.sex,
'cp':self.cp,
'trestbps':self.trestbps,
'chol':self.chol,
'fbs':self.fbs,
'restecg':self.restecg,
'thalach':self.thalach,
'exang':self.exang,
'slope':self.slope,
'oldpeak':self.oldpeak,
'ca':self.ca,
'thal':self.thal,
}
class Diabetes(models.Model):
Pregnancies=models.IntegerField()
Glucose=models.IntegerField()
BloodPressure=models.IntegerField()
SkinThickness=models.IntegerField()
Insulin=models.IntegerField()
BMI=models.IntegerField()
DiabetesPedigreeFunction=models.IntegerField()
Age=models.IntegerField()
def to_dict(self):
return{
'Pregnancies':self.Pregnancies,
'Glucose':self.Glucose,
'BloodPressure':self.BloodPressure,
'SkinThickness':self.SkinThickness,
'Insulin':self.Insulin,
'BMI':self.BMI,
'DiabetesPedigreeFunction':self.DiabetesPedigreeFunction,
'Age':self.Age,
}
class Image(models.Model):
imagefile= models.FileField(upload_to='images/')
class ImageBlock(models.Model):
imageblock = models.FileField(upload_to='images/')
```
#### File: ehealth/roleadmin/forms.py
```python
from patient.models import Disease1
from doctor.models import DoctorInfo
from django import forms
from django.forms import ModelChoiceField
class AddDiseaseForm(forms.ModelForm):
name=forms.CharField(widget=forms.TextInput(),label='Disease Name',error_messages={'required':'Enter upto 200 characters'})
doctor=forms.ModelChoiceField(queryset=DoctorInfo.objects.all(),initial=0,error_messages={'required':'Please select a doctor'})
class Meta:
model=Disease1
fields=["name","doctor",]
def __init__(self,*args,**kwargs):
super(AddDiseaseForm,self).__init__(*args,**kwargs)
self.fields['name'].error_messages.update({'required':'Enter upto 200 characters'})
self.fields['doctor'].error_messages.update({'required':'Please select a doctor'})
``` |
{
"source": "11petr11/ScansSeparate",
"score": 3
} |
#### File: 11petr11/ScansSeparate/fil.py
```python
import asyncio, evdev
import asyncio
import time
from evdev import UInput, ecodes as e
UINPUT_NAME = "VKey"
ui = UInput(None,UINPUT_NAME)
lastN = 0
activeID = ""
def scanFilter(id,ev):
n = time.time()
global lastN
global activeID
d = n - lastN
b = d > 0.4
#print(b, d, n, e.KEY[ev.code])
if b:
activeID = id
if activeID == id:
#print(id," : ",evdev.categorize(ev), "diff: ", b)
ui.write_event(ev)
else:
print("BLOCK",b, d, n, e.KEY[ev.code])
lastN = n
@asyncio.coroutine
def print_events(device,path):
with device.grab_context():
while True:
try:
events = yield from device.async_read()
for event in events:
#print(device.path, evdev.categorize(event), sep=': ')
scanFilter(device.path,event)
except OSError:
global listDevices
listDevices.remove(path)
break
listDevices = []
async def CheckDevices(interval):
global listDevices
while True:
print("CheckDevice",[evdev.InputDevice(dev).name for dev in listDevices])
for dev in evdev.list_devices():
if not (dev in listDevices):
devices = evdev.InputDevice(dev)
if not (devices.name in [UINPUT_NAME,"gpio_keys"]):
listDevices.append(dev)
asyncio.ensure_future(print_events(devices,dev))
await asyncio.sleep(interval)
loop = asyncio.get_event_loop()
try:
asyncio.ensure_future(CheckDevices(2))
# devices = [evdev.InputDevice(path) for path in evdev.list_devices()]
# for device in devices:
# asyncio.async(print_events(device))
loop = asyncio.get_event_loop()
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
print("Closing Loop")
loop.close()
print("END Script")
``` |
{
"source": "11philip22/scripts",
"score": 3
} |
#### File: 11philip22/scripts/sshuttle.py
```python
import os
import sys
import time
def get_current_ip(): # gets your current ip
ip = os.popen("curl -s checkip.dyndns.org | \
sed -e 's/.*Current IP Address: //' -e 's/<.*$//'").read()
ip = ip.rstrip()
return ip
def connection_check(ip):  # checks if the connection is successful
t_end = time.time() + 60 * 1
while time.time() < t_end:
if get_current_ip() == ip:
print("you are now connected to {0}".format(ip))
exit(0)
print("unable to connect")
exit(1)
def main():
if len(sys.argv) < 2: # checks if there are arguments
print("Usage is vpn-hole start/stop")
exit(1)
if sys.argv[1] == "start":
try:
ip = os.popen("cat /home/philip/scripts/nicetryfbi.txt | \
grep wan | awk '{print $2}'").read()
ip = ip.rstrip()
# replace the ip variable with your ip
if get_current_ip() == ip: # checks if you are not already connected
print("you are already connected")
exit(1)
os.system("sshuttle --dns -x {0} -r philip@{0} \
0/0 --python=python3 -D".format(ip)) # start a sshuttle vpn
connection_check(ip)
except Exception as e:
print(e)
print("could not start sshuttle")
exit(1)
if sys.argv[1] == "stop": # finds sshuttle process and kills it
try:
os.system("kill $(pgrep sshuttle) > /dev/null 2>&1")
print("sshuttle killed")
exit(0)
except Exception as e:
print(e)
print("sshuttle is not running")
exit(1)
if __name__ == "__main__":
main()
``` |
{
"source": "11rohans/food-classifier",
"score": 2
} |
#### File: dogs/cv/vlad.py
```python
from dogs.cv.bow import Bow  # assuming the Bow class is exported by the bow package; "import ... as" would bind a module here
class VLAD():
def __init__(self, dict_path="./dict", dict_size=64):
self.dict_path = dict_path
self.bow = Bow()
``` |
{
"source": "11rohans/modin",
"score": 2
} |
#### File: modin/pandas/io.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.io.common import _infer_compression
import inspect
from io import BytesIO
import os
import py
import ray
import re
import warnings
import numpy as np
from .dataframe import DataFrame
from .utils import from_pandas
from ..data_management.partitioning.partition_collections import RayBlockPartitions
from ..data_management.partitioning.remote_partition import PandasOnRayRemotePartition
from ..data_management.partitioning.axis_partition import (
split_result_of_axis_func_pandas,
)
from modin.data_management.query_compiler import PandasQueryCompiler
PQ_INDEX_REGEX = re.compile("__index_level_\d+__") # noqa W605
# Parquet
def read_parquet(path, engine="auto", columns=None, **kwargs):
"""Load a parquet object from the file path, returning a DataFrame.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the parquet file.
We only support local files for now.
        engine: Ray only supports the pyarrow reader.
This argument doesn't do anything for now.
kwargs: Pass into parquet's read_pandas function.
Notes:
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
return _read_parquet_pandas_on_ray(path, engine, columns, **kwargs)
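# Minimal usage sketch for read_parquet above (the file name and column names are
# illustrative only, not taken from this repository):
#   import modin.pandas as pd
#   df = pd.read_parquet("example.parquet", columns=["a", "b"])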
def _read_parquet_pandas_on_ray(path, engine, columns, **kwargs):
from pyarrow.parquet import ParquetFile
if not columns:
pf = ParquetFile(path)
columns = [
name for name in pf.metadata.schema.names if not PQ_INDEX_REGEX.match(name)
]
num_partitions = RayBlockPartitions._compute_num_partitions()
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits] for i in range(0, len(columns), column_splits)
]
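    # Illustration of the split arithmetic above (numbers are made up): with 10
    # columns and num_partitions = 4, column_splits = 10 // 4 + 1 = 3, so the
    # column groups have sizes [3, 3, 3, 1].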
# Each item in this list will be a list of columns of original df
# partitioned to smaller pieces along rows.
# We need to transpose the oids array to fit our schema.
blk_partitions = np.array(
[
_read_parquet_columns._submit(
args=(path, cols, num_splits, kwargs), num_return_vals=num_splits + 1
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[PandasOnRayRemotePartition(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_manager = PandasQueryCompiler(
RayBlockPartitions(remote_partitions), index, columns
)
df = DataFrame(query_compiler=new_manager)
return df
# CSV
def _skip_header(f, kwargs={}):
lines_read = 0
comment = kwargs.get("comment", None)
skiprows = kwargs.get("skiprows", None)
encoding = kwargs.get("encoding", None)
header = kwargs.get("header", "infer")
names = kwargs.get("names", None)
if header is None:
return lines_read
elif header == "infer":
if names is not None:
return lines_read
else:
header = 0
# Skip lines before the header
if isinstance(skiprows, int):
lines_read += skiprows
for _ in range(skiprows):
f.readline()
skiprows = None
header_lines = header + 1 if isinstance(header, int) else max(header) + 1
header_lines_skipped = 0
# Python 2 files use a read-ahead buffer which breaks our use of tell()
for line in iter(f.readline, ""):
lines_read += 1
skip = False
if not skip and comment is not None:
if encoding is not None:
skip |= line.decode(encoding)[0] == comment
else:
skip |= line.decode()[0] == comment
if not skip and callable(skiprows):
skip |= skiprows(lines_read)
elif not skip and hasattr(skiprows, "__contains__"):
skip |= lines_read in skiprows
if not skip:
header_lines_skipped += 1
if header_lines_skipped == header_lines:
return lines_read
return lines_read
def _read_csv_from_file_pandas_on_ray(filepath, kwargs={}):
"""Constructs a DataFrame from a CSV file.
Args:
filepath (str): path to the CSV file.
npartitions (int): number of partitions for the DataFrame.
kwargs (dict): args excluding filepath provided to read_csv.
Returns:
DataFrame or Series constructed from CSV file.
"""
empty_pd_df = pandas.read_csv(filepath, **dict(kwargs, nrows=0, skipfooter=0))
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
partition_kwargs = dict(kwargs, header=None, names=column_names, skipfooter=0)
with open(filepath, "rb") as f:
# Get the BOM if necessary
prefix = b""
if kwargs.get("encoding", None) is not None:
prefix = f.readline()
partition_kwargs["skiprows"] = 1
f.seek(0, os.SEEK_SET) # Return to beginning of file
prefix_id = ray.put(prefix)
partition_kwargs_id = ray.put(partition_kwargs)
# Skip the header since we already have the header information
_skip_header(f, kwargs)
# Launch tasks to read partitions
partition_ids = []
index_ids = []
total_bytes = os.path.getsize(filepath)
# Max number of partitions available
num_parts = RayBlockPartitions._compute_num_partitions()
# This is the number of splits for the columns
num_splits = min(len(column_names), num_parts)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_parts)
while f.tell() < total_bytes:
start = f.tell()
f.seek(chunk_size, os.SEEK_CUR)
f.readline() # Read a whole number of lines
partition_id = _read_csv_with_offset_pandas_on_ray._submit(
args=(
filepath,
num_splits,
start,
f.tell(),
partition_kwargs_id,
prefix_id,
),
num_return_vals=num_splits + 1,
)
partition_ids.append(
[PandasOnRayRemotePartition(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
index_col = kwargs.get("index_col", None)
if index_col is None:
new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
else:
new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids)
new_index = ray.get(new_index_ids)
new_manager = PandasQueryCompiler(
RayBlockPartitions(np.array(partition_ids)), new_index, column_names
)
df = DataFrame(query_compiler=new_manager)
if skipfooter:
df = df.drop(df.index[-skipfooter:])
if kwargs.get("squeeze", False) and len(df.columns) == 1:
return df[df.columns[0]]
return df
def _read_csv_from_pandas(filepath_or_buffer, kwargs):
pd_obj = pandas.read_csv(filepath_or_buffer, **kwargs)
if isinstance(pd_obj, pandas.DataFrame):
return from_pandas(pd_obj)
elif isinstance(pd_obj, pandas.io.parsers.TextFileReader):
# Overwriting the read method should return a ray DataFrame for calls
# to __next__ and get_chunk
pd_read = pd_obj.read
pd_obj.read = lambda *args, **kwargs: from_pandas(pd_read(*args, **kwargs))
return pd_obj
def read_csv(
filepath_or_buffer,
sep=",",
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal=b".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
tupleize_cols=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
"""Read csv file from local disk.
Args:
filepath:
The filepath of the csv file.
We only support local files for now.
        kwargs: Keyword arguments as in pandas.read_csv
"""
# The intention of the inspection code is to reduce the amount of
# communication we have to do between processes and nodes. We take a quick
# pass over the arguments and remove those that are default values so we
# don't have to serialize and send them to the workers. Because the
# arguments list is so long, this does end up saving time based on the
# number of nodes in the cluster.
frame = inspect.currentframe()
_, _, _, kwargs = inspect.getargvalues(frame)
try:
args, _, _, defaults, _, _, _ = inspect.getfullargspec(read_csv)
defaults = dict(zip(args[1:], defaults))
kwargs = {
kw: kwargs[kw]
for kw in kwargs
if kw in defaults and kwargs[kw] != defaults[kw]
}
# This happens on Python2, we will just default to serializing the entire dictionary
except AttributeError:
# We suppress the error and delete the kwargs not needed in the remote function.
del kwargs["filepath_or_buffer"]
del kwargs["frame"]
if isinstance(filepath_or_buffer, str):
if not os.path.exists(filepath_or_buffer):
warnings.warn(
"File not found on disk. Defaulting to Pandas implementation.",
UserWarning,
)
return _read_csv_from_pandas(filepath_or_buffer, kwargs)
elif not isinstance(filepath_or_buffer, py.path.local):
read_from_pandas = True
# Pandas read_csv supports pathlib.Path
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
read_from_pandas = False
except ImportError:
pass
if read_from_pandas:
warnings.warn(
"Reading from buffer. Defaulting to Pandas implementation.", UserWarning
)
return _read_csv_from_pandas(filepath_or_buffer, kwargs)
if _infer_compression(filepath_or_buffer, compression) is not None:
warnings.warn(
"Compression detected. Defaulting to Pandas implementation.", UserWarning
)
return _read_csv_from_pandas(filepath_or_buffer, kwargs)
if chunksize is not None:
warnings.warn(
"Reading chunks from a file. Defaulting to Pandas implementation.",
UserWarning,
)
return _read_csv_from_pandas(filepath_or_buffer, kwargs)
if skiprows is not None and not isinstance(skiprows, int):
warnings.warn(
(
"Defaulting to Pandas implementation. To speed up "
"read_csv through the Modin implementation, "
"comment the rows to skip instead."
)
)
return _read_csv_from_pandas(filepath_or_buffer, kwargs)
# TODO: replace this by reading lines from file.
if nrows is not None:
warnings.warn("Defaulting to Pandas implementation.", UserWarning)
return _read_csv_from_pandas(filepath_or_buffer, kwargs)
else:
return _read_csv_from_file_pandas_on_ray(filepath_or_buffer, kwargs)
def read_json(
path_or_buf=None,
orient=None,
typ="frame",
dtype=True,
convert_axes=True,
convert_dates=True,
keep_default_dates=True,
numpy=False,
precise_float=False,
date_unit=None,
encoding=None,
lines=False,
chunksize=None,
compression="infer",
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_json(
path_or_buf,
orient,
typ,
dtype,
convert_axes,
convert_dates,
keep_default_dates,
numpy,
precise_float,
date_unit,
encoding,
lines,
chunksize,
compression,
)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_gbq(
query,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
verbose=None,
private_key=None,
dialect="legacy",
**kwargs
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_gbq(
query,
project_id=project_id,
index_col=index_col,
col_order=col_order,
reauth=reauth,
verbose=verbose,
private_key=private_key,
dialect=dialect,
**kwargs
)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_html(
io,
match=".+",
flavor=None,
header=None,
index_col=None,
skiprows=None,
attrs=None,
parse_dates=False,
tupleize_cols=None,
thousands=",",
encoding=None,
decimal=".",
converters=None,
na_values=None,
keep_default_na=True,
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_html(
io,
match,
flavor,
header,
index_col,
skiprows,
attrs,
parse_dates,
tupleize_cols,
thousands,
encoding,
decimal,
converters,
na_values,
keep_default_na,
)
ray_frame = from_pandas(port_frame[0])
return ray_frame
def read_clipboard(sep=r"\s+"):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_clipboard(sep)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_excel(
io,
sheet_name=0,
header=0,
skiprows=None,
index_col=None,
names=None,
usecols=None,
parse_dates=False,
date_parser=None,
na_values=None,
thousands=None,
convert_float=True,
converters=None,
dtype=None,
true_values=None,
false_values=None,
engine=None,
squeeze=False,
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_excel(
io,
sheet_name,
header,
skiprows,
index_col,
names,
usecols,
parse_dates,
date_parser,
na_values,
thousands,
convert_float,
converters,
dtype,
true_values,
false_values,
engine,
squeeze,
)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_hdf(path_or_buf, key=None, mode="r"):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_hdf(path_or_buf, key, mode)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_feather(path, nthreads=1):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_feather(path)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_msgpack(path_or_buf, encoding="utf-8", iterator=False):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_msgpack(path_or_buf, encoding, iterator)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_stata(
filepath_or_buffer,
convert_dates=True,
convert_categoricals=True,
encoding=None,
index_col=None,
convert_missing=False,
preserve_dtypes=True,
columns=None,
order_categoricals=True,
chunksize=None,
iterator=False,
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_stata(
filepath_or_buffer,
convert_dates,
convert_categoricals,
encoding,
index_col,
convert_missing,
preserve_dtypes,
columns,
order_categoricals,
chunksize,
iterator,
)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_sas(
filepath_or_buffer,
format=None,
index=None,
encoding=None,
chunksize=None,
iterator=False,
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_sas(
filepath_or_buffer, format, index, encoding, chunksize, iterator
)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_pickle(path, compression="infer"):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_pickle(path, compression)
ray_frame = from_pandas(port_frame)
return ray_frame
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
):
warnings.warn("Defaulting to Pandas implementation", UserWarning)
port_frame = pandas.read_sql(
sql, con, index_col, coerce_float, params, parse_dates, columns, chunksize
)
ray_frame = from_pandas(port_frame)
return ray_frame
@ray.remote
def get_index(index_name, *partition_indices):
index = partition_indices[0].append(partition_indices[1:])
index.names = index_name
return index
@ray.remote
def _read_csv_with_offset_pandas_on_ray(fname, num_splits, start, end, kwargs, header):
"""Use a Ray task to read a chunk of a CSV into a Pandas DataFrame.
Args:
fname: The filename of the file to open.
num_splits: The number of splits (partitions) to separate the DataFrame into.
start: The start byte offset.
end: The end byte offset.
kwargs: The kwargs for the Pandas `read_csv` function.
header: The header of the file.
Returns:
A list containing the split Pandas DataFrames and the Index as the last
        element. If there is no `index_col` set, then we just return the length.
This is used to determine the total length of the DataFrame to build a
default Index.
"""
bio = open(fname, "rb")
bio.seek(start)
to_read = header + bio.read(end - start)
bio.close()
pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)
pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns))
if kwargs.get("index_col", None) is not None:
index = pandas_df.index
# Partitions must have RangeIndex
pandas_df.index = pandas.RangeIndex(0, len(pandas_df))
else:
# We will use the lengths to build the index if we are not given an
# `index_col`.
index = len(pandas_df)
return split_result_of_axis_func_pandas(1, num_splits, pandas_df) + [index]
@ray.remote
def _read_parquet_columns(path, columns, num_splits, kwargs):
"""Use a Ray task to read a column from Parquet into a Pandas DataFrame.
Args:
path: The path of the Parquet file.
columns: The list of column names to read.
num_splits: The number of partitions to split the column into.
Returns:
A list containing the split Pandas DataFrames and the Index as the last
        element. If there is no `index_col` set, then we just return the length.
This is used to determine the total length of the DataFrame to build a
default Index.
"""
import pyarrow.parquet as pq
df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas()
# Append the length of the index here to build it externally
return split_result_of_axis_func_pandas(0, num_splits, df) + [len(df.index)]
``` |
{
"source": "11sonali/Assignment_1",
"score": 4
} |
#### File: 11sonali/Assignment_1/HCDS Code1.py
```python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def create_graph():
# Initialize the lists for X, Y1, and Y2
data = pd.read_csv('C:\\Users\\sonal\\OneDrive\\Desktop\\Education\\06 Spring 2022\\Comp Sci\\Homework\\HCDS Data1.csv')
df = pd.DataFrame(data)
x = np.arange(7)
y1 = list(df.iloc[:, 1])
y2 = list(df.iloc[:, 2])
width = 0.40
# Plot data in grouped manner of bar type
plt.ylim(ymin = 0, ymax = 100)
plt.bar(x-0.2, y1, width, color='b')
plt.bar(x+0.2, y2, width, color='r')
plt.xticks(x,list(df.iloc[:, 0]))
plt.title("Hancock vs Norwood Estate Dog Park: Amount of Dogs Who Shed & Other Attributes")
plt.xlabel("\nDog and Owner Information")
plt.ylabel("Amount of Dogs in Percent")
plt.legend(["Hancock", "Norwood Estate"])
# Show the plot
plt.show()
create_graph()
``` |
{
"source": "11Takanori/AutowareAuto_",
"score": 2
} |
#### File: benchmark_tool/benchmark_task/euclidean_cluster_node_task.py
```python
from benchmark_tool.benchmark_task.benchmark_task import BenchmarkTask
from benchmark_tool.dataset.kitti_3d_benchmark_dataset \
import Kitti3DBenchmarkDataset
from benchmark_tool.output_formatter.generic_stream_formatter \
import GenericStreamFormatter
from benchmark_tool.output_formatter.euclidean_cluster_node_output_formatter \
import EuclideanClusterNodeOutputFormatter
from benchmark_tool.metric.kitti_3d_object_detection_metric \
import Kitti3dObjectDetectionMetric
from benchmark_tool.metric.average_metric import AverageMetric
from benchmark_tool.time_estimator.time_estimator_header \
import TimeEstimatorHeader
from benchmark_tool.player.synced_player import SyncedPlayer
from benchmark_tool.player.relay_player import RelayPlayer
from benchmark_tool.utility import getParameter, info, error
from sensor_msgs.msg import PointCloud2
from std_msgs.msg import Int64
class EuclideanClusterNodeTask(BenchmarkTask):
"""
The EuclideanClusterNodeTask class benchmarks the lidar_euclidean_cluster_detect node.
It is a specialized BenchmarkTask class holding all the peculiarities of that node.
"""
# Main loop is at 1000 Hz, so counting to 10000 will be a wait of
# 10 seconds before shutdown
SHUTDOWN_TICK_WAIT = 10000
POINT_CLOUD_DATASET_TRACK = 0
POINT_CLOUD_TRACK_RATE_HZ = 10
POINT_CLOUD_TRACK_TIMEOUT_MS = 120000
# POINT_CLOUD_TRACK_MIN_SUBS is 1 because the minimum number of
# subscriber before publish the data is 1: the node under test
POINT_CLOUD_TRACK_MIN_SUBS = 1
SPEED_METRIC_TOPIC = "/euclidean_cluster_node_speed_metric"
SPEED_METRIC_RESULT_PATH = "/euclidean_cluster_node/speed"
OBJDET_METRIC_RESULT_PATH = "/euclidean_cluster_node/objdet"
SPEED_METRIC_OUTPUT_PATH = "/euclidean_cluster_node"
OBJDET_METRIC_OUTPUT_PATH = "/euclidean_cluster_node"
SPEED_MEASURE_FILE_NAME = "speed.txt"
SPEED_METRIC_OUTPUT_FILENAME = "speed_metric.txt"
EXPECTED_KITTI_FRAME_ACCURACY = 7481
def __init__(self, node):
"""
Create a EuclideanClusterNodeTask object.
@param node: ROS2 node
@type node: rclpy.node.Node
"""
super(EuclideanClusterNodeTask, self).__init__(node)
self._data_player = None
self._time_estimator = None
self._kitti_3d_metric = None
self._speed_metric = None
self._shutdown_counter = 0
self._speed_formatter = None
self._limit_frame = -1
def init(self):
"""
Initialize the task structure.
Uses the Kitti object detection benchmark dataset.
The task computes two metrics:
- speed
- detection precision (Kitti measure)
@return: True on success, False on failure
"""
dataset_path = getParameter(self.node, "dataset_path")
input_topic = getParameter(self.node, "input_topic")
output_topic = getParameter(self.node, "output_topic")
benchmarked_in_topic = getParameter(self.node, "benchmarked_input_topic")
benchmarked_out_topic = getParameter(self.node, "benchmarked_output_topic")
result_path = getParameter(self.node, "result_path")
end_frame = getParameter(self.node, "force_end_at_frame_n")
if end_frame is not None and end_frame >= 0:
self._limit_frame = end_frame
else:
error(self.node,
"A positive limit frame number must be provided")
return False
# Load the Kitti 3D object detection benchmark dataset
self._dataset = Kitti3DBenchmarkDataset(self.node, dataset_path)
if not self._dataset.init():
error(self.node,
"There is a problem initializing the dataset")
return False
if (self._limit_frame >= 0):
self._dataset.set_frame_limit(end_frame)
# Load TimeEstimator to get the speed metric
self._time_estimator = TimeEstimatorHeader(self.node)
# Load the lidar_euclidean_cluster_detect output formatter to get the
# output of the lidar node and translate it to the kitti format
self._output_formatter = EuclideanClusterNodeOutputFormatter(
self.node,
(result_path + self.OBJDET_METRIC_RESULT_PATH),
dataset_path +
Kitti3DBenchmarkDataset.KITTI_3D_BENCH_FOLDER_STRUCTURE[
Kitti3DBenchmarkDataset.KITTI_3D_CALIB_PATH_IDX]
)
# Load the speed output formatter to get the output of the TimeEstimator
# and save it to a file
self._speed_formatter = GenericStreamFormatter(
self.node,
result_path + self.SPEED_METRIC_RESULT_PATH,
self.SPEED_MEASURE_FILE_NAME
)
# Prepare the 3d object detection metric object
self._kitti_3d_metric = Kitti3dObjectDetectionMetric(
self.node,
(result_path + self.OBJDET_METRIC_RESULT_PATH),
self._dataset.get_ground_truth_path(),
(result_path + self.OBJDET_METRIC_OUTPUT_PATH)
)
# Prepare the speed metric object
self._speed_metric = AverageMetric(
self.node,
(result_path + self.SPEED_METRIC_RESULT_PATH),
(result_path + self.SPEED_METRIC_OUTPUT_PATH),
self.SPEED_MEASURE_FILE_NAME,
self.SPEED_METRIC_OUTPUT_FILENAME
)
# Create the Player to play the data to the lidar node
self._data_player = SyncedPlayer(self.node, self._dataset)
if not self._data_player.add_track(
self.POINT_CLOUD_DATASET_TRACK,
input_topic,
PointCloud2,
output_topic,
self.POINT_CLOUD_TRACK_MIN_SUBS,
self.POINT_CLOUD_TRACK_RATE_HZ,
self.POINT_CLOUD_TRACK_TIMEOUT_MS
):
error(self.node,
"There is a problem initializing the player")
return False
# Create the relay player listening to the ray ground classifier
# output topic, it is the preprocessing stage of this benchmarked node
self._relay_player = RelayPlayer(
self.node,
benchmarked_in_topic,
"/relay" + benchmarked_in_topic
)
if not self._time_estimator.start_listener(benchmarked_out_topic,
self.SPEED_METRIC_TOPIC):
error(self.node,
"There is a problem initializing the TimeEstimator")
return False
# Listen to the speed metric
if not self._speed_formatter.start_output_listener(
self.SPEED_METRIC_TOPIC,
Int64
):
error(self.node,
"Problem initializing the speed metric listener")
return False
# Listen the lidar node output
if not self._output_formatter.start_output_listener(output_topic):
error(self.node,
"Problem initializing the speed metric listener")
return False
# Everything is fine
return True
def run(self):
"""
Run the task.
This function is called repeatedly in a loop until the task has finished its operations.
It uses a Player class instance to play the pointcloud data to the blackbox system.
When there is no more data to play, the task waits SHUTDOWN_TICK_WAIT before notifying the
end of any operation using the return value.
@return True: if the task has not finished operation, False when the task has nothing more
to do
"""
# Check if the player has data to play
if not self._data_player.play_data():
if self._shutdown_counter > self.SHUTDOWN_TICK_WAIT:
# Return running state of the task: stopping
return False
elif self._shutdown_counter == 0:
print("") # needed to keep visualizing the progress bar
info(self.node,
"No more data to play, waiting last frames...")
self._shutdown_counter += 1
# Return running state of the task: running
return True
def compute_results(self):
"""
Compute the final benchmark results.
@return: None
"""
info(self.node, "Start metric computation...")
# Compute the kitti 3D object detection metrics
if self._limit_frame >= self.EXPECTED_KITTI_FRAME_ACCURACY:
if not self._kitti_3d_metric.compute_metric():
error(self.node,
"Problem computing 3D object detection metrics")
else:
info(self.node, "Accuracy computation needs a number of frame " +
"equal to %d. Skipping..." % self.EXPECTED_KITTI_FRAME_ACCURACY)
# Compute the speed metric
if not self._speed_metric.compute_metric("Speed during benchmark (milliseconds): " +
"\nMin: %.1f\nAverage: %.1f\nMax: %.1f"):
error(self.node,
"Problem computing speed metrics")
```
#### File: benchmark_tool/time_estimator/time_estimator_header.py
```python
import rclpy
from rclpy.qos import QoSProfile
from ros2topic.api import get_msg_class
from std_msgs.msg import Int64
from benchmark_tool.utility import error
class TimeEstimatorHeader(object):
"""
The TimeEstimatorHeader class.
It measures the time elapsed from the reception of a message using the header stamp
information. It measures the difference between the received time and the time in the message
header, it uses millisecond resolution.
This class should be used when the node that produces the message, is copying the header from
its input topic message to the output topic message.
This is the flow:
1) The benchmark_tool node publish a message with a timestamp
2) The blackbox system receives the message and compute the output message
3) The blackbox system copies the header from the input message to the output message and send
it
4) The benchmark_tool is subscribed to the blackbox system, so upon receiving the output
message, we know that the time elapsed between the reception and the timestamp on the
message is an upper bound for how long the blackbox system has taken to perform its
operations.
"""
def __init__(self, node):
"""
Create a TimeEstimator object.
@param node: ROS2 node
@type node: rclpy.node.Node
"""
self._sub_output_topic = None
self._publisher = None
self.node = node
def start_listener(self, output_topic, publish_topic):
"""
Start the time measurement.
@param output_topic: Topic to be listened to measure the time
@type output_topic: str
@param publish_topic: Topic where the measurement is published
@type publish_topic: str
@return: True on success, False on failure
"""
try:
# Create publisher to publish the speed measure
self._publisher = self.node.create_publisher(
Int64,
publish_topic,
qos_profile=QoSProfile(depth=1)
)
# Get type of the output topic and subscribe to it
output_topic_type = get_msg_class(self.node, output_topic,
blocking=True)
self._sub_output_topic = self.node.create_subscription(
output_topic_type,
output_topic,
self.output_topic_callback,
qos_profile=QoSProfile(depth=1)
)
except Exception as e:
error(self.node, "%s" % str(e))
return False
return True
def output_topic_callback(self, msg):
"""
Compute a time difference on message reception.
Callback function triggered by the reception of a message from the output topic.
When a message is received, the time is computed using the difference between the actual
receiving time and the time of the message header.
After the computation, the speed is published on the publish topic.
@param msg: The topic message
@type msg: The type can vary depending on the listened topic
@return: None
"""
# Get actual time from ROS
time_now = self.node.get_clock().now().nanoseconds
# Compute the amount of time elapsed from receiving this message and
# the time written in the message header
time_message = rclpy.time.Time()
time_message = time_message.from_msg(msg.header.stamp)
measure = time_now - time_message.nanoseconds
# Transform from nanoseconds to milliseconds
measure = measure / (1000 * 1000)
publish_msg = Int64()
publish_msg.data = int(measure)
# Publish the measurement
self._publisher.publish(publish_msg)
``` |
{
"source": "11Tuvork28/DPY-Anti-Spam",
"score": 2
} |
#### File: DPY-Anti-Spam/testing/test_message.py
```python
import datetime
import unittest
from antispam.message import Message
class TestMessage(unittest.TestCase):
"""
Used to test the Message object
"""
def setUp(self):
"""
Simply setup our Message obj before usage
"""
self.message = Message(0, "Hello world", 2, 3, 4)
def test_intAssignment(self):
self.assertIsInstance(self.message.id, int)
self.assertIsInstance(self.message.guild_id, int)
self.assertIsInstance(self.message.channel_id, int)
self.assertIsInstance(self.message.author_id, int)
def test_strAssignment(self):
self.assertIsInstance(self.message.content, str)
def test_datetimeAssignment(self):
self.assertIsInstance(self.message.creation_time, datetime.datetime)
def test_valueAssignment(self):
creationTime = self.message.creation_time
self.assertEqual(self.message.id, 0)
self.assertEqual(self.message.content, "Hello world")
self.assertEqual(self.message.author_id, 2)
self.assertEqual(self.message.channel_id, 3)
self.assertEqual(self.message.guild_id, 4)
self.assertFalse(self.message.is_duplicate)
self.message.id = 10
self.message.content = "Testing"
self.message.author_id = 10
self.message.channel_id = 10
self.message.guild_id = 10
self.message.creation_time = datetime.datetime.now()
self.message.is_duplicate = True
self.assertEqual(self.message.id, 10)
self.assertEqual(self.message.content, "Testing")
self.assertEqual(self.message.author_id, 10)
self.assertEqual(self.message.channel_id, 10)
self.assertEqual(self.message.guild_id, 10)
self.assertEqual(self.message.creation_time, creationTime)
self.assertTrue(self.message.is_duplicate)
def test_idRaises(self):
with self.assertRaises(ValueError):
self.message.id = "String"
def test_authorIdRaises(self):
with self.assertRaises(ValueError):
self.message.author_id = "String"
def test_channelIdRaises(self):
with self.assertRaises(ValueError):
self.message.channel_id = "String"
def test_guildIdRaises(self):
with self.assertRaises(ValueError):
self.message.guild_id = "String"
@unittest.skip
@unittest.expectedFailure
def test_contentRaises(self):
with self.assertRaises(ValueError):
self.message.content = {"key", "value"}
self.message.content = 1
def test_str(self):
self.assertEqual(
str(self.message),
f"{self.message.__class__.__name__} object - '{self.message.content}'",
)
def test_repr(self):
self.assertEqual(
repr(self.message),
(
f"'{self.message.__class__.__name__} object. Content: {self.message.content}, Message Id: {self.message.id}, "
f"Author Id: {self.message.author_id}, Channel Id: {self.message.channel_id}, Guild Id: {self.message.guild_id} "
f"Creation time: {self.message.creation_time}'"
),
)
def test_eqEqual(self):
obj = Message(0, "Hello world", 2, 3, 4)
self.assertTrue(self.message == obj)
def test_eqNotEqual(self):
obj = Message(1, "Hello world", 2, 3, 4)
self.assertFalse(self.message == obj)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "11Tuvork28/M3E5",
"score": 3
} |
#### File: modules/base/db_management.py
```python
from sqlalchemy.dialects.mysql import CHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, FLOAT, BIGINT, DATETIME, ForeignKey, VARCHAR, TEXT, BOOLEAN
from sqlalchemy.orm import relationship
from base_folder.config import Session
'''
The following classes each represent their corresponding database table.
'''
Base = declarative_base()
class Banlist(Base):
__tablename__ = "blacklist"
id = Column(BIGINT, primary_key=True, autoincrement=True)
user_id = Column(BIGINT)
class BannedChannelsCmds(Base):
__tablename__ = "banned_channels_cmds"
id = Column(Integer, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
channel_id = Column(BIGINT, ForeignKey('channels.channel_id'))
cmd_id = Column(BIGINT, ForeignKey('commands.id'))
guilds = relationship("Guild", back_populates="banned_channels_cmds")
channels = relationship("Channels", back_populates="banned_channels_cmds")
commands = relationship("Commands", back_populates="banned_channels_cmds")
class BannedRolesCmds(Base):
__tablename__ = "banned_roles_cmds"
id = Column(Integer, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
role_id = Column(BIGINT, ForeignKey("roles.role_id"))
cmd_id = Column(BIGINT, ForeignKey('commands.id'))
guilds = relationship("Guild", back_populates="banned_roles_cmds")
roles = relationship("Roles", back_populates="banned_roles_cmds")
commands = relationship("Commands", back_populates="banned_roles_cmds")
class BannedUsersCmds(Base):
__tablename__ = "banned_users_cmds"
id = Column(Integer, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'))
cmd_id = Column(BIGINT, ForeignKey('commands.id'))
guilds = relationship("Guild", back_populates="banned_users_cmds")
user_info = relationship("UserInfo", back_populates="banned_users_cmds")
commands = relationship("Commands", back_populates="banned_users_cmds")
class BannedChannelsSpam(Base):
__tablename__ = "banned_channels_spam"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
channel_id = Column(BIGINT, ForeignKey('channels.channel_id'), primary_key=True)
guilds = relationship("Guild", back_populates="banned_channels_spam")
channels = relationship("Channels", back_populates="banned_channels_spam")
class BannedRolesSpam(Base):
__tablename__ = "banned_roles_spam"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
role_id = Column(BIGINT, ForeignKey("roles.role_id"), primary_key=True)
guilds = relationship("Guild", back_populates="banned_roles_spam")
roles = relationship("Roles", back_populates="banned_roles_spam")
class BannedUsersSpam(Base):
__tablename__ = "banned_users_spam"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'), primary_key=True)
guilds = relationship("Guild", back_populates="banned_users_spam")
user_info = relationship("UserInfo", back_populates="banned_users_spam")
class Channels(Base):
__tablename__ = "channels"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'), primary_key=True)
channel_name = Column(VARCHAR(length=150))
channel_id = Column(BIGINT)
guilds = relationship("Guild", back_populates="channels")
banned_channels_spam = relationship("BannedChannelsSpam", back_populates="channels")
banned_channels_cmds = relationship("BannedChannelsCmds", back_populates="channels")
class Commands(Base):
__tablename__ = "commands"
id = Column(Integer, primary_key=True, autoincrement=True)
command_name = Column(VARCHAR(length=100))
banned_channels_cmds = relationship("BannedChannelsCmds", back_populates="commands")
banned_roles_cmds = relationship("BannedRolesCmds", back_populates="commands")
banned_users_cmds = relationship("BannedUsersCmds", back_populates="commands")
class Errors(Base):
__tablename__ = "Error"
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
error = Column(VARCHAR(length=2000))
date = Column(DATETIME)
guilds = relationship("Guild", back_populates="Error")
class Guild(Base):
__tablename__ = "guilds"
guild_id = Column(BIGINT, primary_key=True)
channels = relationship("Channels", back_populates="guilds")
banned_channels_cmds = relationship("BannedChannelsCmds", back_populates="guilds")
banned_roles_cmds = relationship("BannedRolesCmds", back_populates="guilds")
banned_users_cmds = relationship("BannedUsersCmds", back_populates="guilds")
banned_channels_spam = relationship("BannedChannelsSpam", back_populates="guilds")
banned_roles_spam = relationship("BannedRolesSpam", back_populates="guilds")
banned_users_spam = relationship("BannedUsersSpam", back_populates="guilds")
Error = relationship("Errors", back_populates="guilds")
user_info = relationship("UserInfo", back_populates="guilds")
messages = relationship("Messages", back_populates="guilds")
profiles = relationship("Profiles", back_populates="guilds")
reactions = relationship("Reaction", back_populates="guilds")
roles = relationship("Roles", back_populates="guilds")
settings = relationship("Settings", back_populates="guilds")
class Messages(Base):
__tablename__ = "messages"
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'))
message_id = Column(BIGINT)
message = Column(TEXT)
time = Column(DATETIME)
guilds = relationship("Guild", back_populates="messages")
user_info = relationship("UserInfo", back_populates="messages")
reactions = relationship("Reaction", back_populates="messages")
class Profiles(Base):
__tablename__ = 'profiles'
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
user_id = Column(BIGINT, ForeignKey('user_info.user_id'))
warnings = Column(Integer, default=0)
kickCount = Column(Integer, default=0)
text_xp = Column(Integer, default=0)
text_lvl = Column(Integer, default=0)
voice_xp = Column(Integer, default=0)
voice_lvl = Column(Integer, default=0)
banned_at = Column(DATETIME)
banned_until = Column(DATETIME)
muted_at = Column(DATETIME)
muted_until = Column(DATETIME)
guilds = relationship("Guild", back_populates="profiles")
user_info = relationship("UserInfo", back_populates="profiles")
class Reaction(Base):
__tablename__ = "reactions"
id = Column(BIGINT, primary_key=True, autoincrement=True)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
message_id = Column(BIGINT, ForeignKey("messages.message_id"))
role_id = Column(BIGINT, ForeignKey("roles.role_id"))
emoji = Column(CHAR)
guilds = relationship("Guild", back_populates="reactions")
roles = relationship("Roles", back_populates="reactions")
messages = relationship("Messages", back_populates="reactions")
class Roles(Base):
__tablename__ = "roles"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
role_id = Column(BIGINT, primary_key=True, autoincrement=True)
role_name = Column(VARCHAR(length=255))
guilds = relationship("Guild", back_populates="roles")
reactions = relationship("Reaction", back_populates="roles")
banned_roles_cmds = relationship("BannedRolesCmds", back_populates="roles")
banned_roles_spam = relationship("BannedRolesSpam", back_populates="roles")
class Settings(Base):
__tablename__ = "settings"
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'), primary_key=True, autoincrement=True, nullable=False)
# TODO: make relations to Roles table
# TODO: make relations to channels table
standard_role_id = Column(BIGINT, default=0)
dev_role_id = Column(BIGINT, default=0)
mod_role_id = Column(BIGINT, default=0)
admin_role_id = Column(BIGINT, default=0)
imgwelcome_toggle = Column(BOOLEAN, default=False)
imgwelcome_text = Column(VARCHAR(length=2000), default="V2VsY29tZSB0byB0aGUgc2VydmVyIHlvdSBsaXR0bGUgdXNlcg==")
levelsystem_toggle = Column(BOOLEAN, default=False)
welcome_channel_id = Column(BIGINT, default=0)
leave_channel_id = Column(BIGINT, default=0)
lvl_channel_id = Column(BIGINT, default=0)
cmd_channel_id = Column(BIGINT, default=0)
stdout_channel_id = Column(BIGINT, default=0)
warn_channel_id = Column(BIGINT, default=0)
kick_channel_id = Column(BIGINT, default=0)
ban_channel_id = Column(BIGINT, default=0)
prefix = Column(VARCHAR(length=20), default="LQ==")
Color = Column(VARCHAR(length=25), default="default()")
leave_text = Column(VARCHAR(2000), default="VXNlciB1c2VyIGxlZnQgdGhlIHNlcnZlci4uLg==")
warnThreshold = Column(Integer, default=3)
kickThreshold = Column(Integer, default=2)
banThreshold = Column(Integer, default=2)
messageInterval = Column(Integer, default=2500)
warnMessage = Column(VARCHAR(length=2000), default="Hey $MENTIONUSER, please stop spamming/"
"sending duplicate messages.")
kickMessage = Column(VARCHAR(length=2000), default="$USERNAME was kicked for spamming/sending duplicate messages.")
banMessage = Column(VARCHAR(length=2000), default="$USERNAME was banned for spamming/sending duplicate messages.")
messageDuplicateCount = Column(Integer, default=5)
messageDuplicateAccuracy = Column(FLOAT, default=90)
ignoreBots = Column(BOOLEAN, default=True)
guilds = relationship("Guild", back_populates="settings")
class UserInfo(Base):
__tablename__ = 'user_info'
id = Column(BIGINT, primary_key=True, autoincrement=True)
user_id = Column(BIGINT)
username = Column(String)
guild_id = Column(BIGINT, ForeignKey('guilds.guild_id'))
guilds = relationship("Guild", back_populates="user_info")
messages = relationship("Messages", back_populates="user_info")
profiles = relationship("Profiles", back_populates="user_info")
banned_users_spam = relationship("BannedUsersSpam", back_populates="user_info")
banned_users_cmds = relationship("BannedUsersCmds", back_populates="user_info")
class Db:
"""
This class is more or less a layer on top of sqlalchemy,
that can read data from the database and represent it in useful forms.
TODO: add consistent error handling
"""
def __init__(self):
self._session = None
@property
def session(self):
if self._session is None:
self._session = Session()
return self._session
return self._session
def prefix_lookup(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the prefix for the given guild
"""
prefix = self.session.query(Settings.prefix).filter_by(guild_id=guild_id).one()
self.session.commit()
return prefix
async def roles_from_db(self, guild_id: int):
# returns a tuple with all role name's and id's
roles = self.session.query(Roles.role_name, Roles.role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return roles
async def get_admin_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :returns: the role id of the admin role for the given guild
"""
role_id = self.session.query(Settings.admin_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_dev_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :returns: the role id of the dev role for the given guild
"""
role_id = self.session.query(Settings.dev_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_mod_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :returns: the role id of the mod role for the given guild
"""
role_id = self.session.query(Settings.mod_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_standard_role(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :returns: the role id of the standard role for the given guild
"""
role_id = self.session.query(Settings.standard_role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return role_id[0]
async def get_warns(self, guild_id: int, user_id: int):
"""
:param guild_id: the id of the guild
:param user_id: the ID of the user
:returns: the warnings for the given user, can be 0
"""
warnings = self.session.query(Profiles.warnings).filter(Profiles.guild_id == guild_id,
Profiles.user_id == user_id).all()
self.session.commit()
if not warnings:
return 0
return warnings[0][0]
async def get_welcome_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the welcome channel for the given guild
"""
welcome_channel = self.session.query(Settings.welcome_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return welcome_channel[0]
async def get_cmd_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the command channel for the given guild
"""
cmd_channel = self.session.query(Settings.cmd_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return cmd_channel[0]
async def get_lvl_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the level channel for the given guild
"""
lvl_channel = self.session.query(Settings.lvl_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return lvl_channel[0]
async def get_leave_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the leave channel for the given guild
"""
leave_channel = self.session.query(Settings.leave_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return leave_channel[0]
async def get_stdout_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the stdout(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.stdout_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_warn_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the warn(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.warn_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_kick_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the kick(logging) channel for the given guild
"""
stdout_channel = self.session.query(Settings.kick_channel_id).filter_by(guild_id=guild_id).one()
self.session.commit()
return stdout_channel[0]
async def get_ban_channel(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the ban(logging) channel for the given guild
"""
        ban_channel = self.session.query(Settings.ban_channel_id).filter_by(guild_id=guild_id).one()
        self.session.commit()
        return ban_channel[0]
async def get_leave_text(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the leave text for the given guild
"""
leave_text = self.session.query(Settings.leave_text).filter_by(guild_id=guild_id).all()
self.session.commit()
return leave_text[0][0]
async def get_img(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :returns: whether the welcome image is on or off for the given guild
"""
img = self.session.query(Settings.imgwelcome_toggle).filter_by(guild_id=guild_id).all()
self.session.commit()
return img[0][0]
async def get_img_text(self, guild_id: int):
"""
:param guild_id: the id of the guild
:returns: the welcome text for the image for the given guild
"""
text = self.session.query(Settings.imgwelcome_text).filter_by(guild_id=guild_id).all()
self.session.commit()
return text[0][0]
async def get_text_xp(self, guild_id: int, user_id: int):
"""
:param guild_id: the id of the guild
:param user_id: the ID of the user
:returns: the xp amount for the given user
"""
xp = self.session.query(Profiles.text_xp).filter(Profiles.guild_id == guild_id,
Profiles.user_id == user_id).all()
self.session.commit()
return xp[0][0]
async def get_lvl_text(self, guild_id: int, user_id: int):
"""
:param guild_id: the id of the guild
:param user_id: the ID of the user
:returns: the text lvl for the given user
"""
lvl = self.session.query(Profiles.text_lvl).filter(Profiles.guild_id == guild_id,
Profiles.user_id == user_id).all()
self.session.commit()
return lvl[0][0]
async def get_levelsystem(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :returns: whether the levelsystem is on or off for the specific guild
"""
lvl_toggle = self.session.query(Settings.levelsystem_toggle).filter_by(guild_id=guild_id).all()
self.session.commit()
return lvl_toggle[0][0]
async def get_banned_until(self, user_id: int, guild_id: int):
"""
:param guild_id: the id of the guild
:param user_id: the ID of the user
        :returns: the date the user is allowed to be unbanned; None if the user wasn't temp banned
"""
date = self.session.query(Profiles.banned_until).filter(Profiles.guild_id == guild_id,
Profiles.user_id == user_id).all()
self.session.commit()
return date[0][0]
async def get_bannlist(self, user_id: int):
"""
        Searches the db to check whether the user is blacklisted
        :param user_id: the ID of the user
        :returns: True if the user is on the blacklist, otherwise False
"""
user = self.session.query(Banlist.user_id).filter_by(user_id=user_id).all()
self.session.commit()
        # the query returns a list of one-element row tuples, so unpack before comparing
        return any(row[0] == user_id for row in user)
async def get_message(self, guild_id: int, message_id: int):
"""
:param guild_id: the id of the guild
:param message_id: the requested message id
        :return: the message content and the user_id in a list of rows, can be an empty list
"""
message = self.session.query(Messages.message, Messages.user_id).filter(Messages.message_id == message_id,
Messages.guild_id == guild_id).all()
self.session.commit()
return message
async def get_guild_byuserid(self, user_id: int):
"""
:param user_id: the id of the user
:return: list with all guild ids inside
"""
guilds = self.session.query(UserInfo.guild_id).filter_by(user_id=user_id).all()
self.session.commit()
return guilds
async def get_reaction_role(self, guild_id: int, message_id: int, emoji):
"""
:param guild_id: the id of the guild
:param message_id: the id of the message where a reaction got added
:param emoji: the added emoji
:return: the role id that the user should get
"""
roleid = self.session.query(Reaction.role_id).filter(Reaction.message_id == message_id,
Reaction.guild_id == guild_id,
Reaction.emoji == emoji).all()
self.session.commit()
if not roleid:
return False
return roleid[0][0]
async def leaderboard(self, guild_id: int):
"""
:param guild_id: the id of the guild
        :return: the top ten leaderboard entries ordered by xp, as a list of (text_lvl, text_xp, user_id) tuples
"""
ranks = self.session.query(Profiles.text_lvl, Profiles.text_xp,
Profiles.user_id).filter(Profiles.guild_id == guild_id).order_by(
Profiles.text_xp.desc())[0:10]
self.session.commit()
return ranks
async def get_spam_settings(self, guild_id: int):
"""
:param guild_id: the id of the given guild
:return: list of tuples with all spam settings
=============================
tuple contents
=============================
warnThreshold : int, optional
This is the amount of messages in a row that result in a warning within the messageInterval
kickThreshold : int, optional
The amount of 'warns' before a kick occurs
banThreshold : int, optional
The amount of 'kicks' that occur before a ban occurs
messageInterval : int, optional
Amount of time a message is kept before being discarded.
Essentially the amount of time (In milliseconds) a message can count towards spam
warnMessage : str, optional
The message to be sent upon warnThreshold being reached
        kickMessage : str, optional
            The message to be sent upon kickThreshold being reached
        banMessage : str, optional
            The message to be sent upon banThreshold being reached
messageDuplicateCount : int, optional
Amount of duplicate messages needed to trip a punishment
messageDuplicateKick : int, optional
Amount of duplicate messages needed within messageInterval to trip a kick
messageDuplicateBan : int, optional
Amount of duplicate messages needed within messageInterval to trip a ban
messageDuplicateAccuracy : float, optional
How 'close' messages need to be to be registered as duplicates (Out of 100)
ignorePerms : list, optional
The perms (ID Form), that bypass anti-spam
ignoreUsers : list, optional
The users (ID Form), that bypass anti-spam
ignoreBots : bool, optional
Should bots bypass anti-spam?
"""
settings = self.session.query(Settings.warnThreshold, Settings.kickThreshold, Settings.banThreshold,
Settings.messageInterval, Settings.warnMessage, Settings.kickMessage,
Settings.banMessage,
Settings.messageDuplicateCount, Settings.messageDuplicateAccuracy
).filter(Settings.guild_id == guild_id).all()
self.session.commit()
return settings
async def get_kick_count(self, guild_id: int, user_id: int):
"""
:param guild_id: the id of the guild
:param user_id: the ID of the user
        :returns: the kick count for the given user, can be 0
"""
kickcount = self.session.query(Profiles.kickCount).filter(Profiles.guild_id == guild_id,
Profiles.user_id == user_id).all()
self.session.commit()
if not kickcount:
return 0
return kickcount[0][0]
async def get_banned_channels_cmd(self, guild_id: int, command: str = None):
"""
Returns the banned channels for the given command
:param guild_id: id of the given guild
:param command: name of the invoked command
:return: list of channel ids
"""
channels_cmds = self.session.query(BannedChannelsCmds.channel_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return channels_cmds
async def get_banned_roles_cmd(self, guild_id: int, command: str = None):
"""
Returns the banned roles for the given command
        :param guild_id: id of the given guild
        :param command: name of the invoked command
        :return: list with role ids
"""
channels_cmds = self.session.query(BannedRolesCmds.role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return channels_cmds
async def get_banned_users_cmd(self, guild_id: int, command: str = None):
"""
        Returns the banned users for the given command
        :param guild_id: id of the given guild
        :param command: name of the invoked command
        :return: list with user ids
"""
channels_cmds = self.session.query(BannedUsersCmds.user_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return channels_cmds
async def get_banned_channels_spam(self, guild_id: int):
"""
Returns the banned channels from spam detection
        :param guild_id: id of the given guild
        :return: list of channel ids
"""
channels_spam = self.session.query(BannedChannelsSpam.channel_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return channels_spam
async def get_banned_roles_spam(self, guild_id: int):
"""
Returns the banned roles from spam detection
        :param guild_id: id of the given guild
        :return: list with role ids
"""
roles_spam = self.session.query(BannedRolesSpam.role_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return roles_spam
async def get_banned_users_spam(self, guild_id: int):
"""
Returns the banned users from spam detection
:param guild_id: id of the given guild
:return: list with user ids
"""
user_spam = self.session.query(BannedUsersSpam.user_id).filter_by(guild_id=guild_id).all()
self.session.commit()
return user_spam
```
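A minimal usage sketch for the async getters above. It assumes the bot exposes this query class as `client.sql`, the way the moderation cog below calls it; the cog and command names here are made up for illustration.
```python
# Hypothetical usage sketch; `client.sql` and the Profiles-backed get_warns()
# getter are taken from the surrounding code, everything else is made up.
from discord.ext import commands


class WarnInfo(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.command(name="mywarns")
    async def mywarns(self, ctx):
        # get_warns() already falls back to 0 when no profile row exists
        warns = await self.client.sql.get_warns(ctx.guild.id, ctx.author.id)
        await ctx.send(f"You have {warns} warning(s).")


def setup(client):
    client.add_cog(WarnInfo(client))
```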
#### File: modules/base/moderation_mods.py
```python
import datetime
from discord.ext import commands
import discord.utils
from base_folder.bot.utils.Permissions_checks import mod
from base_folder.bot.utils.util_functions import success_embed, error_embed
from base_folder.celery.db import *
from base_folder.bot.utils.checks import check_args_datatyp, logging_to_channel_stdout, purge_command_in_channel, \
logging_to_channel_cmd
class ModerationMod(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(pass_context=True, name="kick", brief="Kicks a given member", usage="kick @member reason")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def kick(self, ctx, member: discord.Member = None, reason: str = "Because you were bad. We kicked you."):
e = success_embed(self.client)
if member is not None:
e.description = f"{member.mention} has been successfully kicked for {reason}"
await ctx.send(embed=e)
e.description = f"You have been banned from {ctx.guild.name} for {reason}."\
f"If you think this is wrong then message an admin but shit happens"\
f" when you don't have the name."
await ctx.guild.kick(member, reason=reason)
await member.send(embed=e)
else:
e.title = "Error!"
e.description = f"You need to specify a member via mention"
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="unban", brief="Unbans a given member", usage="unban @member reason")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def unban(self, ctx, member: str = "", reason: str = "You have been unbanned. Time is over. Please behave"):
e = success_embed(self.client)
if member == "":
e.title = "Error!"
e.description = f"No member specified! Specify a user by writing his name without #tag"
await ctx.send(embed=e)
bans = await ctx.guild.bans()
for b in bans:
if b.user.name == member:
e.description = f"{b.user.name} has been successfully unbanned!"
await ctx.guild.unban(b.user, reason=reason)
await ctx.sende(embed=e)
e.description = f"{member} wasn't found in the ban list so either you wrote the name " \
f"wrong or {member} was never banned!"
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="delete", brief="Clears a given amount of msg's", usage="delete number of msg's "
"to be deleted")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def delete(self, ctx, limit: int):
await ctx.channel.purge(limit=limit+1)
e = success_embed(self.client)
e.description = f"cleared: {limit} messages!"
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="tempmute", brief="Mutes a user for given time", usage="tempmute @member reason time")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def tempmute(self, ctx, member: discord.Member = None, reason="you made a mistake", time: int = 0):
muteduntil = ctx.message.created_at + datetime.timedelta(hours=time)
role = discord.utils.get(ctx.guild.roles, name="Muted")
e = success_embed(self.client)
e.description = f"{member.mention} was successfully muted for {reason} until {muteduntil}"
await member.add_roles(role)
await ctx.send(embed=e)
edit_muted_at.delay(ctx.guild.id, member.id, ctx.message.created_at)
muted_until.delay(ctx.guild.id, member.id, muteduntil)
self.client.scheduler.add_job(self.unmute, "date", run_date=muteduntil, args=[ctx, member])
return e
@commands.command(pass_context=True, name="mute", brief="Mutes a user permanently", usage="mute @member reason")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def mute(self, ctx, member: discord.Member = None, reason="you made a mistake"):
role = discord.utils.get(ctx.guild.roles, name="Muted") # retrieves muted role returns none if there isn't
e = success_embed(self.client)
e.description = f"{member.mention} was successfully muted for {reason}"
if not role: # checks if there is muted role
try: # creates muted role
muted = await ctx.guild.create_role(name="Muted", reason="To use for muting")
for channel in ctx.guild.channels: # removes permission to view and send in the channels
await channel.set_permissions(muted, send_messages=False,
read_message_history=False,
read_messages=False)
except discord.Forbidden:
e = error_embed(self.client)
e.description = f"Master please give me admin rights"
                return await ctx.send(embed=e)  # self-explanatory
await member.add_roles(muted) # adds newly created muted role
await ctx.send(embed=e)
else:
await member.add_roles(role) # adds already existing muted role
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="mute", brief="Enables slowmode with custom delay",
usage="slowmode seconds must be < 120")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def slowmode(self, ctx, seconds: int = 0):
e = success_embed(self.client)
        if seconds > 120:
            e.description = ":no_entry: Amount can't be over 120 seconds"
            await ctx.send(embed=e)
            return e
        if seconds == 0:
            e.description = f"{ctx.author.mention} turned slow mode off for the channel {ctx.channel.mention}"
await ctx.channel.edit(slowmode_delay=seconds)
await ctx.send(embed=e)
else:
e.description = f"{ctx.author.mention} set the {ctx.channel.mention} channel's slow mode delay " \
f"to `{seconds}`" \
f"\nTo turn this off, do prefixslowmode"
await ctx.channel.edit(slowmode_delay=seconds)
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="unmute", brief="Unmutes a user", usage="unmute @member")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def unmute(self, ctx, member: discord.Member = None):
e = success_embed(self.client)
try:
e.description = f"{member.mention} has been unmuted "
await member.remove_roles(discord.utils.get(ctx.guild.roles, name="Muted"))
await ctx.send(embed=e)
except discord.DiscordException:
e.description = f"{member.mention} already unmuted or {member.mention} was never muted"
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="warn", brief="Warns a user", usage="warn @member")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def warn(self, ctx, member: discord.Member = None, *, reason="you made a mistake"):
warnings = await self.client.sql.get_warns(ctx.guild.id, member.id)
e = success_embed(self.client)
if warnings == 0:
warnings += 1
edit_warns.delay(ctx.guild.id, member.id, warnings)
e.description = f"{member.mention} you have been warned this is your " \
f"first infraction keep it at this, reason {reason}"
await member.send(embed=e)
await ctx.send(embed=e)
else:
warnings += 1
e.description = f"{member.mention} you have been warned, you have now {warnings} warning(s)"
edit_warns.delay(ctx.guild.id, member.id, warnings)
await member.send(embed=e)
await ctx.send(embed=e)
return e
@commands.command(pass_context=True, name="infractions", brief="Shows the infractions a user has",
usage="infractions @member")
@commands.guild_only()
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
@logging_to_channel_cmd
async def infractions(self, ctx, member: discord.Member = None):
e = success_embed(self.client)
warnings = await self.client.sql.get_warns(ctx.guild.id, member.id)
e.description = f"{member.mention} Has {warnings} infraction(s)!"
await ctx.send(embed=e)
return e
def setup(client):
client.add_cog(ModerationMod(client))
```
#### File: modules/listener/levelsystem.py
```python
from discord.ext import commands
from base_folder.bot.utils.Permissions_checks import mod
from base_folder.bot.utils.util_functions import success_embed
from base_folder.celery.db import update_text_lvl, update_xp_text, edit_settings_levelsystem
from base_folder.bot.utils.checks import check_args_datatyp, logging_to_channel_stdout, purge_command_in_channel
async def update_data(ctx, xp):
    # award xp equal to the number of words in the message
    amount = len(str(ctx.content).split(" "))
    xp += amount
    update_xp_text.delay(ctx.guild.id, ctx.author.id, xp)
    return xp
async def _enabled(client, guildid):
if client.cache.states[guildid].get_levelsystem == 1:
return True
else:
return False
class Levelsystem(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name="levelsystemtoggle", aliases=["lvltoggle"], brief="toggles the levelsystem",
usage="levelsystemtoggle")
@mod()
@check_args_datatyp
@logging_to_channel_stdout
@purge_command_in_channel
async def levelsystemtoggle(self, ctx):
e = success_embed(self.client)
if self.client.cache.states[ctx.guild.id].get_levelsystem == 1:
edit_settings_levelsystem.delay(ctx.guild.id, 0)
e.description = "The levelsystem is now disabled"
else:
edit_settings_levelsystem.delay(ctx.guild.id, 1)
e.description = "The levelsystem is now enabled"
await self.client.cache.states[ctx.guild.id].set_lvltoggle()
await ctx.send(embed=e)
@commands.Cog.listener()
async def on_message(self, message):
if message.guild is None:
return
if message.content is None:
return
if not await _enabled(self.client, message.guild.id):
return
if message.author.id == self.client.user.id:
return
channel_id = self.client.cache.states[message.guild.id].get_channel("lvl")
print(channel_id)
if channel_id is None or channel_id == 0:
channel = message.guild.system_channel
else:
channel = self.client.get_channel(channel_id)
xp_before = await self.client.sql.get_text_xp(message.guild.id, message.author.id)
xp_after = await update_data(message, xp_before)
e = success_embed(self.client)
lvl_start = await self.client.sql.get_lvl_text(message.guild.id, message.author.id)
lvl_end = int(float(int(xp_after) ** (1 / 4)))
if lvl_start < lvl_end:
e.title = "LEEVEEL UP"
e.description = f"{message.author.mention} reached level {lvl_end} and has now {xp_after}XP"
await channel.send(embed=e)
update_text_lvl.delay(message.guild.id, message.author.id, lvl_end)
update_xp_text.delay(message.guild.id, message.author.id, xp_after)
def setup(client):
client.add_cog(Levelsystem(client))
```
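The listener above derives the level from accumulated XP with `int(xp ** (1 / 4))`. A stand-alone sketch of that curve, purely illustrative and independent of the bot:
```python
# Sketch of the level curve implied by lvl_end = int(float(int(xp_after) ** (1 / 4)))
# above. The helper names are made up; nothing here touches the bot or the db.
def level_from_xp(xp: int) -> int:
    return int(float(int(xp) ** (1 / 4)))


def xp_needed_for(level: int) -> int:
    # brute-force the smallest xp that reaches `level` (roughly level ** 4)
    xp = 1
    while level_from_xp(xp) < level:
        xp += 1
    return xp


if __name__ == "__main__":
    for lvl in range(1, 6):
        print(f"level {lvl} starts at {xp_needed_for(lvl)} XP")
```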
#### File: modules/listener/listener_error.py
```python
import discord
from discord.ext import commands
from base_folder.bot.utils.util_functions import error_embed
from base_folder.celery.db import on_error
from base_folder.bot.utils.checks import logging_to_channel_stdout, purge_command_in_channel
class ErrorHandler(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
@logging_to_channel_stdout
@purge_command_in_channel
async def on_command_error(self, ctx, ex):
"""
:param ctx: context of the command that caused the error
:param ex: the exception itself
:return: logs the error to the db if it wasn't a commands.* error and sends a log entry in the stdout channel
regardless of the error
"""
if hasattr(ctx.command, 'on_error'):
return
error = getattr(ex, 'original', ex)
embed = error_embed(self.client)
print(error) # Printing the error for debugging reasons
if isinstance(error, commands.CommandNotFound):
embed.description = "I have never seen this command in my entire life"
await ctx.send(embed=embed)
return
if isinstance(error, commands.errors.CheckFailure):
embed.description = "You do not have permission to use this command." \
"If you think this is an error, talk to your admin"
await ctx.send(embed=embed)
return
if isinstance(error, commands.BadArgument):
embed.description = "You gave me an wrong input, check the command usage"
await ctx.send(embed=embed)
return
if isinstance(error, commands.MissingRequiredArgument):
embed.description = "You need to give the required arguments, check the command usage"
await ctx.send(embed=embed)
return
if isinstance(error, commands.NoPrivateMessage):
try:
embed.description = "This command is for guilds/servers only"
await ctx.author.send(embed=embed)
except discord.Forbidden:
pass
return
embed.description = "Something is totally wrong here in the M3E5 land I will open issue at my creator's bridge"
await ctx.send(embed=embed)
on_error.delay(ctx.guild.id, str(ex))
def setup(client):
client.add_cog(ErrorHandler(client))
```
#### File: modules/listener/listener_internal.py
```python
import discord
from discord.ext import commands
from base_folder.bot.utils.util_functions import build_embed
from base_folder.celery.db import initialize_guild, is_user_indb, insert_message, roles_to_db
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from base_folder.bot.utils.checks import logging_to_channel_stdout, purge_command_in_channel
# TODO: Automod
class Internal(commands.Cog):
def __init__(self, client):
self.client = client
'''
Bot things like activity and on guild_join
'''
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
self.client.sql.updatedb(guild)
message = build_embed(author=self.client.user.name, title="Hey!",
description="Thanks for choosing me!"
"here are some commands you need to execute:")
message.add_field(name="Important setup commands", value="-prefix the prefix\n -set_leave\n -set_welcome\n"
"-set_lvl\n -set_cmd\n -set_default\n -set_dev\n"
" -set_mod\n set_admin", inline=True)
message.add_field(name="Usage", value="sets the prefix\n sets the leave channel\n sets the welcome channel\n "
"sets the lvl up channel\n sets the channel for bot commands\n "
"sets the default role a user should have on join\n "
"sets the dev role\n sets the mod role\n sets the admin role")
await guild.owner.send(embed=message)
self.client.cache.create_state(guild.id)
"""
Logging the guilds follows
"""
@commands.Cog.listener()
async def on_message(self, message: discord.message):
"""
Logs every message the bot gets into the db with channel id message id user id guild id timestamp
        :param message: the message object returned by the REST API
"""
# Todo: log to the table for private messages
if message.guild is None:
return
if message.author.id == self.client.user.id:
return
insert_message.delay(message.guild.id, message.author.id, message.id, message.channel.id, message.content)
# TODO: FIX GUILD ID etc
@commands.Cog.listener()
async def on_raw_message_edit(self, payload):
if payload.data["guild_id"]:
guildid = payload.data["guild_id"]
content = await self.client.sql.get_message(guildid, payload.message_id)
if not content:
return
stdoutchannel = self.client.get_channel(self.client.cache.states[int(guildid)].get_channel())
if stdoutchannel is None:
return
channel = self.client.get_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
if payload.cached_message:
user = payload.cached_message.author
else:
user = self.client.get_user(content[0][1])
if not user.bot:
if content[0][0] != message.content:
await self.client.log.stdout(stdoutchannel, f"Message from {message.author.name} was changed from: "
f"'{str(content[0][0]).replace('@', '')}' to "
f"'{str(message.content).replace('@', '')}'")
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
if payload.guild_id:
content = await self.client.sql.get_message(payload.guild_id, payload.message_id)
if not content:
return
stdoutchannel = self.client.get_channel(self.client.cache.states[payload.guild_id].get_channel())
if stdoutchannel is None:
return
channel = self.client.get_channel(payload.channel_id)
if payload.cached_message:
user = payload.cached_message.author
else:
user = self.client.get_user(content[0][1])
if not user.bot:
await self.client.log.stdout(stdoutchannel, f"Message from {user.name}#{user.discriminator} was deleted"
f" Content: {content[0][0]} in Channel: {channel.name}")
'''
Automated background tasks
'''
def setup(client):
client.add_cog(Internal(client))
```
#### File: bot/utils/Permissions_checks.py
```python
from discord.ext.commands import check, MissingRole
def admin():
async def predicate(ctx):
if ctx.guild.owner_id == ctx.author.id:
return True
admin = ctx.bot.cache.states[ctx.guild.id].get_role()
role_list = ctx.bot.cache.states[ctx.guild.id].get_perm_list
for role in ctx.author.roles:
if role.id in role_list:
return True
raise MissingRole(admin)
return check(predicate)
def mod():
async def predicate(ctx):
if ctx.guild.owner_id == ctx.author.id:
return True
mod = ctx.bot.cache.states[ctx.guild.id].get_role("mod")
role_list = ctx.bot.cache.states[ctx.guild.id].get_perm_list
for role in ctx.author.roles:
if role.id in role_list:
return True
raise MissingRole(mod)
return check(predicate)
def dev():
async def predicate(ctx):
if ctx.guild.owner_id == ctx.author.id:
return True
dev = ctx.bot.cache.states[ctx.guild.id].get_role("dev")
role_list = ctx.bot.cache.states[ctx.guild.id].get_perm_list
for role in ctx.author.roles:
if role.id in role_list:
return True
raise MissingRole(dev)
return check(predicate)
def user():
async def predicate(ctx):
if ctx.guild.owner_id == ctx.author.id:
return True
default = ctx.bot.cache.states[ctx.guild.id].get_role("default")
role_list = ctx.bot.cache.states[ctx.guild.id].get_perm_list
for role in ctx.author.roles:
if role.id in role_list:
return True
raise MissingRole(default)
return check(predicate)
```
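A hypothetical sketch of applying one of these checks to a command. It assumes the same `ctx.bot.cache` layout the predicates above read from and mirrors the decorator stacks used by the moderation cog; the command itself is made up.
```python
# Hypothetical sketch; only the mod() import reflects the code above,
# the cog and command are for illustration.
from discord.ext import commands
from base_folder.bot.utils.Permissions_checks import mod


class ModOnly(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.command(name="modonly")
    @commands.guild_only()
    @mod()  # raises MissingRole if the author holds none of the permitted roles
    async def modonly(self, ctx):
        await ctx.send("You passed the mod check.")


def setup(client):
    client.add_cog(ModOnly(client))
```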
#### File: base_folder/celery/db.py
```python
import base64
from abc import ABC
from base_folder.celery.worker import app, Task
from base_folder.config import sql
'''
Initialize the tables
'''
class DatabaseTask(Task, ABC):
_db = None
@property
def db(self):
if self._db is None:
self._db = sql()
if self._db.is_connected():
return self._db
else:
self._db = sql()
return self._db
@app.task(base=DatabaseTask, ignore_result=False)
def initialize_guild(guild_id):
conn = initialize_guild.db
c = conn.cursor()
try:
c.execute(f"SELECT * FROM guilds WHERE and guild_id= {guild_id};")
sql = c.fetchone()
if sql:
pass
else:
c.execute(f"INSERT INTO guilds (`guild_id`) VALUES ({guild_id});")
conn.commit()
c.execute(f"SELECT * FROM settings WHERE and guild_id= {guild_id};")
sql = c.fetchone()
if sql:
pass
else:
c.execute(f"INSERT INTO settings (guild_id) VALUES ({guild_id});")
conn.commit()
c.close()
except:
c.close()
return
'''
General per guild settings
'''
@app.task(base=DatabaseTask, ignore_result=True)
def is_user_indb(user_name, user_id, guild_id):
# Checks if a given user is in the db else writes it in the db
conn = is_user_indb.db
c = conn.cursor()
try:
c.execute(f"SELECT * FROM user_info WHERE user_id={user_id} and guild_id= {guild_id};")
sql = c.fetchone()
if not sql:
c.execute(f"INSERT INTO user_info (username, user_id, guild_id) "
f"VALUES ('{str(user_name)} ', '{str(user_id)}', '{str(guild_id)}')")
conn.commit()
c.execute(f"SELECT * FROM profiles WHERE user_id={user_id} and guild_id= {guild_id};")
sql = c.fetchone()
if not sql:
c.execute(f"INSERT INTO profiles (user_id, guild_id) "
f"VALUES ('{str(user_id)}', '{str(guild_id)}')")
conn.commit()
c.close()
except:
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def on_error(guild_id, error):
conn = on_error.db
c = conn.cursor()
c.execute(f"INSERT into Error (guild_id, error) VALUES ('{guild_id}', %s)", (error,))
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def set_prefix(guild_id, prefix):
conn = set_prefix.db
c = conn.cursor()
prefix = (base64.b64encode(str(prefix).encode("utf8"))).decode("utf8")
c.execute(f"UPDATE settings SET prefix='{prefix}' WHERE guild_id ={guild_id}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_warns(guild_id, user_id, amount):
# sets warn with given amount
conn = edit_warns.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET warnings={str(amount)} WHERE user_id="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_kickcount(guild_id, user_id, amount):
    # sets the kick count to the given amount
conn = edit_kickcount.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET kickCount={str(amount)} WHERE user_id="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_muted_at(guild_id, user_id, date):
conn = edit_muted_at.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET muted_at='{date}' WHERE user_id="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def muted_until(guild_id, user_id, date):
conn = muted_until.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET muted_until='{date}' WHERE user_id="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_banned_at(guild_id, user_id, date):
conn = edit_banned_at.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET banned_at='{date}' WHERE user_id="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def banned_until(guild_id, user_id, date):
conn = banned_until.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET banned_until='{date}' WHERE user_id="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
'''
Role settings
'''
@app.task(base=DatabaseTask, ignore_result=True)
def roles_to_db(guild_id, role_name, role_id):
# Checks if a given role is in the db else writes it in the db
conn = roles_to_db.db
c = conn.cursor()
c.execute(f"SELECT role_id FROM roles WHERE role_id={str(role_id)} and guild_id={str(guild_id)}")
sq = c.fetchone()
if sq:
conn.commit()
c.close()
return True
else:
c.execute(f"INSERT INTO roles (guild_id, role_name, role_id) "
f"VALUES ('{guild_id}', '{str(role_name)}', '{str(role_id)}')")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def remove_role(guild_id, role_id):
conn = remove_role.db
c = conn.cursor()
c.execute(f"DELETE FROM roles WHERE role_id = {role_id} and guild_id = {guild_id}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_role(guild_id, role_id, field_name):
# Let's you set roles e.g. mod role
conn = edit_settings_role.db
c = conn.cursor()
c.execute(f"INSERT INTO settings (guild_id,{field_name}) VALUES ('{guild_id}','{role_id}')"
f"ON DUPLICATE KEY UPDATE {field_name}='{role_id}'")
conn.commit()
c.close()
return
'''
end of roles settings
Channel settings and warning settings
'''
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_welcome(guild_id, channel_id):
# sets the welcome_channel to the given channel id
conn = edit_settings_welcome.db
c = conn.cursor()
c.execute(f"UPDATE settings SET welcome_channel_id={str(channel_id)} WHERE guild_id = '{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_stdout(guild_id, channel_id):
# sets the stdout_channel to the given channel id
    conn = edit_settings_stdout.db
c = conn.cursor()
c.execute(f"UPDATE settings SET stdout_channel_id={str(channel_id)} WHERE guild_id = '{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_warn(guild_id, channel_id):
# sets the warn_channel to the given channel id
    conn = edit_settings_warn.db
c = conn.cursor()
c.execute(f"UPDATE settings SET warn_channel_id={str(channel_id)} WHERE guild_id = '{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_kick(guild_id, channel_id):
# sets the kick_channel to the given channel id
    conn = edit_settings_kick.db
c = conn.cursor()
c.execute(f"UPDATE settings SET kick_channel_id={str(channel_id)} WHERE guild_id = '{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_ban(guild_id, channel_id):
# sets the ban_channel to the given channel id
    conn = edit_settings_ban.db
c = conn.cursor()
c.execute(f"UPDATE settings SET ban_channel_id={str(channel_id)} WHERE guild_id = '{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_leave(guild_id, channel_id):
# sets the leave_channel to the given channel id
conn = edit_settings_leave.db
c = conn.cursor()
c.execute(f"UPDATE settings SET leave_channel_id ={str(channel_id)} WHERE guild_id ='{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_cmd(guild_id, channel_id):
    # sets the cmd_channel to the given channel id
    conn = edit_settings_cmd.db
c = conn.cursor()
c.execute(f"UPDATE settings SET cmd_channel_id ={str(channel_id)} WHERE guild_id ='{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def set_leave_text(guild_id, text):
    # sets the leave text for the given guild
    conn = set_leave_text.db
c = conn.cursor()
c.execute(f"UPDATE settings SET leave_text ='{str(text)}' WHERE guild_id ='{guild_id}'")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_lvl(guild_id, channel_id):
# sets the lvl_channel to the given channel id
    conn = edit_settings_lvl.db
c = conn.cursor()
c.execute(f"UPDATE settings SET lvl_channel_id ={str(channel_id)} WHERE guild_id ='{guild_id}'")
conn.commit()
c.close()
return
'''
end of channel and warning settings
Welcomeimg
'''
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_img(guild_id, img):
# sets the column imgwelcome to 1/enabled or 0/disabled
conn = edit_settings_img.db
c = conn.cursor()
c.execute(f"UPDATE settings SET imgwelcome_toggle={str(img)} WHERE guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_img_text(guild_id, img="Welcome {0.mention} to {1}!"):
# Unused for now but it will be used for the welcome image function
# sets the message text in the column imgwelcome_text to what ever you enter
conn = edit_settings_img_text.db
c = conn.cursor()
c.execute(f"UPDATE settings SET imgwelcome_text={str(img)} WHERE guild_id={str(guild_id)}")
conn.commit()
c.close()
return
'''
End of general settings
Level system
'''
@app.task(base=DatabaseTask, ignore_result=True)
def update_xp_text(guild_id, user_id, amount):
# updates the xp amount for a given user
conn = update_xp_text.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET text_xp={str(amount)} WHERE user_id={str(user_id)} and guild_id={str(guild_id)};")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def update_text_lvl(guild_id, user_id, amount=1):
    # updates the text lvl for a given user
    conn = update_text_lvl.db
c = conn.cursor()
c.execute(f"UPDATE profiles SET text_lvl = {str(amount)} WHERE user_id ="
f"{str(user_id)} and guild_id={str(guild_id)}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def edit_settings_levelsystem(guild_id, toggler):
# sets the column level system to 1/enabled or 0/disabled
conn = edit_settings_levelsystem.db
c = conn.cursor()
c.execute(f"UPDATE settings SET levelsystem_toggle= {str(toggler)} WHERE guild_id={guild_id}")
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def insert_message(guild_id, userid, messageid, channelid, message):
"""
:param guild_id: id of the guild the data is for
:param userid: the id of the user given by the restapi
:param messageid: the id of the message
:param channelid: the channel id the message was sent in
:param message: message content itself
:return: nothing
"""
conn = insert_message.db
c = conn.cursor()
c.execute(f"INSERT INTO `messages`(`guild_id`, `user_id`, `message_id`, `channel_id`, `message`)"
f"VALUES ('{guild_id}','{userid}','{messageid}','{channelid}', %s)", (message,))
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def insert_reaction(guild_id, message_id, roleid, emoji):
"""
:param guild_id: id of the guild the data is for
:param message_id: the id of the message
:param roleid: the role a user should get if the user reacts
:param emoji: the emoji the bot reacted with
:return:
"""
conn = insert_message.db
c = conn.cursor()
c.execute(f"INSERT INTO `reactions`(`guild_id`, `message_id`, `role_id`, `emoji`) VALUES ('{guild_id}',"
f"'{message_id}','{roleid}', %s)", (emoji,))
conn.commit()
c.close()
return
@app.task(base=DatabaseTask, ignore_result=True)
def update_role_name(guild_id, roleid, name):
"""
:param guild_id: id of the guild the data is for
:param roleid: the role that should be updated
:param name: the new name of the role
:return:
"""
conn = update_role_name.db
c = conn.cursor()
c.execute(f"UPDATE roles SET role_name= '{str(name)}' WHERE role_id= '{roleid}' and guild_id='{str(guild_id)}'")
conn.commit()
c.close()
return
``` |
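The tasks above interpolate ids straight into the SQL strings. For comparison, a minimal sketch of one of these queries written with driver-side placeholders, the same `%s` style already used for the `message` and `emoji` values above; the standalone function name is made up and it assumes a DB-API connection like the one returned by `sql()`.
```python
# Sketch only: a parameterized variant of the edit_warns update, not part of
# the Celery task set above.
def edit_warns_parameterized(conn, guild_id: int, user_id: int, amount: int):
    c = conn.cursor()
    try:
        c.execute(
            "UPDATE profiles SET warnings = %s WHERE user_id = %s AND guild_id = %s",
            (amount, user_id, guild_id),
        )
        conn.commit()
    finally:
        c.close()
```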
{
"source": "11Tuvork28/moodle_scrapping_discord_bot",
"score": 3
} |
#### File: 11Tuvork28/moodle_scrapping_discord_bot/Main_bot.py
```python
import asyncio
import discord
from discord.ext import commands
import get_link
client = commands.Bot(command_prefix='.') # Sets the prefix to listen.
async def post_tasks(): # background method to send the data extracted in get_link.py
await client.wait_until_ready()
channel = client.get_channel(680655448042504232) # channel ID goes here
while not client.is_closed():
await channel.purge(limit=100)
get_link.get_html() # this calls get_link.py to download the webpage to get the newest information
titel = ['None', 'None', 'Deutsch', 'English', 'Mathe', 'FBE', 'Datenbanken', 'Programmieren', 'FBIN', 'None',
                 'None', 'Politik', 'Wirtschaft', 'None', 'None']  # list with section titles in it
for section in range(2, 14): # goes through every section in get_link.py
            text = str(get_link.get_information_main(section)).replace('[', '').replace(']', '').replace("'", '')  # calls get_link with section (an int), so that the script knows which section
            if text in ('', 'None'):  # nothing was extracted for this section
                test = "Oh nothing found xD"
            else:
                test = "Footer :D"
            message = discord.Embed(
                title=titel[section],
                description=text,
                colour=discord.Colour.blurple()
            )
message.set_footer(text= test)
await channel.send(embed=message)
#await channel.send(str(lists[section])+'\n'+str(get_link.get_information_main(section)).replace('[', '').replace(']', '').replace("'", ''))
await channel.send('@everyone ')
await asyncio.sleep(86400) # task runs every 86400 seconds or better every 24h
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.command()
async def post(ctx):  # Method to manually get the data from get_link.py
channel = client.get_channel(680655448042504232)
lists = ['None', 'None', 'Deutsch', 'English', 'Mathe', 'FBE', 'Datenbanken', 'Programmieren', 'FBIN', 'None',
'None', 'Politik', 'Wirtschaft', 'None', 'None']
for section in range(2, 14):
await channel.send(str(lists[section])+'\n'+str(get_link.get_information_main(section))) #Not sent in an embed remember it
@client.command()
async def vertretungsplan(message):  # this method sends the link below and mentions the author of the request
channel = client.get_channel(680655448042504232)
await channel.send('https://webuntis.krzn.de/WebUntis/monitor?school=bk-technik-moers&monitorType'
'=subst&format=Schulflur' + '{0.author.mention}'.format(message))
@client.command()
async def clear(ctx): # Method to manually clear the channel
channel = client.get_channel(680655448042504232)
await channel.purge(limit=100)
client.loop.create_task(post_tasks())
client.run('')
``` |
{
"source": "11Tuvork28/Python-ai-assistant",
"score": 2
} |
#### File: client/engines/stt.py
```python
import logging
import speech_recognition as sr
import server
from server.core.console import ConsoleManager
class STTEngine:
"""
Speech To Text Engine (STT)
Google API Speech recognition settings
SpeechRecognition API : https://pypi.org/project/SpeechRecognition/2.1.3
"""
def __init__(self):
super().__init__()
self.console_manager = ConsoleManager()
self.console_manager.console_output(info_log="Configuring Mic..")
self.recognizer = sr.Recognizer()
self.recognizer.pause_threshold = 0.5
self.microphone = sr.Microphone()
self.console_manager.console_output(info_log="Microphone configured successfully!")
def recognize_input(self, already_activated=False):
"""
        Recognize input from mic and return the transcript if the activation tag (assistant name) exists
"""
while True:
transcript = self._recognize_speech_from_mic()
if already_activated or self._activation_name_exist(transcript):
transcript = self._remove_activation_word(transcript)
return transcript
    def _recognize_speech_from_mic(self):
"""
Capture the words from the recorded audio (audio stream --> free text).
Transcribe speech from recorded from `microphone`.
"""
with self.microphone as source:
self.recognizer.adjust_for_ambient_noise(source)
audio = self.recognizer.listen(source)
try:
transcript = self.recognizer.recognize_google(audio).lower()
self.console_manager.console_output(info_log='User said: {0}'.format(transcript))
except sr.UnknownValueError:
# speech was unintelligible
transcript = ''
self.console_manager.console_output(info_log='Unable to recognize speech', refresh_console=False)
except sr.RequestError:
# API was unreachable or unresponsive
transcript = ''
self.console_manager.console_output(error_log='Google API was unreachable')
return transcript
@staticmethod
def _activation_name_exist(transcript):
"""
Identifies the assistant name in the input transcript.
"""
if transcript:
transcript_words = transcript.split()
return bool(set(transcript_words).intersection([jarvis.assistant_name]))
else:
return False
@staticmethod
def _remove_activation_word(transcript):
transcript = transcript.replace(jarvis.assistant_name, '')
return transcript
```
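A stand-alone sketch of the microphone-to-Google-Web-Speech flow that `STTEngine` wraps, without the assistant's console manager; it only assumes the SpeechRecognition and PyAudio packages are installed.
```python
# Illustrative only; the function name is made up and there is no activation
# word handling here.
import speech_recognition as sr


def listen_once() -> str:
    recognizer = sr.Recognizer()
    recognizer.pause_threshold = 0.5
    with sr.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    try:
        return recognizer.recognize_google(audio).lower()
    except sr.UnknownValueError:
        return ""  # speech was unintelligible
    except sr.RequestError:
        return ""  # Google API was unreachable


if __name__ == "__main__":
    print(listen_once())
```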
#### File: client/engines/ttt.py
```python
import logging
from server.core.console import ConsoleManager
from server.utils.console import user_input
class TTTEngine:
"""
Text To Text Engine (TTT)
"""
def __init__(self):
self.logger = logging
self.console_manager = ConsoleManager()
def recognize_input(self, **kwargs):
"""
        Recognize input from console and return the transcript if it's not an empty string.
"""
try:
text_transcript = input(user_input).lower()
while text_transcript == '':
text_transcript = input(user_input).lower()
return text_transcript
except EOFError as e:
self.console_manager.console_output(error_log='Failed to recognize user input with message: {0}'.format(e))
def assistant_response(self, message, refresh_console=True):
"""
        Assistant response in voice and/or in text.
:param refresh_console: boolean
:param message: string
"""
try:
if message:
self.console_manager.console_output(message, refresh_console=refresh_console)
except RuntimeError as e:
self.console_manager.console_output(error_log='Error in assistant response with message: {0}'.format(e))
```
#### File: skills/collection/activation.py
```python
import sys
import time
from datetime import datetime
from server.skills.skill import AssistantSkill
class ActivationSkills(AssistantSkill):
@classmethod
def disable_assistant(cls, **kwargs):
"""
- Clear console
- Shutdown the assistant service
"""
cls.response('Bye')
time.sleep(1)
cls.console(info_log='Application terminated gracefully.')
sys.exit()
@classmethod
def assistant_greeting(cls, **kwargs):
"""
Assistant greeting based on day hour.
"""
now = datetime.now()
day_time = int(now.strftime('%H'))
if day_time < 12:
cls.response('Good morning sir')
elif 12 <= day_time < 18:
cls.response('Good afternoon sir')
else:
cls.response('Good evening sir')
```
#### File: src/tests/skill_analyzer_tests.py
```python
import unittest
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from jarvis.server import settings
from jarvis.server.skills.registry import CONTROL_SKILLS, BASIC_SKILLS, ENABLED_BASIC_SKILLS
from jarvis.server.enumerations import MongoCollections
from jarvis.server.skills.analyzer import SkillAnalyzer
from jarvis.server.utils.mongoDB import db
class TestSkillMatching(unittest.TestCase):
def setUp(self):
all_skills = {
MongoCollections.CONTROL_SKILLS.value: CONTROL_SKILLS,
MongoCollections.ENABLED_BASIC_SKILLS.value: ENABLED_BASIC_SKILLS,
}
for collection, documents in all_skills.items():
db.update_collection(collection, documents)
default_assistant_name = settings.DEFAULT_GENERAL_SETTINGS['assistant_name']
default_input_mode = settings.DEFAULT_GENERAL_SETTINGS['input_mode']
default_response_in_speech = settings.DEFAULT_GENERAL_SETTINGS['response_in_speech']
default_settings = {
'assistant_name': default_assistant_name,
'input_mode': default_input_mode,
'response_in_speech': default_response_in_speech,
}
db.update_collection(collection=MongoCollections.GENERAL_SETTINGS.value, documents=[default_settings])
self.skill_analyzer = SkillAnalyzer(
weight_measure=TfidfVectorizer,
similarity_measure=cosine_similarity,
args=settings.SKILL_ANALYZER.get('args'),
sensitivity=settings.SKILL_ANALYZER.get('sensitivity'),
)
def test_all_skill_matches(self):
"""
        In this test we examine the matches of ALL skill tags with the skill functions.
        If all skills are matched correctly the test passes, otherwise it fails.
        At the end we print a report with all the unmatched cases.
"""
verifications_errors = []
for basic_skill in BASIC_SKILLS:
print('--------------------------------------------------------------------------------------')
print('Examine skill: {0}'.format(basic_skill.get('name')))
            for tag in basic_skill.get('tags', '').split(','):
# If the skill has matching tags
if tag:
expected_skill = basic_skill.get('name')
actual_skill = self.skill_analyzer.extract(tag).get('name')
try:
self.assertEqual(expected_skill, actual_skill)
except AssertionError as e:
verifications_errors.append({'tag': tag, 'error': e})
print('-------------------------------------- SKILLS MATCHING REPORT --------------------------------------')
if verifications_errors:
for increment, e in enumerate(verifications_errors):
print('{0})'.format(increment))
print('Not correct match with tag: {0}'.format(e.get('tag')))
print('Assertion values (expected != actual): {0}'.format(e.get('error')))
raise AssertionError
else:
print('All skills matched correctly!')
``` |
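The `SkillAnalyzer` exercised by this test matches user transcripts to skill tags with TF-IDF weighting and cosine similarity. A minimal sketch of that matching idea; the tags and skill names below are made up, not the assistant's real skill registry.
```python
# Sketch of TF-IDF + cosine-similarity tag matching, independent of the
# assistant's SkillAnalyzer class.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

tags = ["tell me the time", "what is the weather", "open a website"]
skills = ["time_skill", "weather_skill", "browser_skill"]

vectorizer = TfidfVectorizer()
tag_matrix = vectorizer.fit_transform(tags)


def best_skill(transcript: str) -> str:
    scores = cosine_similarity(vectorizer.transform([transcript]), tag_matrix)
    return skills[scores.argmax()]


print(best_skill("what's the weather like"))  # -> weather_skill
```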
{
"source": "11uc/whole_cell_patch",
"score": 3
} |
#### File: whole_cell_patch/whole_cell_patch/cpTableWidget.py
```python
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QShortcut
class CpTableWidget(QTableWidget):
'''
With function and key press responses to enable copy/paste without
entering edit mode and multiple cell paste. Also enable deletion
in multiple cells.
'''
def __init__(self, *args):
'''
Initialize attributes, add shortcuts.
Attributes
----------
copied: list
List of string in cells that are copied.
'''
super().__init__(*args)
self.copied = []
s1 = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_C), self)
s1.activated.connect(self.copy)
s2 = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_V), self)
s2.activated.connect(self.paste)
def copy(self):
'''
Copy text from selected item(s).
'''
self.copied.clear()
for it in self.selectedItems():
            if it is None:
self.copied.append('')
else:
self.copied.append(it.text())
def paste(self):
'''
        Paste copied texts. If more items are copied than cells to paste,
paste the first few cells until paste cells are filled. If less items
are copied than the cells to paste, loop around copied items until
paste cells are filled.
'''
i = 0
n = len(self.copied)
if n > 0:
for it in self.selectedItems():
                if it is None:
item = QTableWidgetItem(self.copied[i % n])
else:
it.setText(self.copied[i % n])
i += 1
def delete(self):
'''
Deletion without entering editing mode.
'''
for it in self.selectedItems():
            if it is not None:
it.setText('')
    def keyPressEvent(self, evt):
        # Qt's key handler is keyPressEvent; handle Delete here and forward other keys
        if evt.key() == Qt.Key_Delete:
            self.delete()
        else:
            super().keyPressEvent(evt)
```
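A minimal sketch showing `CpTableWidget` in a bare PyQt5 window; the import path is assumed from the file header above and is only for illustration.
```python
# Illustrative only; assumes the package layout implied by the file path.
import sys
from PyQt5.QtWidgets import QApplication
from whole_cell_patch.cpTableWidget import CpTableWidget

if __name__ == "__main__":
    app = QApplication(sys.argv)
    table = CpTableWidget(4, 3)  # 4 rows, 3 columns
    table.setWindowTitle("Ctrl+C / Ctrl+V copy and paste")
    table.show()
    sys.exit(app.exec_())
```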
#### File: whole_cell_patch/whole_cell_patch/filterDialog.py
```python
from PyQt5.QtWidgets import QLabel, QGridLayout, QPushButton, \
QLineEdit, QVBoxLayout, QHBoxLayout, QDialog, QComboBox, QWidget
from PyQt5.QtCore import pyqtSignal
class FilterDialog(QDialog):
'''
Dialog for choosing filter types.
'''
def __init__(self, default, parent = None):
'''
Build ui and set up parameter setting
Parameters
----------
default: list
List of filters, which are dictionaries with names under
key "name" and parameter elements.
parent: QWidget, optional
Parent widget.
Attributes
----------
fnames: dictionary
Names of filters, two nested dictionaries to specify two
properties about the type of filters.
'''
self.defaultFilters = default
super().__init__(parent)
self.filterCb = QComboBox(self) # Filter type
self.bandCb = QComboBox(self) # Band type
self.fnames = {}
count = 0
for f in default:
names = f["name"].split(',')
if names[0] not in self.fnames:
self.fnames[names[0]] = {}
self.filterCb.addItem(names[0])
if len(names) > 1:
if names[1] not in self.fnames[names[0]]:
self.fnames[names[0]][names[1]] = count
else:
self.fnames[names[0]][''] = count
count += 1
okBtn = QPushButton("OK", self)
cancelBtn = QPushButton("Cancel", self)
okBtn.clicked.connect(self.accept)
cancelBtn.clicked.connect(self.reject)
self.filterCb.currentTextChanged.connect(self.updateBand)
topVB = QVBoxLayout(self)
topVB.addWidget(self.filterCb)
topVB.addWidget(self.bandCb)
topVB.addWidget(okBtn)
topVB.addWidget(cancelBtn)
def updateBand(self, name):
'''
Update list of band in the band combobox.
Parameters
----------
name: str
Name of filter type.
'''
self.bandCb.clear()
self.bandCb.addItems(list(self.fnames[name].keys()))
def exec_(self):
'''
Override QDialog exec_ function. Alter return code to -1 for rejection
and integer number for chosen filter's id.
'''
ret = super().exec_()
if ret:
return self.fnames[self.filterCb.currentText()][
self.bandCb.currentText()]
else:
return -1
class FilterParamDialog(QDialog):
'''
Dialog for setting filter parameters.
'''
def __init__(self, parent = None):
'''
Build ui and set up connections.
Parameters
----------
parent: QWidget, optional
Parent widget.
Attributes
----------
form: dictionary
Parameter names as keys and corresponding QLineEdit object
as values.
formWd: QWidget
Container for displaying the parameter setting form.
'''
super().__init__(parent)
self.form = {}
okBtn = QPushButton("OK", self)
cancelBtn = QPushButton("Cancel", self)
topVB = QVBoxLayout(self)
self.formVB = QVBoxLayout()
self.formWd = None
btnHB = QHBoxLayout()
btnHB.addWidget(okBtn)
btnHB.addWidget(cancelBtn)
cancelBtn.clicked.connect(self.reject)
okBtn.clicked.connect(self.accept)
topVB.addLayout(self.formVB)
topVB.addLayout(btnHB)
def makeForm(self, filt):
'''
Build parameters setting grid layout for filter filt.
Parameters
----------
filt: dictionary
Filter information, parameters are in string format.
'''
# clear the previous form widget
if self.formWd != None:
self.formVB.removeWidget(self.formWd)
self.form = {}
self.formWd.setParent(None)
del self.formWd
self.formWd = None
self.formWd = QWidget()
formGrid = QGridLayout(self.formWd)
row = 0
for k, v in filt.items():
if k != "name":
self.form[k] = QLineEdit(v, self.formWd)
formGrid.addWidget(QLabel(k, self.formWd), row, 0)
formGrid.addWidget(self.form[k], row, 1)
row = row + 1
self.formVB.addWidget(self.formWd)
def getForm(self):
'''
Get the parameters filled in the QLineEdit objects.
Returns
-------
filt: dictionary
Filter information, without name.
'''
filt = {}
for k, v in self.form.items():
filt[k] = v.text()
return filt
```
#### File: whole_cell_patch/whole_cell_patch/mini.py
```python
import os
import copy
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from .project import Project
from .analysis import Analysis
from .process import SignalProc
from . import plot
class Mini(SignalProc, Analysis):
'''
Analyze mini postsynaptic response properties, including
decay time constant, amplitude and frequency.
'''
def __init__(self, inTxtWidget, projMan = None, parent = None):
'''
Load spike detection parameters from the grand parameter file
and raw data information.
Parameters
----------
inTxtWidget: QLineEdit
Input line widget of the main window.
projMan: Project
Object containing information about the project including
raw data and some parameters.
'''
SignalProc.__init__(self)
Analysis.__init__(self, inTxtWidget, projMan, parent)
def loadDefault(self, name):
'''
Override parent class method.
'''
default = {
"basic": {"sign" : -1,
"medianFilterWinSize" : 5,
"medianFilterThresh" : 30e-12,
"lowBandWidth" : 300,
"riseSlope" : 4.5e-9,
"riseTime" : 5e-3,
"baseLineWin" : 5e-4,
"minAmp" : 1e-11,
"minTau" : 1.2e-3,
"residual" : 200,
"onTauIni" : 1,
"offTauIni" : 20,
"stackWin" : 7e-3,
"scale" : 1e12},
"batchMini": {"protocol": '',
"win": [0, 0],
"verbose": 0},
"aveMini": {"protocol": '',
"cells": [],
"RsTh": 0,
"numTh": 0},
"indMini": {"protocol": '',
"cells": [],
"trials": []},
}
return default[name]
def setBasic(self, param):
'''
Set basic analysis parameters since it's not passed in any function.
Parameters
----------
param: dictionary
Basic parameters.
'''
self.miniParam = copy.deepcopy(param)
self.miniParam["riseSlope"] *= param["scale"]
self.miniParam["minAmp"] *= param["scale"]
def miniAnalysis(self, trace, sr, win = [0, 0], verbose = 0):
'''
Detect minis and analyze them
Criterions:
1. Rising slope large enough
2. Rise time short enough
3. Amplitude large enough
4. Decay fit exponential curve with low enough residual
5. Decay time constant big enough
It will make a list of peaks with rising slope large enough and rise time short
enough. Then it will look at each of the peaks. For peaks with large enough
amplitude, it will fit double exponential function to it. After fitting, those
with low enough fitting exponential and big enough decay time constant will
be kept.
Parameters
----------
trace: numpy.array
Recorded electric signal trace.
sr: float
Sampling rate.
win: array_like, optional
With 2 scalars, time window in which the minis are analyzed. Default is
[0, 0], takes the entire trace.
verbose: int, optional
Whether to display intermediate results for inspection.
1 - Plot detected minis.
2 - Plot fitting of each mini.
Returns
-------
miniProps: pandas.DataFrame
            Mini properties, rows are detected minis
            in the trial and columns are properties
'''
# mini properties
miniRises = [] # valid minis' rise time points
miniPeaks = [] # valid minis' peak time points
miniAmps = [] # valid mini's peak amplitudes
miniDecayTaus = [] # valid mini's decay time constants
if win[0] != win[1]:
x = trace[int(sr * win[0]):int(sr * win[1])] * \
self.miniParam['sign']
else:
x = trace * self.miniParam['sign']
# rig defect related single point noise
x = self.thmedfilt(x, self.miniParam['medianFilterWinSize'], \
self.miniParam['medianFilterThresh'])
# scale
x = x * self.miniParam["scale"]
# remove linear shifting baseline
p = np.polyfit(np.arange(len(x)), x, 1)
x = (x - np.polyval(p, np.arange(len(x))))
# low pass filter
fx = self.smooth(x, sr, self.miniParam['lowBandWidth'],
"butter", "lowpass")
dfx = np.diff(fx) * sr
peaks = (0 < dfx[0:-1]) & (dfx[1:] < 0)
troughs = (dfx[0:-1] < 0) & (0 < dfx[1:])
# points with local maximum slope, which is also larger than threshold
rises = (dfx[0:-1] < self.miniParam["riseSlope"]) & \
(self.miniParam["riseSlope"] < dfx[1:])
'''
rises = np.zeros(peaks.shape)
rises = (dfx[0:-2] < dfx[1:-1]) & (dfx[2:] < dfx[1:-1]) & \
(self.miniParam['riseSlope'] < dfx[1:-1])
'''
# indices of either rises or peaks
ptrInds = np.concatenate((np.nonzero(peaks | rises | troughs)[0], \
[int(win[1] * sr)]), axis = None)
lastRise = -self.miniParam["riseTime"] * sr # last rise point index
last2Rise = 0 # the rise point index before last rise point
baseline = 0 # current baseline level
peakStack = [] # peaks stacked too close to each other
if verbose > 1:
ax0 = plot.plot_trace_buffer(x, sr, smooth_trace = fx)
ax1 = plot.plot_trace_buffer(fx, sr, pcl = 'r',
points = np.nonzero(rises)[0] / sr)
plot.plot_trace_buffer(fx, sr, pcl = None, ax = ax1,
points = np.nonzero(peaks)[0] / sr)
self.clearPlt()
self.plt(ax0, 0)
self.plt(ax1, 1)
self.linkPlt(0, 0, 1, 0)
for i in range(len(ptrInds) - 1):
if peaks[ptrInds[i]]:
if ptrInds[i] - lastRise < self.miniParam['riseTime'] * sr or \
len(peakStack):
if (len(peakStack) and ptrInds[i + 1] - peakStack[0] \
< self.miniParam["stackWin"] * sr):
peakStack.append(ptrInds[i])
else:
if last2Rise < lastRise - \
int(self.miniParam['baseLineWin'] * sr):
baseline = np.mean(x[lastRise - \
int(self.miniParam['baseLineWin'] * sr):\
lastRise])
amp = fx[ptrInds[i]] - baseline
if self.miniParam['minAmp'] < amp or len(peakStack):
if not len(peakStack) and \
ptrInds[i + 1] - ptrInds[i] < \
self.miniParam["stackWin"] * sr and \
i + 3 < len(ptrInds) and \
not rises[ptrInds[i + 2]]:
peakStack.append(ptrInds[i])
else:
if len(peakStack):
amp = np.max(fx[peakStack] - baseline)
peakStack = []
sample = x[lastRise:ptrInds[i + 1]]
# exponential function to fit the decay
fun = lambda x, t1, t2, a, b, c: \
a * np.exp(-x / t1) - \
b * np.exp(-x / t2) + c
# initial parameter values
p0 = [self.miniParam["offTauIni"],
self.miniParam["onTauIni"],
fx[lastRise] + amp - baseline,
amp, baseline]
# boundaries
bounds = ([-np.inf, -np.inf, 0, 0, -np.inf],
[np.inf, np.inf, np.inf, np.inf, np.inf])
try:
popt, pcov = curve_fit(fun,
np.arange(len(sample)),
sample, p0, bounds = bounds,
loss = "linear",
max_nfev = 1e3 * len(sample))
tau_rise = popt[1] / sr
tau_decay = popt[0] / sr
res = np.sqrt(np.sum((
fun(np.arange(len(sample)), *popt) - \
sample) ** 2))
if verbose > 1:
self.prt("popt: ", popt)
self.prt("tau rise: ", tau_rise,
"tau decay: ", tau_decay,
"res: ", res,
"time:", lastRise / sr)
self.prt(self.miniParam["residual"])
ax = plot.plot_trace_buffer(
x[lastRise:ptrInds[i + 1]], sr,
smooth_trace = \
fx[lastRise:ptrInds[i + 1]])
plot.plot_trace_buffer(
fun(np.arange(len(sample)), *popt),
sr, ax = ax, cl = 'r')
self.plt(ax, 2)
self.prt("Continue (c) or step (default)")
if self.ipt() == 'c':
verbose = 1
if self.miniParam['minTau'] < tau_decay \
and res < self.miniParam['residual']:
miniPeaks.append(ptrInds[i] / sr)
miniRises.append(lastRise / sr)
miniAmps.append(amp / self.miniParam["scale"])
miniDecayTaus.append(tau_decay)
except RuntimeError as e:
self.prt("Fit Error")
self.prt(e)
except ValueError as e:
self.prt("Initialization Error")
self.prt(e)
elif rises[ptrInds[i]]:
last2Rise = lastRise
lastRise = ptrInds[i]
miniProps = pd.DataFrame({"peak": miniPeaks, "rise": miniRises,
"amp": miniAmps, "decay": miniDecayTaus})
if verbose > 0:
ax0 = plot.plot_trace_buffer(x, sr, smooth_trace = fx)
ax1 = plot.plot_trace_buffer(fx, sr, pcl = 'r',
points = np.nonzero(rises)[0] / sr)
plot.plot_trace_buffer(fx, sr, pcl = None, ax = ax1,
points = np.nonzero(peaks)[0] / sr)
plot.plot_trace_buffer(fx, sr, pcl = 'b', ax = ax1,
points = miniRises)
ax2 = plot.plot_trace_buffer(dfx, sr)
self.clearPlt()
self.plt(ax0, 0)
self.plt(ax1, 1)
self.plt(ax2, 2)
self.linkPlt(0, 0, 1, 0)
self.ipt("Input any thing to continue.")
self.clearPlt()
return miniProps
def batchMiniAnalysis(self, protocol, win = [0, 0], verbose = 1):
'''
Analyze minis in all raw data in a certain subfolder/protocol
in the current data set. Save all the properties in an intermediate
hdf5 file in the working directory, in the groups
/mini/protocol/[miniProps and trialProps].
Parameters
----------
protocol: string
Subfolder/protocol in which the mini detection is done.
win: array_like, optional
Two scalars giving the time window in which the minis are analyzed.
Default is [0, 0], which takes the entire trace.
verbose: int
Whether to print progress information.
0 - No output.
1 - Print cell and trial numbers.
2 - Plot detected minis.
3 - Plot each fitting of a possible mini.
'''
# Detect minis and save properties in file
# trialProps includes window size and total number of minis
dur = win[1] - win[0]
miniProps = []
trialProps = []
for c, t in self.projMan.iterate(protocol):
if verbose:
self.prt("Cell", c, "Trial", t)
trace, sr, stim = self.projMan.loadWave(c, t)
props = self.miniAnalysis(trace, sr, win, verbose - 1)
props.index.name = "id"
props["cell"] = c
props["trial"] = t
props.set_index(["cell", "trial"], append = True, inplace = True)
miniProps.append(props)
trialProps.append(pd.DataFrame({"dur": dur, "num": len(props)},
index = pd.MultiIndex.from_tuples([(c, t)],
names = ["cell", "trial"])))
if self.stopRequested():
return 0
miniProps = pd.concat(miniProps, sort = True)
trialProps = pd.concat(trialProps, sort = True)
store = pd.HDFStore(self.projMan.workDir + os.sep + "interm.h5")
store.put("/mini/" + protocol + "/miniProps", miniProps)
store.put("/mini/" + protocol + "/trialProps", trialProps)
store.close()
def aveProps(self, protocol, cells = [], RsTh = 0, numTh = 0):
'''
Calculate average mini properties. If the access resistance has already
been calculated, only use trials with access resistance lower than the
provided threshold.
Parameters
----------
protocol: string
Subfolder/protocol in which the mini detection is done.
cells: array_like, optional
Ids of cells to include, default is all the cells.
RsTh: float, optional
Maximum access resistance threshold. Used when the access
resistances for cells in this protocol/subfolder have been calculated.
By default not applied.
numTh: int, optional
Minimum number of valid trials required to include the
cell. By default not applied.
Returns
-------
aveMiniProps: pandas.DataFrame
DataFrame with average properties for each cell entry.
'''
store = pd.HDFStore(self.projMan.workDir + os.sep + "interm.h5")
miniDataF = "/mini/" + protocol + "/miniProps"
trialDataF = "/mini/" + protocol + "/trialProps"
if miniDataF in store.keys() and trialDataF in store.keys():
miniProps = store.get(miniDataF)
trialProps = store.get(trialDataF)
store.close()
if RsTh > 0 and len(miniProps):
try:
stProps = pd.read_hdf(
self.projMan.workDir + os.sep + "interm.h5",
"/st/" + protocol + "/stProps")
analyzedCells = list(set(miniProps["cell"]))
stProps = stProps.loc[(analyzedCells), :]
idx = stProps.index[stProps["Rs"] < RsTh]
miniProps.reset_index("id", inplace = True)
miniProps.drop("id", axis = 1, inplace = True)
miniProps = miniProps.loc[idx, :]
trialProps = trialProps.loc[idx, :]
except KeyError as e:
self.prt(e)
self.prt("Seal test not done for these traces yet,",
"Rin threshold won't be used.")
if len(cells):
cells = list(set(cells) &
set(self.projMan.getSelectedCells()) &
set(trialProps.index.get_level_values("cell")))
miniProps = miniProps.loc[(slice(None), cells), :]
trialProps = trialProps.loc[(slice(None), cells), :]
aveMiniProps = miniProps.groupby("cell").mean()
sumTrialProps = trialProps.groupby("cell").sum()
sumTrialProps["rate"] = sumTrialProps["num"] / sumTrialProps["dur"]
aveMiniProps = aveMiniProps.merge(sumTrialProps, "left", on = "cell")
if numTh > 0 and len(miniProps):
counts = miniProps.groupby("cell").count()
idx = counts.index[counts.iloc[:, 0] > numTh]
aveMiniProps = aveMiniProps.loc[idx, :]
aveMiniProps= aveMiniProps.join(self.projMan.getAssignedType(),
"cell", "left")
aveMiniProps.to_csv(self.projMan.workDir + os.sep + \
"mini_" + protocol + ".csv")
return aveMiniProps
store.close()
def indProps(self, protocol, cells = [], trials = []):
'''
Output properties of individual minis to a csv file.
Parameters
----------
protocol: string
Subfolder/protocol in which the mini detection is done.
cells: array_like, optional
Ids of cells to include, default is all the cells.
trials: array_like, optional
Ids of trials to include, default is all the trials.
'''
store = pd.HDFStore(self.projMan.workDir + os.sep + "interm.h5")
miniDataF = "/mini/" + protocol + "/miniProps"
trialDataF = "/mini/" + protocol + "/trialProps"
if miniDataF in store.keys() and trialDataF in store.keys():
miniProps = store.get(miniDataF)
trialProps = store.get(trialDataF)
store.close()
if len(cells):
cells = list(set(cells) &
set(self.projMan.getSelectedCells()) &
set(trialProps.index.get_level_values("cell")))
else:
cells = list(set(self.projMan.getSelectedCells()) &
set(trialProps.index.get_level_values("cell")))
if len(trials):
miniProps = miniProps.loc[(slice(None), cells, trials), :]
else:
miniProps = miniProps.loc[(slice(None), cells,), :]
miniProps.to_csv(self.projMan.workDir + os.sep + \
"ind_mini_" + protocol + ".csv")
store.close()
def profile(self):
'''
Override parent class method.
'''
basicParam = {"sign" : "int",
"medianFilterWinSize" : "int",
"medianFilterThresh" : "float",
"lowBandWidth" : "float",
"riseSlope" : "float",
"riseTime" : "float",
"baseLineWin" : "float",
"minAmp" : "float",
"minTau" : "float",
"residual" : "float",
"onTauIni" : "float",
"offTauIni" : "float",
"stackWin" : "float",
"scale" : "float"}
prof = [
{"name": "Mini Analysis",
"pname": "batchMini",
"foo": self.batchMiniAnalysis,
"param": {"protocol": "protocol",
"win": "floatr",
"verbose": "int"}},
{"name": "Mean Properties",
"pname": "aveMini",
"foo": self.aveProps,
"param": {"protocol": "protocol",
"cells": "intl",
"RsTh": "float",
"numTh": "int"}},
{"name": "Properties",
"pname": "indMini",
"foo": self.indProps,
"param": {"protocol": "protocol",
"cells": "intl",
"trials": "intl"}}
]
return basicParam, prof
```
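The decay-fitting step in `miniAnalysis` above hinges on a double-exponential model fitted with `scipy.optimize.curve_fit`. The sketch below is not part of the repository; it applies the same functional form to a synthetic event, and the sampling rate, time constants and noise level are purely illustrative assumptions.
```python
# Minimal sketch of the double-exponential decay fit used in miniAnalysis.
# Synthetic data; sr, time constants and noise level are assumed values.
import numpy as np
from scipy.optimize import curve_fit

sr = 10000.0                                  # assumed sampling rate (Hz)
t = np.arange(200)                            # 20 ms of samples
true = 5.0 * np.exp(-t / 80.0) - 5.0 * np.exp(-t / 10.0) + 0.2
noisy = true + np.random.normal(0, 0.05, len(t))

fun = lambda x, t1, t2, a, b, c: a * np.exp(-x / t1) - b * np.exp(-x / t2) + c
p0 = [60.0, 5.0, 4.0, 4.0, 0.0]               # [decay tau, rise tau, a, b, baseline]
bounds = ([-np.inf, -np.inf, 0, 0, -np.inf], [np.inf] * 5)
popt, _ = curve_fit(fun, t, noisy, p0, bounds=bounds,
                    max_nfev=int(1e3 * len(t)))

tau_decay = popt[0] / sr                      # decay time constant (s)
tau_rise = popt[1] / sr                       # rise time constant (s)
res = np.sqrt(np.sum((fun(t, *popt) - noisy) ** 2))
print(tau_decay, tau_rise, res)
```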
#### File: whole_cell_patch/whole_cell_patch/paramDialog.py
```python
from PyQt5.QtWidgets import QLabel, QGridLayout, QPushButton, \
QVBoxLayout, QHBoxLayout, QDialog, QMessageBox
from .paramWidget import ParamWidget
class ParamDialog(QDialog):
'''
Dialog window used to set basic parameters for an analysis method.
'''
def __init__(self, paramTyp, param, parent = None):
'''
Build the window.
'''
super().__init__(parent)
self.setModal(True)
self.paramGrid = ParamWidget(paramTyp, param)
okBtn = QPushButton("OK")
cancelBtn = QPushButton("Cancel")
topVB = QVBoxLayout(self)
topVB.addLayout(self.paramGrid)
topVB.addWidget(okBtn)
topVB.addWidget(cancelBtn)
okBtn.clicked.connect(self.accept)
cancelBtn.clicked.connect(self.reject)
def getParam(self):
'''
Return parameters from this window.
'''
return self.paramGrid.getParam()
def updateDisp(self, param):
'''
After parameter changes due to importing or change of protocols,
update display of parameters.
Parameters
----------
param: dictionary
New parameters. Default is None, in which case only the protocols are updated.
'''
self.paramGrid.updateDisp(param)
self.update()
```
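A hypothetical usage sketch for the dialog above; the parameter type strings mirror the ones used by the analysis `profile()` methods earlier in this dump, but the exact set accepted by `ParamWidget` is an assumption.
```python
# Hypothetical usage of ParamDialog; type strings and values are assumed.
import sys
from PyQt5.QtWidgets import QApplication
from whole_cell_patch.paramDialog import ParamDialog

app = QApplication(sys.argv)
paramTyp = {"winSize": "int", "threshold": "float"}   # assumed ParamWidget types
param = {"winSize": 5, "threshold": 30e-12}
dlg = ParamDialog(paramTyp, param)
if dlg.exec_():            # OK button -> accept()
    print(dlg.getParam())  # edited values returned as a dictionary
```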
#### File: whole_cell_patch/whole_cell_patch/project.py
```python
import os
import re
import numpy as np
import pandas as pd
import pickle
from igor import binarywave
from PyQt5.QtCore import QObject, pyqtSlot
from .process import SignalProc
class Project(QObject, SignalProc):
'''
Contain functions used to manipulate raw data files.
Attributes
----------
projFile: string
Directory of the file saving the project information.
name: string
Name of the project, needs to be not empty in *create* mode.
baseFolder: list
List of folders with the raw trace data, needs to be non-empty in *create*
mode.
workDir: string
Working directory for saving the processing data and output results.
formatParam: dictionary
Raw trace file format parameters.
assignedProt: dictionary, optional
Trials and corresponding protocols used to record the trials.
protocols: list
Names of all protocols.
assignedTyp: pandas.DataFrame, optional
Cells and corresponding experimental types.
types: list
Names of all cell types.
selectedCells: list
Index of cells that are selected for analysis in this project
'''
def __init__(self, projFile = '', name = '', baseFolder = [],
workDir = '', formatParam = {}):
'''
Create a new project with name, baseFolder and workDir or load a
project from saved file projFile.
Parameters
----------
projFile: string, optional
Directory of the file saving the project information. Default is empty,
in which case the other specified parameters will be used; otherwise
they'll be ignored.
name: string, optional
Name of the project, needs to be not empty in *create* mode. Default
is empty.
baseFolder: list, optional
List of folders with the raw trace data, needs to be not empty in
*create* mode. Default is empty.
workDir: string, optional
Working directory for saving the processing data and output results.
Default is empty.
formatParam: dictionary, optional
Raw trace file format parameters, default is empty, in which case a
default format is used.
'''
super().__init__()
self.projFile = projFile
if len(projFile) == 0:
self.name = name
self.workDir = workDir
if len(workDir) and workDir[-1] != os.sep:
self.workDir += os.sep
self.baseFolder = baseFolder
for i in range(len(self.baseFolder)):
if self.baseFolder[i][-1] != os.sep:
self.baseFolder[i] += os.sep
if len(formatParam):
self.formatParam = formatParam
else:
self.formatParam = {"prefix": "Cell",
"pad": '4',
"link": '_',
"suffix": ".ibw"}
else:
self.load(projFile)
self.filters = []
def edit(self, dummy):
'''
Edit basic project information based the attributes in a dummy class.
Parameters
----------
dummy: Project
Dummy project with only basic project attributes specified.
'''
self.name = dummy.name
self.baseFolder = dummy.baseFolder
self.workDir = dummy.workDir
self.formatParam = dummy.formatParam
def load(self, projFile):
'''
Load project information from project file.
Parameters
----------
projFile: string
Directory of the file with the project information.
'''
with open(projFile, 'rb') as f:
info = pickle.load(f)
self.projFile = projFile
self.name = info["name"]
self.baseFolder = info["baseFolder"]
self.workDir = info["workDir"]
self.formatParam = info["formatParam"]
if "assignedProt" in info:
self.assignedProt = info["assignedProt"]
self.protocols = info["protocols"]
if "assignedTyp" in info:
self.assignedTyp = info["assignedTyp"]
self.types = info["types"]
if "selectedCells" in info:
self.selectedCells = info["selectedCells"]
def save(self, target = ''):
'''
Save project information into a file.
Parameters
----------
target: string, optional
Directory of the target file to save the information. Default is
empty, in which case it will be saved in the current projFile.
If projFile is empty, do nothing.
'''
info = {}
info["name"] = self.name
info["baseFolder"] = self.baseFolder
info["workDir"] = self.workDir
info["formatParam"] = self.formatParam
if hasattr(self, "assignedProt") and len(self.assignedProt):
info["protocols"] = self.protocols
info["assignedProt"] = self.assignedProt
if hasattr(self, "assignedTyp") and len(self.assignedTyp):
info["types"] = self.types
info["assignedTyp"] = self.assignedTyp
if hasattr(self, "selectedCells") and len(self.selectedCells):
info["selectedCells"] = self.selectedCells
if len(target) == 0:
target = self.projFile
else:
self.projFile = target
if len(target):
with open(target, 'wb') as f:
pickle.dump(info, f)
def genName(self, cell, trial):
'''
Generate raw data file name of certain trial of certain cell.
Parameters
----------
cell: int
Cell index.
trial: int
Trial index.
Returns
-------
fileName: string
Formatted file name.
'''
cell = int(cell)
trial = int(trial)
p = self.formatParam
fileName = (p['prefix'] + p['link'] + '{0:0' + p['pad'] + 'd}' +
p['link'] + '{1:0' + p['pad'] + 'd}' +
p['suffix']).format(cell, trial)
return fileName
@pyqtSlot(tuple)
def selectCells(self, cells):
'''
Select cells that will be analyzed in this project.
Parameters
----------
cells: tuple
In the form of (inc, exc), in which inc is a list of included
cells and exc is a list of excluded cells.
'''
self.selectedCells = sorted(cells[0])
# If cell types have been assigned before, adjust it
# by keeping only the newly selected cells and assign unknown
# type to those that are not assigned before.
if hasattr(self, "assignedTyp"):
updated = pd.DataFrame([],
index = pd.Index(self.selectedCells, name = "cell"))
self.assignedTyp = updated.merge(self.assignedTyp, how = "left",
on = "cell", sort = True).fillna("unknown")
self.types = set(self.assignedTyp["type"])
def getSelectedCells(self):
'''
Get cells selected for analysis in this project.
Returns
-------
cells: list
Sorted list of indices of selected cells. If none has been
selected, use all the cells.
'''
if hasattr(self, "selectedCells"):
return self.selectedCells
else:
return self.getCells()
def assignProtocol(self, cells, labels):
'''
Assign trials to different protocols for different analysis.
Take labeled trial data or labeled stimulation type name
data.
Parameters
----------
cells: array_like
Id of cells to assign protocols. When it has length of 0,
all the cells in the baseFolder will be considered.
labels: pandas.DataFrame or dict
pandas.DataFrame -
Trial and protocol pairs in a DataFrame with two columns,
"trial" and "protocol", "trial" as index. The same
trial-protocol association will be applied to all cells.
dict -
Dictionary with cell ids as keys and trial-protocol pair
data in a DataFrame as values. Specify protocols for
cells separately.
Attributes
----------
protocols: set
Names of protocols.
assignedProt: dictionary
Dictionary with cells as keys and trial-protocol pairs
DataFrame as values.
'''
# drop empty labels
if type(labels) is not dict:
labels.drop(labels.index[labels["protocol"] == ''], inplace = True)
if len(cells) == 0:
cells = self.getCells()
if not hasattr(self, "assignedProt"):
self.assignedProt = {}
for c in cells:
if type(labels) is dict:
self.assignedProt[c] = labels[c]
else:
cTrials = self.getTrials([c])
labeled = list(set(cTrials) & set(labels.index))
prot = labels.loc[labeled, :]
# record the stimulation intensity of the trials as well.
prot["stim"] = np.nan
for t in labeled:
_, _, stim = self.loadWave(c, t)
prot.loc[t, "stim"] = stim[2]
self.assignedProt[c] = prot
# update protocols by checking all protocol tables again
self.protocols = set()
for c, df in self.assignedProt.items():
self.protocols = self.protocols | set(df["protocol"])
def getStimType(self, cells, trials):
'''
Return the stimulation type of trials, used for setting protocols
based on the stimulation type.
Parameters
----------
cells: array_like
Cells of which the stimulation type will be returned.
trials: array_like
Trials of which the stimulation type will be returned.
Returns
-------
stimTypes: pandas.DataFrame
Cells and trials as index, one column with stimulation
amplitude and one column with the stimulation type.
'''
stimTypes = []
for c in cells:
for t in trials:
try:
trace, sr, stim = self.loadWave(c, t)
stimTypes.append([c, t, stim[2], stim[3]])
except IOError:
pass
stimTypes = pd.DataFrame(stimTypes,
columns = ("cell", "trial", "stim", "type"))
return stimTypes
def getProtocols(self):
'''
Get all the protocols specified in this project. If not yet,
return empty set.
'''
if hasattr(self, "protocols"):
return self.protocols
else:
return set()
@pyqtSlot(pd.DataFrame)
def assignType(self, labels):
'''
Assign cells to different types for possible statistical tests.
Parameters
----------
labels: pandas.DataFrame
Cell and type pairs in a DataFrame with two columns,
"cell" and "type", "cell" as index.
Attributes
----------
types: set
Names of cell types.
assignedTyp: pandas.DataFrame
Cell and type pairs in a DataFrame with two columns,
"cell" and "type", "cell" as index.
'''
self.types = set(labels["type"])
self.assignedTyp = labels
print(self.assignedTyp)
def getAssignedType(self):
'''
Get assigned types in the form of a pandas DataFrame, if not
specified yet, return an empty one.
Returns
-------
labels: pd.DataFrame
Cell and type pairs in a DataFrame with two columns,
"cell" and "type", "cell" as index.
'''
if hasattr(self, "assignedTyp"):
return self.assignedTyp
else:
labels = pd.DataFrame([],
index = pd.Index(self.getSelectedCells(), name = "cell"),
columns = ["type"])
return labels
def getCells(self):
'''
Get list of cell ids in the baseFolder.
Returns
-------
cells: list
Cell ids.
'''
cells = set()
for bf in self.baseFolder:
dfs = os.listdir(bf)
for df in dfs:
matched = re.match(self.formatParam['prefix'] + \
self.formatParam['link'] + \
'0*([1-9][0-9]*)' + \
self.formatParam['link'] + \
'0*([1-9][0-9]*)' + \
self.formatParam['suffix'] , df)
if matched:
cells.add(int(matched.group(1)))
return list(cells)
def getTrials(self, cells, protocol = None, stim = None):
'''
Get list of trial ids for cells in the baseFolder. If there is more
than one cell, list the union of trials from each cell. If protocol
and stim are provided, trials will be selected from saved protocol
stim table.
Parameters
----------
cells: array_like
Cell ids. If length is 0, all cells in the baseFolder will be
considered.
protocol: string, optional
Protocol used to limit trials to get. Default not considered.
stim: float, optional
Stimulation amplitude used to limit trials to get. Default
not considered.
Returns
-------
trials: list
Trial ids.
'''
trials = set()
if protocol is None or stim is None:
for bf in self.baseFolder:
dfs = os.listdir(bf)
for c in cells:
for df in dfs:
matched = re.match(self.formatParam['prefix'] + \
self.formatParam['link'] + \
'{:04d}'.format(c) + \
self.formatParam['link'] + \
'0*([1-9][0-9]*)' + \
self.formatParam['suffix'] , df)
if matched:
trials.add(int(matched.group(1)))
elif hasattr(self, "assignedProt"):
for c in cells:
prot = self.assignedProt[c]
ts = set(prot.index[(prot["protocol"] == protocol) &
(abs(prot["stim"] - stim) < 1e-12)])
trials = trials | ts
return list(trials)
def getStims(self, cell, protocol):
'''
Get list of stimulation amplitude for cell in protocol.
'''
stims = []
if hasattr(self, "assignedProt"):
if cell in self.assignedProt:
prot = self.assignedProt[cell]
if "stim" in prot.columns:
stims = set(prot.loc[prot["protocol"] == protocol, "stim"])
return list(stims)
def setFilters(self, filters = []):
'''
Set filters to be applied on the traces when loading them. The
filter parameters need to be converted to numeric format. The
parameters are also checked; if any is invalid, the default will be
used and 0 is returned. Otherwise 1 is returned.
Parameters
----------
filters: list
List of filters defined in dictionaries. Parameters are
in string format.
Returns
-------
ret: int
0 when an invalid parameter is detected, 1 otherwise.
'''
ret = 1
self.filters = []
default = self.getDefaultFilters()
for f in filters:
name = f["name"]
fc = {}
try:
if name == "median":
fc["name"] = name
fc["winSize"] = int(f["winSize"])
fc["threshold"] = float(f["threshold"])
else:
fc["name"] = name
for k, v in f.items():
if k != "name":
fc[k] = float(v)
if (name == "bessel,bandpass" or \
name == "butter,bandpass") and \
fc["freq_high"] <= fc["freq_low"]:
ret = 0
for d in default:
if d["name"] == name:
fc = d
except ValueError as e:
ret = 0
for d in default:
if d["name"] == name:
fc = d
self.filters.append(fc)
return ret
def getDefaultFilters(self, form = "num"):
'''
Define available filter types and default parameters.
Parameters
----------
form: str
Format of the filter parameters.
"num" - numeric, for setting.
"str" - string, for displaying.
'''
if form == "num":
filters = [{"name": "median",
"winSize": 5,
"threshold": 30e-12},
{"name": "butter,lowpass",
"freq": 500},
{"name": "butter,highpass",
"freq": 2000},
{"name": "butter,bandpass",
"freq_low": 500,
"freq_high": 2000},
{"name": "bessel,lowpass",
"freq": 500},
{"name": "bessel,highpass",
"freq": 2000},
{"name": "bessel,bandpass",
"freq_low": 500,
"freq_high": 2000}]
else:
filters = [{"name": "median",
"winSize": '5',
"threshold": "30e-12"},
{"name": "butter,lowpass",
"freq": "500"},
{"name": "butter,highpass",
"freq": "2000"},
{"name": "butter,bandpass",
"freq_low": "500",
"freq_high": "2000"},
{"name": "bessel,lowpass",
"freq": "500"},
{"name": "bessel,highpass",
"freq": "2000"},
{"name": "bessel,bandpass",
"freq_low": "500",
"freq_high": "2000"}]
return filters
def loadWave(self, cell, trial):
'''
Load a trace from an Igor data file, as well as the sampling rate
and stimulation amplitude.
Parameters
----------
cell: int
Cell index.
trial: int
Trial index.
Returns
-------
trace: numpy.array
Data trace in the file.
sr: float
Sampling rate.
stim: list
Stimulation step properties, including start time,
duration, amplitude and type.
'''
try:
sr, stim_amp, stim_dur, stim_start = 10000, 0, 0, 0
stim_type = ''
for bf in self.baseFolder:
dfs = os.listdir(bf)
if self.genName(cell, trial) in dfs:
break
data = binarywave.load(bf + self.genName(cell, trial))
trace = data['wave']['wData']
# Search for sampling rate
searched = re.search(r'XDelta\(s\):(.*?);',
data['wave']['note'].decode())
if(searched != None):
sr = 1 / float(searched.group(1))
# Search for stimulation amplitude
searched = re.search(r';Stim Amp.:(.+?);',
data['wave']['note'].decode())
if(searched != None):
stim_amp = float(searched.group(1))
# Search for stimulation duration
searched = re.search(r';Width:(.+?);',
data['wave']['note'].decode())
if(searched != None):
stim_dur = float(searched.group(1))
# Search for stimulation start
searched = re.search(r';StepStart\(s\):(.+?);',
data['wave']['note'].decode())
if(searched != None):
stim_start = float(searched.group(1))
# Search for stimulation type
searched = re.search(r';StimProtocol:(.+?);',
data['wave']['note'].decode())
if(searched != None):
stim_type = searched.group(1)
if len(self.filters):
for f in self.filters:
names = f["name"].split(',')
if len(names) == 1:
trace = self.thmedfilt(trace, f["winSize"],
f["threshold"])
elif names[1] == "bandpass":
trace = self.smooth(trace, sr,
[f["freq_low"], f["freq_high"]], names[0], names[1])
else:
trace = self.smooth(trace, sr,
f["freq"], names[0], names[1])
return (trace, sr, [stim_start, stim_dur, stim_amp, stim_type])
except IOError:
print('Igor wave file (' + bf + self.genName(cell, trial)
+ ') reading error')
raise
def iterate(self, protocol = None):
'''
Iterate all trace files in a protocol, yield cell
and trial numbers.
Parameters
----------
protocol: string, optional
Name of protocol used on a specific set of trials in each cell.
Default is None, in which case all trials will be traversed.
Yields
------
c: int
Cell number.
t: int
Trial number.
'''
if protocol is not None and len(protocol) and \
hasattr(self, "assignedProt"):
for c in self.getSelectedCells():
if c in self.assignedProt:
lb = self.assignedProt[c]
for t in lb.index:
if lb.loc[t, "protocol"] == protocol:
yield (c, t)
elif protocol is None:
cells = self.getCells()
for c in cells:
for t in self.getTrials([c]):
yield (c, t)
def getTrialTable(self, protocol, cells = [], trials = [],
types = [], stims = []):
'''
Return a table with type, stimulation amplitude, cell id and
trial id as columns. Each row is a trial.
Parameters
----------
protocol: string
Protocol where the subthreshold recording is done.
cells: array_like, optional
Ids of cells to include. By default include all the cells.
trials: array_like, optional
Ids of trials to include. By default include all trials.
types: array_like, optional
Types of cells to include. By default include all the cells.
stims: array_like, optional
Stimulation amplitudes with which trials will be included.
By default include all trials.
Returns
-------
df: pandas.DataFrame
Table with trial labels, "type", "stim", "cell" and "trial".
'''
df = []
if len(protocol) and hasattr(self, "assignedProt"):
if len(cells):
cells = list(set(cells) & set(self.getSelectedCells()))
else:
cells = self.getSelectedCells()
for c in cells:
if c in self.assignedProt and ((not len(types)) or \
self.assignedTyp.loc[c, "type"] in types):
lb = self.assignedProt[c]
if len(trials):
ctrials = np.intersect1d(trials, list(lb.index))
else:
ctrials = lb.index
for t in ctrials:
if lb.loc[t, "protocol"] == protocol and \
((not len(stims)) or lb.loc[t, "stim"] in stims):
df.append([self.assignedTyp.loc[c, "type"],
lb.loc[t, "stim"], c, t])
df = pd.DataFrame(df, columns = ["type", "stim", "cell", "trial"])
return df
def clear(self):
'''
Clear current content to make a new project object.
'''
self.projFile = ''
self.name = ''
self.baseFolder = ''
self.workDir = ''
self.formatParam = {"prefix": "Cell",
"pad": '4',
"link": '_',
"suffix": ".ibw"}
if hasattr(self, "assignedProt"):
del self.protocols
del self.assignedProt
if hasattr(self, "assignedTyp"):
del self.types
del self.assignedTyp
if hasattr(self, "selectedCells"):
del self.selectedCells
```
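A small sketch (not from the repository) of the raw-file naming convention that `genName` and the `getCells`/`getTrials` regexes agree on, using the default `formatParam` values.
```python
# Default formatParam naming: Cell_<cell, zero padded>_<trial, zero padded>.ibw
import re

p = {"prefix": "Cell", "pad": "4", "link": "_", "suffix": ".ibw"}
cell, trial = 3, 12
name = (p["prefix"] + p["link"] + "{0:0" + p["pad"] + "d}" +
        p["link"] + "{1:0" + p["pad"] + "d}" + p["suffix"]).format(cell, trial)
print(name)  # Cell_0003_0012.ibw

# the same pattern getCells()/getTrials() use to recover the indices
matched = re.match(p["prefix"] + p["link"] + "0*([1-9][0-9]*)" +
                   p["link"] + "0*([1-9][0-9]*)" + p["suffix"], name)
print(int(matched.group(1)), int(matched.group(2)))  # 3 12
```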
#### File: whole_cell_patch/whole_cell_patch/start.py
```python
import sys
from PyQt5.QtWidgets import QApplication
from .main import wcpMainWindow
def start_gui():
app = QApplication(sys.argv)
mainWindow = wcpMainWindow()
sys.exit(app.exec_())
``` |
{
"source": "11USERNAME11/KivyMD",
"score": 2
} |
#### File: KivyMD/kivymd/grid.py
```python
from kivy.lang import Builder
from kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \
NumericProperty, ListProperty, OptionProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivymd.ripplebehavior import RectangularRippleBehavior
from kivymd.theming import ThemableBehavior
Builder.load_string("""
<SmartTile>
_img_widget: img
_img_overlay: img_overlay
_box_overlay: box
AsyncImage:
id: img
allow_stretch: root.allow_stretch
anim_delay: root.anim_delay
anim_loop: root.anim_loop
color: root.img_color
keep_ratio: root.keep_ratio
mipmap: root.mipmap
source: root.source
size_hint_y: 1 if root.overlap else None
x: root.x
y: root.y if root.overlap or root.box_position == 'header' else box.top
BoxLayout:
id: img_overlay
size_hint: img.size_hint
size: img.size
pos: img.pos
BoxLayout:
canvas:
Color:
rgba: root.box_color
Rectangle:
pos: self.pos
size: self.size
id: box
size_hint_y: None
height: dp(68) if root.lines == 2 else dp(48)
x: root.x
y: root.y if root.box_position == 'footer' else root.y + root.height - self.height
<SmartTileWithLabel>
_img_widget: img
_img_overlay: img_overlay
_box_overlay: box
_box_label: boxlabel
AsyncImage:
id: img
allow_stretch: root.allow_stretch
anim_delay: root.anim_delay
anim_loop: root.anim_loop
color: root.img_color
keep_ratio: root.keep_ratio
mipmap: root.mipmap
source: root.source
size_hint_y: 1 if root.overlap else None
x: root.x
y: root.y if root.overlap or root.box_position == 'header' else box.top
BoxLayout:
id: img_overlay
size_hint: img.size_hint
size: img.size
pos: img.pos
BoxLayout:
canvas:
Color:
rgba: root.box_color
Rectangle:
pos: self.pos
size: self.size
id: box
size_hint_y: None
height: self.minimum_height #dp(68) if root.lines == 2 else dp(48)
x: root.x
y: root.y if root.box_position == 'footer' else root.y + root.height - self.height
MDLabel:
id: boxlabel
font_style: root.font_style
#halign: "center"
size_hint_y: None
height: self.texture_size[1]
text: root.text
color: root.tile_text_color
""")
class Tile(ThemableBehavior, RectangularRippleBehavior, ButtonBehavior,
BoxLayout):
"""A simple tile. It does nothing special, just inherits the right behaviors
to work as a building block.
"""
pass
class SmartTile(ThemableBehavior, RectangularRippleBehavior, ButtonBehavior,
FloatLayout):
"""A tile for more complex needs.
Includes an image, a container to place overlays and a box that can act
as a header or a footer, as described in the Material Design specs.
"""
box_color = ListProperty([0, 0, 0, 0.5])
"""Sets the color and opacity for the information box."""
box_position = OptionProperty('footer', options=['footer', 'header'])
"""Determines wether the information box acts as a header or footer to the
image.
"""
lines = OptionProperty(1, options=[1, 2])
"""Number of lines in the header/footer.
As per Material Design specs, only 1 and 2 are valid values.
"""
overlap = BooleanProperty(True)
"""Determines if the header/footer overlaps on top of the image or not"""
# Img properties
allow_stretch = BooleanProperty(True)
anim_delay = NumericProperty(0.25)
anim_loop = NumericProperty(0)
img_color = ListProperty([1, 1, 1, 1])
keep_ratio = BooleanProperty(False)
mipmap = BooleanProperty(False)
source = StringProperty()
_img_widget = ObjectProperty()
_img_overlay = ObjectProperty()
_box_overlay = ObjectProperty()
_box_label = ObjectProperty()
def reload(self):
self._img_widget.reload()
def add_widget(self, widget, index=0):
if issubclass(widget.__class__, IOverlay):
self._img_overlay.add_widget(widget, index)
elif issubclass(widget.__class__, IBoxOverlay):
self._box_overlay.add_widget(widget, index)
else:
super(SmartTile, self).add_widget(widget, index)
class SmartTileWithLabel(SmartTile):
_box_label = ObjectProperty()
# MDLabel properties
font_style = StringProperty("Caption")
theme_text_color = StringProperty("Custom")
tile_text_color = ListProperty([1, 1, 1, 1])
text = StringProperty("")
"""Determines the text for the box footer/header"""
class IBoxOverlay():
"""An interface to specify widgets that belong to to the image overlay
in the :class:`SmartTile` widget when added as a child.
"""
pass
class IOverlay():
"""An interface to specify widgets that belong to to the image overlay
in the :class:`SmartTile` widget when added as a child.
"""
pass
``` |
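A hedged usage sketch for the tiles above (not part of KivyMD): it shows how `SmartTile.add_widget` routes children into the image or box overlay depending on the interface mix-in they subclass. The image path and the `ThemeManager` setup are assumptions.
```python
# Hypothetical demo of overlay routing in SmartTile; path and theme are assumed.
from kivy.app import App
from kivy.uix.label import Label
from kivymd.theming import ThemeManager
from kivymd.grid import SmartTile, IBoxOverlay, IOverlay

class Caption(Label, IBoxOverlay):
    """Routed to the header/footer box because it subclasses IBoxOverlay."""

class Badge(Label, IOverlay):
    """Routed to the image overlay because it subclasses IOverlay."""

class DemoApp(App):
    theme_cls = ThemeManager()   # ThemableBehavior widgets look this up on the app

    def build(self):
        tile = SmartTile(source="some_image.png")   # illustrative path
        tile.add_widget(Caption(text="caption"))    # -> tile._box_overlay
        tile.add_widget(Badge(text="*"))            # -> tile._img_overlay
        return tile

if __name__ == "__main__":
    DemoApp().run()
```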
{
"source": "11zhengyu/CINN",
"score": 2
} |
#### File: tests/ops/test_index_assign_op.py
```python
import unittest
import numpy as np
from op_test import OpTest, OpTestTool
import paddle
import paddle.nn.functional as F
import cinn
from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
class TestIndexAssignOp(OpTest):
def setUp(self):
self.init_case()
self.target = DefaultNVGPUTarget()
def init_case(self):
self.axis = 0
self.inputs = {
"x": np.random.random([10, 5]).astype("float32"),
"y": np.random.random([3, 5]).astype("float32"),
"index": np.random.randint(0, 10, size=3)
}
def build_paddle_program(self, target):
x = self.inputs["x"]
y = self.inputs["y"]
out = x
axis = self.axis
while (axis < 0):
axis += len(self.inputs["x"].shape)
if axis == 0:
for i in range(self.inputs["index"].shape[0]):
out[self.inputs["index"][i]] = y[i]
elif axis == 1:
for i in range(self.inputs["x"].shape[0]):
for j in range(self.inputs["index"].shape[0]):
out[i][self.inputs["index"][j]] = y[i][j]
elif axis == 2:
for i in range(self.inputs["x"].shape[0]):
for j in range(self.inputs["x"].shape[1]):
for k in range(self.inputs["index"].shape[0]):
out[i][j][self.inputs["index"][k]] = y[i][j][k]
else:
self.assertTrue(False, "Axis {} No Implement".format(self.axis))
pd_out = paddle.to_tensor(out, stop_gradient=True)
self.paddle_outputs = [pd_out]
def build_cinn_program(self, target):
builder = NetBuilder("index_assign")
x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
y = builder.create_input(Float(32), self.inputs["y"].shape, "y")
index = builder.create_input(
Float(32), self.inputs["index"].shape, "index")
out = builder.index_assign(x, y, index, self.axis)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x, y, index], [
self.inputs["x"], self.inputs["y"],
self.inputs["index"].astype("float32")
], [out])
self.cinn_outputs = [res[0]]
def test_check_results(self):
self.check_outputs_and_grads()
class TestIndexAssignCase1(TestIndexAssignOp):
def init_case(self):
self.inputs = {
"x": np.random.random([10, 5]).astype("float32"),
"y": np.random.random([10, 3]).astype("float32"),
"index": np.random.randint(0, 5, size=3)
}
self.axis = 1
class TestIndexAssignCase2(TestIndexAssignOp):
def init_case(self):
self.inputs = {
"x": np.random.random([10, 5, 5]).astype("float32"),
"y": np.random.random([10, 5, 3]).astype("float32"),
"index": np.random.randint(0, 5, size=3)
}
self.axis = -1
class TestIndexAssignCase3(TestIndexAssignOp):
def init_case(self):
self.inputs = {
"x": np.random.random([10]).astype("float32"),
"y": np.random.random([1]).astype("float32"),
"index": np.random.randint(0, 10, size=1)
}
self.axis = -1
class TestIndexAssignCase4(TestIndexAssignOp):
def init_case(self):
self.inputs = {
"x": np.random.random([10, 5]).astype("float32"),
"y": np.random.random([3, 5]).astype("float32"),
"index": np.array([0, 5, 0])
}
self.axis = 0
if __name__ == "__main__":
unittest.main()
``` |
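A short sketch (not part of the test suite) of the axis-0 semantics that the paddle reference loop above encodes, written directly in numpy.
```python
# index_assign along axis 0: out[index[i]] = y[i]; with repeated indices the
# later row wins, exactly as in the reference loop above.
import numpy as np

x = np.arange(50, dtype="float32").reshape(10, 5)
y = -np.ones((3, 5), dtype="float32")
index = np.array([2, 7, 2])

out = x.copy()
for i, idx in enumerate(index):
    out[idx] = y[i]
print(out[2], out[7])   # both rows replaced by the corresponding rows of y
```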
{
"source": "1200wd/fastecdsa",
"score": 3
} |
#### File: fastecdsa/fastecdsa/ecdsa.py
```python
from binascii import hexlify
from hashlib import sha256 # Python standard lib SHA2 is already in C
from fastecdsa import _ecdsa
from .curve import P256
from .point import Point
from .util import RFC6979, msg_bytes
class EcdsaError(Exception):
def __init__(self, msg):
self.msg = msg
def sign(msg, d, curve=P256, hashfunc=sha256):
"""Sign a message using the elliptic curve digital signature algorithm.
The elliptic curve signature algorithm is described in full in FIPS 186-4 Section 6. Please
refer to http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf for more information.
Args:
| msg (str|bytes|bytearray): A message to be signed.
| d (long): The ECDSA private key of the signer.
| curve (fastecdsa.curve.Curve): The curve to be used to sign the message.
| hashfunc (_hashlib.HASH): The hash function used to compress the message.
"""
# generate a deterministic nonce per RFC6979
rfc6979 = RFC6979(msg, d, curve.q, hashfunc)
k = rfc6979.gen_nonce()
hashed = hashfunc(msg_bytes(msg)).hexdigest()
r, s = _ecdsa.sign(
hashed,
str(d),
str(k),
str(curve.p),
str(curve.a),
str(curve.b),
str(curve.q),
str(curve.gx),
str(curve.gy)
)
return (int(r), int(s))
def verify(sig, msg, Q, curve=P256, hashfunc=sha256):
"""Verify a message signature using the elliptic curve digital signature algorithm.
The elliptic curve signature algorithm is described in full in FIPS 186-4 Section 6. Please
refer to http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf for more information.
Args:
| sig (long, long): The signature for the message.
| msg (str|bytes|bytearray): A message to be signed.
| Q (fastecdsa.point.Point): The ECDSA public key of the signer.
| curve (fastecdsa.curve.Curve): The curve to be used to sign the message.
| hashfunc (_hashlib.HASH): The hash function used to compress the message.
Returns:
bool: True if the signature is valid, False otherwise.
Raises:
fastecdsa.ecdsa.EcdsaError: If the signature or public key are invalid. Invalid signature
in this case means that it has values less than 1 or greater than the curve order.
"""
if isinstance(Q, tuple):
Q = Point(Q[0], Q[1], curve)
r, s = sig
# validate Q, r, s (Q should be validated in constructor of Point already but double check)
if not curve.is_point_on_curve((Q.x, Q.y)):
raise EcdsaError('Invalid public key, point is not on curve {}'.format(curve.name))
elif r > curve.q or r < 1:
raise EcdsaError(
'Invalid Signature: r is not a positive integer smaller than the curve order')
elif s > curve.q or s < 1:
raise EcdsaError(
'Invalid Signature: s is not a positive integer smaller than the curve order')
hashed = hashfunc(msg_bytes(msg)).hexdigest()
return _ecdsa.verify(
str(r),
str(s),
hashed,
str(Q.x),
str(Q.y),
str(curve.p),
str(curve.a),
str(curve.b),
str(curve.q),
str(curve.gx),
str(curve.gy)
)
```
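A usage sketch for the two functions above; `gen_keypair` comes from the keys module imported by the test file later in this dump.
```python
# Sign-then-verify round trip with the module above.
from hashlib import sha256
from fastecdsa.curve import P256
from fastecdsa.ecdsa import sign, verify
from fastecdsa.keys import gen_keypair

d, Q = gen_keypair(P256)                          # private scalar, public point
r, s = sign('a message', d, curve=P256, hashfunc=sha256)
assert verify((r, s), 'a message', Q, curve=P256, hashfunc=sha256)
```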
#### File: fastecdsa/encoding/__init__.py
```python
from abc import ABCMeta, abstractmethod
class KeyEncoder:
"""Base class that any encoding class for EC keys should derive from.
All overriding methods should be static.
"""
__metaclass__ = ABCMeta
@abstractmethod
def encode_public_key(Q):
pass
@abstractmethod
def encode_private_key(d):
pass
@abstractmethod
def decode_public_key(binary_data):
pass
@abstractmethod
def decode_private_key(binary_data):
pass
class SigEncoder:
"""Base class that any encoding class for EC signatures should derive from.
All overriding methods should be static.
"""
__metaclass__ = ABCMeta
@abstractmethod
def encode_signature(r, s):
pass
@abstractmethod
def decode_signature(binary_data):
pass
```
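A minimal sketch, not part of the package, of how a concrete `SigEncoder` could look; the fixed 32-byte width is an assumption suited to a 256-bit curve order, and the real package ships a DER encoder instead (exercised in the tests below).
```python
# Hypothetical SigEncoder subclass: fixed-width big-endian packing of (r, s).
class RawSigEncoder(SigEncoder):
    WIDTH = 32  # bytes per component, assumed for a 256-bit curve order

    @staticmethod
    def encode_signature(r, s):
        return r.to_bytes(RawSigEncoder.WIDTH, 'big') + \
            s.to_bytes(RawSigEncoder.WIDTH, 'big')

    @staticmethod
    def decode_signature(binary_data):
        w = RawSigEncoder.WIDTH
        return (int.from_bytes(binary_data[:w], 'big'),
                int.from_bytes(binary_data[w:], 'big'))
```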
#### File: fastecdsa/fastecdsa/test.py
```python
from binascii import unhexlify, hexlify
from hashlib import sha1, sha224, sha256, sha384, sha512
from os import remove
from random import choice, randint
from re import findall, DOTALL
from six.moves.urllib.request import urlopen
import unittest
from .curve import (
P192, P224, P256, P384, P521, secp192k1, secp224k1, secp256k1, brainpoolP160r1, brainpoolP192r1,
brainpoolP224r1, brainpoolP256r1, brainpoolP320r1, brainpoolP384r1, brainpoolP512r1
)
from .ecdsa import sign, verify
from .encoding.der import DEREncoder, InvalidDerSignature
from .encoding.sec1 import InvalidSEC1PublicKey, SEC1Encoder
from .keys import export_key, gen_keypair, get_public_keys_from_sig, import_key, gen_private_key
from .point import Point
from .util import RFC6979
CURVES = [
P192, P224, P256, P384, P521, secp192k1, secp224k1, secp256k1, brainpoolP160r1, brainpoolP192r1,
brainpoolP224r1, brainpoolP256r1, brainpoolP320r1, brainpoolP384r1, brainpoolP512r1
]
class TestPrimeFieldCurve(unittest.TestCase):
""" cases taken from https://www.nsa.gov/ia/_files/nist-routines.pdf """
def test_P192_arith(self):
S = Point(
0xd458e7d127ae671b0c330266d246769353a012073e97acf8,
0x325930500d851f336bddc050cf7fb11b5673a1645086df3b,
curve=P192
)
d = 0xa78a236d60baec0c5dd41b33a542463a8255391af64c74ee
expected = Point(
0x1faee4205a4f669d2d0a8f25e3bcec9a62a6952965bf6d31,
0x5ff2cdfa508a2581892367087c696f179e7a4d7e8260fb06,
curve=P192
)
R = d * S
self.assertEqual(R, expected)
def test_P224_arith(self):
S = Point(
0x6eca814ba59a930843dc814edd6c97da95518df3c6fdf16e9a10bb5b,
0xef4b497f0963bc8b6aec0ca0f259b89cd80994147e05dc6b64d7bf22,
curve=P224
)
d = 0xa78ccc30eaca0fcc8e36b2dd6fbb03df06d37f52711e6363aaf1d73b
expected = Point(
0x96a7625e92a8d72bff1113abdb95777e736a14c6fdaacc392702bca4,
0x0f8e5702942a3c5e13cd2fd5801915258b43dfadc70d15dbada3ed10,
curve=P224
)
R = d * S
self.assertEqual(R, expected)
def test_P256_arith(self):
S = Point(
0xde2444bebc8d36e682edd27e0f271508617519b3221a8fa0b77cab3989da97c9,
0xc093ae7ff36e5380fc01a5aad1e66659702de80f53cec576b6350b243042a256,
curve=P256
)
d = 0xc51e4753afdec1e6b6c6a5b992f43f8dd0c7a8933072708b6522468b2ffb06fd
expected = Point(
0x51d08d5f2d4278882946d88d83c97d11e62becc3cfc18bedacc89ba34eeca03f,
0x75ee68eb8bf626aa5b673ab51f6e744e06f8fcf8a6c0cf3035beca956a7b41d5,
curve=P256
)
R = d * S
self.assertEqual(R, expected)
def test_P384_arith(self):
S = Point(
int('fba203b81bbd23f2b3be971cc23997e1ae4d89e69cb6f92385dda82768ada415ebab4167459da98e6'
'2b1332d1e73cb0e', 16),
int('5ffedbaefdeba603e7923e06cdb5d0c65b22301429293376d5c6944e3fa6259f162b4788de6987fd5'
'9aed5e4b5285e45', 16),
curve=P384
)
d = int('a4ebcae5a665983493ab3e626085a24c104311a761b5a8fdac052ed1f111a5c44f76f45659d2d111a'
'61b5fdd97583480', 16)
expected = Point(
int('e4f77e7ffeb7f0958910e3a680d677a477191df166160ff7ef6bb5261f791aa7b45e3e653d151b95d'
'ad3d93ca0290ef2', 16),
int('ac7dee41d8c5f4a7d5836960a773cfc1376289d3373f8cf7417b0c6207ac32e913856612fc9ff2e35'
'7eb2ee05cf9667f', 16),
curve=P384
)
R = d * S
self.assertEqual(R, expected)
def test_P521_arith(self):
S = Point(
int('000001d5c693f66c08ed03ad0f031f937443458f601fd098d3d0227b4bf62873af50740b0bb84aa15'
'7fc847bcf8dc16a8b2b8bfd8e2d0a7d39af04b089930ef6dad5c1b4', 16),
int('00000144b7770963c63a39248865ff36b074151eac33549b224af5c8664c54012b818ed037b2b7c1a'
'63ac89ebaa11e07db89fcee5b556e49764ee3fa66ea7ae61ac01823', 16),
curve=P521
)
d = int('000001eb7f81785c9629f136a7e8f8c674957109735554111a2a866fa5a166699419bfa9936c78b62'
'653964df0d6da940a695c7294d41b2d6600de6dfcf0edcfc89fdcb1', 16)
expected = Point(
int('00000091b15d09d0ca0353f8f96b93cdb13497b0a4bb582ae9ebefa35eee61bf7b7d041b8ec34c6c0'
'0c0c0671c4ae063318fb75be87af4fe859608c95f0ab4774f8c95bb', 16),
int('00000130f8f8b5e1abb4dd94f6baaf654a2d5810411e77b7423965e0c7fd79ec1ae563c207bd255ee'
'9828eb7a03fed565240d2cc80ddd2cecbb2eb50f0951f75ad87977f', 16),
curve=P521
)
R = d * S
self.assertEqual(R, expected)
def test_secp256k1_arith(self):
# http://crypto.stackexchange.com/a/787/17884
m = 0xAA5E28D6A97A2479A65527F7290311A3624D4CC0FA1578598EE3C2613BF99522
expected = Point(
0x34F9460F0E4F08393D192B3C5133A6BA099AA0AD9FD54EBCCFACDFA239FF49C6,
0x0B71EA9BD730FD8923F6D25A7A91E7DD7728A960686CB5A901BB419E0F2CA232,
curve=secp256k1
)
R = m * secp256k1.G
self.assertEqual(R, expected)
m = 0x7E2B897B8CEBC6361663AD410835639826D590F393D90A9538881735256DFAE3
expected = Point(
0xD74BF844B0862475103D96A611CF2D898447E288D34B360BC885CB8CE7C00575,
0x131C670D414C4546B88AC3FF664611B1C38CEB1C21D76369D7A7A0969D61D97D,
curve=secp256k1
)
R = m * secp256k1.G
self.assertEqual(R, expected)
m = 0x6461E6DF0FE7DFD05329F41BF771B86578143D4DD1F7866FB4CA7E97C5FA945D
expected = Point(
0xE8AECC370AEDD953483719A116711963CE201AC3EB21D3F3257BB48668C6A72F,
0xC25CAF2F0EBA1DDB2F0F3F47866299EF907867B7D27E95B3873BF98397B24EE1,
curve=secp256k1
)
R = m * secp256k1.G
self.assertEqual(R, expected)
m = 0x376A3A2CDCD12581EFFF13EE4AD44C4044B8A0524C42422A7E1E181E4DEECCEC
expected = Point(
0x14890E61FCD4B0BD92E5B36C81372CA6FED471EF3AA60A3E415EE4FE987DABA1,
0x297B858D9F752AB42D3BCA67EE0EB6DCD1C2B7B0DBE23397E66ADC272263F982,
curve=secp256k1
)
R = m * secp256k1.G
self.assertEqual(R, expected)
m = 0x1B22644A7BE026548810C378D0B2994EEFA6D2B9881803CB02CEFF865287D1B9
expected = Point(
0xF73C65EAD01C5126F28F442D087689BFA08E12763E0CEC1D35B01751FD735ED3,
0xF449A8376906482A84ED01479BD18882B919C140D638307F0C0934BA12590BDE,
curve=secp256k1
)
R = m * secp256k1.G
self.assertEqual(R, expected)
def test_arbitrary_arithmetic(self):
for _ in range(100):
curve = choice(CURVES)
a, b = randint(0, curve.q), randint(0, curve.q)
c = (a + b) % curve.q
P, Q = a * curve.G, b * curve.G
R = c * curve.G
pq_sum, qp_sum = P + Q, Q + P
self.assertTrue(pq_sum == qp_sum)
self.assertTrue(qp_sum == R)
class TestNonceGeneration(unittest.TestCase):
def test_rfc_6979(self):
msg = 'sample'
x = 0x09A4D6792295A7F730FC3F2B49CBC0F62E862272F
q = 0x4000000000000000000020108A2E0CC0D99F8A5EF
expected = 0x09744429FA741D12DE2BE8316E35E84DB9E5DF1CD
nonce = RFC6979(msg, x, q, sha1).gen_nonce()
self.assertTrue(nonce == expected)
expected = 0x323E7B28BFD64E6082F5B12110AA87BC0D6A6E159
nonce = RFC6979(msg, x, q, sha224).gen_nonce()
self.assertTrue(nonce == expected)
expected = 0x23AF4074C90A02B3FE61D286D5C87F425E6BDD81B
nonce = RFC6979(msg, x, q, sha256).gen_nonce()
self.assertTrue(nonce == expected)
expected = 0x2132ABE0ED518487D3E4FA7FD24F8BED1F29CCFCE
nonce = RFC6979(msg, x, q, sha384).gen_nonce()
self.assertTrue(nonce == expected)
expected = 0x00BBCC2F39939388FDFE841892537EC7B1FF33AA3
nonce = RFC6979(msg, x, q, sha512).gen_nonce()
self.assertTrue(nonce == expected)
class TestPrimeFieldECDSA(unittest.TestCase):
""" case taken from http://tools.ietf.org/html/rfc6979#appendix-A.2.5 """
def test_ecdsa_P256_SHA1_sign(self):
d = 0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721
expected = (
0x61340C88C3AAEBEB4F6D667F672CA9759A6CCAA9FA8811313039EE4A35471D32,
0x6D7F147DAC089441BB2E2FE8F7A3FA264B9C475098FDCF6E00D7C996E1B8B7EB,
)
sig = sign('sample', d, curve=P256, hashfunc=sha1)
self.assertEqual(sig, expected)
Q = d * P256.G
self.assertTrue(verify(sig, 'sample', Q, curve=P256, hashfunc=sha1))
""" case taken from http://tools.ietf.org/html/rfc6979#appendix-A.2.5 """
def test_ecdsa_P256_SHA224_sign(self):
d = 0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721
expected = (
0x53B2FFF5D1752B2C689DF257C04C40A587FABABB3F6FC2702F1343AF7CA9AA3F,
0xB9AFB64FDC03DC1A131C7D2386D11E349F070AA432A4ACC918BEA988BF75C74C
)
sig = sign('sample', d, curve=P256, hashfunc=sha224)
self.assertEqual(sig, expected)
Q = d * P256.G
self.assertTrue(verify(sig, 'sample', Q, curve=P256, hashfunc=sha224))
""" case taken from http://tools.ietf.org/html/rfc6979#appendix-A.2.5 """
def test_ecdsa_P256_SHA2_sign(self):
d = 0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721
expected = (
0xEFD48B2AACB6A8FD1140DD9CD45E81D69D2C877B56AAF991C34D0EA84EAF3716,
0xF7CB1C942D657C41D436C7A1B6E29F65F3E900DBB9AFF4064DC4AB2F843ACDA8
)
sig = sign('sample', d, curve=P256, hashfunc=sha256)
self.assertEqual(sig, expected)
Q = d * P256.G
self.assertTrue(verify(sig, 'sample', Q, curve=P256, hashfunc=sha256))
""" case taken from http://tools.ietf.org/html/rfc6979#appendix-A.2.5 """
def test_ecdsa_P256_SHA384_sign(self):
d = 0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721
expected = (
0x0EAFEA039B20E9B42309FB1D89E213057CBF973DC0CFC8F129EDDDC800EF7719,
0x4861F0491E6998B9455193E34E7B0D284DDD7149A74B95B9261F13ABDE940954
)
sig = sign('sample', d, curve=P256, hashfunc=sha384)
self.assertEqual(sig, expected)
Q = d * P256.G
self.assertTrue(verify(sig, 'sample', Q, curve=P256, hashfunc=sha384))
""" case taken from http://tools.ietf.org/html/rfc6979#appendix-A.2.5 """
def test_ecdsa_P256_SHA512_sign(self):
d = 0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721
expected = (
0x8496A60B5E9B47C825488827E0495B0E3FA109EC4568FD3F8D1097678EB97F00,
0x2362AB1ADBE2B8ADF9CB9EDAB740EA6049C028114F2460F96554F61FAE3302FE
)
sig = sign('sample', d, curve=P256, hashfunc=sha512)
self.assertEqual(sig, expected)
Q = d * P256.G
self.assertTrue(verify(sig, 'sample', Q, curve=P256, hashfunc=sha512))
""" case taken from https://www.nsa.gov/ia/_files/ecdsa.pdf """
def test_ecdsa_P256_verify(self):
Q = Point(
0x8101ece47464a6ead70cf69a6e2bd3d88691a3262d22cba4f7635eaff26680a8,
0xd8a12ba61d599235f67d9cb4d58f1783d3ca43e78f0a5abaa624079936c0c3a9,
curve=P256
)
msg = 'This is only a test message. It is 48 bytes long'
sig = (
0x7214bc9647160bbd39ff2f80533f5dc6ddd70ddf86bb815661e805d5d4e6f27c,
0x7d1ff961980f961bdaa3233b6209f4013317d3e3f9e1493592dbeaa1af2bc367
)
self.assertTrue(verify(sig, msg, Q, curve=P256, hashfunc=sha256))
sig = (
0x7214bc9647160bbd39ff2f80533f5dc6ddd70ddf86bb815661e805d5d4e6fbad,
0x7d1ff961980f961bdaa3233b6209f4013317d3e3f9e1493592dbeaa1af2bc367
)
self.assertFalse(verify(sig, msg, Q, curve=P256, hashfunc=sha256))
class TestP192ECDSA(unittest.TestCase):
def test_rfc6979(self):
text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
curve_tests = findall(r'curve: NIST P-192(.*)curve: NIST P-224', text, flags=DOTALL)[0]
q = int(findall(r'q = ([0-9A-F]*)', curve_tests)[0], 16)
x = int(findall(r'x = ([0-9A-F]*)', curve_tests)[0], 16)
test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
r'\s*k = ([0-9A-F]*)\n' \
r'\s*r = ([0-9A-F]*)\n' \
r'\s*s = ([0-9A-F]*)\n'
hash_lookup = {
'1': sha1,
'224': sha224,
'256': sha256,
'384': sha384,
'512': sha512
}
for test in findall(test_regex, curve_tests):
h = hash_lookup[test[0]]
msg = test[1]
k = int(test[2], 16)
r = int(test[3], 16)
s = int(test[4], 16)
self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
self.assertEqual((r, s), sign(msg, x, curve=P192, hashfunc=h))
class TestP224ECDSA(unittest.TestCase):
def test_rfc6979(self):
text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
curve_tests = findall(r'curve: NIST P-224(.*)curve: NIST P-256', text, flags=DOTALL)[0]
q = int(findall(r'q = ([0-9A-F]*)', curve_tests)[0], 16)
x = int(findall(r'x = ([0-9A-F]*)', curve_tests)[0], 16)
test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
r'\s*k = ([0-9A-F]*)\n' \
r'\s*r = ([0-9A-F]*)\n' \
r'\s*s = ([0-9A-F]*)\n'
hash_lookup = {
'1': sha1,
'224': sha224,
'256': sha256,
'384': sha384,
'512': sha512
}
for test in findall(test_regex, curve_tests):
h = hash_lookup[test[0]]
msg = test[1]
k = int(test[2], 16)
r = int(test[3], 16)
s = int(test[4], 16)
self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
self.assertEqual((r, s), sign(msg, x, curve=P224, hashfunc=h))
class TestP256ECDSA(unittest.TestCase):
def test_rfc6979(self):
text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
curve_tests = findall(r'curve: NIST P-256(.*)curve: NIST P-384', text, flags=DOTALL)[0]
q = int(findall(r'q = ([0-9A-F]*)', curve_tests)[0], 16)
x = int(findall(r'x = ([0-9A-F]*)', curve_tests)[0], 16)
test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
r'\s*k = ([0-9A-F]*)\n' \
r'\s*r = ([0-9A-F]*)\n' \
r'\s*s = ([0-9A-F]*)\n'
hash_lookup = {
'1': sha1,
'224': sha224,
'256': sha256,
'384': sha384,
'512': sha512
}
for test in findall(test_regex, curve_tests):
h = hash_lookup[test[0]]
msg = test[1]
k = int(test[2], 16)
r = int(test[3], 16)
s = int(test[4], 16)
self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
self.assertEqual((r, s), sign(msg, x, curve=P256, hashfunc=h))
class TestP384ECDSA(unittest.TestCase):
def test_rfc6979(self):
text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
curve_tests = findall(r'curve: NIST P-384(.*)curve: NIST P-521', text, flags=DOTALL)[0]
q_parts = findall(r'q = ([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
q = int(q_parts[0] + q_parts[1], 16)
x_parts = findall(r'x = ([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
x = int(x_parts[0] + x_parts[1], 16)
test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
r'\s*k = ([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
r'\s*r = ([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
r'\s*s = ([0-9A-F]*)\n\s*([0-9A-F]*)\n'
hash_lookup = {
'1': sha1,
'224': sha224,
'256': sha256,
'384': sha384,
'512': sha512
}
for test in findall(test_regex, curve_tests):
h = hash_lookup[test[0]]
msg = test[1]
k = int(test[2] + test[3], 16)
r = int(test[4] + test[5], 16)
s = int(test[6] + test[7], 16)
self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
self.assertEqual((r, s), sign(msg, x, curve=P384, hashfunc=h))
class TestP521ECDSA(unittest.TestCase):
def test_rfc6979(self):
text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
curve_tests = findall(r'curve: NIST P-521(.*)curve: NIST K-163', text, flags=DOTALL)[0]
q_parts = findall(r'q = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
q = int(q_parts[0] + q_parts[1] + q_parts[2], 16)
x_parts = findall(r'x = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
x = int(x_parts[0] + x_parts[1] + x_parts[2], 16)
test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
r'\s*k = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
r'\s*r = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
r'\s*s = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)\n'
hash_lookup = {
'1': sha1,
'224': sha224,
'256': sha256,
'384': sha384,
'512': sha512
}
for test in findall(test_regex, curve_tests):
h = hash_lookup[test[0]]
msg = test[1]
k = int(test[2] + test[3] + test[4], 16)
r = int(test[5] + test[6] + test[7], 16)
s = int(test[8] + test[9] + test[10], 16)
self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
self.assertEqual((r, s), sign(msg, x, curve=P521, hashfunc=h))
class TestBrainpoolECDH(unittest.TestCase):
def test_256bit(self):
# https://tools.ietf.org/html/rfc7027#appendix-A.1
dA = 0x81DB1EE100150FF2EA338D708271BE38300CB54241D79950F77B063039804F1D
qA = dA * brainpoolP256r1.G
self.assertEqual(qA.x, 0x44106E913F92BC02A1705D9953A8414DB95E1AAA49E81D9E85F929A8E3100BE5)
self.assertEqual(qA.y, 0x8AB4846F11CACCB73CE49CBDD120F5A900A69FD32C272223F789EF10EB089BDC)
dB = 0x55E40BC41E37E3E2AD25C3C6654511FFA8474A91A0032087593852D3E7D76BD3
qB = dB * brainpoolP256r1.G
self.assertEqual(qB.x, 0x8D2D688C6CF93E1160AD04CC4429117DC2C41825E1E9FCA0ADDD34E6F1B39F7B)
self.assertEqual(qB.y, 0x990C57520812BE512641E47034832106BC7D3E8DD0E4C7F1136D7006547CEC6A)
self.assertEqual((dA * qB).x, (dB * qA).x)
self.assertEqual((dA * qB).y, (dB * qA).y)
Z = dA * qB
self.assertEqual(Z.x, 0x89AFC39D41D3B327814B80940B042590F96556EC91E6AE7939BCE31F3A18BF2B)
self.assertEqual(Z.y, 0x49C27868F4ECA2179BFD7D59B1E3BF34C1DBDE61AE12931648F43E59632504DE)
def test_384bit(self):
# https://tools.ietf.org/html/rfc7027#appendix-A.2
dA = int('1E20F5E048A5886F1F157C74E91BDE2B98C8B52D58E5003D57053FC4B0BD6'
'5D6F15EB5D1EE1610DF870795143627D042', 16)
qA = dA * brainpoolP384r1.G
self.assertEqual(
qA.x,
int('68B665DD91C195800650CDD363C625F4E742E8134667B767B1B47679358'
'8F885AB698C852D4A6E77A252D6380FCAF068', 16)
)
self.assertEqual(
qA.y,
int('55BC91A39C9EC01DEE36017B7D673A931236D2F1F5C83942D049E3FA206'
'07493E0D038FF2FD30C2AB67D15C85F7FAA59', 16)
)
dB = int('032640BC6003C59260F7250C3DB58CE647F98E1260ACCE4ACDA3DD869F74E'
'01F8BA5E0324309DB6A9831497ABAC96670', 16)
qB = dB * brainpoolP384r1.G
self.assertEqual(
qB.x,
int('4D44326F269A597A5B58BBA565DA5556ED7FD9A8A9EB76C25F46DB69D19'
'DC8CE6AD18E404B15738B2086DF37E71D1EB4', 16)
)
self.assertEqual(
qB.y,
int('62D692136DE56CBE93BF5FA3188EF58BC8A3A0EC6C1E151A21038A42E91'
'85329B5B275903D192F8D4E1F32FE9CC78C48', 16)
)
self.assertEqual((dA * qB).x, (dB * qA).x)
self.assertEqual((dA * qB).y, (dB * qA).y)
Z = dA * qB
self.assertEqual(
Z.x,
int('0BD9D3A7EA0B3D519D09D8E48D0785FB744A6B355E6304BC51C229FBBCE2'
'39BBADF6403715C35D4FB2A5444F575D4F42', 16)
)
self.assertEqual(
Z.y,
int('0DF213417EBE4D8E40A5F76F66C56470C489A3478D146DECF6DF0D94BAE9'
'E598157290F8756066975F1DB34B2324B7BD', 16)
)
def test_512bit(self):
# https://tools.ietf.org/html/rfc7027#appendix-A.2
dA = int('16302FF0DBBB5A8D733DAB7141C1B45ACBC8715939677F6A56850A38BD87B'
'D59B09E80279609FF333EB9D4C061231FB26F92EEB04982A5F1D1764CAD57665422', 16)
qA = dA * brainpoolP512r1.G
self.assertEqual(
qA.x,
int('0A420517E406AAC0ACDCE90FCD71487718D3B953EFD7FBEC5F7F27E28C6'
'149999397E91E029E06457DB2D3E640668B392C2A7E737A7F0BF04436D11640FD09FD', 16)
)
self.assertEqual(
qA.y,
int('72E6882E8DB28AAD36237CD25D580DB23783961C8DC52DFA2EC138AD472'
'A0FCEF3887CF62B623B2A87DE5C588301EA3E5FC269B373B60724F5E82A6AD147FDE7', 16)
)
dB = int('230E18E1BCC88A362FA54E4EA3902009292F7F8033624FD471B5D8ACE49D1'
'2CFABBC19963DAB8E2F1EBA00BFFB29E4D72D13F2224562F405CB80503666B25429', 16)
qB = dB * brainpoolP512r1.G
self.assertEqual(
qB.x,
int('9D45F66DE5D67E2E6DB6E93A59CE0BB48106097FF78A081DE781CDB31FC'
'E8CCBAAEA8DD4320C4119F1E9CD437A2EAB3731FA9668AB268D871DEDA55A5473199F', 16)
)
self.assertEqual(
qB.y,
int('2FDC313095BCDD5FB3A91636F07A959C8E86B5636A1E930E8396049CB48'
'1961D365CC11453A06C719835475B12CB52FC3C383BCE35E27EF194512B71876285FA', 16)
)
self.assertEqual((dA * qB).x, (dB * qA).x)
self.assertEqual((dA * qB).y, (dB * qA).y)
Z = dA * qB
self.assertEqual(
Z.x,
int('A7927098655F1F9976FA50A9D566865DC530331846381C87256BAF322624'
'4B76D36403C024D7BBF0AA0803EAFF405D3D24F11A9B5C0BEF679FE1454B21C4CD1F', 16)
)
self.assertEqual(
Z.y,
int('7DB71C3DEF63212841C463E881BDCF055523BD368240E6C3143BD8DEF8B3'
'B3223B95E0F53082FF5E412F4222537A43DF1C6D25729DDB51620A832BE6A26680A2', 16)
)
class TestKeys(unittest.TestCase):
def test_gen_private_key(self):
class FakeCurve():
def __init__(self, q):
self.q = q
class FakeRandom():
def __init__(self, values=b"\x00"):
self.values = values
self.pos = 0
def __call__(self, nb):
result = self.values[self.pos:self.pos + nb]
self.pos += nb
return result
self.assertEqual(gen_private_key(FakeCurve(2), randfunc=FakeRandom(b"\x00")), 0)
# 1 byte / 6 bits shaved off + the first try is lower than the order
self.assertEqual(gen_private_key(FakeCurve(2), randfunc=FakeRandom(b"\x40")), 1)
# 1 byte / 6 bits shaved off + the first try is higher than the order
self.assertEqual(gen_private_key(FakeCurve(2), randfunc=FakeRandom(b"\xc0\x40")), 1)
self.assertEqual(gen_private_key(FakeCurve(2), randfunc=FakeRandom(b"\xc0\x00")), 0)
# 2 byte / 3 are shaved off, the first try is lower than the order.
self.assertEqual(gen_private_key(FakeCurve(8191), randfunc=FakeRandom(b"\xff\xf0")), 8190)
# 2 byte / 3 are shaved off
# first try : _bytes_to_int("\xff\xf8") >> 3 == 8191 (too high for order 8191)
# second try : _bytes_to_int("\xff\xf0") >> 3 == 8190 (ok for order 8191)
self.assertEqual(gen_private_key(FakeCurve(8191), randfunc=FakeRandom(b"\xff\xf8\xff\xf0")), 8190)
# Same but with a different second try value
self.assertEqual(gen_private_key(FakeCurve(8191), randfunc=FakeRandom(b"\xff\xf8\xff\xef")), 8189)
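# The assertions above exercise a rejection-sampling scheme: draw enough random
# bytes to cover the order, shave off the excess high bits, and retry whenever
# the candidate exceeds q - 1. A minimal sketch of that idea (an illustration of
# the technique being tested, not the library's actual gen_private_key):
def _sketch_gen_private_key(curve, randfunc):
    qbits = curve.q.bit_length()
    nbytes = (qbits + 7) // 8        # bytes needed to cover the order
    shift = 8 * nbytes - qbits       # excess bits to shave off
    while True:
        candidate = int.from_bytes(randfunc(nbytes), 'big') >> shift
        if candidate <= curve.q - 1:  # retry until the candidate fits the order
            return candidate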
class TestAsn1(unittest.TestCase):
def test_generate_and_parse_pem(self):
d, Q = gen_keypair(P256)
export_key(d, curve=P256, filepath='p256.key')
export_key(Q, curve=P256, filepath='p256.pub')
parsed_d, parsed_Q = import_key('p256.key')
self.assertEqual(parsed_d, d)
self.assertEqual(parsed_Q, Q)
parsed_d, parsed_Q = import_key('p256.pub')
self.assertTrue(parsed_d is None)
self.assertEqual(parsed_Q, Q)
remove('p256.key')
remove('p256.pub')
class TestSignatureDERencoding(unittest.TestCase):
def test_encode_der_signature(self):
self.assertEqual(DEREncoder.encode_signature(r=1, s=2), b"\x30" # SEQUENCE
b"\x06" # Length of Sequence
b"\x02" # INTEGER
b"\x01" # Length of r
b"\x01" # r
b"\x02" # INTEGER
b"\x01" # Length of s
b"\x02") # s
# Check that we add a zero byte when the number's highest bit is set
self.assertEqual(DEREncoder.encode_signature(r=128, s=128),
b"0\x08\x02\x02\x00\x80\x02\x02\x00\x80")
def test_decode_der_signature(self):
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"") # length to shot
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x31\x06\x02\x01\x01\x02\x01\x02") # invalid SEQUENCE marker
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x07\x02\x01\x01\x02\x01\x02") # invalid length
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x03\x01\x02\x01\x02") # invalid length of r
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x01\x01\x03\x01\x02") # invalid length of s
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x03\x01\x01\x02\x01\x02") # invalid INTEGER marker for r
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x00\x02\x01\x02") # length of r is 0
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x01\x81\x02\x01\x02") # value of r is negative
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x07\x02\x02\x00\x01\x02\x01\x02") # value of r starts with a zero byte
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x01\x01\x03\x01\x02") # invalid INTEGER marker for s
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x01\x01\x02\x00") # value of s is 0
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x06\x02\x01\x01\x02\x01\x81") # value of s is negative
with self.assertRaises(InvalidDerSignature):
DEREncoder.decode_signature(b"\x30\x07\x02\x01\x01\x02\x02\x00\x02") # value of s starts with a zero byte
self.assertEqual(DEREncoder.decode_signature(b"\x30\x06\x02\x01\x01\x02\x01\x02"), (1, 2))
self.assertEqual(DEREncoder.decode_signature(b"0\x08\x02\x02\x00\x80\x02\x02\x00\x80"),
(128, 128)) # verify zero bytes
self.assertEqual(DEREncoder.decode_signature(b"0\x08\x02\x02\x03\xE8\x02\x02\x03\xE8"),
(1000, 1000)) # verify byte order
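# The byte-level comments in the tests above describe the DER layout being
# checked: a SEQUENCE header and length, then r and s as INTEGERs, each padded
# with a leading zero byte whenever its most significant bit is set. A minimal
# sketch of that rule (short-form lengths only; illustrative, not DEREncoder
# itself):
def _sketch_encode_der_signature(r, s):
    def encode_integer(value):
        body = value.to_bytes((value.bit_length() + 7) // 8 or 1, 'big')
        if body[0] & 0x80:  # prepend 0x00 so the INTEGER stays positive
            body = b"\x00" + body
        return b"\x02" + bytes([len(body)]) + body
    content = encode_integer(r) + encode_integer(s)
    return b"\x30" + bytes([len(content)]) + content
# _sketch_encode_der_signature(1, 2) == b"\x30\x06\x02\x01\x01\x02\x01\x02"
# _sketch_encode_der_signature(128, 128) == b"0\x08\x02\x02\x00\x80\x02\x02\x00\x80"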
class TestEncodePublicKey(unittest.TestCase):
def test_SEC1_encode_public_key(self):
# 1/ PrivateKey generated using openssl "openssl ecparam -name secp256k1 -genkey -out ec-priv.pem"
# 2/ Printed using "openssl ec -in ec-priv.pem -text -noout" and converted to numeric using "asn1._bytes_to_int"
priv_key = 7002880736699640265110069622773736733141182416793484574964618597954446769264
pubkey_compressed = hexlify(SEC1Encoder.encode_public_key(secp256k1.G * priv_key))
pubkey_uncompressed = hexlify(SEC1Encoder.encode_public_key(secp256k1.G * priv_key, compressed=False))
# 3/ PublicKey extracted using "openssl ec -in ec-priv.pem -pubout -out ec-pub.pem"
# 4/ Encoding verified using openssl "openssl ec -in ec-pub.pem -pubin -text -noout -conv_form compressed"
self.assertEqual(pubkey_compressed, b'02e5e2c01985aafb6e2c3ad49f3db5ccc54b2e63343af405b521303d0f35835062')
self.assertEqual(pubkey_uncompressed, b'04e5e2c01985aafb6e2c3ad49f3db5ccc54b2e63343af405b521303d0f3583506'
b'23dad76df888abde5ed0cc5af1b83968edffcae5d70bedb24fdc18bb5f79499d0')
# Same with P256 Curve
priv_P256 = 807015861248675637760562792774171551137308512372870683367415858378856470633
pubkey_compressed = hexlify(SEC1Encoder.encode_public_key(P256.G * priv_P256))
pubkey_uncompressed = hexlify(SEC1Encoder.encode_public_key(P256.G * priv_P256, compressed=False))
self.assertEqual(pubkey_compressed, b'<KEY>')
self.assertEqual(pubkey_uncompressed, b'<KEY>'
b'c9a7d581bcf2aba680b53cedbade03be62fe95869da04a168a458f369ac6a823e')
# And secp192k1 Curve
priv_secp192k1 = 5345863567856687638748079156318679969014620278806295592453
pubkey_compressed = hexlify(SEC1Encoder.encode_public_key(secp192k1.G * priv_secp192k1))
pubkey_uncompressed = hexlify(SEC1Encoder.encode_public_key(secp192k1.G * priv_secp192k1, compressed=False))
self.assertEqual(pubkey_compressed, b'<KEY>')
self.assertEqual(pubkey_uncompressed, b'<KEY>'
b'df07a73819149e8d903aa983e52ab1cff38f0d381f940d361')
def test_SEC1_decode_public_key(self):
expected_public = Point(x=0xe5e2c01985aafb6e2c3ad49f3db5ccc54b2e63343af405b521303d0f35835062,
y=0x3dad76df888abde5ed0cc5af1b83968edffcae5d70bedb24fdc18bb5f79499d0,
curve=secp256k1)
public_from_compressed = SEC1Encoder.decode_public_key(
unhexlify(b'02e5e2c01985aafb6e2c3ad49f3db5ccc54b2e63343af405b521303d0f35835062'), secp256k1)
public_from_uncompressed = SEC1Encoder.decode_public_key(
unhexlify(b'04e5e2c01985aafb6e2c3ad49f3db5ccc54b2e63343af405b521303d0f3583506'
b'23dad76df888abde5ed0cc5af1b83968edffcae5d70bedb24fdc18bb5f79499d0'), secp256k1)
# Same values as in "test_SEC1_encode_public_key", verified using openssl
self.assertEqual(public_from_compressed, expected_public)
self.assertEqual(public_from_uncompressed, expected_public)
with self.assertRaises(InvalidSEC1PublicKey) as e:
SEC1Encoder.decode_public_key(b'\x02', secp256k1) # invalid compressed length
self.assertEqual(e.exception.args[0], "A compressed public key must be 33 bytes long")
with self.assertRaises(InvalidSEC1PublicKey) as e:
SEC1Encoder.decode_public_key(b'\x04', secp256k1) # invalid uncompressed length
self.assertEqual(e.exception.args[0], "An uncompressed public key must be 65 bytes long")
with self.assertRaises(InvalidSEC1PublicKey) as e:
# invalid prefix value
SEC1Encoder.decode_public_key(
unhexlify(b'05e5e2c01985aafb6e2c3ad49f3db5ccc54b2e63343af405b521303d0f35835062'), secp256k1)
self.assertEqual(e.exception.args[0], "Wrong key format")
# With P256, same values as in "test_SEC1_encode_public_key", verified using openssl
expected_P256 = Point(x=0x12c9ddf64b0d1f1d91d9bd729abfb880079fa889d66604cc0b78c9cbc271824c,
y=0x9a7d581bcf2aba680b53cedbade03be62fe95869da04a168a458f369ac6a823e,
curve=P256)
public_from_compressed = SEC1Encoder.decode_public_key(
unhexlify(b'<KEY>'), P256)
self.assertEqual(public_from_compressed, expected_P256)
# With P256, same values as in "test_SEC1_encode_public_key", verified using openssl
expected_secp192k1 = Point(x=0xa3bec5fba6d13e51fb55bd88dd097cb9b04f827bc151d22d,
y=0xf07a73819149e8d903aa983e52ab1cff38f0d381f940d361,
curve=secp192k1)
public_from_compressed = SEC1Encoder.decode_public_key(
unhexlify(b'<KEY>'), secp192k1)
self.assertEqual(public_from_compressed, expected_secp192k1)
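# The openssl-derived vectors above follow the SEC1 point format: 0x04 followed
# by both coordinates for uncompressed keys, or 0x02/0x03 (picked by the parity
# of y) followed by x alone for compressed keys, with each coordinate padded to
# the field byte length (32 for secp256k1, hence the 33-byte compressed keys).
# A rough sketch of the compressed branch under those assumptions (not
# SEC1Encoder's real code):
def _sketch_sec1_compress(point, field_bytes):
    prefix = b"\x02" if point.y % 2 == 0 else b"\x03"
    return prefix + point.x.to_bytes(field_bytes, 'big')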
class TestKeyRecovery(unittest.TestCase):
def test_key_recovery(self):
for curve in CURVES:
d, Q = gen_keypair(curve)
msg = 'https://crypto.stackexchange.com/questions/18105/how-does-recovering-the-' \
'public-key-from-an-ecdsa-signature-work'
sig = sign(msg, d, curve=curve)
Qs = get_public_keys_from_sig(sig, msg, curve=curve, hashfunc=sha256)
self.assertTrue(Q in Qs)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1202kbs/Understanding-NN",
"score": 3
} |
#### File: Understanding-NN/models/models_2_1.py
```python
import tensorflow as tf
class MNIST_DNN:
def __init__(self, name):
self.name = name
def __call__(self, X, training, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
with tf.variable_scope('layer1'):
dense1 = tf.layers.dense(inputs=X, units=512, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(inputs=dense1, rate=0.7, training=training)
with tf.variable_scope('layer2'):
dense2 = tf.layers.dense(inputs=dropout1, units=512, activation=tf.nn.relu)
dropout2 = tf.layers.dropout(inputs=dense2, rate=0.7, training=training)
with tf.variable_scope('layer3'):
dense3 = tf.layers.dense(inputs=dropout2, units=512, activation=tf.nn.relu)
dropout3 = tf.layers.dropout(inputs=dense3, rate=0.7, training=training)
with tf.variable_scope('layer4'):
dense4 = tf.layers.dense(inputs=dropout3, units=512, activation=tf.nn.relu)
dropout4 = tf.layers.dropout(inputs=dense4, rate=0.7, training=training)
with tf.variable_scope('layer5'):
logits = tf.layers.dense(inputs=dropout4, units=10)
return logits
@property
def vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
class MNIST_CNN:
def __init__(self, name):
self.name = name
def __call__(self, X, training, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
X_img = tf.reshape(X, [-1, 28, 28, 1])
# Convolutional Layer #1 and Pooling Layer #1
with tf.variable_scope('layer1'):
conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], padding="SAME", strides=2)
dropout1 = tf.layers.dropout(inputs=pool1, rate=0.7, training=training)
# Convolutional Layer #2 and Pooling Layer #2
with tf.variable_scope('layer2'):
conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], padding="SAME", strides=2)
dropout2 = tf.layers.dropout(inputs=pool2, rate=0.7, training=training)
# Convolutional Layer #3 and Pooling Layer #3
with tf.variable_scope('layer3'):
conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], padding="SAME", strides=2)
dropout3 = tf.layers.dropout(inputs=pool3, rate=0.7, training=training)
# Dense Layer with Relu
with tf.variable_scope('layer4'):
flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
dense4 = tf.layers.dense(inputs=flat, units=625, activation=tf.nn.relu)
dropout4 = tf.layers.dropout(inputs=dense4, rate=0.5, training=training)
# Logits (no activation) Layer: L5 Final FC 625 inputs -> 10 outputs
with tf.variable_scope('layer5'):
logits = tf.layers.dense(inputs=dropout4, units=10)
return logits
@property
def vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
```
#### File: Understanding-NN/models/models_2_3.py
```python
from tensorflow.python.ops import nn_ops, gen_nn_ops
import tensorflow as tf
class MNIST_NN:
def __init__(self, name):
self.name = name
def __call__(self, X, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
dense1 = tf.layers.dense(inputs=X, units=512, activation=tf.nn.relu, use_bias=True, name='layer1')
dense2 = tf.layers.dense(inputs=dense1, units=128, activation=tf.nn.relu, use_bias=True, name='layer2')
logits = tf.layers.dense(inputs=dense2, units=10, activation=None, use_bias=True, name='layer3')
prediction = tf.nn.softmax(logits)
return [dense1, dense2, prediction], logits
@property
def vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
class MNIST_DNN:
def __init__(self, name):
self.name = name
def __call__(self, X, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
dense1 = tf.layers.dense(inputs=X, units=512, activation=tf.nn.relu, use_bias=True)
dense2 = tf.layers.dense(inputs=dense1, units=512, activation=tf.nn.relu, use_bias=True)
dense3 = tf.layers.dense(inputs=dense2, units=512, activation=tf.nn.relu, use_bias=True)
dense4 = tf.layers.dense(inputs=dense3, units=512, activation=tf.nn.relu, use_bias=True)
logits = tf.layers.dense(inputs=dense4, units=10, activation=None, use_bias=True)
prediction = tf.nn.softmax(logits)
return [dense1, dense2, dense3, dense4, prediction], logits
@property
def vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
class LRP:
def __init__(self, alpha, activations, weights, biases, conv_ksize, pool_ksize, conv_strides, pool_strides, name):
self.alpha = alpha
self.activations = activations
self.weights = weights
self.biases = biases
self.conv_ksize = conv_ksize
self.pool_ksize = pool_ksize
self.conv_strides = conv_strides
self.pool_strides = pool_strides
self.name = name
def __call__(self, logit):
with tf.name_scope(self.name):
Rs = []
j = 0
for i in range(len(self.activations) - 1):
if i == 0:
Rs.append(self.activations[i][:,logit,None])
Rs.append(self.backprop_dense(self.activations[i + 1], self.weights[j][:,logit,None], self.biases[j][logit,None], Rs[-1]))
j += 1
continue
elif 'dense' in self.activations[i].name.lower():
Rs.append(self.backprop_dense(self.activations[i + 1], self.weights[j], self.biases[j], Rs[-1]))
j += 1
elif 'reshape' in self.activations[i].name.lower():
shape = self.activations[i + 1].get_shape().as_list()
shape[0] = -1
Rs.append(tf.reshape(Rs[-1], shape))
elif 'conv' in self.activations[i].name.lower():
Rs.append(self.backprop_conv(self.activations[i + 1], self.weights[j], self.biases[j], Rs[-1], self.conv_strides))
j += 1
elif 'pooling' in self.activations[i].name.lower():
if 'max' in self.activations[i].name.lower():
pooling_type = 'max'
else:
pooling_type = 'avg'
Rs.append(self.backprop_pool(self.activations[i + 1], Rs[-1], self.pool_ksize, self.pool_strides, pooling_type))
else:
raise ValueError('Unknown operation.')
return Rs[-1]
def backprop_conv(self, activation, kernel, bias, relevance, strides, padding='SAME'):
W_p = tf.maximum(0., kernel)
b_p = tf.maximum(0., bias)
z_p = nn_ops.conv2d(activation, W_p, strides, padding) + b_p
s_p = relevance / z_p
c_p = nn_ops.conv2d_backprop_input(tf.shape(activation), W_p, s_p, strides, padding)
W_n = tf.minimum(0., kernel)
b_n = tf.minimum(0., bias)
z_n = nn_ops.conv2d(activation, W_n, strides, padding) + b_n
s_n = relevance / z_n
c_n = nn_ops.conv2d_backprop_input(tf.shape(activation), W_n, s_n, strides, padding)
return activation * (self.alpha * c_p + (1 - self.alpha) * c_n)
def backprop_pool(self, activation, relevance, ksize, strides, pooling_type, padding='SAME'):
if pooling_type.lower() == 'avg':
z = nn_ops.avg_pool(activation, ksize, strides, padding) + 1e-10
s = relevance / z
c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides, padding)
return activation * c
else:
z = nn_ops.max_pool(activation, ksize, strides, padding) + 1e-10
s = relevance / z
c = gen_nn_ops._max_pool_grad(activation, z, s, ksize, strides, padding)
return activation * c
def backprop_dense(self, activation, kernel, bias, relevance):
W_p = tf.maximum(0., kernel)
b_p = tf.maximum(0., bias)
z_p = tf.matmul(activation, W_p) + b_p
s_p = relevance / z_p
c_p = tf.matmul(s_p, tf.transpose(W_p))
W_n = tf.minimum(0., kernel)
b_n = tf.minimum(0., bias)
z_n = tf.matmul(activation, W_n) + b_n
s_n = relevance / z_n
c_n = tf.matmul(s_n, tf.transpose(W_n))
return activation * (self.alpha * c_p + (1 - self.alpha) * c_n)
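# Note on the rule implemented above: splitting the weights/biases into positive
# (W_p, b_p) and negative (W_n, b_n) parts and recombining the two backward
# passes as activation * (alpha * c_p + (1 - alpha) * c_n) corresponds to the
# alpha-beta LRP rule with beta = alpha - 1 (e.g. alpha = 2, beta = 1), so
# relevance is, up to the bias terms, conserved from layer to layer.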
``` |
{
"source": "1204601575/CSGO_Forward_Robot",
"score": 3
} |
#### File: 1204601575/CSGO_Forward_Robot/connecting.py
```python
import pymysql # module (MySQL-related functions)
import time # module (time-related functions)
# Error logging function
def write_log(log_info):
file = open("log.txt", 'a')
file.write(log_info)
file.close()
def connect_mysql(dbconfig):
mysql_conn = pymysql.connect(
host=dbconfig['host'],
port=dbconfig['port'],
user=dbconfig['username'],
password=dbconfig['password'],
db=dbconfig['database'])
return mysql_conn
def try_and_except(func, config):
try:
rt = func(config)
except Exception as e:
write_log("[Error][" + config['host'] + ":" + str(config['port']) + "|" + str(e) + "]" + str(
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + '\n')
return None
return rt
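# A small usage sketch (the dbconfig keys mirror those read by connect_mysql
# above; the host and credentials are placeholders, not values from this
# project):
#
#   dbconfig = {'host': '127.0.0.1', 'port': 3306, 'username': 'user',
#               'password': 'secret', 'database': 'csgo'}
#   conn = try_and_except(connect_mysql, dbconfig)
#   if conn is None:
#       print('connection failed, see log.txt for details')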
``` |
{
"source": "1208592332/Daily-learning-records",
"score": 4
} |
#### File: 习题解答记录/链表/offer22_test.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def getKthFromEnd(self, head: ListNode, k: int) -> ListNode:
former, latter = head, head
for _ in range(k):
former = former.next
while former:
former, latter = former.next, latter.next
return latter
```
#### File: 习题解答记录/链表/offero6_test.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseprint(self, head: ListNode)->List[int]:
return self.reverseprint(head.next)+[head.val] if head else []
# 2.
class Solution:
def reverseprint(self, head: ListNode)->List[int]:
p, rev = head, None
while p:
rev, rev.next, p = p, rev, p.next
result = []
while rev:
result.append(rev.val)
rev = rev.next
return result
# Auxiliary stack method
# Approach:
# Linked list property: nodes can only be visited from front to back.
# Problem requirement: output the node values in reverse order.
# This "last in, first out" need can be handled with a stack.
# Algorithm:
# Push: traverse the list and push each node value onto the stack (Python uses append(); Java can use LinkedList's addLast()).
# Pop: pop each value off the stack, store it in an array and return it (Python simply returns the reversed stack list; Java builds a new array via popLast() to output in reverse).
# Complexity analysis:
# Time complexity O(N): pushing and popping take O(N) in total.
# Space complexity O(N): the auxiliary stack and the result array use O(N) extra space.
class Solution:
def reversePrint(self, head: ListNode) -> List[int]:
stack = []
while head:
stack.append(head.val)
head = head.next
return stack[::-1] # stack.reverse() return stack
``` |
{
"source": "1212087/blockchain-python-tutorial",
"score": 2
} |
#### File: blockchain-python-tutorial/server/run.py
```python
from argparse import ArgumentParser
from router import app
from blockchain import blockchain
from threading import Thread
from sync import sync
import mine
import apscheduler
from config import *
def run_blockchain():
sync.sync_overall()
if __name__ == '__main__':
sync.sync_overall()
from apscheduler.schedulers.background import BackgroundScheduler
sched = BackgroundScheduler(standalone=True)
mine.sched = sched
sched.add_job(mine.mine_for_block, kwargs={
'rounds': STANDARD_ROUNDS, 'start_nonce': 0}, id='mining') # add the block again
sched.add_listener(mine.mine_for_block_listener,
apscheduler.events.EVENT_JOB_EXECUTED) # , args=sched)
sched.start()
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port)
```
#### File: blockchain-python-tutorial/server/sync.py
```python
from block import Block
from blockchain import Blockchain
from config import *
import os
import json
import requests
import glob
import sys
sys.path.append('../')
from transaction.transaction import Transaction
class Sync():
def sync_local(self):
local_chain = Blockchain()
local_chain.restore_chain()
return local_chain
def sync_overall(self, save = False):
print(" * Start syncing...")
best_chain = self.sync_local()
best_chain_is_local_chain = True
for peer in PEERS:
peer_blockchain_url = peer + 'blockchain.json'
try:
r = requests.get(peer_blockchain_url)
peer_blockchain_dict = r.json()
print(' * Syncing from %s:' % (peer_blockchain_url))
peer_blocks = []
for peer_block in peer_blockchain_dict:
peer_blocks.append(Block(
index=peer_block['index'],
timestamp=peer_block['timestamp'],
transactions=peer_block['transactions'],
previous_hash=peer_block['previous_hash'],
diff=peer_block['diff'],
hash=peer_block['hash'],
nonce=peer_block['nonce']
))
peer_blockchain = Blockchain()
peer_blockchain.chain = peer_blocks
#sync transaction
Transaction.sync_transaction()
#sync wallet
Transaction.sync_wallet()
if peer_blockchain.is_valid_chain() and len(peer_blockchain.chain) > len(best_chain.chain):
best_chain = peer_blockchain
best_chain_is_local_chain = False
except requests.ConnectionError:
print("Peer %s is not running and can not be synced" %(peer))
else:
print(" * Syncing complete from peer %s" %(peer))
if not best_chain_is_local_chain:
best_chain.save()
return best_chain
sync = Sync()
``` |
{
"source": "1212091/python-learning",
"score": 4
} |
#### File: python-learning/basic_assignment/16.py
```python
input_data = raw_input(">")
number_list = input_data.split(",")
def is_odd_number(number):
if (int(number) % 2) != 0:
return True
return False
result = map(int, filter(is_odd_number, number_list))
print(result)
```
#### File: python-learning/basic_assignment/32.py
```python
class Circle:
def __init__(self, radius):
self.radius = radius
def compute_area(self):
return self.radius ** 2 * 3.14
circle = Circle(2)
print("Area of circuit: " + str(circle.compute_area()))
```
#### File: main/controller/project_controller.py
```python
from flask import request, jsonify, make_response, Blueprint
from database_setup import Project
from main.repository import project_repository
mod_project = Blueprint('project', __name__, url_prefix='/project')
@mod_project.route("/", methods=["POST"])
def create_project():
new_project = Project(name=request.get_json().get('name'), description=request.get_json().get('description'),
business_unit_id=request.get_json().get('business_unit_id'))
project_repository.create_project(new_project)
resp = jsonify(success=True)
resp.status_code = 200
return resp
@mod_project.route("/<int:project_id>", methods=["GET"])
def get_project_from_project_id(project_id):
project = project_repository.get_project_from_project_id(project_id)
return make_response(jsonify({'id': project.id, 'name': project.name, 'description': project.description}), 200)
@mod_project.route("/", methods=["GET"])
def get_all_projects():
projects = project_repository.get_all_projects()
projects_response = []
for project in projects:
projects_response.append({'id': project.id, 'name': project.name, 'description': project.description})
return make_response(jsonify(projects_response), 200)
@mod_project.route("/<int:project_id>", methods=["DELETE"])
def delete_project_by_project_id(project_id):
project_repository.delete_project_by_project_id(project_id)
resp = jsonify(success=True)
resp.status_code = 200
return resp
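# A minimal sketch of wiring this blueprint into an application (the Flask app
# object below is an assumption for illustration; it is not part of this file):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(mod_project)  # exposes the routes above under /project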
``` |
{
"source": "1212Prajwol-Pdl/RMG-Py",
"score": 2
} |
#### File: arkane/encorr/bac.py
```python
import csv
import importlib
import json
import logging
import os
import re
from collections import Counter, defaultdict
from typing import Dict, Iterable, List, Sequence, Set, Tuple, Union
import numpy as np
import scipy.optimize as optimize
from rmgpy.quantity import ScalarQuantity
import arkane.encorr.data as data
from arkane.encorr.data import Molecule, BACDatapoint, BACDataset, extract_dataset, geo_to_mol
from arkane.encorr.reference import ReferenceSpecies, ReferenceDatabase
from arkane.exceptions import BondAdditivityCorrectionError
from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory
class BACJob:
"""
A representation of an Arkane BAC job. This job is used to fit and
save bond additivity corrections.
"""
def __init__(self,
level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory],
bac_type: str = 'p',
write_to_database: bool = False,
overwrite: bool = False,
**kwargs):
"""
Initialize a BACJob instance.
Args:
level_of_theory: The level of theory that will be used to get training data from the RMG database.
bac_type: 'p' for Petersson-style BACs, 'm' for Melius-style BACs.
write_to_database: Save the fitted BACs directly to the RMG database.
overwrite: Overwrite BACs in the RMG database if they already exist.
kwargs: Additional parameters passed to BAC.fit.
"""
self.level_of_theory = level_of_theory
self.bac_type = bac_type
self.write_to_database = write_to_database
self.overwrite = overwrite
self.kwargs = kwargs
self.bac = BAC(level_of_theory, bac_type=bac_type)
def execute(self, output_directory: str = None, plot: bool = False, jobnum: int = 1):
"""
Execute the BAC job.
Args:
output_directory: Save the results in this directory.
plot: Save plots of results.
jobnum: Job number.
"""
logging.info(f'Running BAC job {jobnum}')
self.bac.fit(**self.kwargs)
if output_directory is not None:
os.makedirs(output_directory, exist_ok=True)
self.write_output(output_directory, jobnum=jobnum)
if plot:
self.plot(output_directory, jobnum=jobnum)
if self.write_to_database:
try:
self.bac.write_to_database(overwrite=self.overwrite)
except IOError as e:
logging.warning('Could not write BACs to database. Captured error:')
logging.warning(str(e))
def write_output(self, output_directory: str, jobnum: int = 1):
"""
Save the BACs to the `output.py` file located in
`output_directory` and save a CSV file of the results.
Args:
output_directory: Save the results in this directory.
jobnum: Job number.
"""
model_chemistry_formatted = self.level_of_theory.to_model_chem().replace('//', '__').replace('/', '_')
output_file1 = os.path.join(output_directory, 'output.py')
output_file2 = os.path.join(output_directory, f'{jobnum}_{model_chemistry_formatted}.csv')
logging.info(f'Saving results for {self.level_of_theory}...')
with open(output_file1, 'a') as f:
stats_before = self.bac.dataset.calculate_stats()
stats_after = self.bac.dataset.calculate_stats(for_bac_data=True)
f.write(f'# BAC job {jobnum}: {"Melius" if self.bac.bac_type == "m" else "Petersson"}-type BACs:\n')
f.write(f'# RMSE/MAE before fitting: {stats_before.rmse:.2f}/{stats_before.mae:.2f} kcal/mol\n')
f.write(f'# RMSE/MAE after fitting: {stats_after.rmse:.2f}/{stats_after.mae:.2f} kcal/mol\n')
f.writelines(self.bac.format_bacs())
f.write('\n')
with open(output_file2, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'Smiles',
'InChI',
'Formula',
'Multiplicity',
'Charge',
'Reference Enthalpy',
'Calculated Enthalpy',
'Corrected Enthalpy',
'Source'
])
for d in self.bac.dataset:
writer.writerow([
d.spc.smiles,
d.spc.inchi,
d.spc.formula,
d.spc.multiplicity,
d.spc.charge,
f'{d.ref_data:.3f}',
f'{d.calc_data:.3f}',
f'{d.bac_data:.3f}',
d.spc.get_preferred_source()
])
def plot(self, output_directory: str, jobnum: int = 1):
"""
Plot the distribution of errors before and after fitting BACs
and plot the parameter correlation matrix.
Args:
output_directory: Save the plots in this directory.
jobnum: Job number
"""
try:
import matplotlib.pyplot as plt
except ImportError:
return
model_chemistry_formatted = self.level_of_theory.to_model_chem().replace('//', '__').replace('/', '_')
correlation_path = os.path.join(output_directory, f'{jobnum}_{model_chemistry_formatted}_correlation.pdf')
self.bac.save_correlation_mat(correlation_path)
plt.rcParams.update({'font.size': 16})
fig_path = os.path.join(output_directory, f'{jobnum}_{model_chemistry_formatted}_errors.pdf')
fig = plt.figure(figsize=(10, 7))
ax = fig.gca()
error_before = self.bac.dataset.calc_data - self.bac.dataset.ref_data
error_after = self.bac.dataset.bac_data - self.bac.dataset.ref_data
_, _, patches = ax.hist(
(error_before, error_after),
bins=50,
label=('before fitting', 'after fitting'),
edgecolor='black',
linewidth=0.5
)
ax.set_xlabel('Error (kcal/mol)')
ax.set_ylabel('Count')
hatches = ('////', '----')
for patch_set, hatch in zip(patches, hatches):
plt.setp(patch_set, hatch=hatch)
ax.tick_params(bottom=False)
ax.set_axisbelow(True)
ax.grid()
ax.legend()
fig.savefig(fig_path, bbox_inches='tight', pad_inches=0)
class BAC:
"""
A class for deriving and applying bond additivity corrections.
"""
ref_databases = {}
atom_spins = {
'H': 0.5, 'C': 1.0, 'N': 1.5, 'O': 1.0, 'F': 0.5,
'Si': 1.0, 'P': 1.5, 'S': 1.0, 'Cl': 0.5, 'Br': 0.5, 'I': 0.5
}
exp_coeff = 3.0 # Melius-type parameter (Angstrom^-1)
def __init__(self, level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory], bac_type: str = 'p'):
"""
Initialize a BAC instance.
There are two implemented BAC types:
Petersson-type: Petersson et al., J. Chem. Phys. 1998, 109, 10570-10579
Melius-type: Anantharaman and Melius, J. Phys. Chem. A 2005, 109, 1734-1747
Args:
level_of_theory: Level of theory to get preexisting BACs or data from reference database.
bac_type: Type of BACs to get/fit ('p' for Petersson and 'm' for Melius).
"""
self._level_of_theory = self._bac_type = None # Set these first to avoid errors in setters
self.level_of_theory = level_of_theory
self.bac_type = bac_type
# Attributes related to fitting BACs for a given model chemistry
self.database_key = None # Dictionary key to access reference database
self.dataset = None # Collection of BACDatapoints in BACDataset
self.correlation = None # Correlation matrix for BAC parameters
# Define attributes for memoization during fitting
self._reset_memoization()
@property
def bac_type(self) -> str:
return self._bac_type
@bac_type.setter
def bac_type(self, val: str):
"""Check validity and update BACs every time the BAC type is changed."""
if val not in {'m', 'p'}:
raise BondAdditivityCorrectionError(f'Invalid BAC type: {val}')
self._bac_type = val
self._update_bacs()
@property
def level_of_theory(self) -> Union[LevelOfTheory, CompositeLevelOfTheory]:
return self._level_of_theory
@level_of_theory.setter
def level_of_theory(self, val: Union[LevelOfTheory, CompositeLevelOfTheory]):
"""Update BACs every time the level of theory is changed."""
self._level_of_theory = val
self._update_bacs()
def _update_bacs(self):
self.bacs = None
try:
if self.bac_type == 'm':
self.bacs = data.mbac[self.level_of_theory]
elif self.bac_type == 'p':
self.bacs = data.pbac[self.level_of_theory]
except KeyError:
pass
@classmethod
def load_database(cls,
paths: Union[str, List[str]] = None,
names: Union[str, List[str]] = None,
reload: bool = False) -> str:
"""
Load a reference database.
Args:
paths: Paths to database folders.
names: Names of database folders in RMG database.
reload: Force reload of database.
Returns:
Key to access just loaded database.
"""
paths = ReferenceDatabase.get_database_paths(paths=paths, names=names)
key = cls.get_database_key(paths)
if key not in cls.ref_databases or reload:
logging.info(f'Loading reference database from {paths}')
cls.ref_databases[key] = ReferenceDatabase()
cls.ref_databases[key].load(paths=paths)
return key
@staticmethod
def get_database_key(paths: Union[str, List[str]]) -> Union[str, Tuple[str, ...]]:
"""Get a key to access a stored reference database based on the database paths."""
if not (isinstance(paths, str) or (isinstance(paths, list) and all(isinstance(p, str) for p in paths))):
raise ValueError(f'{paths} paths is invalid')
return tuple(sorted(paths)) if isinstance(paths, list) else paths
def _reset_memoization(self):
self._alpha_coeffs = {}
self._beta_coeffs = {}
self._gamma_coeffs = {}
self._k_coeffs = {}
def get_correction(self,
bonds: Dict[str, int] = None,
coords: np.ndarray = None,
nums: Iterable[int] = None,
datapoint: BACDatapoint = None,
spc: ReferenceSpecies = None,
multiplicity: int = None) -> ScalarQuantity:
"""
Returns the bond additivity correction.
There are two bond additivity corrections currently supported.
Peterson-type corrections can be specified by setting
`self.bac_type` to 'p'. This will use the `bonds` variable,
which is a dictionary associating bond types with the number of
that bond in the molecule.
The Melius-type BAC is specified with 'm' and utilizes the atom
coordinates in `coords` and the structure's multiplicity.
Args:
bonds: A dictionary of bond types (e.g., 'C=O') with their associated counts.
coords: A Numpy array of Cartesian molecular coordinates.
nums: A sequence of atomic numbers.
datapoint: If not using bonds, coords, nums, use BACDatapoint.
spc: Alternatively, use ReferenceSpecies.
multiplicity: The spin multiplicity of the molecule.
Returns:
The bond correction to the electronic energy.
"""
if self.bacs is None:
bac_type_str = 'Melius' if self.bac_type == 'm' else 'Petersson'
raise BondAdditivityCorrectionError(
f'Missing {bac_type_str}-type BAC parameters for {self.level_of_theory}'
)
if datapoint is None and spc is not None:
datapoint = BACDatapoint(spc, level_of_theory=self.level_of_theory)
if self.bac_type == 'm':
return self._get_melius_correction(coords=coords, nums=nums, datapoint=datapoint, multiplicity=multiplicity)
elif self.bac_type == 'p':
return self._get_petersson_correction(bonds=bonds, datapoint=datapoint)
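# A hedged usage sketch for the Petersson branch (the level of theory and bond
# counts below are illustrative placeholders, not values taken from the RMG
# database):
#
#   bac = BAC(LevelOfTheory(method='wb97xd', basis='def2tzvp'), bac_type='p')
#   correction = bac.get_correction(bonds={'C-H': 4, 'C-C': 1})
#   correction.value_si  # correction in SI units; stored internally in kcal/mol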
def _get_petersson_correction(self, bonds: Dict[str, int] = None, datapoint: BACDatapoint = None) -> ScalarQuantity:
"""
Given the level of theory and a dictionary of bonds, return the
total BAC.
Args:
bonds: Dictionary of bonds with the following format:
bonds = {
'C-H': C-H_bond_count,
'C-C': C-C_bond_count,
'C=C': C=C_bond_count,
...
}
datapoint: BACDatapoint instead of bonds.
Returns:
Petersson-type bond additivity correction.
"""
if datapoint is not None:
if bonds is None:
bonds = datapoint.bonds
else:
logging.warning(f'Species {datapoint.spc.label} will not be used because `bonds` was specified')
# Sum up corrections for all bonds
bac = 0.0
for symbol, count in bonds.items():
if symbol in self.bacs:
bac += count * self.bacs[symbol]
else:
symbol_flipped = ''.join(re.findall('[a-zA-Z]+|[^a-zA-Z]+', symbol)[::-1]) # Check reversed symbol
if symbol_flipped in self.bacs:
bac += count * self.bacs[symbol_flipped]
else:
logging.warning(f'Bond correction not applied for unknown bond type {symbol}.')
return ScalarQuantity(bac, 'kcal/mol')
def _get_melius_correction(self,
coords: np.ndarray = None,
nums: Iterable[int] = None,
datapoint: BACDatapoint = None,
multiplicity: int = None,
params: Dict[str, Union[float, Dict[str, float]]] = None) -> ScalarQuantity:
"""
Given the level of theory, molecular coordinates, atomic numbers,
and dictionaries of BAC parameters, return the total BAC.
Notes:
A molecular correction term other than 0 destroys the size
consistency of the quantum chemistry method. This correction
also requires the multiplicity of the molecule.
The negative of the total correction described in
Anantharaman and Melius (JPCA 2005) is returned so that it
can be added to the energy.
Args:
coords: Numpy array of Cartesian atomic coordinates.
nums: Sequence of atomic numbers.
datapoint: BACDatapoint instead of molecule.
multiplicity: Multiplicity of the molecule (not necessary if using datapoint).
params: Optionally provide parameters other than those stored in self.
Returns:
Melius-type bond additivity correction.
"""
if params is None:
params = self.bacs
atom_corr = params['atom_corr']
bond_corr_length = params['bond_corr_length']
bond_corr_neighbor = params['bond_corr_neighbor']
mol_corr = params.get('mol_corr', 0.0)
# Get single-bonded RMG molecule
mol = None
if datapoint is not None:
if nums is None or coords is None:
mol = datapoint.to_mol(from_geo=True)
multiplicity = datapoint.spc.multiplicity # Use species multiplicity instead
else:
logging.warning(
f'Species {datapoint.spc.label} will not be used because `nums` and `coords` were specified'
)
if mol is None:
mol = geo_to_mol(coords, nums=nums)
# Molecular correction
if mol_corr != 0 and multiplicity is None:
raise BondAdditivityCorrectionError(f'Missing multiplicity for {mol}')
bac_mol = mol_corr * self._get_mol_coeff(mol, multiplicity=multiplicity)
# Atomic correction
bac_atom = sum(count * atom_corr[symbol] for symbol, count in self._get_atom_counts(mol).items())
# Bond correction
bac_length = sum(
coeff * (bond_corr_length[symbol[0]] * bond_corr_length[symbol[1]]) ** 0.5 if isinstance(symbol, tuple)
else coeff * bond_corr_length[symbol]
for symbol, coeff in self._get_length_coeffs(mol).items()
)
bac_neighbor = sum(count * bond_corr_neighbor[symbol] for
symbol, count in self._get_neighbor_coeffs(mol).items())
bac_bond = bac_length + bac_neighbor
# Note the minus sign
return ScalarQuantity(-(bac_mol + bac_atom + bac_bond), 'kcal/mol')
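# Summary of the terms assembled above (cf. Anantharaman & Melius, JPCA 2005):
#   BAC = -( mol_corr * mol_coeff
#            + sum over atoms A of atom_corr[A]
#            + sum over bonds A-B of sqrt(bond_corr_length[A] * bond_corr_length[B]) * exp(-exp_coeff * r_AB)
#            + sum over bonds of the bond_corr_neighbor terms for atoms adjacent to each bond )
# with exp_coeff = 3.0 Angstrom^-1 as defined on the class.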
def _get_atom_counts(self, mol: Molecule) -> Counter:
"""
Get a counter containing how many atoms of each type are
present in the molecule.
Args:
mol: RMG-Py molecule.
Returns:
Counter containing atom counts.
"""
if hasattr(mol, 'id') and mol.id is not None:
if mol.id in self._alpha_coeffs:
return self._alpha_coeffs[mol.id]
atom_counts = Counter(atom.element.symbol for atom in mol.atoms)
if hasattr(mol, 'id'):
self._alpha_coeffs[mol.id] = atom_counts
return atom_counts
def _get_length_coeffs(self, mol: Molecule) -> defaultdict:
"""
Get a dictionary containing the coefficients for the beta
(bond_corr_length) variables. There is one coefficient per atom
type and an additional coefficient for each combination of atom
types.
Example: If the atoms are H, C, and O, there are (at most)
coefficients for H, C, O, (C, H), (H, O), and (C, O).
Args:
mol: RMG-Py molecule.
Returns:
Defaultdict containing beta coefficients.
"""
if hasattr(mol, 'id') and mol.id is not None:
if mol.id in self._beta_coeffs:
return self._beta_coeffs[mol.id]
coeffs = defaultdict(float)
for bond in mol.get_all_edges():
atom1 = bond.atom1
atom2 = bond.atom2
symbol1 = atom1.element.symbol
symbol2 = atom2.element.symbol
c = np.exp(-self.exp_coeff * np.linalg.norm(atom1.coords - atom2.coords))
k = symbol1 if symbol1 == symbol2 else tuple(sorted([symbol1, symbol2]))
coeffs[k] += c
if hasattr(mol, 'id'):
self._beta_coeffs[mol.id] = coeffs
return coeffs
def _get_neighbor_coeffs(self, mol: Molecule) -> Counter:
"""
Get a counter containing the coefficients for the gamma
(bond_corr_neighbor) variables.
Args:
mol: RMG-Py molecule.
Returns:
Counter containing gamma coefficients.
"""
if hasattr(mol, 'id') and mol.id is not None:
if mol.id in self._gamma_coeffs:
return self._gamma_coeffs[mol.id]
coeffs = Counter()
for bond in mol.get_all_edges():
atom1 = bond.atom1
atom2 = bond.atom2
# Atoms adjacent to atom1
counts1 = Counter(a.element.symbol for a, b in atom1.bonds.items() if b is not bond)
counts1[atom1.element.symbol] += max(0, len(atom1.bonds) - 1)
# Atoms adjacent to atom2
counts2 = Counter(a.element.symbol for a, b in atom2.bonds.items() if b is not bond)
counts2[atom2.element.symbol] += max(0, len(atom2.bonds) - 1)
coeffs += counts1 + counts2
if hasattr(mol, 'id'):
self._gamma_coeffs[mol.id] = coeffs
return coeffs
def _get_mol_coeff(self, mol: Molecule, multiplicity: int = 1) -> float:
"""
Get the coefficient for the K (mol_corr) variable.
Args:
mol: RMG-Py molecule.
multiplicity: Multiplicity of the molecule.
Returns:
K coefficient.
"""
if hasattr(mol, 'id') and mol.id is not None:
if mol.id in self._k_coeffs:
return self._k_coeffs[mol.id]
spin = 0.5 * (multiplicity - 1)
coeff = spin - sum(self.atom_spins[atom.element.symbol] for atom in mol.atoms)
if hasattr(mol, 'id'):
self._k_coeffs[mol.id] = coeff
return coeff
def fit(self,
weighted: bool = False,
db_names: Union[str, List[str]] = 'main',
exclude_elements: Union[Sequence[str], Set[str], str] = None,
charge: Union[Sequence[Union[str, int]], Set[Union[str, int]], str, int] = 'all',
multiplicity: Union[Sequence[int], Set[int], int, str] = 'all',
**kwargs):
"""
Fits bond additivity corrections using calculated and reference
data available in the RMG database. The resulting BACs stored
in self.bacs will be based on kcal/mol.
Args:
weighted: Perform weighted least squares by balancing training data.
db_names: Optionally specify database names to train on (defaults to main).
exclude_elements: Molecules with any of the elements in this sequence are excluded from training data.
charge: Allowable charges for molecules in training data.
multiplicity: Allowable multiplicities for molecules in training data.
kwargs: Keyword arguments for fitting Melius-type BACs (see self._fit_melius).
"""
self._reset_memoization()
self.database_key = self.load_database(names=db_names)
self.dataset = extract_dataset(self.ref_databases[self.database_key], self.level_of_theory,
exclude_elements=exclude_elements, charge=charge, multiplicity=multiplicity)
if len(self.dataset) == 0:
raise BondAdditivityCorrectionError(f'No species available for {self.level_of_theory}')
if weighted:
self.dataset.compute_weights()
if self.bac_type == 'm':
logging.info(f'Fitting Melius-type BACs for {self.level_of_theory}...')
self._fit_melius(**kwargs)
elif self.bac_type == 'p':
logging.info(f'Fitting Petersson-type BACs for {self.level_of_theory}...')
self._fit_petersson()
stats_before = self.dataset.calculate_stats()
stats_after = self.dataset.calculate_stats(for_bac_data=True)
logging.info(f'RMSE/MAE before fitting: {stats_before.rmse:.2f}/{stats_before.mae:.2f} kcal/mol')
logging.info(f'RMSE/MAE after fitting: {stats_after.rmse:.2f}/{stats_after.mae:.2f} kcal/mol')
def test(self,
species: List[ReferenceSpecies] = None,
dataset: BACDataset = None,
db_names: Union[str, List[str]] = None) -> BACDataset:
"""
Test on data.
Note:
Only one of `species`, `dataset`, or `db_names` can be specified.
Args:
species: Species to test on.
dataset: BACDataset to test on.
db_names: Database names to test on.
Returns:
BACDataset containing the calculated BAC enthalpies in `bac_data`.
"""
if sum(1 for arg in (species, dataset, db_names) if arg is not None) > 1:
raise BondAdditivityCorrectionError('Cannot specify several data sources')
if species is not None:
dataset = BACDataset([BACDatapoint(spc, level_of_theory=self.level_of_theory) for spc in species])
elif db_names is not None:
database_key = self.load_database(names=db_names)
dataset = extract_dataset(self.ref_databases[database_key], self.level_of_theory)
if dataset is None or len(dataset) == 0:
raise BondAdditivityCorrectionError('No data available for evaluation')
corr = np.array([self.get_correction(datapoint=d).value_si / 4184 for d in dataset])
dataset.bac_data = dataset.calc_data + corr
return dataset
def _fit_petersson(self):
"""
Fit Petersson-type BACs.
"""
features = self.dataset.bonds
feature_keys = list({k for f in features for k in f})
feature_keys.sort()
def make_feature_mat(_features: List[Dict[str, int]]) -> np.ndarray:
_x = np.zeros((len(_features), len(feature_keys)))
for idx, f in enumerate(_features):
flist = []
for k in feature_keys:
try:
flist.append(f[k])
except KeyError:
flist.append(0.0)
_x[idx] = np.array(flist)
return _x
# Assume that variance of observations is unity. This is clearly
# not true because we know the uncertainties but we often care
# more about less certain molecules.
x = make_feature_mat(features)
y = self.dataset.ref_data - self.dataset.calc_data
weights = np.diag(self.dataset.weight)
w = np.linalg.solve(x.T @ weights @ x, x.T @ weights @ y)
ypred = x @ w
covariance = np.linalg.inv(x.T @ weights @ x)
self.correlation = _covariance_to_correlation(covariance)
self.dataset.bac_data = self.dataset.calc_data + ypred
self.bacs = {fk: wi for fk, wi in zip(feature_keys, w)}
def _fit_melius(self,
fit_mol_corr: bool = True,
global_opt: bool = True,
global_opt_iter: int = 10,
lsq_max_nfev: int = 500):
"""
Fit Melius-type BACs.
Args:
fit_mol_corr: Also fit molecular correction term.
global_opt: Perform a global optimization.
global_opt_iter: Number of global opt iterations.
lsq_max_nfev: Maximum function evaluations in least squares optimizer.
"""
mols = self.dataset.get_mols(from_geo=True)
for i, mol in enumerate(mols):
mol.id = i
all_atom_symbols = list({atom.element.symbol for mol in mols for atom in mol.atoms})
all_atom_symbols.sort()
nelements = len(all_atom_symbols)
# The order of parameters is
# atom_corr (alpha)
# bond_corr_length (beta)
# bond_corr_neighbor (gamma)
# optional: mol_corr (k)
# where atom_corr are the atomic corrections, bond_corr_length are the bondwise corrections
# due to bond lengths (bounded by 0 below), bond_corr_neighbor are the bondwise corrections
# due to neighboring atoms, and mol_corr (optional) is a molecular correction.
# Choose reasonable bounds depending on the parameter
lim_alpha = (-5.0, 5.0)
lim_beta = (0.0, 1e4)
lim_gamma = (-1.0, 1.0)
lim_k = (-5.0, 5.0)
wmin = [lim_alpha[0]] * nelements + [lim_beta[0]] * nelements + [lim_gamma[0]] * nelements
wmax = [lim_alpha[1]] * nelements + [lim_beta[1]] * nelements + [lim_gamma[1]] * nelements
if fit_mol_corr:
wmin.append(lim_k[0])
wmax.append(lim_k[1])
def get_params(_w: np.ndarray) -> Dict[str, Union[float, Dict[str, float]]]:
_atom_corr = dict(zip(all_atom_symbols, _w[:nelements]))
_bond_corr_length = dict(zip(all_atom_symbols, _w[nelements:2*nelements]))
_bond_corr_neighbor = dict(zip(all_atom_symbols, _w[2*nelements:3*nelements]))
_mol_corr = _w[3*nelements] if fit_mol_corr else 0.0
return dict(
atom_corr=_atom_corr,
bond_corr_length=_bond_corr_length,
bond_corr_neighbor=_bond_corr_neighbor,
mol_corr=_mol_corr
)
def get_bac_data(_w: np.ndarray) -> np.ndarray:
corr = np.array(
[self._get_melius_correction(datapoint=d, params=get_params(_w)).value_si / 4184 for d in self.dataset]
)
return self.dataset.calc_data + corr
# Construct weight matrix
weights = np.diag(self.dataset.weight)
def residuals(_w: np.ndarray) -> Union[float, np.ndarray]:
"""Calculate residuals"""
bac_data = get_bac_data(_w)
return np.sqrt(weights) @ (self.dataset.ref_data - bac_data)
global_opt_iter = global_opt_iter if global_opt else 1
results = []
for it in range(global_opt_iter):
if global_opt:
logging.info(f'Global opt iteration {it}')
# Get random initial guess
w_alpha = np.random.uniform(*lim_alpha, nelements)
w_beta = np.exp(np.random.uniform(-5, np.log(lim_beta[1]), nelements))
w_gamma = np.random.uniform(*lim_gamma, nelements)
w = np.concatenate((w_alpha, w_beta, w_gamma))
if fit_mol_corr:
w_k = np.random.uniform(*lim_k, 1)
w = np.concatenate((w, w_k))
res = optimize.least_squares(residuals, w, jac='3-point', bounds=(wmin, wmax),
max_nfev=lsq_max_nfev, verbose=1)
results.append(res)
res = min(results, key=lambda r: r.cost)
w = res.x
# Estimate parameter covariance matrix using Jacobian
covariance = np.linalg.inv(res.jac.T @ weights @ res.jac)
self.correlation = _covariance_to_correlation(covariance)
self.dataset.bac_data = get_bac_data(w)
self.bacs = get_params(w)
def write_to_database(self, overwrite: bool = False, alternate_path: str = None):
"""
Write BACs to database.
Args:
overwrite: Overwrite existing BACs.
alternate_path: Write BACs to this path instead.
"""
if self.bacs is None:
raise BondAdditivityCorrectionError('No BACs available for writing')
data_path = data.quantum_corrections_path
with open(data_path) as f:
lines = f.readlines()
bacs_formatted = self.format_bacs(indent=True)
bac_dict = data.mbac if self.bac_type == 'm' else data.pbac
keyword = 'mbac' if self.bac_type == 'm' else 'pbac'
has_entries = bool(data.mbac) if self.bac_type == 'm' else bool(data.pbac)
# Add new BACs to file without changing existing formatting
# First: find the BACs dict in the file
for i, line in enumerate(lines):
if keyword in line:
break
else:
# 'pbac' and 'mbac' should both be found at `data_path`
raise RuntimeError(f'Keyword "{keyword}" is not found in the data file. '
f'Please check the database file at {data_path} and '
f'make sure an up-to-date RMG-database branch is used.')
# Second: Write the BACs block into the BACs dict
# Does not overwrite comments
if self.level_of_theory in bac_dict and overwrite:
del_idx_start = del_idx_end = None
lot_repr = repr(self.level_of_theory)
for j, line2 in enumerate(lines[i:]):
if lot_repr in line2 and 'Composite' not in lot_repr and 'Composite' not in line2:
del_idx_start = i + j
elif lot_repr in line2 and 'Composite' in lot_repr:
del_idx_start = i + j
if del_idx_start is not None and line2.rstrip() == ' },': # Can't have comment after final brace
del_idx_end = i + j + 1
if (lines[del_idx_start - 1].lstrip().startswith('#')
or lines[del_idx_end + 1].lstrip().startswith('#')):
logging.warning('There may be left over comments from previous BACs')
lines[del_idx_start:del_idx_end] = bacs_formatted
break
# Check if the entry is successfully inserted to the `lines`
if del_idx_start is None or del_idx_end is None:
raise RuntimeError(f'The script cannot identify the corresponding block for the given BACs. '
f'It is possible that the database file at {data_path} is not correctly '
f'formatted. Please check the file.')
elif self.level_of_theory in bac_dict and not overwrite:
raise IOError(
f'{self.level_of_theory} already exists. Set `overwrite` to True.'
)
else:
# Either empty BACs dict or adding BACs for a new level of theory
if not has_entries and '}' in lines[i]: # Empty BACs dict
lines[i] = f'{keyword} = {{\n'
lines[(i+1):(i+1)] = ['\n}\n']
lines[(i+1):(i+1)] = ['\n'] + bacs_formatted
with open(data_path if alternate_path is None else alternate_path, 'w') as f:
f.writelines(lines)
# Reload data to update BAC dictionaries
if alternate_path is None:
importlib.reload(data)
def format_bacs(self, indent: bool = False) -> List[str]:
"""
Obtain a list of nicely formatted BACs suitable for writelines.
Args:
indent: Indent each line for printing in database.
Returns:
Formatted list of BACs.
"""
bacs_formatted = json.dumps(self.bacs, indent=4).replace('"', "'").split('\n')
bacs_formatted[0] = f'"{self.level_of_theory!r}": ' + bacs_formatted[0]
bacs_formatted[-1] += ','
bacs_formatted = [e + '\n' for e in bacs_formatted]
if indent:
bacs_formatted = [' ' + e for e in bacs_formatted]
return bacs_formatted
def save_correlation_mat(self, path: str, labels: List[str] = None):
"""
Save a visual representation of the parameter correlation matrix.
Args:
path: Path to save figure to.
labels: Parameter labels.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
return
if self.correlation is None:
raise BondAdditivityCorrectionError('Fit BACs before saving correlation matrix!')
if labels is None:
if self.bac_type == 'm':
param_types = list(self.bacs.keys())
atom_symbols = list(self.bacs[param_types[0]])
labels = [r'$\alpha_{' + s + r'}$' for s in atom_symbols] # atom_corr is alpha
labels.extend(r'$\beta_{' + s + r'}$' for s in atom_symbols) # bond_corr_length is beta
labels.extend(r'$\gamma_{' + s + r'}$' for s in atom_symbols) # bond_corr_neighbor is gamma
if len(self.correlation) == 3 * len(atom_symbols) + 1:
labels.append('K') # mol_corr is K
elif self.bac_type == 'p':
labels = list(self.bacs.keys())
fig, ax = plt.subplots(figsize=(11, 11) if self.bac_type == 'm' else (18, 18))
ax.matshow(self.correlation, cmap=plt.cm.PiYG)
# Superimpose values as text
for i in range(len(self.correlation)):
for j in range(len(self.correlation)):
c = self.correlation[j, i]
ax.text(i, j, f'{c: .2f}', va='center', ha='center', fontsize=8)
# Save lims because they get changed when modifying labels
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.set_xticks(list(range(len(self.correlation))))
ax.set_yticks(list(range(len(self.correlation))))
ax.set_xticklabels(labels, fontsize=14, rotation='vertical' if self.bac_type == 'p' else None)
ax.set_yticklabels(labels, fontsize=14)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.tick_params(bottom=False, top=False, left=False, right=False)
fig.savefig(path, dpi=600, bbox_inches='tight', pad_inches=0)
def _covariance_to_correlation(cov: np.ndarray) -> np.ndarray:
"""Convert (unscaled) covariance matrix to correlation matrix"""
v = np.sqrt(np.diag(cov))
corr = cov / np.outer(v, v)
corr[cov == 0] = 0
return corr
``` |
{
"source": "1212Prajwol-Pdl/SmartProcessAnalytics",
"score": 3
} |
#### File: SmartProcessAnalytics/Code-SPA/IC.py
```python
from sklearn.feature_selection import VarianceThreshold
import regression_models as rm
import numpy as np
import nonlinear_regression as nr
import timeseries_regression_RNN as RNN
def IC_mse(model_name, X, y, X_test, y_test, X_val =None, y_val = None, cv_type = None, alpha_num =50, eps = 1e-4, round_number = '', **kwargs):
'''This function determines the best hyper_parameter using mse based on AIC/AICc
Input:
model_name: str, indicating which model to use
X: independent variables of size N x m np_array
y: dependent variable of size N x 1 np_array
cv_type: 'BIC', 'AIC' or 'AICc'; if not specified, AICc is used when the sample-to-feature ratio N/m is below 40, otherwise AIC
**kwargs: hyper-parameters for model fitting, if None, using default range or settings
Output:
hyper_params: dictionary, contains optimal model parameters based on cross validation
model: final fitted model on all training data
model_params: np_array m x 1
mse_train
mse_test
yhat_train
yhat_test
'''
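# For reference, the information criteria compared below take their usual
# least-squares forms (the exact values used here are returned by the fitted
# DALVEN model):
#   AIC  = n*ln(SSE/n) + 2k
#   AICc = AIC + 2k(k+1)/(n-k-1)   # small-sample correction; the code defaults to it when n/m < 40
#   BIC  = n*ln(SSE/n) + k*ln(n)
# where n is the number of samples and k the number of fitted parameters.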
if model_name == 'DALVEN':
DALVEN = rm.model_getter(model_name)
if 'l1_ratio' not in kwargs:
kwargs['l1_ratio'] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.97, 0.99][::-1]
if 'degree' not in kwargs:
kwargs['degree'] = [1,2,3]
if 'lag' not in kwargs:
kwargs['lag'] = [i+1 for i in range(40)]
if 'label_name' not in kwargs:
kwargs['label_name'] = False
if 'trans_type' not in kwargs:
kwargs['trans_type'] = 'auto'
if 'select_pvalue' not in kwargs:
kwargs['select_pvalue'] = 0.05
if cv_type == None:
if X.shape[0]//X.shape[1]<40:
cv_type = 'AICc'
else:
cv_type = 'AIC'
IC_result = np.zeros((len(kwargs['degree']),alpha_num,len(kwargs['l1_ratio']), len(kwargs['lag'])))
#check if the data is z-scored and scale back:
#########################to be continued###################################
for k in range(len(kwargs['degree'])):
for j in range(len(kwargs['l1_ratio'])):
for i in range(alpha_num):
for t in range(len(kwargs['lag'])):
# print(k,j,i,t)
_, _, _, _, _, _ , _, _, (AIC,AICc,BIC)= DALVEN(X, y, X_test, y_test, alpha = i, l1_ratio = kwargs['l1_ratio'][j],
degree = kwargs['degree'][k], lag = kwargs['lag'][t], tol = eps , alpha_num = alpha_num, cv = True,
selection = 'p_value', select_value = kwargs['select_pvalue'], trans_type = kwargs['trans_type'])
if cv_type == 'AICc':
IC_result[k,i,j,t] += AICc
elif cv_type == 'BIC':
IC_result[k,i,j,t] += BIC
else:
IC_result[k,i,j,t] += AIC
#find the min value (if there is a tie, only the first occurrence is returned) and fit the final model
ind = np.unravel_index(np.argmin(IC_result, axis=None), IC_result.shape)
degree = kwargs['degree'][ind[0]]
l1_ratio = kwargs['l1_ratio'][ind[2]]
lag = kwargs['lag'][ind[3]]
DALVEN_model, DALVEN_params, mse_train, mse_test, yhat_train, yhat_test, alpha, retain_index,_= DALVEN(X,y, X_test, y_test, alpha = ind[1], l1_ratio = l1_ratio,
degree = degree, lag = lag, tol = eps , alpha_num = alpha_num, cv = False,
selection = 'p_value', select_value = kwargs['select_pvalue'], trans_type = kwargs['trans_type'])
hyper_params = {}
hyper_params['alpha'] = alpha
hyper_params['l1_ratio'] = l1_ratio
hyper_params['degree'] = degree
hyper_params['lag'] = lag
hyper_params['retain_index'] = retain_index
#get the name for the retained
if kwargs['label_name'] :
if kwargs['trans_type'] == 'auto':
Xtrans,_ = nr.feature_trans(X, degree = degree, interaction = 'later')
else:
Xtrans, _ = nr.poly_feature(X, degree = degree, interaction = True, power = True)
#lag padding for X
XD = Xtrans[lag:]
for i in range(lag):
XD = np.hstack((XD,Xtrans[lag-1-i:-i-1]))
#lag padding for y in design matrix
for i in range(lag):
XD = np.hstack((XD,y[lag-1-i:-i-1]))
#remove feature with 0 variance
sel = VarianceThreshold(threshold=eps).fit(XD)
if 'xticks' in kwargs:
list_name = kwargs['xticks']
else:
list_name =['x'+str(i) for i in range(1,np.shape(X)[1]+1)]
if kwargs['trans_type'] == 'auto':
if degree == 1:
list_name_final = list_name + ['log('+ name + ')' for name in list_name] + ['sqrt(' +name+')' for name in list_name]+['1/' +name for name in list_name]
list_copy = list_name_final[:]
for i in range(lag):
list_name_final = list_name_final + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name_final = list_name_final + ['y(t-' + str(i+1) +')' ]
if degree == 2:
list_name_final = list_name[:]
for i in range(X.shape[1]-1):
for j in range(i+1,X.shape[1]):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]]
list_name_final = list_name_final + ['log('+ name + ')' for name in list_name] + ['sqrt(' +name+')' for name in list_name]+['1/' +name for name in list_name]+[name +'^2' for name in list_name]+['(log'+ name + ')^2' for name in list_name] + ['1/' +name+'^2' for name in list_name]+\
[name +'^1.5' for name in list_name]+ ['log(' +name +')/' + name for name in list_name]+ ['1/' +name+'^0.5' for name in list_name]
list_copy = list_name_final[:]
for i in range(lag):
list_name_final = list_name_final + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name_final = list_name_final + ['y(t-' + str(i+1) +')' ]
if degree == 3:
list_name_final = list_name[:]
for i in range(X.shape[1]-1):
for j in range(i+1,X.shape[1]):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]]
for i in range(X.shape[1]-2):
for j in range(i+1,X.shape[1]-1):
for k in range(j+1,X.shape[1]):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]+'*'+list_name[k]]
list_name_final = list_name_final + ['log('+ name + ')' for name in list_name] + ['sqrt(' +name+')' for name in list_name]+['1/' +name for name in list_name]+\
[name +'^2' for name in list_name]+['(log'+ name + ')^2' for name in list_name] + ['1/' +name+'^2' for name in list_name]+\
[name +'^1.5' for name in list_name]+ ['log(' +name +')/' + name for name in list_name]+ ['1/' +name+'^0.5' for name in list_name] +\
[name +'^3' for name in list_name]+['(log'+ name + ')^3' for name in list_name] + ['1/' +name+'^3' for name in list_name]+\
[name +'^2.5' for name in list_name] +['(log' +name +')^2/' + name for name in list_name]+ ['log(' +name +')/sqrt(' + name +')' for name in list_name]+ ['log(' +name +')/' + name +'^2' for name in list_name]+\
[name +'^-1.5' for name in list_name]
list_copy = list_name_final[:]
for i in range(lag):
list_name_final = list_name_final + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name_final = list_name_final + ['y(t-' + str(i+1) +')' ]
else:
if degree == 1:
list_name_final = list_name
list_copy = list_name_final[:]
for i in range(lag):
list_name_final = list_name_final + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name_final = list_name_final + ['y(t-' + str(i+1) +')' ]
if degree == 2:
list_name_final = list_name[:]
for i in range(X.shape[1]):
for j in range(i, X.shape[1]):
list_name_final = list_name_final +[list_name[i]+'*'+list_name[j]]
list_copy = list_name_final[:]
for i in range(lag):
list_name_final = list_name_final + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name_final = list_name_final + ['y(t-' + str(i+1) +')' ]
if degree == 3:
list_name_final = list_name[:]
for i in range(X.shape[1]):
for j in range(i, X.shape[1]):
list_name_final = list_name_final +[list_name[i]+'*'+list_name[j]]
for i in range(X.shape[1]):
for j in range(i, X.shape[1]):
for k in range(j, X.shape[1]):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]+'*'+list_name[k]]
list_copy = list_name_final[:]
for i in range(lag):
list_name_final = list_name_final + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name_final = list_name_final + ['y(t-' + str(i+1) +')' ]
index = list(sel.get_support())
list_name_final = [x for x, y in zip(list_name_final, index) if y]
list_name_final = [x for x, y in zip(list_name_final, retain_index) if y]
else:
list_name_final = []
return(hyper_params, DALVEN_model, DALVEN_params, mse_train, mse_test, yhat_train, yhat_test, IC_result[ind], list_name_final)
###################################################################################################################
elif model_name == 'DALVEN_full_nonlinear':
DALVEN = rm.model_getter(model_name)
if 'l1_ratio' not in kwargs:
kwargs['l1_ratio'] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.97, 0.99][::-1]
if 'degree' not in kwargs:
kwargs['degree'] = [1,2] #,3]
if 'lag' not in kwargs:
kwargs['lag'] = [i+1 for i in range(40)]
if 'label_name' not in kwargs:
kwargs['label_name'] = False
if 'trans_type' not in kwargs:
kwargs['trans_type'] = 'auto'
if 'select_pvalue' not in kwargs:
kwargs['select_pvalue'] = 0.05
IC_result = np.zeros((len(kwargs['degree']),alpha_num,len(kwargs['l1_ratio']), len(kwargs['lag'])))
#check if the data is z-scored, and scale back if so:
#########################to be continued###################################
for k in range(len(kwargs['degree'])):
for j in range(len(kwargs['l1_ratio'])):
for i in range(alpha_num):
for t in range(len(kwargs['lag'])):
# print(k,j,i,t)
_, _, _, _, _, _ , _, _, (AIC,AICc,BIC)= DALVEN(X, y, X_test, y_test, alpha = i, l1_ratio = kwargs['l1_ratio'][j],
degree = kwargs['degree'][k], lag = kwargs['lag'][t], tol = eps , alpha_num = alpha_num, cv = True,
selection = 'p_value', select_value = kwargs['select_pvalue'], trans_type = kwargs['trans_type'])
if cv_type == 'AICc':
IC_result[k,i,j,t] += AICc
elif cv_type == 'BIC':
IC_result[k,i,j,t] += BIC
else:
IC_result[k,i,j,t] += AIC
#find the min value (if there is a tie, only the first occurrence is returned) and fit the final model
ind = np.unravel_index(np.argmin(IC_result, axis=None), IC_result.shape)
degree = kwargs['degree'][ind[0]]
l1_ratio = kwargs['l1_ratio'][ind[2]]
lag = kwargs['lag'][ind[3]]
DALVEN_model, DALVEN_params, mse_train, mse_test, yhat_train, yhat_test, alpha, retain_index,_= DALVEN(X, y, X_test, y_test, alpha = ind[1], l1_ratio = l1_ratio,
degree = degree, lag = lag, tol = eps , alpha_num = alpha_num, cv = False,
selection = 'p_value', select_value = kwargs['select_pvalue'], trans_type = kwargs['trans_type'])
hyper_params = {}
hyper_params['alpha'] = alpha
hyper_params['l1_ratio'] = l1_ratio
hyper_params['degree'] = degree
hyper_params['lag'] = lag
hyper_params['retain_index'] = retain_index
#lag padding for X
XD = X[lag:]
for i in range(lag):
XD = np.hstack((XD,X[lag-1-i:-i-1]))
#lag padding for y in design matrix
for i in range(lag):
XD = np.hstack((XD,y[lag-1-i:-i-1]))
#get the names for the retained features
if kwargs['trans_type'] == 'auto':
XD,_ = nr.feature_trans(XD, degree = degree, interaction = 'later')
else:
XD, _ = nr.poly_feature(XD, degree = degree, interaction = True, power = True)
#remove features with zero variance
sel = VarianceThreshold(threshold=eps).fit(XD)
if kwargs['label_name'] :
list_name =['x'+str(i) for i in range(1,np.shape(X)[1]+1)]
list_copy = list_name[:]
for i in range(lag):
list_name = list_name + [s + '(t-' + str(i+1) + ')' for s in list_copy]
for i in range(lag):
list_name = list_name + ['y(t-' + str(i+1) +')' ]
if kwargs['trans_type'] == 'auto':
if degree == 1:
list_name_final = list_name + ['log('+ name + ')' for name in list_name] + ['sqrt(' +name+')' for name in list_name]+['1/' +name for name in list_name]
if degree == 2:
list_name_final = list_name[:]
for i in range(len(list_name)-1):
for j in range(i+1,len(list_name)):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]]
list_name_final = list_name_final + ['log('+ name + ')' for name in list_name] + ['sqrt(' +name+')' for name in list_name]+['1/' +name for name in list_name]+[name +'^2' for name in list_name]+['(log'+ name + ')^2' for name in list_name] + ['1/' +name+'^2' for name in list_name]+\
[name +'^1.5' for name in list_name]+ ['log(' +name +')/' + name for name in list_name]+ ['1/' +name+'^0.5' for name in list_name]
if degree == 3:
list_name_final = list_name[:]
for i in range(len(list_name)-1):
for j in range(i+1,len(list_name)):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]]
for i in range(len(list_name)-2):
for j in range(i+1,len(list_name)-1):
for k in range(j+1,len(list_name)):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]+'*'+list_name[k]]
list_name_final = list_name_final + ['log('+ name + ')' for name in list_name] + ['sqrt(' +name+')' for name in list_name]+['1/' +name for name in list_name]+\
[name +'^2' for name in list_name]+['(log'+ name + ')^2' for name in list_name] + ['1/' +name+'^2' for name in list_name]+\
[name +'^1.5' for name in list_name]+ ['log(' +name +')/' + name for name in list_name]+ ['1/' +name+'^0.5' for name in list_name] +\
[name +'^3' for name in list_name]+['(log'+ name + ')^3' for name in list_name] + ['1/' +name+'^3' for name in list_name]+\
[name +'^2.5' for name in list_name] +['(log' +name +')^2/' + name for name in list_name]+ ['log(' +name +')/sqrt(' + name +')' for name in list_name]+ ['log(' +name +')/' + name +'^2' for name in list_name]+\
[name +'^-1.5' for name in list_name]
else:
if degree == 1:
list_name_final = list_name
if degree == 2:
list_name_final = list_name[:]
for i in range(X.shape[1]):
for j in range(i, X.shape[1]):
list_name_final = list_name_final +[list_name[i]+'*'+list_name[j]]
if degree == 3:
list_name_final = list_name[:]
for i in range(len(list_name)):
for j in range(i, len(list_name)):
list_name_final = list_name_final +[list_name[i]+'*'+list_name[j]]
for i in range(len(list_name)):
for j in range(i, len(list_name)):
for k in range(j, len(list_name)):
list_name_final = list_name_final + [list_name[i]+'*'+list_name[j]+'*'+list_name[k]]
index = list(sel.get_support())
list_name_final = [x for x, y in zip(list_name_final, index) if y]
list_name_final = [x for x, y in zip(list_name_final, retain_index) if y]
else:
list_name_final = []
return(hyper_params, DALVEN_model, DALVEN_params, mse_train, mse_test, yhat_train, yhat_test, IC_result[ind], list_name_final)
###################################################################################################################
#for RNN, only the model architecture is treated as a hyper-parameter in this automated version; the other training parameters can be set via kwargs, otherwise the default values are used
if model_name == 'RNN':
input_size_x = X.shape[1]
#the BRNN version is currently not supported, so keep the test keep-probabilities at 1
input_prob_test = 1
output_prob_test = 1
state_prob_test = 1
#model architecture (these are also the hyperparameters searched over for selection)
if 'cell_type' not in kwargs:
kwargs['cell_type'] = 'e'
if 'activation' not in kwargs:
kwargs['activation'] = ['tanh'] #can be relu, tanh, linear
if 'state_size' not in kwargs:
kwargs['state_size'] = [input_size_x*(i+1) for i in range(5)]
if 'num_layers' not in kwargs:
kwargs['num_layers'] = [1, 2, 3]
#training parameters
if 'batch_size' not in kwargs:
kwargs['batch_size'] = 2
if 'epoch_overlap' not in kwargs:
kwargs['epoch_overlap'] = 0
if 'num_steps' not in kwargs:
kwargs['num_steps'] = 25
if 'learning_rate' not in kwargs:
kwargs['learning_rate'] = 1e-2
if 'lambda_l2_reg' not in kwargs:
kwargs['lambda_l2_reg'] = 1e-3
if 'num_epochs' not in kwargs:
kwargs['num_epochs'] = 1000
#drop-out parameters for training
if 'input_prob' not in kwargs:
kwargs['input_prob'] = 1
if 'output_prob' not in kwargs:
kwargs['output_prob'] = 1
if 'state_prob' not in kwargs:
kwargs['state_prob'] = 1
#early stop parameter
if 'train_ratio' not in kwargs:
if X_val is None:
kwargs['train_ratio'] = 0.85
else:
kwargs['train_ratio'] = 1
if 'max_checks_without_progress' not in kwargs:
kwargs['max_checks_without_progress'] = 100
if 'epoch_before_val' not in kwargs:
kwargs['epoch_before_val'] = 300
#save or not
if 'location' not in kwargs:
kwargs['location'] = 'RNNtest'
######model training
if cv_type is None:
if X.shape[0]//X.shape[1]<40:
cv_type = 'AICc'
else:
cv_type = 'AIC'
IC_result = np.zeros((len(kwargs['cell_type']),len(kwargs['activation']), len(kwargs['state_size']), len(kwargs['num_layers'])))
Result = {}
for i in range(len(kwargs['cell_type'])):
for j in range(len(kwargs['activation'])):
for k in range(len(kwargs['state_size'])):
for t in range(len(kwargs['num_layers'])):
# print(i,j,k,t)
p_train,p_val, p_test, (AIC,AICc,BIC),train_loss, val_loss, test_loss = RNN.timeseries_RNN_feedback_single_train(X, y, X_test=X_test, Y_test=y_test, X_val = X_val, Y_val=y_val, train_ratio = kwargs['train_ratio'],\
cell_type=kwargs['cell_type'][i],activation = kwargs['activation'][j], state_size = kwargs['state_size'][k],\
batch_size = kwargs['batch_size'], epoch_overlap = kwargs['epoch_overlap'],num_steps = kwargs['num_steps'],\
num_layers = kwargs['num_layers'][t], learning_rate = kwargs['learning_rate'], lambda_l2_reg=kwargs['lambda_l2_reg'],\
num_epochs = kwargs['num_epochs'], input_prob = kwargs['input_prob'], output_prob = kwargs['output_prob'], state_prob = kwargs['state_prob'],\
input_prob_test =input_prob_test, output_prob_test = output_prob_test, state_prob_test =state_prob_test,\
max_checks_without_progress = kwargs['max_checks_without_progress'],epoch_before_val=kwargs['epoch_before_val'], location= kwargs['location'], plot= False)
if cv_type == 'AICc':
IC_result[i,j,k,t] += AICc
elif cv_type == 'BIC':
IC_result[i,j,k,t] += BIC
else:
IC_result[i,j,k,t] += AIC
Result[(i,j,k,t)] = {'prediction_train':p_train,'prediction_val':p_val,'prediction_test':p_test,'train_loss_final':train_loss,'val_loss_final':val_loss,'test_loss_final':test_loss}
#find the min value (if there is a tie, only the first occurrence is returned) and fit the final model
ind = np.unravel_index(np.argmin(IC_result, axis=None), IC_result.shape)
cell_type = kwargs['cell_type'][ind[0]]
activation = kwargs['activation'][ind[1]]
state_size = kwargs['state_size'][ind[2]]
num_layers = kwargs['num_layers'][ind[3]]
Final = Result[(ind[0],ind[1],ind[2],ind[3])]
prediction_train,prediction_val, prediction_test, AICs, train_loss_final, val_loss_final, test_loss_final = RNN.timeseries_RNN_feedback_single_train(X, y, X_test=X_test, Y_test=y_test, X_val = X_val, Y_val=y_val, train_ratio = kwargs['train_ratio'],\
cell_type=cell_type,activation = activation , state_size = state_size,\
batch_size = kwargs['batch_size'], epoch_overlap = kwargs['epoch_overlap'],num_steps = kwargs['num_steps'],\
num_layers = num_layers, learning_rate = kwargs['learning_rate'], lambda_l2_reg=kwargs['lambda_l2_reg'],\
num_epochs = kwargs['num_epochs'], input_prob = kwargs['input_prob'], output_prob = kwargs['output_prob'], state_prob = kwargs['state_prob'],\
input_prob_test =input_prob_test, output_prob_test = output_prob_test, state_prob_test =state_prob_test,\
max_checks_without_progress = kwargs['max_checks_without_progress'],epoch_before_val=kwargs['epoch_before_val'], location= kwargs['location'], plot= True, round_number = round_number)
hyper_params = {}
hyper_params['cell_type'] = cell_type
hyper_params['activation'] = activation
hyper_params['state_size'] = state_size
hyper_params['num_layers'] = num_layers
hyper_params['training_params'] = {'batch_size':kwargs['batch_size'],'epoch_overlap':kwargs['epoch_overlap'],'num_steps':kwargs['num_steps'],'learning_rate':kwargs['learning_rate'],'lambda_l2_reg':kwargs['lambda_l2_reg'],'num_epochs':kwargs['num_epochs']}
hyper_params['drop_out'] = {'input_prob':kwargs['input_prob'],'output_prob':kwargs['output_prob'], 'state_prob':kwargs['state_prob']}
hyper_params['early_stop'] = {'train_ratio':kwargs['train_ratio'], 'max_checks_without_progress':kwargs['max_checks_without_progress'],'epoch_before_val':kwargs['epoch_before_val']}
hyper_params['IC_optimal'] = IC_result[ind]
return(hyper_params, kwargs['location'], Final['prediction_train'], Final['prediction_val'], Final['prediction_test'], Final['train_loss_final'], Final['val_loss_final'], Final['test_loss_final'])
```
#### File: SmartProcessAnalytics/Code-SPA/nonlinear_regression_other.py
```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
def model_getter(model_name):
'''Return the model according to the name'''
switcher = {
'RF': RF_fitting,
'SVR': SVR_fitting
}
#Get the function from switcher dictionary
if model_name not in switcher:
print('No corresponding regression model')
func = switcher.get(model_name)
return func
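# Illustrative usage (added sketch, not part of the original file):
# fit_func = model_getter('RF')   # -> RF_fitting
# For an unrecognized name the message above is printed and switcher.get() returns None.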
def mse(y, yhat):
"""
This function calculates the goodness-of-fit MSE
Input: y: N x 1 real response
yhat: N x 1 predicted by the model
Output: mse
"""
return np.sum((yhat-y)**2)/y.shape[0]
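# Illustrative example (sketch): for y = [[1.], [2.], [3.]] and yhat = [[1.1], [1.9], [3.2]]
# mse(y, yhat) = (0.01 + 0.01 + 0.04) / 3 = 0.02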
def RF_fitting(X, y, X_test, y_test, n_estimators = 100, max_depth = 10, min_samples_leaf = 0.1, max_features = 'auto',random_state=0):
'''Random forest regressor https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor.decision_path
Input:
X: independent variables of size N x m
y: dependent variable of size N x 1
X_test: independent variables of size N_test x m
y_test: dependent variable of size N_test x 1
n_estimators: int, number of trees in the RF
max_depth: int, max_depth of a single tree
max_features: maximum number of features considered at each potential split, 'auto' = m
random_state: int, if None, np.random is used
Output:
tuple (trained_model, mse_train, mse_test, yhat_train, yhat_test)
trained_model: RF model type
'''
#build model
RF = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth,random_state= random_state,
max_features = max_features, min_samples_leaf = min_samples_leaf)
RF.fit(X, y.flatten())
#predict
yhat_train = RF.predict(X).reshape((-1,1))
yhat_test = RF.predict(X_test).reshape((-1,1))
#get error
mse_train = mse(y, yhat_train)
mse_test = mse(y_test, yhat_test)
return (RF, mse_train, mse_test, yhat_train, yhat_test)
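# Example call (sketch with made-up toy data; shapes follow the docstring above):
# X, y = np.random.rand(100, 5), np.random.rand(100, 1)
# X_test, y_test = np.random.rand(20, 5), np.random.rand(20, 1)
# RF_model, mse_tr, mse_te, yhat_tr, yhat_te = RF_fitting(X, y, X_test, y_test, n_estimators=50)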
def MLP_fitting(X, y, X_test, y_test, hidden_layer_sizes = (10,), activation = 'tanh', solver = 'adam', alpha = 0.0001,
learning_rate_init = 0.001, max_iter = 1000, random_state = 0, tol = 1e-4, early_stopping = True, n_iter_no_change = 5, validation_fraction = 0.1):
'''An MLP with the Adam solver https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor
Input:
X: independent variables of size N x m
y: dependent variable of size N x 1
X_test: independent variables of size N_test x m
y_test: dependent variable of size N_test x 1
hidden_layer_sizes: tuple, number of nodes in each hidden layer
activation: activation function
alpha: float, L2 regularization parameter
Other parameters: see the link above
Output:
tuple (trained_model, mse_train, mse_test, yhat_train, yhat_test)
trained_model: MLP model type
'''
#build model
MLP = MLPRegressor(hidden_layer_sizes = hidden_layer_sizes, activation = activation, solver = solver, alpha = alpha,learning_rate_init=learning_rate_init,
max_iter = max_iter, random_state = random_state, tol = tol, early_stopping = early_stopping, n_iter_no_change = n_iter_no_change, validation_fraction =validation_fraction)
MLP.fit(X,y)
#predict
yhat_train = MLP.predict(X).reshape(-1,1)
yhat_test = MLP.predict(X_test).reshape(-1,1)
#get error
mse_train = mse(y, yhat_train)
mse_test = mse(y_test, yhat_test)
return (MLP, mse_train, mse_test, yhat_train, yhat_test)
def SVR_fitting(X, y, X_test, y_test, C = 100, epsilon = 10, gamma = 'auto', tol = 1e-4, max_iter = 10000):
'''Support Vector Regression with RBF kernel https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html#sklearn.svm.SVR
Input:
X: independent variables of size N x m
y: dependent variable of size N x 1
X_test: independent variables of size N_test x m
y_test: dependent variable of size N_test x 1
C: float, penalty parameter of the error term
epsilon: float, epsilon-tube within which no penalty is associated
gamma: float, kernel coefficient for 'rbf'
Output:
tuple (trained_model, mse_train, mse_test, yhat_train, yhat_test)
trained_model: SVR model type
'''
#build model
SVR_model = SVR(gamma=gamma, C=C, epsilon=epsilon, tol = tol, max_iter = max_iter)
SVR_model.fit(X, y.flatten())
#predict
yhat_train = SVR_model.predict(X).reshape((-1,1))
yhat_test = SVR_model.predict(X_test).reshape((-1,1))
#get error
mse_train = mse(y, yhat_train)
mse_test = mse(y_test, yhat_test)
return (SVR_model, mse_train, mse_test, yhat_train, yhat_test)
```
#### File: SmartProcessAnalytics/Code-SPA/nonlinear_regression.py
```python
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
#from copy import deepcopy
def _xexp(x):
'''exponential transform with protection against large numbers'''
with np.errstate(over='ignore'):
return np.where(np.abs(x) < 9, np.exp(x), np.exp(9)*np.ones_like(x))
def _xlog(x):
'''logarithm with protection against small numbers'''
with np.errstate(divide = 'ignore', invalid = 'ignore'):
return np.where(np.abs(x) > np.exp(-10), np.log(abs(x)), -10*np.ones_like(x))
def _xsqrt(x):
'''square root with protection against negative values (takes their absolute value)'''
with np.errstate(invalid = 'ignore'):
return np.sqrt(np.abs(x))
def _xinv(x):
'''inverse with protection against zero values'''
with np.errstate(divide = 'ignore', invalid = 'ignore'):
return np.where(np.abs(x)>1e-9, 1/x, 1e9*np.ones_like(x))
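# Quick sanity checks of the protected transforms (illustrative, not part of the original file):
# _xlog(np.array([0., 1.]))  -> [-10., 0.]   (log of ~0 is clipped to -10)
# _xinv(np.array([0., 2.]))  -> [1e9, 0.5]   (1/0 is clipped to 1e9)
# _xsqrt(np.array([-4.]))    -> [2.]         (negative values use their absolute value)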
def _mul(X,Y):
'''column-wise (elementwise) multiplication; the return below is a minimal completion, since the original body was left empty'''
return X * Y
def poly_feature(X, X_test = None, degree = 2, interaction = True, power = True):
'''
This function transforms features to polynomials according to the specified degree
Input:
X: N x m np_array of independent variables
X_test: independent variables of size N_test x m np_array
degree: int, degree of the polynomials, default 2
interaction: Bool, whether to include interaction terms, e.g. (x1, x2) -> (x1, x2, x1*x2)
power: Bool, whether to include pure power terms, e.g. (x1, x2) -> (x1**2, x2**2)
Return:
transformed X, np_array, N x m_trans
transformed X_test, np_array, N_test x m_trans
'''
if interaction and power:
poly = PolynomialFeatures(degree,include_bias=False)
X = poly.fit_transform(X)
if X_test is not None:
X_test = poly.fit_transform(X_test)
if interaction and not power:
poly = PolynomialFeatures(degree,include_bias=False, interaction_only = True)
X = poly.fit_transform(X)
if X_test is not None:
X_test = poly.fit_transform(X_test)
if not interaction and power:
X_copy = X[:]
if X_test is not None:
X_test_copy = X_test[:]
for i in range(1,degree):
X = np.column_stack((X, X_copy**(i+1)))
if X_test is not None:
X_test = np.column_stack((X_test, X_test_copy**(i+1)))
return (X, X_test)
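# Illustrative example (assuming X has two columns x1, x2):
# degree=2 with interaction and power gives [x1, x2, x1^2, x1*x2, x2^2] via PolynomialFeatures(2, include_bias=False);
# degree=2 with power only (interaction=False) gives [x1, x2, x1^2, x2^2] via the column_stack branch.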
def feature_trans(X, X_test = None, degree = 2, interaction = 'later'):
'''
This function transforms features according to the specified nonlinear forms
including polynomial terms and the protected log, sqrt(x) and 1/x transforms (exp, sigmoid and abs are currently commented out)
Input:
X: N x m np_array of independent variables
X_test: independent variables of size N_test x m np_array
degree: int, highest degree of the generated terms (1, 2 or 3)
interaction: str, 'no' for no interaction terms, 'later' to also append the pairwise (and triple) interaction terms
'''
Xlog = _xlog(X)
Xinv = _xinv(X)
Xsqrt = _xsqrt(X)
# Xexp = _xexp(X)
# Xexp_t = _xexp(X_test)
# Xsig = 1/(1+ _xexp(-X))
# Xsig_t = 1/(1+ _xexp(-X_test))
# Xabs = np.abs(X)
# Xabs_t = np.abs(X_test)
if X_test is not None:
Xlog_t = _xlog(X_test)
Xsqrt_t = _xsqrt(X_test)
Xinv_t = _xinv(X_test)
if interaction == 'no':
if degree == 1:
X = np.column_stack((X, Xlog, Xsqrt, Xinv))
if X_test is not None:
X_test = np.column_stack((X_test, Xlog_t, Xsqrt_t, Xinv_t))
if degree == 2:
X = np.column_stack((X, Xlog, Xsqrt, Xinv, X**2,Xlog**2,Xinv**2, X*Xsqrt, Xlog*Xinv, Xsqrt*Xinv ))
if X_test is not None:
X_test = np.column_stack((X_test, Xlog_t, Xsqrt_t, Xinv_t, X_test**2,Xlog_t**2,Xinv_t**2, X_test*Xsqrt_t, Xlog_t*Xinv_t, Xsqrt_t*Xinv_t ))
if degree == 3:
X = np.column_stack((X, Xlog, Xsqrt, Xinv, X**2,Xlog**2,Xinv**2, X*Xsqrt, Xlog*Xinv, Xsqrt*Xinv,
X**3, Xlog**3, Xinv**3, X**2*Xsqrt, Xlog**2*Xinv, Xlog*Xsqrt*Xinv,Xlog*Xinv**2, Xsqrt*Xinv**2))
if X_test is not None:
X_test = np.column_stack((X_test, Xlog_t, Xsqrt_t, Xinv_t, X_test**2,Xlog_t**2,Xinv_t**2, X_test*Xsqrt_t, Xlog_t*Xinv_t, Xsqrt_t*Xinv_t,
X_test**3, Xlog_t**3, Xinv_t**3, X_test**2*Xsqrt_t, Xlog_t**2*Xinv_t, Xlog_t*Xsqrt_t*Xinv_t,
Xlog_t*Xinv_t**2, Xsqrt_t*Xinv_t**2))
if interaction == 'later':
if degree == 1:
X = np.column_stack((X, Xlog, Xsqrt, Xinv))
if X_test is not None:
X_test = np.column_stack((X_test, Xlog_t, Xsqrt_t, Xinv_t))
if degree == 2:
poly = PolynomialFeatures(degree = 2,include_bias=False, interaction_only = True)
X_inter = poly.fit_transform(X)[:,X.shape[1]:]
X = np.column_stack((X, X_inter, Xlog, Xsqrt, Xinv, X**2,Xlog**2,Xinv**2, X*Xsqrt, Xlog*Xinv, Xsqrt*Xinv))
if X_test is not None:
X_test_inter = poly.fit_transform(X_test)[:,X_test.shape[1]:]
X_test = np.column_stack((X_test, X_test_inter, Xlog_t, Xsqrt_t, Xinv_t, X_test**2,Xlog_t**2,Xinv_t**2, X_test*Xsqrt_t, Xlog_t*Xinv_t, Xsqrt_t*Xinv_t ))
if degree == 3:
poly = PolynomialFeatures(degree = 3,include_bias=False, interaction_only = True)
X_inter = poly.fit_transform(X)[:,X.shape[1]:]
X = np.column_stack((X,X_inter, Xlog, Xsqrt, Xinv, X**2,Xlog**2,Xinv**2, X*Xsqrt, Xlog*Xinv, Xsqrt*Xinv,
X**3, Xlog**3, Xinv**3, X**2*Xsqrt, Xlog**2*Xinv, Xlog*Xsqrt*Xinv,Xlog*Xinv**2, Xsqrt*Xinv**2))
if X_test is not None:
X_test_inter = poly.fit_transform(X_test)[:,X_test.shape[1]:]
X_test = np.column_stack((X_test, X_test_inter, Xlog_t, Xsqrt_t, Xinv_t, X_test**2,Xlog_t**2,Xinv_t**2, X_test*Xsqrt_t, Xlog_t*Xinv_t, Xsqrt_t*Xinv_t,
X_test**3, Xlog_t**3, Xinv_t**3, X_test**2*Xsqrt_t, Xlog_t**2*Xinv_t, Xlog_t*Xsqrt_t*Xinv_t,
Xlog_t*Xinv_t**2, Xsqrt_t*Xinv_t**2))
return (X, X_test)
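# Illustrative example (assuming X has two columns x1, x2):
# feature_trans(X, degree=1) returns [x1, x2, log|x1|, log|x2|, sqrt|x1|, sqrt|x2|, 1/x1, 1/x2],
# i.e. the original columns followed by the protected log, sqrt and inverse transforms defined above.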
```
#### File: SmartProcessAnalytics/Code-SPA/timeseries_regression_RNN.py
```python
"""
Load packages and set reproducible results
"""
from sklearn.preprocessing import StandardScaler
import RNN_feedback as RNN_fd
import matplotlib.pyplot as plt
# Seed value
seed_value= 1
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
seed_value += 1
# 2. Set `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
seed_value += 1
# 3. Set `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
seed_value += 1
# 4. Set `tensorflow` pseudo-random generator at a fixed value
import tensorflow as tf
tf.set_random_seed(seed_value)
def timeseries_RNN_feedback_single_train(X, Y, X_val = None, Y_val = None, X_test=None, Y_test=None, train_ratio = 0.8,\
cell_type='e',activation = 'tanh', state_size = 2,\
batch_size = 1, epoch_overlap = None,num_steps = 10,\
num_layers = 1, learning_rate = 1e-2, lambda_l2_reg = 1e-3,\
num_epochs =200, input_prob = 0.95, output_prob = 0.95, state_prob = 0.95,\
input_prob_test = 1, output_prob_test = 1, state_prob_test = 1,\
max_checks_without_progress = 100,epoch_before_val=50, location='RNN_feedback_0', round_number = '', plot=False):
'''This function fits an RNN_feedback model to the training data, using the validation data to decide when to stop.
When test data is given, it is used to choose the hyperparameters; otherwise AIC computed on the training data is returned
for hyperparameter selection
Input:
X: training data predictors, numpy array: N x m
Y: training data response, numpy array: N x 1
X_val, Y_val: optional validation predictors/response; if provided, train_ratio is ignored
X_test: testing data predictors, numpy array: N_test x m
Y_test: testing data response, numpy array: N_test x 1
train_ratio: float, portion of the training data used to train the model, the rest being used as validation data;
if X_val is provided, this value is overridden
cell_type: str, type of RNN cell, can be LSTM, GRU, or anything else for BasicRNN, default = basicRNN
activation: str, type of activation function, can be relu, tanh, sigmoid, linear, default = tanh
state_size: int, number of states in the model
batch_size: int, number of batches used in training
epoch_overlap: None or int, None means no overlap between training batches; an int gives the spacing between consecutive batches (e.g. 0 means adjacent batches)
num_steps: int, number of steps of memory used in dynamic_RNN training
num_layers: int, number of RNN layers in the system, default = 1
learning_rate: float, learning rate for Adam, default = 1e-2
lambda_l2_reg: float, regularization weight, <= 0 means no regularization, default = 1e-3
num_epochs: int, maximum number of epochs considered in the system
input_prob, output_prob, state_prob: float, (0, 1], the keep probability for dropout during training, default = 0.95
input_prob_test, output_prob_test, state_prob_test: float, (0, 1], the keep probability for dropout during testing, default = 1 (no dropout)
max_checks_without_progress: int, number of epochs without validation improvement before early stopping, default = 100
epoch_before_val: int, number of training epochs before the validation set is used for early stopping, default = 50
location: str, name for saving the trained RNN-feedback model
round_number: str, suffix used in the file names of the saved figures
plot: Boolean, whether to plot the training results or not
Output:
(AIC or test results, prediction_train, prediction_test)
'''
print('========= Loading data =========')
"""
Load and arrange data for regression
"""
#parameter for the data sets
if X_val is None:
num_train = round(X.shape[0]*train_ratio)
else:
num_train = X.shape[0]
if X_test is not None:
test = True
num_test = X_test.shape[0]
else:
test = False
x_num_features = X.shape[1]
y_num_features = Y.shape[1]
print('======== Pre-process Data =========')
if X_val is None:
scaler = StandardScaler()
scaler.fit(X[:num_train])
X_train = scaler.transform(X[:num_train])
X_val = scaler.transform(X[num_train:])
scalery = StandardScaler()
scalery.fit(Y[:num_train])
Y_train=scalery.transform(Y[:num_train])
Y_val = scalery.transform(Y[num_train:])
else:
scaler = StandardScaler()
scaler.fit(X)
X_train = scaler.transform(X)
X_val = scaler.transform(X_val)
scalery = StandardScaler()
scalery.fit(Y)
Y_train=scalery.transform(Y)
Y_val = scalery.transform(Y_val)
if test:
X_test = scaler.transform(X_test)
Y_test = scalery.transform(Y_test)
input_size_x = x_num_features
input_size_y = y_num_features
print('======== Training =========')
g_train=RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps=num_steps, num_layers=num_layers, input_size_x=input_size_x,
input_size_y=input_size_y , learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)
train_loss,val_loss,num_parameter = RNN_fd.train_rnn(X_train,Y_train,X_val,Y_val,
g_train ,num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob,
verbose=True, save=location, epoch_overlap=epoch_overlap, max_checks_without_progress=max_checks_without_progress,
epoch_before_val = epoch_before_val)
if train_loss is None:
return (None, None, None, (100000,100000,100000), 100000,100000,100000)
val_loss = np.array(val_loss)
if plot:
'''Plot the result'''
plt.figure()
s = 12
plt.plot(train_loss, color='xkcd:sky blue', label = 'train loss')
plt.plot(np.linspace(epoch_before_val-1,epoch_before_val+val_loss.shape[0]-1, num = val_loss.shape[0]), val_loss, color= 'xkcd:coral', label = 'val loss')
plt.title('Training and validation loss')
plt.ylabel('Loss')
plt.xlabel('# of epoch')
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('Training and validation error round ' + round_number +'.png', dpi = 600,bbox_inches='tight')
############################################################################
"""Training Final Results"""
g_train_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num_train , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
prediction_train,train_loss_final,_ = RNN_fd.test_rnn(X_train,Y_train, g_train_final, location, input_prob_test, output_prob_test, state_prob_test, num_train)
AIC = num_train*np.log(np.sum(train_loss_final)/y_num_features) + 2*num_parameter
AICc = num_train*np.log(np.sum(train_loss_final)/y_num_features) + (num_parameter+num_train)/(1-(num_parameter+2)/num_train)
BIC = num_train*np.log(np.sum(train_loss_final)/y_num_features) + num_parameter*np.log(num_train)
############################################################################
"""Validation Final Results"""
g_val_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= X_val.shape[0] , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
prediction_val,val_loss_final,_ = RNN_fd.test_rnn(X_val,Y_val, g_val_final, location, input_prob_test, output_prob_test, state_prob_test, X_val.shape[0])
###############################################for other test sets 0 step
"""Testing Results"""
if test:
g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num_test , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
prediction_test, test_loss_final,_ = RNN_fd.test_rnn(X_test,Y_test, g_test, location, input_prob_test, output_prob_test, state_prob_test, num_test)
else:
prediction_test = None
test_loss_final = None
#############################################plot training results
if plot:
import matplotlib
cmap = matplotlib.cm.get_cmap('Paired')
#plot the prediction vs real
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(Y_train[1:, j], color= cmap(j*2+1), label= 'real')
plt.plot(prediction_train[1:, j], '--', color= 'xkcd:coral', label = 'prediction')
plt.title('RNN Training data prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('RNN_train_var_' + str(j+1)+'.png', dpi = 600,bbox_inches='tight')
plt.figure(figsize=(5,3))
plt.plot(Y_val[1:, j], color= cmap(j*2+1), label= 'real')
plt.plot(prediction_val[1:, j], '--', color= 'xkcd:coral',label = 'prediction')
plt.title('RNN Validation data prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('RNN_val_var_' + str(j+1)+' round ' + round_number +'.png', dpi = 600,bbox_inches='tight')
if test:
plt.figure(figsize=(5,3))
plt.plot(Y_test[1:, j], color= cmap(j*2+1), label= 'real')
plt.plot(prediction_test[1:, j], '--',color= 'xkcd:coral', label = 'prediction')
plt.title('RNN Test data prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('RNN_test_var_' + str(j+1) + ' round ' + round_number + '.png', dpi = 600,bbox_inches='tight')
#plot fitting errors
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(prediction_train[1:,j]-Y_train[1:,j], color= cmap(j*2+1))
plt.title('RNN Training error for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y_pre - y',fontsize=s)
plt.tight_layout()
plt.savefig('RNN_train_var_'+ str(j+1)+' error.png', dpi = 600,bbox_inches='tight')
plt.figure(figsize=(5,3))
plt.plot(prediction_val[1:,j]-Y_val[1:,j], color= cmap(j*2+1))
plt.title('RNN Validation error for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y_pre - y',fontsize=s)
plt.tight_layout()
plt.savefig('RNN_val_var_' + str(j+1)+' round ' + round_number +' error.png', dpi = 600,bbox_inches='tight')
if test:
plt.figure(figsize=(5,3))
plt.plot(prediction_test[1:,j]-Y_test[1:,j], color= cmap(j*2+1))
plt.title('RNN Test error for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y_pre - y',fontsize=s)
plt.tight_layout()
plt.savefig('RNN_test_var_' + str(j+1) +' round ' + round_number +' error.png', dpi = 600,bbox_inches='tight')
return (prediction_train,prediction_val, prediction_test, (AIC,AICc,BIC), train_loss_final, val_loss_final, test_loss_final)
def timeseries_RNN_feedback_multi_train(X, Y, X_val, Y_val, timeindex_train, timeindex_val, X_test=None, Y_test=None,\
cell_type='e',activation = 'tanh', state_size = 2,\
batch_size = 1, epoch_overlap = None,num_steps = 10,\
num_layers = 1, learning_rate = 1e-2, lambda_l2_reg = 1e-3,\
num_epochs =200, input_prob = 0.95, output_prob = 0.95, state_prob = 0.95,\
input_prob_test = 1, output_prob_test = 1, state_prob_test = 1,\
max_checks_without_progress = 100,epoch_before_val=50, location='RNN_feedback_0', plot= False):
'''This function fits an RNN_feedback model to training data consisting of multiple time series, using the validation data to decide when to stop.
When test data is given, it is used to choose the hyperparameters; otherwise AIC computed on the training data is returned
for hyperparameter selection
Input:
X: training data predictors, numpy array: N x m
Y: training data response, numpy array: N x 1
X_val, Y_val: validation data predictors/response
timeindex_train, timeindex_val: dictionaries keyed from 1, each entry containing the time indices of that series
X_test: testing data predictors, numpy array: N_test x m
Y_test: testing data response, numpy array: N_test x 1
cell_type: str, type of RNN cell, can be LSTM, GRU, or anything else for BasicRNN, default = basicRNN
activation: str, type of activation function, can be relu, tanh, sigmoid, linear, default = tanh
state_size: int, number of states in the model
batch_size: int, number of batches used in training
epoch_overlap: None or int, None means no overlap between training batches; an int gives the spacing between consecutive batches (e.g. 0 means adjacent batches)
num_steps: int, number of steps of memory used in dynamic_RNN training
num_layers: int, number of RNN layers in the system, default = 1
learning_rate: float, learning rate for Adam, default = 1e-2
lambda_l2_reg: float, regularization weight, <= 0 means no regularization, default = 1e-3
num_epochs: int, maximum number of epochs considered in the system
input_prob, output_prob, state_prob: float, (0, 1], the keep probability for dropout during training, default = 0.95
input_prob_test, output_prob_test, state_prob_test: float, (0, 1], the keep probability for dropout during testing, default = 1 (no dropout)
max_checks_without_progress: int, number of epochs without validation improvement before early stopping, default = 100
epoch_before_val: int, number of training epochs before the validation set is used for early stopping, default = 50
location: str, name for saving the trained RNN-feedback model
Output:
(AIC or test results, prediction_train, prediction_test)
'''
print('========= Loading data =========')
"""
Load and arrange data for regression
"""
#parameter for the data sets
if X_test is not None:
test = True
num_test = X_test.shape[0]
else:
test = False
x_num_features = X.shape[1]
y_num_features = Y.shape[1]
print('======== Pre-process Data =========')
scaler = StandardScaler()
scaler.fit(X)
X_train = scaler.transform(X)
X_val = scaler.transform(X_val)
if test:
X_test = scaler.transform(X_test)
scalery = StandardScaler()
scalery.fit(Y)
Y_train=scalery.transform(Y)
Y_val = scalery.transform(Y_val)
if test:
Y_test=scalery.transform(Y_test)
input_size_x = x_num_features
input_size_y = y_num_features
print('======== Training =========')
g_train=RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps=num_steps, num_layers=num_layers, input_size_x=input_size_x,
input_size_y=input_size_y , learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)
train_loss,val_loss,num_parameter = RNN_fd.train_rnn_multi(X,Y,X_val,Y_val, timeindex_train, timeindex_val,
g_train ,num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob,
verbose=True, save=location, epoch_overlap=epoch_overlap, max_checks_without_progress=max_checks_without_progress,
epoch_before_val = epoch_before_val)
'''Plot the result'''
s = 12
val_loss= np.array(val_loss)
plt.plot(train_loss, color='xkcd:sky blue', label = 'train loss')
plt.plot(np.linspace(epoch_before_val-1,epoch_before_val+val_loss.shape[0]-1, num = val_loss.shape[0]), val_loss, color= 'xkcd:coral', label = 'val loss')
plt.title('Training and validation loss')
plt.ylabel('Loss')
plt.xlabel('# of epoch')
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('Training and validation error.png', dpi = 600,bbox_inches='tight')
############################################################################
cum = 0
train_loss_final = []
prediction_train = []
train_loss = []
"""Training Final Results"""
for index in range(len(timeindex_train)):
num = np.shape(timeindex_train[index+1])[0]
g_train_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
train_pred,loss,_ = RNN_fd.test_rnn(X_train[cum:cum+num],Y_train[cum:cum+num], g_train_final, location, input_prob_test, output_prob_test, state_prob_test, num)
prediction_train.append(train_pred)
train_loss.append(loss*num)
train_loss_final.append(loss)
if plot:
import matplotlib
cmap = matplotlib.cm.get_cmap('Paired')
#plot the prediction vs real
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(Y_train[cum+1:cum+num,j], color= cmap(j*2+1), label= 'real')
plt.plot(train_pred[1:,j], '--', color= 'xkcd:coral', label = 'prediction')
plt.title('RNN Training data prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('RNN_train_var_' + str(j+1)+'.png', dpi = 600,bbox_inches='tight')
#plot fitting errors
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(train_pred[1:,j]-Y_train[cum+1:cum+num,j], color= cmap(j*2+1))
plt.title('Training error for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y_pre - y',fontsize=s)
plt.tight_layout()
plt.savefig('RNN_train_var_' + str(j+1)+' error.png', dpi = 600,bbox_inches='tight')
cum += num
AIC = cum*np.log(np.sum(train_loss)/cum/y_num_features) + 2*num_parameter
AICc = cum*np.log(np.sum(train_loss)/cum/y_num_features) + (num_parameter+cum)/(1-(num_parameter+2)/cum)
BIC = cum*np.log(np.sum(train_loss)/cum/y_num_features) + np.log(cum)*num_parameter
############################################################################
cum = 0
prediction_val = []
val_loss_final = []
"""Validation Final Results"""
for index in range(len(timeindex_val)):
num = np.shape(timeindex_val[index+1])[0]
g_val_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
val_pred,loss,_ = RNN_fd.test_rnn(X_val[cum:cum+num],Y_val[cum:cum+num], g_val_final, location, input_prob_test, output_prob_test, state_prob_test, num)
prediction_val.append(val_pred)
val_loss_final.append(loss)
if plot:
import matplotlib
cmap = matplotlib.cm.get_cmap('Paired')
#plot the prediction vs real
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(Y_val[cum+1:cum+num, j], color= cmap(j*2+1), label= 'real')
plt.plot(val_pred[1:,j], '--', color= 'xkcd:coral', label = 'prediction')
plt.title('RNN Validation data prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('RNN_val_var_' + str(j+1)+ 'index' + str(index+1)+'.png', dpi = 600,bbox_inches='tight')
#plot fitting errors
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(val_pred[1:,j]-Y_val[cum+1:cum+num ,j], color= cmap(j*2+1))
plt.title('RNN Validation error for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y_pre - y',fontsize=s)
plt.tight_layout()
plt.savefig('RNN_val_var_' + str(j+1)+ 'index' + str(index+1)+' error.png', dpi = 600,bbox_inches='tight')
cum += num
###############################################for other test sets 0 step
"""Testing Results"""
if test:
g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num_test , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
prediction_test, test_loss_final,_ = RNN_fd.test_rnn(X_test,Y_test, g_test, location, input_prob_test, output_prob_test, state_prob_test, num_test)
if plot:
#plot the prediction vs real
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(Y_test[1:,j], color= cmap(j*2+1), label= 'real')
plt.plot(prediction_test[1:,j], '--', color= 'xkcd:coral', label = 'prediction')
plt.title('RNN Testing data prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('RNN Test_var_' + str(j+1)+ 'index' + str(index+1)+'.png', dpi = 600,bbox_inches='tight')
#plot fitting errors
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(prediction_test[1:,j]-Y_test[1:,j], color= cmap(j*2+1))
plt.title('RNN Testing error for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y_pre - y',fontsize=s)
plt.tight_layout()
plt.savefig('RNN Test_var_' + str(j+1)+ 'index' + str(index+1) +' error.png', dpi = 600,bbox_inches='tight')
else:
prediction_test = None
test_loss_final = None
return (prediction_train, prediction_val, prediction_test, (AIC,AICc,BIC), train_loss_final, val_loss_final, test_loss_final)
def timeseries_RNN_feedback_test(X, Y, X_test,Y_test, kstep = 1, cell_type='e',activation = 'tanh', state_size = 2,\
num_layers = 1, input_prob_test = 1, output_prob_test = 1, state_prob_test = 1,\
location='RNN_feedback_0', plot=True,round_number = ''):
'''This function evaluates a trained RNN_feedback model (restored from `location`) on test data,
producing 0-step up to k-step-ahead predictions and the corresponding losses
Input:
X: training data predictors, numpy array: N x m, used only to fit the scalers
Y: training data response, numpy array: N x 1, used only to fit the scalers
X_test: testing data predictors, numpy array: N_test x m
Y_test: testing data response, numpy array: N_test x 1
kstep: positive integer, number of steps of prediction ahead. The output at time instant t is calculated using previously measured outputs up to time t-K and inputs up to the time instant t.
cell_type: str, type of RNN cell, can be LSTM, GRU, or anything else for BasicRNN, default = basicRNN
activation: str, type of activation function, can be relu, tanh, sigmoid, linear, default = tanh
state_size: int, number of states in the model
num_layers: int, number of RNN layers in the system, default = 1
input_prob_test, output_prob_test, state_prob_test: float, (0, 1], the keep probability for dropout during testing, default = 1 (no dropout)
location: str, name of the saved trained RNN-feedback model
plot: boolean, whether to plot the figures or not
round_number: str, suffix used in the file names of the saved figures
Output:
(prediction_final, loss_final): predictions for each prediction horizon and the stacked losses
'''
print('========= Loading data =========')
"""
Load and arrange data for regression
"""
#parameter for the data sets
num_test = X_test.shape[0]
x_num_features = X.shape[1]
y_num_features = Y.shape[1]
print('======== Pre-process Data =========')
scaler = StandardScaler()
scaler.fit(X)
X =scaler.transform(X)
X_test = scaler.transform(X_test)
scalery = StandardScaler()
scalery.fit(Y)
Y = scalery.transform(Y)
Y_test=scalery.transform(Y_test)
input_size_x = x_num_features
input_size_y = y_num_features
#################################################
kstep = kstep-1 #adjustment for the test_rnn code to be comparable with matlab
###############################################k_STEP single layer
if num_layers == 1:
"""Testing 0 step"""
g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num_test , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = 0, lambda_l2_reg=0)
test_y_prediction, test_loss_final ,test_rnn_outputs = RNN_fd.test_rnn(X_test,Y_test, g_test, location, input_prob_test, output_prob_test, state_prob_test, num_test)
if kstep > 0:
"""Testing k step"""
g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= 1 , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = 0, lambda_l2_reg=0)
test_y_prediction_kstep, test_loss_kstep = RNN_fd.test_rnn_kstep(X_test,Y_test, test_y_prediction,test_rnn_outputs, g_test, location, input_prob_test, output_prob_test, state_prob_test, num_test, kstep=kstep)
else:
test_y_prediction_kstep = None
test_loss_kstep = None
###############################################k_STEP multi layer
else:
"""Testing 0 step"""
g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= 1 , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = 0, lambda_l2_reg=0)
test_y_prediction, test_loss_final, test_inter_state = RNN_fd.test_rnn_layer(X_test,Y_test, g_test, location, input_prob_test, output_prob_test, state_prob_test, num_test, num_layers)
if kstep > 0:
"""Testing k step"""
g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= 1 , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = 0, lambda_l2_reg=0)
test_y_prediction_kstep, test_loss_kstep = RNN_fd.test_rnn_kstep_layer(X_test,Y_test, test_y_prediction,test_inter_state, g_test, location, input_prob_test, output_prob_test, state_prob_test, num_test, kstep=kstep)
else:
test_y_prediction_kstep = None
test_loss_kstep = None
loss_final = np.vstack((test_loss_final,test_loss_kstep))
prediction_final = {}
for i in range(kstep+1):
if i == 0:
prediction_final[i+1] = test_y_prediction
else:
prediction_final[i+1] = test_y_prediction_kstep[i]
###############################################plot final
if plot:
import matplotlib
cmap = matplotlib.cm.get_cmap('Paired')
s=12
test_prediction_plot = {}
for i in range(kstep+1):
if i == 0:
test_prediction_plot[i] = test_y_prediction
else:
test_prediction_plot[i] = test_y_prediction_kstep[i]
if X.shape[0] == X_test.shape[0]:
if np.sum(np.abs(X - X_test)) < 1e-4:
name = 'Train' +round_number
else:
name = 'Test' +round_number
else:
name = 'Test' + round_number
#plot the prediction vs real
for i in range(kstep+1):
for j in range(y_num_features):
plt.figure(figsize=(5,3))
plt.plot(Y_test[i+1:,j], color= cmap(j*2+1), label= 'real')
plt.plot(test_prediction_plot[i][1:,j], '--',color= 'xkcd:coral', label = 'prediction')
plt.title(name+' data ' + str(i+1) +'-step prediction for y' + str(j+1),fontsize=s)
plt.xlabel('Time index',fontsize=s)
plt.ylabel('y',fontsize=s)
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig(name+'_var_' + str(j+1)+'_step_'+str(i+1)+ '.png', dpi = 600,bbox_inches='tight')
#plot fitting errors
max_limit=np.max(test_prediction_plot[kstep][kstep+1:],axis=0)
min_limit=np.min(test_prediction_plot[kstep][kstep+1:],axis=0)
fig2, axs2 = plt.subplots(kstep+1,y_num_features,figsize=(3*y_num_features,2*(kstep+1)))
if y_num_features >1:
for i in range(kstep+1):
for j in range(y_num_features):
axs2[i,j].plot(test_prediction_plot[i][1:,j]-Y_test[i+1:,j], color= cmap(j*2+1))
axs2[i,j].set_title(name + ' data ' + str(i+1) +'-step error for y' + str(j+1), fontsize=s)
axs2[i,j].set_ylim(min_limit[j]-abs(min_limit[j])*0.5,max_limit[j]*1.5)
if i == kstep-1:
axs2[i,j].set_xlabel('Time index', fontsize=s)
else:
for i in range(kstep+1):
axs2[i].plot(test_prediction_plot[i][1:]-Y_test[i+1:], color= cmap(2+1))
axs2[i].set_title(name + ' data ' + str(i+1) +'-step error for y' + str(1), fontsize=s)
axs2[i].set_ylim(min_limit-abs(min_limit)*0.5,max_limit*1.5)
if i == kstep-1:
axs2[i].set_xlabel('Time index', fontsize=s)
fig2.tight_layout()
plt.savefig(name + ' error kstep.png', dpi=600,bbox_inches='tight')
#MSE for prediction results over different steps
MSE_test= np.vstack((test_loss_final,test_loss_kstep))
for i in range(y_num_features):
plt.figure(figsize=(3,2))
plt.plot(np.linspace(1,MSE_test.shape[0],num=MSE_test.shape[0]),MSE_test[:,i], 'd-', color = cmap(i*2+1))
plt.title(name+' MSE for y' + str(i) +' prediction', fontsize = s)
plt.xlabel('k-step ahead', fontsize = s)
plt.ylabel('MSE', fontsize = s)
plt.tight_layout()
plt.savefig('MSE_'+name+'_var_'+str(i)+'.png', dpi=600,bbox_inches='tight')
return (prediction_final, loss_final)
``` |
{
"source": "1214367903/CubeOnline",
"score": 2
} |
#### File: CubeOnline/server/app.py
```python
import os
import socket
from abc import ABC
from typing import Any
import tornado.ioloop
import tornado.web
import uvloop
from tornado.options import define, options, parse_command_line
from tornado.platform.asyncio import BaseAsyncIOLoop
import config
from util import init_utils, log
logger = log.get_logger()
def get_address() -> str:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
s.close()
return ip
class TornadoUvloop(BaseAsyncIOLoop, ABC):
def initialize(self, **kwargs: Any) -> None:
loop = uvloop.new_event_loop()
super().initialize(loop, close_loop=True, **kwargs)
def run_server() -> None:
# First, use uvloop in place of Tornado's default event loop
tornado.ioloop.IOLoop.configure(TornadoUvloop)
# Next, initialize the components that require asynchronous initialization
tornado.ioloop.IOLoop.current().run_sync(init_utils)
# Next, set up the server options
define('address', default=get_address(), help='Run server on a specific address', type=str)
define('port', default=8888, help='Run server on a specific port', type=int)
options.log_file_prefix = os.path.join(config.log_path, 'tornado.log')
parse_command_line()
# Next, configure the application
application = tornado.web.Application(handlers=config.handlers, **config.app_settings)
application.listen(options.port, address=options.address)
logger.info(f'run server in {options.address}:{options.port}')
# Finally, start the event loop
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
run_server()
```
#### File: server/controller/user.py
```python
import asyncio
import functools
import json
from collections.abc import Coroutine
from typing import Any, Callable, Optional, Union
from tornado.escape import json_decode
from tornado.ioloop import IOLoop
from form.forms import BaseForm
from util import cache
redis = cache.redis()
user_items = (
'username',
'usericon'
)
"""
The user module is built around Redis.
Put simply, each user's data is stored in Redis as the value, keyed by a unique, non-repeating id.
This is then used to implement sessions, user authentication and similar features.
"""
async def create_cache(session_id: str, form_obj: BaseForm) -> None:
# Call this method to store the required user data in the cache; after that, the function below can be used to obtain a Session object
user_data = {key: form_obj.get(key) for key in user_items}
await redis.hmset_dict(session_id, user_data)
async def create_session_obj(session_id: str) -> Optional['Session']:
# A Session object can only be created if this session_id and its data are already in the cache; otherwise it cannot be created
user_cache = await redis.hgetall(session_id)
for item in user_items:
if item not in user_cache:
return None
return Session(session_id)
# For Redis hash data, keys and values support the following data types
hash_type = Union[bytearray, bytes, float, int, str]
class Session:
def __init__(self, session_id: str) -> None:
self.session_id = session_id
async def get(self, key: hash_type, default: Any = None) -> Any:
result = await redis.hget(self.session_id, key)
if result is not None:
try:
result = json.loads(result)
except json.decoder.JSONDecodeError:
pass
return result
else:
return default
async def set(self, key: hash_type, value: hash_type, timeout: int = None) -> None:
value = json.dumps(value)
await redis.hset(self.session_id, key, value)
if timeout is not None:
IOLoop.current().add_callback(self.expire, key, timeout)
async def expire(self, key: hash_type, seconds: int) -> None:
await asyncio.sleep(seconds)
await self.delete(key)
async def delete(self, key: hash_type) -> None:
await redis.hdel(self.session_id, key)
async def get_all(self) -> dict:
dic = await redis.hgetall(self.session_id)
return dic
async def delete_all(self) -> None:
await redis.delete(self.session_id)
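# Illustrative usage (sketch; assumes a session_id whose data was already stored via create_cache):
# session = await create_session_obj(session_id)
# name = await session.get('username')
# await session.set('score', 100, timeout=60)   # the 'score' field is deleted after 60 seconds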
def authenticated(method: Callable) -> Callable:
"""
This decorator is meant for the network request methods of a RequestHandler; it verifies that the user is already logged in on the backend.
If the data sent by the user contains no session_id, or the session_id is wrong, the request is simply ignored (return).
Otherwise a Session instance is created for the handler object, and any user information needed later can be fetched directly from the session.
"""
@functools.wraps(method)
async def wrapper(self, *args: Any, **kwargs: Any) -> None:
try:
session_id = json_decode(self.request.body)['session_id']
except (json.decoder.JSONDecodeError, KeyError):
return
session = await create_session_obj(session_id)
if session is None:
self.resp.set_error_message('User not logged in!')
self.write(self.resp)
return
self.session = session
result = method(self, *args, **kwargs)
if isinstance(result, Coroutine):
await result
return wrapper
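# Illustrative usage (sketch; the handler class and endpoint are hypothetical):
# class ProfileHandler(BaseHandler):
#     @authenticated
#     async def post(self):
#         username = await self.session.get('username')
#         ...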
```
#### File: server/util/scrambler.py
```python
from typing import Dict, List, Optional
import requests
import tornado.ioloop
from aiohttp.client_exceptions import ClientConnectorError
import util.connector
from config import contest_to_cube, scrambler_config
from util import UtilError, log
client = util.connector.get_client()
logger = log.get_logger()
_scramble_pool = None
def get_scramble_pool() -> Optional['ScramblePool']:
if _scramble_pool is None:
raise UtilError('The scramble pool has not been initialized')
return _scramble_pool
async def init() -> None:
global _scramble_pool
if _scramble_pool is not None:
return
_scramble_pool = await ScramblePool.instance()
class ScramblePool:
"""
    This pool stores scrambles for the various cube types and is a singleton.
    initialize() must be called to fill the pool; otherwise you get an empty one.
"""
_pool = None
@classmethod
async def instance(cls) -> 'ScramblePool':
if cls._pool is None:
cls._pool = cls(**scrambler_config)
try:
await cls._pool.initialize()
except ClientConnectorError:
raise UtilError('the scrambler server has not started')
return cls._pool
def __init__(self, capacity: Dict[str, int], url: str) -> None:
self.capacity = capacity
self.url = url
self.scrambles = {}
self.flags = {}
async def initialize(self) -> None:
for cube in self.capacity.keys():
self.scrambles[cube] = await self.request_scrambles(cube)
self.flags[cube] = False
def get_scramble(self, contest: str = '333') -> str:
"""
        Called by the outside world to take one scramble for the given event from the pool.
        The pool first looks for a scramble in its own cache and returns it.
        If the cached scrambles are running low, a callback is added to top them up.
        The worst case is an empty cache, where we have to refill synchronously first;
        if that happens often, the capacity in config should be increased.
"""
cube = contest_to_cube[contest]
if not self.scrambles[cube]:
self.scrambles[cube] += self.request_scrambles_sync(cube)
logger.warning(f'{cube}的缓存打乱已经被击穿')
scramble = self.scrambles[cube].pop()
if len(self.scrambles[cube]) <= self.capacity[cube] // 2:
tornado.ioloop.IOLoop.current().add_callback(self.supplement_scramble, cube)
return scramble
async def supplement_scramble(self, cube: str) -> None:
        # When one cube's scrambles run low this function may be triggered several times,
        # so a flag is used to block the redundant refills
if self.flags[cube]:
return
self.flags[cube] = True
scrambles = await self.request_scrambles(cube)
self.scrambles[cube] += scrambles
self.flags[cube] = False
async def request_scrambles(self, cube: str) -> List[str]:
response = await client.get(self.url.format(cube=cube, n=self.capacity[cube]))
content = await response.text()
return content.split('\r\n')[:-1]
def request_scrambles_sync(self, cube: str) -> List[str]:
response = requests.get(self.url.format(cube=cube, n=self.capacity[cube]))
return response.text.split('\r\n')[:-1]
``` |
{
"source": "1214367903/myblog",
"score": 2
} |
#### File: myblog/blog/tests.py
```python
from django.apps import apps
from django.test import TestCase
from django.urls import reverse
from .feeds import AllPostsRssFeed
from .models import Article, Category
class BasicTestCase(TestCase):
    # This class creates the test data and disables full-text index generation; test cases just inherit from it
def setUp(self):
apps.get_app_config('haystack').signal_processor.teardown()
self.category1 = Category.objects.create(name='测试分类一')
self.category2 = Category.objects.create(name='测试分类二')
self.article1 = Article.objects.create(
title='测试标题一',
content='测试内容一',
category=self.category1,
)
self.article2 = Article.objects.create(
title='测试标题二',
content='测试内容二',
category=self.category2,
)
class ArticleModelTestCase(BasicTestCase):
def test_str_representation(self):
self.assertEqual(self.article1.__str__(), self.article1.title)
def test_get_absolute_url(self):
expected_url = reverse('blog:detail', kwargs={'pk': self.article1.pk})
self.assertEqual(self.article1.get_absolute_url(), expected_url)
def test_increase_views(self):
for _ in range(10):
views = self.article1.views
self.article1.viewed()
self.article1.refresh_from_db()
self.assertEqual(self.article1.views, views + 1)
class CategoryModelTestCase(BasicTestCase):
def setUp(self):
self.category1 = Category.objects.create(name='测试分类')
def test_str_representation(self):
self.assertEqual(self.category1.__str__(), self.category1.name)
def test_get_absolute_url(self):
expected_url = reverse('blog:category', kwargs={'name': self.category1.name})
self.assertEqual(self.category1.get_absolute_url(), expected_url)
class IndexViewTestCase(BasicTestCase):
def setUp(self):
super().setUp()
self.url = reverse('blog:index')
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('blog/index.html')
self.assertContains(response, self.article1.title)
self.assertContains(response, self.article2.title)
class CategoryViewTestCase(BasicTestCase):
def setUp(self):
super().setUp()
self.url = self.category1.get_absolute_url()
def test_visit_a_nonexistent_category(self):
url = reverse('blog:category', kwargs={'name': 'nonexistent'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('blog/detail.html')
self.assertContains(response, self.article1.title)
expected_qs = self.category1.article_set.all().order_by('-created_date')
self.assertQuerysetEqual(response.context['articles'], [repr(p) for p in expected_qs])
class ArticleViewTestCase(BasicTestCase):
def setUp(self):
super().setUp()
self.url = reverse('blog:detail', kwargs={'pk': self.article1.pk})
def test_visit_a_nonexistent_article(self):
url = reverse('blog:detail', kwargs={'pk': 0})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_increase_views(self):
for _ in range(10):
views = self.article1.views
self.client.get(self.url)
self.article1.refresh_from_db()
self.assertEqual(self.article1.views, views + 1)
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.context['article'], self.article1)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('blog/detail.html')
self.assertContains(response, self.article1.title)
class RSSTestCase(BasicTestCase):
def setUp(self):
super().setUp()
self.url = reverse('rss')
def test_rss_subscription_content(self):
response = self.client.get(self.url)
self.assertContains(response, AllPostsRssFeed.title)
self.assertContains(response, AllPostsRssFeed.description)
self.assertContains(response, self.article1.title)
self.assertContains(response, self.article2.title)
self.assertContains(response, f'[{self.article1.category}] {self.article1.title}')
self.assertContains(response, f'[{self.article2.category}] {self.article2.title}')
``` |
{
"source": "121me/ashna-bot",
"score": 3
} |
#### File: 121me/ashna-bot/email_addresses.py
```python
with open(file='domains.txt', mode='r', encoding='utf-8') as domains_txt:
domains = dict(line.strip().split(':') for line in domains_txt.readlines())
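# Note: domains.txt is assumed to contain one 'key:value' pair per line,
# e.g. 'metu:metu.edu.tr' (illustrative only); dict() turns those pairs into a lookup table.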
def main():
print(domains)
if __name__ == '__main__':
main()
```
#### File: 121me/ashna-bot/users.py
```python
from datetime import datetime
import sqlite3
from typing import Any, Optional
date_time_format = "%Y.%m.%d.%H.%M.%S"
user_attr = [
"id",
"name",
"age",
"university",
"gender",
"bio",
"email_address",
"so",
"lang",
"profile_step",
"last_saved_media_name",
"media_type",
"is_profile_complete",
"verf_code",
"co",
"last_online"]
def datetime_now() -> str:
return datetime.now().strftime(date_time_format)
class UsersDB:
def __init__(self, dbname="users.db") -> None:
self.dbname = dbname
self.con = sqlite3.connect(dbname, check_same_thread=False)
self.cur = self.con.cursor()
self.setup()
def setup(self) -> None:
stmt = """
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY,
name TEXT,
age INTEGER,
university TEXT,
gender VARCHAR(1),
bio TEXT,
email_address TEXT,
so VARCHAR(1),
lang VARCHAR(2),
profile_step INTEGER,
last_saved_media_name INTEGER,
media_type VARCHAR(3),
is_profile_complete INTEGER,
verf_code VARCHAR(16),
co INTEGER,
last_online TEXT
);
CREATE TABLE IF NOT EXISTS pending_matches (
user1 INTEGER PRIMARY KEY,
user2 INTEGER,
date TEXT
);
CREATE TABLE IF NOT EXISTS matches (
user1 INTEGER,
user2 INTEGER,
liked INTEGER,
date TEXT
);
CREATE TABLE IF NOT EXISTS complaints (
user1 INTEGER,
user2 INTEGER,
complaint INTEGER,
date TEXT
);
CREATE TABLE IF NOT EXISTS errors (
user_id INTEGER,
error INTEGER,
desc TEXT,
date TEXT
);"""
self.cur.executescript(stmt)
self.con.commit()
# users functions
def add_initial(self, user_id: int) -> None:
stmt = "INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
args = (
user_id,
'',
0,
'',
'',
'',
'',
'b',
'',
0,
0,
'',
0,
'',
6,
datetime_now())
self.cur.execute(stmt, args)
self.con.commit()
def check_user(self, user_id: int) -> bool:
stmt = "SELECT EXISTS(SELECT 1 from users WHERE id = ?);"
args = (user_id,)
self.cur.execute(stmt, args)
return self.cur.fetchone() == (1,)
def set_lang(self, user_id: int, lang: str) -> None:
stmt = "UPDATE users SET lang = ?, profile_step = 1 WHERE id = ?;"
args = (lang, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_name(self, user_id: int, user_name: str) -> None:
stmt = "UPDATE users SET name = ?, profile_step = 2 WHERE id = ?;"
args = (user_name, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_age(self, user_id: int, age: int) -> None:
stmt = "UPDATE users SET age = ?, profile_step = 3 WHERE id = ?;"
args = (age, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_university(self, user_id: int, university: int) -> None:
stmt = "UPDATE users SET university = ? WHERE id = ?;"
args = (university, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_gender(self, user_id: int, gender: str) -> None:
stmt = "UPDATE users SET gender = ?, profile_step = 4 WHERE id = ?;"
args = (gender, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_bio(self, user_id: int, bio: str) -> None:
stmt = "UPDATE users SET bio = ?, profile_step = 5 WHERE id = ?;"
args = (bio, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_media(self, user_id: int, media_type: str) -> None:
stmt = "UPDATE users SET last_saved_media_name = last_saved_media_name + 1, profile_step = 6, media_type = ? WHERE id = ?;"
args = (media_type, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_email_address(self, user_id: int, email_address: str) -> None:
stmt = "UPDATE users SET email_address = ?, profile_step = 7 WHERE id = ?;"
args = (email_address, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def check_email_address(self, email_address: str) -> bool:
stmt = "SELECT EXISTS(SELECT 1 FROM users WHERE email_address = ? AND is_profile_complete = 1);"
args = (email_address,)
self.cur.execute(stmt, args)
return self.cur.fetchone() == (1,)
def add_so(self, user_id: int, so: str) -> None:
stmt = "UPDATE users SET so = ? WHERE id = ?;"
args = (so, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def add_verf_code(self, user_id: int, code: str) -> None:
        stmt = "UPDATE users SET verf_code = ?, profile_step = 8 WHERE id = ?;"  # column is verf_code, matching the schema
args = (code, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def check_verf_code(self, user_id: int, code: str) -> bool:
stmt = "SELECT EXISTS(SELECT 1 FROM users WHERE id = ? AND verf_code = ?);"
args = (user_id, code)
self.cur.execute(stmt, args)
return self.cur.fetchone() == (1,)
def change_profile_step(self, user_id: int, profile_step: int) -> None:
stmt = "UPDATE users SET profile_step = ? WHERE id = ?;"
args = (profile_step, user_id)
self.cur.execute(stmt, args)
self.con.commit()
def mark_profile_as_completed(self, user_id: int) -> None:
stmt = "UPDATE users SET is_profile_complete = 1 WHERE id = ?;"
args = (user_id,)
self.cur.execute(stmt, args)
self.con.commit()
    def get_user(self, user_id: int) -> Optional[dict]:
stmt = "SELECT * FROM users WHERE id = ?;"
args = (user_id,)
self.cur.execute(stmt, args)
try:
return dict(zip(user_attr, self.cur.fetchone()))
except TypeError:
return None
def clear_tables(self) -> None:
stmt = """
DELETE FROM users;
DELETE FROM matches;
DELETE FROM pending_matches;"""
self.cur.executescript(stmt)
self.con.commit()
def all_user_ids(self) -> list:
stmt = "SELECT id FROM users"
args = ()
self.cur.execute(stmt, args)
        # fetchall() returns every row; fetchmany() would only return the first batch
        return self.cur.fetchall()
def delete_user(self, user_id: int) -> None:
stmt = "DELETE FROM users WHERE id = ?;"
args = (user_id,)
self.cur.execute(stmt, args)
self.con.commit()
# names functions
def add_db_names(self, names: list) -> None:
stmt = "INSERT INTO names VALUES (?, ?);"
args = names
self.cur.executemany(stmt, args)
self.con.commit()
def add_db_name(self, name: str, gender: str) -> None:
stmt = "INSERT INTO names VALUES (?, ?);"
args = (name, gender)
self.cur.execute(stmt, args)
self.con.commit()
def check_name(self, name: str) -> bool:
stmt = "SELECT EXISTS(SELECT 1 FROM names WHERE name = ?);"
args = (name,)
self.cur.execute(stmt, args)
return self.cur.fetchone() == (1,)
# pending matches functions
def add_pending_match(self, user1: int, user2: int) -> None:
stmt = """INSERT INTO pending_matches(user1, user2) VALUES(?, ?)
ON CONFLICT(user1) DO UPDATE SET user2 = ?;"""
args = (
user1,
user2,
user2
)
self.cur.execute(stmt, args)
self.con.commit()
def get_pending_match(self, user_id: int) -> Any:
stmt = "SELECT user2 FROM pending_matches WHERE user1 = ?"
args = (
user_id,
)
self.cur.execute(stmt, args)
try:
return self.cur.fetchone()[0]
except TypeError:
return None
def delete_pending_match(self, user_id: int) -> None:
stmt = "DELETE FROM pending_matches WHERE user1 = ?;"
args = (
user_id,
)
self.cur.execute(stmt, args)
self.con.commit()
# matches functions
def add_match(self, user_id_1: int, user_id_2: int, action: int) -> None:
stmt = "INSERT INTO matches VALUES (?, ?, ?, ?);"
args = (
user_id_1,
user_id_2,
action,
datetime.now().strftime(date_time_format)
)
self.cur.execute(stmt, args)
self.con.commit()
def check_perfect_match(self, user_id_1: int, user_id_2: int) -> bool:
stmt = "SELECT EXISTS(SELECT 1 FROM matches WHERE user1 = ? and user2 = ? and liked = 1)"
args = (user_id_1, user_id_2)
self.cur.execute(stmt, args)
return self.cur.fetchone() == (1,)
def get_next_swipe(self, user_id: int, gender: str, so: str) -> Any:
if gender == 'm':
g = 'm', 'f'
elif gender == 'f':
g = 'f', 'm'
else:
g = 'm', 'f'
if so == 'o':
gso = '{0}o|{0}b'
elif so == 'e':
gso = '{1}e|{1}b'
elif so == 'b':
gso = '{0}o|{1}e|{0}b|{1}b'
else:
gso = '{0}o|{1}e|{0}b|{1}b'
        gso = gso.format(*g)  # unpack the tuple so {0} and {1} pick up the two gender codes
        # quote the values so the generated SQL compares against string literals, e.g. (gender = 'm' AND so = 'o')
        gso = " OR ".join(f"(gender = '{pair[0]}' AND so = '{pair[1]}')" for pair in gso.split('|')) + ' AND '
stmt = f'''
SELECT id
FROM users
WHERE
{gso}
id NOT IN (
SELECT user2
FROM matches
WHERE user1 = ?
)
AND id NOT IN (
SELECT user1
FROM matches
WHERE user2 = ?
)
AND id != ?
OR id IN (
SELECT user1
FROM matches
WHERE user2 = ? AND liked = 1 AND user1 NOT IN (
SELECT user2
FROM matches
WHERE user1 = ?
)
);
'''
args = (user_id,) * 5
self.cur.execute(stmt, args)
try:
return self.cur.fetchone()[0]
except TypeError:
return None
# complaints functions
def add_complaint(self, user1: int, user2: int, complaint: int) -> None:
        stmt = "INSERT INTO complaints VALUES (?, ?, ?, ?);"
        args = (
            user1,
            user2,
            complaint,
            datetime_now()  # the complaints table also has a date column
        )
self.cur.execute(stmt, args)
self.con.commit()
# errors functions
def add_error(self, user_id: int, error_code: int) -> None:
"""
1. name error with '!'
"""
        stmt = "INSERT INTO errors VALUES (?, ?, ?, ?);"
        args = (
            user_id,
            error_code,
            '',  # desc column; no description text is recorded here
            datetime.now().strftime(date_time_format)
        )
self.cur.execute(stmt, args)
self.con.commit()
'''
with open(".\\isimleri-filtrele\\birlesik.txt", mode="r", encoding="utf-8-sig") as file:
lines = [line.strip('\n').split(',') for line in file.readlines()]
test_db = UsersDB()
test_db.add_db_names(lines)
'''
pass
``` |
{
"source": "1222-takeshi/ros2-mpu6050-driver",
"score": 3
} |
#### File: ros2-mpu6050-driver/src/mpu6050_sample.py
```python
import smbus
import math
from time import sleep
DEV_ADDR = 0x68
ACCEL_XOUT = 0x3b
ACCEL_YOUT = 0x3d
ACCEL_ZOUT = 0x3f
TEMP_OUT = 0x41
GYRO_XOUT = 0x43
GYRO_YOUT = 0x45
GYRO_ZOUT = 0x47
PWR_MGMT_1 = 0x6b
PWR_MGMT_2 = 0x6c
bus = smbus.SMBus(1)
bus.write_byte_data(DEV_ADDR, PWR_MGMT_1, 0)
def read_word(adr):
high = bus.read_byte_data(DEV_ADDR, adr)
low = bus.read_byte_data(DEV_ADDR, adr+1)
val = (high << 8) + low
return val
# Sensor data read
def read_word_sensor(adr):
val = read_word(adr)
if (val >= 0x8000): # minus
return -((65535 - val) + 1)
else: # plus
return val
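# e.g. a raw value of 0xFFFF maps to -1 and 0x8000 to -32768 (16-bit two's complement)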
def get_temp():
temp = read_word_sensor(TEMP_OUT)
    x = temp / 340 + 36.53  # conversion formula from the data sheet (register map)
return x
def getGyro():
x = read_word_sensor(GYRO_XOUT)/ 131.0
y = read_word_sensor(GYRO_YOUT)/ 131.0
z = read_word_sensor(GYRO_ZOUT)/ 131.0
return [x, y, z]
def getAccel():
x = read_word_sensor(ACCEL_XOUT)/ 16384.0
y= read_word_sensor(ACCEL_YOUT)/ 16384.0
z= read_word_sensor(ACCEL_ZOUT)/ 16384.0
return [x, y, z]
while 1:
ax, ay, az = getAccel()
gx, gy, gz = getGyro()
#print ('{:4.3f},{:4.3f}, {:4.3f}, {:4.3f},{:4.3f}, {:4.3f},' .format(gx, gy, gz, ax, ay, az))
roll = math.atan(ay/az) * 57.324
pitch = math.atan(-ax / math.sqrt( ay* ay+ az*az ) ) * 57.324
#pitch = math.atan(-ax / (ay*math.sin(roll) + az*math.cos(roll)))
    # The print() statement below was wrong and has been corrected. Apologies.
print('{:4.3f}, {:4.3f},' .format(pitch, roll))
``` |
{
"source": "122333211121/crawler_chanmama",
"score": 3
} |
#### File: 122333211121/crawler_chanmama/spider.py
```python
import json
import requests
import time
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
}
def login(username, password):
    timestamp = str(int(time.time()))  # current time, cast to int and then to str
url = 'https://api-service.chanmama.com/v1/access/token'
    data = {"appId": '10000', "timeStamp": timestamp, "username": username, "password": password}  # login payload
response = requests.session().post(url, headers=headers, data=data).json()
return response
def get_the_page(url, params, response):
cookies={}
    cookies['LOGIN-TOKEN-FORSNS'] = response['data']['token']  # build the login cookie from the returned token
response = requests.get(url, headers=headers, cookies=cookies, params=params).text
return response
# Each leaderboard needs different fields, so a separate function parses each one's data
def yesterday_sale_rank(text):
data = []
    text = json.loads(text)  # parse the JSON payload
text = text['data']
for text in text:
        # Pull each field out of the dict; message must be a fresh dict on every iteration,
        # otherwise every collected entry would point at the same (last) item
message['rank'] = text['rank'] # 排名
message['name'] = text['title'] # 商品名
message['price'] = text['coupon_price'] # 商品价格
message['rate'] = '{:.2%}'.format(text['rate']/100) # 佣金比例
message['yesterday_sales'] = text['day_order_count'] # 昨日销量
message['sales'] = text['amount'] # 销售额
message['month_sales'] = text['order_count'] # 月销量
message['conversion_rate'] = '{:.2%}'.format(text['month_conversion_rate']/100) # 30天转化率
message['platform'] = text['platform'] # 销售平台
message['image'] = text['image'] # 图片链接
data.append(message)
return data
def yesterday_hot_rank(text):
data = []
text = json.loads(text)
text = text['data']
for text in text:
message = {}
        message['rank'] = text['rank']  # rank
        message['name'] = text['title']  # product name
        message['price'] = text['coupon_price']  # price
        message['rate'] = '{:.2%}'.format(text['rate']/100)  # commission rate
        message['author'] = text['author_number']  # number of promoters yesterday
        message['month_sales'] = text['order_count']  # monthly sales
        message['conversion_rate'] = '{:.2%}'.format(text['month_conversion_rate']/100)  # 30-day conversion rate
        message['platform'] = text['platform']  # sales platform
        message['image'] = text['image']  # image URL
data.append(message)
return data
def live_rank(text):
data = []
text = json.loads(text)
text = text['data']
for text in text:
message = {}
        message['rank'] = text['rank']  # rank
        message['name'] = text['title']  # product name
        message['price'] = text['coupon_price']  # price
        message['rate'] = '{:.2%}'.format(text['rate']/100)  # commission rate
        message['hour_sales'] = text['sale_incr']  # sales in the last two hours
        message['month_sales'] = text['sales']  # monthly sales
        message['platform'] = text['platform']  # sales platform
        message['image'] = text['image']  # image URL
data.append(message)
return data
def all_day_rank(text):
return live_rank(text)
def speciality_today(text):
data = []
text = json.loads(text)
text = text['data']
for text in text:
message = {}
        message['rank'] = text['rank']  # rank
        message['name'] = text['shop_title']  # Douyin shop name
        message['yesterday_sales'] = text['total_volume']  # yesterday's sales volume
        message['yesterday_money'] = text['total_amount']  # yesterday's sales amount
        message['category'] = text['category']  # category
        message['video'] = text['video_count']  # linked videos
        message['live'] = text['live_count']  # linked live streams
        message['rate'] = '{:.2%}'.format(text['average_conversion_rate'])  # yesterday's conversion rate
        message['image'] = text['shop_icon']  # image URL
data.append(message)
return data
def rank(text):
data = []
text = json.loads(text)
text = text['data']['list']
for text in text:
message = {}
        message['rank'] = text['rank']  # rank
        message['name'] = text['brand_name']  # brand name
        message['yesterday_sales'] = text['day_order_count']  # yesterday's sales
        message['amount'] = text['amount']  # sales amount
        message['product_count'] = text['product_count']  # number of products
        message['video'] = text['aweme_count']  # linked videos
        message['live'] = text['live_count']  # linked live streams
        message['label'] = text['label']  # brand category
        message['image'] = text['brand_logo']  # image URL
data.append(message)
return data
def run(page, size):
username = '13974973299'
password = '<PASSWORD>'
response = login(username, password)
    url_list = ['yesterdaySaleRank', 'yesterdayHotRank', 'liveRank', 'allDayRank', 'specialtyToday', '']  # each entry is fetched in turn
for i in range(len(url_list)):
url = 'https://api-service.chanmama.com/v1/home/rank/' + str(url_list[i])
        # request parameters (the first four endpoints share the same ones)
params = (
('category', '美妆护理'),
('page', page),
('size', size),
)
if url_list[i] == 'yesterdaySaleRank':
print('开始爬取抖音销量榜————————————')
text = get_the_page(url, params, response)
message = yesterday_sale_rank(text)
print(message)
print('抖音销售榜爬取完毕————————————')
elif url_list[i] == 'yesterdayHotRank':
print('开始爬取抖音热推榜————————————')
text = get_the_page(url, params, response)
message = yesterday_hot_rank(text)
print(message)
print('抖音热推榜爬取完毕————————————')
elif url_list[i] == 'liveRank':
print('开始爬取实时销量榜————————————')
text = get_the_page(url, params, response)
message = live_rank(text)
print(message)
print('实时销量榜爬取完毕————————————')
elif url_list[i] == 'allDayRank':
print('开始爬取全天销量榜————————————')
text = get_the_page(url, params, response)
message = all_day_rank(text)
print(message)
print('全天销量榜爬取完毕————————————')
elif url_list[i] == 'specialtyToday':
print('开始爬取抖音小店榜————————————')
params = (
('category', '美妆护理'),
('order_by', 'amount'),
('page', page),
('size', size)
)
text = get_the_page(url, params, response)
message = speciality_today(text)
print(message)
print('抖音小店榜爬取完毕————————————')
        # the last leaderboard has a special URL, so it is handled separately
if url_list[i] == '':
print('开始爬取商品品牌榜榜————————————')
url = 'https://api-service.chanmama.com/v1/brand/rank/'
params = (
('day_type', 'day'),
('day', '2021-08-23'),
('category', '美妆护理'),
('sort', 'amount'),
('page', page),
('size', size)
)
text = get_the_page(url, params, response)
message = rank(text)
print(message)
print('商品品牌榜爬取完毕————————————')
if __name__ == '__main__':
    # run(page, size): page is the page to start crawling from, size is the number of records to fetch
run(1, 300)
``` |
{
"source": "122333211121/music_spider",
"score": 3
} |
#### File: 122333211121/music_spider/spider2.py
```python
import requests
import random
import base64
from Crypto.Cipher import AES
import json
import binascii
from bs4 import BeautifulSoup as BS
import os
import time
class Music_Api:
    # RSA modulus extracted from the site's JS, the negotiated AES key (nonce), the RSA public key and other constants
def __init__(self):
self.modulus = "00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7"
self.nonce = '0CoJUm6Qyw8W8jud'
self.pubKey = '010001'
self.url = "https://music.163.com/weapi/cloudsearch/get/web?csrf_token="
self.header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
}
self.file_path = './music'
if not os.path.exists(self.file_path):
os.makedirs(self.file_path)
self.secKey = self.getRandom()
    # generate a random 16-character key string
def getRandom(self):
string = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
res = ""
for i in range(16):
res += string[int(random.random() * 62)]
return res
    # AES encryption: encrypt text with secKey (CBC mode, PKCS7-style padding)
def aesEncrypt(self, text, secKey):
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
        encryptor = AES.new(secKey.encode('utf-8'), AES.MODE_CBC, '0102030405060708'.encode('utf-8'))
ciphertext = encryptor.encrypt(text.encode('utf-8'))
ciphertext = base64.b64encode(ciphertext).decode("utf-8")
return ciphertext
    # fast modular exponentiation: computes x**y mod mo
def quickpow(self, x, y, mo):
res = 1
while y:
if y & 1:
res = res * x % mo
y = y // 2
x = x * x % mo
return res
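    # e.g. quickpow(x=3, y=13, mo=7) returns 3, matching pow(3, 13, 7) (illustrative check)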
    # RSA encryption (textbook RSA applied to the reversed string)
def rsaEncrypt(self, text, pubKey, modulus):
text = text[::-1]
a = int(binascii.hexlify(str.encode(text)), 16)
b = int(pubKey, 16)
c = int(modulus, 16)
rs = self.quickpow(a, b, c)
return format(rs, 'x').zfill(256)
    # Build the request parameters used to search for the list.
    # The overall encryption scheme is:
    #   1. encrypt text with the shared nonce          -> ciphertext1
    #   2. encrypt ciphertext1 with the random secKey  -> ciphertext2
    #   3. encrypt secKey with the RSA public key      -> ciphertext3
    # ciphertext2 is sent as the 'params' field and ciphertext3 as 'encSecKey'.
    # The receiver recovers secKey from ciphertext3 with its private key,
    # uses secKey to decrypt ciphertext2 back into ciphertext1,
    # and finally decrypts ciphertext1 with the agreed nonce to obtain text.
def search(self, s, offset, type):
text = {
"hlpretag": "<span class=\"s-fc7\">",
"hlposttag": "</span>",
"#/discover": "",
"s": s,
"type": type,
"offset": offset,
"total": "true",
"limit": "30",
"csrf_token": ""
}
text = json.dumps(text)
params = self.aesEncrypt(self.aesEncrypt(text, self.nonce), self.secKey)
encSecKey = self.rsaEncrypt(self.secKey, self.pubKey, self.modulus)
data = {
'params': params,
'encSecKey': encSecKey
}
response = requests.post(url=self.url, data=data, headers=self.header).json()
return response
    # extract song ids from the response, depending on the search type
def get_playlist(self, response, type):
if type == '1':
result = response['result']['song']
id_list = []
for result in result:
id = result['id']
name = result['name']
id_list.append((name, id))
return id_list
if type == '1000':
result = response['result']['playlists']
id_list = []
for result in result:
id = result['id']
id_list.append(id)
song_list = []
for id_list in id_list:
url = 'https://music.163.com/playlist?id=' + str(id_list)
response = requests.get(url, headers=self.header).content
s = BS(response, 'lxml')
html = s.find('ul', {'class': 'f-hide'})
results = html.find_all('a')
for result in results:
song_name = result.text
song_id = result["href"]
song_id = song_id.replace('/song?id=', '')
song_list.append((song_name, song_id))
return song_list
    # download the songs using the collected song ids
def load_music(self, song_list):
basic_url = 'http://music.163.com/song/media/outer/url?id={}.mp3'
for song_name, song_id in song_list:
download_url = basic_url.format(song_id)
song = requests.get(download_url, headers=self.header)
time.sleep(1)
music_path = '{}/{}.{}'.format(self.file_path, song_name, 'mp3')
try:
if not os.path.exists(music_path):
with open(music_path, 'wb')as f:
f.write(song.content)
print('{}下载成功'.format(song_name))
time.sleep(0.5)
else:
print('{}已经下载'.format(song_name))
except:
print('{}下载失败'.format(song_name))
    # a simple entry-point helper
def run(self):
s = input('请输入搜索关键字:')
offset = input('请输入需要爬取的页面数:')
type_list = input('请输入爬取的类别:')
if type_list == '歌单':
type = '1000'
elif type_list == '歌曲':
type = '1'
response = self.search(s, offset, type)
song_list = self.get_playlist(response, type)
self.load_music(song_list)
if __name__ == '__main__':
Music_Api().run()
``` |
{
"source": "122689305/private-pgm",
"score": 2
} |
#### File: private-pgm/examples/cdp2adp.py
```python
import math
import matplotlib.pyplot as plt
#*********************************************************************
#Now we move on to concentrated DP
#compute delta such that
#rho-CDP implies (eps,delta)-DP
#Note that adding cts or discrete N(0,sigma2) to sens-1 gives rho=1/(2*sigma2)
#start with standard P[privloss>eps] bound via markov
def cdp_delta_standard(rho,eps):
assert rho>=0
assert eps>=0
if rho==0: return 0 #degenerate case
#https://arxiv.org/pdf/1605.02065.pdf#page=15
return math.exp(-((eps-rho)**2)/(4*rho))
#Our new bound:
# https://arxiv.org/pdf/2004.00010v3.pdf#page=13
def cdp_delta(rho,eps):
assert rho>=0
assert eps>=0
if rho==0: return 0 #degenerate case
#search for best alpha
#Note that any alpha in (1,infty) yields a valid upper bound on delta
# Thus if this search is slightly "incorrect" it will only result in larger delta (still valid)
# This code has two "hacks".
# First the binary search is run for a pre-specificed length.
# 1000 iterations should be sufficient to converge to a good solution.
# Second we set a minimum value of alpha to avoid numerical stability issues.
# Note that the optimal alpha is at least (1+eps/rho)/2. Thus we only hit this constraint
# when eps<=rho or close to it. This is not an interesting parameter regime, as you will
# inherently get large delta in this regime.
amin=1.01 #don't let alpha be too small, due to numerical stability
amax=(eps+1)/(2*rho)+2
for i in range(1000): #should be enough iterations
alpha=(amin+amax)/2
derivative = (2*alpha-1)*rho-eps+math.log1p(-1.0/alpha)
if derivative<0:
amin=alpha
else:
amax=alpha
#now calculate delta
delta = math.exp((alpha-1)*(alpha*rho-eps)+alpha*math.log1p(-1/alpha)) / (alpha-1.0)
return min(delta,1.0) #delta<=1 always
#Above we compute delta given rho and eps, now we compute eps instead
#That is we wish to compute the smallest eps such that rho-CDP implies (eps,delta)-DP
def cdp_eps(rho,delta):
assert rho>=0
assert delta>0
if delta>=1 or rho==0: return 0.0 #if delta>=1 or rho=0 then anything goes
epsmin=0.0 #maintain cdp_delta(rho,eps)>=delta
epsmax=rho+2*math.sqrt(rho*math.log(1/delta)) #maintain cdp_delta(rho,eps)<=delta
#to compute epsmax we use the standard bound
for i in range(1000):
eps=(epsmin+epsmax)/2
if cdp_delta(rho,eps)<=delta:
epsmax=eps
else:
epsmin=eps
return epsmax
#Now we compute rho
#Given (eps,delta) find the smallest rho such that rho-CDP implies (eps,delta)-DP
def cdp_rho(eps,delta):
assert eps>=0
assert delta>0
if delta>=1: return 0.0 #if delta>=1 anything goes
rhomin=0.0 #maintain cdp_delta(rho,eps)<=delta
rhomax=eps+1 #maintain cdp_delta(rhomax,eps)>delta
for i in range(1000):
rho=(rhomin+rhomax)/2
if cdp_delta(rho,eps)<=delta:
rhomin=rho
else:
rhomax=rho
return rhomin
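# Illustrative round-trip check (added here for illustration; not part of the original file):
# cdp_rho(1.0, 1e-6) finds the zCDP parameter rho matching (eps=1, delta=1e-6),
# and cdp_eps(rho, 1e-6) should map that rho back to approximately eps = 1.
if __name__ == '__main__':
    rho = cdp_rho(1.0, 1e-6)
    print(rho, cdp_eps(rho, 1e-6))  # the second value should be close to 1.0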
```
#### File: private-pgm/test/test_graphical_model.py
```python
import unittest
from mbi.domain import Domain
from mbi.factor import Factor
from mbi.graphical_model import GraphicalModel, CliqueVector
import numpy as np
class TestGraphicalModel(unittest.TestCase):
def setUp(self):
attrs = ['a','b','c','d']
shape = [2,3,4,5]
domain = Domain(attrs, shape)
cliques = [('a','b'), ('b','c'),('c','d')]
self.model = GraphicalModel(domain, cliques)
zeros = { cl : Factor.zeros(domain.project(cl)) for cl in self.model.cliques }
self.model.potentials = CliqueVector(zeros)
def test_datavector(self):
x = self.model.datavector()
ans = np.ones(2*3*4*5) / (2*3*4*5)
self.assertTrue(np.allclose(x, ans))
def test_project(self):
model = self.model.project(['d','a'])
x = model.datavector()
ans = np.ones(2*5) / 10.0
self.assertEqual(x.size, 10)
self.assertTrue(np.allclose(x, ans))
model = self.model
pot = { cl : Factor.random(model.domain.project(cl)) for cl in model.cliques }
model.potentials = CliqueVector(pot)
x = model.datavector(flatten=False)
y0 = x.sum(axis=(2,3)).flatten()
y1 = model.project(['a','b']).datavector()
self.assertEqual(y0.size, y1.size)
self.assertTrue(np.allclose(y0, y1))
x = model.project('a').datavector()
def test_krondot(self):
model = self.model
pot = { cl : Factor.random(model.domain.project(cl)) for cl in model.cliques }
model.potentials = CliqueVector(pot)
A = np.ones((1,2))
B = np.eye(3)
C = np.ones((1,4))
D = np.eye(5)
res = model.krondot([A,B,C,D])
x = model.datavector(flatten=False)
ans = x.sum(axis=(0,2), keepdims=True)
self.assertEqual(res.shape, ans.shape)
self.assertTrue(np.allclose(res, ans))
def test_calculate_many_marginals(self):
proj = [[],['a'],['b'],['c'],['d'],['a','b'],['a','c'],['a','d'],['b','c'],
['b','d'],['c','d'],['a','b','c'],['a','b','d'],['a','c','d'],['b','c','d'],
['a','b','c','d']]
proj = [tuple(p) for p in proj]
model = self.model
model.total = 10.0
pot = { cl : Factor.random(model.domain.project(cl)) for cl in model.cliques }
model.potentials = CliqueVector(pot)
results = model.calculate_many_marginals(proj)
for pr in proj:
ans = model.project(pr).values
close = np.allclose(results[pr].values, ans)
print(pr, close, results[pr].values, ans)
self.assertTrue(close)
def test_belief_prop(self):
pot = self.model.potentials
self.model.total = 10
mu = self.model.belief_propagation(pot)
for key in mu:
ans = self.model.total/np.prod(mu[key].domain.shape)
self.assertTrue(np.allclose(mu[key].values, ans))
pot = { cl : Factor.random(pot[cl].domain) for cl in pot }
mu = self.model.belief_propagation(pot)
logp = sum(pot.values())
logp -= logp.logsumexp()
dist = logp.exp() * self.model.total
for key in mu:
ans = dist.project(key).values
res = mu[key].values
self.assertTrue(np.allclose(ans, res))
def test_synthetic_data(self):
model = self.model
sy = model.synthetic_data()
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1226tmxpvksh/leaning_python",
"score": 4
} |
#### File: py_1118/py_1118/py_1118.py
```python
statement=input("문자열을 입력하시오: ") # examine the string
alphas=0
digits=0
spaces=0
for c in statement:
if c.isalpha():
alphas=alphas+1
    if c.isdigit():
digits=digits+1
if c.isspace():
spaces=spaces+1
print("알파벳 문자의 개수=",alphas)
print("숫자 문자의 개수=",digits)
print("스페이스의 개수=",spaces)
def get_sum(start, end): # function summing 1 to 10
sum=0
for i in range(start, end+1):
sum+=i
return sum
value = get_sum(1,10)
print(value)
def get_sum(start, end): # function (redefined to also print the sum)
sum=0
for i in range(start, end+1):
sum+= i
print(sum)
value = get_sum(1,10)
print(value)
def asterisk_test(a,b,*num): # variadic arguments
return a+b+sum(num)
print(asterisk_test(1,2,3,4,5))
```
#### File: py_1128/py_1128/py_1128.py
```python
from tkinter import*
window=Tk()
lb=Listbox(window, height=4)
lb.pack()
lb.insert(END,"Python")
lb.insert(END,"C")
lb.insert(END,"Java")
lb.insert(END,"Swift")
window.mainloop()
``` |
{
"source": "1229203497/MyT",
"score": 2
} |
#### File: dashboard/dashboard/edit_sheriffs.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from dashboard import edit_config_handler
from dashboard.models import sheriff
from dashboard import sheriff_pb2
from google.protobuf import text_format
class EditSheriffsHandler(edit_config_handler.EditConfigHandler):
"""Handles editing of Sheriff entities.
The post method is inherited from EditConfigHandler. It takes the request
parameters documented there, as well as the following parameters, which
are properties of Sheriff:
url: A URL at which there is a list of email addresses to send mail to.
email: An email address to send mail to, possibly a mailing list.
internal-only: Whether the data should be considered internal-only.
summarize: Whether to send emails in a summary form.
"""
def __init__(self, request, response):
super(EditSheriffsHandler, self).__init__(
request, response, sheriff.Sheriff)
def get(self):
"""Renders the UI with the form."""
def SheriffData(sheriff_entity):
subscription = sheriff_pb2.Subscription()
subscription.name = sheriff_entity.key.string_id()
subscription.rotation_url = sheriff_entity.url or ''
subscription.notification_email = sheriff_entity.email or ''
if not sheriff_entity.internal_only:
subscription.visibility = sheriff_pb2.Subscription.PUBLIC
# Find the labels, and find the ones that say 'Component-' and turn those
# into components, formatting appropriately.
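      # e.g. a label 'Component-Blink-Layout' becomes the bug component 'Blink>Layout'.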
for label in sorted(sheriff_entity.labels):
if label.startswith('Component-'):
subscription.bug_components.append('>'.join(label.split('-')[1:]))
else:
subscription.bug_labels.append(label)
# Treat all patterns as globs for now.
for pattern in sorted(sheriff_entity.patterns):
p = subscription.patterns.add()
p.glob = pattern
return {
'url': sheriff_entity.url or '',
'email': sheriff_entity.email or '',
'patterns': '\n'.join(sorted(sheriff_entity.patterns)),
'labels': ','.join(sorted(sheriff_entity.labels)),
'internal_only': sheriff_entity.internal_only,
'summarize': sheriff_entity.summarize,
'subscription': text_format.MessageToString(subscription)
}
sheriff_dicts = {entity.key.string_id(): SheriffData(entity)
for entity in sheriff.Sheriff.query()}
self.RenderHtml('edit_sheriffs.html', {
'sheriffs_json': json.dumps(sheriff_dicts),
'sheriff_names': sorted(sheriff_dicts),
})
def post(self):
self.ReportError('Sheriff configs are no longer editable via this page. '
'See go/chromeperf-sheriff-redux', status=403)
def _UpdateFromRequestParameters(self, sheriff_entity):
"""Updates the given Sheriff based on query parameters.
Args:
sheriff_entity: A Sheriff entity.
"""
# This overrides the method in the superclass.
sheriff_entity.url = self.request.get('url') or None
sheriff_entity.email = self.request.get('email') or None
sheriff_entity.internal_only = self.request.get('internal-only') == 'true'
labels = self.request.get('labels')
if labels:
sheriff_entity.labels = labels.split(',')
else:
sheriff_entity.labels = []
sheriff_entity.summarize = self.request.get('summarize') == 'true'
```
#### File: value/diagnostics/generic_set_unittest.py
```python
import unittest
from tracing.proto import histogram_proto
from tracing.value import histogram_deserializer
from tracing.value import histogram_serializer
from tracing.value.diagnostics import diagnostic
from tracing.value.diagnostics import generic_set
class GenericSetUnittest(unittest.TestCase):
def testRoundtrip(self):
a_set = generic_set.GenericSet([
None,
True,
False,
0,
1,
42,
[],
{},
[0, False],
{'a': 1, 'b': True},
])
self.assertEqual(a_set, diagnostic.Diagnostic.FromDict(a_set.AsDict()))
def testEq(self):
a_set = generic_set.GenericSet([
None,
True,
False,
0,
1,
42,
[],
{},
[0, False],
{'a': 1, 'b': True},
])
b_set = generic_set.GenericSet([
{'b': True, 'a': 1},
[0, False],
{},
[],
42,
1,
0,
False,
True,
None,
])
self.assertEqual(a_set, b_set)
def testMerge(self):
a_set = generic_set.GenericSet([
None,
True,
False,
0,
1,
42,
[],
{},
[0, False],
{'a': 1, 'b': True},
])
b_set = generic_set.GenericSet([
{'b': True, 'a': 1},
[0, False],
{},
[],
42,
1,
0,
False,
True,
None,
])
self.assertTrue(a_set.CanAddDiagnostic(b_set))
self.assertTrue(b_set.CanAddDiagnostic(a_set))
a_set.AddDiagnostic(b_set)
self.assertEqual(a_set, b_set)
b_set.AddDiagnostic(a_set)
self.assertEqual(a_set, b_set)
c_dict = {'a': 1, 'b': 1}
c_set = generic_set.GenericSet([c_dict])
a_set.AddDiagnostic(c_set)
self.assertEqual(len(a_set), 1 + len(b_set))
self.assertIn(c_dict, a_set)
def testGetOnlyElement(self):
gs = generic_set.GenericSet(['foo'])
self.assertEqual(gs.GetOnlyElement(), 'foo')
def testGetOnlyElementRaises(self):
gs = generic_set.GenericSet([])
with self.assertRaises(AssertionError):
gs.GetOnlyElement()
def testDeserialize(self):
d = histogram_deserializer.HistogramDeserializer(['aaa', 'bbb'])
a = generic_set.GenericSet.Deserialize(0, d)
self.assertEqual(len(a), 1)
self.assertIn('aaa', a)
b = generic_set.GenericSet.Deserialize([0, 1], d)
self.assertEqual(len(b), 2)
self.assertIn('aaa', b)
self.assertIn('bbb', b)
def testSerialize(self):
s = histogram_serializer.HistogramSerializer()
g = generic_set.GenericSet(['a', 'b'])
self.assertEqual(g.Serialize(s), [0, 1])
g = generic_set.GenericSet(['a'])
self.assertEqual(g.Serialize(s), 0)
def testFromProto(self):
p = histogram_proto.Pb2().GenericSet()
p.values.append('12345')
p.values.append('"string"')
p.values.append('{"attr":1}')
g = generic_set.GenericSet.FromProto(p)
values = list(g)
self.assertEqual([12345, 'string', {"attr": 1}], values)
def testInvalidJsonValueInProto(self):
with self.assertRaises(TypeError):
p = histogram_proto.Pb2().GenericSet()
p.values.append('this_is_an_undefined_json_indentifier')
generic_set.GenericSet.FromProto(p)
``` |
{
"source": "122abhi/concepts-of-python-01",
"score": 4
} |
#### File: Maths for Competitive Programming/Prime Number/Efficient Algo-2.py
```python
def isPrime(n: int)->bool:
"""
    Given an integer 'n', check whether 'n' is prime.
    :param n:
    :return: bool - True or False
    A number is prime if it is divisible only by 1 and itself.
    1 is neither prime nor composite.
    - first check whether n is even, i.e. divisible by 2
    - then check whether n is divisible by 3
    - then iterate i over 5, 11, 17, 23, ... up to sqrt(n) in steps of 6
    - and check divisibility of n by i and i+2 (covering the 6k-1 and 6k+1 candidates 5, 7, 11, 13, ...)
"""
if n== 1:
return False
# handle boundary conditions
if n == 2 or n==3:
return True
# Now check for divisibility of n by 2 & 3
if n % 2 ==0 or n % 3 ==0:
return False
i = 5
while (i*i <= n):
if n%i ==0 or n%(i+2) ==0:
return False
i = i+ 6
return True
#Driver Code
if __name__ == '__main__':
n = 24971
returned = isPrime(n)
print(returned)
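    # quick illustrative check: [n for n in range(2, 30) if isPrime(n)]
    # gives [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]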
```
#### File: Linked List/Singly Linked List/solution.py
```python
class LinkedList:
"""
    We represent a linked list using a LinkedList class
    that has state and behaviours.
    The state of the SLL includes self.head as an instance variable.
    The behaviours of the LL class include any operations that we implement on a singly linked list.
    Whenever an LL instance is initialized
    it has zero nodes within itself,
    so while initializing an LL we set self.head to None (see __init__)."""
def __init__(self):
self.head=None
def append_node(self,data):
""" Let's write a LL behaviour which will append a data Node at last of the LL.
We will pass data as a method argument and the function will instantiate a new node
and append it to the LL instance, accordingly.
There can be two cases- (1) Either the LL is compeltely empty i.e head is None Or
or (2) The LL have some values- in this case we will search for the last Node.
A last Node is defined as Node for which, the next state value is None.
To search for the last Node, we will traverse the LL instance till a node which is
having next state property is found. Once it is found we call that Node as last Node
and initialize it's next value as reference to our new node.
In All cases a New Node has to be made with data, passed as an argument."""
# In All cases a New Node has to be made with data, passed as an argument.
new_node = Node(data)
# In Case the LL is empty.
if self.head is None:
self.head = new_node
# By default new_node's next is == None.
return
# Search for last_node: as Node having next state value as None.
# One way is to run a for loop and break whenever a Node with next value None is found.
# however, using break is not a good programming method.
        # Whenever you find a situation where a loop must run until a condition is met, like in this case,
        # Python's while loop is the natural choice.
# condition here: last_node.next is not None.
last_node = self.head
# is next pointing to Null, if No, move to next Node, otherwise come out of the loop
while last_node.next is not None:
last_node = last_node.next
# found last_node
last_node.next = new_node
# new_node by default have next values as None.
return
def print_nodes(self):
if self.head is None:
print("Linked List is Empty")
return
current_node = self.head
while current_node is not None:
print("current_nod data is: ",current_node.data)
current_node = current_node.next
print("Print_nodes complete")
return
class Node:
"""
    Whenever a Node is defined it has two states: (1) data and (2) next: a reference to the next node.
    The data can hold any type, including built-in types or a class object.
    A new Node could be created with None data and filled later through a Node method,
    or the data can be passed to the constructor as an argument. To save time
    we do the latter here.
    Also, a newly created Node has its next reference set to None, since at that point it is
    just a standalone Node and is not yet part of any data structure such as an LL.
    Behaviours such as changing data within a Node are written and explained as Node class methods,
    while changes such as the addition or deletion of Nodes are written as behaviours of the LL, i.e. LinkedList class methods."""
def __init__(self, data):
self.data = data
self.next = None
if __name__=="__main__":
"""Driver code
(1) Create LL
(2) Append nodes to it"""
LL1 = LinkedList()
LL1.append_node(10)
LL1.append_node(11)
LL1.append_node('abc')
LL1.print_nodes()
```
#### File: Gfg/Catalan number/sol1.py
```python
"""
Return the nth Catalan number.
The recursive formula for Catalan numbers is:
    C(n+1) = sum_{i=0..n} C(i) * C(n-i)
so, rewritten for C(n):
    C(n) = sum_{i=0..n-1} C(i) * C(n-1-i)
"""
def getCatalan(n,dp_arr):
# Lookup
if (dp_arr[n] is not None):
return dp_arr[n]
#Base Case
if (n==0):
return 1
#Rec Case
Cn = 0
for i in range(0,n):
Cn = Cn + ( getCatalan(i, dp_arr) * getCatalan(n-1-i, dp_arr) )
dp_arr[n] = Cn
return Cn
#Driver Code
if __name__ == '__main__':
dp_arr : list = [None] * 100
n = 5
returned = getCatalan(n, dp_arr)
print(returned)
```
#### File: is array sorted/ascending order/solution1.py
```python
def is_sorted_asc(arr):
## base case (when to start combining sub-problems - to get complete problem solution
# n ==1 i.e when smaller_problem will be only 1 digit array: we will want to return True
# as 1 element array is always sorted
# Also n==0 for the case if the initial array itself is of zero length.
if ((len(arr) == 1) or (len(arr) == 0)):
return True
## Recursive case: Define bigger problem in terms of smaller sub-problem instances
bigger_prb = is_sorted_asc(arr[1:])
if (bigger_prb == False):
return False
if arr[0]< arr[1]:
return True
else:
return False
# ## Whenver, you feel getting nested if conditions u can combine them using logical operators
# if arr[0]< arr[1] and is_sorted_asc(arr[1:]):
# return True
# else:
# return False
if __name__=="__main__":
## Driver Code
sorted_list = [1,2,3,4,5]
result = is_sorted_asc(sorted_list)
print(sorted_list, "is sorted?")
print(result)
un_sorted_list = [1,2,3,5,4]
result = is_sorted_asc(un_sorted_list)
print(un_sorted_list, "is sorted?")
print(result)
``` |
{
"source": "123122492/vnpy",
"score": 2
} |
#### File: vnpy/component/cta_line_bar.py
```python
import copy
import decimal
import math
import os
import sys
import traceback
import talib as ta
import numpy as np
import csv
from collections import OrderedDict
from datetime import datetime, timedelta
from pykalman import KalmanFilter
from vnpy.component.base import (
Direction,
Area,
MARKET_DAY_ONLY,
NIGHT_MARKET_23,
NIGHT_MARKET_SQ2,
MARKET_ZJ)
from vnpy.component.cta_period import CtaPeriod, Period
from vnpy.trader.object import BarData, TickData
from vnpy.trader.constant import Interval, Color
from vnpy.trader.utility import round_to, get_trading_date, get_underlying_symbol
def get_cta_bar_type(bar_name: str):
"""根据名称,返回K线类型和K线周期"""
if bar_name.startswith('S'):
return CtaLineBar, int(bar_name.replace('S', ''))
if bar_name.startswith('M'):
return CtaMinuteBar, int(bar_name.replace('M', ''))
if bar_name.startswith('H'):
return CtaHourBar, int(bar_name.replace('H', ''))
if bar_name.startswith('D'):
interval = bar_name.replace('D', '')
if len(interval) == 0:
return CtaDayBar, 1
else:
return CtaDayBar, int(interval)
raise Exception(u'{}参数错误'.format(bar_name))
def get_cta_bar_class(bar_type: str):
"""根据类型名获取对象"""
assert isinstance(bar_type, str)
if bar_type == Interval.SECOND:
return CtaLineBar
if bar_type == Interval.MINUTE:
return CtaMinuteBar
if bar_type == Interval.HOUR:
return CtaHourBar
if bar_type == Interval.DAILY:
return CtaDayBar
if bar_type == Interval.WEEKLY:
return CtaWeekBar
raise Exception('no matched CTA bar type:{}'.format(bar_type))
class CtaLineBar(object):
"""CTA K线"""
""" 使用方法:
1、在策略构造函数__init()中初始化
self.lineM = None # 1分钟K线
lineMSetting = {}
lineMSetting['name'] = u'M1'
lineMSetting['interval'] = Interval.MINUTE
lineMSetting['bar_interval'] = 60 # 1分钟对应60秒
lineMSetting['para_ema1_len'] = 7 # EMA线1的周期
lineMSetting['para_ema2_len'] = 21 # EMA线2的周期
lineMSetting['para_boll_len'] = 20 # 布林特线周期
lineMSetting['para_boll_std_rate'] = 2 # 布林特线标准差
lineMSetting['price_tick'] = self.price_tick # 最小条
lineMSetting['underlying_symbol'] = self.underlying_symbol #商品短号
self.lineM = CtaLineBar(self, self.onBar, lineMSetting)
2、在onTick()中,需要导入tick数据
self.lineM.onTick(tick)
self.lineM5.onTick(tick) # 如果你使用2个周期
3、在onBar事件中,按照k线结束使用;其他任何情况下bar内使用,通过对象使用即可,self.lineM.lineBar[-1].close
# 创建30分钟K线(类似文华,用交易日内,累加分钟够30根1min bar)
lineM30Setting = {}
lineM30Setting['name'] = u'M30'
lineM30Setting['interval'] = Interval.MINUTE
lineM30Setting['bar_interval'] = 30
lineM30Setting['mode'] = CtaLineBar.TICK_MODE
lineM30Setting['price_tick'] = self.price_tick
lineM30Setting['underlying_symbol'] = self.underlying_symbol
self.lineM30 = CtaMinuteBar(self, self.onBarM30, lineM30Setting)
# 创建2小时K线
lineH2Setting = {}
lineH2Setting['name'] = u'H2'
lineH2Setting['interval'] = Interval.HOUR
lineH2Setting['bar_inverval'] = 2
lineH2Setting['mode'] = CtaLineBar.TICK_MODE
lineH2Setting['price_tick'] = self.price_tick
lineH2Setting['underlying_symbol'] = self.underlying_symbol
self.lineH2 = CtaHourBar(self, self.onBarH2, lineH2Setting)
# 创建的日K线
lineDaySetting = {}
lineDaySetting['name'] = u'D1'
lineDaySetting['mode'] = CtaDayBar.TICK_MODE
lineDaySetting['price_tick'] = self.price_tick
lineDaySetting['underlying_symbol'] = self.underlying_symbol
self.lineD = CtaDayBar(self, self.onBarD, lineDaySetting)
"""
    # Difference between the two modes:
    # - In tick mode, once a tick arrives, lineBar[-1] is the bar currently being built and keeps accumulating ticks; a traditional OnBar-style calculation therefore uses lineBar[-2].
    # - In bar mode, when a bar arrives, lineBar[-1] is the newly generated bar and is not updated any further.
TICK_MODE = 'tick'
BAR_MODE = 'bar'
CB_ON_BAR = 'cb_on_bar'
CB_ON_PERIOD = 'cb_on_period'
    # parameter list, holding the parameter names
    paramList = ['vt_symbol']
def __init__(self, strategy, cb_on_bar, setting=None):
        # OnBar event callback function
        self.cb_on_bar = cb_on_bar
        # period-change event callback function
        self.cb_on_period = None
        # the strategy this K-line serves
        self.strategy = strategy
        self.underly_symbol = ''  # short code of the commodity
        self.price_tick = 1  # minimum price unit of the commodity
        self.round_n = 4  # number of decimal places used by round()
self.is_7x24 = False
        # current tick
self.cur_tick = None
self.last_tick = None
self.cur_datetime = None
self.cur_trading_day = ''
self.cur_price = 0
        # K-line storage
        self.cur_bar = None  # bar object for the last / still unfinished bar
        self.line_bar = []  # K-line cache queue
        self.bar_len = 0  # real number of K-line bars held
        self.max_hold_bars = 2000
        self.is_first_tick = False  # first tick of the current bar
        # (when running live, or when added bars cover less than one bar period, the last bar is not included)
        self.open_array = np.zeros(self.max_hold_bars)  # open prices, aligned with lineBar
        self.high_array = np.zeros(self.max_hold_bars)  # high prices, aligned with lineBar
        self.low_array = np.zeros(self.max_hold_bars)  # low prices, aligned with lineBar
        self.close_array = np.zeros(self.max_hold_bars)  # close prices, aligned with lineBar
        self.mid3_array = np.zeros(self.max_hold_bars)  # average of close/high/low
        self.mid4_array = np.zeros(self.max_hold_bars)  # average of close*2/high/low
        self.mid5_array = np.zeros(self.max_hold_bars)  # average of close*2/open/high/low
self.export_filename = None
self.export_fields = []
        # create internal variables
        self.init_properties()
        # create and initialize the indicators
        self.init_indicators()
        # functions to be run in real time (intra-bar)
        self.rt_funcs = set()
        self.rt_executed = False
        # registered callback functions
        self.cb_dict = {}
if setting:
self.set_params(setting)
        # normalize self.minute_interval
if self.interval == Interval.SECOND:
self.minute_interval = int(self.bar_interval / 60)
elif self.interval == Interval.MINUTE:
self.minute_interval = self.bar_interval
elif self.interval == Interval.HOUR:
self.minute_interval = 60
elif self.interval == Interval.DAILY:
self.minute_interval = 60 * 24
        # adjust the rounding precision
if self.price_tick < 1:
exponent = decimal.Decimal(str(self.price_tick))
self.round_n = max(abs(exponent.as_tuple().exponent) + 2, 4)
        # set up the Kalman filter
if self.para_active_kf:
try:
self.kf = KalmanFilter(transition_matrices=[1],
observation_matrices=[1],
initial_state_mean=0,
initial_state_covariance=1,
observation_covariance=1,
transition_covariance=0.01)
except Exception:
self.write_log(u'导入卡尔曼过滤器失败,需先安装 pip install pykalman')
self.para_active_kf = False
def register_event(self, event_type, cb_func):
"""注册事件回调函数"""
self.cb_dict.update({event_type: cb_func})
if event_type == self.CB_ON_PERIOD:
self.cb_on_period = cb_func
def init_param_list(self):
self.paramList.append('bar_interval')
self.paramList.append('interval')
self.paramList.append('mode')
self.paramList.append('para_pre_len')
self.paramList.append('para_ma1_len')
self.paramList.append('para_ma2_len')
self.paramList.append('para_ma3_len')
self.paramList.append('para_ema1_len')
self.paramList.append('para_ema2_len')
self.paramList.append('para_ema3_len')
self.paramList.append('para_dmi_len')
self.paramList.append('para_dmi_max')
self.paramList.append('para_atr1_len')
self.paramList.append('para_atr2_len')
self.paramList.append('para_atr3_len')
self.paramList.append('para_vol_len')
self.paramList.append('para_rsi1_len')
self.paramList.append('para_rsi2_len')
self.paramList.append('para_cmi_len')
self.paramList.append('para_boll_len')
self.paramList.append('para_boll_tb_len')
self.paramList.append('para_boll_std_rate')
self.paramList.append('para_boll2_len')
self.paramList.append('para_boll2_tb_len')
self.paramList.append('para_boll2_std_rate')
self.paramList.append('para_kdj_len')
self.paramList.append('para_kdj_tb_len')
self.paramList.append('para_kdj_slow_len')
self.paramList.append('para_kdj_smooth_len')
self.paramList.append('para_cci_len')
self.paramList.append('para_macd_fast_len')
self.paramList.append('para_macd_slow_len')
self.paramList.append('para_macd_signal_len')
self.paramList.append('para_active_kf')
self.paramList.append('para_sar_step')
self.paramList.append('para_sar_limit')
self.paramList.append('para_active_skd')
self.paramList.append('para_skd_fast_len')
self.paramList.append('para_skd_slow_len')
self.paramList.append('para_skd_low')
self.paramList.append('para_skd_high')
self.paramList.append('para_active_yb')
self.paramList.append('para_yb_len')
self.paramList.append('para_yb_ref')
self.paramList.append('para_golden_n')
self.paramList.append('para_active_area')
self.paramList.append('para_bias_len')
self.paramList.append('para_bias2_len')
self.paramList.append('para_bias3_len')
self.paramList.append('is_7x24')
self.paramList.append('price_tick')
self.paramList.append('underly_symbol')
self.paramList.append('name')
def init_properties(self):
"""
        Initialize internal variables
:return:
"""
self.init_param_list()
        # input parameters
        self.name = u'LineBar'
        self.mode = self.TICK_MODE  # tick mode by default
        self.interval = Interval.SECOND  # second-level interval by default
        self.bar_interval = 300  # 300 seconds (5 minutes) by default
self.minute_interval = self.bar_interval / 60
def __getstate__(self):
"""移除Pickle dump()时不支持的Attribute"""
state = self.__dict__.copy()
# Remove the unpicklable entries.
remove_keys = ['strategy', 'cb_on_bar', 'cb_on_period']
for key in self.__dict__.keys():
if key in remove_keys:
del state[key]
return state
def __setstate__(self, state):
"""Pickle load()"""
self.__dict__.update(state)
def restore(self, state):
"""从Pickle中恢复数据"""
for key in state.__dict__.keys():
self.__dict__[key] = state.__dict__[key]
def init_indicators(self):
""" 定义所有的指标数据"""
# 指标参数
self.para_pre_len = 0 # 20 # 前高前低的周期长度
self.para_ma1_len = 0 # 10 # 第一根MA均线的周期长度
self.para_ma2_len = 0 # 20 # 第二根MA均线的周期长度
self.para_ma3_len = 0 # 120 # 第三根MA均线的周期长度
self.para_ema1_len = 0 # 13 # 第一根EMA均线的周期长度
self.para_ema2_len = 0 # 21 # 第二根EMA均线的周期长度
self.para_ema3_len = 0 # 120 # 第三根EMA均线的周期长度
self.para_dmi_len = 0 # 14 # DMI的计算周期
self.para_dmi_max = 0 # 30 # Dpi和Mdi的突破阈值
self.para_atr1_len = 0 # 10 # ATR波动率的计算周期(近端)
self.para_atr2_len = 0 # 26 # ATR波动率的计算周期(常用)
self.para_atr3_len = 0 # 50 # ATR波动率的计算周期(远端)
self.para_vol_len = 0 # 14 # 平均交易量的计算周期
self.para_rsi1_len = 0 # 7 # RSI 相对强弱指数(快曲线)
self.para_rsi2_len = 0 # 14 # RSI 相对强弱指数(慢曲线)
self.para_cmi_len = 0 # 计算CMI强度的周期
self.para_boll_len = 0 # 布林的计算K线周期
self.para_boll_tb_len = 0 # 布林的计算K线周期( 适用于TB的计算方式)
self.para_boll_std_rate = 2 # 布林标准差(缺省2倍)
self.para_boll2_len = 0 # 第二条布林的计算K线周期
self.para_boll2_tb_len = 0 # 第二跳布林的计算K线周期( 适用于TB的计算方式)
self.para_boll2_std_sate = 2 # 第二条布林标准差(缺省2倍)
self.para_kdj_len = 0 # KDJ指标的长度,缺省是9
self.para_kdj_tb_len = 0 # KDJ指标的长度,缺省是9 ( for TB)
self.para_kdj_slow_len = 3 # KDJ K值平滑指标
self.para_kdj_smooth_len = 3 # KDJ D值平滑指标
self.para_cci_len = 0 # 计算CCI的K线周期
self.para_macd_fast_len = 0 # 计算MACD的K线周期
self.para_macd_slow_len = 0 # 慢线周期
self.para_macd_signal_len = 0 # 平滑周期
self.para_active_kf = False # 是否激活卡尔曼均线计算
self.para_sar_step = 0 # 抛物线的参数
self.para_sar_limit = 0 # 抛物线参数
self.para_active_skd = False # 是否激活摆动指标
self.para_skd_fast_len = 13 # 摆动指标快线周期1
self.para_skd_slow_len = 8 # 摆动指标慢线周期2
self.para_skd_low = 30 # 摆动指标下限区域
self.para_skd_high = 70 # 摆动指标上限区域
self.para_active_yb = False # 是否激活多空趋势线
self.para_yb_ref = 1 # 趋势线参照周期
self.para_yb_len = 10 # 趋势线观测周期
self.para_golden_n = 0 # 黄金分割的观测周期(一般设置为60,或120)
self.para_active_area = False # 是否激活区域划分
self.para_bias_len = 0 # 乖离率观测周期1
self.para_bias2_len = 0 # 乖离率观测周期2
self.para_bias3_len = 0 # 乖离率观测周期3
# K 线的相关计算结果数据
self.line_pre_high = [] # K线的前para_pre_len的的最高
self.line_pre_low = [] # K线的前para_pre_len的的最低
self.line_ma1 = [] # K线的MA(para_ma1_len)均线,不包含未走完的bar
self.line_ma2 = [] # K线的MA(para_ma2_len)均线,不包含未走完的bar
self.line_ma3 = [] # K线的MA(para_ma3_len)均线,不包含未走完的bar
self._rt_ma1 = None # K线的实时MA(para_ma1_len)
self._rt_ma2 = None # K线的实时MA(para_ma2_len)
self._rt_ma3 = None # K线的实时MA(para_ma3_len)
self.line_ma1_atan = []  # slope (atan, in degrees) of the MA(para_ma1_len) line
self.line_ma2_atan = []  # slope (atan, in degrees) of the MA(para_ma2_len) line
self.line_ma3_atan = []  # slope (atan, in degrees) of the MA(para_ma3_len) line
self._rt_ma1_atan = None
self._rt_ma2_atan = None
self._rt_ma3_atan = None
self.ma12_count = 0 # ma1 与 ma2 ,金叉/死叉后第几根bar
self.ma13_count = 0 # ma1 与 ma3 ,金叉/死叉后第几根bar
self.ma23_count = 0 # ma2 与 ma3 ,金叉/死叉后第几根bar
self.line_ema1 = [] # K线的EMA1均线,周期是InputEmaLen1,不包含当前bar
self.line_ema2 = [] # K线的EMA2均线,周期是InputEmaLen2,不包含当前bar
self.line_ema3 = [] # K线的EMA3均线,周期是InputEmaLen3,不包含当前bar
self._rt_ema1 = None # K线的实时EMA(para_ema1_len)
self._rt_ema2 = None # K线的实时EMA(para_ema2_len)
self._rt_ema3 = None # K线的实时EMA(para_ema3_len)
# K线的DMI( Pdi,Mdi,ADX,Adxr) 计算数据
self.cur_pdi = 0 # bar内的升动向指标,即做多的比率
self.cur_mdi = 0 # bar内的下降动向指标,即做空的比率
self.line_pdi = [] # 升动向指标,即做多的比率
self.line_mdi = [] # 下降动向指标,即做空的比率
self.line_dx = [] # 趋向指标列表,最大长度为inputM*2
self.cur_adx = 0 # Bar内计算的平均趋向指标
self.line_adx = [] # 平均趋向指标
self.cur_adxr = 0 # 趋向平均值,为当日ADX值与M日前的ADX值的均值
self.line_adxr = [] # 平均趋向变化指标
# K线的基于DMI、ADX计算的结果
self.cur_adx_trend = 0 # ADX值持续高于前一周期时,市场行情将维持原趋势
self.cur_adxr_trend = 0 # ADXR值持续高于前一周期时,波动率比上一周期高
self.signal_adx_long = False # 多过滤器条件,做多趋势的判断,ADX高于前一天,上升动向> inputMM
self.signal_adx_short = False # 空过滤器条件,做空趋势的判断,ADXR高于前一天,下降动向> inputMM
# K线的ATR技术数据
self.line_atr1 = [] # K线的ATR1,周期为inputAtr1Len
self.line_atr2 = [] # K线的ATR2,周期为inputAtr2Len
self.line_atr3 = [] # K线的ATR3,周期为inputAtr3Len
self.cur_atr1 = 0
self.cur_atr2 = 0
self.cur_atr3 = 0
# K线的交易量平均
self.line_vol_ma = [] # K 线的交易量平均
# K线的RSI计算数据
self.line_rsi1 = [] # 记录K线对应的RSI数值,只保留inputRsi1Len*8
self.line_rsi2 = [] # 记录K线对应的RSI数值,只保留inputRsi2Len*8
self.para_rsi_low = 30 # RSI的最低线
self.para_rsi_high = 70 # RSI的最高线
self.rsi_top_list = [] # 记录RSI的最高峰,只保留 inputRsiLen个
self.rsi_buttom_list = [] # 记录RSI的最低谷,只保留 inputRsiLen个
self.cur_rsi_top_buttom = {} # 最近的一个波峰/波谷
# K线的CMI计算数据
self.line_cmi = [] # 记录K线对应的Cmi数值,只保留inputCmiLen*8
# K线的布林特计算数据
self.line_boll_upper = [] # 上轨
self.line_boll_middle = [] # 中线
self.line_boll_lower = [] # 下轨
self.line_boll_std = [] # 标准差
self.line_upper_atan = []
self.line_middle_atan = []
self.line_lower_atan = []
self._rt_upper = 0
self._rt_middle = 0
self._rt_lower = 0
self._rt_upper_atan = 0
self._rt_middle_atan = 0
self._rt_lower_atan = 0
self.cur_upper = 0 # 最后一根K的Boll上轨数值(与MinDiff取整)
self.cur_middle = 0 # 最后一根K的Boll中轨数值(与MinDiff取整)
self.cur_lower = 0 # 最后一根K的Boll下轨数值(与MinDiff取整+1)
self.line_boll2_upper = [] # 上轨
self.line_boll2_middle = [] # 中线
self.line_boll2_lower = [] # 下轨
self.line_boll2_std = [] # 标准差
self.line_upper2_atan = []
self.line_middle2_atan = []
self.line_lower2_atan = []
self._rt_upper2 = None
self._rt_middle2 = None
self._rt_lower2 = None
self._rt_upper2_atan = None
self._rt_middle2_atan = None
self._rt_lower2_atan = None
self.cur_upper2 = 0 # 最后一根K的Boll2上轨数值(与MinDiff取整)
self.cur_middle2 = 0 # 最后一根K的Boll2中轨数值(与MinDiff取整)
self.cur_lower2 = 0 # 最后一根K的Boll2下轨数值(与MinDiff取整+1)
# K线的KDJ指标计算数据
self.line_k = [] # K为快速指标
self.line_d = [] # D为慢速指标
self.line_j = []  # J line, J = 3*K - 2*D
self.kdj_top_list = [] # 记录KDJ最高峰,只保留 inputKdjLen个
self.kdj_buttom_list = [] # 记录KDJ的最低谷,只保留 inputKdjLen个
self.line_rsv = [] # RSV
self.cur_kdj_top_buttom = {} # 最近的一个波峰/波谷
self.cur_k = 0 # bar内计算时,最后一个未关闭的bar的实时K值
self.cur_d = 0 # bar内计算时,最后一个未关闭的bar的实时值
self.cur_j = 0 # bar内计算时,最后一个未关闭的bar的实时J值
self.cur_kd_count = 0 # > 0, 金叉, < 0 死叉
self.cur_kd_cross = 0 # 最近一次金叉/死叉的点位
self.cur_kd_cross_price = 0 # 最近一次发生金叉/死叉的价格
# K线的MACD计算数据(26,12,9)
self.line_dif = [] # DIF = EMA12 - EMA26,即为talib-MACD返回值macd
self.line_dea = []  # DEA = previous DEA * 8/10 + today's DIF * 2/10, i.e. the talib MACD 'macdsignal' return value
self.line_macd = [] # (dif-dea)*2,但是talib中MACD的计算是bar = (dif-dea)*1,国内一般是乘以2
self.macd_segment_list = []  # MACD golden/dead-cross segments, recording price high/low, Dif high/low, Macd high/low and Macd area
self._rt_dif = None
self._rt_dea = None
self._rt_macd = None
self.cur_macd_count = 0 # macd 金叉/死叉
self.cur_macd_cross = 0 # 最近一次金叉/死叉的点位
self.cur_macd_cross_price = 0 # 最近一次发生金叉/死叉的价格
self.rt_macd_count = 0 # 实时金叉/死叉, default = 0; -1 实时死叉; 1:实时金叉
self.rt_macd_cross = 0 # 实时金叉/死叉的位置
self.rt_macd_cross_price = 0 # 发生实时金叉死叉时的价格
self.dif_top_divergence = False  # top divergence between macd dif and price
self.dif_buttom_divergence = False  # bottom divergence between macd dif and price
self.macd_top_divergence = False  # top divergence between macd histogram area and price
self.macd_buttom_divergence = False  # bottom divergence between macd histogram area and price
# K 线的CCI计算数据
self.line_cci = []
self.cur_cci = None
self._rt_cci = None
# 卡尔曼过滤器
self.kf = None
self.line_state_mean = [] # 卡尔曼均线
self.line_state_covar = [] # 方差
# SAR 抛物线
self.cur_sar_direction = '' # up/down
self.line_sar = []
self.line_sar_top = []
self.line_sar_buttom = []
self.line_sar_sr_up = []
self.line_sar_ep_up = []
self.line_sar_af_up = []
self.line_sar_sr_down = []
self.line_sar_ep_down = []
self.line_sar_af_down = []
self.cur_sar_count = 0 # SAR 上升下降变化后累加
# 周期
self.cur_atan = None
self.line_atan = []
self.cur_period = None # 当前所在周期
self.period_list = []
# 优化的多空动量线
self.line_skd_rsi = [] # 参照的RSI
self.line_skd_sto = [] # 根据RSI演算的STO
self.line_sk = [] # 快线
self.line_sd = [] # 慢线
self.cur_skd_count = 0 # 当前金叉/死叉后累加
self._rt_sk = None # 实时SK值
self._rt_sd = None # 实时SD值
self.cur_skd_divergence = 0 # 背离,>0,底背离, < 0 顶背离
self.skd_top_list = [] # SK 高位
self.skd_buttom_list = [] # SK 低位
self.cur_skd_cross = 0 # 最近一次金叉/死叉的点位
self.cur_skd_cross_price = 0 # 最近一次发生金叉/死叉的价格
self.rt_skd_count = 0 # 实时金叉/死叉, default = 0; -1 实时死叉; 1:实时金叉
self.rt_skd_cross = 0 # 实时金叉/死叉的位置
self.rt_skd_cross_price = 0 # 发生实时金叉死叉时的价格
# 多空趋势线
self.line_yb = []
self.cur_yb_count = 0 # 当前黄/蓝累加
self._rt_yb = None
# 黄金分割
self.cur_p192 = None # HH-(HH-LL) * 0.192;
self.cur_p382 = None # HH-(HH-LL) * 0.382;
self.cur_p500 = None # (HH+LL)/2;
self.cur_p618 = None # HH-(HH-LL) * 0.618;
self.cur_p809 = None # HH-(HH-LL) * 0.809;
# 智能划分区域
self.area_list = []
self.cur_area = None
self.pre_area = None
# BIAS
self.line_bias = [] # BIAS1
self.line_bias2 = [] # BIAS2
self.line_bias3 = [] # BIAS3
self.cur_bias = 0 # 最后一个bar的BIAS1值
self.cur_bias2 = 0 # 最后一个bar的BIAS2值
self.cur_bias3 = 0 # 最后一个bar的BIAS3值
self._rt_bias = None
self._rt_bias2 = None
self._rt_bias3 = None
def set_params(self, setting: dict = {}):
"""设置参数"""
d = self.__dict__
for key in self.paramList:
if key in setting:
d[key] = setting[key]
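# Example (illustrative keys/values): external code typically configures the kline with
#     kline.set_params({'para_ma1_len': 10, 'para_boll_len': 26, 'para_boll_std_rate': 2})
# Only keys already listed in self.paramList are applied; unrecognised keys are ignored.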
def set_mode(self, mode: str):
"""Tick/Bar模式"""
self.mode = mode
def on_tick(self, tick: TickData):
"""行情更新
:type tick: object
"""
# Tick 有效性检查
if not self.is_7x24 and (tick.datetime.hour == 8 or tick.datetime.hour == 20):
self.write_log(u'竞价时段tick时间:{0}'.format(tick.datetime))
return
self.cur_datetime = tick.datetime
self.cur_tick = copy.copy(tick)
# 兼容 标准套利合约,它没有last_price
if self.cur_tick.last_price is None or self.cur_tick.last_price == 0:
if self.cur_tick.ask_price_1 == 0 and self.cur_tick.bid_price_1 == 0:
return
self.cur_price = round_to((self.cur_tick.ask_price_1 + self.cur_tick.bid_price_1) / 2, self.price_tick)
self.cur_tick.last_price = self.cur_price
else:
self.cur_price = self.cur_tick.last_price
# 3.生成x K线,若形成新Bar,则触发OnBar事件
self.generate_bar(copy.copy(self.cur_tick))
# 更新curPeriod的High,low
if self.cur_period is not None:
self.cur_period.update_price(self.cur_tick.last_price)
def add_bar(self, bar: BarData, bar_is_completed: bool = False, bar_freq: int = 1):
"""
予以外部初始化程序增加bar
予以外部初始化程序增加bar
:param bar:
:param bar_is_completed: 插入的bar,其周期与K线周期一致,就设为True
:param bar_freq: 插入的bar,其分钟周期数
:return:
"""
self.bar_len = len(self.line_bar)
# 更新最后价格
self.cur_price = bar.close_price
self.cur_datetime = bar.datetime + timedelta(minutes=bar_freq)
if self.bar_len == 0:
new_bar = copy.deepcopy(bar)
self.line_bar.append(new_bar)
self.cur_trading_day = bar.trading_day
self.on_bar(bar)
return
# 与最后一个BAR的时间比对,判断是否超过K线的周期
lastBar = self.line_bar[-1]
self.cur_trading_day = bar.trading_day
is_new_bar = False
if bar_is_completed:
is_new_bar = True
if self.interval == Interval.SECOND:
if (bar.datetime - lastBar.datetime).total_seconds() >= self.bar_interval:
is_new_bar = True
elif self.interval == Interval.MINUTE:
bar_today_time = bar.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
cur_bar_minute = int((bar.datetime - bar_today_time).total_seconds() / 60 / self.bar_interval)
last_bar_today_time = lastBar.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
last_bar_minute = int((lastBar.datetime - last_bar_today_time).total_seconds() / 60 / self.bar_interval)
if cur_bar_minute != last_bar_minute:
is_new_bar = True
elif self.interval == Interval.HOUR:
if self.bar_interval == 1 and bar.datetime.hour != lastBar.datetime.hour:
is_new_bar = True
elif self.bar_interval == 2 and bar.datetime.hour != lastBar.datetime.hour \
and bar.datetime.hour in {1, 9, 11, 13, 15, 21, 23}:
is_new_bar = True
elif self.bar_interval == 4 and bar.datetime.hour != lastBar.datetime.hour \
and bar.datetime.hour in {1, 9, 13, 21}:
is_new_bar = True
else:
cur_bars_in_day = int(bar.datetime.hour / self.bar_interval)
last_bars_in_day = int(lastBar.datetime.hour / self.bar_interval)
if cur_bars_in_day != last_bars_in_day:
is_new_bar = True
elif self.interval == Interval.DAILY:
if bar.trading_day != lastBar.trading_day:
is_new_bar = True
if is_new_bar:
# 添加新的bar
new_bar = copy.deepcopy(bar)
self.line_bar.append(new_bar)
# 将上一个Bar推送至OnBar事件
self.on_bar(lastBar)
else:
# 更新最后一个bar
# 此段代码,针对一部分短周期生成长周期的k线更新,如3根5分钟k线,合并成1根15分钟k线。
lastBar.close_price = bar.close_price
lastBar.high_price = max(lastBar.high_price, bar.high_price)
lastBar.low_price = min(lastBar.low_price, bar.low_price)
lastBar.volume += bar.volume
lastBar.open_interest = bar.open_interest
# 实时计算
self.rt_executed = False
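# Usage sketch: during strategy initialisation, historical bars are usually replayed through
# add_bar(), e.g.
#     for bar in history_bars:          # 'history_bars' is an assumed variable
#         kline.add_bar(bar, bar_freq=1)
# after which on_tick()/generate_bar() handles live data.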
def on_bar(self, bar: BarData):
"""OnBar事件"""
# 计算相关数据
bar_mid3 = round((bar.close_price + bar.high_price + bar.low_price) / 3, self.round_n)
bar_mid4 = round((2 * bar.close_price + bar.high_price + bar.low_price) / 4, self.round_n)
bar_mid5 = round((2 * bar.close_price + bar.open_price + bar.high_price + bar.low_price) / 5, self.round_n)
# 扩展open,close,high,low numpy array列表
self.open_array[:-1] = self.open_array[1:]
self.open_array[-1] = bar.open_price
self.high_array[:-1] = self.high_array[1:]
self.high_array[-1] = bar.high_price
self.low_array[:-1] = self.low_array[1:]
self.low_array[-1] = bar.low_price
self.close_array[:-1] = self.close_array[1:]
self.close_array[-1] = bar.close_price
self.mid3_array[:-1] = self.mid3_array[1:]
self.mid3_array[-1] = bar_mid3
self.mid4_array[:-1] = self.mid4_array[1:]
self.mid4_array[-1] = bar_mid4
self.mid5_array[:-1] = self.mid5_array[1:]
self.mid5_array[-1] = bar_mid5
self.bar_len = len(self.line_bar)
self.__count_pre_high_low()
self.__count_ma()
self.__count_ema()
self.__count_dmi()
self.__count_atr()
self.__count_vol_ma()
self.__count_rsi()
self.__count_cmi()
self.__count_kdj()
self.__count_kdj_tb()
self.__count_boll()
self.__count_macd()
self.__count_cci()
self.__count_kf()
self.__count_period(bar)
self.__count_skd()
self.__count_yb()
self.__count_sar()
self.__count_golden_section()
self.__count_area(bar)
self.__count_bias()
self.export_to_csv(bar)
self.rt_executed = False
# 回调上层调用者
if self.cb_on_bar:
self.cb_on_bar(bar)
def check_rt_funcs(self, func):
"""
1.检查调用函数名是否在实时计算函数清单中,如果没有,则添加
2. 如果当前tick/addbar之后,没有被执行过实时计算,就执行一次
:param func:
:return:
"""
if func not in self.rt_funcs:
self.write_log(u'{}添加{}到实时函数中'.format(self.name, str(func.__name__)))
self.rt_funcs.add(func)
self.run_rt_count()
def run_rt_count(self):
"""
根据实时计算得要求,执行实时指标计算
:return:
"""
if self.rt_executed:
return
for func in list(self.rt_funcs):
try:
func()
except Exception as ex:
print(u'{}调用实时计算,异常:{},{}'.format(self.name, str(ex), traceback.format_exc()), file=sys.stderr)
self.rt_executed = True
def export_to_csv(self, bar: BarData):
""" 输出到csv文件"""
if self.export_filename is None or len(self.export_fields) == 0:
return
field_names = []
save_dict = {}
for field in self.export_fields:
field_name = field.get('name', None)
attr_name = field.get('attr', None)
source = field.get('source', None)
type_ = field.get('type_', None)
if field_name is None or attr_name is None or source is None or type_ is None:
continue
field_names.append(field_name)
if source == 'bar':
save_dict[field_name] = getattr(bar, str(attr_name), None)
else:
if type_ == 'list':
list_obj = getattr(self, str(attr_name), None)
if list_obj is None or len(list_obj) == 0:
save_dict[field_name] = 0
else:
save_dict[field_name] = list_obj[-1]
else:
save_dict[field_name] = getattr(self, str(attr_name), 0)
if len(save_dict) > 0:
self.append_data(file_name=self.export_filename, dict_data=save_dict, field_names=field_names)
def get_last_bar_str(self):
"""获取显示最后一个Bar的信息"""
msg = u'[' + self.name + u']'
if len(self.line_bar) < 2:
return msg
if self.mode == self.TICK_MODE:
display_bar = self.line_bar[-2]
else:
display_bar = self.line_bar[-1]
msg = msg + u'[td:{}] dt:{} o:{};h:{};l:{};c:{},v:{}'. \
format(display_bar.trading_day, display_bar.datetime.strftime('%Y-%m-%d %H:%M:%S'), display_bar.open_price,
display_bar.high_price,
display_bar.low_price, display_bar.close_price, display_bar.volume)
if self.para_ma1_len > 0 and len(self.line_ma1) > 0:
msg = msg + u',MA({0}):{1}'.format(self.para_ma1_len, self.line_ma1[-1])
if self.para_ma2_len > 0 and len(self.line_ma2) > 0:
msg = msg + u',MA({0}):{1}'.format(self.para_ma2_len, self.line_ma2[-1])
if self.ma12_count == 1:
msg = msg + u'MA{}金叉MA{}'.format(self.para_ma1_len, self.para_ma2_len)
elif self.ma12_count == -1:
msg = msg + u'MA{}死叉MA{}'.format(self.para_ma1_len, self.para_ma2_len)
if self.para_ma3_len > 0 and len(self.line_ma3) > 0:
msg = msg + u',MA({0}):{1}'.format(self.para_ma3_len, self.line_ma3[-1])
if self.ma13_count == 1:
msg = msg + u'MA{}金叉MA{}'.format(self.para_ma1_len, self.para_ma3_len)
elif self.ma13_count == -1:
msg = msg + u'MA{}死叉MA{}'.format(self.para_ma1_len, self.para_ma3_len)
if self.ma23_count == 1:
msg = msg + u'MA{}金叉MA{}'.format(self.para_ma2_len, self.para_ma3_len)
elif self.ma23_count == -1:
msg = msg + u'MA{}死叉MA{}'.format(self.para_ma2_len, self.para_ma3_len)
if self.para_ema1_len > 0 and len(self.line_ema1) > 0:
msg = msg + u',EMA({0}):{1}'.format(self.para_ema1_len, self.line_ema1[-1])
if self.para_ema2_len > 0 and len(self.line_ema2) > 0:
msg = msg + u',EMA({0}):{1}'.format(self.para_ema2_len, self.line_ema2[-1])
if self.para_ema3_len > 0 and len(self.line_ema3) > 0:
msg = msg + u',EMA({0}):{1}'.format(self.para_ema3_len, self.line_ema3[-1])
if self.para_dmi_len > 0 and len(self.line_pdi) > 0:
msg = msg + u',Pdi:{0};Mdi:{1};Adx:{2}'.format(self.line_pdi[-1], self.line_mdi[-1], self.line_adx[-1])
if self.para_atr1_len > 0 and len(self.line_atr1) > 0:
msg = msg + u',Atr({0}):{1}'.format(self.para_atr1_len, self.line_atr1[-1])
if self.para_atr2_len > 0 and len(self.line_atr2) > 0:
msg = msg + u',Atr({0}):{1}'.format(self.para_atr2_len, self.line_atr2[-1])
if self.para_atr3_len > 0 and len(self.line_atr3) > 0:
msg = msg + u',Atr({0}):{1}'.format(self.para_atr3_len, self.line_atr3[-1])
if self.para_vol_len > 0 and len(self.line_vol_ma) > 0:
msg = msg + u',AvgVol({0}):{1}'.format(self.para_vol_len, self.line_vol_ma[-1])
if self.para_rsi1_len > 0 and len(self.line_rsi1) > 0:
msg = msg + u',Rsi({0}):{1}'.format(self.para_rsi1_len, self.line_rsi1[-1])
if self.para_rsi2_len > 0 and len(self.line_rsi2) > 0:
msg = msg + u',Rsi({0}):{1}'.format(self.para_rsi2_len, self.line_rsi2[-1])
if self.para_kdj_len > 0 and len(self.line_k) > 0:
msg = msg + u',KDJ({},{},{}):{},{},{}'.format(self.para_kdj_len,
self.para_kdj_slow_len,
self.para_kdj_smooth_len,
round(self.line_k[-1], self.round_n),
round(self.line_d[-1], self.round_n),
round(self.line_j[-1], self.round_n))
if self.para_kdj_tb_len > 0 and len(self.line_k) > 0:
msg = msg + u',KDJ_TB({},{},{}):{},K:{},D:{},J:{}'.format(self.para_kdj_tb_len,
self.para_kdj_slow_len,
self.para_kdj_smooth_len,
round(self.line_rsv[-1], self.round_n),
round(self.line_k[-1], self.round_n),
round(self.line_d[-1], self.round_n),
round(self.line_j[-1], self.round_n))
if self.para_cci_len > 0 and len(self.line_cci) > 0:
msg = msg + u',Cci({0}):{1}'.format(self.para_cci_len, self.line_cci[-1])
if (self.para_boll_len > 0 or self.para_boll_tb_len > 0) and len(self.line_boll_upper) > 0:
msg = msg + u',Boll({}):std:{},mid:{},up:{},low:{},Atan:[mid:{},up:{},low:{}]'. \
format(self.para_boll_len, round(self.line_boll_std[-1], self.round_n),
round(self.line_boll_middle[-1], self.round_n),
round(self.line_boll_upper[-1], self.round_n),
round(self.line_boll_lower[-1], self.round_n),
round(self.line_middle_atan[-1], 2) if len(self.line_middle_atan) > 0 else 0,
round(self.line_upper_atan[-1], 2) if len(self.line_upper_atan) > 0 else 0,
round(self.line_lower_atan[-1], 2) if len(self.line_lower_atan) > 0 else 0)
if (self.para_boll2_len > 0 or self.para_boll2_tb_len > 0) and len(self.line_boll2_upper) > 0:
msg = msg + u',Boll2({}):std:{},m:{},u:{},l:{}'. \
format(self.para_boll2_len, round(self.line_boll2_std[-1], self.round_n),
       round(self.line_boll2_middle[-1], self.round_n),
       round(self.line_boll2_upper[-1], self.round_n),
       round(self.line_boll2_lower[-1], self.round_n))
if self.para_macd_fast_len > 0 and len(self.line_dif) > 0:
msg = msg + u',MACD({0},{1},{2}):Dif:{3},Dea{4},Macd:{5}'. \
format(self.para_macd_fast_len, self.para_macd_slow_len, self.para_macd_signal_len,
round(self.line_dif[-1], self.round_n),
round(self.line_dea[-1], self.round_n),
round(self.line_macd[-1], self.round_n))
if len(self.line_macd) > 2:
if self.line_macd[-2] < 0 < self.line_macd[-1]:
msg = msg + u'金叉 '
elif self.line_macd[-2] > 0 > self.line_macd[-1]:
msg = msg + u'死叉 '
if self.dif_top_divergence:
msg = msg + u'Dif顶背离 '
if self.macd_top_divergence:
msg = msg + u'MACD顶背离 '
if self.dif_buttom_divergence:
msg = msg + u'Dif底背离 '
if self.macd_buttom_divergence:
msg = msg + u'MACD底背离 '
if self.para_active_kf and len(self.line_state_mean) > 0:
msg = msg + u',Kalman:{0}'.format(self.line_state_mean[-1])
if self.para_active_skd and len(self.line_sk) > 1 and len(self.line_sd) > 1:
msg = msg + u',SK:{}/SD:{}{}{},count:{}' \
.format(round(self.line_sk[-1], 2),
round(self.line_sd[-1], 2),
u'金叉' if self.cur_skd_count == 1 else u'',
u'死叉' if self.cur_skd_count == -1 else u'',
self.cur_skd_count)
if self.cur_skd_divergence == 1:
msg = msg + u'底背离'
elif self.cur_skd_divergence == -1:
msg = msg + u'顶背离'
if self.para_active_yb and len(self.line_yb) > 1:
c = 'Blue' if self.line_yb[-1] < self.line_yb[-2] else 'Yellow'
msg = msg + u',YB:{},[{}({})]'.format(self.line_yb[-1], c, self.cur_yb_count)
if self.para_sar_step > 0 and self.para_sar_limit > 0:
if len(self.line_sar) > 1:
msg = msg + u',Sar:{},{}(h={},l={}),#{}' \
.format(self.cur_sar_direction,
round(self.line_sar[-2], self.round_n),
self.line_sar_top[-1],
self.line_sar_buttom[-1],
self.cur_sar_count)
if self.para_active_area:
msg = msg + 'Area:{}'.format(self.cur_area)
if self.para_bias_len > 0 and len(self.line_bias) > 0:
msg = msg + u',Bias({}):{}'. \
format(self.para_bias_len, round(self.line_bias[-1], self.round_n))
if self.para_bias2_len > 0 and len(self.line_bias2) > 0:
msg = msg + u',Bias2({}):{}'. \
format(self.para_bias2_len, round(self.line_bias2[-1], self.round_n))
if self.para_bias3_len > 0 and len(self.line_bias3) > 0:
msg = msg + u',Bias3({}):{}'. \
format(self.para_bias3_len, round(self.line_bias3[-1], self.round_n))
return msg
def first_tick(self, tick: TickData):
""" K线的第一个Tick数据"""
self.cur_bar = BarData(
gateway_name=tick.gateway_name,
symbol=tick.symbol,
exchange=tick.exchange,
datetime=tick.datetime
) # 创建新的K线
# 计算K线的整点分钟周期,这里周期最小是1分钟。如果你是采用非整点分钟,例如1.5分钟,请把这段注解掉
if self.minute_interval and self.interval == Interval.SECOND:
self.minute_interval = int(self.bar_interval / 60)
if self.minute_interval < 1:
self.minute_interval = 1
fixedMin = int(tick.datetime.minute / self.minute_interval) * self.minute_interval
tick.datetime = tick.datetime.replace(minute=fixedMin)
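# Worked example: with bar_interval=300s (minute_interval=5), a first tick at 10:07 is
# stamped as 10:05, so the bar boundary always falls on a whole multiple of 5 minutes.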
self.cur_bar.vt_symbol = tick.vt_symbol
self.cur_bar.symbol = tick.symbol
self.cur_bar.exchange = tick.exchange
self.cur_bar.open_interest = tick.open_interest
self.cur_bar.open_price = tick.last_price # O L H C
self.cur_bar.high_price = tick.last_price
self.cur_bar.low_price = tick.last_price
self.cur_bar.close_price = tick.last_price
self.cur_bar.mid4 = tick.last_price # 4价均价
self.cur_bar.mid5 = tick.last_price # 5价均价
# K线的日期时间
self.cur_bar.trading_day = tick.trading_day # K线所在的交易日期
self.cur_bar.date = tick.date # K线的日期,(夜盘的话,与交易日期不同哦)
self.cur_bar.datetime = tick.datetime
if (self.interval == Interval.SECOND and self.bar_interval % 60 == 0) \
or self.interval in [Interval.MINUTE, Interval.HOUR, Interval.DAILY]:
# K线的日期时间(去除秒)设为第一个Tick的时间
self.cur_bar.datetime = self.cur_bar.datetime.replace(second=0, microsecond=0)
self.cur_bar.time = self.cur_bar.datetime.strftime('%H:%M:%S')
self.cur_bar.volume = tick.volume
if self.cur_trading_day != self.cur_bar.trading_day or not self.line_bar:
# bar的交易日与记录的当前交易日不一致:
self.cur_trading_day = self.cur_bar.trading_day
self.is_first_tick = True # 标识该Tick属于该Bar的第一个tick数据
self.line_bar.append(self.cur_bar) # 推入到lineBar队列
def generate_bar(self, tick: TickData):
"""生成 line Bar """
self.bar_len = len(self.line_bar)
# 保存第一个K线数据
if self.bar_len == 0:
self.first_tick(tick)
return
# 清除480周期前的数据,
if self.bar_len > self.max_hold_bars:
del self.line_bar[0]
# 与最后一个BAR的时间比对,判断是否超过5分钟
lastBar = self.line_bar[-1]
# 处理日内的间隔时段最后一个tick,如10:15分,11:30分,15:00 和 2:30分
endtick = False
if not self.is_7x24:
if (tick.datetime.hour == 10 and tick.datetime.minute == 15) \
or (tick.datetime.hour == 11 and tick.datetime.minute == 30) \
or (tick.datetime.hour == 15 and tick.datetime.minute == 00) \
or (tick.datetime.hour == 2 and tick.datetime.minute == 30):
endtick = True
# 夜盘1:30收盘
if self.underly_symbol in NIGHT_MARKET_SQ2 and tick.datetime.hour == 1 and tick.datetime.minute == 00:
endtick = True
# 夜盘23:00收盘
if self.underly_symbol in NIGHT_MARKET_23 and tick.datetime.hour == 23 and tick.datetime.minute == 00:
endtick = True
# 满足时间要求
# 1,秒周期,tick的时间,距离最后一个bar的开始时间,已经超出bar的时间周期(barTimeInterval)
# 2,分钟周期,tick的时间属于的bar不等于最后一个bar的时间属于的bar
# 3,小时周期,取整=0
# 4、日周期,开盘时间
# 5、不是最后一个结束tick
is_new_bar = False
if self.last_tick is None: # Fix for Min10, 13:30 could not generate
self.last_tick = tick
# self.write_log('drawLineBar: datetime={}, lastPrice={}, endtick={}'.format(tick.datetime.strftime("%Y%m%d %H:%M:%S"), tick.last_price, endtick))
if not endtick:
if self.interval == Interval.SECOND:
if (tick.datetime - lastBar.datetime).total_seconds() >= self.bar_interval:
is_new_bar = True
elif self.interval == Interval.MINUTE:
# 时间到达整点分钟数,例如5分钟的 0,5,15,20,,.与上一个tick的分钟数不是同一分钟
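# Worked example: with bar_interval=5, a tick at 10:04 maps to daily bucket 604//5 = 120 and
# a tick at 10:07 maps to 607//5 = 121, so crossing 10:05 starts a new bar.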
cur_bars_in_day = int(((tick.datetime - datetime.strptime(tick.datetime.strftime('%Y-%m-%d'),
'%Y-%m-%d')).total_seconds() / 60 / self.bar_interval))
last_bars_in_day = int(((lastBar.datetime - datetime.strptime(lastBar.datetime.strftime('%Y-%m-%d'),
'%Y-%m-%d')).total_seconds() / 60 / self.bar_interval))
if cur_bars_in_day != last_bars_in_day:
is_new_bar = True
elif self.interval == Interval.HOUR:
if self.bar_interval == 1 and tick.datetime is not None and tick.datetime.hour != self.last_tick.datetime.hour:
is_new_bar = True
elif not self.is_7x24 and self.bar_interval == 2 and tick.datetime is not None \
and tick.datetime.hour != self.last_tick.datetime.hour \
and tick.datetime.hour in {1, 9, 11, 13, 21, 23}:
is_new_bar = True
elif not self.is_7x24 and self.bar_interval == 4 and tick.datetime is not None \
and tick.datetime.hour != self.last_tick.datetime.hour \
and tick.datetime.hour in {1, 9, 13, 21}:
is_new_bar = True
else:
cur_bars_in_day = int(tick.datetime.hour / self.bar_interval)
last_bars_in_day = int(lastBar.datetime.hour / self.bar_interval)
if cur_bars_in_day != last_bars_in_day:
is_new_bar = True
elif self.interval == Interval.DAILY:
if not self.is_7x24:
if tick.datetime is not None \
and (tick.datetime.hour == 21 or tick.datetime.hour == 9) \
and 14 <= self.last_tick.datetime.hour <= 15:
is_new_bar = True
else:
if tick.date != lastBar.date:
is_new_bar = True
if is_new_bar:
# 创建并推入新的Bar
self.first_tick(tick)
# 触发OnBar事件
self.on_bar(lastBar)
else:
# 更新当前最后一个bar
self.is_first_tick = False
# 更新最高价、最低价、收盘价、成交量
lastBar.high_price = max(lastBar.high_price, tick.last_price)
lastBar.low_price = min(lastBar.low_price, tick.last_price)
lastBar.close_price = tick.last_price
lastBar.open_interest = tick.open_interest
lastBar.volume += tick.volume
# 更新Bar的颜色
if lastBar.close_price > lastBar.open_price:
lastBar.color = Color.RED
elif lastBar.close_price < lastBar.open_price:
lastBar.color = Color.BLUE
else:
lastBar.color = Color.EQUAL
# 实时计算
self.rt_executed = False
if not endtick:
self.last_tick = tick
def __count_pre_high_low(self):
"""计算 K线的前周期最高和最低"""
if self.para_pre_len <= 0: # 不计算
return
count_len = min(self.para_pre_len, self.bar_len)
# 2.计算前inputPreLen周期内(不包含当前周期)的Bar高点和低点
preHigh = max(self.high_array[-count_len:])
preLow = min(self.low_array[-count_len:])
# 保存
if len(self.line_pre_high) > self.max_hold_bars:
del self.line_pre_high[0]
self.line_pre_high.append(preHigh)
# 保存
if len(self.line_pre_low) > self.max_hold_bars:
del self.line_pre_low[0]
self.line_pre_low.append(preLow)
def __count_sar(self):
"""计算K线的SAR"""
if self.bar_len < 5:
return
if not (self.para_sar_step > 0 and self.para_sar_limit > self.para_sar_step):  # 不计算
return
if len(self.line_sar_sr_up) == 0 and len(self.line_sar_sr_down) == 0:
if self.line_bar[-2].close_price > self.line_bar[-5].close_price:
# 标记为上涨趋势
sr0 = min(self.low_array[0:])
af0 = 0
ep0 = self.high_array[-1]
self.line_sar_sr_up.append(sr0)
self.line_sar_ep_up.append(ep0)
self.line_sar_af_up.append(af0)
self.line_sar.append(sr0)
self.cur_sar_direction = 'up'
self.cur_sar_count = 0
else:
# 标记为下跌趋势
sr0 = max(self.high_array[0:])
af0 = 0
ep0 = self.low_array[-1]
self.line_sar_sr_down.append(sr0)
self.line_sar_ep_down.append(ep0)
self.line_sar_af_down.append(af0)
self.line_sar.append(sr0)
self.cur_sar_direction = 'down'
self.cur_sar_count = 0
self.line_sar_top.append(self.line_bar[-2].high_price) # SAR的谷顶
self.line_sar_buttom.append(self.line_bar[-2].low_price) # SAR的波底
elif len(self.line_sar_sr_up) > 0:
if self.low_array[-1] > self.line_sar_sr_up[-1]:
sr0 = self.line_sar_sr_up[-1]
ep0 = self.high_array[-1] # 文华使用前一个K线的最高价
af0 = min(self.para_sar_limit,
self.line_sar_af_up[-1] + self.para_sar_step) # 文华的af随着K线的数目增加而递增,没有判断新高
sr = sr0 + af0 * (ep0 - sr0)
self.line_sar_sr_up.append(sr)
self.line_sar_ep_up.append(ep0)
self.line_sar_af_up.append(af0)
self.line_sar.append(sr)
self.cur_sar_count += 1
# self.write_log('Up: sr0={},ep0={},af0={},sr={}'.format(sr0, ep0, af0, sr))
elif self.low_array[-1] <= self.line_sar_sr_up[-1]:
ep0 = max(self.high_array[-len(self.line_sar_sr_up):])
sr0 = ep0
af0 = 0
self.line_sar_sr_down.append(sr0)
self.line_sar_ep_down.append(ep0)
self.line_sar_af_down.append(af0)
self.line_sar.append(sr0)
self.cur_sar_direction = 'down'
# self.write_log('Up->Down: sr0={},ep0={},af0={},sr={}'.format(sr0, ep0, af0, sr0))
# self.write_log('lineSarTop={}, lineSarButtom={}, len={}'.format(self.lineSarTop[-1], self.lineSarButtom[-1],len(self.lineSarSrUp)))
self.line_sar_top.append(self.line_bar[-2].high_price)
self.line_sar_buttom.append(self.line_bar[-2].low_price)
self.line_sar_sr_up = []
self.line_sar_ep_up = []
self.line_sar_af_up = []
sr0 = self.line_sar_sr_down[-1]
ep0 = self.low_array[-1] # 文华使用前一个K线的最低价
af0 = min(self.para_sar_limit,
self.line_sar_af_down[-1] + self.para_sar_step) # 文华的af随着K线的数目增加而递增,没有判断新高
sr = sr0 + af0 * (ep0 - sr0)
self.line_sar_sr_down.append(sr)
self.line_sar_ep_down.append(ep0)
self.line_sar_af_down.append(af0)
self.line_sar.append(sr)
self.cur_sar_count = 0
# self.write_log('Down: sr0={},ep0={},af0={},sr={}'.format(sr0, ep0, af0, sr))
elif len(self.line_sar_sr_down) > 0:
if self.high_array[-1] < self.line_sar_sr_down[-1]:
sr0 = self.line_sar_sr_down[-1]
ep0 = self.low_array[-1] # 文华使用前一个K线的最低价
af0 = min(self.para_sar_limit,
self.line_sar_af_down[-1] + self.para_sar_step) # 文华的af随着K线的数目增加而递增,没有判断新高
sr = sr0 + af0 * (ep0 - sr0)
self.line_sar_sr_down.append(sr)
self.line_sar_ep_down.append(ep0)
self.line_sar_af_down.append(af0)
self.line_sar.append(sr)
self.cur_sar_count -= 1
# self.write_log('Down: sr0={},ep0={},af0={},sr={}'.format(sr0, ep0, af0, sr))
elif self.high_array[-1] >= self.line_sar_sr_down[-1]:
ep0 = min(self.low_array[-len(self.line_sar_sr_down):])
sr0 = ep0
af0 = 0
self.line_sar_sr_up.append(sr0)
self.line_sar_ep_up.append(ep0)
self.line_sar_af_up.append(af0)
self.line_sar.append(sr0)
self.cur_sar_direction = 'up'
# self.write_log('Down->Up: sr0={},ep0={},af0={},sr={}'.format(sr0, ep0, af0, sr0))
# self.write_log('lineSarTop={}, lineSarButtom={}, len={}'.format(self.lineSarTop[-1], self.lineSarButtom[-1],len(self.lineSarSrDown)))
self.line_sar_top.append(self.line_bar[-2].high_price)
self.line_sar_buttom.append(self.line_bar[-2].low_price)
self.line_sar_sr_down = []
self.line_sar_ep_down = []
self.line_sar_af_down = []
sr0 = self.line_sar_sr_up[-1]
ep0 = self.high_array[-1] # 文华使用前一个K线的最高价
af0 = min(self.para_sar_limit,
self.line_sar_af_up[-1] + self.para_sar_step) # 文华的af随着K线的数目增加而递增,没有判断新高
sr = sr0 + af0 * (ep0 - sr0)
self.line_sar_sr_up.append(sr)
self.line_sar_ep_up.append(ep0)
self.line_sar_af_up.append(af0)
self.line_sar.append(sr)
self.cur_sar_count = 0
# self.write_log('Up: sr0={},ep0={},af0={},sr={}'.format(sr0, ep0, af0, sr))
# 更新抛物线的最高值和最低值
if self.line_sar_top[-1] < self.high_array[-1]:
self.line_sar_top[-1] = self.high_array[-1]
if self.line_sar_buttom[-1] > self.low_array[-1]:
self.line_sar_buttom[-1] = self.low_array[-1]
if len(self.line_sar) > self.max_hold_bars:
del self.line_sar[0]
def __count_ma(self):
"""计算K线的MA1 和MA2"""
if not (self.para_ma1_len > 0 or self.para_ma2_len > 0 or self.para_ma3_len > 0): # 不计算
return
# 1、lineBar满足长度才执行计算
if self.bar_len < min(7, self.para_ma1_len, self.para_ma2_len, self.para_ma3_len) + 2:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算MA需要:{1}'.
format(self.bar_len,
min(7, self.para_ma1_len, self.para_ma2_len, self.para_ma3_len) + 2))
return
# 计算第一条MA均线
if self.para_ma1_len > 0:
count_len = min(self.para_ma1_len, self.bar_len)
barMa1 = ta.MA(self.close_array[-count_len:], count_len)[-1]
barMa1 = round(float(barMa1), self.round_n)
if len(self.line_ma1) > self.max_hold_bars:
del self.line_ma1[0]
self.line_ma1.append(barMa1)
# 计算斜率
if len(self.line_ma1) > 2 and self.line_ma1[-2] != 0:
ma1_atan = math.atan((self.line_ma1[-1] / self.line_ma1[-2] - 1) * 100) * 180 / math.pi
ma1_atan = round(ma1_atan, 3)
if len(self.line_ma1_atan) > self.max_hold_bars:
del self.line_ma1_atan[0]
self.line_ma1_atan.append(ma1_atan)
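# The "slope" stored above is atan of the one-bar percentage change, expressed in degrees:
# a 1% rise between consecutive MA values gives atan(1) * 180 / pi = 45 degrees, a flat MA gives 0.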
# 计算第二条MA均线
if self.para_ma2_len > 0:
count_len = min(self.para_ma2_len, self.bar_len)
barMa2 = ta.MA(self.close_array[-count_len:], count_len)[-1]
barMa2 = round(float(barMa2), self.round_n)
if len(self.line_ma2) > self.max_hold_bars:
del self.line_ma2[0]
self.line_ma2.append(barMa2)
# 计算斜率
if len(self.line_ma2) > 2 and self.line_ma2[-2] != 0:
ma2_atan = math.atan((self.line_ma2[-1] / self.line_ma2[-2] - 1) * 100) * 180 / math.pi
ma2_atan = round(ma2_atan, 3)
if len(self.line_ma2_atan) > self.max_hold_bars:
del self.line_ma2_atan[0]
self.line_ma2_atan.append(ma2_atan)
# 计算第三条MA均线
if self.para_ma3_len > 0:
count_len = min(self.para_ma3_len, self.bar_len)
barMa3 = ta.MA(self.close_array[-count_len:], count_len)[-1]
barMa3 = round(float(barMa3), self.round_n)
if len(self.line_ma3) > self.max_hold_bars:
del self.line_ma3[0]
self.line_ma3.append(barMa3)
# 计算斜率
if len(self.line_ma3) > 2 and self.line_ma3[-2] != 0:
ma3_atan = math.atan((self.line_ma3[-1] / self.line_ma3[-2] - 1) * 100) * 180 / math.pi
ma3_atan = round(ma3_atan, 3)
if len(self.line_ma3_atan) > self.max_hold_bars:
del self.line_ma3_atan[0]
self.line_ma3_atan.append(ma3_atan)
# 计算MA1,MA2,MA3的金叉死叉
if len(self.line_ma1) >= 2 and len(self.line_ma2) > 2:
golden_cross = False
dead_cross = False
if self.line_ma1[-1] > self.line_ma1[-2] \
and self.line_ma1[-1] > self.line_ma2[-1] \
and self.line_ma1[-2] <= self.line_ma2[-2]:
golden_cross = True
if self.line_ma1[-1] < self.line_ma1[-2] \
and self.line_ma1[-1] < self.line_ma2[-1] \
and self.line_ma1[-2] >= self.line_ma2[-2]:
dead_cross = True
if self.ma12_count <= 0:
if golden_cross:
self.ma12_count = 1
elif self.line_ma1[-1] < self.line_ma2[-1]:
self.ma12_count -= 1
elif self.ma12_count >= 0:
if dead_cross:
self.ma12_count = -1
elif self.line_ma1[-1] > self.line_ma2[-1]:
self.ma12_count += 1
if len(self.line_ma2) >= 2 and len(self.line_ma3) > 2:
golden_cross = False
dead_cross = False
if self.line_ma2[-1] > self.line_ma2[-2] \
and self.line_ma2[-1] > self.line_ma3[-1] \
and self.line_ma2[-2] <= self.line_ma3[-2]:
golden_cross = True
if self.line_ma2[-1] < self.line_ma2[-2] \
and self.line_ma2[-1] < self.line_ma3[-1] \
and self.line_ma2[-2] >= self.line_ma3[-2]:
dead_cross = True
if self.ma23_count <= 0:
if golden_cross:
self.ma23_count = 1
elif self.line_ma2[-1] < self.line_ma3[-1]:
self.ma23_count -= 1
elif self.ma23_count >= 0:
if dead_cross:
self.ma23_count = -1
elif self.line_ma2[-1] > self.line_ma3[-1]:
self.ma23_count += 1
if len(self.line_ma1) >= 2 and len(self.line_ma3) > 2:
golden_cross = False
dead_cross = False
if self.line_ma1[-1] > self.line_ma1[-2] \
and self.line_ma1[-1] > self.line_ma3[-1] \
and self.line_ma1[-2] <= self.line_ma3[-2]:
golden_cross = True
if self.line_ma1[-1] < self.line_ma1[-2] \
and self.line_ma1[-1] < self.line_ma3[-1] \
and self.line_ma1[-2] >= self.line_ma3[-2]:
dead_cross = True
if self.ma13_count <= 0:
if golden_cross:
self.ma13_count = 1
elif self.line_ma1[-1] < self.line_ma3[-1]:
self.ma13_count -= 1
elif self.ma13_count >= 0:
if dead_cross:
self.ma13_count = -1
elif self.line_ma1[-1] > self.line_ma3[-1]:
self.ma13_count += 1
def rt_count_ma(self):
"""
实时计算MA得值
:param ma_num:第几条均线, 1,对应inputMa1Len,,,,
:return:
"""
if self.para_ma1_len > 0:
count_len = min(self.bar_len, self.para_ma1_len)
if count_len > 0:
close_ma_array = ta.MA(np.append(self.close_array[-count_len:], [self.line_bar[-1].close_price]),
count_len)
self._rt_ma1 = round(float(close_ma_array[-1]), self.round_n)
# 计算斜率
if len(close_ma_array) > 2 and close_ma_array[-2] != 0:
self._rt_ma1_atan = round(
math.atan((close_ma_array[-1] / close_ma_array[-2] - 1) * 100) * 180 / math.pi, 3)
if self.para_ma2_len > 0:
count_len = min(self.bar_len, self.para_ma2_len)
if count_len > 0:
close_ma_array = ta.MA(np.append(self.close_array[-count_len:], [self.line_bar[-1].close_price]),
count_len)
self._rt_ma2 = round(float(close_ma_array[-1]), self.round_n)
# 计算斜率
if len(close_ma_array) > 2 and close_ma_array[-2] != 0:
self._rt_ma2_atan = round(
math.atan((close_ma_array[-1] / close_ma_array[-2] - 1) * 100) * 180 / math.pi, 3)
if self.para_ma3_len > 0:
count_len = min(self.bar_len, self.para_ma3_len)
if count_len > 0:
close_ma_array = ta.MA(np.append(self.close_array[-count_len:], [self.line_bar[-1].close_price]),
count_len)
self._rt_ma3 = round(float(close_ma_array[-1]), self.round_n)
# 计算斜率
if len(close_ma_array) > 2 and close_ma_array[-2] != 0:
self._rt_ma3_atan = round(
math.atan((close_ma_array[-1] / close_ma_array[-2] - 1) * 100) * 180 / math.pi, 3)
@property
def rt_ma1(self):
self.check_rt_funcs(self.rt_count_ma)
if self._rt_ma1 is None and len(self.line_ma1) > 0:
return self.line_ma1[-1]
return self._rt_ma1
@property
def rt_ma2(self):
self.check_rt_funcs(self.rt_count_ma)
if self._rt_ma2 is None and len(self.line_ma2) > 0:
return self.line_ma2[-1]
return self._rt_ma2
@property
def rt_ma3(self):
self.check_rt_funcs(self.rt_count_ma)
if self._rt_ma3 is None and len(self.line_ma3) > 0:
return self.line_ma3[-1]
return self._rt_ma3
@property
def rt_ma1_atan(self):
self.check_rt_funcs(self.rt_count_ma)
if self._rt_ma1_atan is None and len(self.line_ma1_atan) > 0:
return self.line_ma1_atan[-1]
return self._rt_ma1_atan
@property
def rt_ma2_atan(self):
self.check_rt_funcs(self.rt_count_ma)
if self._rt_ma2_atan is None and len(self.line_ma2_atan) > 0:
return self.line_ma2_atan[-1]
return self._rt_ma2_atan
@property
def rt_ma3_atan(self):
self.check_rt_funcs(self.rt_count_ma)
if self._rt_ma3_atan is None and len(self.line_ma3_atan) > 0:
return self.line_ma3_atan[-1]
return self._rt_ma3_atan
def __count_ema(self):
"""计算K线的EMA1 和EMA2"""
if not (self.para_ema1_len > 0 or self.para_ema2_len > 0 or self.para_ema3_len > 0): # 不计算
return
ema1_data_len = min(self.para_ema1_len * 4, self.para_ema1_len + 40) if self.para_ema1_len > 0 else 0
ema2_data_len = min(self.para_ema2_len * 4, self.para_ema2_len + 40) if self.para_ema2_len > 0 else 0
ema3_data_len = min(self.para_ema3_len * 4, self.para_ema3_len + 40) if self.para_ema3_len > 0 else 0
max_data_len = max(ema1_data_len, ema2_data_len, ema3_data_len)
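# Rough rationale (interpretation): EMA needs warm-up history, so roughly 4x the period
# (capped at period + 40 bars) is fed to ta.EMA to keep the last value stable without
# holding the entire history.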
# 1、lineBar满足长度才执行计算
if self.bar_len < max_data_len:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算EMA需要:{1}'.
format(len(self.line_bar), max_data_len))
return
# 计算第一条EMA均线
if self.para_ema1_len > 0:
count_len = min(self.para_ema1_len, self.bar_len)
# 3、获取前InputN周期(不包含当前周期)的K线
barEma1 = ta.EMA(self.close_array[-ema1_data_len:], count_len)[-1]
barEma1 = round(float(barEma1), self.round_n)
if len(self.line_ema1) > self.max_hold_bars:
del self.line_ema1[0]
self.line_ema1.append(barEma1)
# 计算第二条EMA均线
if self.para_ema2_len > 0:
count_len = min(self.bar_len, self.para_ema2_len)
# 3、获取前InputN周期(不包含当前周期)的自适应均线
barEma2 = ta.EMA(self.close_array[-ema2_data_len:], count_len)[-1]
barEma2 = round(float(barEma2), self.round_n)
if len(self.line_ema2) > self.max_hold_bars:
del self.line_ema2[0]
self.line_ema2.append(barEma2)
# 计算第三条EMA均线
if self.para_ema3_len > 0:
count_len = min(self.bar_len, self.para_ema3_len)
# 3、获取前InputN周期(不包含当前周期)的自适应均线
barEma3 = ta.EMA(self.close_array[-ema3_data_len:], count_len)[-1]
barEma3 = round(float(barEma3), self.round_n)
if len(self.line_ema3) > self.max_hold_bars:
del self.line_ema3[0]
self.line_ema3.append(barEma3)
def rt_count_ema(self):
"""计算K线的EMA1 和EMA2"""
if not (self.para_ema1_len > 0 or self.para_ema2_len > 0 or self.para_ema3_len > 0): # 不计算
return
ema1_data_len = min(self.para_ema1_len * 4, self.para_ema1_len + 40) if self.para_ema1_len > 0 else 0
ema2_data_len = min(self.para_ema2_len * 4, self.para_ema2_len + 40) if self.para_ema2_len > 0 else 0
ema3_data_len = min(self.para_ema3_len * 4, self.para_ema3_len + 40) if self.para_ema3_len > 0 else 0
max_data_len = max(ema1_data_len, ema2_data_len, ema3_data_len)
# 1、lineBar满足长度才执行计算
if self.bar_len < max_data_len:
return
# 计算第一条EMA均线
if self.para_ema1_len > 0:
count_len = min(self.para_ema1_len, self.bar_len)
# 3、获取前InputN周期(不包含当前周期)的K线
barEma1 = ta.EMA(np.append(self.close_array[-ema1_data_len:], [self.cur_price]), count_len)[-1]
self._rt_ema1 = round(float(barEma1), self.round_n)
# 计算第二条EMA均线
if self.para_ema2_len > 0:
count_len = min(self.bar_len, self.para_ema2_len)
# 3、获取前InputN周期(不包含当前周期)的自适应均线
barEma2 = ta.EMA(np.append(self.close_array[-ema2_data_len:], [self.cur_price]), count_len)[-1]
self._rt_ema2 = round(float(barEma2), self.round_n)
# 计算第三条EMA均线
if self.para_ema3_len > 0:
count_len = min(self.bar_len, self.para_ema3_len)
# 3、获取前InputN周期(不包含当前周期)的自适应均线
barEma3 = ta.EMA(np.append(self.close_array[-ema3_data_len:], [self.cur_price]), count_len)[-1]
self._rt_ema3 = round(float(barEma3), self.round_n)
@property
def rt_ema1(self):
self.check_rt_funcs(self.rt_count_ema)
if self._rt_ema1 is None and len(self.line_ema1) > 0:
return self.line_ema1[-1]
return self._rt_ema1
@property
def rt_ema2(self):
self.check_rt_funcs(self.rt_count_ema)
if self._rt_ema2 is None and len(self.line_ema2) > 0:
return self.line_ema2[-1]
return self._rt_ema2
@property
def rt_ema3(self):
self.check_rt_funcs(self.rt_count_ema)
if self._rt_ema3 is None and len(self.line_ema3) > 0:
return self.line_ema3[-1]
return self._rt_ema3
def __count_dmi(self):
"""计算K线的DMI数据和条件"""
if self.para_dmi_len <= 0: # 不计算
return
# 1、lineMx满足长度才执行计算
if len(self.line_bar) < self.para_dmi_len + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算DMI需要:{1}'.format(len(self.line_bar), self.para_dmi_len + 1))
return
# 2、根据当前High,Low,(不包含当前周期)重新计算TR1,PDM,MDM和ATR
barTr1 = 0 # 获取InputP周期内的价差最大值之和
barPdm = 0 # InputP周期内的做多价差之和
barMdm = 0 # InputP周期内的做空价差之和
for i in range(self.bar_len - 1, self.bar_len - 1 - self.para_dmi_len, -1): # 周期 inputDmiLen
# 3.1、计算TR1
# 当前周期最高与最低的价差
high_low_spread = self.line_bar[i].high_price - self.line_bar[i].low_price
# 当前周期最高与昨收价的价差
high_preclose_spread = abs(self.line_bar[i].high_price - self.line_bar[i - 1].close_price)
# 当前周期最低与昨收价的价差
low_preclose_spread = abs(self.line_bar[i].low_price - self.line_bar[i - 1].close_price)
# 最大价差
max_spread = max(high_low_spread, high_preclose_spread, low_preclose_spread)
barTr1 = barTr1 + float(max_spread)
# 今高与昨高的价差
high_prehigh_spread = self.line_bar[i].high_price - self.line_bar[i - 1].high_price
# 昨低与今低的价差
low_prelow_spread = self.line_bar[i - 1].low_price - self.line_bar[i].low_price
# 3.2、计算周期内的做多价差之和
if high_prehigh_spread > 0 and high_prehigh_spread > low_prelow_spread:
barPdm = barPdm + high_prehigh_spread
# 3.3、计算周期内的做空价差之和
if low_prelow_spread > 0 and low_prelow_spread > high_prehigh_spread:
barMdm = barMdm + low_prelow_spread
# 6、计算上升动向指标,即做多的比率
if barTr1 == 0:
self.cur_pdi = 0
else:
self.cur_pdi = barPdm * 100 / barTr1
if len(self.line_pdi) > self.max_hold_bars:
del self.line_pdi[0]
self.line_pdi.append(self.cur_pdi)
# 7、计算下降动向指标,即做空的比率
if barTr1 == 0:
self.cur_mdi = 0
else:
self.cur_mdi = barMdm * 100 / barTr1
# 8、计算平均趋向指标 Adx,Adxr
if self.cur_mdi + self.cur_pdi == 0:
dx = 0
else:
dx = 100 * abs(self.cur_mdi - self.cur_pdi) / (self.cur_mdi + self.cur_pdi)
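# Worked example: pdi=30, mdi=10 -> dx = 100 * 20 / 40 = 50; pdi == mdi gives dx = 0 (no directional trend).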
if len(self.line_mdi) > self.max_hold_bars:
del self.line_mdi[0]
self.line_mdi.append(self.cur_mdi)
if len(self.line_dx) > self.max_hold_bars:
del self.line_dx[0]
self.line_dx.append(dx)
# 平均趋向指标,MA计算
if len(self.line_dx) < self.para_dmi_len + 1:
self.cur_adx = dx
else:
self.cur_adx = ta.EMA(np.array(self.line_dx, dtype=float), self.para_dmi_len)[-1]
# 保存Adx值
if len(self.line_adx) > self.max_hold_bars:
del self.line_adx[0]
self.line_adx.append(self.cur_adx)
# 趋向平均值,为当日ADX值与1周期前的ADX值的均值
if len(self.line_adx) == 1:
self.cur_adxr = self.line_adx[-1]
else:
self.cur_adxr = (self.line_adx[-1] + self.line_adx[-2]) / 2
# 保存Adxr值
if len(self.line_adxr) > self.max_hold_bars:
del self.line_adxr[0]
self.line_adxr.append(self.cur_adxr)
# 7、计算A,ADX值持续高于前一周期时,市场行情将维持原趋势
if len(self.line_adx) < 2:
self.cur_adx_trend = False
elif self.line_adx[-1] > self.line_adx[-2]:
self.cur_adx_trend = True
else:
self.cur_adx_trend = False
# ADXR值持续高于前一周期时,波动率比上一周期高
if len(self.line_adxr) < 2:
self.cur_adxr_trend = False
elif self.line_adxr[-1] > self.line_adxr[-2]:
self.cur_adxr_trend = True
else:
self.cur_adxr_trend = False
# 多过滤器条件,做多趋势,ADX高于前一天,上升动向> inputDmiMax
if self.cur_pdi > self.cur_mdi and self.cur_adx_trend and self.cur_adxr_trend and self.cur_pdi >= self.para_dmi_max:
self.signal_adx_long = True
self.write_log(u'{0}[DEBUG]Buy Signal On Bar,Pdi:{1}>Mdi:{2},adx[-1]:{3}>Adx[-2]:{4}'
.format(self.cur_tick.datetime, self.cur_pdi, self.cur_mdi, self.line_adx[-1],
self.line_adx[-2]))
else:
self.signal_adx_long = False
# 空过滤器条件 做空趋势,ADXR高于前一天,下降动向> inputMM
if self.cur_pdi < self.cur_mdi and self.cur_adx_trend and self.cur_adxr_trend and self.cur_mdi >= self.para_dmi_max:
self.signal_adx_short = True
self.write_log(u'{0}[DEBUG]Short Signal On Bar,Pdi:{1}<Mdi:{2},adx[-1]:{3}>Adx[-2]:{4}'
.format(self.cur_tick.datetime, self.cur_pdi, self.cur_mdi, self.line_adx[-1],
self.line_adx[-2]))
else:
self.signal_adx_short = False
def __count_atr(self):
"""计算Mx K线的各类数据和条件"""
# 1、lineMx满足长度才执行计算
maxAtrLen = max(self.para_atr1_len, self.para_atr2_len, self.para_atr3_len)
if maxAtrLen <= 0: # 不计算
return
data_need_len = min(7, maxAtrLen)
if self.bar_len < data_need_len:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算ATR需要:{1}'.
format(self.bar_len, data_need_len))
return
# 计算 ATR
if self.para_atr1_len > 0:
count_len = min(self.bar_len, self.para_atr1_len)
cur_atr1 = ta.ATR(self.high_array[-count_len * 2:], self.low_array[-count_len * 2:],
self.close_array[-count_len * 2:], count_len)
self.cur_atr1 = round(cur_atr1[-1], self.round_n)
if len(self.line_atr1) > self.max_hold_bars:
del self.line_atr1[0]
self.line_atr1.append(self.cur_atr1)
if self.para_atr2_len > 0:
count_len = min(self.bar_len, self.para_atr2_len)
cur_atr2 = ta.ATR(self.high_array[-count_len * 2:], self.low_array[-count_len * 2:],
self.close_array[-count_len * 2:], count_len)
self.cur_atr2 = round(cur_atr2[-1], self.round_n)
if len(self.line_atr2) > self.max_hold_bars:
del self.line_atr2[0]
self.line_atr2.append(self.cur_atr2)
if self.para_atr3_len > 0:
count_len = min(self.bar_len, self.para_atr3_len)
cur_atr3 = ta.ATR(self.high_array[-count_len * 2:], self.low_array[-count_len * 2:],
self.close_array[-count_len * 2:], count_len)
self.cur_atr3 = round(cur_atr3[-1], self.round_n)
if len(self.line_atr3) > self.max_hold_bars:
del self.line_atr3[0]
self.line_atr3.append(self.cur_atr3)
def __count_vol_ma(self):
"""计算平均成交量"""
# 1、lineBar满足长度才执行计算
if self.para_vol_len <= 0: # 不计算
return
bar_len = min(self.bar_len, self.para_vol_len)
sumVol = sum([x.volume for x in self.line_bar[-bar_len:]])
avgVol = round(sumVol / bar_len, 0)
if len(self.line_vol_ma) > self.max_hold_bars:
del self.line_vol_ma[0]
self.line_vol_ma.append(avgVol)
def __count_rsi(self):
"""计算K线的RSI"""
if self.para_rsi1_len <= 0 and self.para_rsi2_len <= 0:
return
# 1、lineBar满足长度才执行计算
if len(self.line_bar) < self.para_rsi1_len + 2:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算RSI需要:{1}'.
format(len(self.line_bar), self.para_rsi1_len + 2))
return
# 计算第1根RSI曲线
# 3、inputRsi1Len(包含当前周期)的相对强弱
barRsi = ta.RSI(self.close_array[-2 * self.para_rsi1_len:], self.para_rsi1_len)[-1]
barRsi = round(float(barRsi), self.round_n)
if len(self.line_rsi1) > self.max_hold_bars:
del self.line_rsi1[0]
self.line_rsi1.append(barRsi)
if len(self.line_rsi1) > 3:
# 峰
if self.line_rsi1[-1] < self.line_rsi1[-2] and self.line_rsi1[-3] < self.line_rsi1[-2]:
t = {}
t["Type"] = u'T'
t["RSI"] = self.line_rsi1[-2]
t["Close"] = self.close_array[-1]
if len(self.rsi_top_list) > self.max_hold_bars:
del self.rsi_top_list[0]
self.rsi_top_list.append(t)
self.cur_rsi_top_buttom = self.rsi_top_list[-1]
# 谷
elif self.line_rsi1[-1] > self.line_rsi1[-2] and self.line_rsi1[-3] > self.line_rsi1[-2]:
b = {}
b["Type"] = u'B'
b["RSI"] = self.line_rsi1[-2]
b["Close"] = self.close_array[-1]
if len(self.rsi_buttom_list) > self.max_hold_bars:
del self.rsi_buttom_list[0]
self.rsi_buttom_list.append(b)
self.cur_rsi_top_buttom = self.rsi_buttom_list[-1]
# 计算第二根RSI曲线
if self.para_rsi2_len > 0:
if self.bar_len < self.para_rsi2_len + 2:
return
barRsi = ta.RSI(self.close_array[-2 * self.para_rsi2_len:], self.para_rsi2_len)[-1]
barRsi = round(float(barRsi), self.round_n)
if len(self.line_rsi2) > self.max_hold_bars:
del self.line_rsi2[0]
self.line_rsi2.append(barRsi)
def __count_cmi(self):
"""市场波动指数(Choppy Market Index,CMI)是一个用来判断市场走势类型的技术分析指标。
它通过计算当前收盘价与一定周期前的收盘价的差值与这段时间内价格波动的范围的比值,来判断目前的股价走势是趋势还是盘整。
市场波动指数CMI的计算公式:
CMI=(Abs(Close-ref(close,(n-1)))*100/(HHV(high,n)-LLV(low,n))
其中,Abs是绝对值。
n是周期数,例如30。
市场波动指数CMI的使用方法:
这个指标的重要用途是来区分目前的股价走势类型:盘整,趋势。当CMI指标小于20时,市场走势是盘整;当CMI指标大于20时,市场在趋势期。
CMI指标还可以用于预测股价走势类型的转变。因为物极必反,当CMI长期处于0附近,此时,股价走势很可能从盘整转为趋势;当CMI长期处于100附近,此时,股价趋势很可能变弱,形成盘整。
"""
if self.para_cmi_len <= 0:
return
# 1、lineBar满足长度才执行计算
if self.bar_len < self.para_cmi_len:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算CMI需要:{1}'.
format(len(self.line_bar), self.para_cmi_len))
return
hhv = max(self.close_array[-self.para_cmi_len:])
llv = min(self.close_array[-self.para_cmi_len:])
if hhv == llv:
cmi = 100
else:
cmi = abs(self.close_array[-1] - self.close_array[-self.para_cmi_len]) * 100 / (hhv - llv)
cmi = round(cmi, self.round_n)
if len(self.line_cmi) > self.max_hold_bars:
del self.line_cmi[0]
self.line_cmi.append(cmi)
def __count_boll(self):
"""布林特线"""
if not (self.para_boll_len > 0
or self.para_boll2_len > 0
or self.para_boll_tb_len > 0
or self.para_boll2_tb_len > 0): # 不计算
return
if self.para_boll_len > 0:
if self.bar_len < min(14, self.para_boll_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Boll需要:{1}'.
format(len(self.line_bar), min(14, self.para_boll_len) + 1))
else:
bollLen = min(self.bar_len, self.para_boll_len)
# 不包含当前最新的Bar
upper_list, middle_list, lower_list = ta.BBANDS(self.close_array,
timeperiod=bollLen, nbdevup=self.para_boll_std_rate,
nbdevdn=self.para_boll_std_rate, matype=0)
if len(self.line_boll_upper) > self.max_hold_bars:
del self.line_boll_upper[0]
if len(self.line_boll_middle) > self.max_hold_bars:
del self.line_boll_middle[0]
if len(self.line_boll_lower) > self.max_hold_bars:
del self.line_boll_lower[0]
if len(self.line_boll_std) > self.max_hold_bars:
del self.line_boll_std[0]
# 1标准差
std = (upper_list[-1] - lower_list[-1]) / (self.para_boll_std_rate * 2)
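# ta.BBANDS returns upper = mid + k*std and lower = mid - k*std, so
# (upper - lower) / (2*k) recovers one standard deviation of the closes.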
self.line_boll_std.append(std)
upper = round(upper_list[-1], self.round_n)
self.line_boll_upper.append(upper) # 上轨
self.cur_upper = upper # 上轨
middle = round(middle_list[-1], self.round_n)
self.line_boll_middle.append(middle) # 中轨
self.cur_middle = middle # 中轨
lower = round(lower_list[-1], self.round_n)
self.line_boll_lower.append(lower) # 下轨
self.cur_lower = lower # 下轨
# 计算斜率
if len(self.line_boll_upper) > 2 and self.line_boll_upper[-2] != 0:
up_atan = math.atan((self.line_boll_upper[-1] / self.line_boll_upper[-2] - 1) * 100) * 180 / math.pi
up_atan = round(up_atan, 3)
if len(self.line_upper_atan) > self.max_hold_bars:
del self.line_upper_atan[0]
self.line_upper_atan.append(up_atan)
if len(self.line_boll_middle) > 2 and self.line_boll_middle[-2] != 0:
mid_atan = math.atan(
(self.line_boll_middle[-1] / self.line_boll_middle[-2] - 1) * 100) * 180 / math.pi
mid_atan = round(mid_atan, 3)
if len(self.line_middle_atan) > self.max_hold_bars:
del self.line_middle_atan[0]
self.line_middle_atan.append(mid_atan)
if len(self.line_boll_lower) > 2 and self.line_boll_lower[-2] != 0:
low_atan = math.atan(
(self.line_boll_lower[-1] / self.line_boll_lower[-2] - 1) * 100) * 180 / math.pi
low_atan = round(low_atan, 3)
if len(self.line_lower_atan) > self.max_hold_bars:
del self.line_lower_atan[0]
self.line_lower_atan.append(low_atan)
if self.para_boll2_len > 0:
if self.bar_len < min(14, self.para_boll2_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Boll2需要:{1}'.
format(len(self.line_bar), min(14, self.para_boll2_len) + 1))
else:
boll2Len = min(self.bar_len, self.para_boll2_len)
# 不包含当前最新的Bar
upper_list, middle_list, lower_list = ta.BBANDS(self.close_array[-2 * self.para_boll2_len:],
timeperiod=boll2Len, nbdevup=self.para_boll2_std_sate,
nbdevdn=self.para_boll2_std_sate, matype=0)
if len(self.line_boll2_upper) > self.max_hold_bars:
del self.line_boll2_upper[0]
if len(self.line_boll2_middle) > self.max_hold_bars:
del self.line_boll2_middle[0]
if len(self.line_boll2_lower) > self.max_hold_bars:
del self.line_boll2_lower[0]
if len(self.line_boll2_std) > self.max_hold_bars:
del self.line_boll2_std[0]
# 1标准差
std = (upper_list[-1] - lower_list[-1]) / (self.para_boll2_std_sate * 2)
self.line_boll2_std.append(std)
upper = round(upper_list[-1], self.round_n)
self.line_boll2_upper.append(upper) # 上轨
self.cur_upper2 = upper # 上轨
middle = round(middle_list[-1], self.round_n)
self.line_boll2_middle.append(middle) # 中轨
self.cur_middle2 = middle # 中轨
lower = round(lower_list[-1], self.round_n)
self.line_boll2_lower.append(lower) # 下轨
self.cur_lower2 = lower # 下轨
# 计算斜率
if len(self.line_boll2_upper) > 2 and self.line_boll2_upper[-2] != 0:
up_atan = math.atan(
(self.line_boll2_upper[-1] / self.line_boll2_upper[-2] - 1) * 100) * 180 / math.pi
up_atan = round(up_atan, 3)
if len(self.line_upper2_atan) > self.max_hold_bars:
del self.line_upper2_atan[0]
self.line_upper2_atan.append(up_atan)
if len(self.line_boll2_middle) > 2 and self.line_boll2_middle[-2] != 0:
mid_atan = math.atan(
(self.line_boll2_middle[-1] / self.line_boll2_middle[-2] - 1) * 100) * 180 / math.pi
mid_atan = round(mid_atan, 3)
if len(self.line_middle2_atan) > self.max_hold_bars:
del self.line_middle2_atan[0]
self.line_middle2_atan.append(mid_atan)
if len(self.line_boll2_lower) > 2 and self.line_boll2_lower[-2] != 0:
low_atan = math.atan(
(self.line_boll2_lower[-1] / self.line_boll2_lower[-2] - 1) * 100) * 180 / math.pi
low_atan = round(low_atan, 3)
if len(self.line_lower2_atan) > self.max_hold_bars:
del self.line_lower2_atan[0]
self.line_lower2_atan.append(low_atan)
if self.para_boll_tb_len > 0:
if self.bar_len < min(14, self.para_boll_tb_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Boll需要:{1}'.
format(len(self.line_bar), min(14, self.para_boll_tb_len) + 1))
else:
bollLen = min(self.bar_len, self.para_boll_tb_len)
# 不包含当前最新的Bar
if len(self.line_boll_upper) > self.max_hold_bars:
del self.line_boll_upper[0]
if len(self.line_boll_middle) > self.max_hold_bars:
del self.line_boll_middle[0]
if len(self.line_boll_lower) > self.max_hold_bars:
del self.line_boll_lower[0]
if len(self.line_boll_std) > self.max_hold_bars:
del self.line_boll_std[0]
# 1标准差
std = np.std(self.close_array[-2 * bollLen:], ddof=1)
self.line_boll_std.append(std)
middle = np.mean(self.close_array[-2 * bollLen:])
self.line_boll_middle.append(middle) # 中轨
self.cur_middle = middle - middle % self.price_tick # 中轨取整
upper = middle + self.para_boll_std_rate * std
self.line_boll_upper.append(upper) # 上轨
self.cur_upper = upper - upper % self.price_tick # 上轨取整
lower = middle - self.para_boll_std_rate * std
self.line_boll_lower.append(lower) # 下轨
self.cur_lower = lower - lower % self.price_tick # 下轨取整
# 计算斜率
if len(self.line_boll_upper) > 2 and self.line_boll_upper[-2] != 0:
up_atan = math.atan((self.line_boll_upper[-1] / self.line_boll_upper[-2] - 1) * 100) * 180 / math.pi
up_atan = round(up_atan, 3)
if len(self.line_upper_atan) > self.max_hold_bars:
del self.line_upper_atan[0]
self.line_upper_atan.append(up_atan)
if len(self.line_boll_middle) > 2 and self.line_boll_middle[-2] != 0:
mid_atan = math.atan(
(self.line_boll_middle[-1] / self.line_boll_middle[-2] - 1) * 100) * 180 / math.pi
mid_atan = round(mid_atan, 3)
if len(self.line_middle_atan) > self.max_hold_bars:
del self.line_middle_atan[0]
self.line_middle_atan.append(mid_atan)
if len(self.line_boll_lower) > 2 and self.line_boll_lower[-2] != 0:
low_atan = math.atan(
(self.line_boll_lower[-1] / self.line_boll_lower[-2] - 1) * 100) * 180 / math.pi
low_atan = round(low_atan, 3)
if len(self.line_lower_atan) > self.max_hold_bars:
del self.line_lower_atan[0]
self.line_lower_atan.append(low_atan)
if self.para_boll2_tb_len > 0:
if self.bar_len < min(14, self.para_boll2_tb_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Boll2需要:{1}'.
format(len(self.line_bar), min(14, self.para_boll2_tb_len) + 1))
else:
boll2Len = min(self.bar_len, self.para_boll2_tb_len)
if len(self.line_boll2_upper) > self.max_hold_bars:
del self.line_boll2_upper[0]
if len(self.line_boll2_middle) > self.max_hold_bars:
del self.line_boll2_middle[0]
if len(self.line_boll2_lower) > self.max_hold_bars:
del self.line_boll2_lower[0]
if len(self.line_boll2_std) > self.max_hold_bars:
del self.line_boll2_std[0]
# 1标准差
std = np.std(self.close_array[-2 * boll2Len:], ddof=1)
self.line_boll2_std.append(std)
middle = np.mean(self.close_array[-2 * boll2Len:])
self.line_boll2_middle.append(middle) # 中轨
self.cur_middle2 = middle - middle % self.price_tick # 中轨取整
upper = middle + self.para_boll2_std_sate * std
self.line_boll2_upper.append(upper) # 上轨
self.cur_upper2 = upper - upper % self.price_tick # 上轨取整
lower = middle - self.para_boll2_std_sate * std
self.line_boll2_lower.append(lower) # 下轨
self.cur_lower2 = lower - lower % self.price_tick # 下轨取整
# 计算斜率
if len(self.line_boll2_upper) > 2 and self.line_boll2_upper[-2] != 0:
up_atan = math.atan(
(self.line_boll2_upper[-1] / self.line_boll2_upper[-2] - 1) * 100) * 180 / math.pi
up_atan = round(up_atan, 3)
if len(self.line_upper2_atan) > self.max_hold_bars:
del self.line_upper2_atan[0]
self.line_upper2_atan.append(up_atan)
if len(self.line_boll2_middle) > 2 and self.line_boll2_middle[-2] != 0:
mid_atan = math.atan(
(self.line_boll2_middle[-1] / self.line_boll2_middle[-2] - 1) * 100) * 180 / math.pi
mid_atan = round(mid_atan, 3)
if len(self.line_middle2_atan) > self.max_hold_bars:
del self.line_middle2_atan[0]
self.line_middle2_atan.append(mid_atan)
if len(self.line_boll2_lower) > 2 and self.line_boll2_lower[-2] != 0:
low_atan = math.atan(
(self.line_boll2_lower[-1] / self.line_boll2_lower[-2] - 1) * 100) * 180 / math.pi
low_atan = round(low_atan, 3)
if len(self.line_lower2_atan) > self.max_hold_bars:
del self.line_lower2_atan[0]
self.line_lower2_atan.append(low_atan)
def rt_count_boll(self):
"""实时计算布林上下轨,斜率"""
boll_01_len = max(self.para_boll_len, self.para_boll_tb_len)
boll_02_len = max(self.para_boll2_len, self.para_boll2_tb_len)
if not (boll_01_len > 0 or boll_02_len > 0): # 不计算
return
if boll_01_len > 0:
if self.bar_len < min(14, boll_01_len) + 1:
return
bollLen = min(boll_01_len, self.bar_len)
if self.para_boll_tb_len == 0:
upper_list, middle_list, lower_list = ta.BBANDS(self.close_array[-bollLen:],
timeperiod=bollLen, nbdevup=self.para_boll_std_rate,
nbdevdn=self.para_boll_std_rate, matype=0)
# 1标准差
std = (upper_list[-1] - lower_list[-1]) / (self.para_boll_std_rate * 2)
self._rt_upper = round(upper_list[-1], self.round_n)
self._rt_middle = round(middle_list[-1], self.round_n)
self._rt_lower = round(lower_list[-1], self.round_n)
else:
# 1标准差
std = np.std(np.append(self.close_array[-boll_01_len:], [self.line_bar[-1].close]), ddof=1)
middle = np.mean(np.append(self.close_array[-boll_01_len:], [self.line_bar[-1].close]))
self._rt_middle = round(middle, self.round_n)
upper = middle + self.para_boll_std_rate * std
self._rt_upper = round(upper, self.round_n)
lower = middle - self.para_boll_std_rate * std
self._rt_lower = round(lower, self.round_n)
# 计算斜率
if len(self.line_boll_upper) > 2 and self.line_boll_upper[-1] != 0:
up_atan = math.atan((self._rt_upper / self.line_boll_upper[-1] - 1) * 100) * 180 / math.pi
self._rt_upper_atan = round(up_atan, 3)
if len(self.line_boll_middle) > 2 and self.line_boll_middle[-1] != 0:
mid_atan = math.atan((self._rt_middle / self.line_boll_middle[-1] - 1) * 100) * 180 / math.pi
self._rt_middle_atan = round(mid_atan, 3)
if len(self.line_boll_lower) > 2 and self.line_boll_lower[-1] != 0:
low_atan = math.atan((self._rt_lower / self.line_boll_lower[-1] - 1) * 100) * 180 / math.pi
self._rt_lower_atan = round(low_atan, 3)
if boll_02_len > 0:
if self.bar_len < min(14, boll_02_len) + 1:
return
bollLen = min(boll_02_len, self.bar_len)
if self.para_boll2_tb_len == 0:
upper_list, middle_list, lower_list = ta.BBANDS(self.close_array[-bollLen:],
timeperiod=bollLen, nbdevup=self.para_boll2_std_sate,
nbdevdn=self.para_boll2_std_sate, matype=0)
# 1标准差
std = (upper_list[-1] - lower_list[-1]) / (self.para_boll2_std_sate * 2)
self._rt_upper2 = round(upper_list[-1], self.round_n)
self._rt_middle2 = round(middle_list[-1], self.round_n)
self._rt_lower2 = round(lower_list[-1], self.round_n)
else:
# 1标准差
std = np.std(np.append(self.close_array[-boll_02_len:], [self.line_bar[-1].close]), ddof=1)
middle = np.mean(np.append(self.close_array[-boll_02_len:], [self.line_bar[-1].close]))
self._rt_middle2 = round(middle, self.round_n)
upper = middle + self.para_boll2_std_sate * std
self._rt_upper2 = round(upper, self.round_n)
lower = middle - self.para_boll2_std_sate * std
self._rt_lower2 = round(lower, self.round_n)
# 计算斜率
if len(self.line_boll2_upper) > 2 and self.line_boll2_upper[-1] != 0:
up_atan = math.atan((self._rt_upper2 / self.line_boll2_upper[-1] - 1) * 100) * 180 / math.pi
self._rt_upper2_atan = round(up_atan, 3)
if len(self.line_boll2_middle) > 2 and self.line_boll2_middle[-1] != 0:
mid_atan = math.atan((self._rt_middle2 / self.line_boll2_middle[-1] - 1) * 100) * 180 / math.pi
self._rt_middle2_atan = round(mid_atan, 3)
if len(self.line_boll2_lower) > 2 and self.line_boll2_lower[-1] != 0:
low_atan = math.atan((self._rt_lower2 / self.line_boll2_lower[-1] - 1) * 100) * 180 / math.pi
self._rt_lower2_atan = round(low_atan, 3)
@property
def rt_upper(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_upper is None and len(self.line_boll_upper) > 0:
return self.line_boll_upper[-1]
return self._rt_upper
@property
def rt_middle(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_middle is None and len(self.line_boll_middle) > 0:
return self.line_boll_middle[-1]
return self._rt_middle
@property
def rt_lower(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_lower is None and len(self.line_boll_lower) > 0:
return self.line_boll_lower[-1]
return self._rt_lower
@property
def rt_upper_atan(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_upper_atan is None and len(self.line_upper_atan) > 0:
return self.line_upper_atan[-1]
return self._rt_upper_atan
@property
def rt_middle_atan(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_middle_atan is None and len(self.line_middle_atan) > 0:
return self.line_middle_atan[-1]
return self._rt_middle_atan
@property
def rt_lower_atan(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_lower_atan is None and len(self.line_lower_atan) > 0:
return self.line_lower_atan[-1]
return self._rt_lower_atan
@property
def rt_upper2(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_upper2 is None and len(self.line_boll2_upper) > 0:
return self.line_boll2_upper[-1]
return self._rt_upper2
@property
def rt_middle2(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_middle2 is None and len(self.line_boll2_middle) > 0:
return self.line_boll2_middle[-1]
return self._rt_middle2
@property
def rt_lower2(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_lower2 is None and len(self.line_boll2_lower) > 0:
return self.line_boll2_lower[-1]
return self._rt_lower2
@property
def rt_upper2_atan(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_upper2_atan is None and len(self.line_upper2_atan) > 0:
return self.line_upper2_atan[-1]
return self._rt_upper2_atan
@property
def rt_middle2_atan(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_middle2_atan is None and len(self.line_middle2_atan) > 0:
return self.line_middle2_atan[-1]
return self._rt_middle2_atan
@property
def rt_lower2_atan(self):
self.check_rt_funcs(self.rt_count_boll)
if self._rt_lower2_atan is None and len(self.line_lower2_atan) > 0:
return self.line_lower2_atan[-1]
return self._rt_lower2_atan
def __count_kdj(self):
"""KDJ指标"""
"""
KDJ指标的中文名称又叫随机指标,是一个超买超卖指标,最早起源于期货市场,由乔治·莱恩(George Lane)首创。
随机指标KDJ最早是以KD指标的形式出现,而KD指标是在威廉指标的基础上发展起来的。
不过KD指标只判断股票的超买超卖的现象,在KDJ指标中则融合了移动平均线速度上的观念,形成比较准确的买卖信号依据。在实践中,K线与D线配合J线组成KDJ指标来使用。
KDJ指标在设计过程中主要是研究最高价、最低价和收盘价之间的关系,同时也融合了动量观念、强弱指标和移动平均线的一些优点。
因此,能够比较迅速、快捷、直观地研判行情,被广泛用于股市的中短期趋势分析,是期货和股票市场上最常用的技术分析工具。
第一步 计算RSV:即未成熟随机值(Raw Stochastic Value)。
RSV 指标主要用来分析市场是处于“超买”还是“超卖”状态:
- RSV高于80%时候市场即为超买状况,行情即将见顶,应当考虑出仓;
- RSV低于20%时候,市场为超卖状况,行情即将见底,此时可以考虑加仓。
N日RSV=(N日收盘价-N日内最低价)÷(N日内最高价-N日内最低价)×100%
第二步 计算K值:当日K值 = 2/3前1日K值 + 1/3当日RSV ;
第三步 计算D值:当日D值 = 2/3前1日D值 + 1/3当日K值;
第四步 计算J值:当日J值 = 3当日K值 - 2当日D值.
"""
if self.para_kdj_len <= 0:
return
if len(self.line_bar) < self.para_kdj_len + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算KDJ需要:{1}'.format(len(self.line_bar), self.para_kdj_len + 1))
return
if self.para_kdj_slow_len == 0:
self.para_kdj_slow_len = 3
if self.para_kdj_smooth_len == 0:
self.para_kdj_smooth_len = 3
inputKdjLen = min(self.para_kdj_len, self.bar_len)
hhv = max(self.high_array[-inputKdjLen:])
llv = min(self.low_array[-inputKdjLen:])
if len(self.line_k) > 0:
lastK = self.line_k[-1]
else:
lastK = 0
if len(self.line_d) > 0:
lastD = self.line_d[-1]
else:
lastD = 0
if hhv == llv:
rsv = 50
else:
rsv = (self.close_array[-1] - llv) / (hhv - llv) * 100
self.line_rsv.append(rsv)
k = (self.para_kdj_slow_len - 1) * lastK / self.para_kdj_slow_len + rsv / self.para_kdj_slow_len
if k < 0:
k = 0
if k > 100:
k = 100
d = (self.para_kdj_smooth_len - 1) * lastD / self.para_kdj_smooth_len + k / self.para_kdj_smooth_len
if d < 0:
d = 0
if d > 100:
d = 100
j = self.para_kdj_smooth_len * k - (self.para_kdj_smooth_len - 1) * d
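# With the default para_kdj_smooth_len = 3 this reduces to the classic J = 3 * K - 2 * D.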
if len(self.line_k) > self.max_hold_bars:
del self.line_k[0]
self.line_k.append(k)
if len(self.line_d) > self.max_hold_bars:
del self.line_d[0]
self.line_d.append(d)
if len(self.line_j) > self.max_hold_bars:
del self.line_j[0]
self.line_j.append(j)
# 增加KDJ的J谷顶和波底
if len(self.line_j) > 3:
# 峰
if self.line_j[-1] < self.line_j[-2] and self.line_j[-3] <= self.line_j[-2]:
t = {
'Type': 'T',
'J': self.line_j[-2],
'Close': self.close_array[-1]}
if len(self.kdj_top_list) > self.max_hold_bars:
del self.kdj_top_list[0]
self.kdj_top_list.append(t)
self.cur_kdj_top_buttom = self.kdj_top_list[-1]
# 谷
elif self.line_j[-1] > self.line_j[-2] and self.line_j[-3] >= self.line_j[-2]:
b = {
'Type': u'B',
'J': self.line_j[-2],
'Close': self.close_array[-1]
}
if len(self.kdj_buttom_list) > self.max_hold_bars:
del self.kdj_buttom_list[0]
self.kdj_buttom_list.append(b)
self.cur_kdj_top_buttom = self.kdj_buttom_list[-1]
self.__update_kd_cross()
def __count_kdj_tb(self):
"""KDJ指标"""
"""
KDJ指标的中文名称又叫随机指标,是一个超买超卖指标,最早起源于期货市场,由乔治·莱恩(George Lane)首创。
随机指标KDJ最早是以KD指标的形式出现,而KD指标是在威廉指标的基础上发展起来的。
不过KD指标只判断股票的超买超卖的现象,在KDJ指标中则融合了移动平均线速度上的观念,形成比较准确的买卖信号依据。在实践中,K线与D线配合J线组成KDJ指标来使用。
KDJ指标在设计过程中主要是研究最高价、最低价和收盘价之间的关系,同时也融合了动量观念、强弱指标和移动平均线的一些优点。
因此,能够比较迅速、快捷、直观地研判行情,被广泛用于股市的中短期趋势分析,是期货和股票市场上最常用的技术分析工具。
第一步 计算RSV:即未成熟随机值(Raw Stochastic Value)。
RSV 指标主要用来分析市场是处于“超买”还是“超卖”状态:
- RSV高于80%时候市场即为超买状况,行情即将见顶,应当考虑出仓;
- RSV低于20%时候,市场为超卖状况,行情即将见底,此时可以考虑加仓。
N日RSV=(N日收盘价-N日内最低价)÷(N日内最高价-N日内最低价)×100%
第二步 计算K值:当日K值 = 2/3前1日K值 + 1/3当日RSV ;
第三步 计算D值:当日D值 = 2/3前1日D值 + 1/3当日K值;
第四步 计算J值:当日J值 = 3当日K值 - 2当日D值.
"""
if self.para_kdj_tb_len <= 0:
return
if self.para_kdj_tb_len + self.para_kdj_smooth_len > self.max_hold_bars:
self.max_hold_bars = self.para_kdj_tb_len + self.para_kdj_smooth_len + 1
if self.para_kdj_slow_len == 0:
self.para_kdj_slow_len = 3
if self.para_kdj_smooth_len == 0:
self.para_kdj_smooth_len = 3
if self.bar_len < 3:
return
data_len = min(self.bar_len, self.para_kdj_tb_len)
hhv = max(self.high_array[-data_len:])
llv = min(self.low_array[-data_len:])
if len(self.line_k) > 0:
lastK = self.line_k[-1]
else:
lastK = 0
if len(self.line_d) > 0:
lastD = self.line_d[-1]
else:
lastD = 0
if hhv == llv:
rsv = 50
else:
rsv = (self.close_array[-1] - llv) / (hhv - llv) * 100
self.line_rsv.append(rsv)
k = (self.para_kdj_slow_len - 1) * lastK / self.para_kdj_slow_len + rsv / self.para_kdj_slow_len
if k < 0:
k = 0
if k > 100:
k = 100
d = (self.para_kdj_smooth_len - 1) * lastD / self.para_kdj_smooth_len + k / self.para_kdj_smooth_len
if d < 0:
d = 0
if d > 100:
d = 100
j = self.para_kdj_smooth_len * k - (self.para_kdj_smooth_len - 1) * d
if len(self.line_k) > self.max_hold_bars:
del self.line_k[0]
self.line_k.append(k)
if len(self.line_d) > self.max_hold_bars:
del self.line_d[0]
self.line_d.append(d)
if len(self.line_j) > self.max_hold_bars:
del self.line_j[0]
self.line_j.append(j)
# 增加KDJ的J谷顶和波底
if len(self.line_j) > 3:
# 峰
if self.line_j[-1] < self.line_j[-2] and self.line_j[-3] <= self.line_j[-2]:
t = {
'Type': 'T',
'J': self.line_j[-2],
'Close': self.close_array[-1]
}
if len(self.kdj_top_list) > self.max_hold_bars:
del self.kdj_top_list[0]
self.kdj_top_list.append(t)
self.cur_kdj_top_buttom = self.kdj_top_list[-1]
# 谷
elif self.line_j[-1] > self.line_j[-2] and self.line_j[-3] >= self.line_j[-2]:
b = {
'Type': 'B',
'J': self.line_j[-2],
'Close': self.close_array[-1]
}
if len(self.kdj_buttom_list) > self.max_hold_bars:
del self.kdj_buttom_list[0]
self.kdj_buttom_list.append(b)
self.cur_kdj_top_buttom = self.kdj_buttom_list[-1]
self.__update_kd_cross()
def __update_kd_cross(self):
"""更新KDJ金叉死叉"""
if len(self.line_k) < 2 or len(self.line_d) < 2:
return
# K值大于D值
if self.line_k[-1] > self.line_d[-1]:
if self.line_k[-2] > self.line_d[-2]:
# 延续金叉
self.cur_kd_count = max(1, self.cur_kd_count) + 1
else:
# 发生金叉
self.cur_kd_count = 1
self.cur_kd_cross = round((self.line_k[-1] + self.line_k[-2]) / 2, 2)
self.cur_kd_cross_price = self.cur_price
# K值小于D值
else:
if self.line_k[-2] < self.line_d[-2]:
# 延续死叉
self.cur_kd_count = min(-1, self.cur_kd_count) - 1
else:
# 发生死叉
self.cur_kd_count = -1
self.cur_kd_cross = round((self.line_k[-1] + self.line_k[-2]) / 2, 2)
self.cur_kd_cross_price = self.cur_price
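# cur_kd_count therefore encodes both the cross direction and its age: +n roughly means the K
# line has stayed above D for n consecutive bars since the last golden cross, -n means it has
# stayed below D for n bars since the last dead cross; cur_kd_cross/cur_kd_cross_price record
# the K value and price at the moment of the cross.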
def __count_macd(self):
"""
Macd计算方法:
12日EMA的计算:EMA12 = 前一日EMA12 X 11/13 + 今日收盘 X 2/13
26日EMA的计算:EMA26 = 前一日EMA26 X 25/27 + 今日收盘 X 2/27
差离值(DIF)的计算: DIF = EMA12 - EMA26,即为talib-MACD返回值macd
根据差离值计算其9日的EMA,即离差平均值,是所求的DEA值。
今日DEA = (前一日DEA X 8/10 + 今日DIF X 2/10),即为talib-MACD返回值signal
DIF与它自己的移动平均之间差距的大小一般BAR=(DIF-DEA)*2,即为MACD柱状图。
但是talib中MACD的计算是bar = (dif-dea)*1
"""
if self.para_macd_fast_len <= 0 or self.para_macd_slow_len <= 0 or self.para_macd_signal_len <= 0:
return
maxLen = max(self.para_macd_fast_len, self.para_macd_slow_len) + self.para_macd_signal_len
maxLen = maxLen * 3 # 注:数据长度需要足够,才能准确。测试过,3倍长度才可以与国内的文华等软件一致
if self.bar_len < maxLen:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算MACD需要:{1}'.format(len(self.line_bar), maxLen))
return
dif_list, dea_list, macd_list = ta.MACD(self.close_array[-2 * maxLen:], fastperiod=self.para_macd_fast_len,
slowperiod=self.para_macd_slow_len,
signalperiod=self.para_macd_signal_len)
# dif, dea, macd = ta.MACDEXT(np.array(listClose, dtype=float),
# fastperiod=self.inputMacdFastPeriodLen, fastmatype=1,
# slowperiod=self.inputMacdSlowPeriodLen, slowmatype=1,
# signalperiod=self.inputMacdSignalPeriodLen, signalmatype=1)
if len(self.line_dif) > self.max_hold_bars:
del self.line_dif[0]
self.line_dif.append(round(dif_list[-1], 2))
if len(self.line_dea) > self.max_hold_bars:
del self.line_dea[0]
self.line_dea.append(round(dea_list[-1], 2))
if len(self.line_macd) > self.max_hold_bars:
del self.line_macd[0]
self.line_macd.append(round(macd_list[-1] * 2, 2)) # 国内一般是2倍
# 更新 “段”(金叉-》死叉;或 死叉-》金叉)
segment = self.macd_segment_list[-1] if len(self.macd_segment_list) > 0 else {}
# 创建新的段
if (self.line_macd[-1] > 0 and self.cur_macd_count <= 0) or \
(self.line_macd[-1] < 0 and self.cur_macd_count >= 0):
segment = {}
# 金叉/死叉,更新位置&价格
self.cur_macd_count, self.rt_macd_count = (1, 1) if self.line_macd[-1] > 0 else (-1, -1)
self.cur_macd_cross = round((self.line_dif[-1] + self.line_dea[-1]) / 2, 2)
self.cur_macd_cross_price = self.close_array[-1]
self.rt_macd_cross = self.cur_macd_cross
self.rt_macd_cross_price = self.cur_macd_cross_price
# 更新段
segment.update({
'macd_count': self.cur_macd_count,
'max_price': self.high_array[-1],
'min_price': self.low_array[-1],
'max_dif': self.line_dif[-1],
'min_dif': self.line_dif[-1],
'macd_area': abs(self.line_macd[-1]),
'max_macd': self.line_macd[-1],
'min_macd': self.line_macd[-1]
})
self.macd_segment_list.append(segment)
# 新得能量柱>0,判断是否有底背离,同时,取消原有顶背离
if self.line_macd[-1] > 0:
self.dif_buttom_divergence = self.is_dif_divergence(direction=Direction.SHORT)
self.macd_buttom_divergence = self.is_macd_divergence(direction=Direction.SHORT)
self.dif_top_divergence = False
self.macd_top_divergence = False
# 新得能量柱<0,判断是否有顶背离,同时,取消原有底背离
elif self.line_macd[-1] < 0:
self.dif_buttom_divergence = False
self.macd_buttom_divergence = False
self.dif_top_divergence = self.is_dif_divergence(direction=Direction.LONG)
self.macd_top_divergence = self.is_macd_divergence(direction=Direction.LONG)
else:
# 继续金叉
if self.line_macd[-1] > 0 and self.cur_macd_count > 0:
self.cur_macd_count += 1
segment.update({
'macd_count': self.cur_macd_count,
'max_price': max(segment.get('max_price', self.high_array[-1]), self.high_array[-1]),
'min_price': min(segment.get('min_price', self.low_array[-1]), self.low_array[-1]),
'max_dif': max(segment.get('max_dif', self.line_dif[-1]), self.line_dif[-1]),
'min_dif': min(segment.get('min_dif', self.line_dif[-1]), self.line_dif[-1]),
'macd_area': segment.get('macd_area', 0) + abs(self.line_macd[-1]),
'max_macd': max(segment.get('max_macd', self.line_macd[-1]), self.line_macd[-1])
})
# 取消实时得记录
self.rt_macd_count = 0
self.rt_macd_cross = 0
self.rt_macd_cross_price = 0
# 继续死叉
elif self.line_macd[-1] < 0 and self.cur_macd_count < 0:
self.cur_macd_count -= 1
segment.update({
'macd_count': self.cur_macd_count,
'max_price': max(segment.get('max_price', self.high_array[-1]), self.high_array[-1]),
'min_price': min(segment.get('min_price', self.low_array[-1]), self.low_array[-1]),
'max_dif': max(segment.get('max_dif', self.line_dif[-1]), self.line_dif[-1]),
'min_dif': min(segment.get('min_dif', self.line_dif[-1]), self.line_dif[-1]),
'macd_area': segment.get('macd_area', 0) + abs(self.line_macd[-1]),
'min_macd': min(segment.get('min_macd', self.line_macd[-1]), self.line_macd[-1])
})
# 取消实时得记录
self.rt_macd_count = 0
self.rt_macd_cross = 0
self.rt_macd_cross_price = 0
# 删除超过10个的macd段
if len(self.macd_segment_list) > 10:
self.macd_segment_list.pop(0)
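# Each entry of macd_segment_list describes one run between crosses, with keys:
# macd_count (signed bar count of the run), max_price / min_price, max_dif / min_dif,
# macd_area (sum of abs(macd) over the run) and max_macd / min_macd. is_dif_divergence()
# and is_macd_divergence() below compare the last two runs of the same sign.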
def rt_count_macd(self):
"""
(实时)Macd计算方法:
12日EMA的计算:EMA12 = 前一日EMA12 X 11/13 + 今日收盘 X 2/13
26日EMA的计算:EMA26 = 前一日EMA26 X 25/27 + 今日收盘 X 2/27
差离值(DIF)的计算: DIF = EMA12 - EMA26,即为talib-MACD返回值macd
根据差离值计算其9日的EMA,即离差平均值,是所求的DEA值。
今日DEA = (前一日DEA X 8/10 + 今日DIF X 2/10),即为talib-MACD返回值signal
DIF与它自己的移动平均之间差距的大小一般BAR=(DIF-DEA)*2,即为MACD柱状图。
但是talib中MACD的计算是bar = (dif-dea)*1
"""
if self.para_macd_fast_len <= 0 or self.para_macd_slow_len <= 0 or self.para_macd_signal_len <= 0:
return
maxLen = max(self.para_macd_fast_len, self.para_macd_slow_len) + self.para_macd_signal_len + 1
if self.bar_len < maxLen:
return
dif, dea, macd = ta.MACD(np.append(self.close_array[-maxLen:], [self.line_bar[-1].close]),
fastperiod=self.para_macd_fast_len,
slowperiod=self.para_macd_slow_len, signalperiod=self.para_macd_signal_len)
self._rt_dif = round(dif[-1], 2) if len(dif) > 0 else None
self._rt_dea = round(dea[-1], 2) if len(dea) > 0 else None
self._rt_macd = round(macd[-1] * 2, 2) if len(macd) > 0 else None
# 判断是否实时金叉/死叉
if self._rt_macd is not None:
# 实时金叉
if self._rt_macd >= 0 and self.line_macd[-1] < 0:
self.rt_macd_count = 1
self.rt_macd_cross = round((self._rt_dif + self._rt_dea) / 2, 2)
self.rt_macd_cross_price = self.cur_price
# 实时死叉
elif self._rt_macd <= 0 and self.line_macd[-1] > 0:
self.rt_macd_count = -1
self.rt_macd_cross = round((self._rt_dif + self._rt_dea) / 2, 2)
self.rt_macd_cross_price = self.cur_price
@property
def rt_dif(self):
self.check_rt_funcs(self.rt_count_macd)
if self._rt_dif is None and len(self.line_dif) > 0:
return self.line_dif[-1]
return self._rt_dif
@property
def rt_dea(self):
self.check_rt_funcs(self.rt_count_macd)
if self._rt_dea is None and len(self.line_dea) > 0:
return self.line_dea[-1]
return self._rt_dea
@property
def rt_macd(self):
self.check_rt_funcs(self.rt_count_macd)
if self._rt_macd is None and len(self.line_macd) > 0:
return self.line_macd[-1]
return self._rt_macd
def is_dif_divergence(self, direction):
"""
检查MACD DIF是否与价格有背离
:param: direction,多:检查是否有顶背离,空,检查是否有底背离
"""
s1, s2 = None, None # s1,倒数的一个匹配段;s2,倒数第二个匹配段
for seg in reversed(self.macd_segment_list):
if direction == Direction.LONG:
if seg.get('macd_count', 0) > 0:
if s1 is None:
s1 = seg
continue
elif s2 is None:
s2 = seg
break
else:
if seg.get('macd_count', 0) < 0:
if s1 is None:
s1 = seg
continue
elif s2 is None:
s2 = seg
break
if not all([s1, s2]):
return False
if direction == Direction.LONG:
s1_max_price = s1.get('max_price', None)
s2_max_price = s2.get('max_price', None)
s1_dif_max = s1.get('max_dif', None)
s2_dif_max = s2.get('max_dif', None)
if s1_max_price is None or s2_max_price is None or s1_dif_max is None or s2_dif_max is None:
return False
# 顶背离,只能在零轴上方才判断
if s1_dif_max < 0 or s2_dif_max < 0:
return False
# 价格创新高(超过前高得0.99);dif指标没有创新高
if s1_max_price >= s2_max_price * 0.99 and s1_dif_max < s2_dif_max:
return True
if direction == Direction.SHORT:
s1_min_price = s1.get('min_price', None)
s2_min_price = s2.get('min_price', None)
s1_dif_min = s1.get('min_dif', None)
s2_dif_min = s2.get('min_dif', None)
if s1_min_price is None or s2_min_price is None or s1_dif_min is None or s2_dif_min is None:
return False
# 底部背离,只能在零轴下方才判断
if s1_dif_min > 0 or s2_dif_min > 0:
return False
# 价格创新低,dif没有创新低
if s1_min_price <= s2_min_price * 1.01 and s1_dif_min > s2_dif_min:
return True
return False
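# Example of the top-divergence test above: if the latest long segment makes a high of 101 while
# the previous one topped at 100 (101 >= 100 * 0.99 passes the tolerance), but its max DIF is 1.8
# versus 2.5 before, the price "new high" is not confirmed by DIF and the function returns True.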
def is_macd_divergence(self, direction):
"""
检查MACD 能量柱是否与价格有背离
:param: direction,多:检查是否有顶背离,空,检查是否有底背离
"""
s1, s2 = None, None # s1,倒数的一个匹配段;s2,倒数第二个匹配段
for seg in reversed(self.macd_segment_list):
if direction == Direction.LONG:
if seg.get('macd_count', 0) > 0:
if s1 is None:
s1 = seg
continue
elif s2 is None:
s2 = seg
break
else:
if seg.get('macd_count', 0) < 0:
if s1 is None:
s1 = seg
continue
elif s2 is None:
s2 = seg
break
if not all([s1, s2]):
return False
if direction == Direction.LONG:
s1_max_price = s1.get('max_price', None)
s2_max_price = s2.get('max_price', None)
s1_area = s1.get('macd_area', None)
s2_area = s2.get('macd_area', None)
if s1_max_price is None or s2_max_price is None or s1_area is None or s2_area is None:
return False
# 价格创新高(超过前高得0.99);MACD能量柱没有创更大面积
if s1_max_price >= s2_max_price * 0.99 and s1_area < s2_area:
return True
if direction == Direction.SHORT:
s1_min_price = s1.get('min_price', None)
s2_min_price = s2.get('min_price', None)
s1_area = s1.get('macd_area', None)
s2_area = s2.get('macd_area', None)
if s1_min_price is None or s2_min_price is None or s1_area is None or s2_area is None:
return False
# 价格创新低,MACD能量柱没有创更大面积
if s1_min_price <= s2_min_price * 1.01 and s1_area < s2_area:
return True
return False
def __count_cci(self):
"""CCI计算
顺势指标又叫CCI指标,CCI指标是美国股市技术分析家唐纳德·蓝伯特(Donald Lambert)于20世纪80年代提出的,专门测量股价、外汇或者贵金属交易
是否已超出常态分布范围。属于超买超卖类指标中较特殊的一种。波动于正无穷大和负无穷大之间。但是,又不需要以0为中轴线,这一点也和波动于正无穷大
和负无穷大的指标不同。
它最早是用于期货市场的判断,后运用于股票市场的研判,并被广泛使用。与大多数单一利用股票的收盘价、开盘价、最高价或最低价而发明出的各种技术分析
指标不同,CCI指标是根据统计学原理,引进价格与固定期间的股价平均区间的偏离程度的概念,强调股价平均绝对偏差在股市技术分析中的重要性,是一种比
较独特的技术指标。
它与其他超买超卖型指标又有自己比较独特之处。象KDJ、W%R等大多数超买超卖型指标都有“0-100”上下界限,因此,它们对待一般常态行情的研判比较适用
,而对于那些短期内暴涨暴跌的股票的价格走势时,就可能会发生指标钝化的现象。而CCI指标却是波动于正无穷大到负无穷大之间,因此不会出现指标钝化现
象,这样就有利于投资者更好地研判行情,特别是那些短期内暴涨暴跌的非常态行情。
http://baike.baidu.com/view/53690.htm?fromtitle=CCI%E6%8C%87%E6%A0%87&fromid=4316895&type=syn
"""
if self.para_cci_len <= 0:
return
# 1、lineBar满足长度才执行计算
if len(self.line_bar) < self.para_cci_len + 2:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算CCI需要:{1}'.
format(len(self.line_bar), self.para_cci_len + 2))
return
# 计算第1根RSI曲线
cur_cci = ta.CCI(high=self.high_array[-2 * self.para_cci_len:], low=self.low_array[-2 * self.para_cci_len:],
close=self.close_array[-2 * self.para_cci_len:], timeperiod=self.para_cci_len)[-1]
self.cur_cci = round(float(cur_cci), 3)
if len(self.line_cci) > self.max_hold_bars:
del self.line_cci[0]
self.line_cci.append(self.cur_cci)
def rt_count_cci(self):
"""实时计算CCI值"""
if self.para_cci_len <= 0:
return
# 1、lineBar满足长度才执行计算
if len(self.line_bar) < self.para_cci_len + 2:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算CCI需要:{1}'.
format(len(self.line_bar), self.para_cci_len + 2))
return
self._rt_cci = ta.CCI(high=np.append(self.high_array[-2 * self.para_cci_len:], [self.line_bar[-1].high_price]),
low=np.append(self.low_array[-2 * self.para_cci_len:], [self.line_bar[-1].low_price]),
close=np.append(self.close_array[-2 * self.para_cci_len:],
[self.line_bar[-1].close_price]),
timeperiod=self.para_cci_len)[-1]
@property
def rt_cci(self):
self.check_rt_funcs(self.rt_count_cci)
if self._rt_cci is None:
return self.cur_cci
return self._rt_cci
def __count_kf(self):
"""计算卡尔曼过滤器均线"""
if not self.para_active_kf or self.kf is None:
return
if self.bar_len < 2:
return
if len(self.line_state_mean) == 0 or len(self.line_state_covar) == 0:
try:
self.kf = KalmanFilter(transition_matrices=[1],
observation_matrices=[1],
initial_state_mean=self.close_array[-1],
initial_state_covariance=1,
transition_covariance=0.01)
except Exception:
self.write_log(u'导入卡尔曼过滤器失败,需先安装 pip install pykalman')
self.para_active_kf = False
return
state_means, state_covariances = self.kf.filter(self.close_array)
m = state_means[-1].item()
c = state_covariances[-1].item()
else:
m = self.line_state_mean[-1]
c = self.line_state_covar[-1]
state_means, state_covariances = self.kf.filter_update(filtered_state_mean=m,
filtered_state_covariance=c,
observation=self.close_array[-1])
m = state_means[-1].item()
c = state_covariances[-1].item()
if len(self.line_state_mean) > self.max_hold_bars:
del self.line_state_mean[0]
if len(self.line_state_covar) > self.max_hold_bars:
del self.line_state_covar[0]
self.line_state_mean.append(m)
self.line_state_covar.append(c)
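# On the first call (no stored state yet) a full batch filter is run over close_array to seed the
# state; afterwards only pykalman's filter_update() is applied incrementally, using the previous
# mean/covariance as the prior and the latest close as the new observation.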
def __count_period(self, bar):
"""重新计算周期"""
len_rsi = len(self.line_rsi1)
if self.para_active_kf:
if len(self.line_state_mean) < 7 or len_rsi <= 0:
return
listMid = self.line_state_mean[-7:-1]
malist = ta.MA(np.array(listMid, dtype=float), 5)
lastMid = self.line_state_mean[-1]
else:
len_boll = len(self.line_boll_middle)
if len_boll <= 6 or len_rsi <= 0:
return
listMid = self.line_boll_middle[-7:-1]
lastMid = self.line_boll_middle[-1]
malist = ta.MA(np.array(listMid, dtype=float), 5)
ma5 = malist[-1]
ma5_ref1 = malist[-2]
if ma5 <= 0 or ma5_ref1 <= 0:
self.write_log(u'boll中轨计算均线异常')
return
if self.para_active_kf:
self.cur_atan = math.atan((ma5 / ma5_ref1 - 1) * 100) * 180 / math.pi
else:
# 当前均值,与前5均值得价差,除以标准差
self.cur_atan = math.atan((ma5 - ma5_ref1) / self.line_boll_std[-1]) * 180 / math.pi
# atan2 = math.atan((ma5 / ma5_ref1 - 1) * 100) * 180 / math.pi
# atan3 = math.atan(ma5 / ma5_ref1 - 1)* 100
self.cur_atan = round(self.cur_atan, 3)
# self.write_log(u'{}/{}/{}'.format(self.atan, atan2, atan3))
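# Two slope variants are used above: in Kalman mode the one-bar percentage change of the 5-bar MA
# is converted to degrees, while in Bollinger mode the MA change is normalised by the band's
# standard deviation before taking atan, which keeps the angle roughly scale-free.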
if self.cur_period is None:
self.write_log(u'初始化周期为震荡')
self.cur_period = CtaPeriod(mode=Period.SHOCK, price=bar.close_price, pre_mode=Period.INIT, dt=bar.datetime)
self.period_list.append(self.cur_period)
if len(self.line_atan) > self.max_hold_bars:
del self.line_atan[0]
self.line_atan.append(self.cur_atan)
if len_rsi < 3:
return
# 当前期趋势是震荡
if self.cur_period.mode == Period.SHOCK:
# 初始化模式
if self.cur_period.pre_mode == Period.INIT:
if self.cur_atan < -45:
self.cur_period = CtaPeriod(mode=Period.SHORT_EXTREME, price=bar.close_price, pre_mode=Period.SHORT,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度向下,Atan:{},周期{}=》{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
return
elif self.cur_atan > 45:
self.cur_period = CtaPeriod(mode=Period.LONG_EXTREME, price=bar.close_price, pre_mode=Period.LONG,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度加速向上,Atan:{},周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode,
self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
return
# 震荡 -》 空
if self.cur_atan <= -20:
self.cur_period = CtaPeriod(mode=Period.SHORT, price=bar.close_price, pre_mode=Period.SHOCK,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度向下,Atan:{},周期{}=》{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 震荡 =》 多
elif self.cur_atan >= 20:
self.cur_period = CtaPeriod(mode=Period.LONG, price=bar.close_price, pre_mode=Period.SHOCK,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度向上,Atan:{},周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode,
self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 周期维持不变
else:
self.write_log(u'{} 角度维持,Atan:{},周期维持:{}'.
format(bar.datetime, self.cur_atan, self.cur_period.mode))
return
# 当前期趋势是空
if self.cur_period.mode == Period.SHORT:
# 空=》空极端
if self.cur_atan <= -45 and self.line_atan[-1] < self.line_atan[-2]:
self.cur_period = CtaPeriod(mode=Period.SHORT_EXTREME, price=bar.close_price, pre_mode=Period.SHORT,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度极端向下,Atan:{},注意反弹。周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 空=》震荡
elif -20 < self.cur_atan < 20 or (self.cur_atan >= 20 and self.line_atan[-2] <= -20):
self.cur_period = CtaPeriod(mode=Period.SHOCK, price=bar.close_price, pre_mode=Period.SHORT,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度平缓,Atan:{},结束下降趋势。周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
elif self.cur_atan > 20 and self.cur_period.pre_mode == Period.LONG_EXTREME and self.line_atan[-1] > \
self.line_atan[-2] and bar.close_price > lastMid:
self.cur_period = CtaPeriod(mode=Period.SHOCK, price=bar.close_price, pre_mode=Period.SHORT,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度平缓,Atan:{},结束下降趋势。周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 周期维持空
else:
self.write_log(u'{} 角度向下{},周期维持:{}'.
format(bar.datetime, self.cur_atan, self.cur_period.mode))
return
# 当前期趋势是多
if self.cur_period.mode == Period.LONG:
# 多=》多极端
if self.cur_atan >= 45 and self.line_atan[-1] > self.line_atan[-2]:
self.cur_period = CtaPeriod(mode=Period.LONG_EXTREME, price=bar.close_price, pre_mode=Period.LONG,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度加速向上,Atan:{},周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode,
self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 多=》震荡
elif -20 < self.cur_atan < 20 or (self.cur_atan <= -20 and self.line_atan[-2] >= 20):
self.cur_period = CtaPeriod(mode=Period.SHOCK, price=bar.close_price, pre_mode=Period.LONG,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度平缓,Atan:{},结束上升趋势。周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 多=》震荡
elif self.cur_atan < -20 and self.cur_period.pre_mode == Period.SHORT_EXTREME and self.line_atan[-1] < \
self.line_atan[-2] and bar.close_price < lastMid:
self.cur_period = CtaPeriod(mode=Period.SHOCK, price=bar.close_price, pre_mode=Period.LONG,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度平缓,Atan:{},结束上升趋势。周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 周期保持多
else:
self.write_log(u'{} 角度向上,Atan:{},周期维持:{}'.
format(bar.datetime, self.cur_atan, self.cur_period.mode))
return
# 当前周期为多极端
if self.cur_period.mode == Period.LONG_EXTREME:
# 多极端 =》 空
if self.line_rsi1[-1] < self.line_rsi1[-2] \
and max(self.line_rsi1[-5:-2]) >= 50 \
and bar.close_price < lastMid:
self.cur_period = CtaPeriod(mode=Period.SHORT, price=bar.close_price, pre_mode=Period.LONG_EXTREME,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度高位反弹向下,Atan:{} , RSI {}=》{},{}下穿中轨{},周期:{}=》{}'.
format(bar.datetime, self.cur_atan, self.line_rsi1[-2], self.line_rsi1[-1],
bar.close_price, lastMid,
self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 多极端 =》多
elif self.line_rsi1[-1] < self.line_rsi1[-2] \
and bar.close_price > lastMid:
self.cur_period = CtaPeriod(mode=Period.LONG, price=bar.close_price, pre_mode=Period.LONG_EXTREME,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度上加速放缓,Atan:{}, & RSI{}=>{},周期:{}=》{}'.
format(bar.datetime, self.cur_atan, self.line_rsi1[-2], self.line_rsi1[-1],
self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 当前趋势保持多极端
else:
self.write_log(u'{} 角度向上加速{},周期维持:{}'.
format(bar.datetime, self.cur_atan, self.cur_period.mode))
return
# 当前周期为空极端
if self.cur_period.mode == Period.SHORT_EXTREME:
# 空极端 =》多
if self.line_rsi1[-1] > self.line_rsi1[-2] and min(self.line_rsi1[-5:-2]) <= 50 \
and bar.close_price > lastMid:
self.cur_period = CtaPeriod(mode=Period.LONG, price=bar.close_price, pre_mode=Period.SHORT_EXTREME,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度下极限低位反弹转折,Atan:{}, RSI:{}=>{},周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.line_rsi1[-2], self.line_rsi1[-1],
self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 空极端=》空
elif self.line_rsi1[-1] > self.line_rsi1[-2] and bar.close_price < lastMid:
self.cur_period = CtaPeriod(mode=Period.SHORT, price=bar.close_price, pre_mode=Period.SHORT_EXTREME,
dt=bar.datetime)
self.period_list.append(self.cur_period)
self.write_log(u'{} 角度下加速放缓,Atan:{},RSI:{}=>{}, ,周期:{}=>{}'.
format(bar.datetime, self.cur_atan, self.line_rsi1[-2], self.line_rsi1[-1],
self.cur_period.pre_mode, self.cur_period.mode))
if self.cb_on_period:
self.cb_on_period(self.cur_period)
# 保持空极端趋势
else:
self.write_log(u'{} 角度向下加速,Atan:{},周期维持:{}'.
format(bar.datetime, self.cur_atan, self.cur_period.mode))
return
def __count_skd(self):
"""
改良得多空线(类似KDJ,RSI)
:param bar:
:return:
"""
if not self.para_active_skd:
return
data_len = max(self.para_skd_fast_len * 2, self.para_skd_fast_len + 20)
if self.bar_len < data_len:
return
# 计算最后一根Bar的RSI指标
last_rsi = ta.RSI(self.close_array[-data_len:], self.para_skd_fast_len)[-1]
# 添加到lineSkdRSI队列
if len(self.line_skd_rsi) > self.max_hold_bars:
del self.line_skd_rsi[0]
self.line_skd_rsi.append(last_rsi)
if len(self.line_skd_rsi) < self.para_skd_slow_len:
return
# 计算最后根的最高价/最低价
rsi_HHV = max(self.line_skd_rsi[-self.para_skd_slow_len:])
rsi_LLV = min(self.line_skd_rsi[-self.para_skd_slow_len:])
# 计算STO
if rsi_HHV == rsi_LLV:
sto = 0
else:
sto = 100 * (last_rsi - rsi_LLV) / (rsi_HHV - rsi_LLV)
sto_len = len(self.line_skd_sto)
if sto_len > self.max_hold_bars:
del self.line_skd_sto[0]
self.line_skd_sto.append(sto)
# 根据STO,计算SK = EMA(STO,5)
if sto_len < 5:
return
sk = ta.EMA(np.array(self.line_skd_sto, dtype=float), 5)[-1]
sk = round(sk, self.round_n)
if len(self.line_sk) > self.max_hold_bars:
del self.line_sk[0]
self.line_sk.append(sk)
if len(self.line_sk) < 3:
return
sd = ta.EMA(np.array(self.line_sk, dtype=float), 3)[-1]
sd = round(sd, self.round_n)
if len(self.line_sd) > self.max_hold_bars:
del self.line_sd[0]
self.line_sd.append(sd)
if len(self.line_sd) < 2:
return
for t in self.skd_top_list[-1:]:
t['bars'] += 1
for b in self.skd_buttom_list[-1:]:
b['bars'] += 1
# 记录所有SK的顶部和底部
# 峰(顶部)
if self.line_sk[-1] < self.line_sk[-2] and self.line_sk[-3] < self.line_sk[-2]:
t = dict()
t['type'] = u'T'
t['sk'] = self.line_sk[-2]
t['price'] = max(self.high_array[-4:])
t['time'] = self.line_bar[-1].datetime
t['bars'] = 0
if len(self.skd_top_list) > self.max_hold_bars:
del self.skd_top_list[0]
self.skd_top_list.append(t)
if self.cur_skd_count > 0:
# 检查是否有顶背离
if self.is_skd_divergence(direction=Direction.LONG):
self.cur_skd_divergence = -1
# 谷(底部)
elif self.line_sk[-1] > self.line_sk[-2] and self.line_sk[-3] > self.line_sk[-2]:
b = dict()
b['type'] = u'B'
b['sk'] = self.line_sk[-2]
b['price'] = min(self.low_array[-4:])
b['time'] = self.line_bar[-1].datetime
b['bars'] = 0
if len(self.skd_buttom_list) > self.max_hold_bars:
del self.skd_buttom_list[0]
self.skd_buttom_list.append(b)
if self.cur_skd_count < 0:
# 检查是否有底背离
if self.is_skd_divergence(direction=Direction.SHORT):
self.cur_skd_divergence = 1
# 判断是否金叉和死叉
if self.line_sk[-1] > self.line_sk[-2] \
and self.line_sk[-2] < self.line_sd[-2] \
and self.line_sk[-1] > self.line_sd[-1]:
golden_cross = True
else:
golden_cross = False
if self.line_sk[-1] < self.line_sk[-2] \
and self.line_sk[-2] > self.line_sd[-2] \
and self.line_sk[-1] < self.line_sd[-1]:
dead_cross = True
else:
dead_cross = False
if self.cur_skd_count <= 0:
if golden_cross:
# 金叉
self.cur_skd_count = 1
self.cur_skd_cross = (self.line_sk[-1] + self.line_sk[-2] + self.line_sd[-1] + self.line_sd[-2]) / 4
self.rt_skd_count = self.cur_skd_count
self.rt_skd_cross = self.cur_skd_cross
if self.rt_skd_cross_price == 0 or self.cur_price < self.rt_skd_cross_price:
self.rt_skd_cross_price = self.cur_price
self.cur_skd_cross_price = self.cur_price
if self.cur_skd_divergence < 0:
# 若原来是顶背离,消失
self.cur_skd_divergence = 0
else: # if self.line_sk[-1] < self.line_sk[-2]:
# 延续死叉
self.cur_skd_count -= 1
# 取消实时的数据
self.rt_skd_count = 0
self.rt_skd_cross = 0
self.rt_skd_cross_price = 0
# 延续顶背离
if self.cur_skd_divergence < 0:
self.cur_skd_divergence -= 1
return
elif self.cur_skd_count >= 0:
if dead_cross:
self.cur_skd_count = -1
self.cur_skd_cross = (self.line_sk[-1] + self.line_sk[-2] + self.line_sd[-1] + self.line_sd[-2]) / 4
self.rt_skd_count = self.cur_skd_count
self.rt_skd_cross = self.cur_skd_cross
if self.rt_skd_cross_price == 0 or self.cur_price > self.rt_skd_cross_price:
self.rt_skd_cross_price = self.cur_price
self.cur_skd_cross_price = self.cur_price
# 若原来是底背离,消失
if self.cur_skd_divergence > 0:
self.cur_skd_divergence = 0
else:
# 延续金叉
self.cur_skd_count += 1
# 取消实时的数据
self.rt_skd_count = 0
self.rt_skd_cross = 0
self.rt_skd_cross_price = 0
# 延续底背离
if self.cur_skd_divergence > 0:
self.cur_skd_divergence += 1
def __get_2nd_item(self, line):
"""
获取第二个合适的选项
:param line:
:return:
"""
bars = 0
for item in reversed(line):
bars += item['bars']
if bars > 5:
return item
return line[0]
def is_skd_divergence(self, direction, runtime=False):
"""
检查是否有背离
:param:direction,多:检查是否有顶背离,空,检查是否有底背离
:return:
"""
if len(self.skd_top_list) < 2 or len(self.skd_buttom_list) < 2 or self._rt_sk is None or self._rt_sd is None:
return False
t1 = self.skd_top_list[-1]
t2 = self.__get_2nd_item(self.skd_top_list[:-1])
b1 = self.skd_buttom_list[-1]
b2 = self.__get_2nd_item(self.skd_buttom_list[:-1])
if runtime:
# 峰(顶部)
if self._rt_sk < self.line_sk[-1] and self.line_sk[-2] < self.line_sk[-1]:
t1 = {}
t1['type'] = u'T'
t1['sk'] = self.line_sk[-1]
t1['price'] = max(self.high_array[-4:])
t1['time'] = self.line_bar[-1].datetime
t1['bars'] = 0
t2 = self.__get_2nd_item(self.skd_top_list)
# 谷(底部)
elif self._rt_sk > self.line_sk[-1] and self.line_sk[-2] > self.line_sk[-1]:
b1 = {}
b1['type'] = u'B'
b1['sk'] = self.line_sk[-1]
b1['price'] = min(self.low_array[-4:])
b1['time'] = self.line_bar[-1].datetime
b1['bars'] = 0
b2 = self.__get_2nd_item(self.skd_buttom_list)
# 检查顶背离
if direction == Direction.LONG:
t1_price = t1.get('price', 0)
t2_price = t2.get('price', 0)
t1_sk = t1.get('sk', 0)
t2_sk = t2.get('sk', 0)
b1_sk = b1.get('sk', 0)
t2_t1_price_rate = ((t1_price - t2_price) / t2_price) if t2_price != 0 else 0
t2_t1_sk_rate = ((t1_sk - t2_sk) / t2_sk) if t2_sk != 0 else 0
# 背离:价格创新高,SK指标没有创新高
if t2_t1_price_rate > 0 and t2_t1_sk_rate < 0 and b1_sk > self.para_skd_high:
return True
elif direction == Direction.SHORT:
b1_price = b1.get('price', 0)
b2_price = b2.get('price', 0)
b1_sk = b1.get('sk', 0)
b2_sk = b2.get('sk', 0)
t1_sk = t1.get('sk', 0)
b2_b1_price_rate = ((b1_price - b2_price) / b2_price) if b2_price != 0 else 0
b2_b1_sk_rate = ((b1_sk - b2_sk) / b2_sk) if b2_sk != 0 else 0
# 背离:价格创新低,指标没有创新低
if b2_b1_price_rate < 0 and b2_b1_sk_rate > 0 and t1_sk < self.para_skd_low:
return True
return False
def rt_count_sk_sd(self):
"""
计算实时SK/SD
:return:
"""
if not self.para_active_skd:
return
# 准备得数据长度
data_len = max(self.para_skd_fast_len * 2, self.para_skd_fast_len + 20)
if len(self.line_bar) < data_len:
return
# 收盘价 = 结算bar + 最后一个未结束得close
close_array = np.append(self.close_array[-data_len:], [self.line_bar[-1].close_price])
# 计算最后得动态RSI值
last_rsi = ta.RSI(close_array[-2 * self.para_skd_fast_len:], self.para_skd_fast_len)[-1]
# 所有RSI值长度不足计算标准
if len(self.line_skd_rsi) < self.para_skd_slow_len:
return
# 拼接RSI list
rsi_list = self.line_skd_rsi[1 - self.para_skd_slow_len:]
rsi_list.append(last_rsi)
# 获取 RSI得最高/最低值
rsi_HHV = max(rsi_list)
rsi_LLV = min(rsi_list)
# 计算动态STO
if rsi_HHV == rsi_LLV:
sto = 0
else:
sto = 100 * (last_rsi - rsi_LLV) / (rsi_HHV - rsi_LLV)
sto_len = len(self.line_skd_sto)
if sto_len < 5:
self._rt_sk = self.line_sk[-1] if len(self.line_sk) > 0 else 0
self._rt_sd = self.line_sd[-1] if len(self.line_sd) > 0 else 0
return
# 历史STO
sto_list = self.line_skd_sto[:]
sto_list.append(sto)
self._rt_sk = ta.EMA(np.array(sto_list, dtype=float), 5)[-1]
self._rt_sk = round(self._rt_sk, self.round_n)
sk_list = self.line_sk[:]
sk_list.append(self._rt_sk)
if len(sk_list) < 5:
self._rt_sd = self.line_sd[-1] if len(self.line_sd) > 0 else 0
else:
self._rt_sd = ta.EMA(np.array(sk_list, dtype=float), 3)[-1]
self._rt_sd = round(self._rt_sd, self.round_n)
def is_skd_has_risk(self, direction, dist=15, runtime=False):
"""
检查SDK的方向风险
:return:
"""
if not self.para_active_skd or len(self.line_sk) < 2 or self._rt_sk is None:
return False
if runtime:
sk = self._rt_sk
else:
sk = self.line_sk[-1]
if direction == Direction.LONG and sk >= 100 - dist:
return True
if direction == Direction.SHORT and sk <= dist:
return True
return False
def is_skd_high_dead_cross(self, runtime=False, high_skd=None):
"""
检查是否高位死叉
:return:
"""
if not self.para_active_skd or len(self.line_sk) < self.para_skd_slow_len:
return False
if high_skd is None:
high_skd = self.para_skd_high
if runtime:
# 兼容写法,如果老策略没有配置实时运行,又用到实时数据,就添加
if self.rt_count_skd not in self.rt_funcs:
self.write_log(u'skd_is_high_dead_cross(),添加rt_countSkd到实时函数中')
self.rt_funcs.add(self.rt_count_skd)
self.rt_count_sk_sd()
if self._rt_sk is None or self._rt_sd is None:
return False
# 判断是否实时死叉
dead_cross = self._rt_sk < self.line_sk[-1] and self.line_sk[-1] > self.line_sd[
-1] and self._rt_sk < self._rt_sd
# 实时死叉
if self.cur_skd_count >= 0 and dead_cross:
skd_last_cross = (self._rt_sk + self.line_sk[-1] + self._rt_sd + self.line_sd[-1]) / 4
# 记录bar内首次死叉后的值:交叉值,价格
if self.rt_skd_count >= 0:
self.rt_skd_count = -1
self.rt_skd_cross = skd_last_cross
self.rt_skd_cross_price = self.cur_price
# self.write_log(u'{} rt Dead Cross at:{} ,price:{}'
# .format(self.name, self.skd_rt_last_cross, self.skd_rt_cross_price))
if skd_last_cross > high_skd:
return True
# 非实时,高位死叉
if self.cur_skd_count < 0 and self.cur_skd_cross > high_skd:
return True
return False
def is_skd_low_golden_cross(self, runtime=False, low_skd=None):
"""
检查是否低位金叉
:return:
"""
if not self.para_active_skd or len(self.line_sk) < self.para_skd_slow_len:
return False
if low_skd is None:
low_skd = self.para_skd_low
if runtime:
# 兼容写法,如果老策略没有配置实时运行,又用到实时数据,就添加
if self.rt_count_skd not in self.rt_funcs:
self.write_log(u'skd_is_low_golden_cross添加rt_countSkd到实时函数中')
self.rt_funcs.add(self.rt_count_skd)
self.rt_count_sk_sd()
if self._rt_sk is None or self._rt_sd is None:
return False
# 判断是否金叉和死叉
golden_cross = self._rt_sk > self.line_sk[-1] and self.line_sk[-1] < self.line_sd[
-1] and self._rt_sk > self._rt_sd
if self.cur_skd_count <= 0 and golden_cross:
# 实时金叉
skd_last_cross = (self._rt_sk + self.line_sk[-1] + self._rt_sd + self.line_sd[-1]) / 4
if self.rt_skd_count <= 0:
self.rt_skd_count = 1
self.rt_skd_cross = skd_last_cross
self.rt_skd_cross_price = self.cur_price
# self.write_log(u'{} rt Gold Cross at:{} ,price:{}'
# .format(self.name, self.skd_rt_last_cross, self.skd_rt_cross_price))
if skd_last_cross < low_skd:
return True
# 非实时低位金叉
if self.cur_skd_count > 0 and self.cur_skd_cross < low_skd:
return True
return False
def rt_count_skd(self):
"""
实时计算 SK,SD值,并且判断计算是否实时金叉/死叉
:return:
"""
if self.para_active_skd:
# 计算实时指标 rt_SK, rt_SD
self.rt_count_sk_sd()
# 计算 实时金叉/死叉
self.is_skd_high_dead_cross(runtime=True, high_skd=0)
self.is_skd_low_golden_cross(runtime=True, low_skd=100)
@property
def rt_sk(self):
self.check_rt_funcs(self.rt_count_skd)
if self._rt_sk is None and len(self.line_sk) > 0:
return self.line_sk[-1]
return self._rt_sk
@property
def rt_sd(self):
self.check_rt_funcs(self.rt_count_skd)
if self._rt_sd is None and len(self.line_sd) > 0:
return self.line_sd[-1]
return self._rt_sd
def __count_yb(self):
"""某种趋势线"""
if not self.para_active_yb:
return
if self.para_yb_len < 1:
return
if self.para_yb_ref < 1:
self.write_log(u'参数 self.inputYbRef:{}不能低于1'.format(self.para_yb_ref))
return
# 1、lineBar满足长度才执行计算
# if len(self.lineBar) < 4 * self.inputYbLen:
# self.write_log(u'数据未充分,当前Bar数据数量:{0},计算YB 需要:{1}'.
# format(len(self.lineBar), 4 * self.inputYbLen))
# return
ema_len = min(self.bar_len, self.para_yb_len)
if ema_len < 3:
self.write_log(u'数据未充分,当前Bar数据数量:{0}'.
format(len(self.line_bar)))
return
# 3、获取前InputN周期(不包含当前周期)的K线
bar_mid3_ema10 = ta.EMA(self.mid3_array[-ema_len * 4:], ema_len)[-1]
bar_mid3_ema10 = round(float(bar_mid3_ema10), self.round_n)
if len(self.line_yb) > self.max_hold_bars:
del self.line_yb[0]
self.line_yb.append(bar_mid3_ema10)
if len(self.line_yb) < self.para_yb_ref + 1:
return
if self.line_yb[-1] > self.line_yb[-1 - self.para_yb_ref]:
self.cur_yb_count = self.cur_yb_count + 1 if self.cur_yb_count >= 0 else 1
else:
self.cur_yb_count = self.cur_yb_count - 1 if self.cur_yb_count <= 0 else -1
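# line_yb is an EMA of the bar "mid3" price (high + low + close) / 3 over para_yb_len bars;
# cur_yb_count then counts how many consecutive bars the trend line has been above (positive)
# or below (negative) its value para_yb_ref bars ago.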
def rt_count_yb(self):
"""
实时计算黄蓝
:return:
"""
if not self.para_active_yb:
return
if self.para_yb_len < 1:
return
if self.para_yb_ref < 1:
self.write_log(u'参数 self.inputYbRef:{}不能低于1'.format(self.para_yb_ref))
return
ema_len = min(len(self.line_bar), self.para_yb_len)
if ema_len < 3:
self.write_log(u'数据未充分,当前Bar数据数量:{0}'.
format(len(self.line_bar)))
return
# 3、获取前InputN周期(包含当前周期)的K线
last_bar_mid3 = (self.line_bar[-1].close_price + self.line_bar[-1].high_price + self.line_bar[-1].low_price) / 3
bar_mid3_ema10 = ta.EMA(np.append(self.mid3_array[-ema_len * 3:], [last_bar_mid3]), ema_len)[-1]
self._rt_yb = round(float(bar_mid3_ema10), self.round_n)
@property
def rt_yb(self):
self.check_rt_funcs(self.rt_count_yb)
if self._rt_yb is None and len(self.line_yb) > 0:
return self.line_yb[-1]
return self._rt_yb
def __count_golden_section(self):
"""
重新计算黄金分割线
:return:
"""
if self.para_golden_n < 0:
return
if self.bar_len < 2:
return
bar_len = min(self.para_golden_n, self.bar_len)
hhv = max(self.high_array[-bar_len:])
llv = min(self.low_array[-bar_len:])
self.cur_p192 = hhv - (hhv - llv) * 0.192
self.cur_p382 = hhv - (hhv - llv) * 0.382
self.cur_p500 = (hhv + llv) / 2
self.cur_p618 = hhv - (hhv - llv) * 0.618
self.cur_p809 = hhv - (hhv - llv) * 0.809
# 根据最小跳动取整
self.cur_p192 = round_to(self.cur_p192, self.price_tick)
self.cur_p382 = round_to(self.cur_p382, self.price_tick)
self.cur_p500 = round_to(self.cur_p500, self.price_tick)
self.cur_p618 = round_to(self.cur_p618, self.price_tick)
self.cur_p809 = round_to(self.cur_p809, self.price_tick)
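# Worked example: with hhv = 110 and llv = 100 the retracement levels are
# p192 = 108.08, p382 = 106.18, p500 = 105.0, p618 = 103.82, p809 = 101.91
# (before being snapped to price_tick by round_to()).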
def __count_area(self, bar):
"""计算布林和MA的区域"""
if not self.para_active_area:
return
if len(self.line_ma1) < 2 or len(self.line_boll_middle) < 2:
return
last_area = self.area_list[-1] if len(self.area_list) > 0 else None
new_area = None
# 判断做多/做空,判断在那个区域
# 做多:均线(169)向上,且中轨在均线上方(不包含金叉死叉点集)
# 做空:均线(169)向下,且中轨在均线下方(不包含金叉死叉点集)
if self.line_boll_middle[-1] > self.line_ma1[-1] > self.line_ma1[-2]:
# 做多
if self.line_boll_middle[-1] >= bar.close_price >= max(self.line_ma1[-1], self.line_boll_lower[-1]):
# 判断 A 的区域 ( ma169或下轨 ~ 中轨)
new_area = Area.LONG_A
elif self.line_boll_upper[-1] >= bar.close_price > self.line_boll_middle[-1]:
# 判断 B 的区域( 中轨 ~ 上轨)
new_area = Area.LONG_B
elif self.line_boll_upper[-1] < bar.close_price:
# 判断 C 的区域( 上轨~ )
new_area = Area.LONG_C
elif max(self.line_ma1[-1], self.line_boll_lower[-1]) > bar.close_price >= min(self.line_ma1[-1],
self.line_boll_lower[-1]):
# 判断 D 的区域( 下轨~均线~ )
new_area = Area.LONG_D
elif min(self.line_boll_lower[-1], self.line_ma1[-1]) > bar.close_price:
# 判断 E 的区域( ~下轨或均线下方 )
new_area = Area.LONG_E
elif self.line_ma1[-2] > self.line_ma1[-1] > self.line_boll_middle[-1]:
# 做空
if self.line_boll_middle[-1] <= bar.close_price <= min(self.line_ma1[-1], self.line_boll_upper[-1]):
# 判断 A 的区域 ( ma169或上轨 ~ 中轨)
new_area = Area.SHORT_A
elif self.line_boll_lower[-1] <= bar.close_price < self.line_boll_middle[-1]:
# 判断 B 的区域( 下轨~中轨 )
new_area = Area.SHORT_B
elif self.line_boll_lower[-1] > bar.close_price:
# 判断 C 的区域( ~下轨 )
new_area = Area.SHORT_C
elif min(self.line_ma1[-1], self.line_boll_upper[-1]) < bar.close_price <= max(self.line_ma1[-1],
self.line_boll_upper[-1]):
# 判断 D 的区域(均线~上轨 )
new_area = Area.SHORT_D
elif max(self.line_ma1[-1], self.line_boll_upper[-1]) < bar.close_price:
# 判断 E 的区域( 上轨~ )
new_area = Area.SHORT_E
if last_area != new_area:
self.area_list.append(new_area)
self.cur_area = new_area
self.pre_area = last_area
def __count_bias(self):
"""乖离率"""
# BIAS1 : (CLOSE-MA(CLOSE,L1))/MA(CLOSE,L1)*100;
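# e.g. close = 105 and MA(close, L1) = 100 gives a bias of (105 - 100) / 100 * 100 = 5 (percent).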
if not (self.para_bias_len > 0 or self.para_bias2_len > 0 or self.para_bias3_len > 0):
# 不计算
return
if self.para_bias_len > 0:
if self.bar_len < min(6, self.para_bias_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Bias需要:{1}'.
format(len(self.line_bar), min(14, self.para_bias_len) + 1))
else:
BiasLen = min(self.para_bias_len, self.bar_len)
# 计算BIAS
m = np.mean(self.close_array[-BiasLen:])
bias = (self.close_array[-1] - m) / m * 100
self.line_bias.append(bias) # 中轨
if len(self.line_bias) > self.max_hold_bars:
del self.line_bias[0]
self.cur_bias = bias
if self.para_bias2_len > 0:
if self.bar_len < min(6, self.para_bias2_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Bias2需要:{1}'.
format(len(self.line_bar), min(14, self.para_bias2_len) + 1))
else:
Bias2Len = min(self.bar_len, self.para_bias2_len)
# 计算BIAS2
m = np.mean(self.close_array[-Bias2Len:])
bias2 = (self.close_array[-1] - m) / m * 100
self.line_bias2.append(bias2) # 中轨
if len(self.line_bias2) > self.max_hold_bars:
del self.line_bias2[0]
self.cur_bias2 = bias2
if self.para_bias3_len > 0:
if self.bar_len < min(6, self.para_bias3_len) + 1:
self.write_log(u'数据未充分,当前Bar数据数量:{0},计算Bias3需要:{1}'.
format(len(self.line_bar), min(14, self.para_bias3_len) + 1))
else:
Bias3Len = min(self.bar_len, self.para_bias3_len)
# 计算BIAS3
m = np.mean(self.close_array[-Bias3Len:])
bias3 = (self.close_array[-1] - m) / m * 100
self.line_bias3.append(bias3) # 中轨
if len(self.line_bias3) > self.max_hold_bars:
del self.line_bias3[0]
self.cur_bias3 = bias3
def rt_count_bias(self):
"""实时计算乖离率"""
if not (self.para_bias_len > 0 or self.para_bias2_len > 0 or self.para_bias3_len > 0): # 不计算
return
if self.para_bias_len > 0:
if self.bar_len < min(6, self.para_bias_len) + 1:
return
else:
biasLen = min(self.bar_len, self.para_bias_len) - 1
# 计算BIAS
m = np.mean(np.append(self.close_array[-biasLen:], [self.cur_price]))
self._rt_bias = (self.cur_price - m) / m * 100
if self.para_bias2_len > 0:
if self.bar_len < min(6, self.para_bias2_len) + 1:
return
else:
biasLen = min(self.bar_len, self.para_bias2_len) - 1
# 计算BIAS
m = np.mean(np.append(self.close_array[-biasLen:], [self.cur_price]))
self._rt_bias2 = (self.cur_price - m) / m * 100
if self.para_bias3_len > 0:
if self.bar_len < min(6, self.para_bias3_len) + 1:
return
else:
biasLen = min(self.bar_len, self.para_bias3_len) - 1
# 计算BIAS
m = np.mean(np.append(self.close_array[-biasLen:], [self.cur_price]))
self._rt_bias3 = (self.cur_price - m) / m * 100
@property
def rt_bias(self):
self.check_rt_funcs(self.rt_count_bias)
if self._rt_bias is None and len(self.line_bias) > 0:
return self.line_bias[-1]
return self._rt_bias
@property
def rt_bias2(self):
self.check_rt_funcs(self.rt_count_bias)
if self._rt_bias2 is None and len(self.line_bias2) > 0:
return self.line_bias2[-1]
return self._rt_bias2
@property
def rt_bias3(self):
self.check_rt_funcs(self.rt_count_bias)
if self._rt_bias3 is None and len(self.line_bias3) > 0:
return self.line_bias3[-1]
return self._rt_bias3
def write_log(self, content):
"""记录CTA日志"""
self.strategy.write_log(u'[' + self.name + u']' + content)
def append_data(self, file_name, dict_data, field_names=None):
"""
添加数据到csv文件中
:param file_name: csv的文件全路径
:param dict_data: OrderedDict
:return:
"""
if not isinstance(dict_data, dict):
print(u'{}.append_data,输入数据不是dict'.format(self.name), file=sys.stderr)
return
dict_fieldnames = list(dict_data.keys()) if field_names is None else field_names
if not isinstance(dict_fieldnames, list):
print(u'{}append_data,输入字段不是list'.format(self.name), file=sys.stderr)
return
try:
if not os.path.exists(file_name):
self.write_log(u'create csv file:{}'.format(file_name))
with open(file_name, 'a', encoding='utf8', newline='') as csvWriteFile:
writer = csv.DictWriter(f=csvWriteFile, fieldnames=dict_fieldnames, dialect='excel')
self.write_log(u'write csv header:{}'.format(dict_fieldnames))
writer.writeheader()
writer.writerow(dict_data)
else:
dt = dict_data.get('datetime', None)
if dt is not None:
dt_index = dict_fieldnames.index('datetime')
last_dt = self.get_csv_last_dt(file_name=file_name, dt_index=dt_index,
line_length=int(sys.getsizeof(dict_data) / 8) + 1)
if last_dt is not None and dt < last_dt:
print(u'新增数据时间{}比最后一条记录时间{}早,不插入'.format(dt, last_dt))
return
with open(file_name, 'a', encoding='utf8', newline='') as csvWriteFile:
writer = csv.DictWriter(f=csvWriteFile, fieldnames=dict_fieldnames, dialect='excel',
extrasaction='ignore')
writer.writerow(dict_data)
except Exception as ex:
print(u'{}.append_data exception:{}/{}'.format(self.name, str(ex), traceback.format_exc()))
def get_csv_last_dt(self, file_name, dt_index=0, line_length=1000):
"""
获取csv文件最后一行的日期数据(第dt_index个字段必须是 '%Y-%m-%d %H:%M:%S'格式
:param file_name:文件名
:param line_length: 行数据的长度
:return: None,文件不存在,或者时间格式不正确
"""
with open(file_name, 'r') as f:
f_size = os.path.getsize(file_name)
if f_size < line_length:
line_length = f_size
f.seek(f_size - line_length) # 移动到最后1000个字节
for row in f.readlines()[-1:]:
datas = row.split(',')
if len(datas) > dt_index + 1:
try:
last_dt = datetime.strptime(datas[dt_index], '%Y-%m-%d %H:%M:%S')
return last_dt
except Exception:
return None
return None
def is_shadow_line(self, open, high, low, close, direction, shadow_rate, wave_rate):
"""
是否上影线/下影线
:param open: 开仓价
:param high: 最高价
:param low: 最低价
:param close: 收盘价
:param direction: 方向(多/空)
:param shadow_rate: 上影线比例(百分比)
:param wave_rate:振幅(百分比)
:return:
"""
if close <= 0 or high <= low or shadow_rate <= 0 or wave_rate <= 0:
self.write_log(u'是否上下影线,参数出错.close={}, high={},low={},shadow_rate={},wave_rate={}'
.format(close, high, low, shadow_rate, wave_rate))
return False
# 振幅 = 高-低 / 收盘价 百分比
cur_wave_rate = round(100 * float((high - low) / close), 2)
# 上涨时,判断上影线
if direction == Direction.LONG:
# 上影线比例 = 上影线(高- max(开盘,收盘))/ 当日振幅=(高-低)
cur_shadow_rate = round(100 * float((high - max(open, close)) / (high - low)), 2)
if cur_wave_rate >= wave_rate and cur_shadow_rate >= shadow_rate:
return True
# 下跌时,判断下影线
elif direction == Direction.SHORT:
cur_shadow_rate = round(100 * float((min(open, close) - low) / (high - low)), 2)
if cur_wave_rate >= wave_rate and cur_shadow_rate >= shadow_rate:
return True
return False
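# Example for the LONG branch: open = 100, high = 110, low = 99, close = 101 gives a bar range of
# 11 points, a wave rate of 11 / 101 * 100 ≈ 10.89% and an upper-shadow share of
# (110 - 101) / 11 * 100 ≈ 81.82%, so it qualifies for any thresholds at or below those values.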
def is_end_tick(self, tick_dt):
"""
根据短合约和时间,判断是否为最后一个tick
:param tick_dt:
:return:
"""
if self.is_7x24:
return False
# 中金所,只有11:30 和15:15,才有最后一个tick
if self.underly_symbol in MARKET_ZJ:
if (tick_dt.hour == 11 and tick_dt.minute == 30) or (tick_dt.hour == 15 and tick_dt.minute == 15):
return True
else:
return False
# 其他合约(上期所/郑商所/大连)
if 2 <= tick_dt.hour < 23:
if (tick_dt.hour == 10 and tick_dt.minute == 15) \
or (tick_dt.hour == 11 and tick_dt.minute == 30) \
or (tick_dt.hour == 15 and tick_dt.minute == 00) \
or (tick_dt.hour == 2 and tick_dt.minute == 30):
return True
else:
return False
# 夜盘1:30收盘
if self.underly_symbol in NIGHT_MARKET_SQ2 and tick_dt.hour == 1 and tick_dt.minute == 00:
return True
# 夜盘23:00收盘
if self.underly_symbol in NIGHT_MARKET_23 and tick_dt.hour == 23 and tick_dt.minute == 00:
return True
return False
def get_data(self):
"""
获取数据,供外部系统查看
:return: dict:
{
name: [], # k线名称
type: k线类型:second, minute , hour, day, week
interval: 周期
symbol: 品种,
main_indicators: [] , 主图指标
sub_indicators: [], 附图指标
start_time: '', 开始时间
end_time: '',结束时间
data_list: list of dict
}
"""
# 根据参数,生成主图指标和附图指标
indicators = {}
# 前高/前低(通道)
if isinstance(self.para_pre_len, int) and self.para_pre_len > 0:
indicator = {
'name': 'preHigh{}'.format(self.para_pre_len),
'attr_name': 'line_pre_high',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'preLow{}'.format(self.para_pre_len),
'attr_name': 'line_pre_low',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# EMA 均线(主图)
if isinstance(self.para_ema1_len, int) and self.para_ema1_len > 0:
indicator = {
'name': 'EMA{}'.format(self.para_ema1_len),
'attr_name': 'line_ema1',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_ema2_len, int) and self.para_ema2_len > 0:
indicator = {
'name': 'EMA{}'.format(self.para_ema2_len),
'attr_name': 'line_ema2',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_ema3_len, int) and self.para_ema3_len > 0:
indicator = {
'name': 'EMA{}'.format(self.para_ema3_len),
'attr_name': 'line_ema3',
'is_main': True,
'type': 'line'
}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# MA 均线 (主图)
if isinstance(self.para_ma1_len, int) and self.para_ma1_len > 0:
indicator = {
'name': 'MA{}'.format(self.para_ma1_len),
'attr_name': 'line_ma1',
'is_main': True,
'type': 'line'
}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_ma2_len, int) and self.para_ma2_len > 0:
indicator = {
'name': 'MA{}'.format(self.para_ma2_len),
'attr_name': 'line_ma2',
'is_main': True,
'type': 'line'
}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_ma3_len, int) and self.para_ma3_len > 0:
indicator = {
'name': 'MA{}'.format(self.para_ma3_len),
'attr_name': 'line_ma3',
'is_main': True,
'type': 'line'
}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# 动能指标(附图)
if isinstance(self.para_dmi_len, int) and self.para_dmi_len > 0:
indicator = {
'name': 'ADX({})'.format(self.para_dmi_len),
'attr_name': 'line_adx',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'ADXR({})'.format(self.para_dmi_len),
'attr_name': 'line_adxr',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# 平均波动率 (副图)
if isinstance(self.para_atr1_len, int) and self.para_atr1_len > 0:
indicator = {
'name': 'ATR{}'.format(self.para_atr1_len),
'attr_name': 'line_atr1',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_atr2_len, int) and self.para_atr2_len > 0:
indicator = {
'name': 'ATR{}'.format(self.para_atr2_len),
'attr_name': 'line_atr2',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_atr3_len, int) and self.para_atr3_len > 0:
indicator = {
'name': 'ATR{}'.format(self.para_atr3_len),
'attr_name': 'line_atr3',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Average volume (sub chart)
if isinstance(self.para_vol_len, int) and self.para_vol_len > 0:
indicator = {
'name': 'AgvVol({})'.format(self.para_vol_len),
'attr_name': 'line_vol_ma',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Oscillator (sub chart)
if isinstance(self.para_rsi1_len, int) and self.para_rsi1_len > 0:
indicator = {
'name': 'RSI({})'.format(self.para_rsi1_len),
'attr_name': 'line_rsi1',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_rsi2_len, int) and self.para_rsi2_len > 0:
indicator = {
'name': 'RSI({})'.format(self.para_rsi2_len),
'attr_name': 'line_rsi2',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Market volatility index (sub chart)
if isinstance(self.para_cmi_len, int) and self.para_cmi_len > 0:
indicator = {
'name': 'CMI({})'.format(self.para_cmi_len),
'attr_name': 'line_cmi',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Bollinger channel (main chart)
if (isinstance(self.para_boll_len, int) and self.para_boll_len > 0) or (
isinstance(self.para_boll_tb_len, int) and self.para_boll_tb_len > 0):
boll_len = max(self.para_boll_tb_len, self.para_boll_len)
indicator = {
'name': 'BOLL({})_U'.format(boll_len),
'attr_name': 'line_boll_upper',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'BOLL({})_M'.format(boll_len),
'attr_name': 'line_boll_middle',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'BOLL({})_L'.format(boll_len),
'attr_name': 'line_boll_lower',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Second Bollinger channel (main chart)
if (isinstance(self.para_boll2_len, int) and self.para_boll2_len > 0) or (
isinstance(self.para_boll2_tb_len, int) and self.para_boll2_tb_len > 0):
boll_len = max(self.para_boll2_tb_len, self.para_boll2_len)
indicator = {
'name': 'BOLL({})_U'.format(boll_len),
'attr_name': 'line_boll2_upper',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'BOLL({})_M'.format(boll_len),
'attr_name': 'line_boll2_middle',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'BOLL({})_L'.format(boll_len),
'attr_name': 'line_boll2_lower',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# KDJ oscillator (sub chart)
if (isinstance(self.para_kdj_len, int) and self.para_kdj_len > 0) or (
isinstance(self.para_kdj_tb_len, int) and self.para_kdj_tb_len > 0):
kdj_len = max(self.para_kdj_tb_len, self.para_kdj_len)
indicator = {
'name': 'KDJ({})_K'.format(kdj_len),
'attr_name': 'line_k',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'KDJ({})_D'.format(kdj_len),
'attr_name': 'line_d',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# CCI momentum indicator (sub chart)
if isinstance(self.para_cci_len, int) and self.para_cci_len > 0:
indicator = {
'name': 'CCI({})'.format(self.para_cci_len),
'attr_name': 'line_cci',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if isinstance(self.para_macd_fast_len, int) and self.para_macd_fast_len > 0:
indicator = {
'name': 'Dif',
'attr_name': 'line_dif',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'Dea',
'attr_name': 'line_dea',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'Macd',
'attr_name': 'line_macd',
'is_main': False,
'type': 'bar'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Kalman-filtered moving average (main chart)
if self.para_active_kf:
indicator = {
'name': 'KF',
'attr_name': 'line_state_mean',
'is_main': True,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# SK/SD oscillator (sub chart)
if self.para_active_skd:
indicator = {
'name': 'SK',
'attr_name': 'line_sk',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
indicator = {
'name': 'SD',
'attr_name': 'line_sd',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Centre-of-gravity line (YB)
if self.para_active_yb:
indicator = {
'name': 'YB',
'attr_name': 'line_yb',
'is_main': True,
'type': 'bar'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Parabolic SAR (main chart)
if self.para_sar_step > 0:
indicator = {
'name': 'SAR',
'attr_name': 'line_sar',
'is_main': True,
'type': 'point'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Bias / deviation rate (sub chart)
if self.para_bias_len > 0:
indicator = {
'name': 'Bias{}'.format(self.para_bias_len),
'attr_name': 'line_bias',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if self.para_bias2_len > 0:
indicator = {
'name': 'Bias{}'.format(self.para_bias2_len),
'attr_name': 'line_bias2',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
if self.para_bias3_len > 0:
indicator = {
'name': 'Bias{}'.format(self.para_bias3_len),
'attr_name': 'line_bias3',
'is_main': False,
'type': 'line'}
indicators.update({indicator.get('name'): copy.copy(indicator)})
# Fill each bar's data into a list of dicts (pandas-style records)
bar_list = [OrderedDict({'datetime': bar.datetime,
'open': bar.open_price, 'high': bar.high_price, 'low': bar.low_price,
'close': bar.close_price,
'volume': bar.volume, 'open_interest': bar.open_interest}) for bar in self.line_bar]
bar_len = len(bar_list)
if bar_len == 0:
return {}
# Attach the indicator data
main_indicators = []
sub_indicators = []
for k, v in indicators.items():
attr_name = v.get('attr_name', None)
if attr_name is None or not hasattr(self, attr_name):
continue
attr_data_list = getattr(self, attr_name, [])
data_len = len(attr_data_list)
if data_len == 0:
continue
if data_len > bar_len:
attr_data_list = attr_data_list[-bar_len:]
elif data_len < bar_len:
first_data = attr_data_list[0]
attr_data_list = [first_data] * (bar_len - data_len) + attr_data_list
# Add the value to every dict in bar_list
for i in range(bar_len):
bar_list[i].update({k: attr_data_list[i]})
if v.get('is_main', False):
main_indicators.append({'name': k, 'type': v.get('type')})
else:
sub_indicators.append({'name': k, 'type': v.get('type')})
return {
'name': self.name,
'type': self.interval,
'interval': self.bar_interval,
'symbol': self.line_bar[-1].vt_symbol,
'main_indicators': list(sorted(main_indicators, key=lambda x: x['name'])),
'sub_indicators': list(sorted(sub_indicators, key=lambda x: x['name'])),
'start_time': bar_list[0].get('datetime'),
'end_time': bar_list[-1].get('datetime'),
'data_list': bar_list}
class CtaMinuteBar(CtaLineBar):
"""
Minute-level K-line (bar) series
Specialisation of the base class CtaLineBar
"""
def __init__(self, strategy, cb_on_bar, setting=None):
if 'interval' in setting:
del setting['interval']
super(CtaMinuteBar, self).__init__(strategy, cb_on_bar, setting)
# Cumulative number of bars within the day
self.bars_count = 0
self.minutes_adjust = -15
self.m1_bars_count = 0
def __getstate__(self):
"""移除Pickle dump()时不支持的Attribute"""
return super().__getstate__()
def __setstate__(self, state):
"""Pickle load()"""
self.__dict__.update(state)
def restore(self, state):
"""从Pickle中恢复数据"""
for key in state.__dict__.keys():
self.__dict__[key] = state.__dict__[key]
def init_properties(self):
"""
Initialize internal variables
:return:
"""
self.init_param_list()
# Input parameters
self.name = u'MinuteBar'
self.mode = self.TICK_MODE # default to tick mode
self.interval = Interval.MINUTE # minute-level period
self.bar_interval = 5 # default to a 5-minute period
self.minute_interval = self.bar_interval # in minutes
def add_bar(self, bar, bar_is_completed=False, bar_freq=1):
"""
Allow an external initialisation routine to add a bar
:param bar:
:param bar_is_completed: set True if the inserted bar's period matches this K-line's period
:param bar_freq: frequency of the bar object
:return:
"""
if bar.trading_day is None:
if self.is_7x24:
bar.trading_day = bar.datetime.strftime('%Y-%m-%d')
else:
bar.trading_day = get_trading_date(bar.datetime)
# 更新最后价格
self.cur_price = bar.close_price
self.cur_datetime = bar.datetime
bar_len = len(self.line_bar)
if bar_len == 0:
self.line_bar.append(bar)
self.cur_trading_day = bar.trading_day
# self.m1_bars_count += bar_freq
if bar_is_completed:
# self.m1_bars_count = 0
self.on_bar(bar)
# 计算当前加入的 bar的1分钟,属于当日的第几个1分钟
minutes_passed = (bar.datetime - datetime.strptime(bar.datetime.strftime('%Y-%m-%d'),
'%Y-%m-%d')).total_seconds() / 60
# 计算,当前的bar,属于当日的第几个bar
self.bars_count = int(minutes_passed / self.bar_interval)
return
# 与最后一个BAR的时间比对,判断是否超过K线的周期
lastBar = self.line_bar[-1]
is_new_bar = False
if bar_is_completed:
is_new_bar = True
minutes_passed = (bar.datetime - datetime.strptime(bar.datetime.strftime('%Y-%m-%d'),
'%Y-%m-%d')).total_seconds() / 60
if self.underly_symbol in MARKET_ZJ:
if int(bar.datetime.strftime('%H%M')) > 1130 and int(bar.datetime.strftime('%H%M')) < 1600:
# Deduct the 90-minute midday break from 11:30 to 13:00
minutes_passed = minutes_passed - 90
else:
if int(bar.datetime.strftime('%H%M')) > 1015 and int(bar.datetime.strftime('%H%M')) <= 1130:
# Deduct the 15-minute break from 10:15 to 10:30
minutes_passed = minutes_passed - 15
elif int(bar.datetime.strftime('%H%M')) > 1130 and int(bar.datetime.strftime('%H%M')) < 1600:
# Deduct the 15-minute break (10:15-10:30) and the 120-minute midday break (11:30-13:30)
minutes_passed = minutes_passed - 135
bars_passed = int(minutes_passed / self.bar_interval)
# 不在同一交易日,推入新bar
if self.cur_trading_day != bar.trading_day:
is_new_bar = True
self.cur_trading_day = bar.trading_day
self.bars_count = bars_passed
# self.write_log("drawLineBar(): {}, m1_bars_count={}".format(bar.datetime.strftime("%Y%m%d %H:%M:%S"),
# self.m1_bars_count))
else:
if bars_passed != self.bars_count:
is_new_bar = True
self.bars_count = bars_passed
# self.write_log("addBar(): {}, bars_count={}".format(bar.datetime.strftime("%Y%m%d %H:%M:%S"),
# self.bars_count))
# Crypto: if the gap between bars exceeds the period, start a new bar
if self.is_7x24 and (bar.datetime - lastBar.datetime).total_seconds() >= 60 * self.bar_interval:
is_new_bar = True
if is_new_bar:
new_bar = copy.deepcopy(bar)
# 添加新的bar
self.line_bar.append(new_bar)
# 将上一个Bar推送至OnBar事件
self.on_bar(lastBar)
else:
# Update the last bar
# This branch merges shorter-period bars into a longer-period bar, e.g. three 5-minute bars into one 15-minute bar.
lastBar.close_price = bar.close_price
lastBar.high_price = max(lastBar.high_price, bar.high_price)
lastBar.low_price = min(lastBar.low_price, bar.low_price)
lastBar.volume = lastBar.volume + bar.volume
lastBar.open_interest = bar.open_interest
# 实时计算
self.rt_executed = False
def generate_bar(self, tick):
"""
Generate line bars from ticks
:param tick:
:return:
"""
bar_len = len(self.line_bar)
minutes_passed = (tick.datetime - datetime.strptime(tick.datetime.strftime('%Y-%m-%d'),
'%Y-%m-%d')).total_seconds() / 60
if self.underly_symbol in MARKET_ZJ:
if int(tick.datetime.strftime('%H%M')) > 1130 and int(tick.datetime.strftime('%H%M')) < 1600:
# 扣除11:30到13:00的中场休息的90分钟
minutes_passed = minutes_passed - 90
else:
if int(tick.datetime.strftime('%H%M')) > 1015 and int(tick.datetime.strftime('%H%M')) <= 1130:
# 扣除10:15到10:30的中场休息的15分钟
minutes_passed = minutes_passed - 15
elif int(tick.datetime.strftime('%H%M')) > 1130 and int(tick.datetime.strftime('%H%M')) < 1600:
# 扣除(10:15到10:30的中场休息的15分钟)&(11:30到13:30的中场休息的120分钟)
minutes_passed = minutes_passed - 135
bars_passed = int(minutes_passed / self.bar_interval)
# 保存第一个K线数据
if bar_len == 0:
self.first_tick(tick)
self.bars_count = bars_passed
return
# Drop bars beyond the max_hold_bars window
if bar_len > self.max_hold_bars:
del self.line_bar[0]
# 与最后一个BAR的时间比对,判断是否超过K线周期
lastBar = self.line_bar[-1]
is_new_bar = False
endtick = False
if not self.is_7x24:
# Handle the last tick of each intraday break, e.g. 10:15, 11:30, 15:00 and 2:30
if (tick.datetime.hour == 10 and tick.datetime.minute == 15) \
or (tick.datetime.hour == 11 and tick.datetime.minute == 30) \
or (tick.datetime.hour == 15 and tick.datetime.minute == 00) \
or (tick.datetime.hour == 2 and tick.datetime.minute == 30):
endtick = True
# 夜盘1:30收盘
if self.underly_symbol in NIGHT_MARKET_SQ2 and tick.datetime.hour == 1 and tick.datetime.minute == 00:
endtick = True
# 夜盘23:00收盘
if self.underly_symbol in NIGHT_MARKET_23 and tick.datetime.hour == 23 and tick.datetime.minute == 00:
endtick = True
if endtick is True:
return
is_new_bar = False
# 不在同一交易日,推入新bar
if self.cur_trading_day != tick.trading_day:
# self.write_log('{} drawLineBar() new_bar,{} curTradingDay:{},tick.trading_day:{} bars_count={}'
# .format(self.name, tick.datetime.strftime("%Y-%m-%d %H:%M:%S"), self.cur_trading_day,
# tick.trading_day, self.bars_count))
is_new_bar = True
self.cur_trading_day = tick.trading_day
self.bars_count = bars_passed
else:
# 同一交易日,看过去了多少个周期的Bar
if bars_passed != self.bars_count:
is_new_bar = True
self.bars_count = bars_passed
# self.write_log('{} drawLineBar() new_bar,{} bars_count={}'
# .format(self.name, tick.datetime, self.bars_count))
self.last_minute = tick.datetime.minute
# Crypto markets trade continuously, so only check whether the minute aligns with the interval, or the gap from the previous bar
if self.is_7x24:
if (
tick.datetime.minute % self.bar_interval == 0 and tick.datetime.minute != lastBar.datetime.minute) or (
tick.datetime - lastBar.datetime).total_seconds() > self.bar_interval * 60:
# self.write_log('{} drawLineBar() new_bar,{} lastbar:{}, bars_count={}'
# .format(self.name, tick.datetime, lastBar.datetime,
# self.bars_count))
is_new_bar = True
if is_new_bar:
# 创建并推入新的Bar
self.first_tick(tick)
# 触发OnBar事件
self.on_bar(lastBar)
else:
# 更新当前最后一个bar
self.barFirstTick = False
# 更新最高价、最低价、收盘价、成交量
lastBar.high_price = max(lastBar.high_price, tick.last_price)
lastBar.low_price = min(lastBar.low_price, tick.last_price)
lastBar.close_price = tick.last_price
lastBar.open_interest = tick.open_interest
lastBar.volume += tick.volume
# 更新Bar的颜色
if lastBar.close_price > lastBar.open_price:
lastBar.color = Color.RED
elif lastBar.close_price < lastBar.open_price:
lastBar.color = Color.BLUE
else:
lastBar.color = Color.EQUAL
# 实时计算
self.rt_executed = False
class CtaHourBar(CtaLineBar):
"""
Hour-level K-line (bar) series
Specialisation of the base class CtaLineBar
"""
def __init__(self, strategy, cb_on_bar, setting=None):
if 'interval' in setting:
del setting['interval']
super(CtaHourBar, self).__init__(strategy, cb_on_bar, setting)
# bar内得分钟数量累计
self.m1_bars_count = 0
self.last_minute = None
def __getstate__(self):
"""移除Pickle dump()时不支持的Attribute"""
return super().__getstate__()
def __setstate__(self, state):
"""Pickle load()"""
self.__dict__.update(state)
def restore(self, state):
"""从Pickle中恢复数据"""
for key in state.__dict__.keys():
self.__dict__[key] = state.__dict__[key]
def init_properties(self):
"""
初始化内部变量
:return:
"""
self.init_param_list()
# 输入参数
self.name = u'HourBar'
self.mode = self.TICK_MODE # 缺省为tick模式
self.interval = Interval.HOUR # 小时级别周期
self.bar_interval = 1 # 缺省为小时周期
self.minute_interval = 60 #
def add_bar(self, bar, bar_is_completed=False, bar_freq=1):
"""
予以外部初始化程序增加bar
:param bar:
:param bar_is_completed: 插入的bar,其周期与K线周期一致,就设为True
:param bar_freq, bar对象得frequency
:return:
"""
if bar.trading_day is None:
if self.is_7x24:
bar.trading_day = bar.date
else:
bar.trading_day = get_trading_date(bar.datetime)
# 更新最后价格
self.cur_price = bar.close_price
self.cur_datetime = bar.datetime
bar_len = len(self.line_bar)
if bar_len == 0:
self.line_bar.append(bar)
self.cur_trading_day = bar.trading_day
self.m1_bars_count += bar_freq
if bar_is_completed:
self.m1_bars_count = 0
self.on_bar(bar)
return
# 与最后一个BAR的时间比对,判断是否超过K线的周期
lastBar = self.line_bar[-1]
is_new_bar = False
if bar_is_completed:
is_new_bar = True
if self.cur_trading_day is None:
self.cur_trading_day = bar.trading_day
if self.cur_trading_day != bar.trading_day:
is_new_bar = True
self.cur_trading_day = bar.trading_day
if self.is_7x24:
if (bar.datetime - lastBar.datetime).total_seconds() >= 3600 * self.bar_interval:
is_new_bar = True
self.cur_trading_day = bar.trading_day
if self.m1_bars_count + bar_freq > 60 * self.bar_interval:
is_new_bar = True
if is_new_bar:
# 添加新的bar
self.line_bar.append(bar)
self.m1_bars_count = bar_freq
# 将上一个Bar推送至OnBar事件
self.on_bar(lastBar)
else:
# 更新最后一个bar
# 此段代码,针对一部分短周期生成长周期的k线更新,如3根5分钟k线,合并成1根15分钟k线。
lastBar.close_price = bar.close_price
lastBar.high_price = max(lastBar.high_price, bar.high_price)
lastBar.low_price = min(lastBar.low_price, bar.low_price)
lastBar.volume = lastBar.volume + bar.volume
lastBar.open_interest = bar.open_interest
self.m1_bars_count += bar_freq
# 实时计算
self.rt_executed = False
def generate_bar(self, tick):
"""
生成 line Bar
:param tick:
:return:
"""
bar_len = len(self.line_bar)
# 保存第一个K线数据
if bar_len == 0:
self.first_tick(tick)
return
# 清除480周期前的数据,
if bar_len > self.max_hold_bars:
del self.line_bar[0]
endtick = False
if not self.is_7x24:
# 处理日内的间隔时段最后一个tick,如10:15分,11:30分,15:00 和 2:30分
if (tick.datetime.hour == 10 and tick.datetime.minute == 15) \
or (tick.datetime.hour == 11 and tick.datetime.minute == 30) \
or (tick.datetime.hour == 15 and tick.datetime.minute == 00) \
or (tick.datetime.hour == 2 and tick.datetime.minute == 30):
endtick = True
# 夜盘1:30收盘
if self.underly_symbol in NIGHT_MARKET_SQ2 and tick.datetime.hour == 1 and tick.datetime.minute == 00:
endtick = True
# 夜盘23:00收盘
if self.underly_symbol in NIGHT_MARKET_23 and tick.datetime.hour == 23 and tick.datetime.minute == 00:
endtick = True
# 与最后一个BAR的时间比对,判断是否超过K线周期
lastBar = self.line_bar[-1]
is_new_bar = False
if self.last_minute is None:
if tick.datetime.second == 0:
self.m1_bars_count += 1
self.last_minute = tick.datetime.minute
# 不在同一交易日,推入新bar
if self.cur_trading_day != tick.trading_day:
is_new_bar = True
# 去除分钟和秒数
tick.datetime = datetime.strptime(tick.datetime.strftime('%Y-%m-%d %H:00:00'), '%Y-%m-%d %H:%M:%S')
tick.time = tick.datetime.strftime('%H:%M:%S')
self.last_minute = tick.datetime.minute
self.cur_trading_day = tick.trading_day
# self.write_log('{} drawLineBar() new_bar,{} curTradingDay:{},tick.trading_day:{}'
# .format(self.name, tick.datetime.strftime("%Y-%m-%d %H:%M:%S"), self.cur_trading_day,
# tick.trading_day))
else:
# 同一交易日,看分钟是否一致
if tick.datetime.minute != self.last_minute and not endtick:
self.m1_bars_count += 1
self.last_minute = tick.datetime.minute
if self.is_7x24:
# Crypto: use the time gap between consecutive ticks
if (tick.datetime - lastBar.datetime).total_seconds() >= 3600 * self.bar_interval:
# self.write_log('{} drawLineBar() new_bar,{} - {} > 3600 * {} '
# .format(self.name, tick.datetime.strftime("%Y-%m-%d %H:%M:%S"),
# lastBar.datetime.strftime("%Y-%m-%d %H:%M:%S"),
# self.barTimeInterval))
is_new_bar = True
# 去除分钟和秒数
tick.datetime = datetime.strptime(tick.datetime.strftime('%Y-%m-%d %H:00:00'), '%Y-%m-%d %H:%M:%S')
tick.time = tick.datetime.strftime('%H:%M:%S')
if len(tick.trading_day) > 0:
self.cur_trading_day = tick.trading_day
else:
self.cur_trading_day = tick.date
else:
# Domestic futures: accumulate 1-minute bars
if self.m1_bars_count > 60 * self.bar_interval:
# self.write_log('{} drawLineBar() new_bar,{} {} > 60 * {} '
# .format(self.name, tick.datetime.strftime("%Y-%m-%d %H:%M:%S"),
# self.m1_bars_count,
# self.barTimeInterval))
is_new_bar = True
# 去除秒数
tick.datetime = datetime.strptime(tick.datetime.strftime('%Y-%m-%d %H:%M:00'), '%Y-%m-%d %H:%M:%S')
tick.time = tick.datetime.strftime('%H:%M:%S')
if is_new_bar:
# 创建并推入新的Bar
self.first_tick(tick)
self.m1_bars_count = 1
# 触发OnBar事件
self.on_bar(lastBar)
else:
# 更新当前最后一个bar
self.barFirstTick = False
# 更新最高价、最低价、收盘价、成交量
lastBar.high_price = max(lastBar.high_price, tick.last_price)
lastBar.low_price = min(lastBar.low_price, tick.last_price)
lastBar.close_price = tick.last_price
lastBar.open_interest = tick.open_interest
lastBar.volume += tick.volume
# 更新Bar的颜色
if lastBar.close_price > lastBar.open_price:
lastBar.color = Color.RED
elif lastBar.close_price < lastBar.open_price:
lastBar.color = Color.BLUE
else:
lastBar.color = Color.EQUAL
# 实时计算
self.rt_executed = False
if not endtick:
self.lastTick = tick
class CtaDayBar(CtaLineBar):
"""
Daily K-line series (only 1-day bars are supported)
"""
def __init__(self, strategy, cb_on_bar, setting=None):
self.had_night_market = False # whether there is a night session
if 'interval' in setting:
del setting['interval']
if 'bar_interval' in setting:
del setting['bar_interval']
super(CtaDayBar, self).__init__(strategy, cb_on_bar, setting)
def __getstate__(self):
"""移除Pickle dump()时不支持的Attribute"""
return super().__getstate__()
def __setstate__(self, state):
"""Pickle load()"""
self.__dict__.update(state)
def restore(self, state):
"""从Pickle中恢复数据"""
for key in state.__dict__.keys():
self.__dict__[key] = state.__dict__[key]
def init_properties(self):
"""
初始化内部变量
:return:
"""
self.init_param_list()
# 输入参数
self.name = u'DayBar'
self.mode = self.TICK_MODE # 缺省为tick模式
self.interval = Interval.DAILY # 日线级别周期
self.bar_interval = 1 # 缺省为1天
self.minute_interval = 60 * 24
def add_bar(self, bar, bar_is_completed=False, bar_freq=1):
"""
予以外部初始化程序增加bar
:param bar:
:param bar_is_completed: 插入的bar,其周期与K线周期一致,就设为True
:param bar_freq, bar对象得frequency
:return:
"""
# 更新最后价格
self.cur_price = bar.close_price
self.cur_datetime = bar.datetime
bar_len = len(self.line_bar)
if bar_len == 0:
new_bar = copy.deepcopy(bar)
self.line_bar.append(new_bar)
self.cur_trading_day = bar.trading_day if bar.trading_day is not None else bar.date
if bar_is_completed:
self.on_bar(bar)
return
# 与最后一个BAR的时间比对,判断是否超过K线的周期
lastBar = self.line_bar[-1]
self.cur_trading_day = bar.trading_day if bar.trading_day is not None else bar.date
is_new_bar = False
if bar_is_completed:
is_new_bar = True
# Night-session check (current bar after 21:00 while the previous bar was before 21:00)
if not self.is_7x24 and bar.datetime.hour >= 21 and lastBar.datetime.hour < 21:
is_new_bar = True
self.cur_trading_day = bar.trading_day if bar.trading_day is not None else bar.date
# 日期判断
if not is_new_bar and lastBar.trading_day != self.cur_trading_day:
is_new_bar = True
self.cur_trading_day = bar.trading_day if bar.trading_day is not None else bar.date
if is_new_bar:
# 添加新的bar
new_bar = copy.deepcopy(bar)
self.line_bar.append(new_bar)
# 将上一个Bar推送至OnBar事件
self.on_bar(lastBar)
else:
# 更新最后一个bar
# 此段代码,针对一部分短周期生成长周期的k线更新,如3根5分钟k线,合并成1根15分钟k线。
lastBar.close_price = bar.close_price
lastBar.high_price = max(lastBar.high_price, bar.high_price)
lastBar.low_price = min(lastBar.low_price, bar.low_price)
lastBar.volume = lastBar.volume + bar.volume
lastBar.open_interest = bar.open_interest
# 实时计算
self.rt_executed = False
def generate_bar(self, tick):
"""
生成 line Bar
:param tick:
:return:
"""
bar_len = len(self.line_bar)
# 保存第一个K线数据
if bar_len == 0:
self.first_tick(tick)
return
# 清除480周期前的数据,
if bar_len > self.max_hold_bars:
del self.line_bar[0]
# 与最后一个BAR的时间比对,判断是否超过K线周期
lastBar = self.line_bar[-1]
is_new_bar = False
# 交易日期不一致,新的交易日
if len(tick.trading_day) > 0 and tick.trading_day != lastBar.trading_day:
is_new_bar = True
# Crypto: start a new bar when the tick date differs from the bar date. (Disabled; handled by the uniform check above, since Binance starts each day at UTC while OKEx uses Beijing time)
# if self.is_7x24 and tick.date != lastBar.date:
# is_new_bar = True
if is_new_bar:
# 创建并推入新的Bar
self.first_tick(tick)
# 触发OnBar事件
self.on_bar(lastBar)
else:
# 更新当前最后一个bar
self.barFirstTick = False
# 更新最高价、最低价、收盘价、成交量
lastBar.high_price = max(lastBar.high_price, tick.last_price)
lastBar.low_price = min(lastBar.low_price, tick.last_price)
lastBar.close_price = tick.last_price
lastBar.open_interest = tick.open_interest
lastBar.volume += tick.volume
# 更新Bar的颜色
if lastBar.close_price > lastBar.open_price:
lastBar.color = Color.RED
elif lastBar.close_price < lastBar.open_price:
lastBar.color = Color.BLUE
else:
lastBar.color = Color.EQUAL
# 实时计算
self.rt_executed = False
self.lastTick = tick
class CtaWeekBar(CtaLineBar):
"""
Weekly K-line series
"""
def __init__(self, strategy, cb_on_bar, setting=None):
self.had_night_market = False # 是否有夜市
if 'interval' in setting:
del setting['interval']
if 'bar_interval' in setting:
del setting['bar_interval']
super(CtaWeekBar, self).__init__(strategy, cb_on_bar, setting)
# Use Monday as the weekly bar timestamp
self.use_monday = False
# Starting hour/minute/second of the weekly bar
self.bar_start_hour_dt = '21:00:00'
if self.is_7x24:
# Crypto: the week starts on Monday
self.use_monday = True
self.bar_start_hour_dt = '00:00:00'
else:
# Check whether the instrument is a futures contract
if self.underly_symbol is not None:
if len(self.underly_symbol) <= 4:
# It is a futures contract
if get_underlying_symbol(self.underly_symbol) in MARKET_DAY_ONLY:
# Day-session-only futures
self.use_monday = True
if get_underlying_symbol(self.underly_symbol) in MARKET_ZJ:
# CFFEX
self.bar_start_hour_dt = '09:15:00'
else:
# Other day-session futures
self.bar_start_hour_dt = '09:00:00'
else:
# Futures with a night session
self.use_monday = False
self.bar_start_hour_dt = '21:00:00'
else:
# Probably a stock
self.use_monday = True
self.bar_start_hour_dt = '09:30:00'
else:
# Probably a stock
self.use_monday = True
self.bar_start_hour_dt = '09:30:00'
def __getstate__(self):
"""移除Pickle dump()时不支持的Attribute"""
return super().__getstate__()
def __setstate__(self, state):
"""Pickle load()"""
self.__dict__.update(state)
def restore(self, state):
"""从Pickle中恢复数据"""
for key in state.__dict__.keys():
self.__dict__[key] = state.__dict__[key]
def init_properties(self):
"""
初始化内部变量
:return:
"""
self.init_param_list()
# 输入参数
self.name = u'WeekBar'
self.mode = self.TICK_MODE # 缺省为tick模式
self.interval = Interval.WEEKLY # 周线级别周期
self.bar_interval = 1 # 为1周
self.minute_interval = 60 * 24 * 7
def add_bar(self, bar, bar_is_completed=False, bar_freq=1):
"""
Allow an external initialisation routine to add a bar
:param bar:
:param bar_is_completed: set True if the inserted bar's period matches this K-line's period
:param bar_freq: frequency of the bar object
:return:
# Domestic futures: the weekly bar starts at 21:00 on Friday evening
# Stocks: the weekly bar starts on Monday
# Crypto: the weekly bar starts on Monday
"""
# 更新最后价格
self.cur_price = bar.close_price
self.cur_datetime = bar.datetime
bar_len = len(self.line_bar)
if bar_len == 0:
new_bar = copy.deepcopy(bar)
new_bar.datetime = self.get_bar_start_dt(bar.datetime)
self.write_log(u'Weekly bar start time: {}=>{}'.format(bar.datetime, new_bar.datetime))
self.line_bar.append(new_bar)
self.cur_trading_day = bar.trading_day
if bar_is_completed:
self.on_bar(bar)
return
# 与最后一个BAR的时间比对,判断是否超过K线的周期
lastBar = self.line_bar[-1]
self.cur_trading_day = bar.trading_day
is_new_bar = False
if bar_is_completed:
is_new_bar = True
# Time check: more than 7 days since the previous bar
if (bar.datetime - lastBar.datetime).total_seconds() >= 60 * 60 * 24 * 7:
is_new_bar = True
self.cur_trading_day = bar.trading_day
if is_new_bar:
# 添加新的bar
new_bar = copy.deepcopy(bar)
new_bar.datetime = self.get_bar_start_dt(bar.datetime)
self.write_log(u'New weekly bar start time: {}=>{}'.format(bar.datetime, new_bar.datetime))
self.line_bar.append(new_bar)
# 将上一个Bar推送至OnBar事件
self.on_bar(lastBar)
else:
# 更新最后一个bar
# 此段代码,针对一部分短周期生成长周期的k线更新,如3根5分钟k线,合并成1根15分钟k线。
lastBar.close_price = bar.close_price
lastBar.high_price = max(lastBar.high_price, bar.high_price)
lastBar.low_price = min(lastBar.low_price, bar.low_price)
lastBar.volume = lastBar.volume + bar.volume
lastBar.open_interest = bar.open_interest
# 实时计算
self.rt_executed = False
def get_bar_start_dt(self, cur_dt):
"""获取当前时间计算的周线Bar开始时间"""
if self.use_monday:
# Use Monday. E.g. if today is Tuesday, weekday=1, which means subtracting one day
monday_dt = cur_dt.replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=cur_dt.weekday())
start_dt = datetime.strptime(monday_dt.strftime('%Y-%m-%d') + ' ' + self.bar_start_hour_dt,
'%Y-%m-%d %H:%M:%S')
return start_dt
else:
# 使用周五
week_day = cur_dt.weekday()
if week_day >= 5 or (week_day == 4 and cur_dt.hour > 20):
# Saturday or Sunday, or after 21:00 on Friday evening
friday_dt = cur_dt.replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(
days=cur_dt.weekday() - 4)
else:
# 周一~周五白天
friday_dt = cur_dt.replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(
days=cur_dt.weekday() + 2)
friday_night_dt = datetime.strptime(friday_dt.strftime('%Y-%m-%d') + ' ' + self.bar_start_hour_dt,
'%Y-%m-%d %H:%M:%S')
return friday_night_dt
def generate_bar(self, tick):
"""
生成 line Bar
:param tick:
:return:
"""
bar_len = len(self.line_bar)
# 保存第一个K线数据
if bar_len == 0:
self.first_tick(tick)
return
# 清除480周期前的数据,
if bar_len > self.max_hold_bars:
del self.line_bar[0]
# 与最后一个BAR的时间比对,判断是否超过K线周期
lastBar = self.line_bar[-1]
is_new_bar = False
# More than 7 days since the previous bar: start a new weekly bar
if (tick.datetime - lastBar.datetime).total_seconds() >= 60 * 60 * 24 * 7:
is_new_bar = True
if is_new_bar:
# 创建并推入新的Bar
self.first_tick(tick)
# 触发OnBar事件
self.on_bar(lastBar)
else:
# 更新当前最后一个bar
self.barFirstTick = False
# 更新最高价、最低价、收盘价、成交量
lastBar.high_price = max(lastBar.high_price, tick.last_price)
lastBar.low_price = min(lastBar.low_price, tick.last_price)
lastBar.close_price = tick.last_price
lastBar.open_interest = tick.open_interest
# 更新日内总交易量,和bar内交易量
lastBar.volume += tick.volume
# 更新Bar的颜色
if lastBar.close_price > lastBar.open_price:
lastBar.color = Color.RED
elif lastBar.close_price < lastBar.open_price:
lastBar.color = Color.BLUE
else:
lastBar.color = Color.EQUAL
# 实时计算
self.rt_executed = False
self.last_tick = tick
``` |
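A minimal usage sketch of the minute-bar class above. Everything here is hypothetical glue for illustration: the `one_minute_bars` list, the `setting` keys and passing `strategy=None` are assumptions, not the exact vnpy/base-class API.
```python
# Hypothetical sketch only: assumes CtaMinuteBar is importable, that the base class
# reads 'bar_interval' (and indicator lengths such as 'para_ma1_len') from `setting`,
# and that one_minute_bars holds pre-built 1-minute bar objects with the attributes
# used above (datetime, open_price, high_price, low_price, close_price, volume, ...).
def on_5m_bar(bar):
    # called every time a completed 5-minute bar is pushed out via on_bar()
    print(bar.datetime, bar.close_price)

kline = CtaMinuteBar(strategy=None, cb_on_bar=on_5m_bar,
                     setting={'bar_interval': 5, 'para_ma1_len': 20})
for bar in one_minute_bars:
    kline.add_bar(bar, bar_freq=1)

snapshot = kline.get_data()  # dict shaped as described in get_data()'s docstring
```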
{
"source": "123123hgj/codinglife_api",
"score": 2
} |
#### File: codinglife_api/codinglife/renderers.py
```python
from rest_framework.renderers import JSONRenderer
class DDJsonRenderer(JSONRenderer):
def render(self, data, accepted_media_type=None, renderer_context=None):
wrapped_data = {
'code': 1000,
'status': 'success',
'data': data,
'msg': ''
}
return super(DDJsonRenderer, self).render(wrapped_data, accepted_media_type, renderer_context)
```
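One way to wire the renderer into a project, assuming a standard DRF settings module; the dotted path below is inferred from the file layout above and is an assumption.
```python
# settings.py (assumed project settings module)
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': [
        'codinglife.renderers.DDJsonRenderer',  # path assumed from the repo layout
    ],
}
# A view returning Response({'id': 1}) would then be rendered as:
# {"code": 1000, "status": "success", "data": {"id": 1}, "msg": ""}
```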
#### File: codinglife_api/user/serializers.py
```python
from django.contrib.auth.hashers import check_password, make_password
from django.db.models import Q
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import ModelSerializer, Serializer
from user.models import User
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = ['id', 'username', 'email']
class UserCreateSerializer(ModelSerializer):
class Meta:
model = User
fields = ['username', 'password', 'email']
def create(self, validated_data):
validated_data['password'] = make_password(validated_data.get('password'))
instance = self.Meta.model(**validated_data)
instance.save()
print('type of instance:', type(instance))
return instance
class UserLoginSerializer(ModelSerializer):
account = serializers.CharField(max_length=100, help_text="username or email")
class Meta:
model = User
fields = ['account', 'password']
def validate(self, attrs):
user = User.objects.get(Q(username=attrs['account']) | Q(email=attrs['account']))
if check_password(attrs['password'], user.password):
return attrs
raise ValidationError(detail='Incorrect account or password')
class UserEmptySerializer(Serializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
``` |
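A sketch of how the login serializer might be exercised from a view or a shell; the credentials are placeholders. Note that `User.objects.get` inside `validate` raises `DoesNotExist` (rather than a validation error) when the account is unknown.
```python
# Hypothetical usage; credentials are placeholders.
serializer = UserLoginSerializer(data={'account': 'alice', 'password': 'secret'})
if serializer.is_valid():  # runs validate(): the Q-lookup plus check_password
    print('login ok')
else:
    print(serializer.errors)
```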
{
"source": "123385918/Python-Science-Handbook",
"score": 3
} |
#### File: 123385918/Python-Science-Handbook/DNN2.py
```python
import numpy as np
# =============================================================================
# Derivatives of the loss functions
# =============================================================================
der_mse = lambda y_hat,y: y_hat - y
der_llh = lambda y_hat,y: y ## must be paired with a softmax output layer, otherwise incorrect
class SoftLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = np.exp(X)/np.exp(X).sum(1,keepdims=True)
if record: self.temp = rst
return rst
def backward(self, cum_grad):
return self.temp-cum_grad ## must be paired with the der_llh loss derivative, otherwise incorrect
def update(self, l_rate):
pass
class LinearLayer:
def __init__(self, size_in: int, size_out: int):
self.W = np.random.rand(size_in, size_out) ## X*W+B
self.B = np.random.rand(1, size_out)
def forward(self,X,record=False):
if record: self.temp = X
return X.dot(self.W) + self.B
def backward(self,cum_grad):
self.grad_W = np.matmul(self.temp.T,cum_grad)
self.grad_B = np.matmul(cum_grad.T, np.ones(len(self.temp)) )
return np.matmul(cum_grad,self.W.T)
def update(self, l_rate):
self.W -= self.grad_W * l_rate/(len(self.temp))
self.B -= self.grad_B * l_rate/(len(self.temp))
class SigmodLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = 1/(1+np.exp(-X))
if record: self.temp = rst
return rst
def backward(self, cum_grad):
return self.temp*(1-self.temp)*cum_grad
def update(self, l_rate):
pass
class ReluLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = np.where(X < 0, 0, X)
if record: self.temp = rst
return rst
def backward(self, cum_grad):
return np.where(self.temp > 0, 1, 0) * cum_grad
def update(self, l_rate):
pass
class DNN:
def __init__(self,layers:list):
self.layers = layers
def predict(self,X,record=False):
for layer in self.layers:
X = layer.forward(X, record=record)
return X.argmax(1)
def train(self,X,Y,testX,testY,loss=der_mse,batch=10,epoch=50,alpha=.1):
'''batch-GD'''
self.info = []
for t in range(epoch):
batches = np.split(np.random.permutation(len(X)),
np.arange(len(X),step=batch)[1:])
for ids in batches:
## Forward pass, recording the inputs/outputs needed later for differentiation
forward = X[ids].copy()
for layer in self.layers:
forward = layer.forward(forward, record=True)
## Backward pass: compute each layer's parameter gradients
grads = loss(forward, Y[ids]) ## loss derivative (for MSE: y_hat - y)
for layer in self.layers[::-1]:
grads = layer.backward(grads)
## Update each layer's parameters using the gradients
for layer in self.layers:
layer.update(alpha)
## Record training progress
Y_hat = self.predict(testX)
self.info.append({'t':t,'right':(Y_hat==testY.argmax(1)).mean()})
return 'train done!'
if __name__=='__main__':
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# load data
iris = load_iris()
iris.target = pd.get_dummies(iris.target,dtype=float).values
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=.3,random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# train model
## the plainest setup: sigmoid activations + MSE loss
layers = [LinearLayer(4,8),SigmodLayer(),LinearLayer(8,3),
SigmodLayer()]
dnn = DNN(layers)
dnn.train(X_train,y_train,X_test,y_test,
loss=der_mse,batch=10,epoch=50,alpha=1)
info = pd.DataFrame(dnn.info)
info.plot(x='t',y='right',marker='o',ms=3)
## for classification, softmax activation + log-likelihood loss works better
layers = [LinearLayer(4,8),ReluLayer(),LinearLayer(8,3),
SoftLayer()]
dnn = DNN(layers)
dnn.train(X_train,y_train,X_test,y_test,
loss=der_llh,batch=10,epoch=20,alpha=.1)
info = pd.DataFrame(dnn.info)
info.plot(x='t',y='right',marker='o',ms=3)
# x=np.array([[1,2]],dtype=float)
# y=np.array([[1]],dtype=float)
# layers[0].W=np.array([[2,1,3],[1,3,2]],dtype=float)
# layers[0].W=np.array([[2,1,3],[1,3,2]],dtype=float)
# layers[0].B=np.array([[1,2,3]],dtype=float)
# layers[2].W=np.array([[1,3],[1,3],[2,2]],dtype=float)
# layers[2].B=np.array([[2,1]],dtype=float)
# layers[4].W=np.array([[1],[3]],dtype=float)
# layers[4].B=np.array([[2]],dtype=float)
# dnn = DNN(layers)
#
# ## 前向
# o0 = x.copy()
# a0 = layers[0].forward(o0) ## linear
# o1 = layers[1].forward(a0) ## sigmod
# a1 = layers[2].forward(o1) ## linear
# o2 = layers[3].forward(a1) ## sigmod
# a2 = layers[4].forward(o2) ## linear
# o3 = layers[5].forward(a2) ## sigmod
# y_hat = o3.copy()
# ## 后向
# y_hat_der = y_hat-y
# o3_der = y_hat_der*o3*(1-o3)
# a2_der_W = np.matmul(o2.T,o3_der)
# a2_der_B = np.matmul(o3_der.T, np.ones(len(o2)) ) ## o3_der.sum(),得到单元素值
# a2_der = np.matmul(o3_der,layers[4].W.T)
# o2_der = a2_der*o2*(1-o2)
# a1_der_W = np.matmul(o1.T, o2_der)
# a1_der_B = np.matmul(o2_der.T, np.ones(len(o1)) )
# a1_der = np.matmul(o2_der,layers[2].W.T)
# o1_der = a1_der*o1*(1-o1)
# a0_der_W = np.matmul(o0.T, o1_der)
# a0_der_B = np.matmul(o1_der.T, np.ones(len(o0)) )
# a0_der = np.matmul(o1_der,layers[0].W.T)
```
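A quick finite-difference check of LinearLayer's analytic gradient, assuming the classes and `der_mse` above are in scope; the loss 0.5*sum((y_hat-y)^2) matches the `der_mse` convention used here.
```python
# Sanity check: compare grad_W from backward() with a numerical gradient.
import numpy as np

np.random.seed(0)
layer = LinearLayer(3, 2)
x = np.random.rand(5, 3)
y = np.random.rand(5, 2)

out = layer.forward(x, record=True)     # records x for the backward pass
layer.backward(der_mse(out, y))         # fills layer.grad_W = x.T @ (y_hat - y)

eps, (i, j) = 1e-6, (1, 0)
w0 = layer.W[i, j]
layer.W[i, j] = w0 + eps
loss_plus = 0.5 * ((layer.forward(x) - y) ** 2).sum()
layer.W[i, j] = w0 - eps
loss_minus = 0.5 * ((layer.forward(x) - y) ** 2).sum()
layer.W[i, j] = w0

numeric = (loss_plus - loss_minus) / (2 * eps)
print(numeric, layer.grad_W[i, j])      # the two values should be very close
```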
#### File: 123385918/Python-Science-Handbook/DNN.py
```python
import numpy as np
sigmod = lambda x: 1/(1+np.exp(-x))
class DNN:
'''All hidden layers use the sigmoid activation function.
sizes=[5,4,2,1]: input layer with 5 units, 2 hidden layers with 4 and 2 units, output layer with 1 unit'''
def initial(self,sizes):
self.B = [np.random.rand(b) for b in sizes[1:]]
self.W = [np.random.rand(w2,w1) for w1,w2 in zip(sizes[:-1],sizes[1:])] ## W: rows = outputs, cols = inputs
def __init__(self,sizes):
self.initial(sizes)
self.sizes = sizes
def predict(self,X):
for w,b in zip(self.W, self.B):
X = np.apply_along_axis(lambda x: sigmod(w.dot(x)+b), 1, X)
return X.argmax(1)
def train(self,X,Y,testX,testY,batch=10,epoch=50,alpha=.1):
'''batch-GD'''
self.info = []
self.initial(self.sizes)
for t in range(epoch):
batches = np.split(np.random.permutation(len(X)),
np.arange(len(X),step=batch)[1:])
for ids in batches:
x, y = X[ids].copy(), Y[ids].copy()
## Forward pass to collect the intermediate activations
F = [x]
for w,b in zip(self.W, self.B):
x = np.apply_along_axis(lambda row: sigmod(w.dot(row)+b),1,x)
F.append(x)
## Backward pass to compute the error terms
δ = [(x-y)*(x*(1-x))]
for w,f in zip(self.W[1:][::-1],F[1:-1][::-1]):
delta = np.apply_along_axis(lambda row: w.T.dot(row),1,δ[-1])
delta *= f*(1-f)
δ.append(delta)
## Update the parameters layer by layer (front to back)
δ.reverse()
for w,b,d,f in zip(self.W, self.B, δ, F[:-1]):
grad_w = np.sum([i[:,None]*j for i,j in zip(d,f)],axis=0)
w -= alpha/batch * grad_w
b -= alpha/batch * d.sum(0)
## Record training progress
Y_hat = self.predict(testX)
self.info.append({'t':t,'right':(Y_hat==testY.argmax(1)).mean()})
return 'train done!'
if __name__=='__main__':
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# load data
iris = load_iris()
iris.target = pd.get_dummies(iris.target).values
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=.3,random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# train model
dnn = DNN(sizes=[4,5,4,3])
dnn.train(X_train,y_train,X_test,y_test,batch=10,epoch=50,alpha=3)
info = pd.DataFrame(dnn.info)
info.plot(x='t',y='right',marker='o',ms=3)
# load data
mnist = fetch_openml('mnist_784', version=1, data_home='E:/Learn/algorithm_ljp')
mnist.target = pd.get_dummies(mnist.target).values
X_train, X_test, y_train, y_test = train_test_split(
mnist.data, mnist.target, test_size=.3,random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# train model
dnn = DNN(sizes=[784,30,10])
dnn.train(X_train,y_train,X_test,y_test,batch=10,epoch=30,alpha=10)
info = pd.DataFrame(dnn.info)
info.plot(x='t',y='right',marker='o',ms=3)
```
#### File: 123385918/Python-Science-Handbook/HMM.py
```python
import numpy as np
"""
HMM
Problem 1: probability evaluation. Given λ=(A,B,π) and an observation sequence O, compute P(O|λ).
Direct computation:
Following the probability formula, enumerate every possible state sequence of length T, compute the joint
probability of each state sequence with the observation sequence, and sum these probabilities over all
possible state sequences. This is a complete tree of depth T whose children at every node are all hidden
states; it can be written as a DFS traversal or as a recursion.
"""
class HMM:
def __init__(self,A=None,B=None,π=None):
self.A = A
self.B = B
self.π = π
self.N = len(π) ## number of hidden states
def forward(self,O,record = False):
α = self.π*self.B[:,O[0]]
α_T = [α.tolist()]
for t in range(1,len(O)):
α = α.dot(self.A)*self.B[:,O[t]]
if record: α_T.append(α.tolist())
return np.array(α_T) if record else α.sum()
def backward(self,O,record = False):
β = np.ones_like(self.π,dtype=float)
β_T = [β.tolist()]
for t in range(len(O)-2,-1,-1):
β = np.dot(self.A*self.B[:,O[t+1]],β)
if record: β_T.append(β.tolist())
return np.array(β_T[::-1]) if record else np.dot(self.π*self.B[:,O[0]],β)
def em_fit(self,O,N,maxiter=50): ## O: observation sequence, N: number of hidden states
V = np.unique(O)
self.A = np.ones([N,N])/N
self.B = np.ones([N,len(V)])/len(V)
self.π = np.random.sample(N)
self.π /= self.π.sum()
self.p = [0]
T_V = (O[:,None]==V).astype(int) ## T-by-|V| one-hot matrix
while len(self.p)<=maxiter:
## E-step: compute, under the current parameters, the quantities where the Q-function derivative vanishes
T_α = self.forward(O, record = True)
T_β = self.backward(O, record = True)
## M-step: update the Q-function parameters analytically from the E-step quantities
T_αβ = T_α*T_β
self.A *= T_α[:-1].T.dot(T_β[1:]*self.B[:,O[1:]].T)/T_αβ[:-1].sum(0)[:,None]
self.B = T_αβ.T.dot(T_V) / T_αβ.sum(0)[:,None]
self.π = T_αβ[0] / T_αβ[0].sum(0)
## Record P(O|λ) under the current parameters
self.p.append(T_αβ[0].sum())
return 'train done!'
def dp_pred(self,O):
'''DP definition: dp[t,i] is the maximum probability over state paths 1..t that end in state i at time t.
Recurrence: dp[t,i] = max(dp[t-1,:]*A[:,i])*B[i,O[t]]
'''
dp = np.zeros((len(O),self.N))
dp[0] = self.π*self.B[:,O[0]]
for i in range(1,len(O)):
tmp = dp[i-1,:,None]*self.A
dp[i-1] = np.argmax(tmp,axis=0) ## store the back-pointers Ψ (dp[i-1] is no longer needed)
dp[i] = np.max(tmp,axis=0)*self.B[:,O[i]]
path = [dp[i].argmax()]
for i in range(len(O)-2,-1,-1): ## backtrack
path.append(int(dp[i,path[-1]]))
return path[::-1], dp[-1].max()
def eval_prob_direct(self,O):
rst = 0
for n in range(self.N):
stack = [(self.π[n]*self.B[n,O[0]], 0, n)] ## [accumulated probability, observation index i, current state s]
while stack:
p, i, s = stack.pop()
if i ==len(O)-1:
rst += p
continue
for nn in range(self.N):
stack.append((p*self.A[s,nn]*self.B[nn,O[i+1]], i+1, nn))
return rst
def eval_prob_direct2(self,O):
self.rst = 0
def epd(p,i,s):
if i==len(O)-1:
self.rst += p
return
[epd(p*self.A[s,n]*self.B[n,O[i+1]],i+1,n) for n in range(self.N)]
[epd(self.π[n]*self.B[n,O[0]], 0, n) for n in range(self.N)]
return self.rst
if __name__=='__main__':
import pandas as pd
Q = [1, 2, 3]
V = [1, 0] ## 0: red, 1: white
A = np.array([[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]])
B = np.array([[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]])
π = np.array([0.2,
0.4,
0.4])
# O = ['red', 'white', 'red']
O = np.array([0, 1, 0])
hmm = HMM(A,B,π)
## Problem 1 -- observation sequence probability
hmm.backward(O) ## 0.130218
hmm.forward(O) ## 0.130218
hmm.eval_prob_direct(O) ## 0.130218
hmm.eval_prob_direct2(O) ## 0.13021800000000003
## Problem 2 -- parameter estimation
hmm.em_fit(O,N=2,maxiter=20)
pd.DataFrame(hmm.p,columns=['P(O|λ)']).plot(y='P(O|λ)')
## Problem 3 -- hidden state prediction
hmm = HMM(A,B,π)
hmm.dp_pred(O) ## ([2, 2, 2], 0.01467), matches the result in Li Hang's book
```
#### File: 123385918/Python-Science-Handbook/PCA.py
```python
import numpy as np
def pca(X, k):
'''X: m*n matrix, m samples each with n features
k: reduce to k dimensions
'''
m, n = X.shape
if k>n: return X
X = X - X.mean(axis=0) ## centre the data
cov = np.cov(X,rowvar=False,ddof=m-1) ## covariance matrix with normalisation divisor 1
val, vec = np.linalg.eig(cov) ## eigenvalues and eigenvectors
vec = vec[:,np.argpartition(-val,k-(k==n))[:k]] ## eigenvectors of the k largest eigenvalues
return X.dot(vec) ## return the dimensionality-reduced X
if __name__=='__main__':
import matplotlib.pyplot as plt
X = np.array([(2.5,2.4), (0.5,0.7), (2.2,2.9), (1.9,2.2), (3.1,3.0),
(2.3, 2.7), (2, 1.6), (1, 1.1), (1.5, 1.6), (1.1, 0.9)])
X1 = pca(X,1)
X2 = pca(X,2)
plt.scatter(X[:,0],X[:,1], marker='o',c='red')
plt.scatter(X1,np.zeros(len(X1)), marker='o',c='blue')
plt.scatter(X2[:,0],X2[:,1], marker='*',c='blue')
``` |
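A small cross-check against scikit-learn on the same toy data, meant to be appended to the `__main__` block above (it assumes `X`, `np` and `pca` are in scope and that scikit-learn is installed); the two projections should agree up to a per-component sign flip.
```python
# Sanity check: compare with sklearn's PCA (columns may differ by sign).
from sklearn.decomposition import PCA as SkPCA

Z_ours = pca(X, 2)
Z_sk = SkPCA(n_components=2).fit_transform(X)
print(np.allclose(np.abs(Z_ours), np.abs(Z_sk), atol=1e-6))
```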
{
"source": "12341123/segmentation_models.pytorch_new",
"score": 3
} |
#### File: segmentation_models_pytorch/utils/functional.py
```python
import torch
import numpy
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage import label, find_objects  # needed by __distinct_binary_object_correspondences below
def _take_channels(*xs, ignore_channels=None):
if ignore_channels is None:
return xs
else:
channels = [channel for channel in range(xs[0].shape[1]) if channel not in ignore_channels]
xs = [torch.index_select(x, dim=1, index=torch.tensor(channels).to(x.device)) for x in xs]
return xs
def _threshold(x, threshold=None):
if threshold is not None:
return (x > threshold).type(x.dtype)
else:
return x
def iou(pr, gt, eps=1e-7, threshold=None, ignore_channels=None):
"""Calculate Intersection over Union between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: IoU (Jaccard) score
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
intersection = torch.sum(gt * pr)
union = torch.sum(gt) + torch.sum(pr) - intersection + eps
return (intersection + eps) / union
jaccard = iou
def f_score(pr, gt, beta=1, eps=1e-7, threshold=None, ignore_channels=None):
"""Calculate F-score between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
beta (float): positive constant
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: F score
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
tp = torch.sum(gt * pr)
fp = torch.sum(pr) - tp
fn = torch.sum(gt) - tp
score = ((1 + beta ** 2) * tp + eps) \
/ ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps)
return score
def accuracy(pr, gt, threshold=0.5, ignore_channels=None):
"""Calculate accuracy score between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: precision score
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
tp = torch.sum(gt == pr, dtype=pr.dtype)
score = tp / gt.view(-1).shape[0]
return score
def precision(pr, gt, eps=1e-7, threshold=None, ignore_channels=None):
"""Calculate precision score between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: precision score
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
tp = torch.sum(gt * pr)
fp = torch.sum(pr) - tp
score = (tp + eps) / (tp + fp + eps)
return score
def recall(pr, gt, eps=1e-7, threshold=None, ignore_channels=None):
"""Calculate Recall between ground truth and prediction
Args:
pr (torch.Tensor): A list of predicted elements
gt (torch.Tensor): A list of elements that are to be predicted
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: recall score
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
tp = torch.sum(gt * pr)
fn = torch.sum(gt) - tp
score = (tp + eps) / (tp + fn + eps)
return score
#result:pr reference:gt
def obj_fpr(result, reference, connectivity=1):
_, _, _, n_obj_reference, mapping = __distinct_binary_object_correspondences(reference, result, connectivity)
return (n_obj_reference - len(mapping)) / float(n_obj_reference)
def __distinct_binary_object_correspondences(reference, result, connectivity=1):
"""
Determines all distinct (where connectivity is defined by the connectivity parameter
passed to scipy's `generate_binary_structure`) binary objects in both of the input
parameters and returns a 1to1 mapping from the labelled objects in reference to the
corresponding (whereas a one-voxel overlap suffices for correspondence) objects in
result.
All stems from the problem, that the relationship is non-surjective many-to-many.
@return (labelmap1, labelmap2, n_lables1, n_labels2, labelmapping2to1)
"""
result = numpy.atleast_1d(result.astype(bool))
reference = numpy.atleast_1d(reference.astype(bool))
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# label distinct binary objects
labelmap1, n_obj_result = label(result, footprint)
labelmap2, n_obj_reference = label(reference, footprint)
# find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-two-many for later processing
slicers = find_objects(labelmap2) # get windows of labelled objects
mapping = dict() # mappings from labels in labelmap2 to corresponding object labels in labelmap1
used_labels = set() # set to collect all already used labels from labelmap2
one_to_many = list() # list to collect all one-to-many mappings
for l1id, slicer in enumerate(slicers): # iterate over object in labelmap2 and their windows
l1id += 1 # labelled objects have ids starting from 1
bobj = (l1id) == labelmap2[slicer] # find binary object corresponding to the label1 id in the segmentation
l2ids = numpy.unique(labelmap1[slicer][
bobj]) # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)
l2ids = l2ids[0 != l2ids] # remove background identifiers (=0)
if 1 == len(
l2ids): # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used
l2id = l2ids[0]
if not l2id in used_labels:
mapping[l1id] = l2id
used_labels.add(l2id)
elif 1 < len(l2ids): # one-to-many mapping: store relationship for later processing
one_to_many.append((l1id, set(l2ids)))
# process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first
while True:
one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in
one_to_many] # remove already used ids from all sets
one_to_many = [x for x in one_to_many if x[1]] # remove empty sets
one_to_many = sorted(one_to_many, key=lambda x: len(x[1])) # sort by set length
if 0 == len(one_to_many):
break
l2id = one_to_many[0][1].pop() # select an arbitrary target label id from the shortest set
mapping[one_to_many[0][0]] = l2id # add to one-to-one mappings
used_labels.add(l2id) # mark target label as used
one_to_many = one_to_many[1:] # delete the processed set from all sets
return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping
``` |
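A minimal sketch of calling the threshold-based metrics above on dummy tensors; shapes follow the (batch, channel, H, W) convention implied by `_take_channels`.
```python
# Quick smoke test with random tensors (batch=2, channel=1, 8x8 masks).
import torch

pr = torch.rand(2, 1, 8, 8)                  # raw probabilities
gt = (torch.rand(2, 1, 8, 8) > 0.5).float()  # binary ground truth
print(iou(pr, gt, threshold=0.5).item())
print(f_score(pr, gt, beta=1, threshold=0.5).item())
print(accuracy(pr, gt, threshold=0.5).item())
```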
{
"source": "1234224576/ChallengesDiscoverySystem",
"score": 3
} |
#### File: ChallengesDiscoverySystem/classes/TwitterApiClient.py
```python
from requests_oauthlib import OAuth1Session
import json
class TwitterApiClient(object):
"""Connect by TwitterAPI"""
def __init__(self, consumerKey,consumerSecret,accessToken,accessTokenSecret):
self.consumerKey = consumerKey
self.consumerSecret = consumerSecret
self.accessToken = accessToken
self.accessTokenSecret = accessTokenSecret
def create_oath_session(self):
oath = OAuth1Session(self.consumerKey,self.consumerSecret,self.accessToken,self.accessTokenSecret)
return oath
def search(self,keyword):
url = "https://api.twitter.com/1.1/search/tweets.json"
params = {
"q": unicode(keyword),
"lang": "ja",
"result_type": "recent",
"count": "100"
}
oath = self.create_oath_session()
response = oath.get(url, params = params)
print(response.status_code)
if response.status_code != 200:
print "Error code: %d" %(response.status_code)
return None
tweets = json.loads(response.text)
for tweet in tweets["statuses"]:
text = tweet[u'text']
print "text:", text
return tweets
``` |
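A usage sketch with placeholder credentials; the four OAuth keys would come from a Twitter developer app, and since the file above is Python 2 style it should be run under Python 2.
```python
# Placeholder credentials; replace with real keys from a Twitter developer app.
client = TwitterApiClient("CONSUMER_KEY", "CONSUMER_SECRET",
                          "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
tweets = client.search(u"keyword")
if tweets is not None:
    print(len(tweets["statuses"]))
```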
{
"source": "1234224576/Project_Exercis_Car",
"score": 3
} |
#### File: 1234224576/Project_Exercis_Car/3dplotSample.py
```python
import scipy as sp
import matplotlib.pyplot as plt
from math import *
def error(f,x,y):
return sp.sum((f(x)-y)**2)
###backMode####
dataFile = open('back.txt','r')
lines = dataFile.readlines()
count = 0
x_params = []
y_params = []
for line in lines:
x,y = line.split(" ")
x_params.append(-1* (float(x)- 200.0)/ pow(3600,0.5))
y_params.append(-1 * float(y))
###############
###frontMode####
# dataFile = open('front.txt','r')
# lines = dataFile.readlines()
# count = 0
# x_params = []
# y_params = []
# for line in lines:
# x,y = line.split(" ")
# x_params.append((float(x)- 200.0) / pow(3600,0.5))
# y_params.append(float(y))
###############
# ######### test #########
# dataFile = open('test.txt','r')
# lines = dataFile.readlines()
# count = 0
# x_params = []
# y_params = []
# for line in lines:
# x,y = line.split("\t")
# x_params.append(float(x))
# y_params.append(float(y))
#########
# y_params = range(0,count)
# dataFile = open('back_curve.txt','r')
# lines = dataFile.readlines()
# count = 0
# x_params2 = []
# y_params2 = []
# for line in lines:
# x,y = line.split(" ")
# x_params2.append(float(x))
# a = float(y)
# y_params2.append(a)
# fp1 = sp.polyfit(x_params,y_params,100)
# f1 = sp.poly1d(fp1)
# print error(f1,x_params,y_params)
#Plot graph
# fx = sp.linspace(0,5,100)
# plt.plot(fx,f1(fx),linewidth=4)
plt.plot(x_params,y_params,linewidth=2)
# plt.plot(y_params2,x_params2,linewidth=2)
plt.autoscale(tight=True)
plt.grid()
plt.show()
# Reference: http://d.hatena.ne.jp/white_wheels/20100327/p3
```
#### File: 1234224576/Project_Exercis_Car/svr.py
```python
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.svm import SVR
import numpy as np
def error(f,x,y):
return sp.sum((f(x)-y)**2)
dataFile = open('sample2.txt','r')
lines = dataFile.readlines()
x_params = []
y_params = []
for line in lines:
x,y = line.split(" ")
x_params.append(float(x))
y_params.append(float(y))
fp1 = sp.polyfit(y_params,x_params,10)
f1 = sp.poly1d(fp1)
print fp1
#Plot graph
plt.plot(y_params,x_params,"o")
fx = sp.linspace(200,500)
plt.plot(fx,f1(fx),linewidth=4)
plt.autoscale(tight=True)
plt.grid()
plt.show()
``` |
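The script imports scikit-learn's SVR but never uses it; below is a minimal sketch of the SVR route on the same arrays, continuing the script above. The hyperparameters are illustrative, not tuned.
```python
# Sketch: fit an RBF-kernel SVR instead of the degree-10 polynomial.
# C, epsilon and gamma are arbitrary illustrative choices.
X_col = np.array(y_params).reshape(-1, 1)   # predict x from y, as polyfit does above
y_col = np.array(x_params)
svr = SVR(kernel='rbf', C=100.0, epsilon=0.1, gamma='scale')
svr.fit(X_col, y_col)
fx = sp.linspace(200, 500)
plt.plot(fx, svr.predict(fx.reshape(-1, 1)), linewidth=2)
plt.show()
```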
{
"source": "1234567890num/KH2Randomizer",
"score": 2
} |
#### File: 1234567890num/KH2Randomizer/app.py
```python
from flask import Flask, session, Response
from Module.randomCmdMenu import RandomCmdMenu
from Module.randomBGM import RandomBGM
from Module.startingInventory import StartingInventory
from Module.modifier import SeedModifier
from Module.seedEvaluation import SeedValidator
from List.configDict import miscConfig, locationType, expTypes, keybladeAbilities, locationDepth
from List.hashTextEntries import generateHashIcons
import List.LocationList
import flask as fl
from urllib.parse import urlparse
import os, base64, string, datetime, random, ast, zipfile, redis, json, asyncio, copy
from khbr.randomizer import Randomizer as khbr
from Module.hints import Hints
from Module.randomize import KH2Randomizer
from Module.dailySeed import generateDailySeed, getDailyModifiers
from flask_socketio import SocketIO
app = Flask(__name__, static_url_path='/static')
socketio = SocketIO(app, manage_session=False, always_connect=True, async_mode="threading", ping_interval=20)
url = urlparse(os.environ.get("REDIS_TLS_URL"))
development_mode = os.environ.get("DEVELOPMENT_MODE")
if not development_mode:
r = redis.Redis(host=url.hostname, port=url.port, ssl=True, ssl_cert_reqs=None,password=url.password)
seed = None
app.config['SECRET_KEY'] = os.environ.get("SECRET_KEY")
@app.context_processor
def inject_today_date():
return {'today_date': datetime.date.today()}
@app.route('/', methods=['GET','POST'])
def index(message=""):
session.clear()
return fl.render_template('index.jinja', locations = List.LocationList.getOptions(), expTypes = expTypes, miscConfig = miscConfig, keybladeAbilities = keybladeAbilities, message=message, bossEnemyConfig = khbr()._get_game(game="kh2").get_options(), hintSystems = Hints.getOptions(), startingInventory = StartingInventory.getOptions(), seedModifiers = SeedModifier.getOptions(), dailyModifiers = getDailyModifiers(datetime.date.today()))
@app.route('/daily', methods=["GET", ])
def dailySeed():
session.clear()
dailySession = generateDailySeed()
for k in dailySession.keys():
if k == "locations":
session["includeList"] = [locationType(l) for l in dailySession["locations"]]
elif k == "enemyOptions":
session["enemyOptions"] = json.dumps(dailySession[k])
else:
session[k] = dailySession[k]
session["permaLink"] = ""
session["reportDepth"] = locationDepth.SecondVisit
session["preventSelfHinting"] = False
return fl.redirect(fl.url_for('seed'))
@app.route('/seed/<hash>')
def hashedSeed(hash):
session.clear()
sessionVars = r.hgetall(hash)
for var in sessionVars:
session[str(var, 'utf-8')] = json.loads(sessionVars[var])
includeList = session['includeList'][:]
session['includeList'].clear()
for location in includeList:
session['includeList'].append(locationType(location))
print(session)
return seed()
@app.route('/seed',methods=['GET','POST'])
def seed():
if fl.request.method == "POST":
random.seed(str(datetime.datetime.now()))
session['keybladeAbilities'] = fl.request.form.getlist('keybladeAbilities')
if session['keybladeAbilities'] == []:
return fl.redirect(fl.url_for("index", message="Please select at least one keyblade ability type."), code=307)
if int(fl.request.form.get('keybladeMaxStat')) < int(fl.request.form.get('keybladeMinStat')):
return fl.redirect(fl.url_for("index", message="Keyblade minimum stat larger than maximum stat."), code=307)
session['seed'] = fl.escape(fl.request.form.get("seed")) or ""
if session['seed'] == "":
characters = string.ascii_letters + string.digits
session['seed'] = (''.join(random.choice(characters) for i in range(30)))
includeList = fl.request.form.getlist('include') or []
session['includeList'] = [locationType[location.replace("locationType.","")] for location in includeList]
session['seedHashIcons'] = generateHashIcons()
session['formExpMult'] = {
0: float(fl.request.form.get("SummonExp")),
1: float(fl.request.form.get("ValorExp")),
2: float(fl.request.form.get("WisdomExp")),
3: float(fl.request.form.get("LimitExp")),
4: float(fl.request.form.get("MasterExp")),
5: float(fl.request.form.get("FinalExp"))
}
session['soraExpMult'] = float(fl.request.form.get("SoraExp"))
session['levelChoice'] = fl.request.form.get("levelChoice")
session['spoilerLog'] = fl.request.form.get("spoilerLog") or False
session['keybladeMaxStat'] = int(fl.request.form.get("keybladeMaxStat"))
session['keybladeMinStat'] = int(fl.request.form.get("keybladeMinStat"))
session['seedModifiers'] = fl.request.form.getlist("seedModifiers")
session['promiseCharm'] = bool(fl.request.form.get("PromiseCharm") or False)
session['bossEnemy'] = bool(fl.request.form.get("bossEnemy") or False)
enemyOptions = {
"boss": fl.request.form.get("boss"),
"nightmare_bosses": bool(fl.request.form.get("nightmare_bosses")),
"selected_boss": None if fl.request.form.get("selected_boss") == "None" else fl.request.form.get("selected_boss"),
"enemy": fl.request.form.get("enemy"),
"selected_enemy": None if fl.request.form.get("selected_enemy") == "None" else fl.request.form.get("selected_enemy"),
"nightmare_enemies": bool(fl.request.form.get("nightmare_enemies")),
"scale_boss_stats": bool(fl.request.form.get("scale_boss_stats")),
"cups_bosses": bool(fl.request.form.get("cups_bosses")),
"data_bosses": bool(fl.request.form.get("data_bosses")),
"remove_damage_cap": "Remove Damage Cap" in session['seedModifiers']
}
session['enemyOptions'] = json.dumps(enemyOptions)
hintSubstrings = fl.request.form.get("hintsType").split('-')
session['hintsType'] = hintSubstrings[0]
if len(hintSubstrings)==1:
session['reportDepth'] = locationDepth("DataFight") # don't use report depth
else:
session['reportDepth'] = locationDepth(hintSubstrings[1])
session['preventSelfHinting'] = bool(fl.request.form.get("preventSelfHinting") or False)
session['allowProofHinting'] = bool(fl.request.form.get("allowProofHinting") or False)
session['startingInventory'] = fl.request.form.getlist("startingInventory")
session['itemPlacementDifficulty'] = fl.request.form.get("itemPlacementDifficulty")
session['permaLink'] = ''.join(random.choice(string.ascii_uppercase) for i in range(8))
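        # When not in development mode, the whole session is persisted as a redis hash keyed by
        # this 8-letter permaLink (one JSON-encoded field per session key); hashedSeed() reads it
        # back for the /seed/<hash> route.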
if not development_mode:
with r.pipeline() as pipe:
for key in session.keys():
pipe.hmset(session['permaLink'], {key.encode('utf-8'): json.dumps(session.get(key)).encode('utf-8')})
pipe.execute()
return fl.render_template('seed.jinja',
spoilerLog = session.get('spoilerLog'),
permaLink = fl.url_for("hashedSeed",hash=session['permaLink'], _external=True),
cmdMenus = RandomCmdMenu.getOptions(),
bgmOptions = RandomBGM.getOptions(),
bgmGames = RandomBGM.getGames(),
levelChoice = session.get('levelChoice'),
include = [locationType(l) for l in session.get('includeList')],
seed = session.get('seed'),
seedHashIcons = session.get('seedHashIcons'),
worlds=locationType,
expTypes = expTypes,
formExpMult = session.get('formExpMult'),
soraExpMult = session.get('soraExpMult'),
keybladeMinStat = session.get('keybladeMinStat'),
keybladeMaxStat = session.get('keybladeMaxStat'),
keybladeAbilities = session.get('keybladeAbilities'),
enemyOptions = json.loads(session.get("enemyOptions")),
hintsType = session.get("hintsType"),
reportDepth = session.get("reportDepth"),
preventSelfHinting = session.get("preventSelfHinting"),
allowProofHinting = session.get("allowProofHinting"),
startingInventory = session.get("startingInventory"),
itemPlacementDifficulty = session.get("itemPlacementDifficulty"),
seedModifiers = session.get("seedModifiers"),
idConverter = StartingInventory.getIdConverter()
)
@socketio.on('connect')
def handleConnection():
socketio.send("connected")
@socketio.on('download')
def startDownload(data):
print("Started")
seed = socketio.start_background_task(randomizePage, data, dict(session))
def randomizePage(data, sessionDict):
print(data['platform'])
platform = data['platform']
excludeList = list(set(locationType) - set(sessionDict['includeList']))
excludeList.append(sessionDict["levelChoice"])
if sessionDict["itemPlacementDifficulty"] == "Nightmare" and locationType.Puzzle in excludeList:
print("Removing puzzle exclusion due to nightmare...")
excludeList.remove(locationType.Puzzle)
cmdMenuChoice = data["cmdMenuChoice"]
randomBGM = data["randomBGM"]
sessionDict["startingInventory"] += SeedModifier.library("Library of Assemblage" in sessionDict["seedModifiers"]) + SeedModifier.schmovement("Schmovement" in sessionDict["seedModifiers"])
seedValidation = SeedValidator(sessionDict)
notValidSeed = True
originalSeedName = sessionDict['seed']
while notValidSeed:
randomizer = KH2Randomizer(seedName = sessionDict["seed"], seedHashIcons = sessionDict["seedHashIcons"], spoiler=bool(sessionDict["spoilerLog"]))
randomizer.populateLocations(excludeList, maxItemLogic = "Max Logic Item Placement" in sessionDict["seedModifiers"],item_difficulty=sessionDict["itemPlacementDifficulty"], reportDepth=sessionDict["reportDepth"])
randomizer.populateItems(promiseCharm = sessionDict["promiseCharm"], startingInventory = sessionDict["startingInventory"], abilityListModifier=SeedModifier.randomAbilityPool if "Randomize Ability Pool" in sessionDict["seedModifiers"] else None)
if randomizer.validateCount():
randomizer.setKeybladeAbilities(
keybladeAbilities = sessionDict["keybladeAbilities"],
keybladeMinStat = int(sessionDict["keybladeMinStat"]),
keybladeMaxStat = int(sessionDict["keybladeMaxStat"])
)
randomizer.setNoAP("Start with No AP" in sessionDict["seedModifiers"])
randomizer.setRewards(levelChoice = sessionDict["levelChoice"], betterJunk=("Better Junk" in sessionDict["seedModifiers"]), reportDepth=sessionDict["reportDepth"])
randomizer.setLevels(sessionDict["soraExpMult"], formExpMult = sessionDict["formExpMult"], statsList = SeedModifier.glassCannon("Glass Cannon" in sessionDict["seedModifiers"]))
randomizer.setBonusStats()
if not seedValidation.validateSeed(sessionDict, randomizer):
print("ERROR: Seed is not completable! Trying another seed...")
characters = string.ascii_letters + string.digits
sessionDict['seed'] = (''.join(random.choice(characters) for i in range(30)))
continue
randomizer.seedName = originalSeedName
hintsText = Hints.generateHints(randomizer._locationItems, sessionDict["hintsType"], randomizer.seedName, excludeList, sessionDict["preventSelfHinting"], sessionDict["allowProofHinting"])
if hintsText is not None and type(hintsText) is not dict:
# there was an error generating hints, return value provides context
print(f"ERROR: {hintsText}")
characters = string.ascii_letters + string.digits
sessionDict['seed'] = (''.join(random.choice(characters) for i in range(30)))
continue
notValidSeed = False
try:
zip = randomizer.generateZip(randomBGM = randomBGM, platform = platform, startingInventory = sessionDict["startingInventory"], hintsText = hintsText, cmdMenuChoice = cmdMenuChoice, spoilerLog = bool(sessionDict["spoilerLog"]), enemyOptions = json.loads(sessionDict["enemyOptions"]))
if development_mode:
development_mode_path = os.environ.get("DEVELOPMENT_MODE_PATH")
if development_mode_path:
if os.path.exists(development_mode_path):
# Ensure a clean environment
import shutil
shutil.rmtree(development_mode_path)
# Unzip mod into path
import zipfile
zipfile.ZipFile(zip).extractall(development_mode_path)
print("unzipped into {}".format(development_mode_path))
return
socketio.emit('file',zip.read())
except ValueError as err:
print("ERROR: ", err.args)
@app.after_request
def add_header(r):
"""
    Add headers that tell clients not to cache responses, so every page is
    revalidated rather than served from a stale cache.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
if __name__ == '__main__':
socketio.run(app)
randomizer = KH2Randomizer("fdh6h34q6h4q6g62g6h6w46hw464vbvherby39")
randomizer.populateLocations([locationType.LoD, "ExcludeFrom50"])
randomizer.populateItems(startingInventory=["138","537","369"])
if randomizer.validateCount():
randomizer.setKeybladeAbilities()
randomizer.setRewards()
randomizer.setLevels(soraExpMult = 1.5, formExpMult = {'1':6, '2':3, '3':3, '4':3, '5':3})
randomizer.setBonusStats()
zip = randomizer.generateZip(hintsType="JSmartee").getbuffer()
open("randoSeed.zip", "wb").write(zip)
```
#### File: KH2Randomizer/Class/itemClass.py
```python
from List.configDict import itemType
from dataclasses import dataclass
from json import JSONEncoder
@dataclass (frozen=True)
class KH2Item:
Id: int
Name: str
ItemType: itemType
class ItemEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
```
#### File: KH2Randomizer/Module/dailySeed.py
```python
import random, datetime
from collections import namedtuple
from List.hashTextEntries import generateHashIcons
DailyModifier = namedtuple('DailyModifier', ['modifier', 'name', 'description', 'categories'])
# Default Settings are League + Enemy One-to-One + Boss One-to-One
def get_default_settings():
return {
"keybladeAbilities": ["Support"],
"formExpMult": {0: 1.0, 1: 5.0, 2: 3.0, 3: 3.0, 4: 2.0, 5: 3.0},
"soraExpMult": 3,
"levelChoice": "ExcludeFrom50",
"spoilerLog": False,
"keybladeMaxStat": 7,
"keybladeMinStat": 0,
"promiseCharm": False,
"bossEnemy": False,
"enemyOptions": {"boss": "One to One", "enemy": "One to One", "scale_boss_stats": True, "cups_bosses": True},
"hintsType": "JSmartee",
"startingInventory": [],
"seedModifiers": ["Max Logic Item Placement"],
"locations": ["Land of Dragons", "Beast's Castle", "Hollow Bastion", "Cavern of Remembrance", "Twilight Town", "The World That Never Was", "Space Paranoids", "Port Royal", "Olympus Coliseum", "Agrabah", "Halloween Town", "Pride Lands", "Disney Castle / Timeless River", "Hundred Acre Wood", "Simulated Twilight Town", "Absent Silhouettes", "Sephiroth", "Form Levels", "Garden of Assemblage", "Critical Bonuses"],
"itemPlacementDifficulty": "Normal"
}
def powerfulKeyblades(s):
s["keybladeMaxStat"] = 20
def levelItUp(s):
s["levelChoice"] = "ExcludeFrom99"
s["soraExpMult"] = 10.0
def noLevels(s):
s["levelChoice"] = "Level"
s["startingInventory"].append("404")
def goMode(s):
s["startingInventory"].append("593")
s["startingInventory"].append("594")
s["startingInventory"].append("595")
def enableSuperbosses(s):
s["soraExpMult"] = max(s["soraExpMult"], 5)
s["enemyOptions"]["data_bosses"] = True
s["locations"] += ["Sephiroth", "Lingering Will (Terra)", "Data Organization XIII"]
dailyModifiers = [
DailyModifier(name="Level it up",
description="Level 99 but Sora XP multiplier set to 10x",
categories={"xp"},
modifier=levelItUp
),
DailyModifier(name="No Levels",
description="No checks on Levels and you start with No Experience",
categories={"levels", "xp"},
modifier=noLevels
),
DailyModifier(name="Promise Charm",
description="Start the game with the Promise Charm",
categories={'progression'},
modifier=lambda s: exec('s["promiseCharm"] = True')
),
DailyModifier(name="Go Mode",
description="Start the game with all 3 proofs",
categories={'progression'},
modifier=goMode
),
DailyModifier(name="Action Keyblades",
description="Keyblades can have action or support abilities",
categories={'keyblades'},
modifier=lambda s: s["keybladeAbilities"].append("Action")
),
DailyModifier(name="Wild Bosses",
description="Bosses are randomized using the Wild setting",
categories={'bosses'},
modifier=lambda s: exec('s["enemyOptions"]["boss"] = "Wild"')
),
DailyModifier(name="Superbosses",
description="All superbosses will be included in the randomization pool, and their reward locations are added to the item pool, but your XP is at leat times 5",
categories={'bosses', 'worlds'},
modifier=enableSuperbosses
),
DailyModifier(name="X-Ray Vision",
description="Sora starts the game with Scan",
categories={},
modifier=lambda s: s["startingInventory"].append("138")
),
DailyModifier(name="Shananas Hints",
description="Use Shananas hints",
categories={'hints'},
modifier=lambda s: exec('s["hintsType"] = "Shananas"')
),
DailyModifier(name="Glass Cannon",
description="Replaces all defense ups found during level ups",
categories={'levels'},
modifier=lambda s: s["seedModifiers"].append("Glass Cannon")
),
DailyModifier(name="Library of Assemblage",
description="Start the game with every Ansem Report",
categories={'hints'},
modifier=lambda s: s["seedModifiers"].append("Library of Assemblage")
),
DailyModifier(name="Schmovement",
description="Start the game with level 1 of each movement type",
categories={},
modifier=lambda s: s["seedModifiers"].append("Schmovement")
),
DailyModifier(name="Better Junk",
description="Replaces all synthesis materials with better items",
categories={},
modifier=lambda s: s["seedModifiers"].append("Better Junk")
),
DailyModifier(name="Randomize Ability Pool",
description="Pick Sora's Action/Support abilities at random (Guaranteed 1 SC & 1 OM)",
categories={},
modifier=lambda s: s["seedModifiers"].append("Randomize Ability Pool")
),
DailyModifier(name="Have Some Finny Fun",
description="Atlantica is turned on.",
categories={'worlds'},
modifier=lambda s: s["locations"].append("Atlantica")
),
DailyModifier(name="Remove Damage Cap",
description="Remove Damage Cap for Sora dealing damage to enemies",
categories={},
modifier=lambda s: s["seedModifiers"].append("Remove Damage Cap")
),
DailyModifier(name="More Powerful keyblades",
description="Keyblades can have maximum stats of up to 20",
categories={'keyblades'},
modifier=powerfulKeyblades
),
DailyModifier(name="Early Checks",
description="Worlds are more likely to have better checks early, than late",
categories={'itemdifficulty'},
modifier=lambda s: exec('s["itemPlacementDifficulty"] = "Easy"')
),
DailyModifier(name="Late Checks",
description="Worlds are more likely to have better checks early, than late",
categories={'itemdifficulty'},
modifier=lambda s: exec('s["itemPlacementDifficulty"] = "Hard"')
),
DailyModifier(name="No Starting AP",
description="Sora/Donald/Goofy start the game with 0 AP",
categories= {"abilities"},
modifier=lambda s: s["seedModifiers"].append("Start with No AP")
)
]
def getDailyModifiers(date):
random.seed(date.strftime('%D'))
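    # Seeding the RNG with the date string makes the modifier selection deterministic,
    # so every player gets the same daily modifiers for a given date.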
    # Fridays and weekends (isoweekday 5-7) have more modifiers
numMods = 3 if date.isoweekday() < 5 else 5
chosenMods = []
usedCategories = set()
for _ in range(numMods):
availableMods = []
for m in dailyModifiers:
# Don't have more than one modifier from the same category
if m.categories:
if len(m.categories.intersection(usedCategories)) > 0:
continue
# Don't have the same modifier twice
            if m.name in [chosen.name for chosen in chosenMods]:
continue
availableMods.append(m)
chosen = random.choice(availableMods)
chosenMods.append(chosen)
for c in chosen.categories:
usedCategories.add(c)
return chosenMods
# I think I want to make it less side effecty, where this just returns an object
# And app can take responsibility for messing with the session
# and regenerating the location types
def generateDailySeed():
session = dict(get_default_settings())
session["dailyModifiers"] = []
currentDate = datetime.date.today()
modifiers = getDailyModifiers(currentDate)
for mod in modifiers:
mod.modifier(session)
session["dailyModifiers"].append(mod.name)
session["seed"] = "Daily Seed " + currentDate.strftime('%D')
session['seedHashIcons'] = generateHashIcons()
return session
if __name__ == '__main__':
seed = generateDailySeed()
for k,v in seed.items():
print("{}:{}".format(k, v))
``` |
{
"source": "123456789-dnipro/hackaton",
"score": 2
} |
#### File: hackaton/service_api/api.py
```python
from sanic import Blueprint
from sanic import Sanic
def load_api(app: Sanic):
from service_api.resources import SmokeResource
from service_api.resources.login_resource import LogInResource
from service_api.resources.incidents_resource import IncidentResource, IncidentsResource
api = Blueprint('v1', strict_slashes=False)
api.add_route(SmokeResource.as_view(), '/smoke')
api.add_route(LogInResource.as_view(), '/login')
api.add_route(IncidentResource.as_view(), '/incidents/<incident_id:uuid>')
api.add_route(IncidentsResource.as_view(), '/incidents')
app.blueprint(api)
```
#### File: service_api/domain/decorators.py
```python
import asyncio
from functools import wraps
from http import HTTPStatus
from sanic.response import text, json
from service_api.domain.redis import redis
def authorized(f):
@wraps(f)
async def check_authorization(request, *args, **kwargs):
token = request.headers.get('Authorization')
if token and token.startswith('Basic ') and await redis.check_session(token[6:]):
return await f(request, *args, **kwargs)
else:
return text('Not authorized', HTTPStatus.UNAUTHORIZED)
return check_authorization
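# Usage sketch (hypothetical handler, not part of this module): apply the decorator
# to a Sanic view so requests without a valid "Authorization: Basic <token>" session
# in redis are rejected with 401.
# @authorized
# async def incidents_handler(request):
#     return json({"incidents": []})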
def asyncio_task(f):
@wraps(f)
    def wrapper(*args, **kwargs):
        # Run the wrapped coroutine to completion on the current event loop.
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(f(*args, **kwargs))
    return wrapper
``` |
{
"source": "123456thomas/typeidea",
"score": 2
} |
#### File: typeidea/config/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView
from blog.views import CommonViewMixin
from .models import Link
# Create your views here.
class LinkListView(CommonViewMixin,ListView):
queryset = Link.objects.filter(status=Link.STATUS_NORMAL)
template_name = 'config/links.html'
context_object_name = 'link_list'
def links(request):
return HttpResponse('links')
``` |
{
"source": "12345bt/DES1",
"score": 3
} |
#### File: 12345bt/DES1/DES.py
```python
import sys
if sys.version_info[0] == 2:
    # Python 2 only: force a utf-8 default encoding (unnecessary and unavailable on Python 3)
    reload(sys)
    sys.setdefaultencoding('utf-8')
from pyDes import *
from binascii import b2a_hex, a2b_hex
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"KEY", CBC, b"VI", pad=None, padmode=PAD_PKCS5)
class DES():
def encrypt(self,key,iv,data):
k = des(key, CBC, iv, pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
return b2a_hex(d)
def decrypt(self,key,iv,data):
k = des(key, CBC, iv, pad=None, padmode=PAD_PKCS5)
d = a2b_hex(data)
return k.decrypt(d)
# print encrypt("test_KEY",Des_IV,data)
#
# print decrypt("test_KEY",Des_IV,"<KEY>")
``` |
{
"source": "12345bt/newTrackon",
"score": 3
} |
#### File: 12345bt/newTrackon/tracker.py
```python
from urllib import request, parse
from time import time, sleep, gmtime, strftime
import re
import scraper
import logging
import trackon
from collections import deque
from datetime import datetime
import pprint
from dns import resolver
logger = logging.getLogger('trackon_logger')
class Tracker:
def __init__(self, url, host, ip, latency, last_checked, interval, status, uptime, country, country_code,
network, historic, added, last_downtime, last_uptime):
self.url = url
self.host = host
self.ip = ip
self.latency = latency
self.last_checked = last_checked
self.interval = interval
self.status = status
self.uptime = uptime
self.country = country
self.country_code = country_code
self.network = network
self.historic = historic
self.added = added
self.last_downtime = last_downtime
self.last_uptime = last_uptime
@classmethod
def from_url(cls, url):
tracker = cls(url, None, None, None, None, None, None, None, [], [], [], None, None, None, None)
tracker.validate_url()
print('URL is ', url)
tracker.host = parse.urlparse(tracker.url).hostname
tracker.update_ips()
if not tracker.ip:
raise RuntimeError("Can't resolve IP")
tracker.historic = deque(maxlen=1000)
date = datetime.now()
tracker.added = "{}-{}-{}".format(date.day, date.month, date.year)
return tracker
def update_status(self):
try:
self.update_ips()
except RuntimeError:
logger.info('Hostname not found')
return
self.update_ipapi_data()
self.last_checked = int(time())
pp = pprint.PrettyPrinter(width=999999, compact=True)
t1 = time()
debug = {'url': self.url, 'ip': self.ip[0], 'time': strftime("%H:%M:%S UTC", gmtime(t1))}
try:
if parse.urlparse(self.url).scheme == 'udp':
parsed, raw, ip = scraper.announce_udp(self.url)
self.interval = parsed['interval']
pretty_data = pp.pformat(parsed)
debug['info'] = pretty_data.replace(scraper.my_ip, 'redacted')
trackon.raw_data.appendleft(debug)
else:
response = scraper.announce_http(self.url)
self.interval = response['interval']
pretty_data = pp.pformat(response)
debug['info'] = pretty_data.replace(scraper.my_ip, 'redacted')
trackon.raw_data.appendleft(debug)
self.latency = int((time() - t1) * 1000)
self.is_up()
debug['status'] = 1
print("TRACKER UP")
except RuntimeError as e:
logger.info('Tracker down: ' + self.url + ' Cause: ' + str(e))
debug.update({'info': str(e), 'status': 0})
trackon.raw_data.appendleft(debug)
print("TRACKER DOWN")
self.is_down()
self.update_uptime()
trackon.update_in_db(self)
def validate_url(self):
        uchars = re.compile(r'^[a-zA-Z0-9_\-\./:]+$')
url = parse.urlparse(self.url)
if url.scheme not in ['udp', 'http', 'https']:
raise RuntimeError("Tracker URLs have to start with 'udp://', 'http://' or 'https://'")
if uchars.match(url.netloc) and uchars.match(url.path):
url = url._replace(path='/announce')
self.url = url.geturl()
else:
raise RuntimeError("Invalid announce URL")
def update_uptime(self):
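        # self.historic stores the 1/0 results of the most recent checks (deque, maxlen=1000);
        # uptime is the share of successful checks expressed as a percentage.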
uptime = float(0)
for s in self.historic:
uptime += s
self.uptime = (uptime / len(self.historic)) * 100
def update_ips(self):
previous_ips = self.ip
self.ip = []
try:
ipv4 = resolver.query(self.host, 'A')
for rdata in ipv4:
self.ip.append(str(rdata))
except Exception:
pass
try:
ipv6 = resolver.query(self.host, 'AAAA')
for rdata in ipv6:
self.ip.append(str(rdata))
except Exception:
pass
if not self.ip: # If DNS query fails, just preserve the previous IPs. Considering showing "Not found" instead.
self.ip = previous_ips
def update_ipapi_data(self):
self.country = []
self.network = []
self.country_code = []
for ip in self.ip:
ip_data = self.ip_api(ip).splitlines()
if len(ip_data) == 3:
self.country.append(ip_data[0])
self.country_code.append(ip_data[1].lower())
self.network.append(ip_data[2])
def scrape(self):
return scraper.scrape_submitted(self.url)
def is_up(self):
self.status = 1
self.last_uptime = int(time())
self.historic.append(self.status)
def is_down(self):
self.status = 0
self.last_downtime = int(time())
self.historic.append(self.status)
@staticmethod
def ip_api(ip):
try:
response = request.urlopen('http://ip-api.com/line/' + ip + '?fields=country,countryCode,org')
tracker_info = response.read().decode('utf-8')
sleep(0.5) # This wait is to respect the queries per minute limit of IP-API and not get banned
except IOError:
tracker_info = 'Error'
return tracker_info
```
#### File: 12345bt/newTrackon/trackon.py
```python
import os.path as path
import logging
import sqlite3
import pickle
from collections import deque
from ipaddress import ip_address
from threading import Lock
from time import time, sleep
from urllib.parse import urlparse
from tracker import Tracker
max_input_length = 20000
submitted_trackers = deque(maxlen=10000)
if path.exists('raw_data.pickle'):
raw_data = pickle.load(open('raw_data.pickle', 'rb'))
else:
raw_data = deque(maxlen=300)
if path.exists('submitted_data.pickle'):
submitted_data = pickle.load(open('submitted_data.pickle', 'rb'))
else:
submitted_data = deque(maxlen=300)
deque_lock = Lock()
list_lock = Lock()
trackers_list = []
processing_trackers = False
logger = logging.getLogger('trackon_logger')
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def get_all_data_from_db():
conn = sqlite3.connect('trackon.db')
conn.row_factory = dict_factory
c = conn.cursor()
trackers_from_db = []
for row in c.execute("SELECT * FROM STATUS ORDER BY uptime DESC"):
tracker_in_db = Tracker(url=row.get('url'),
host=row.get('host'),
ip=eval(row.get('ip')),
latency=row.get('latency'),
last_checked=row.get('last_checked'),
interval=row.get('interval'),
status=row.get('status'),
uptime=row.get('uptime'),
country=eval(row.get('country')),
country_code=eval(row.get('country_code')),
historic=eval(row.get('historic')),
added=row.get('added'),
network=eval(row.get('network')),
last_downtime=row.get('last_downtime'),
last_uptime=row.get('last_uptime'))
trackers_from_db.append(tracker_in_db)
conn.close()
return trackers_from_db
def process_uptime_and_downtime_time(trackers_unprocessed):
for tracker in trackers_unprocessed:
if tracker.status == 1:
if not tracker.last_downtime:
tracker.status_string = "Working"
else:
time_string = calculate_time_ago(tracker.last_downtime)
tracker.status_string = "Working for " + time_string
elif tracker.status == 0:
if not tracker.last_uptime:
tracker.status_string = "Down"
else:
time_string = calculate_time_ago(tracker.last_uptime)
tracker.status_string = "Down for " + time_string
return trackers_unprocessed
def calculate_time_ago(last_time):
now = int(time())
relative = now - int(last_time)
if relative < 60:
if relative == 1:
return str(int(round(relative))) + " second"
else:
return str(int(round(relative))) + " seconds"
minutes = round(relative / 60)
if minutes < 60:
if minutes == 1:
return str(minutes) + " minute"
else:
return str(minutes) + " minutes"
hours = round(relative / 3600)
if hours < 24:
if hours == 1:
return str(hours) + " hour"
else:
return str(hours) + " hours"
days = round(relative / 86400)
if days < 31:
if days == 1:
return str(days) + " day"
else:
return str(days) + " days"
months = round(relative / 2592000)
if months < 12:
if months == 1:
return str(months) + " month"
else:
return str(months) + " months"
years = round(relative / 31536000)
if years == 1:
return str(years) + " year"
else:
return str(years) + " years"
def enqueue_new_trackers(input_string):
global trackers_list
trackers_list = get_all_data_from_db()
if len(input_string) > max_input_length:
return
new_trackers_list = input_string.split()
for url in new_trackers_list:
print("SUBMITTED " + url)
add_one_tracker_to_submitted_deque(url)
if processing_trackers is False:
process_submitted_deque()
def add_one_tracker_to_submitted_deque(url):
try:
ip_address(urlparse(url).hostname)
print("ADDRESS IS IP")
return
except ValueError:
pass
with deque_lock:
for tracker_in_deque in submitted_trackers:
if urlparse(tracker_in_deque.url).netloc == urlparse(url).netloc:
print("Tracker already in the queue.")
return
with list_lock:
for tracker_in_list in trackers_list:
if tracker_in_list.host == urlparse(url).hostname:
print("Tracker already being tracked.")
return
try:
tracker_candidate = Tracker.from_url(url)
except (RuntimeError, ValueError) as e:
print(e)
return
all_ips_tracked = get_all_ips_tracked()
exists_ip = set(tracker_candidate.ip).intersection(all_ips_tracked)
if exists_ip:
print("IP of the tracker already in the list.")
return
with deque_lock:
submitted_trackers.append(tracker_candidate)
print("Tracker added to the submitted queue")
def process_submitted_deque():
global processing_trackers
processing_trackers = True
while submitted_trackers:
with deque_lock:
tracker = submitted_trackers.popleft()
print("Size of deque: ", len(submitted_trackers))
process_new_tracker(tracker)
pickle.dump(submitted_data, open('submitted_data.pickle', 'wb'))
print("Finished processing new trackers")
processing_trackers = False
def process_new_tracker(tracker_candidate):
print('New tracker: ' + tracker_candidate.url)
all_ips_tracked = get_all_ips_tracked()
exists_ip = set(tracker_candidate.ip).intersection(all_ips_tracked)
if exists_ip:
print("IP of the tracker already in the list.")
return
with list_lock:
for tracker_in_list in trackers_list:
if tracker_in_list.host == urlparse(tracker_candidate.url).hostname:
print("Tracker already being tracked.")
return
logger.info('Contact new tracker ' + tracker_candidate.url)
tracker_candidate.last_checked = int(time())
try:
tracker_candidate.latency, tracker_candidate.interval, tracker_candidate.url = tracker_candidate.scrape()
except (RuntimeError, ValueError):
return
if 300 > tracker_candidate.interval or tracker_candidate.interval > 10800: # trackers with an update interval
# less than 5' and more than 3h
debug = submitted_data.popleft()
info = debug['info']
debug.update({'status': 0,
'info': info + '<br>Tracker rejected for having an interval shorter than 5 minutes or longer than 3 hours'})
submitted_data.appendleft(debug)
return
tracker_candidate.update_ipapi_data()
tracker_candidate.is_up()
tracker_candidate.update_uptime()
insert_in_db(tracker_candidate)
logger.info('TRACKER ADDED TO LIST: ' + tracker_candidate.url)
def update_outdated_trackers():
while True:
now = int(time())
trackers_outdated = []
for tracker in get_all_data_from_db():
if (now - tracker.last_checked) > tracker.interval:
trackers_outdated.append(tracker)
for tracker in trackers_outdated:
print("GONNA UPDATE " + tracker.url)
tracker.update_status()
pickle.dump(raw_data, open('raw_data.pickle', 'wb'))
detect_new_ip_duplicates()
sleep(5)
def detect_new_ip_duplicates():
all_ips = get_all_ips_tracked()
non_duplicates = set()
for ip in all_ips:
if ip not in non_duplicates:
non_duplicates.add(ip)
else:
            logger.info('IP ' + ip + ' is duplicated, manual action required')
print("IP DUPLICATED: " + ip)
def insert_in_db(tracker):
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('INSERT INTO status VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
(tracker.url, tracker.host, str(tracker.ip), tracker.latency, tracker.last_checked, tracker.interval,
tracker.status, tracker.uptime, str(tracker.country), str(tracker.country_code), str(tracker.network),
tracker.added, str(tracker.historic), tracker.last_downtime, tracker.last_uptime,))
conn.commit()
conn.close()
def update_in_db(tracker):
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute(
"UPDATE status SET ip=?, latency=?, last_checked=?, status=?, interval=?, uptime=?,"
" historic=?, country=?, country_code=?, network=?, last_downtime=?, last_uptime=? WHERE url=?",
(str(tracker.ip), tracker.latency, tracker.last_checked, tracker.status, tracker.interval, tracker.uptime,
str(tracker.historic), str(tracker.country), str(tracker.country_code), str(tracker.network),
         tracker.last_downtime, tracker.last_uptime, tracker.url))
conn.commit()
conn.close()
def get_all_ips_tracked():
all_ips_of_all_trackers = []
all_data = get_all_data_from_db()
for tracker_in_list in all_data:
for ip in tracker_in_list.ip:
all_ips_of_all_trackers.append(ip)
return all_ips_of_all_trackers
def list_live():
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE STATUS = 1 ORDER BY UPTIME DESC')
raw_list = c.fetchall()
conn.close()
return format_list(raw_list)
def list_uptime(uptime):
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE UPTIME >= ? ORDER BY UPTIME DESC', (uptime,))
raw_list = c.fetchall()
conn.close()
return format_list(raw_list), len(raw_list)
def api_udp():
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE URL LIKE "udp://%" AND UPTIME >= 95 ORDER BY UPTIME DESC')
raw_list = c.fetchall()
conn.close()
return format_list(raw_list)
def api_http():
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE URL LIKE "http%" AND UPTIME >= 95 ORDER BY UPTIME DESC')
raw_list = c.fetchall()
conn.close()
return format_list(raw_list)
def format_list(raw_list):
formatted_list = ''
for url in raw_list:
url_string = url[0]
formatted_list += url_string + '\n' + '\n'
return formatted_list
``` |
{
"source": "12345ieee/replace-solutionnet",
"score": 2
} |
#### File: 12345ieee/replace-solutionnet/save2export.py
```python
import argparse
import pathlib
from write_backends import ExportWriteBackend, make_level_dicts
from read_backends import SaveReadBackend
def main():
id2name, name2id = make_level_dicts()
read_backend = SaveReadBackend(args.file)
write_backend = ExportWriteBackend('exports', id2name)
level_ids = [name2id[lev] for lev in args.levels] if args.levels else None
levels = read_backend.read_solutions(level_ids, pareto_only=False)
for level in levels:
# CE extra sols are stored as `id!progressive`
clean_id = level['id'].split('!')[0]
write_backend.write_solution(clean_id, args.player_name, level['cycles'],
level['symbols'], level['reactors'], level['mastered'])
components = read_backend.read_components(level["id"])
for component in components:
comp_id = component['rowid']
write_backend.write_component(component)
members = read_backend.read_members(comp_id)
write_backend.write_members(members)
pipes = read_backend.read_pipes(comp_id)
write_backend.write_pipes(pipes)
annotations = read_backend.read_annotations(comp_id)
write_backend.write_annotations(annotations)
write_backend.commit(clean_id, args.schem, args.check_precog)
write_backend.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=pathlib.Path, nargs="?", default="saves/12345ieee/000.user")
parser.add_argument("-n", "--player-name", default="12345ieee")
parser.add_argument("-l", "--levels", nargs='+')
parser.add_argument("-s", "--schem", default=False, action=argparse.BooleanOptionalAction)
parser.add_argument("--check-precog", default=False, action=argparse.BooleanOptionalAction)
args = parser.parse_args()
main()
``` |
{
"source": "1234borkowskip/WD151280",
"score": 3
} |
#### File: 1234borkowskip/WD151280/dab.py
```python
def silnia(a):
wynik = 1
while a > 0:
wynik = a * wynik
a = a - 1
return wynik
print(silnia(3))
print(silnia(10))
print(silnia(5))
```
#### File: 1234borkowskip/WD151280/dad.py
```python
def cg(n, a1=1, q=2):
n = n - 1
while n > 0:
a1 = a1 * q
n = n - 1
return a1
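# Closed form for reference: the n-th term of a geometric sequence is a_n = a1 * q**(n - 1).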
print("{} wyraz ciagu geometrycznego gdzie a1 = {}, q = {} wynosi {}".format(4, 1, 5, cg(4, 1, 5)))
print("{} wyraz ciagu geometrycznego gdzie a1 = {}, q = {} wynosi {}".format(7, 1, 2, cg(7)))
```
#### File: 1234borkowskip/WD151280/daj.py
```python
def zlicz():
zlicz.counter += 1
zlicz.counter = 0
zlicz()
zlicz()
zlicz()
print(zlicz.counter)
``` |
{
"source": "123789g/Python-Beginner-Caeser-Cipher",
"score": 4
} |
#### File: 123789g/Python-Beginner-Caeser-Cipher/CaeserCipher.py
```python
def caeserCipher(string, key):
# Initializing a... cart of sorts. This will hold whatever we need and deliver it through the function.
payload= ""
# This is where the actual encryption happens. To start we are using i to hold our characters. For each character in the *string*, we added as a param in the function, do the following. This loop will keep going for each and every character in the string.
for i in string:
        # This is looking for spaces and basic punctuation. If the character is one of them, we just pass it through unchanged and move on to the next character in the string. Without this small check those characters would be pushed through the shift formula and come out garbled! :scream: So think about this as a way to keep our weird characters.
        if i in (' ', '.', '!', ','):
payload = payload + i
# If the letter/character being delivered through this loop is uppercase then run this.
elif i.isupper():
            # We are iterating using chr. This will take our SINGLE character and pass it through this formula to encrypt it. The key is whatever is passed in when the function is run. No matter what the key is, it needs to wrap around our alphabet. We have to use ASCII to find our letters, and the capital letters in ASCII sit in the range 65 - 90. ¯\_(ツ)_/¯ so we subtract 65 to land in 0-25 and use %(modulus) 26 to wrap back around before adding the 65 back on.
            payload = payload + chr((ord(i) + key - 65) % 26 + 65)
# If the letter/character being delivered through the loop isn't uppercase then it MUST be lowercase. So no matter what run this instead. Also. Lowercase letters are in the ASCII range of 97 - 122. So guess what. That's right we're changing that 65 to 97 and %ing to keep us in that range.
else:
            payload = payload + chr((ord(i) + key - 97) % 26 + 97)
return payload
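# Quick worked example of the shift formula above: with key = 3, 'A' (ASCII 65) becomes
# chr((65 + 3 - 65) % 26 + 65) = chr(68) = 'D', and 'z' (ASCII 122) wraps around to
# chr((122 + 3 - 97) % 26 + 97) = chr(99) = 'c'.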
# Self explanatory. This gathers the user's input and stores it in variables. We'll pass these into the function we made at the start of this.
userString = input("What would you like to encrypt? ")
# We add an int in there to MAKE SURE the user actually puts in a number and not like "nine" or something :v
cipherKey = int(input("What's your key?(How far should the cipher shift?) "))
# Finally. Run the function on user Determined Values
print("Cipher Success! Text Encrypted To:", caeserCipher(userString, cipherKey))
``` |
{
"source": "123972/analisis-numerico-computo-cientifico",
"score": 4
} |
#### File: algoritmos/Python/algorithms_for_uco.py
```python
import math
import numpy as np
from numerical_differentiation import gradient_approximation, \
Hessian_approximation
from line_search import line_search_by_backtracking
from utils import compute_error
def gradient_descent(f, x_0, tol,
tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
gf_symbolic=None):
'''
Method of gradient descent to numerically approximate solution of min f.
Args:
f (fun): definition of function f as lambda expression or function definition.
x_0 (numpy ndarray): initial point for gradient descent method.
tol (float): tolerance that will halt method. Controls norm of gradient of f.
tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
p_ast (float): value of f(x_ast), now it's required that user knows the solution...
maxiter (int): maximum number of iterations.
gf_symbolic (fun): definition of gradient of f. If given, no approximation is
performed via finite differences.
Returns:
x (numpy ndarray): numpy array, approximation of x_ast.
iteration (int): number of iterations.
Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
contains x, approximation of solution. Useful for plotting.
'''
iteration = 0
x = x_0
feval = f(x)
if gf_symbolic:
gfeval = gf_symbolic(x)
else:
gfeval = gradient_approximation(f,x)
normgf = np.linalg.norm(gfeval)
Err_plot_aux = np.zeros(maxiter)
Err_plot_aux[iteration]=compute_error(p_ast,feval)
Err = compute_error(x_ast,x)
n = x.size
x_plot = np.zeros((n,maxiter))
x_plot[:,iteration] = x
print('I\tNormagf\t\tError x_ast\tError p_ast\tline search')
print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}'.format(iteration,normgf,Err,Err_plot_aux[iteration],"---"))
iteration+=1
while(normgf>tol and iteration < maxiter):
dir_desc = -gfeval
der_direct = gfeval.dot(dir_desc)
t = line_search_by_backtracking(f,dir_desc,x,der_direct)
x = x + t*dir_desc
feval = f(x)
if gf_symbolic:
gfeval = gf_symbolic(x)
else:
gfeval = gradient_approximation(f,x)
normgf = np.linalg.norm(gfeval)
Err_plot_aux[iteration]=compute_error(p_ast,feval)
x_plot[:,iteration] = x
Err = compute_error(x_ast,x)
print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,normgf,Err,
Err_plot_aux[iteration],t))
if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
iter_salida=iteration
iteration = maxiter - 1
iteration+=1
print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
print('{} {}'.format("Approximate solution:", x))
cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
Err_plot = Err_plot_aux[cond]
if iteration == maxiter and t < tol_backtracking:
print("Backtracking value less than tol_backtracking, check approximation")
iteration=iter_salida
else:
if iteration == maxiter:
print("Reached maximum of iterations, check approximation")
x_plot = x_plot[:,:iteration]
return [x,iteration,Err_plot,x_plot]
def Newtons_method(f, x_0, tol,
tol_backtracking, x_ast=None, p_ast=None, maxiter=30,
gf_symbolic=None,
Hf_symbolic=None):
'''
Newton's method to numerically approximate solution of min f.
Args:
f (fun): definition of function f as lambda expression or function definition.
x_0 (numpy ndarray): initial point for Newton's method.
tol (float): tolerance that will halt method. Controls stopping criteria.
tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
p_ast (float): value of f(x_ast), now it's required that user knows the solution...
maxiter (int): maximum number of iterations
gf_symbolic (fun): definition of gradient of f. If given, no approximation is
performed via finite differences.
Hf_symbolic (fun): definition of Hessian of f. If given, no approximation is
performed via finite differences.
Returns:
x (numpy ndarray): numpy array, approximation of x_ast.
iteration (int): number of iterations.
Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains in its columns the vectors of approximations. Last column
contains x, approximation of solution. Useful for plotting.
'''
iteration = 0
x = x_0
feval = f(x)
if gf_symbolic:
gfeval = gf_symbolic(x)
else:
gfeval = gradient_approximation(f,x)
if Hf_symbolic:
Hfeval = Hf_symbolic(x)
else:
Hfeval = Hessian_approximation(f,x)
normgf = np.linalg.norm(gfeval)
condHf= np.linalg.cond(Hfeval)
Err_plot_aux = np.zeros(maxiter)
Err_plot_aux[iteration]=compute_error(p_ast,feval)
Err = compute_error(x_ast,x)
n = x.size
x_plot = np.zeros((n,maxiter))
x_plot[:,iteration] = x
#Newton's direction and Newton's decrement
dir_Newton = np.linalg.solve(Hfeval, -gfeval)
dec_Newton = -gfeval.dot(dir_Newton)
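    # dec_Newton equals gf(x)^T Hf(x)^{-1} gf(x), the squared Newton decrement;
    # dec_Newton/2 (used below as stopping_criteria) estimates the gap f(x) - p_ast.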
print('I\tNormgf \tNewton Decrement\tError x_ast\tError p_ast\tline search\tCondHf')
print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{}\t\t{:0.2e}'.format(iteration,normgf,
dec_Newton,Err,
Err_plot_aux[iteration],"---",
condHf))
stopping_criteria = dec_Newton/2
iteration+=1
while(stopping_criteria>tol and iteration < maxiter):
der_direct = -dec_Newton
t = line_search_by_backtracking(f,dir_Newton,x,der_direct)
x = x + t*dir_Newton
feval = f(x)
if gf_symbolic:
gfeval = gf_symbolic(x)
else:
gfeval = gradient_approximation(f,x)
if Hf_symbolic:
Hfeval = Hf_symbolic(x)
else:
Hfeval = Hessian_approximation(f,x)
normgf = np.linalg.norm(gfeval)
condHf= np.linalg.cond(Hfeval)
#Newton's direction and Newton's decrement
dir_Newton = np.linalg.solve(Hfeval, -gfeval)
dec_Newton = -gfeval.dot(dir_Newton)
Err_plot_aux[iteration]=compute_error(p_ast,feval)
x_plot[:,iteration] = x
Err = compute_error(x_ast,x)
print('{}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}\t{:0.2e}'.format(iteration,normgf,
dec_Newton,Err,
Err_plot_aux[iteration],t,
condHf))
stopping_criteria = dec_Newton/2
if t<tol_backtracking: #if t is less than tol_backtracking then we need to check the reason
iter_salida=iteration
iteration = maxiter - 1
iteration+=1
print('{} {:0.2e}'.format("Error of x with respect to x_ast:",Err))
print('{} {}'.format("Approximate solution:", x))
cond = Err_plot_aux > np.finfo(float).eps*10**(-2)
Err_plot = Err_plot_aux[cond]
if iteration == maxiter and t < tol_backtracking:
print("Backtracking value less than tol_backtracking, check approximation")
iteration=iter_salida
else:
if iteration == maxiter:
print("Reached maximum of iterations, check approximation")
x_plot = x_plot[:,:iteration]
return [x,iteration,Err_plot,x_plot]
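# Minimal usage sketch (not part of the original module; the function and values below are
# illustrative only, and it assumes the imports at the top of this module resolve):
# approximate the minimizer of the convex quadratic f(x) = (x_1 - 2)^2 + (x_2 + 3)^2 + 5,
# whose exact solution is x_ast = (2, -3) with optimal value p_ast = 5.
if __name__ == "__main__":
    fo = lambda x: (x[0] - 2)**2 + (x[1] + 3)**2 + 5
    x_0 = np.array([5.0, 1.0])
    x_ast = np.array([2.0, -3.0])
    [x_approx, total_iters, Err_plot, x_plot] = gradient_descent(fo, x_0,
                                                                 tol=1e-8,
                                                                 tol_backtracking=1e-14,
                                                                 x_ast=x_ast,
                                                                 p_ast=5.0)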
```
#### File: algoritmos/Python/line_search.py
```python
from utils import norm_residual, logarithmic_barrier
def line_search_by_backtracking(f,dir_desc,x,
der_direct, alpha=.15, beta=.5):
'''
Line search that sufficiently decreases f restricted to a ray in the direction dir_desc.
Args:
        alpha (float): parameter in line search with backtracking, typically .15
        beta (float): parameter in line search with backtracking, typically .5
f (lambda expression): definition of function f.
dir_desc (array): descent direction.
x (array): numpy array that holds values where line search will be performed.
der_direct (float): directional derivative of f.
Returns:
t (float): positive number for stepsize along dir_desc that sufficiently decreases f.
'''
t=1
if alpha > 1/2:
print('alpha must be less than or equal to 1/2')
t=-1
if beta>1:
print('beta must be less than 1')
t=-1;
if t!=-1:
eval1 = f(x+t*dir_desc)
eval2 = f(x) + alpha*t*der_direct
while eval1 > eval2:
t=beta*t
eval1=f(x+t*dir_desc)
eval2=f(x)+alpha*t*der_direct
return t
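# The loop above enforces the sufficient decrease (Armijo) condition
# f(x + t*dir_desc) <= f(x) + alpha*t*der_direct, shrinking t by the factor beta until it holds.
# Illustrative call (values are only an example): for f(x) = x**2 at x = 3 with
# dir_desc = -6 and der_direct = -36,
# line_search_by_backtracking(lambda x: x**2, -6.0, 3.0, -36.0)
# returns a step t in (0, 1] satisfying that condition.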
def line_search_for_residual_by_backtracking(r_primal, r_dual,dir_desc_primal,dir_desc_dual,x, nu,
norm_residual_eval,
alpha=.15, beta=.5):
'''
    Line search that sufficiently decreases the residual for Newton's infeasible initial point method
restricted to a ray in the direction dir_desc.
Args:
r_primal (fun): definition of primal residual as function definition or lambda expression.
r_dual (fun): definition of dual residual as function definition or lambda expression.
dir_desc_primal (array): descent direction for primal variable.
dir_desc_dual (array): descent direction for dual variable.
x (array): numpy array that holds values where line search will be performed.
nu (array): numpy array that holds values where line search will be performed.
norm_residual_eval (float): norm of residual that has both r_primal and r_dual evaluations in
x and nu
        alpha (float): parameter in line search with backtracking, typically .15
        beta (float): parameter in line search with backtracking, typically .5
Returns:
        t (float): positive number for stepsize along dir_desc that sufficiently decreases the norm of the primal-dual residual.
'''
t=1
if alpha > 1/2:
print('alpha must be less than or equal to 1/2')
t=-1
if beta>1:
print('beta must be less than 1')
t=-1;
if t!=-1:
feas_primal = r_primal(x + t*dir_desc_primal)
feas_dual = r_dual(nu + t*dir_desc_dual )
eval1 = norm_residual(feas_primal, feas_dual)
eval2 = (1-alpha*t)*norm_residual_eval
while eval1 > eval2:
t=beta*t
feas_primal = r_primal(x + t*dir_desc_primal)
feas_dual = r_dual(nu + t*dir_desc_dual )
eval1 = norm_residual(feas_primal, feas_dual)
eval2 = (1-alpha*t)*norm_residual_eval
return t
def line_search_for_log_barrier_by_backtracking(f,dir_desc,x,t_path,
constraint_inequalities,
der_direct, alpha=.15, beta=.5):
'''
    Line search that sufficiently decreases the log-barrier function of f restricted to a ray
    in the direction dir_desc.
    Args:
        alpha (float): parameter in line search with backtracking, typically .15
        beta (float): parameter in line search with backtracking, typically .5
        f (lambda expression): definition of function f.
        dir_desc (array): descent direction.
        x (array): numpy array that holds values where line search will be performed.
        t_path (float): barrier parameter t that defines the point on the central path.
        constraint_inequalities: inequality constraints passed to logarithmic_barrier.
        der_direct (float): directional derivative of the log-barrier function.
    Returns:
        t (float): positive number for stepsize along dir_desc that sufficiently decreases the
            log-barrier function.
'''
t=1
if alpha > 1/2:
print('alpha must be less than or equal to 1/2')
t=-1
if beta>1:
print('beta must be less than 1')
t=-1;
if t!=-1:
eval1 = logarithmic_barrier(f,x + t*dir_desc, t_path,constraint_inequalities)
eval2 = logarithmic_barrier(f,x, t_path,constraint_inequalities) + alpha*t*der_direct
while eval1 > eval2:
t=beta*t
eval1=logarithmic_barrier(f,x + t*dir_desc, t_path,constraint_inequalities)
eval2=logarithmic_barrier(f,x, t_path,constraint_inequalities) + alpha*t*der_direct
return t
``` |
{
"source": "123972/PCA-nutricion",
"score": 2
} |
#### File: site-packages/dask_ml/wrappers.py
```python
import logging
import dask.array as da
import dask.dataframe as dd
import dask.delayed
import numpy as np
import sklearn.base
import sklearn.metrics
from dask_ml.utils import _timer
from ._compat import check_is_fitted
from ._partial import fit
from ._utils import copy_learned_attributes
from .metrics import check_scoring, get_scorer
logger = logging.getLogger(__name__)
class ParallelPostFit(sklearn.base.BaseEstimator, sklearn.base.MetaEstimatorMixin):
"""Meta-estimator for parallel predict and transform.
Parameters
----------
estimator : Estimator
The underlying estimator that is fit.
scoring : string or callable, optional
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique)
strings or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a
single value. Metric functions returning a list/array of values
can be wrapped into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
.. warning::
If None, the estimator's default scorer (if available) is used.
Most scikit-learn estimators will convert large Dask arrays to
a single NumPy array, which may exhaust the memory of your worker.
You probably want to always specify `scoring`.
Notes
-----
.. warning::
This class is not appropriate for parallel or distributed *training*
on large datasets. For that, see :class:`Incremental`, which provides
distributed (but sequential) training. If you're doing distributed
hyperparameter optimization on larger-than-memory datasets, see
:class:`dask_ml.model_selection.IncrementalSearch`.
This estimator does not parallelize the training step. This simply calls
the underlying estimators's ``fit`` method called and copies over the
learned attributes to ``self`` afterwards.
It is helpful for situations where your training dataset is relatively
small (fits on a single machine) but you need to predict or transform
a much larger dataset. ``predict``, ``predict_proba`` and ``transform``
will be done in parallel (potentially distributed if you've connected
to a ``dask.distributed.Client``).
Note that many scikit-learn estimators already predict and transform in
parallel. This meta-estimator may still be useful in those cases when your
dataset is larger than memory, as the distributed scheduler will ensure the
data isn't all read into memory at once.
See Also
--------
Incremental
dask_ml.model_selection.IncrementalSearch
Examples
--------
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> import sklearn.datasets
>>> import dask_ml.datasets
Make a small 1,000 sample 2 training dataset and fit normally.
>>> X, y = sklearn.datasets.make_classification(n_samples=1000,
... random_state=0)
>>> clf = ParallelPostFit(estimator=GradientBoostingClassifier(),
... scoring='accuracy')
>>> clf.fit(X, y)
ParallelPostFit(estimator=GradientBoostingClassifier(...))
>>> clf.classes_
array([0, 1])
Transform and predict return dask outputs for dask inputs.
>>> X_big, y_big = dask_ml.datasets.make_classification(n_samples=100000,
    ...                                                      random_state=0)
>>> clf.predict(X)
dask.array<predict, shape=(10000,), dtype=int64, chunksize=(1000,)>
Which can be computed in parallel.
>>> clf.predict_proba(X).compute()
array([[0.99141094, 0.00858906],
[0.93178389, 0.06821611],
[0.99129105, 0.00870895],
...,
[0.97996652, 0.02003348],
[0.98087444, 0.01912556],
[0.99407016, 0.00592984]])
"""
def __init__(self, estimator=None, scoring=None):
self.estimator = estimator
self.scoring = scoring
def _check_array(self, X):
"""Validate an array for post-fit tasks.
Parameters
----------
X : Union[Array, DataFrame]
Returns
-------
same type as 'X'
Notes
-----
The following checks are applied.
- Ensure that the array is blocked only along the samples.
"""
if isinstance(X, da.Array):
if X.ndim == 2 and X.numblocks[1] > 1:
logger.debug("auto-rechunking 'X'")
if not np.isnan(X.chunks[0]).any():
X = X.rechunk({0: "auto", 1: -1})
else:
X = X.rechunk({1: -1})
return X
@property
def _postfit_estimator(self):
# The estimator instance to use for postfit tasks like score
return self.estimator
def fit(self, X, y=None, **kwargs):
"""Fit the underlying estimator.
Parameters
----------
X, y : array-like
**kwargs
Additional fit-kwargs for the underlying estimator.
Returns
-------
self : object
"""
logger.info("Starting fit")
with _timer("fit", _logger=logger):
result = self.estimator.fit(X, y, **kwargs)
# Copy over learned attributes
copy_learned_attributes(result, self)
copy_learned_attributes(result, self.estimator)
return self
def partial_fit(self, X, y=None, **kwargs):
logger.info("Starting partial_fit")
with _timer("fit", _logger=logger):
result = self.estimator.partial_fit(X, y, **kwargs)
# Copy over learned attributes
copy_learned_attributes(result, self)
copy_learned_attributes(result, self.estimator)
return self
def transform(self, X):
"""Transform block or partition-wise for dask inputs.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``transform`` method, then
an ``AttributeError`` is raised.
Parameters
----------
X : array-like
Returns
-------
transformed : array-like
"""
self._check_method("transform")
X = self._check_array(X)
if isinstance(X, da.Array):
return X.map_blocks(_transform, estimator=self._postfit_estimator)
elif isinstance(X, dd._Frame):
return X.map_partitions(_transform, estimator=self._postfit_estimator)
else:
return _transform(X, estimator=self._postfit_estimator)
def score(self, X, y, compute=True):
"""Returns the score on the given data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
return self.estimator.score(X, y)
"""
scoring = self.scoring
X = self._check_array(X)
y = self._check_array(y)
if not scoring:
if type(self._postfit_estimator).score == sklearn.base.RegressorMixin.score:
scoring = "r2"
elif (
type(self._postfit_estimator).score
== sklearn.base.ClassifierMixin.score
):
scoring = "accuracy"
else:
scoring = self.scoring
if scoring:
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
scorer = sklearn.metrics.get_scorer(scoring)
else:
scorer = get_scorer(scoring, compute=compute)
return scorer(self, X, y)
else:
return self._postfit_estimator.score(X, y)
def predict(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like
"""
self._check_method("predict")
X = self._check_array(X)
if isinstance(X, da.Array):
result = X.map_blocks(
_predict, dtype="int", estimator=self._postfit_estimator, drop_axis=1
)
return result
elif isinstance(X, dd._Frame):
return X.map_partitions(
_predict, estimator=self._postfit_estimator, meta=np.array([1])
)
else:
return _predict(X, estimator=self._postfit_estimator)
def predict_proba(self, X):
"""Probability estimates.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like
"""
X = self._check_array(X)
self._check_method("predict_proba")
if isinstance(X, da.Array):
# XXX: multiclass
return X.map_blocks(
_predict_proba,
estimator=self._postfit_estimator,
dtype="float",
chunks=(X.chunks[0], len(self._postfit_estimator.classes_)),
)
elif isinstance(X, dd._Frame):
return X.map_partitions(_predict_proba, estimator=self._postfit_estimator)
else:
return _predict_proba(X, estimator=self._postfit_estimator)
def predict_log_proba(self, X):
"""Log of proability estimates.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like
"""
self._check_method("predict_log_proba")
return da.log(self.predict_proba(X))
def _check_method(self, method):
"""Check if self.estimator has 'method'.
Raises
------
AttributeError
"""
estimator = self._postfit_estimator
if not hasattr(estimator, method):
msg = "The wrapped estimator '{}' does not have a '{}' method.".format(
estimator, method
)
raise AttributeError(msg)
return getattr(estimator, method)
class Incremental(ParallelPostFit):
"""Metaestimator for feeding Dask Arrays to an estimator blockwise.
This wrapper provides a bridge between Dask objects and estimators
implementing the ``partial_fit`` API. These *incremental learners* can
train on batches of data. This fits well with Dask's blocked data
structures.
.. note::
This meta-estimator is not appropriate for hyperparameter optimization
on larger-than-memory datasets. For that, see
:class:`~dask_ml.model_selection.IncrementalSearchCV` or
:class:`~dask_ml.model_selection.HyperbandSearchCV`.
See the `list of incremental learners`_ in the scikit-learn documentation
for a list of estimators that implement the ``partial_fit`` API. Note that
`Incremental` is not limited to just these classes, it will work on any
estimator implementing ``partial_fit``, including those defined outside of
scikit-learn itself.
Calling :meth:`Incremental.fit` with a Dask Array will pass each block of
the Dask array or arrays to ``estimator.partial_fit`` *sequentially*.
Like :class:`ParallelPostFit`, the methods available after fitting (e.g.
:meth:`Incremental.predict`, etc.) are all parallel and delayed.
The ``estimator_`` attribute is a clone of `estimator` that was actually
used during the call to ``fit``. All attributes learned during training
are available on ``Incremental`` directly.
.. _list of incremental learners: https://scikit-learn.org/stable/modules/computing.html#incremental-learning # noqa
Parameters
----------
estimator : Estimator
Any object supporting the scikit-learn ``parital_fit`` API.
scoring : string or callable, optional
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique)
strings or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a
single value. Metric functions returning a list/array of values
can be wrapped into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
.. warning::
If None, the estimator's default scorer (if available) is used.
Most scikit-learn estimators will convert large Dask arrays to
a single NumPy array, which may exhaust the memory of your worker.
You probably want to always specify `scoring`.
random_state : int or numpy.random.RandomState, optional
Random object that determines how to shuffle blocks.
shuffle_blocks : bool, default True
Determines whether to call ``partial_fit`` on a randomly selected chunk
of the Dask arrays (default), or to fit in sequential order. This does
not control shuffling between blocks or shuffling within each block.
Attributes
----------
estimator_ : Estimator
A clone of `estimator` that was actually fit during the ``.fit`` call.
See Also
--------
ParallelPostFit
dask_ml.model_selection.IncrementalSearchCV
Examples
--------
>>> from dask_ml.wrappers import Incremental
>>> from dask_ml.datasets import make_classification
>>> import sklearn.linear_model
>>> X, y = make_classification(chunks=25)
>>> est = sklearn.linear_model.SGDClassifier()
>>> clf = Incremental(est, scoring='accuracy')
>>> clf.fit(X, y, classes=[0, 1])
When used inside a grid search, prefix the underlying estimator's
parameter names with ``estimator__``.
>>> from sklearn.model_selection import GridSearchCV
>>> param_grid = {"estimator__alpha": [0.1, 1.0, 10.0]}
>>> gs = GridSearchCV(clf, param_grid)
>>> gs.fit(X, y, classes=[0, 1])
"""
def __init__(
self,
estimator=None,
scoring=None,
shuffle_blocks=True,
random_state=None,
assume_equal_chunks=True,
):
self.shuffle_blocks = shuffle_blocks
self.random_state = random_state
self.assume_equal_chunks = assume_equal_chunks
super(Incremental, self).__init__(estimator=estimator, scoring=scoring)
@property
def _postfit_estimator(self):
check_is_fitted(self, "estimator_")
return self.estimator_
def _fit_for_estimator(self, estimator, X, y, **fit_kwargs):
check_scoring(estimator, self.scoring)
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
result = estimator.partial_fit(X=X, y=y, **fit_kwargs)
else:
result = fit(
estimator,
X,
y,
random_state=self.random_state,
shuffle_blocks=self.shuffle_blocks,
assume_equal_chunks=self.assume_equal_chunks,
**fit_kwargs
)
copy_learned_attributes(result, self)
self.estimator_ = result
return self
def fit(self, X, y=None, **fit_kwargs):
estimator = sklearn.base.clone(self.estimator)
self._fit_for_estimator(estimator, X, y, **fit_kwargs)
return self
def partial_fit(self, X, y=None, **fit_kwargs):
"""Fit the underlying estimator.
If this estimator has not been previously fit, this is identical to
:meth:`Incremental.fit`. If it has been previously fit,
``self.estimator_`` is used as the starting point.
Parameters
----------
X, y : array-like
**kwargs
Additional fit-kwargs for the underlying estimator.
Returns
-------
self : object
"""
estimator = getattr(self, "estimator_", None)
if estimator is None:
estimator = sklearn.base.clone(self.estimator)
return self._fit_for_estimator(estimator, X, y, **fit_kwargs)
def _first_block(dask_object):
"""Extract the first block / partition from a dask object
"""
if isinstance(dask_object, da.Array):
if dask_object.ndim > 1 and dask_object.numblocks[-1] != 1:
raise NotImplementedError(
"IID estimators require that the array "
"blocked only along the first axis. "
"Rechunk your array before fitting."
)
shape = (dask_object.chunks[0][0],)
if dask_object.ndim > 1:
shape = shape + (dask_object.chunks[1][0],)
return da.from_delayed(
dask_object.to_delayed().flatten()[0], shape, dask_object.dtype
)
if isinstance(dask_object, dd._Frame):
return dask_object.get_partition(0)
else:
return dask_object
def _predict(part, estimator):
return estimator.predict(part)
def _predict_proba(part, estimator):
return estimator.predict_proba(part)
def _transform(part, estimator):
return estimator.transform(part)
```
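As a quick illustration of the wrapper above, the following sketch (assuming dask, dask-ml and scikit-learn are installed; the estimator and array sizes are arbitrary, illustrative choices) fits on a small in-memory sample and then predicts lazily, block by block, over a larger Dask array:

```python
import numpy as np
import dask.array as da
from sklearn.linear_model import LogisticRegression
from dask_ml.wrappers import ParallelPostFit

rng = np.random.RandomState(0)
X_small = rng.normal(size=(100, 4))
y_small = (X_small[:, 0] > 0).astype(int)

clf = ParallelPostFit(estimator=LogisticRegression())
clf.fit(X_small, y_small)                  # fitting happens eagerly, in memory

X_big = da.random.normal(size=(10_000, 4), chunks=(1_000, 4))
y_pred = clf.predict(X_big)                # lazy: one block of predictions per chunk
print(y_pred.chunks)                       # ((1000, ..., 1000),)
print(y_pred[:5].compute())
```

Each chunk of ``X_big`` is pushed through ``_predict`` via ``map_blocks``, so nothing is computed until ``.compute()`` (or a similar trigger) is called.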
#### File: core/typing/builtins.py
```python
import itertools
import numpy as np
import operator
from numba.core import types, errors
from numba import prange
from numba.parfors.parfor import internal_prange
from numba.core.utils import RANGE_ITER_OBJECTS
from numba.core.typing.templates import (AttributeTemplate, ConcreteTemplate,
AbstractTemplate, infer_global, infer,
infer_getattr, signature,
bound_function, make_callable_template)
from numba.cpython.builtins import get_type_min_value, get_type_max_value
from numba.core.extending import (
typeof_impl, type_callable, models, register_model, make_attribute_wrapper,
)
@infer_global(print)
class Print(AbstractTemplate):
def generic(self, args, kws):
for a in args:
sig = self.context.resolve_function_type("print_item", (a,), {})
if sig is None:
raise TypeError("Type %s is not printable." % a)
assert sig.return_type is types.none
return signature(types.none, *args)
@infer
class PrintItem(AbstractTemplate):
key = "print_item"
def generic(self, args, kws):
arg, = args
return signature(types.none, *args)
@infer_global(abs)
class Abs(ConcreteTemplate):
int_cases = [signature(ty, ty) for ty in sorted(types.signed_domain)]
uint_cases = [signature(ty, ty) for ty in sorted(types.unsigned_domain)]
real_cases = [signature(ty, ty) for ty in sorted(types.real_domain)]
complex_cases = [signature(ty.underlying_float, ty)
for ty in sorted(types.complex_domain)]
cases = int_cases + uint_cases + real_cases + complex_cases
@infer_global(slice)
class Slice(ConcreteTemplate):
cases = [
signature(types.slice2_type, types.intp),
signature(types.slice2_type, types.none),
signature(types.slice2_type, types.none, types.none),
signature(types.slice2_type, types.none, types.intp),
signature(types.slice2_type, types.intp, types.none),
signature(types.slice2_type, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.intp),
signature(types.slice3_type, types.none, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.none, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.none),
signature(types.slice3_type, types.intp, types.none, types.none),
signature(types.slice3_type, types.none, types.intp, types.none),
signature(types.slice3_type, types.none, types.none, types.intp),
signature(types.slice3_type, types.none, types.none, types.none),
]
class Range(ConcreteTemplate):
cases = [
signature(types.range_state32_type, types.int32),
signature(types.range_state32_type, types.int32, types.int32),
signature(types.range_state32_type, types.int32, types.int32,
types.int32),
signature(types.range_state64_type, types.int64),
signature(types.range_state64_type, types.int64, types.int64),
signature(types.range_state64_type, types.int64, types.int64,
types.int64),
signature(types.unsigned_range_state64_type, types.uint64),
signature(types.unsigned_range_state64_type, types.uint64, types.uint64),
signature(types.unsigned_range_state64_type, types.uint64, types.uint64,
types.uint64),
]
for func in RANGE_ITER_OBJECTS:
infer_global(func, typing_key=range)(Range)
infer_global(prange, typing_key=prange)(Range)
infer_global(internal_prange, typing_key=internal_prange)(Range)
@infer
class GetIter(AbstractTemplate):
key = "getiter"
def generic(self, args, kws):
assert not kws
[obj] = args
if isinstance(obj, types.IterableType):
return signature(obj.iterator_type, obj)
@infer
class IterNext(AbstractTemplate):
key = "iternext"
def generic(self, args, kws):
assert not kws
[it] = args
if isinstance(it, types.IteratorType):
return signature(types.Pair(it.yield_type, types.boolean), it)
@infer
class PairFirst(AbstractTemplate):
"""
Given a heterogeneous pair, return the first element.
"""
key = "pair_first"
def generic(self, args, kws):
assert not kws
[pair] = args
if isinstance(pair, types.Pair):
return signature(pair.first_type, pair)
@infer
class PairSecond(AbstractTemplate):
"""
Given a heterogeneous pair, return the second element.
"""
key = "pair_second"
def generic(self, args, kws):
assert not kws
[pair] = args
if isinstance(pair, types.Pair):
return signature(pair.second_type, pair)
def choose_result_bitwidth(*inputs):
return max(types.intp.bitwidth, *(tp.bitwidth for tp in inputs))
def choose_result_int(*inputs):
"""
Choose the integer result type for an operation on integer inputs,
according to the integer typing NBEP.
"""
bitwidth = choose_result_bitwidth(*inputs)
signed = any(tp.signed for tp in inputs)
return types.Integer.from_bitwidth(bitwidth, signed)
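# For example (a sketch of the rule above, assuming a 64-bit platform where
# types.intp is 64 bits wide): choose_result_int(types.int32, types.uint64)
# gives bitwidth = max(64, 32, 64) = 64 and signed = True, i.e. types.int64.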
# The "machine" integer types to take into consideration for operator typing
# (according to the integer typing NBEP)
machine_ints = (
sorted(set((types.intp, types.int64))) +
sorted(set((types.uintp, types.uint64)))
)
# Explicit integer rules for binary operators; smaller ints will be
# automatically upcast.
integer_binop_cases = tuple(
signature(choose_result_int(op1, op2), op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)
)
class BinOp(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.add)
class BinOpAdd(BinOp):
pass
@infer_global(operator.iadd)
class BinOpAdd(BinOp):
pass
@infer_global(operator.sub)
class BinOpSub(BinOp):
pass
@infer_global(operator.isub)
class BinOpSub(BinOp):
pass
@infer_global(operator.mul)
class BinOpMul(BinOp):
pass
@infer_global(operator.imul)
class BinOpMul(BinOp):
pass
@infer_global(operator.mod)
class BinOpMod(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(operator.imod)
class BinOpMod(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(operator.truediv)
class BinOpTrueDiv(ConcreteTemplate):
cases = [signature(types.float64, op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)]
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.itruediv)
class BinOpTrueDiv(ConcreteTemplate):
cases = [signature(types.float64, op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)]
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.floordiv)
class BinOpFloorDiv(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(operator.ifloordiv)
class BinOpFloorDiv(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(divmod)
class DivMod(ConcreteTemplate):
_tys = machine_ints + sorted(types.real_domain)
cases = [signature(types.UniTuple(ty, 2), ty, ty) for ty in _tys]
@infer_global(operator.pow)
class BinOpPower(ConcreteTemplate):
cases = list(integer_binop_cases)
# Ensure that float32 ** int doesn't go through DP computations
cases += [signature(types.float32, types.float32, op)
for op in (types.int32, types.int64, types.uint64)]
cases += [signature(types.float64, types.float64, op)
for op in (types.int32, types.int64, types.uint64)]
cases += [signature(op, op, op)
for op in sorted(types.real_domain)]
cases += [signature(op, op, op)
for op in sorted(types.complex_domain)]
@infer_global(operator.ipow)
class BinOpPower(ConcreteTemplate):
cases = list(integer_binop_cases)
# Ensure that float32 ** int doesn't go through DP computations
cases += [signature(types.float32, types.float32, op)
for op in (types.int32, types.int64, types.uint64)]
cases += [signature(types.float64, types.float64, op)
for op in (types.int32, types.int64, types.uint64)]
cases += [signature(op, op, op)
for op in sorted(types.real_domain)]
cases += [signature(op, op, op)
for op in sorted(types.complex_domain)]
@infer_global(pow)
class PowerBuiltin(BinOpPower):
# TODO add 3 operand version
pass
class BitwiseShiftOperation(ConcreteTemplate):
# For bitshifts, only the first operand's signedness matters
# to choose the operation's signedness (the second operand
# should always be positive but will generally be considered
# signed anyway, since it's often a constant integer).
# (also, see issue #1995 for right-shifts)
# The RHS type is fixed to 64-bit signed/unsigned ints.
# The implementation will always cast the operands to the width of the
# result type, which is the widest between the LHS type and (u)intp.
cases = [signature(max(op, types.intp), op, op2)
for op in sorted(types.signed_domain)
for op2 in [types.uint64, types.int64]]
cases += [signature(max(op, types.uintp), op, op2)
for op in sorted(types.unsigned_domain)
for op2 in [types.uint64, types.int64]]
unsafe_casting = False
@infer_global(operator.lshift)
class BitwiseLeftShift(BitwiseShiftOperation):
pass
@infer_global(operator.ilshift)
class BitwiseLeftShift(BitwiseShiftOperation):
pass
@infer_global(operator.rshift)
class BitwiseRightShift(BitwiseShiftOperation):
pass
@infer_global(operator.irshift)
class BitwiseRightShift(BitwiseShiftOperation):
pass
class BitwiseLogicOperation(BinOp):
cases = [signature(types.boolean, types.boolean, types.boolean)]
cases += list(integer_binop_cases)
unsafe_casting = False
@infer_global(operator.and_)
class BitwiseAnd(BitwiseLogicOperation):
pass
@infer_global(operator.iand)
class BitwiseAnd(BitwiseLogicOperation):
pass
@infer_global(operator.or_)
class BitwiseOr(BitwiseLogicOperation):
pass
@infer_global(operator.ior)
class BitwiseOr(BitwiseLogicOperation):
pass
@infer_global(operator.xor)
class BitwiseXor(BitwiseLogicOperation):
pass
@infer_global(operator.ixor)
class BitwiseXor(BitwiseLogicOperation):
pass
# Bitwise invert and negate are special: we must not upcast the operand
# for unsigned numbers, as that would change the result.
# (i.e. ~np.int8(0) == 255 but ~np.int32(0) == 4294967295).
@infer_global(operator.invert)
class BitwiseInvert(ConcreteTemplate):
# Note Numba follows the Numpy semantics of returning a bool,
# while Python returns an int. This makes it consistent with
# np.invert() and makes array expressions correct.
cases = [signature(types.boolean, types.boolean)]
cases += [signature(choose_result_int(op), op) for op in sorted(types.unsigned_domain)]
cases += [signature(choose_result_int(op), op) for op in sorted(types.signed_domain)]
unsafe_casting = False
class UnaryOp(ConcreteTemplate):
cases = [signature(choose_result_int(op), op) for op in sorted(types.unsigned_domain)]
cases += [signature(choose_result_int(op), op) for op in sorted(types.signed_domain)]
cases += [signature(op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op) for op in sorted(types.complex_domain)]
cases += [signature(types.intp, types.boolean)]
@infer_global(operator.neg)
class UnaryNegate(UnaryOp):
pass
@infer_global(operator.pos)
class UnaryPositive(UnaryOp):
pass
@infer_global(operator.not_)
class UnaryNot(ConcreteTemplate):
cases = [signature(types.boolean, types.boolean)]
cases += [signature(types.boolean, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.real_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.complex_domain)]
class OrderedCmpOp(ConcreteTemplate):
cases = [signature(types.boolean, types.boolean, types.boolean)]
cases += [signature(types.boolean, op, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op, op) for op in sorted(types.real_domain)]
class UnorderedCmpOp(ConcreteTemplate):
cases = OrderedCmpOp.cases + [
signature(types.boolean, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.lt)
class CmpOpLt(OrderedCmpOp):
pass
@infer_global(operator.le)
class CmpOpLe(OrderedCmpOp):
pass
@infer_global(operator.gt)
class CmpOpGt(OrderedCmpOp):
pass
@infer_global(operator.ge)
class CmpOpGe(OrderedCmpOp):
pass
@infer_global(operator.eq)
class CmpOpEq(UnorderedCmpOp):
pass
@infer_global(operator.eq)
class ConstOpEq(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(arg1, arg2) = args
if isinstance(arg1, types.Literal) and isinstance(arg2, types.Literal):
return signature(types.boolean, arg1, arg2)
@infer_global(operator.ne)
class ConstOpNotEq(ConstOpEq):
pass
@infer_global(operator.ne)
class CmpOpNe(UnorderedCmpOp):
pass
class TupleCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.BaseTuple) and isinstance(rhs, types.BaseTuple):
for u, v in zip(lhs, rhs):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key, (u, v), {})
if res is None:
break
else:
return signature(types.boolean, lhs, rhs)
@infer_global(operator.eq)
class TupleEq(TupleCompare):
pass
@infer_global(operator.ne)
class TupleNe(TupleCompare):
pass
@infer_global(operator.ge)
class TupleGe(TupleCompare):
pass
@infer_global(operator.gt)
class TupleGt(TupleCompare):
pass
@infer_global(operator.le)
class TupleLe(TupleCompare):
pass
@infer_global(operator.lt)
class TupleLt(TupleCompare):
pass
@infer_global(operator.add)
class TupleAdd(AbstractTemplate):
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if (isinstance(a, types.BaseTuple) and isinstance(b, types.BaseTuple)
and not isinstance(a, types.BaseNamedTuple)
and not isinstance(b, types.BaseNamedTuple)):
res = types.BaseTuple.from_types(tuple(a) + tuple(b))
return signature(res, a, b)
class CmpOpIdentity(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
return signature(types.boolean, lhs, rhs)
@infer_global(operator.is_)
class CmpOpIs(CmpOpIdentity):
pass
@infer_global(operator.is_not)
class CmpOpIsNot(CmpOpIdentity):
pass
def normalize_1d_index(index):
"""
Normalize the *index* type (an integer or slice) for indexing a 1D
sequence.
"""
if isinstance(index, types.SliceType):
return index
elif isinstance(index, types.Integer):
return types.intp if index.signed else types.uintp
@infer_global(operator.getitem)
class GetItemCPointer(AbstractTemplate):
def generic(self, args, kws):
assert not kws
ptr, idx = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(ptr.dtype, ptr, normalize_1d_index(idx))
@infer_global(operator.setitem)
class SetItemCPointer(AbstractTemplate):
def generic(self, args, kws):
assert not kws
ptr, idx, val = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(types.none, ptr, normalize_1d_index(idx), ptr.dtype)
@infer_global(len)
class Len(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Buffer, types.BaseTuple)):
return signature(types.intp, val)
elif isinstance(val, (types.RangeType)):
return signature(val.dtype, val)
@infer_global(tuple)
class TupleConstructor(AbstractTemplate):
def generic(self, args, kws):
assert not kws
# empty tuple case
if len(args) == 0:
return signature(types.Tuple(()))
(val,) = args
# tuple as input
if isinstance(val, types.BaseTuple):
return signature(val, val)
@infer_global(operator.contains)
class Contains(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(seq, val) = args
if isinstance(seq, (types.Sequence)):
return signature(types.boolean, seq, val)
@infer_global(operator.truth)
class TupleBool(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.BaseTuple)):
return signature(types.boolean, val)
@infer
class StaticGetItemTuple(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
tup, idx = args
if not isinstance(tup, types.BaseTuple):
return
if isinstance(idx, int):
ret = tup.types[idx]
elif isinstance(idx, slice):
ret = types.BaseTuple.from_types(tup.types[idx])
return signature(ret, *args)
# Generic implementation for "not in"
@infer
class GenericNotIn(AbstractTemplate):
key = "not in"
def generic(self, args, kws):
args = args[::-1]
sig = self.context.resolve_function_type(operator.contains, args, kws)
return signature(sig.return_type, *sig.args[::-1])
#-------------------------------------------------------------------------------
@infer_getattr
class MemoryViewAttribute(AttributeTemplate):
key = types.MemoryView
def resolve_contiguous(self, buf):
return types.boolean
def resolve_c_contiguous(self, buf):
return types.boolean
def resolve_f_contiguous(self, buf):
return types.boolean
def resolve_itemsize(self, buf):
return types.intp
def resolve_nbytes(self, buf):
return types.intp
def resolve_readonly(self, buf):
return types.boolean
def resolve_shape(self, buf):
return types.UniTuple(types.intp, buf.ndim)
def resolve_strides(self, buf):
return types.UniTuple(types.intp, buf.ndim)
def resolve_ndim(self, buf):
return types.intp
#-------------------------------------------------------------------------------
@infer_getattr
class BooleanAttribute(AttributeTemplate):
key = types.Boolean
def resolve___class__(self, ty):
return types.NumberClass(ty)
@bound_function("number.item")
def resolve_item(self, ty, args, kws):
assert not kws
if not args:
return signature(ty)
@infer_getattr
class NumberAttribute(AttributeTemplate):
key = types.Number
def resolve___class__(self, ty):
return types.NumberClass(ty)
def resolve_real(self, ty):
return getattr(ty, "underlying_float", ty)
def resolve_imag(self, ty):
return getattr(ty, "underlying_float", ty)
@bound_function("complex.conjugate")
def resolve_conjugate(self, ty, args, kws):
assert not args
assert not kws
return signature(ty)
@bound_function("number.item")
def resolve_item(self, ty, args, kws):
assert not kws
if not args:
return signature(ty)
@infer_getattr
class SliceAttribute(AttributeTemplate):
key = types.SliceType
def resolve_start(self, ty):
return types.intp
def resolve_stop(self, ty):
return types.intp
def resolve_step(self, ty):
return types.intp
@bound_function("slice.indices")
def resolve_indices(self, ty, args, kws):
assert not kws
if len(args) != 1:
raise TypeError(
"indices() takes exactly one argument (%d given)" % len(args)
)
typ, = args
if not isinstance(typ, types.Integer):
raise TypeError(
"'%s' object cannot be interpreted as an integer" % typ
)
return signature(types.UniTuple(types.intp, 3), types.intp)
#-------------------------------------------------------------------------------
@infer_getattr
class NumberClassAttribute(AttributeTemplate):
key = types.NumberClass
def resolve___call__(self, classty):
"""
Resolve a number class's constructor (e.g. calling int(...))
"""
ty = classty.instance_type
def typer(val):
if isinstance(val, (types.BaseTuple, types.Sequence)):
# Array constructor, e.g. np.int32([1, 2])
sig = self.context.resolve_function_type(
np.array, (val,), {'dtype': types.DType(ty)})
return sig.return_type
else:
# Scalar constructor, e.g. np.int32(42)
return ty
return types.Function(make_callable_template(key=ty, typer=typer))
@infer_getattr
class TypeRefAttribute(AttributeTemplate):
key = types.TypeRef
def resolve___call__(self, classty):
"""
Resolve a number class's constructor (e.g. calling int(...))
Note:
This is needed because of the limitation of the current type-system
implementation. Specifically, the lack of a higher-order type
(i.e. passing the ``DictType`` vs ``DictType(key_type, value_type)``)
"""
ty = classty.instance_type
if isinstance(ty, type) and issubclass(ty, types.Type):
# Redirect the typing to a:
# @type_callable(ty)
# def typeddict_call(context):
# ...
# For example, see numba/typed/typeddict.py
# @type_callable(DictType)
# def typeddict_call(context):
class Redirect(object):
def __init__(self, context):
self.context = context
def __call__(self, *args, **kwargs):
result = self.context.resolve_function_type(ty, args, kwargs)
if hasattr(result, "pysig"):
self.pysig = result.pysig
return result
return types.Function(make_callable_template(key=ty,
typer=Redirect(self.context)))
#------------------------------------------------------------------------------
class MinMaxBase(AbstractTemplate):
def _unify_minmax(self, tys):
for ty in tys:
if not isinstance(ty, types.Number):
return
return self.context.unify_types(*tys)
def generic(self, args, kws):
"""
Resolve a min() or max() call.
"""
assert not kws
if not args:
return
if len(args) == 1:
# max(arg) only supported if arg is an iterable
if isinstance(args[0], types.BaseTuple):
tys = list(args[0])
if not tys:
raise TypeError("%s() argument is an empty tuple"
% (self.key.__name__,))
else:
return
else:
# max(*args)
tys = args
retty = self._unify_minmax(tys)
if retty is not None:
return signature(retty, *args)
@infer_global(max)
class Max(MinMaxBase):
pass
@infer_global(min)
class Min(MinMaxBase):
pass
@infer_global(round)
class Round(ConcreteTemplate):
cases = [
signature(types.intp, types.float32),
signature(types.int64, types.float64),
signature(types.float32, types.float32, types.intp),
signature(types.float64, types.float64, types.intp),
]
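# For instance, per the cases above, round(x) on a float64 argument is typed as
# int64 (one-argument form), while round(x, ndigits) keeps the float64 type.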
#------------------------------------------------------------------------------
@infer_global(bool)
class Bool(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[arg] = args
if isinstance(arg, (types.Boolean, types.Number)):
return signature(types.boolean, arg)
# XXX typing for bool cannot be polymorphic because of the
# types.Function thing, so we redirect to the operator.truth
# intrinsic.
return self.context.resolve_function_type(operator.truth, args, kws)
@infer_global(int)
class Int(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[arg] = args
if isinstance(arg, types.Integer):
return signature(arg, arg)
if isinstance(arg, (types.Float, types.Boolean)):
return signature(types.intp, arg)
@infer_global(float)
class Float(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("float() only support for numbers")
if arg in types.complex_domain:
raise TypeError("float() does not support complex")
if arg in types.integer_domain:
return signature(types.float64, arg)
elif arg in types.real_domain:
return signature(arg, arg)
@infer_global(complex)
class Complex(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if len(args) == 1:
[arg] = args
if arg not in types.number_domain:
raise TypeError("complex() only support for numbers")
if arg == types.float32:
return signature(types.complex64, arg)
else:
return signature(types.complex128, arg)
elif len(args) == 2:
[real, imag] = args
if (real not in types.number_domain or
imag not in types.number_domain):
raise TypeError("complex() only support for numbers")
if real == imag == types.float32:
return signature(types.complex64, real, imag)
else:
return signature(types.complex128, real, imag)
#------------------------------------------------------------------------------
@infer_global(enumerate)
class Enumerate(AbstractTemplate):
def generic(self, args, kws):
assert not kws
it = args[0]
if len(args) > 1 and not isinstance(args[1], types.Integer):
raise TypeError("Only integers supported as start value in "
"enumerate")
elif len(args) > 2:
#let python raise its own error
enumerate(*args)
if isinstance(it, types.IterableType):
enumerate_type = types.EnumerateType(it)
return signature(enumerate_type, *args)
@infer_global(zip)
class Zip(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if all(isinstance(it, types.IterableType) for it in args):
zip_type = types.ZipType(args)
return signature(zip_type, *args)
@infer_global(iter)
class Iter(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if len(args) == 1:
it = args[0]
if isinstance(it, types.IterableType):
return signature(it.iterator_type, *args)
@infer_global(next)
class Next(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if len(args) == 1:
it = args[0]
if isinstance(it, types.IteratorType):
return signature(it.yield_type, *args)
#------------------------------------------------------------------------------
@infer_global(type)
class TypeBuiltin(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if len(args) == 1:
# One-argument type() -> return the __class__
classty = self.context.resolve_getattr(args[0], "__class__")
if classty is not None:
return signature(classty, *args)
#------------------------------------------------------------------------------
@infer_getattr
class OptionalAttribute(AttributeTemplate):
key = types.Optional
def generic_resolve(self, optional, attr):
return self.context.resolve_getattr(optional.type, attr)
#------------------------------------------------------------------------------
@infer_getattr
class DeferredAttribute(AttributeTemplate):
key = types.DeferredType
def generic_resolve(self, deferred, attr):
return self.context.resolve_getattr(deferred.get(), attr)
#------------------------------------------------------------------------------
@infer_global(get_type_min_value)
@infer_global(get_type_max_value)
class MinValInfer(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 1
assert isinstance(args[0], (types.DType, types.NumberClass))
return signature(args[0].dtype, *args)
#------------------------------------------------------------------------------
class IndexValue(object):
"""
Index and value
"""
def __init__(self, ind, val):
self.index = ind
self.value = val
def __repr__(self):
return 'IndexValue(%f, %f)' % (self.index, self.value)
class IndexValueType(types.Type):
def __init__(self, val_typ):
self.val_typ = val_typ
super(IndexValueType, self).__init__(
name='IndexValueType({})'.format(val_typ))
@typeof_impl.register(IndexValue)
def typeof_index(val, c):
val_typ = typeof_impl(val.value, c)
return IndexValueType(val_typ)
@type_callable(IndexValue)
def type_index_value(context):
def typer(ind, mval):
if ind == types.intp or ind == types.uintp:
return IndexValueType(mval)
return typer
@register_model(IndexValueType)
class IndexValueModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('index', types.intp),
('value', fe_type.val_typ),
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(IndexValueType, 'index', 'index')
make_attribute_wrapper(IndexValueType, 'value', 'value')
```
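A small, hedged check of the integer-typing templates above (assuming Numba is installed; the helper functions below are purely illustrative): true division of two machine integers is typed as ``float64``, and mixing a signed 32-bit with an unsigned 64-bit integer under ``+`` resolves to ``int64``.

```python
import numpy as np
from numba import njit

@njit
def div(a, b):
    return a / b

@njit
def add(a, b):
    return a + b

div(np.int64(7), np.int64(2))
add(np.int32(3), np.uint64(4))
print(div.nopython_signatures)   # e.g. [(int64, int64) -> float64]
print(add.nopython_signatures)   # e.g. [(int32, uint64) -> int64]
```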
#### File: numba/cuda/stubs.py
```python
import operator
import numpy
import llvmlite.llvmpy.core as lc
from numba.core.rewrites.macros import Macro
from numba.core import types, typing, ir
from .cudadrv import nvvm
class Stub(object):
'''A stub object to represent special objects that are meaningless
outside the context of CUDA-Python.
'''
_description_ = '<ptx special value>'
__slots__ = () # don't allocate __dict__
def __new__(cls):
raise NotImplementedError("%s is not instantiable" % cls)
def __repr__(self):
return self._description_
#-------------------------------------------------------------------------------
# SREG
SREG_SIGNATURE = typing.signature(types.int32)
class threadIdx(Stub):
'''
The thread indices in the current thread block, accessed through the
attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
range from 0 inclusive to the corresponding value of the attribute in
:attr:`numba.cuda.blockDim` exclusive.
'''
_description_ = '<threadIdx.{x,y,z}>'
x = Macro('tid.x', SREG_SIGNATURE)
y = Macro('tid.y', SREG_SIGNATURE)
z = Macro('tid.z', SREG_SIGNATURE)
class blockIdx(Stub):
'''
The block indices in the grid of thread blocks, accessed through the
attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
range from 0 inclusive to the corresponding value of the attribute in
:attr:`numba.cuda.gridDim` exclusive.
'''
_description_ = '<blockIdx.{x,y,z}>'
x = Macro('ctaid.x', SREG_SIGNATURE)
y = Macro('ctaid.y', SREG_SIGNATURE)
z = Macro('ctaid.z', SREG_SIGNATURE)
class blockDim(Stub):
'''
The shape of a block of threads, as declared when instantiating the
kernel. This value is the same for all threads in a given kernel, even
if they belong to different blocks (i.e. each block is "full").
'''
x = Macro('ntid.x', SREG_SIGNATURE)
y = Macro('ntid.y', SREG_SIGNATURE)
z = Macro('ntid.z', SREG_SIGNATURE)
class gridDim(Stub):
'''
The shape of the grid of blocks, accessed through the attributes ``x``,
``y``, and ``z``.
'''
_description_ = '<gridDim.{x,y,z}>'
x = Macro('nctaid.x', SREG_SIGNATURE)
y = Macro('nctaid.y', SREG_SIGNATURE)
z = Macro('nctaid.z', SREG_SIGNATURE)
warpsize = Macro('warpsize', SREG_SIGNATURE)
laneid = Macro('laneid', SREG_SIGNATURE)
#-------------------------------------------------------------------------------
# Grid Macro
def _ptx_grid1d(): pass
def _ptx_grid2d(): pass
def grid_expand(ndim):
"""grid(ndim)
Return the absolute position of the current thread in the entire
grid of blocks. *ndim* should correspond to the number of dimensions
declared when instantiating the kernel. If *ndim* is 1, a single integer
is returned. If *ndim* is 2 or 3, a tuple of the given number of
integers is returned.
Computation of the first integer is as follows::
cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
and is similar for the other two indices, but using the ``y`` and ``z``
attributes.
"""
if ndim == 1:
fname = "ptx.grid.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.grid.2d"
restype = types.UniTuple(types.int32, 2)
elif ndim == 3:
fname = "ptx.grid.3d"
restype = types.UniTuple(types.int32, 3)
else:
raise ValueError('argument can only be 1, 2, 3')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
grid = Macro('ptx.grid', grid_expand, callable=True)
#-------------------------------------------------------------------------------
# Gridsize Macro
def gridsize_expand(ndim):
"""
Return the absolute size (or shape) in threads of the entire grid of
blocks. *ndim* should correspond to the number of dimensions declared when
instantiating the kernel.
Computation of the first integer is as follows::
cuda.blockDim.x * cuda.gridDim.x
and is similar for the other two indices, but using the ``y`` and ``z``
attributes.
"""
if ndim == 1:
fname = "ptx.gridsize.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.gridsize.2d"
restype = types.UniTuple(types.int32, 2)
elif ndim == 3:
fname = "ptx.gridsize.3d"
restype = types.UniTuple(types.int32, 3)
else:
raise ValueError('argument can only be 1, 2 or 3')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
gridsize = Macro('ptx.gridsize', gridsize_expand, callable=True)
#-------------------------------------------------------------------------------
# syncthreads
class syncthreads(Stub):
'''
Synchronize all threads in the same thread block. This function implements
the same pattern as barriers in traditional multi-threaded programming: this
function waits until all threads in the block call it, at which point it
returns control to all its callers.
'''
_description_ = '<syncthreads()>'
class syncthreads_count(Stub):
'''
syncthreads_count(predicate)
An extension to numba.cuda.syncthreads where the return value is a count
of the threads where predicate is true.
'''
_description_ = '<syncthreads_count()>'
class syncthreads_and(Stub):
'''
syncthreads_and(predicate)
An extension to numba.cuda.syncthreads where 1 is returned if predicate is
true for all threads or 0 otherwise.
'''
_description_ = '<syncthreads_and()>'
class syncthreads_or(Stub):
'''
syncthreads_or(predicate)
An extension to numba.cuda.syncthreads where 1 is returned if predicate is
true for any thread or 0 otherwise.
'''
_description_ = '<syncthreads_or()>'
# -------------------------------------------------------------------------------
# warp level operations
class syncwarp(Stub):
'''
syncwarp(mask)
Synchronizes a masked subset of threads in a warp.
'''
_description_ = '<warp_sync()>'
class shfl_sync_intrinsic(Stub):
'''
shfl_sync_intrinsic(mask, mode, value, mode_offset, clamp)
Nvvm intrinsic for shuffling data across a warp
docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-datamove
'''
_description_ = '<shfl_sync()>'
class vote_sync_intrinsic(Stub):
'''
vote_sync_intrinsic(mask, mode, predicate)
Nvvm intrinsic for performing a reduce and broadcast across a warp
docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-vote
'''
_description_ = '<vote_sync()>'
class match_any_sync(Stub):
'''
match_any_sync(mask, value)
Nvvm intrinsic for performing a compare and broadcast across a warp.
Returns a mask of the threads that have the same value as the given value within the masked warp.
'''
_description_ = '<match_any_sync()>'
class match_all_sync(Stub):
'''
match_all_sync(mask, value)
Nvvm intrinsic for performing a compare and broadcast across a warp.
Returns a tuple of (mask, pred), where mask is a mask of the threads that
have the same value as the given value within the masked warp, if they
all have the same value, otherwise it is 0, and pred is a boolean
indicating whether all threads in the masked warp have the same value.
'''
_description_ = '<match_all_sync()>'
# -------------------------------------------------------------------------------
# memory fences
class threadfence_block(Stub):
'''
A memory fence at thread block level
'''
_description_ = '<threadfence_block()>'
class threadfence_system(Stub):
'''
A memory fence at system level: across devices
'''
_description_ = '<threadfence_system()>'
class threadfence(Stub):
'''
A memory fence at device level
'''
_description_ = '<threadfence()>'
# -------------------------------------------------------------------------------
# shared
def _legalize_shape(shape):
if isinstance(shape, tuple):
return shape
elif isinstance(shape, int):
return (shape,)
else:
raise TypeError("invalid type for shape; got {0}".format(type(shape)))
def shared_array(shape, dtype):
shape = _legalize_shape(shape)
ndim = len(shape)
fname = "ptx.smem.alloc"
restype = types.Array(dtype, ndim, 'C')
sig = typing.signature(restype, types.UniTuple(types.intp, ndim), types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class shared(Stub):
"""
Shared memory namespace.
"""
_description_ = '<shared>'
array = Macro('shared.array', shared_array, callable=True,
argnames=['shape', 'dtype'])
'''
Allocate a shared array of the given *shape* and *type*. *shape* is either
an integer or a tuple of integers representing the array's dimensions.
*type* is a :ref:`Numba type <numba-types>` of the elements needing to be
stored in the array.
The returned array-like object can be read and written to like any normal
device array (e.g. through indexing).
'''
#-------------------------------------------------------------------------------
# local array
def local_array(shape, dtype):
shape = _legalize_shape(shape)
ndim = len(shape)
fname = "ptx.lmem.alloc"
restype = types.Array(dtype, ndim, 'C')
sig = typing.signature(restype, types.UniTuple(types.intp, ndim), types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class local(Stub):
'''
Local memory namespace.
'''
_description_ = '<local>'
array = Macro('local.array', local_array, callable=True,
argnames=['shape', 'dtype'])
'''
Allocate a local array of the given *shape* and *type*. The array is private
to the current thread, and resides in global memory. An array-like object is
returned which can be read and written to like any standard array (e.g.
through indexing).
'''
#-------------------------------------------------------------------------------
# const array
def const_array_like(ndarray):
fname = "ptx.cmem.arylike"
from .descriptor import CUDATargetDesc
aryty = CUDATargetDesc.typingctx.resolve_argument_type(ndarray)
sig = typing.signature(aryty, aryty)
return ir.Intrinsic(fname, sig, args=[ndarray])
class const(Stub):
'''
Constant memory namespace.
'''
_description_ = '<const>'
array_like = Macro('const.array_like', const_array_like,
callable=True, argnames=['ary'])
'''
Create a const array from *ary*. The resulting const array will have the
same shape, type, and values as *ary*.
'''
#-------------------------------------------------------------------------------
# bit manipulation
class popc(Stub):
"""
popc(val)
Returns the number of set bits in the given value.
"""
class brev(Stub):
"""
brev(val)
Reverse the bitpattern of an integer value; for example 0b10110110
becomes 0b01101101.
"""
class clz(Stub):
"""
clz(val)
Counts the number of leading zeros in a value.
"""
class ffs(Stub):
"""
ffs(val)
Find the position of the least significant bit set to 1 in an integer.
"""
#-------------------------------------------------------------------------------
# comparison and selection instructions
class selp(Stub):
"""
selp(a, b, c)
Select between source operands, based on the value of the predicate source operand.
"""
#-------------------------------------------------------------------------------
# single / double precision arithmetic
class fma(Stub):
"""
fma(a, b, c)
Perform the fused multiply-add operation.
"""
#-------------------------------------------------------------------------------
# atomic
class atomic(Stub):
"""Namespace for atomic operations
"""
_description_ = '<atomic>'
class add(Stub):
"""add(ary, idx, val)
Perform atomic ary[idx] += val. Supported on int32, float32, and
float64 operands only.
Returns the old value at the index location as if it is loaded
atomically.
"""
class max(Stub):
"""max(ary, idx, val)
Perform atomic ary[idx] = max(ary[idx], val). NaN is treated as a
missing value, so max(NaN, n) == max(n, NaN) == n. Note that this
differs from Python and Numpy behaviour, where max(a, b) is always
a when either a or b is a NaN.
Supported on int32, int64, uint32, uint64, float32, float64 operands only.
Returns the old value at the index location as if it is loaded
atomically.
"""
class min(Stub):
"""min(ary, idx, val)
Perform atomic ary[idx] = min(ary[idx], val). NaN is treated as a
missing value, so min(NaN, n) == min(n, NaN) == n. Note that this
differs from Python and Numpy behaviour, where min(a, b) is always
a when either a or b is a NaN.
Supported on int32, int64, uint32, uint64, float32, float64 operands only.
"""
class compare_and_swap(Stub):
"""compare_and_swap(ary, old, val)
Conditionally assign ``val`` to the first element of a 1D array ``ary``
if the current value matches ``old``.
Returns the current value as if it is loaded atomically.
"""
```
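The stubs above only declare the CUDA-Python surface; a minimal kernel sketch exercising ``cuda.grid``, ``cuda.shared.array`` and ``cuda.syncthreads`` might look as follows (an assumption: a CUDA-capable GPU and toolkit are available, and the block size of 128 is an arbitrary choice):

```python
import numpy as np
from numba import cuda, float32

@cuda.jit
def block_sum(x, out):
    # one partial sum per thread block, accumulated through shared memory
    tmp = cuda.shared.array(128, dtype=float32)
    tid = cuda.threadIdx.x
    i = cuda.grid(1)                     # absolute index of this thread in the grid
    tmp[tid] = x[i] if i < x.size else 0.0
    cuda.syncthreads()                   # wait until every thread has written its slot
    if tid == 0:
        s = 0.0
        for j in range(128):
            s += tmp[j]
        out[cuda.blockIdx.x] = s

x = np.ones(1024, dtype=np.float32)
out = np.zeros(8, dtype=np.float32)
block_sum[8, 128](x, out)                # 8 blocks of 128 threads each
print(out)                               # roughly [128., 128., ..., 128.]
```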
#### File: numba/tests/test_num_threads.py
```python
from __future__ import print_function, absolute_import, division
import sys
import os
import re
import multiprocessing
import unittest
import numpy as np
from numba import (njit, set_num_threads, get_num_threads, prange, config,
threading_layer, guvectorize)
from numba.np.ufunc.parallel import _get_thread_id
from numba.core.errors import TypingError
from numba.tests.support import TestCase, skip_parfors_unsupported, tag
from numba.tests.test_parallel_backend import TestInSubprocess
class TestNumThreads(TestCase):
_numba_parallel_test_ = False
def setUp(self):
# Make sure the num_threads is set to the max. This also makes sure
# the threads are launched.
set_num_threads(config.NUMBA_NUM_THREADS)
def check_mask(self, expected, result):
# There's no guarantee that TBB will use a full mask worth of
# threads if it deems it inefficient to do so
if threading_layer() == 'tbb':
self.assertTrue(np.all(result <= expected))
elif threading_layer() in ('omp', 'workqueue'):
np.testing.assert_equal(expected, result)
else:
assert 0, 'unreachable'
@skip_parfors_unsupported
def test_set_num_threads_type(self):
@njit
def foo():
set_num_threads('wrong_type')
expected = "The number of threads specified must be an integer"
for fn, errty in ((foo, TypingError), (foo.py_func, TypeError)):
with self.assertRaises(errty) as raises:
fn()
self.assertIn(expected, str(raises.exception))
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_basic(self):
max_threads = config.NUMBA_NUM_THREADS
self.assertEqual(get_num_threads(), max_threads)
set_num_threads(2)
self.assertEqual(get_num_threads(), 2)
set_num_threads(max_threads)
self.assertEqual(get_num_threads(), max_threads)
with self.assertRaises(ValueError):
set_num_threads(0)
with self.assertRaises(ValueError):
set_num_threads(max_threads + 1)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_basic_jit(self):
max_threads = config.NUMBA_NUM_THREADS
@njit
def get_n():
return get_num_threads()
self.assertEqual(get_n(), max_threads)
set_num_threads(2)
self.assertEqual(get_n(), 2)
set_num_threads(max_threads)
self.assertEqual(get_n(), max_threads)
@njit
def set_get_n(n):
set_num_threads(n)
return get_num_threads()
self.assertEqual(set_get_n(2), 2)
self.assertEqual(set_get_n(max_threads), max_threads)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_basic_guvectorize(self):
max_threads = config.NUMBA_NUM_THREADS
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def get_n(x):
x[:] = get_num_threads()
x = np.zeros((5000000,), dtype=np.int64)
get_n(x)
np.testing.assert_equal(x, max_threads)
set_num_threads(2)
x = np.zeros((5000000,), dtype=np.int64)
get_n(x)
np.testing.assert_equal(x, 2)
set_num_threads(max_threads)
x = np.zeros((5000000,), dtype=np.int64)
get_n(x)
np.testing.assert_equal(x, max_threads)
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def set_get_n(n):
set_num_threads(n[0])
n[:] = get_num_threads()
x = np.zeros((5000000,), dtype=np.int64)
x[0] = 2
set_get_n(x)
np.testing.assert_equal(x, 2)
x = np.zeros((5000000,), dtype=np.int64)
x[0] = max_threads
set_get_n(x)
np.testing.assert_equal(x, max_threads)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_outside_jit(self):
# Test set_num_threads outside a jitted function
set_num_threads(2)
@njit(parallel=True)
def test_func():
x = 5
buf = np.empty((x,))
for i in prange(x):
buf[i] = get_num_threads()
return buf
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def test_gufunc(x):
x[:] = get_num_threads()
out = test_func()
np.testing.assert_equal(out, 2)
x = np.zeros((5000000,), dtype=np.int64)
test_gufunc(x)
np.testing.assert_equal(x, 2)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_inside_jit(self):
# Test set_num_threads inside a jitted function
@njit(parallel=True)
def test_func(nthreads):
x = 5
buf = np.empty((x,))
set_num_threads(nthreads)
for i in prange(x):
buf[i] = get_num_threads()
return buf
mask = 2
out = test_func(mask)
np.testing.assert_equal(out, mask)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_inside_guvectorize(self):
# Test set_num_threads inside a jitted guvectorize function
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def test_func(x):
set_num_threads(x[0])
x[:] = get_num_threads()
x = np.zeros((5000000,), dtype=np.int64)
mask = 2
x[0] = mask
test_func(x)
np.testing.assert_equal(x, mask)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_get_num_threads_truth_outside_jit(self):
for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)):
set_num_threads(mask)
# a lot of work, hopefully will trigger "mask" count of threads to
# join the parallel region (for those backends with dynamic threads)
@njit(parallel=True)
def test_func():
x = 5000000
buf = np.empty((x,))
for i in prange(x):
buf[i] = _get_thread_id()
return len(np.unique(buf)), get_num_threads()
out = test_func()
self.check_mask((mask, mask), out)
@guvectorize(['void(int64[:], int64[:])'],
'(n), (m)',
nopython=True,
target='parallel')
def test_gufunc(x, out):
x[:] = _get_thread_id()
out[0] = get_num_threads()
# Reshape to force parallelism
x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000))
out = np.zeros((1,), dtype=np.int64)
test_gufunc(x, out)
self.check_mask(mask, out)
self.check_mask(mask, len(np.unique(x)))
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_get_num_threads_truth_inside_jit(self):
for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)):
# a lot of work, hopefully will trigger "mask" count of threads to
# join the parallel region (for those backends with dynamic threads)
@njit(parallel=True)
def test_func():
set_num_threads(mask)
x = 5000000
buf = np.empty((x,))
for i in prange(x):
buf[i] = _get_thread_id()
return len(np.unique(buf)), get_num_threads()
out = test_func()
self.check_mask((mask, mask), out)
@guvectorize(['void(int64[:], int64[:])'],
'(n), (m)',
nopython=True,
target='parallel')
def test_gufunc(x, out):
set_num_threads(mask)
x[:] = _get_thread_id()
out[0] = get_num_threads()
# Reshape to force parallelism
x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000))
out = np.zeros((1,), dtype=np.int64)
test_gufunc(x, out)
self.check_mask(mask, out)
self.check_mask(mask, len(np.unique(x)))
# this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
# set or >= 2) and TBB backends
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_nested_parallelism_1(self):
if threading_layer() == 'workqueue':
self.skipTest("workqueue is not threadsafe")
# check that get_num_threads is ok in nesting
mask = config.NUMBA_NUM_THREADS - 1
N = config.NUMBA_NUM_THREADS
M = 2 * config.NUMBA_NUM_THREADS
@njit(parallel=True)
def child_func(buf, fid):
M, N = buf.shape
for i in prange(N):
buf[fid, i] = get_num_threads()
def get_test(test_type):
if test_type == 'njit':
def test_func(nthreads, py_func=False):
@njit(parallel=True)
def _test_func(nthreads):
acc = 0
buf = np.zeros((M, N))
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
# set threads in parent function
set_num_threads(local_mask)
if local_mask < N:
child_func(buf, local_mask)
acc += get_num_threads()
return acc, buf
if py_func:
return _test_func.py_func(nthreads)
else:
return _test_func(nthreads)
elif test_type == 'guvectorize':
def test_func(nthreads, py_func=False):
def _test_func(acc, buf, local_mask):
set_num_threads(nthreads)
# set threads in parent function
set_num_threads(local_mask[0])
if local_mask[0] < N:
child_func(buf, local_mask[0])
acc[0] += get_num_threads()
buf = np.zeros((M, N), dtype=np.int64)
acc = np.zeros((M, 1), dtype=np.int64)
local_mask = (1 + np.arange(M) % mask).reshape((M, 1))
sig = ['void(int64[:], int64[:, :], int64[:])']
layout = '(p), (n, m), (p)'
if not py_func:
_test_func = guvectorize(sig, layout, nopython=True,
target='parallel')(_test_func)
else:
_test_func = guvectorize(sig, layout,
forceobj=True)(_test_func)
_test_func(acc, buf, local_mask)
return acc, buf
return test_func
for test_type in ['njit', 'guvectorize']:
test_func = get_test(test_type)
got_acc, got_arr = test_func(mask)
exp_acc, exp_arr = test_func(mask, py_func=True)
np.testing.assert_equal(exp_acc, got_acc)
np.testing.assert_equal(exp_arr, got_arr)
# check the maths reconciles, guvectorize does not reduce, njit does
math_acc_exp = 1 + np.arange(M) % mask
if test_type == 'guvectorize':
math_acc = math_acc_exp.reshape((M, 1))
else:
math_acc = np.sum(math_acc_exp)
np.testing.assert_equal(math_acc, got_acc)
math_arr = np.zeros((M, N))
for i in range(1, N):
# there's branches on 1, ..., num_threads - 1
math_arr[i, :] = i
np.testing.assert_equal(math_arr, got_arr)
# this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
# set or >= 2) and TBB backends
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_nested_parallelism_2(self):
if threading_layer() == 'workqueue':
self.skipTest("workqueue is not threadsafe")
# check that get_num_threads is ok in nesting
N = config.NUMBA_NUM_THREADS + 1
M = 4 * config.NUMBA_NUM_THREADS + 1
def get_impl(child_type, test_type):
if child_type == 'parallel':
child_dec = njit(parallel=True)
elif child_type == 'njit':
child_dec = njit(parallel=False)
elif child_type == 'none':
def child_dec(x):
return x
@child_dec
def child(buf, fid):
M, N = buf.shape
set_num_threads(fid) # set threads in child function
for i in prange(N):
buf[fid, i] = get_num_threads()
if test_type in ['parallel', 'njit', 'none']:
if test_type == 'parallel':
test_dec = njit(parallel=True)
elif test_type == 'njit':
test_dec = njit(parallel=False)
elif test_type == 'none':
def test_dec(x):
return x
@test_dec
def test_func(nthreads):
buf = np.zeros((M, N))
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
# when the threads exit the child functions they should
# have a TLS slot value of the local mask as it was set
# in child
if local_mask < config.NUMBA_NUM_THREADS:
child(buf, local_mask)
assert get_num_threads() == local_mask
return buf
else:
if test_type == 'guvectorize':
test_dec = guvectorize(['int64[:,:], int64[:]'],
'(n, m), (k)', nopython=True,
target='parallel')
elif test_type == 'guvectorize-obj':
test_dec = guvectorize(['int64[:,:], int64[:]'],
'(n, m), (k)', forceobj=True)
def test_func(nthreads):
@test_dec
def _test_func(buf, local_mask):
set_num_threads(nthreads)
# when the threads exit the child functions they should
# have a TLS slot value of the local mask as it was set
# in child
if local_mask[0] < config.NUMBA_NUM_THREADS:
child(buf, local_mask[0])
assert get_num_threads() == local_mask[0]
buf = np.zeros((M, N), dtype=np.int64)
local_mask = (1 + np.arange(M) % mask).reshape((M, 1))
_test_func(buf, local_mask)
return buf
return test_func
mask = config.NUMBA_NUM_THREADS - 1
res_arrays = {}
for test_type in ['parallel', 'njit', 'none',
'guvectorize', 'guvectorize-obj']:
for child_type in ['parallel', 'njit', 'none']:
if child_type == 'none' and test_type != 'none':
continue
set_num_threads(mask)
res_arrays[test_type, child_type] = get_impl(
child_type, test_type)(mask)
py_arr = res_arrays['none', 'none']
for arr in res_arrays.values():
np.testing.assert_equal(arr, py_arr)
# check the maths reconciles
math_arr = np.zeros((M, N))
# there's branches on modulo mask but only NUMBA_NUM_THREADS funcs
for i in range(1, config.NUMBA_NUM_THREADS):
math_arr[i, :] = i
np.testing.assert_equal(math_arr, py_arr)
# this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
# set or >= 2) and TBB backends
# This test needs at least 3 threads to run, N>=2 for the launch, M>=N+1 for
# the nested function
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 3, "Not enough CPU cores")
def _test_nested_parallelism_3(self):
if threading_layer() == 'workqueue':
self.skipTest("workqueue is not threadsafe")
# check that the right number of threads are present in nesting
# this relies on there being a load of cores present
BIG = 1000000
@njit(parallel=True)
def work(local_nt): # arg is value 3
tid = np.zeros(BIG)
acc = 0
set_num_threads(local_nt) # set to 3 threads
for i in prange(BIG):
acc += 1
tid[i] = _get_thread_id()
return acc, np.unique(tid)
@njit(parallel=True)
def test_func_jit(nthreads):
set_num_threads(nthreads) # set to 2 threads
lens = np.zeros(nthreads)
total = 0
for i in prange(nthreads):
my_acc, tids = work(nthreads + 1) # call with value 3
lens[i] = len(tids)
total += my_acc
return total, np.unique(lens)
NT = 2
expected_acc = BIG * NT
expected_thread_count = NT + 1
got_acc, got_tc = test_func_jit(NT)
self.assertEqual(expected_acc, got_acc)
self.check_mask(expected_thread_count, got_tc)
def test_guvectorize(nthreads):
@guvectorize(['int64[:], int64[:]'],
'(n), (n)',
nopython=True,
target='parallel')
def test_func_guvectorize(total, lens):
my_acc, tids = work(nthreads + 1)
lens[0] = len(tids)
total[0] += my_acc
total = np.zeros((nthreads, 1), dtype=np.int64)
lens = np.zeros(nthreads, dtype=np.int64).reshape((nthreads, 1))
test_func_guvectorize(total, lens)
# vectorize does not reduce, so total is summed
return total.sum(), np.unique(lens)
got_acc, got_tc = test_guvectorize(NT)
self.assertEqual(expected_acc, got_acc)
self.check_mask(expected_thread_count, got_tc)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
@unittest.skipIf(not sys.platform.startswith('linux'), "Linux only")
def _test_threadmask_across_fork(self):
forkctx = multiprocessing.get_context('fork')
@njit
def foo():
return get_num_threads()
def wrap(queue):
queue.put(foo())
mask = 1
self.assertEqual(foo(), config.NUMBA_NUM_THREADS)
set_num_threads(mask)
self.assertEqual(foo(), mask)
shared_queue = forkctx.Queue()
# check TLS slot inheritance in fork
p = forkctx.Process(target=wrap, args=(shared_queue,))
p.start()
p.join()
self.assertEqual(shared_queue.get(), mask)
def tearDown(self):
set_num_threads(config.NUMBA_NUM_THREADS)
class TestNumThreadsBackends(TestInSubprocess, TestCase):
_class = TestNumThreads
_DEBUG = False
# 1 is mainly here to ensure tests skip correctly
num_threads = [i for i in [1, 2, 4, 8, 16] if i <= config.NUMBA_NUM_THREADS]
def run_test_in_separate_process(self, test, threading_layer, num_threads):
env_copy = os.environ.copy()
env_copy['NUMBA_THREADING_LAYER'] = str(threading_layer)
env_copy['NUMBA_NUM_THREADS'] = str(num_threads)
cmdline = [sys.executable, "-m", "numba.runtests", "-v", test]
return self.run_cmd(cmdline, env_copy)
@classmethod
def _inject(cls, name, backend, backend_guard, num_threads):
themod = cls.__module__
thecls = cls._class.__name__
injected_method = '%s.%s.%s' % (themod, thecls, name)
def test_template(self):
o, e = self.run_test_in_separate_process(injected_method, backend,
num_threads)
if self._DEBUG:
print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
self.assertIn('OK', e)
self.assertTrue('FAIL' not in e)
self.assertTrue('ERROR' not in e)
m = re.search(r"\.\.\. skipped '(.*?)'", e)
if m:
self.skipTest(m.group(1))
injected_test = "%s_%s_%s_threads" % (name[1:], backend, num_threads)
setattr(cls, injected_test,
tag('long_running')(backend_guard(test_template)))
@classmethod
def generate(cls):
for name in cls._class.__dict__.copy():
for backend, backend_guard in cls.backends.items():
for num_threads in cls.num_threads:
if not name.startswith('_test_'):
continue
cls._inject(name, backend, backend_guard, num_threads)
TestNumThreadsBackends.generate()
if __name__ == '__main__':
unittest.main()
```
#### File: src/pca/todoJunto.py
```python
import codecs
import sys
import sklearn as sk
import pandas as pd
import numpy as np
import math
from sklearn import preprocessing
from sklearn.decomposition import PCA
from src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf
from src.pca.metodo_potencia_deflation import power_iteration
from src.pca.metodo_potencia_deflation import power_deflation
def PCA_from_sklearn(X):
"""
componentes_principales(X): function that returns the principal components.
Parameters
----------
n_components: number of components.
svd_solver: str {'auto', 'full', 'arpack', 'randomized'}
'full' is chosen, which means the full SVD is computed by calling the standard
LAPACK solver via scipy.linalg.svd, and the components are then selected by postprocessing.
Attributes
---------
varianza_explicada: percentage of variance explained by each component.
valores_singulares: singular values corresponding to each component.
pca.components_: principal axes representing the directions of maximum variance in the data.
eigenvalues: the eigenvalues obtained from the covariance matrix.
Method
---------
fit_transform: fits the model to the data and applies the dimensionality reduction to the data.
"""
X = pd.DataFrame(X)
n_components = len(X.columns)
pca_1 = PCA(n_components, svd_solver='full')
componentesprincipales_1 = pca_1.fit_transform(X)
pca_1.components_
var_exp = pca_1.explained_variance_ratio_
## The number of components is obtained from the cumulative explained variance of the components, which must reach the 80% threshold used below.
var_acumulada = var_exp.cumsum()
conteo = (var_acumulada) < 0.8
n_componentes = conteo.sum() + 1
pca = PCA(n_componentes, svd_solver='full')
componentesprincipales = pca.fit_transform(X)
pca.components_
varianza_explicada = pca.explained_variance_ratio_
eigenvalues = pca.explained_variance_
val_sing = pca.singular_values_
return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues
def PCA_from_SVD(A):
"""
Function for PCA based on numpy's SVD
params: A data matrix
num_componentes desired number of components
return: valores_singulares The singular values of the SVD decomposition
componentes The coefficients used to compute the principal components
Z The transformed data (principal components)
varianza_explicada The variance explained by each principal component
"""
# Center the data
A = np.array(A) # convert the data to a numpy array in case it comes from a DataFrame
A_centered = A - A.mean(axis=0)
# Compute the SVD
U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)
# The singular values
valores_singulares = S
# The components (coefficients)
componentes = ((Vt))
# The transformed data (principal components)
Z = A_centered@np.transpose(Vt)
# The explained variance
varianza_explicada = S**2/np.sum(S**2)
# Automatically compute the number of components according to the explained variance
# Threshold of 80%
n = A.shape[1] # number of columns
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
# return 4 objects
return valores_singulares[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
def PCA_from_SVD_jacobi(A):
"""
Function for PCA based on the SVD
params: A data matrix
num_componentes desired number of components
return: valores_singulares The singular values of the SVD decomposition
componentes The coefficients used to compute the principal components
Z The transformed data (principal components)
varianza_explicada The variance explained by each principal component
"""
# Center the data
A = np.array(A) # convert the data to a numpy array in case it comes from a DataFrame
A_centered = A - A.mean(axis=0)
# Modify this line of code: call the function created by the team (svd_jacobi_aprox must be importable here)
# Compute the SVD
U, S, Vt = svd_jacobi_aprox(A_centered,1e-12,500)
# The singular values
valores_singulares = S
# The components (coefficients)
componentes = ((Vt))
# The transformed data (principal components)
Z = A_centered@np.transpose(Vt)
# The explained variance
varianza_explicada = S**2/np.sum(S**2)
# Automatically compute the number of components according to the explained variance
# Threshold of 80%
n = A.shape[1] # number of columns
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
# return 4 objects
return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]
def PCA_from_QR_vf(data,niter = 450):
"""
Function for PCA based on the eigenvectors
params: data: data matrix
niter: maximum number of iterations
return: componentes The coefficients used to compute the principal components (eigenvectors of the covariance matrix)
Z The transformed data (principal components)
varianza_explicada The variance explained by each principal component
Depends on the function: eigenvectores_eigenvalores_QR_vf
"""
# convert to array
A = np.array(data)
# Center the data
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
# Covariance matrix
#C = (datos_centrados.T@datos_centrados)/(datos_centrados.shape[0]-1)
C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1)
# Run the QR algorithm
E, Q = eigenvectores_eigenvalores_QR_vf(C,niter)
# The components (coefficients)
componentes = Q.T
# The transformed data (principal components)
# Filtering used to raise an error here because Z was not recognized as a numpy array
Z = datos_centrados@Q
# The explained variance
varianza_explicada = E/np.sum(E)
# Automatically compute the number of components according to the explained variance
# Threshold of 80%
n = data.shape[1] # number of columns
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
# return 4 objects
return E[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes] #, varianza_acumulada, num_componentes
def PCA_from_potencia(X):
"""
Function that computes PCA using the power method and Hotelling's deflation
params: A: data matrix
return: eigenvalues Numpy array with the eigenvalues of A
eigenvectors Numpy array with the corresponding eigenvectors of A
"""
prop = 0 # Proportion of explained variance
comp = 1
cur_var = 0
comp_vecs = np.zeros([X.shape[1], X.shape[1]])
# convert to array
A = np.array(X)
# Center the data
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
# Compute the covariance matrix
cov = np.dot(X.T, X)/X.shape[0]
# Apply the power method
evalues_pow, evectors_pow = power_deflation(cov,2000)
# The explained variance
varianza_explicada = evalues_pow/np.sum(evalues_pow)
# The transformed data (principal components)
Z = datos_centrados@evectors_pow
# Automatically compute the number of components according to the explained variance
# Threshold of 80%
n = X.shape[1] # number of columns
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
```
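A minimal usage sketch for the SVD-based routine above (not part of the original module; it assumes the `src/pca` package layout from the file header is importable):

```python
import numpy as np
from src.pca.todoJunto import PCA_from_SVD  # assumes src/ is on the Python path

# Small synthetic data set: 100 observations of 5 variables.
np.random.seed(0)
A = np.random.randn(100, 5)

# Returns the retained singular values, the loadings, the transformed data and
# the explained variance, truncated at the 80% cumulative-variance threshold.
sing_vals, coefs, Z, var_exp = PCA_from_SVD(A)
print(var_exp)      # explained variance of each retained component
print(Z.shape)      # (100, number of retained components)
```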
#### File: src/test_algorithms/comparaModelos.py
```python
import pandas as pd
import numpy as np
from pathlib import Path
from src.pca import todoJunto
from src.test_algorithms.err_relativo import err_relativo
def compara_resultados_sk_np(A):
"""
compara_resultados_sk_np: compares the results of the PCA_from_sklearn and PCA_from_SVD functions
Function that compares the results of the PCA_from_sklearn and PCA_from_SVD functions
and returns the information as a pandas data frame.
Parameters
----------
A - slice of the data to which PCA will be applied
Returns
---------
Dataframe with the comparison and relative errors
"""
# PCA from scikit-learn
pca, var_exp, comp_prin, val_sing, pca_coef, eigenvalues = todoJunto.PCA_from_sklearn(A)
# PCA based on numpy's SVD
np_val_sing, np_pca_coef, np_comp_prin, np_var_exp = todoJunto.PCA_from_SVD(A)
coef_iguales = np.allclose(pca_coef,np_pca_coef)
err_coef = err_relativo(pca_coef, np_pca_coef)
var_exp_igual = np.allclose(var_exp,np_var_exp)
err_var_exp = err_relativo(var_exp, np_var_exp)
comp_iguales = np.allclose(comp_prin,np_comp_prin)
err_comp = err_relativo(comp_prin, np_comp_prin)
val_sing_igual = np.allclose(val_sing, np_val_sing)
err_val_sing = err_relativo(val_sing, np_val_sing)
data_a_comparar = {'elemento':['varianza explicada','valores singulares', 'coeficientes', 'componentes principales'],
'Igualdad':[var_exp_igual,val_sing_igual,coef_iguales,comp_iguales],
'Max error relativo': [np.amax(err_var_exp),np.amax(err_val_sing),np.amax(err_coef),np.amax(err_comp)],
'Error relativo':[err_var_exp, err_val_sing, err_coef, err_comp]}
comparativa = pd.DataFrame(data=data_a_comparar)
return comparativa
def compara_resultados_abs_sk_np(A):
"""
compara_resultados_abs_sk_np: compares the results of the PCA_from_sklearn and PCA_from_SVD functions, ignoring the sign
Function that compares the results of applying the PCA_from_sklearn and PCA_from_SVD functions,
without taking into account the sign of the coefficients and principal components,
and returns the information as a pandas data frame.
Parameters
----------
A - slice of the data to which PCA will be applied
Returns
---------
Dataframe with the comparison and relative errors
"""
# PCA from scikit-learn
pca, var_exp, comp_prin, val_sing, pca_coef, eigenvalues = todoJunto.PCA_from_sklearn(A)
# PCA based on numpy's SVD
np_val_sing, np_pca_coef, np_comp_prin, np_var_exp = todoJunto.PCA_from_SVD(A)
var_exp_igual = np.allclose(var_exp,np_var_exp)
err_var_exp = err_relativo(var_exp, np_var_exp)
val_sing_igual = np.allclose(val_sing, np_val_sing)
err_val_sing = err_relativo(val_sing, np_val_sing)
coef_abs_iguales = np.allclose(np.abs(pca_coef),np.abs(np_pca_coef))
err_coef_abs = err_relativo(np.abs(pca_coef),np.abs(np_pca_coef))
comp_abs_iguales = np.allclose(np.abs(comp_prin),np.abs(np_comp_prin))
err_comp_abs = err_relativo(np.abs(comp_prin), np.abs(np_comp_prin))
data_a_comparar = {'elemento':['varianza explicada','valores singulares','coeficientes', 'componentes principales'],
'Igualdad (en valor absoluto)':[var_exp_igual,val_sing_igual,coef_abs_iguales, comp_abs_iguales],
'Max error relativo (con valor absoluto)': [np.amax(err_var_exp),np.amax(err_val_sing),np.amax(err_coef_abs),np.amax(err_comp_abs)],
'Error relativo (con valor absoluto)':[err_var_exp, err_val_sing,err_coef_abs, err_comp_abs]}
comparativa = pd.DataFrame(data=data_a_comparar)
return comparativa
def compara_resultados_abs_sk_qr(A):
"""
compara_resultados_abs_sk_qr: compares the results of the PCA_from_sklearn and PCA_from_QR_vf functions
Function that compares the results of the PCA_from_sklearn and PCA_from_QR_vf functions
and returns the information as a pandas data frame.
Parameters
----------
A - slice of the data to which PCA will be applied
Returns
---------
Dataframe with the comparison and relative errors
"""
# PCA from scikit-learn
pca, var_exp, comp_prin, val_sing, pca_coef, eigenvalues = todoJunto.PCA_from_sklearn(A)
# PCA based on the QR algorithm
qr_eigenvalues, qr_pca_coef, qr_comp_prin, qr_var_exp = todoJunto.PCA_from_QR_vf(A)
try:
# if len(val_sing) == len(qr_val_sing):
var_exp_igual = np.allclose(var_exp,qr_var_exp)
err_var_exp = err_relativo(var_exp, qr_var_exp)
eigen_igual = np.allclose(np.abs(eigenvalues), np.abs(qr_eigenvalues))
err_eigen = err_relativo(np.abs(eigenvalues), np.abs(qr_eigenvalues))
coef_abs_iguales = np.allclose(np.abs(pca_coef),np.abs(qr_pca_coef))
err_coef_abs = err_relativo(np.abs(pca_coef),np.abs(qr_pca_coef))
comp_abs_iguales = np.allclose(np.abs(comp_prin),np.abs(qr_comp_prin))
err_comp_abs = err_relativo(np.abs(comp_prin), np.abs(qr_comp_prin))
except:
print('Note: The results have different lengths, so only the entries they have in common are compared')
min_len = min(len(eigenvalues),len(qr_eigenvalues))
var_exp_igual = np.allclose(var_exp[:min_len],qr_var_exp[:min_len])
err_var_exp = err_relativo(var_exp[:min_len], qr_var_exp[:min_len])
eigen_igual = np.allclose(np.abs(eigenvalues)[:min_len], np.abs(qr_eigenvalues)[:min_len])
err_eigen = err_relativo(np.abs(eigenvalues)[:min_len], np.abs(qr_eigenvalues)[:min_len])
coef_abs_iguales = np.allclose(np.abs(pca_coef[:min_len]),np.abs(qr_pca_coef[:min_len]))
err_coef_abs = err_relativo(np.abs(pca_coef[:min_len]),np.abs(qr_pca_coef[:min_len]))
comp_abs_iguales = np.allclose(np.abs(comp_prin[:,:min_len]),np.abs(qr_comp_prin[:,:min_len]))
err_comp_abs = err_relativo(np.abs(comp_prin[:,:min_len]), np.abs(qr_comp_prin[:,:min_len]))
data_a_comparar = {'elemento':['varianza explicada','eigenvalores','coeficientes', 'componentes principales'],
'Igualdad (en valor absoluto)':[var_exp_igual,eigen_igual,coef_abs_iguales, comp_abs_iguales],
'Max error relativo (con valor absoluto)': [np.amax(err_var_exp),np.amax(err_eigen),np.amax(err_coef_abs),np.amax(err_comp_abs)],
'Error relativo (con valor absoluto)':[err_var_exp, err_eigen,err_coef_abs, err_comp_abs]}
comparativa = pd.DataFrame(data=data_a_comparar)
return comparativa
def compara_resultados_abs_sk_potencia(A):
"""
compara_resultados_abs_sk_potencia: compares the results of the PCA_from_sklearn and PCA_from_potencia functions
Function that compares the results of the PCA_from_sklearn and PCA_from_potencia functions
and returns the information as a pandas data frame.
Parameters
----------
A - slice of the data to which PCA will be applied
Returns
---------
Dataframe with the comparison and relative errors
"""
# PCA from scikit-learn
pca, var_exp, comp_prin, val_sing, pca_coef, eigenvalues = todoJunto.PCA_from_sklearn(A)
# PCA based on the power method
pow_eigenvalues, pow_pca_coef, pow_comp_prin, pow_var_exp = todoJunto.PCA_from_potencia(A)
try:
# if len(val_sing) == len(pow_val_sing):
var_exp_igual = np.allclose(var_exp,pow_var_exp)
err_var_exp = err_relativo(var_exp, pow_var_exp)
eigen_igual = np.allclose(np.abs(eigenvalues), np.abs(pow_eigenvalues))
err_eigen = err_relativo(np.abs(eigenvalues), np.abs(pow_eigenvalues))
coef_abs_iguales = np.allclose(np.abs(pca_coef),np.abs(pow_pca_coef))
err_coef_abs = err_relativo(np.abs(pca_coef),np.abs(pow_pca_coef))
comp_abs_iguales = np.allclose(np.abs(comp_prin),np.abs(pow_comp_prin))
err_comp_abs = err_relativo(np.abs(comp_prin), np.abs(pow_comp_prin))
except:
print('Note: The results have different lengths, so only the entries they have in common are compared')
min_len = min(len(eigenvalues),len(pow_eigenvalues))
var_exp_igual = np.allclose(var_exp[:min_len],pow_var_exp[:min_len])
err_var_exp = err_relativo(var_exp[:min_len], pow_var_exp[:min_len])
eigen_igual = np.allclose(np.abs(eigenvalues)[:min_len], np.abs(pow_eigenvalues)[:min_len])
err_eigen = err_relativo(np.abs(eigenvalues)[:min_len], np.abs(pow_eigenvalues)[:min_len])
coef_abs_iguales = np.allclose(np.abs(pca_coef[:min_len]),np.abs(pow_pca_coef[:min_len]))
err_coef_abs = err_relativo(np.abs(pca_coef[:min_len]),np.abs(pow_pca_coef[:min_len]))
comp_abs_iguales = np.allclose(np.abs(comp_prin[:,:min_len]),np.abs(pow_comp_prin[:,:min_len]))
err_comp_abs = err_relativo(np.abs(comp_prin[:,:min_len]), np.abs(pow_comp_prin[:,:min_len]))
data_a_comparar = {'elemento':['varianza explicada','eigenvalores','coeficientes', 'componentes principales'],
'Igualdad (en valor absoluto)':[var_exp_igual,eigen_igual,coef_abs_iguales, comp_abs_iguales],
'Max error relativo (con valor absoluto)': [np.amax(err_var_exp),np.amax(err_eigen),np.amax(err_coef_abs),np.amax(err_comp_abs)],
'Error relativo (con valor absoluto)':[err_var_exp, err_eigen,err_coef_abs, err_comp_abs]}
comparativa = pd.DataFrame(data=data_a_comparar)
return comparativa
``` |
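The comparison functions above depend on `err_relativo`, which is imported from `src.test_algorithms.err_relativo` but not shown in this file. The following is only a guess at what that helper computes, offered as a hedged sketch rather than the project's actual implementation:

```python
import numpy as np

def err_relativo(aprox, exacto, eps=1e-15):
    """Assumed behaviour: elementwise relative error |aprox - exacto| / |exacto|.

    The real helper in src/test_algorithms may differ; eps only guards against
    division by zero in this sketch.
    """
    aprox = np.asarray(aprox, dtype=float)
    exacto = np.asarray(exacto, dtype=float)
    return np.abs(aprox - exacto) / np.maximum(np.abs(exacto), eps)
```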
{
"source": "123avi/flink",
"score": 2
} |
#### File: flink/example/TriangleEnumeration.py
```python
from flink.plan.Environment import get_environment
from flink.plan.Constants import Order
from flink.functions.FlatMapFunction import FlatMapFunction
from flink.functions.GroupReduceFunction import GroupReduceFunction
from flink.functions.ReduceFunction import ReduceFunction
from flink.functions.MapFunction import MapFunction
from flink.functions.JoinFunction import JoinFunction
class EdgeDuplicator(FlatMapFunction):
def flat_map(self, value, collector):
collector.collect((value[0], value[1]))
collector.collect((value[1], value[0]))
class DegreeCounter(GroupReduceFunction):
def reduce(self, iterator, collector):
other_vertices = []
data = iterator.next()
edge = (data[0], data[1])
group_vertex = edge[0]
other_vertices.append(edge[1])
while iterator.has_next():
data = iterator.next()
edge = [data[0], data[1]]
other_vertex = edge[1]
contained = False
for v in other_vertices:
if v == other_vertex:
contained = True
break
if not contained and not other_vertex == group_vertex:
other_vertices.append(other_vertex)
degree = len(other_vertices)
for other_vertex in other_vertices:
if group_vertex < other_vertex:
output_edge = (group_vertex, degree, other_vertex, 0)
else:
output_edge = (other_vertex, 0, group_vertex, degree)
collector.collect(output_edge)
class DegreeJoiner(ReduceFunction):
def reduce(self, value1, value2):
edge1 = [value1[0], value1[1], value1[2], value1[3]]
edge2 = [value2[0], value2[1], value2[2], value2[3]]
out_edge = [edge1[0], edge1[1], edge1[2], edge1[3]]
if edge1[1] == 0 and (not edge1[3] == 0):
out_edge[1] = edge2[1]
elif (not edge1[1] == 0) and edge1[3] == 0:
out_edge[3] = edge2[3]
return out_edge
class EdgeByDegreeProjector(MapFunction):
def map(self, value):
if value[1] > value[3]:
return (value[2], value[0])
else:
return (value[0], value[2])
class EdgeByIdProjector(MapFunction):
def map(self, value):
edge = (value[0], value[1])
if value[0] > value[1]:
return (value[1], value[0])
else:
return (value[0], value[1])
class TriadBuilder(GroupReduceFunction):
def reduce(self, iterator, collector):
vertices = []
y = iterator.next()
first_edge = [y[0], y[1]]
vertices.append(first_edge[1])
while iterator.has_next():
x = iterator.next()
second_edge = [x[0], x[1]]
higher_vertex_id = second_edge[1]
for lowerVertexId in vertices:
collector.collect((first_edge[0], lowerVertexId, higher_vertex_id))
vertices.append(higher_vertex_id)
class TriadFilter(JoinFunction):
def join(self, value1, value2):
return value1
if __name__ == "__main__":
env = get_environment()
edges = env.from_elements(
(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 5), (3, 4), (3, 7), (3, 8), (5, 6), (7, 8))
edges_with_degrees = edges \
.flat_map(EdgeDuplicator()) \
.group_by(0) \
.sort_group(1, Order.ASCENDING) \
.reduce_group(DegreeCounter()) \
.group_by(0, 2) \
.reduce(DegreeJoiner())
edges_by_degree = edges_with_degrees \
.map(EdgeByDegreeProjector())
edges_by_id = edges_by_degree \
.map(EdgeByIdProjector())
triangles = edges_by_degree \
.group_by(0) \
.sort_group(1, Order.ASCENDING) \
.reduce_group(TriadBuilder()) \
.join(edges_by_id) \
.where(1, 2) \
.equal_to(0, 1) \
.using(TriadFilter())
triangles.output()
env.set_parallelism(1)
env.execute(local=True)
``` |
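For reference, here is a small plain-Python sketch (written for this document, independent of Flink) that enumerates the same triangles from the example edge list by intersecting the neighbour sets of each edge's endpoints:

```python
from collections import defaultdict

edges = [(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 5),
         (3, 4), (3, 7), (3, 8), (5, 6), (7, 8)]

# Adjacency sets over the undirected graph.
adj = defaultdict(set)
for a, b in edges:
    adj[a].add(b)
    adj[b].add(a)

# For every edge (a, b), any common neighbour c with a < b < c closes a triangle,
# and the ordering guarantees each triangle is reported exactly once.
triangles = sorted((a, b, c)
                   for a, b in edges
                   for c in adj[a] & adj[b]
                   if a < b < c)
print(triangles)  # [(1, 2, 3), (1, 2, 5), (1, 3, 4), (3, 7, 8)]
```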
{
"source": "123chengbo/caffe2",
"score": 2
} |
#### File: contrib/torch/torch_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace, dyndep
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
import os
from libfb import parutil
core.GlobalInit(["python", "--caffe2_log_level=0"])
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:torch_ops')
RUNTIME = parutil.get_runtime_path()
if 'LUA_PATH' not in os.environ:
os.environ['LUA_PATH'] = ";".join([
os.path.join(RUNTIME, '_lua', '?.lua'),
os.path.join(RUNTIME, '_lua', '?', 'init.lua'),
])
os.environ['LUA_CPATH'] = os.path.join(RUNTIME, '_lua', '?.so')
class TorchOpTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=1, max_value=10),
i=st.integers(min_value=1, max_value=10),
h=st.integers(min_value=2, max_value=10))
def test_feed(self, n, i, h):
op = core.CreateOperator(
"Torch", ["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
workspace.FeedBlob("x", x)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.RunOperatorOnce(op)
y = workspace.FetchBlob("y")
print("y", y)
y = y.reshape((n, h))
np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
@given(n=st.integers(min_value=1, max_value=10),
i=st.integers(min_value=1, max_value=10),
h=st.integers(min_value=2, max_value=10),
**hu.gcs)
def test_gradient(self, n, i, h, gc, dc):
op = core.CreateOperator(
"Torch", ["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
inputs = [x, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for i, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(n=st.integers(min_value=1, max_value=10),
i=st.integers(min_value=1, max_value=10),
h=st.integers(min_value=2, max_value=10),
iters=st.integers(min_value=1, max_value=100))
def test_iterated(self, n, i, h, iters):
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
workspace.FeedBlob("x", x)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
net = core.Net("op")
net.Torch(
["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
print(net.Proto())
workspace.CreateNet(net)
for i in range(iters):
if i % 1000 == 0:
print(i)
workspace.RunNet("op")
y = workspace.FetchBlob("y")
y = y.reshape((n, h))
np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
def test_leakage_torch(self):
n = 1
i = 100
h = 1000
iters = 2000
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
workspace.FeedBlob("x", x)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
net = core.Net("op")
net.Torch(
["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
workspace.CreateNet(net)
for i in range(iters):
if i % 1000 == 0:
print(i)
workspace.RunNet("op")
y = workspace.FetchBlob("y")
y = y.reshape((n, h))
np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
```
#### File: caffe2/python/extension_loader.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import sys
@contextlib.contextmanager
def DlopenGuard():
# In python 2.7 required constants are not defined.
# Thus they are listed explicitly
flags = sys.getdlopenflags()
sys.setdlopenflags(256 | 2) # RTLD_GLOBAL | RTLD_NOW
yield
sys.setdlopenflags(flags)
```
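A short usage sketch for the context manager above; `native_extension` is a placeholder name, not a real module:

```python
from caffe2.python.extension_loader import DlopenGuard

# Temporarily switch the dlopen flags to RTLD_GLOBAL | RTLD_NOW while a native
# extension is imported, then restore the previous flags afterwards.
with DlopenGuard():
    import native_extension  # hypothetical C extension that needs global symbols
```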
#### File: caffe2/python/hsm_util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import hsm_pb2
'''
Hierarchical softmax utility methods that can be used to:
1) create TreeProto structure given list of word_ids or NodeProtos
2) create HierarchyProto structure using the user-inputted TreeProto
'''
def create_node_with_words(words):
node = hsm_pb2.NodeProto()
for word in words:
node.word_ids.append(word)
return node
def create_node_with_nodes(nodes):
node = hsm_pb2.NodeProto()
for child_node in nodes:
new_child_node = node.children.add()
new_child_node.MergeFrom(child_node)
return node
def create_hierarchy(tree_proto):
max_index = 0
def create_path(path, word):
path_proto = hsm_pb2.PathProto()
path_proto.word_id = word
for entry in path:
new_path_node = path_proto.path_nodes.add()
new_path_node.index = entry[0]
new_path_node.length = entry[1]
new_path_node.target = entry[2]
return path_proto
def recursive_path_builder(node_proto, path, hierarchy_proto, max_index):
path.append([max_index,
len(node_proto.word_ids) + len(node_proto.children), 0])
max_index += len(node_proto.word_ids) + len(node_proto.children)
if hierarchy_proto.size < max_index:
hierarchy_proto.size = max_index
for target, node in enumerate(node_proto.children):
path[-1][2] = target
max_index = recursive_path_builder(node, path, hierarchy_proto,
max_index)
for target, word in enumerate(node_proto.word_ids):
path[-1][2] = target
path_entry = create_path(path, word)
new_path_entry = hierarchy_proto.paths.add()
new_path_entry.MergeFrom(path_entry)
del path[-1]
return max_index
node = tree_proto.root_node
hierarchy_proto = hsm_pb2.HierarchyProto()
path = []
max_index = recursive_path_builder(node, path, hierarchy_proto, max_index)
return hierarchy_proto
```
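A small usage sketch for the helpers above (it needs the caffe2 `hsm_pb2` protos to be built; the word ids are arbitrary example values):

```python
from caffe2.proto import hsm_pb2
from caffe2.python import hsm_util

# Two leaf nodes holding words, grouped under a single root node.
left = hsm_util.create_node_with_words([0, 1, 2])
right = hsm_util.create_node_with_words([3, 4])
root = hsm_util.create_node_with_nodes([left, right])

tree = hsm_pb2.TreeProto()
tree.root_node.MergeFrom(root)

# Flatten the tree into per-word paths for the hierarchical softmax operator.
hierarchy = hsm_util.create_hierarchy(tree)
print(hierarchy.size, len(hierarchy.paths))
```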
#### File: caffe2/python/memonger_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import workspace, cnn, memonger
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from hypothesis import given
class MemongerTest(hu.HypothesisTestCase):
@given(input_dim=st.integers(min_value=1, max_value=10),
output_dim=st.integers(min_value=1, max_value=10),
batch_size=st.integers(min_value=1, max_value=10),
do=st.sampled_from(hu.device_options))
def test_simple_memonger(self, input_dim, output_dim, batch_size, do):
m = cnn.CNNModelHelper()
fc1 = m.FC("data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = m.FC(fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
fc3 = m.FC(fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
fc3.Relu([], fc3)\
.Softmax([], "pred") \
.LabelCrossEntropy(["label"], ["xent"]) \
.AveragedLoss([], "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.net.Proto().device_option.CopyFrom(do)
m.param_init_net.Proto().device_option.CopyFrom(do)
static_blobs = \
[o for op in m.param_init_net.Proto().op for o in op.output] + \
["data", "label", "loss", input_to_grad["fc1_w"]]
optimization = memonger.optimize_interference(m.Proto(), static_blobs)
data = np.random.randn(batch_size, input_dim).astype(np.float32)
label = np.random.randint(
low=0, high=output_dim, size=(batch_size,)).astype(np.int32)
workspace.RunNetOnce(m.param_init_net)
workspace.FeedBlob("data", data, device_option=do)
workspace.FeedBlob("label", label, device_option=do)
workspace.RunNetOnce(m.net)
loss = workspace.FetchBlob("loss")
grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
workspace.RunNetOnce(optimization.net)
optimized_loss = workspace.FetchBlob("loss")
optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
np.testing.assert_almost_equal(loss, optimized_loss)
np.testing.assert_almost_equal(grad, optimized_grad)
stats = memonger.compute_statistics(optimization.assignments)
self.assertLess(stats.optimized_nbytes, stats.baseline_nbytes)
```
#### File: python/operator_test/gather_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
class TestGatherOps(TestCase):
def test_gather_ops(self):
data = np.array(["world", "hello", "!"], dtype='|S')
ind = np.array([1, 0, 2], dtype=np.int32)
workspace.FeedBlob('data', data)
workspace.FeedBlob('ind', ind)
workspace.RunOperatorOnce(core.CreateOperator(
'Gather', ['data', 'ind'], ['word']))
outdata = np.array(["hello", "world", "!"], dtype='|S')
assert((workspace.FetchBlob('word') == outdata).all())
```
#### File: caffe2/python/schema.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import logging
import numpy as np
logger = logging.getLogger(__name__)
def _join_field_name(prefix, suffix):
if prefix and suffix:
return '{}:{}'.format(prefix, suffix)
elif prefix:
return prefix
elif suffix:
return suffix
else:
return ''
class Field(object):
"""Represents an abstract field type in a dataset.
"""
def __init__(self, children):
"""Derived classes must call this after their initialization."""
self._parent = (None, 0)
offset = 0
self._field_offsets = []
for child in children:
self._field_offsets.append(offset)
offset += len(child.field_names())
self._field_offsets.append(offset)
def field_names(self):
"""Return the children field names for this field."""
raise NotImplementedError('Field is an abstract class.')
def field_types(self):
"""Return the numpy.dtype for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def clone(self):
"""Clone this Field along with its children."""
raise NotImplementedError('Field is an abstract class.')
def _set_parent(self, parent, relative_id):
self._parent = (parent, relative_id)
def slice(self):
"""
Returns a slice representing the range of field ids that belong to
this field. This slice can be used to index a list of fields.
E.g.:
>>> s = Struct(
>>> ('a', Scalar()),
>>> ('b', Struct(
>>> ('b1', Scalar()),
>>> ('b2', Scalar()),
>>> )),
>>> ('c', Scalar()),
>>> )
>>> field_data = ['da', 'db1', 'db2', 'dc']
>>> field_data[s.b.slice()]
['db1', 'db2']
"""
base_id = self._child_base_id()
return slice(base_id, base_id + len(self.field_names()))
def _child_base_id(self, child_index=None):
"""Get the base id of the given child"""
p, i = self._parent
pos = 0 if child_index is None else self._field_offsets[child_index]
if p:
pos += p._child_base_id(i)
return pos
def __eq__(self, other):
"""Equivalance of two schemas"""
return ((self.field_names() == other.field_names()) and
(self.field_types() == other.field_types()))
class List(Field):
"""Represents a variable-length list.
Values of a list can also be complex fields such as Lists and Structs.
In addition to the fields exposed by its `values` field, a List exposes an
additional `lengths` field, which will contain the size of each list under
the parent domain.
"""
def __init__(self, values):
assert isinstance(values, Field)
self.lengths = Scalar(np.int32)
self.values = values.clone()
self.lengths._set_parent(self, 0)
self.values._set_parent(self, 1)
Field.__init__(self, [self.lengths, self.values])
def field_names(self):
value_fields = self.values.field_names()
return (
['lengths'] +
[_join_field_name('values', v) for v in value_fields])
def field_types(self):
return self.lengths.field_types() + self.values.field_types()
def clone(self):
return List(self.values)
class Struct(Field):
"""Represents a named list of fields sharing the same domain.
"""
def __init__(self, *fields):
for field in fields:
assert len(field) == 2
assert field[0], 'Field names cannot be empty'
assert field[0] != 'lengths', (
'Struct cannot contain a field named `lengths`.')
assert isinstance(field[1], Field)
fields = [(name, field.clone()) for name, field in fields]
for id, (name, field) in enumerate(fields):
field._set_parent(self, id)
self.fields = OrderedDict(fields)
Field.__init__(self, self.fields.values())
def field_names(self):
names = []
for name, field in self.fields.items():
names += [_join_field_name(name, f) for f in field.field_names()]
return names
def field_types(self):
types = []
for name, field in self.fields.items():
types += field.field_types()
return types
def clone(self):
return Struct(*self.fields.items())
def __getattr__(self, item):
return self.fields[item]
class Scalar(Field):
"""Represents a typed scalar or tensor of fixed shape.
A Scalar is a leaf in a schema tree, translating to exactly one tensor in
the dataset's underlying storage.
Usually, the tensor storing the actual values of this field is a 1D tensor,
representing a series of values in its domain. It is possible however to
have higher rank values stored as a Scalar, as long as all entries have
the same shape.
E.g.:
Scalar(np.float64)
Scalar field of type float64. Caffe2 will expect readers and
datasets to expose it as a 1D tensor of doubles (vector), where
the size of the vector is determined by this field's domain.
Scalar((np.int32, 5))
Tensor field of type int32. Caffe2 will expect readers and
datasets to implement it as a 2D tensor (matrix) of shape (L, 5),
where L is determined by this field's domain.
Scalar((str, (10, 20)))
Tensor field of type str. Caffe2 will expect readers and
datasets to implement it as a 3D tensor of shape (L, 10, 20),
where L is determined by this field's domain.
If the field type is unknown at construction time, call Scalar(), that will
default to np.void as its dtype.
It is an error to pass a structured dtype to Scalar, since it would contain
more than one field. Instead, use from_dtype, which will construct
a nested `Struct` field reflecting the given dtype's structure.
"""
def __init__(self, dtype=None):
self._original_dtype = dtype
self.dtype = np.dtype(dtype or np.void)
assert not self.dtype.fields, (
'Cannot create Scalar with a structured dtype. ' +
'Use from_dtype instead.')
Field.__init__(self, [])
def field_names(self):
return ['']
def field_types(self):
return [self.dtype]
def clone(self):
return Scalar(self._original_dtype)
def id(self):
"""
Return the zero-indexed position of this scalar field in its schema.
Used in order to index into the field_blob list returned by readers or
accepted by writers.
"""
return self._child_base_id()
def Map(keys, values, keys_name='keys', values_name='values'):
"""A map is a List of Struct containing keys and values fields.
Optionally, you can provide custom name for the key and value fields.
"""
return List(Struct((keys_name, keys), (values_name, values)))
def from_dtype(dtype, _outer_shape=()):
"""Constructs a Caffe2 schema from the given numpy's dtype.
Numpy supports scalar, array-like and structured datatypes, as long as
all the shapes are fixed. This function breaks down the given dtype into
a Caffe2 schema containing `Struct` and `Scalar` types.
Fields containing byte offsets are not currently supported.
"""
if not isinstance(dtype, np.dtype):
# wrap into a numpy dtype
shape = _outer_shape
dtype = np.dtype((dtype, _outer_shape))
else:
# concatenate shapes if necessary
shape = _outer_shape + dtype.shape
if shape != dtype.shape:
dtype = np.dtype((dtype.base, shape))
if not dtype.fields:
return Scalar(dtype)
struct_fields = []
for name, (fdtype, offset) in dtype.fields.items():
assert offset == 0, ('Fields with byte offsets are not supported.')
struct_fields.append((name, from_dtype(fdtype, _outer_shape=shape)))
return Struct(*struct_fields)
class _SchemaNode(object):
"""This is a private class used to represent a Schema Node"""
def __init__(self, name, type_str=''):
self.name = name
self.children = []
self.type_str = type_str
self.field = None
def add_child(self, name, type_str=''):
for child in self.children:
if child.name == name and child.type_str == type_str:
return child
child = _SchemaNode(name, type_str)
self.children.append(child)
return child
def get_field(self):
list_names = ['lengths', 'values']
map_names = ['lengths', 'keys', 'values']
if len(self.children) == 0 or self.field is not None:
assert self.field is not None
return self.field
child_names = []
for child in self.children:
child_names.append(child.name)
if (set(child_names) == set(list_names)):
for child in self.children:
if child.name == 'values':
self.field = List(child.get_field())
self.type_str = "List"
return self.field
elif (set(child_names) == set(map_names)):
for child in self.children:
if child.name == 'keys':
key_field = child.get_field()
elif child.name == 'values':
values_field = child.get_field()
self.field = Map(key_field, values_field)
self.type_str = "Map"
return self.field
else:
struct_fields = []
for child in self.children:
if child.field is not None:
struct_fields.append((child.name, child.field))
else:
struct_fields.append((child.name, child.get_field()))
self.field = Struct(*struct_fields)
self.type_str = "Struct"
return self.field
def print_recursively(self):
for child in self.children:
child.print_recursively()
logger.info("Printing node: Name and type")
logger.info(self.name)
logger.info(self.type_str)
def from_column_list(column_names, column_types):
root = _SchemaNode('root', 'Struct')
for column_name, column_type in zip(column_names, column_types):
columns = column_name.split(':')
current = root
for i in range(len(columns)):
name = columns[i]
type_str = ''
field = None
if i == len(columns) - 1:
type_str = column_type
field = Scalar(column_type)
next = current.add_child(name, type_str)
if field is not None:
next.field = field
current = next
return root.get_field()
```
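A brief usage sketch of the schema types defined above, mirroring the example from the `Field.slice` docstring (illustrative only):

```python
import numpy as np
from caffe2.python import schema

s = schema.Struct(
    ('a', schema.Scalar(np.float32)),
    ('b', schema.Struct(
        ('b1', schema.Scalar(np.int32)),
        ('b2', schema.Scalar(np.int32)),
    )),
    ('c', schema.Scalar()),
)

print(s.field_names())          # ['a', 'b:b1', 'b:b2', 'c']
field_data = ['da', 'db1', 'db2', 'dc']
print(field_data[s.b.slice()])  # ['db1', 'db2']
```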
#### File: caffe2/python/utils.py
```python
from caffe2.proto import caffe2_pb2
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import numpy as np
import sys
if sys.version_info > (3,):
# This is python 3. We will define a few stuff that we used.
basestring = str
long = int
def CaffeBlobToNumpyArray(blob):
return (np.asarray(blob.data, dtype=np.float32)
.reshape(blob.num, blob.channels, blob.height, blob.width))
def Caffe2TensorToNumpyArray(tensor):
return np.asarray(tensor.float_data, dtype=np.float32).reshape(tensor.dims)
def NumpyArrayToCaffe2Tensor(arr, name):
tensor = caffe2_pb2.TensorProto()
tensor.data_type = caffe2_pb2.TensorProto.FLOAT
tensor.name = name
tensor.dims.extend(arr.shape)
tensor.float_data.extend(list(arr.flatten().astype(float)))
return tensor
def MakeArgument(key, value):
"""Makes an argument based on the value type."""
argument = caffe2_pb2.Argument()
argument.name = key
if type(value) is float:
argument.f = value
elif type(value) is int or type(value) is bool or type(value) is long:
# We make a relaxation that a boolean variable will also be stored as
# int.
argument.i = value
elif isinstance(value, basestring):
argument.s = (value if type(value) is bytes
else value.encode('utf-8'))
elif isinstance(value, Message):
argument.s = value.SerializeToString()
elif all(type(v) is float for v in value):
argument.floats.extend(value)
elif all(any(type(v) is t for t in [int, bool, long]) for v in value):
argument.ints.extend(value)
elif all(isinstance(v, basestring) for v in value):
argument.strings.extend([
(v if type(v) is bytes else v.encode('utf-8')) for v in value])
elif all(isinstance(v, Message) for v in value):
argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError(
"Unknown argument type: key=%s value=%s, value type=%s" %
(key, str(value), str(type(value)))
)
return argument
def TryReadProtoWithClass(cls, s):
"""Reads a protobuffer with the given proto class.
Inputs:
cls: a protobuffer class.
s: a string of either binary or text protobuffer content.
Outputs:
proto: the protobuffer of cls
Throws:
google.protobuf.message.DecodeError: if we cannot decode the message.
"""
obj = cls()
try:
text_format.Parse(s, obj)
return obj
except text_format.ParseError:
obj.ParseFromString(s)
return obj
def GetContentFromProto(obj, function_map):
"""Gets a specific field from a protocol buffer that matches the given class
"""
for cls, func in function_map.items():
if type(obj) is cls:
return func(obj)
def GetContentFromProtoString(s, function_map):
for cls, func in function_map.items():
try:
obj = TryReadProtoWithClass(cls, s)
return func(obj)
except DecodeError:
continue
else:
raise DecodeError("Cannot find a fit protobuffer class.")
``` |
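A short usage sketch for `MakeArgument` (it needs the caffe2 protos; the argument names are arbitrary examples):

```python
from caffe2.python.utils import MakeArgument

# Scalars land in the f/i/s fields, iterables in floats/ints/strings.
arg_scale = MakeArgument("scale", 0.5)         # float  -> argument.f
arg_iters = MakeArgument("iters", 10)          # int    -> argument.i
arg_mode = MakeArgument("mode", "train")       # string -> argument.s (utf-8 encoded)
arg_shape = MakeArgument("shape", [1, 3, 32])  # ints   -> argument.ints
print(arg_shape)
```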
{
"source": "123electric/123SmartBMS-Venus",
"score": 2
} |
#### File: 123electric/123SmartBMS-Venus/smartbms.py
```python
import argparse
import os
import serial
import serial.tools.list_ports
import struct
import sys
import threading
import time
from collections import deque
from datetime import datetime
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib
# Victron packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '/opt/victronenergy/dbus-systemcalc-py/ext/velib_python'))
import dbus.service
import ve_utils
from vedbus import VeDbusService
from settingsdevice import SettingsDevice
class MAFilter:
def __init__(
self,
filter_size,
initial_value):
self.buffer = [initial_value]*filter_size
self.pos = 0
self.filter_size = filter_size
def add(self, value):
self.buffer[self.pos] = value
self.pos = (self.pos+1) % self.filter_size
def get_average(self):
return sum(self.buffer)/self.filter_size
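# Illustrative note (added for this write-up): MAFilter keeps a fixed-size ring
# buffer and reports the plain average of its contents. With MAFilter(3, False),
# adding True once gives get_average() == 1/3, so the "> 0.5" tests used for the
# alarm flags below only trip once the same value has been seen in the majority
# of the last three samples.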
class SmartBMSSerial:
BMS_COMM_TIMEOUT = 10 # Seconds
BMS_COMM_BLOCK_SIZE = 58
BATTERY_CHARGE_STATE_BULKABSORPTION = 1
BATTERY_CHARGE_STATE_STORAGE = 2
BATTERY_CHARGE_STATE_ERROR = 3
def __init__(
self,
loop,
dev
):
self.loop = loop
self.last_received = 0
self.battery_voltage = 0
self.charge_current = 0
self.discharge_current = 0
self.battery_current = 0
self.soc = 0
self.lowest_cell_voltage = 0
self.lowest_cell_voltage_num = 0
self.highest_cell_voltage = 0
self.highest_cell_voltage_num = 0
self.lowest_cell_temperature = 0
self.lowest_cell_temperature_num = 0
self.highest_cell_temperature = 0
self.highest_cell_temperature_num = 0
self.cell_count = 0
self.capacity = 0
self.capacity_ah = 0
self.energy_stored_wh = 0
self.ah_stored = 0
self.cell_voltage_min = 0
self.cell_voltage_max = 0
self.cell_voltage_full = 0
self.time_to_go = 0
# Alarm counters act as a filter against corrupted bits, on top of the checksum safety
self.alarm_minimum_voltage_ma_filter = MAFilter(3, False)
self.alarm_maximum_voltage_ma_filter = MAFilter(3, False)
self.alarm_minimum_temperature_ma_filter = MAFilter(3, False)
self.alarm_maximum_temperature_ma_filter = MAFilter(3, False)
self.alarm_cell_communication_ma_filter = MAFilter(3, False)
self.allowed_to_charge_ma_filter = MAFilter(3, False)
self.allowed_to_discharge_ma_filter = MAFilter(3, False)
self.alarm_minimum_voltage = False
self.alarm_maximum_voltage = False
self.alarm_minimum_temperature = False
self.alarm_maximum_temperature = False
self.alarm_cell_communication = 0
self.allowed_to_charge = None
self.allowed_to_discharge = None
self.nominal_battery_voltage = 0
self.consumed_ah = 0
self.battery_charge_state = self.BATTERY_CHARGE_STATE_BULKABSORPTION
self._battery_full_counter = 0
self._balanced_timer = 0
self._unbalance_detection_timer = 0
self._comm_error_shadow = False
self._current_filter = deque()
self.lock = threading.Lock()
self._poller = threading.Thread(target=lambda:self._poll(dev))
self._poller.daemon = True
self._poller.start()
@property
def alarm_serial_communication(self):
if time.time() > self.last_received + self.BMS_COMM_TIMEOUT:
return True
else:
return False
def determine_nominal_cell_voltage(self):
if self.cell_voltage_full >= 3.4 and self.cell_voltage_full <= 3.6: return 3.3
if self.cell_voltage_full >= 3.9 and self.cell_voltage_full <= 4.4: return 3.7
if self.cell_voltage_full >= 2.5 and self.cell_voltage_full <= 2.7: return 2.3
return 0
# Must be called every second
def update(self):
self._calculate_consumed_ah()
self._update_time_to_go()
self._battery_charge_state()
if self.alarm_serial_communication and not self._comm_error_shadow:
self._comm_error_shadow = True
print('Serial comm error')
if not self.alarm_serial_communication and self._comm_error_shadow:
self._comm_error_shadow = False
print('Serial comm restored')
def _poll(self, dev, test_packet = ''):
try:
# The SmartBMS transmits a message containing 58 bytes every 500ms or 1000ms
# When the serial port does not contain any new bytes and no complete message was received, empty the buffer and wait for a new message
buffer = bytearray (self.BMS_COMM_BLOCK_SIZE)
buffer_index = 0
time.sleep(0.5)
self._ser = serial.Serial(dev, 9600)
while(1):
if len(test_packet) > 0:
read_data = test_packet
else:
waiting_bytes = self._ser.in_waiting
read_data = self._ser.read(waiting_bytes)
if len(read_data) > 0:
for c in read_data:
if buffer_index <= self.BMS_COMM_BLOCK_SIZE-1:
buffer[buffer_index] = c
buffer_index += 1
if buffer_index == self.BMS_COMM_BLOCK_SIZE:
checksum = 0
for i in range (self.BMS_COMM_BLOCK_SIZE-1):
checksum += buffer[i]
received_checksum = buffer[self.BMS_COMM_BLOCK_SIZE-1]
if(checksum & 0xff) == received_checksum:
self.lock.acquire()
self.cell_voltage_full = self._decode_voltage(buffer[55:57]) # Value important for estimating nominal voltage, keep on top
self.cell_voltage_min = self._decode_voltage(buffer[51:53])
self.cell_voltage_max = self._decode_voltage(buffer[53:55])
self.battery_voltage = round(self._decode_voltage(buffer[0:3]), 2)
self.charge_current = self._decode_current(buffer[3:6])
self.discharge_current = self._decode_current(buffer[6:9])
self.battery_current = self._decode_current(buffer[9:12])
self.soc = buffer[40]
self.lowest_cell_voltage = self._decode_voltage(buffer[12:14])
self.lowest_cell_voltage_num = buffer[14]
self.highest_cell_voltage = self._decode_voltage(buffer[15:17])
self.highest_cell_voltage_num = buffer[17]
self.lowest_cell_temperature = self._decode_temperature(buffer[18:20])
self.lowest_cell_temperature_num = buffer[20]
self.highest_cell_temperature = self._decode_temperature(buffer[21:23])
self.highest_cell_temperature_num = buffer[23]
self.cell_count = buffer[25]
self.energy_stored_wh = self._decode_value(buffer[34:37], 1)
self.capacity = round(self._decode_value(buffer[49:51], 0.1), 1) # in kWh
self.nominal_battery_voltage = self.determine_nominal_cell_voltage()*self.cell_count
if self.nominal_battery_voltage != 0:
self.capacity_ah = self.capacity*1000/self.nominal_battery_voltage
self.energy_stored_ah = self.energy_stored_wh/self.nominal_battery_voltage
else:
self.capacity_ah = 0
self.energy_stored_ah = 0
self.energy_stored_wh = self._decode_value(buffer[34:37], 1)
self.alarm_minimum_voltage_ma_filter.add(True if (buffer[30] & 0b00001000) else False)
self.alarm_maximum_voltage_ma_filter.add(True if (buffer[30] & 0b00010000) else False)
self.alarm_minimum_temperature_ma_filter.add(True if (buffer[30] & 0b00100000) else False)
self.alarm_maximum_temperature_ma_filter.add(True if (buffer[30] & 0b01000000) else False)
self.alarm_cell_communication_ma_filter.add(True if (buffer[30] & 0b00000100) else False)
self.allowed_to_discharge_ma_filter.add(True if (buffer[30] & 0b00000010) else False)
self.allowed_to_charge_ma_filter.add(True if (buffer[30] & 0b00000001) else False)
self.alarm_minimum_voltage = self.alarm_minimum_voltage_ma_filter.get_average() > 0.5
self.alarm_maximum_voltage = self.alarm_maximum_voltage_ma_filter.get_average() > 0.5
self.alarm_minimum_temperature = self.alarm_minimum_temperature_ma_filter.get_average() > 0.5
self.alarm_maximum_temperature = self.alarm_maximum_temperature_ma_filter.get_average() > 0.5
self.alarm_cell_communication = self.alarm_cell_communication_ma_filter.get_average() > 0.5
self.allowed_to_discharge = self.allowed_to_discharge_ma_filter.get_average() > 0.5
self.allowed_to_charge = self.allowed_to_charge_ma_filter.get_average() > 0.5
self.last_received = time.time()
self.lock.release()
buffer_index = 0
elif len(read_data) == 0:
buffer_index = 0
time.sleep(0.2)
except Exception as e:
print('Fatal exception: ')
print(e)
self.loop.quit()
def _decode_current(self, raw_value):
if raw_value[0] == ord('X'):
return 0
elif raw_value[0] == ord('-'):
factor = -1
else:
factor = 1
return factor*round(0.125*struct.unpack('>H', raw_value[1:3])[0],1)
def _decode_value(self, raw_value, multiplier):
if len(raw_value) == 3:
value = struct.unpack('>L', bytearray(b'\x00')+raw_value)[0]
else:
value = struct.unpack('>H', raw_value[0:2])[0]
return round(multiplier*value, 3)
def _decode_voltage(self, raw_value):
return self._decode_value(raw_value, 0.005)
def _decode_temperature(self, raw_value):
return round(struct.unpack('>H', raw_value[0:2])[0]*0.857-232,0)
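# Worked example (added for this write-up, values illustrative): a raw voltage
# field of 0x02 0xEE unpacks to 750 and 750 * 0.005 = 3.75 V; a raw temperature
# field of 0x01 0x2C unpacks to 300 and 300 * 0.857 - 232 = 25.1, i.e. about
# 25 degrees C. Current fields carry a marker in their first byte ('X' means
# zero, '-' means negative) and are scaled by 0.125 A per bit in _decode_current.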
def _calculate_consumed_ah(self):
self.consumed_ah = round(-1*(self.capacity_ah-self.energy_stored_ah),1)+0 # Add zero to remove the negative sign from -0.0
def _update_time_to_go(self):
# Filter current with a 3 minute moving average filter to stabilize the time-to-go
if len(self._current_filter) >= 180:
self._current_filter.popleft()
self._current_filter.append(self.battery_current)
current_filter_sum = 0
for value in self._current_filter:
current_filter_sum += value
current_filter_average = current_filter_sum/len(self._current_filter)
battery_power_filtered = (self.nominal_battery_voltage * -1 * current_filter_average)
normalized_power = self.capacity*1000*0.05 # The battery capacity was rated at a current of <= 0.05C -> convert that rating to the corresponding measurement power (in W)
if current_filter_average < 0 and battery_power_filtered > 0 and normalized_power > 0 and self.capacity > 0: # > 0 to avoid divide by zero
# When discharge power is bigger than normalized current, use Peukert-like formula
if battery_power_filtered > normalized_power:
time_to_go_from_full = 60 * 60 * (self.capacity*1000)/(pow(battery_power_filtered/normalized_power, 1.02))/normalized_power
time_to_go = time_to_go_from_full*(self.energy_stored_wh/(self.capacity*1000))
else:
time_to_go = self.energy_stored_wh * 60 * 60 / battery_power_filtered
self.time_to_go = time_to_go
else:
self.time_to_go = None
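# Worked example (added for this write-up, numbers illustrative): for a 10 kWh
# pack, normalized_power = 10 * 1000 * 0.05 = 500 W. With a filtered discharge
# power of 1000 W and 5000 Wh stored, P/Pn = 2 and 2**1.02 ~= 2.028, so
# time_to_go_from_full ~= 3600 * 10000 / 2.028 / 500 ~= 35500 s; scaling by the
# stored fraction (0.5) gives ~17750 s (~4.9 h), slightly below the naive
# 5000 Wh / 1000 W = 5 h because of the Peukert-like exponent.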
def _battery_charge_state(self):
battery_current = self.battery_current
if self.lowest_cell_voltage >= self.cell_voltage_full:
battery_current -= 1 # When all cells balance, the charge current is 1A lower because of passive balancing
if self.battery_charge_state == self.BATTERY_CHARGE_STATE_BULKABSORPTION:
if self.lowest_cell_voltage >= self.cell_voltage_full and battery_current < self.capacity_ah*0.05:
self._battery_full_counter += 1
else:
self._battery_full_counter = 0
if self._battery_full_counter >= 30 and self.soc == 100: # When BMS also sees the pack as full
self.battery_charge_state = self.BATTERY_CHARGE_STATE_STORAGE
elif self.battery_charge_state == self.BATTERY_CHARGE_STATE_STORAGE:
# Battery idle and unbalance of more than 40mV
if self.capacity_ah*-0.05 < battery_current < self.capacity_ah*0.05 and self.highest_cell_voltage < self.cell_voltage_full and self.highest_cell_voltage - self.lowest_cell_voltage >= 0.04:
self._unbalance_detection_timer += 1
else:
self._unbalance_detection_timer = 0
# Balance at least once every 4 days
self._balanced_timer += 1
# A voltage difference of at least 40mV for at least 5 minutes in a row? Unbalance detected
if self._unbalance_detection_timer > 5*60 or self._balanced_timer >= 4*24*60*60:
self._unbalance_detection_timer = 0
self._balanced_timer = 0
self.battery_charge_state = self.BATTERY_CHARGE_STATE_BULKABSORPTION
else:
self.battery_charge_state = self.BATTERY_CHARGE_STATE_BULKABSORPTION
class SmartBMSToDbus(SmartBMSSerial):
def __init__(self, loop, dev, serial_id):
super().__init__(loop, dev)
self._dev = dev
self._serial_id = serial_id
self._info = {
'name' : "123SmartBMS",
'servicename' : "123SmartBMS",
'id' : 0,
'version' : 1.03
}
device_port = args.device[dev.rfind('/') + 1:]
device_port_num = device_port[device_port.rfind('USB') + 3:]
self._device_instance = 288+int(device_port_num)
self._dbusservice = VeDbusService("com.victronenergy.battery." + device_port)
# Create the management objects, as specified in the ccgx dbus-api document
self._dbusservice.add_path('/Mgmt/ProcessName', __file__)
self._dbusservice.add_path('/Mgmt/ProcessVersion', self._info['version'])
self._dbusservice.add_path('/Mgmt/Connection', ' Serial ' + dev)
# Create the basic objects
self._dbusservice.add_path('/DeviceInstance', self._device_instance)
self._dbusservice.add_path('/ProductId', self._info['id'])
self._dbusservice.add_path('/ProductName', self._info['name'])
self._dbusservice.add_path('/FirmwareVersion', self._info['version'], gettextcallback=lambda p, v: "v{:.2f}".format(v))
self._dbusservice.add_path('/HardwareVersion', None)
self._dbusservice.add_path('/Serial', self._serial_id)
self._dbusservice.add_path('/Connected', None)
# Create device list
self._dbusservice.add_path('/Devices/0/DeviceInstance', self._device_instance)
self._dbusservice.add_path('/Devices/0/FirmwareVersion', self._info['version'])
self._dbusservice.add_path('/Devices/0/ProductId', self._info['id'])
self._dbusservice.add_path('/Devices/0/ProductName', self._info['name'])
self._dbusservice.add_path('/Devices/0/ServiceName', self._info['servicename'])
self._dbusservice.add_path('/Devices/0/VregLink', "(API)")
# Create the bms paths
self._dbusservice.add_path('/TimeToGo', None)
self._dbusservice.add_path('/SystemSwitch', None)
self._dbusservice.add_path('/Soc', None, gettextcallback=lambda p, v: "{:.0f}%%".format(v))
self._dbusservice.add_path('/Capacity', None, gettextcallback=lambda p, v: "{:.1f}Ah".format(v))
self._dbusservice.add_path('/InstalledCapacity', None, gettextcallback=lambda p, v: "{:.1f}Ah".format(v))
self._dbusservice.add_path('/ConsumedAmphours', None, gettextcallback=lambda p, v: "{:.1f}Ah".format(v))
self._dbusservice.add_path('/UpdateTimestamp', None)
self._dbusservice.add_path('/Dc/0/Voltage', None, gettextcallback=lambda p, v: "{:.2f}V".format(v))
self._dbusservice.add_path('/Dc/0/Current', None, gettextcallback=lambda p, v: "{:.1f}A".format(v))
self._dbusservice.add_path('/Dc/0/Power', None, gettextcallback=lambda p, v: "{:.0f}W".format(v))
self._dbusservice.add_path('/Dc/0/Temperature', None)
self._dbusservice.add_path('/Io/AllowToCharge', None)
self._dbusservice.add_path('/Io/AllowToDischarge', None)
#self._dbusservice.add_path('/Voltages/Cell1', None)
#self._dbusservice.add_path('/Voltages/Cell2', None)
self._dbusservice.add_path('/System/MaxCellVoltage', None, gettextcallback=lambda p, v: "{:.2f}V".format(v))
self._dbusservice.add_path('/System/MinCellVoltage', None, gettextcallback=lambda p, v: "{:.2f}V".format(v))
self._dbusservice.add_path('/System/MinVoltageCellId', None)
self._dbusservice.add_path('/System/MaxVoltageCellId', None)
self._dbusservice.add_path('/System/MinCellTemperature', None)
self._dbusservice.add_path('/System/MinTemperatureCellId', None)
self._dbusservice.add_path('/System/MaxCellTemperature', None)
self._dbusservice.add_path('/System/MaxTemperatureCellId', None)
self._dbusservice.add_path('/System/NrOfModulesOnline', None)
self._dbusservice.add_path('/System/NrOfModulesOffline', None)
self._dbusservice.add_path('/System/NrOfModulesBlockingCharge', None)
self._dbusservice.add_path('/System/NrOfModulesBlockingDischarge', None)
self._dbusservice.add_path('/System/BatteryChargeState', None)
self._dbusservice.add_path('/System/LowVoltageThreshold', None)
self._dbusservice.add_path('/System/HighVoltageThreshold', None)
self._dbusservice.add_path('/System/FullVoltageThreshold', None)
self._dbusservice.add_path('/System/NrOfCells', None)
self._dbusservice.add_path('/Alarms/LowVoltage', None)
self._dbusservice.add_path('/Alarms/HighVoltage', None)
self._dbusservice.add_path('/Alarms/LowTemperature', None)
self._dbusservice.add_path('/Alarms/HighTemperature', None)
# Register persistent settings
self._settings_register()
# Register paths which can be externally changed, for example via the GUI
self._dbusservice.add_path('/CustomName', value=self._settings['CustomName'], writeable=True, onchangecallback=self._settext)
def update(self):
super().update()
if self.alarm_cell_communication or self.alarm_serial_communication:
self._dbusservice["/Connected"] = 1
self._dbusservice["/Soc"] = None
self._dbusservice["/SystemSwitch"] = None
self._dbusservice["/ConsumedAmphours"] = None
self._dbusservice["/Capacity"] = None
self._dbusservice["/InstalledCapacity"] = None
self._dbusservice['/TimeToGo'] = None
self._dbusservice["/Dc/0/Voltage"] = None
self._dbusservice["/Dc/0/Current"] =None
self._dbusservice["/Dc/0/Power"] = None
self._dbusservice["/Dc/0/Temperature"] = None
self._dbusservice["/Io/AllowToCharge"] = None
self._dbusservice["/Io/AllowToDischarge"] = None
self._dbusservice["/System/MinCellVoltage"] = None
self._dbusservice["/System/MinVoltageCellId"] = None
self._dbusservice["/System/MaxCellVoltage"] = None
self._dbusservice["/System/MaxVoltageCellId"] = None
self._dbusservice["/System/MinCellTemperature"] = None
self._dbusservice["/System/MinTemperatureCellId"] = None
self._dbusservice["/System/MaxCellTemperature"] = None
self._dbusservice["/System/MaxTemperatureCellId"] = None
self._dbusservice["/System/NrOfModulesOnline"] = 0
self._dbusservice["/System/NrOfModulesOffline"] = 1
self._dbusservice["/System/NrOfModulesBlockingCharge"] = None
self._dbusservice["/System/NrOfModulesBlockingDischarge"] = None
self._dbusservice["/System/BatteryChargeState"] = None
self._dbusservice["/System/LowVoltageThreshold"] = None
self._dbusservice["/System/HighVoltageThreshold"] = None
self._dbusservice["/System/FullVoltageThreshold"] = None
self._dbusservice["/System/NrOfCells"] = None
self._dbusservice["/Alarms/LowVoltage"] = None
self._dbusservice["/Alarms/HighVoltage"] = None
self._dbusservice["/Alarms/LowTemperature"] = None
self._dbusservice["/Alarms/HighTemperature"] = None
else:
self._dbusservice["/Connected"] = 1
self._dbusservice["/Soc"] = self.soc
self._dbusservice["/SystemSwitch"] = 1
self._dbusservice["/ConsumedAmphours"] = self.consumed_ah
self._dbusservice["/Capacity"] = round(self.energy_stored_ah, 1)
self._dbusservice["/InstalledCapacity"] = self.capacity_ah
self._dbusservice['/TimeToGo'] = self.time_to_go
self._dbusservice["/Dc/0/Voltage"] = self.battery_voltage
self._dbusservice["/Dc/0/Current"] = self.battery_current
self._dbusservice["/Dc/0/Power"] = self.battery_voltage * self.battery_current
self._dbusservice["/Dc/0/Temperature"] = self.highest_cell_temperature
self._dbusservice["/Io/AllowToCharge"] = int(self.allowed_to_charge)
self._dbusservice["/Io/AllowToDischarge"] = int(self.allowed_to_discharge)
self._dbusservice["/System/MinCellVoltage"] = round(self.lowest_cell_voltage, 2)
self._dbusservice["/System/MinVoltageCellId"] = self.lowest_cell_voltage_num
self._dbusservice["/System/MaxCellVoltage"] = round(self.highest_cell_voltage, 2)
self._dbusservice["/System/MaxVoltageCellId"] = self.highest_cell_voltage_num
self._dbusservice["/System/MinCellTemperature"] = self.lowest_cell_temperature
self._dbusservice["/System/MinTemperatureCellId"] = self.lowest_cell_temperature_num
self._dbusservice["/System/MaxCellTemperature"] = self.highest_cell_temperature
self._dbusservice["/System/MaxTemperatureCellId"] = self.highest_cell_temperature_num
self._dbusservice["/System/NrOfModulesOnline"] = 1
self._dbusservice["/System/NrOfModulesOffline"] = 0
self._dbusservice["/System/NrOfModulesBlockingCharge"] = int(not self.allowed_to_charge)
self._dbusservice["/System/NrOfModulesBlockingDischarge"] = int(not self.allowed_to_discharge)
self._dbusservice["/System/BatteryChargeState"] = self.battery_charge_state
self._dbusservice["/System/LowVoltageThreshold"] = self.cell_voltage_min
self._dbusservice["/System/HighVoltageThreshold"] = self.cell_voltage_max
self._dbusservice["/System/FullVoltageThreshold"] = self.cell_voltage_full
self._dbusservice["/System/NrOfCells"] = self.cell_count
self._dbusservice["/Alarms/LowVoltage"] = 2 if self.alarm_minimum_voltage else 0
self._dbusservice["/Alarms/HighVoltage"] = 2 if self.alarm_maximum_voltage else 0
self._dbusservice["/Alarms/LowTemperature"] = 2 if self.alarm_minimum_temperature else 0
self._dbusservice["/Alarms/HighTemperature"] = 2 if self.alarm_maximum_temperature else 0
        # Needs to be the last thing so others know we finished updating
self._dbusservice["/UpdateTimestamp"] = int(time.time())
def _settext(self, path, value): # Currently only used for CustomName
self._settings['CustomName'] = value
return True
def _settings_handle_changed(self, setting, oldvalue, newvalue):
return True
def _settings_register(self):
# Load all persistent data
self._settings = SettingsDevice(
dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus(),
supportedSettings={
'CustomName': ['/Settings/123electric/Products/'+ self._serial_id + '/CustomName', self._info['name'], 0, 0]
},
eventCallback = self._settings_handle_changed)
# Called on a one second timer
def handle_timer_tick():
    # The BMS data readout and variable writing happen on a different thread -> lock before
bms_dbus.lock.acquire()
bms_dbus.update()
bms_dbus.lock.release()
return True # keep timer running
if __name__ == "__main__":
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
print('123\\SmartBMS to dbus started')
DBusGMainLoop(set_as_default=True)
parser = argparse.ArgumentParser(description = '123SmartBMS to dbus')
requiredArguments = parser.add_argument_group('required arguments')
requiredArguments.add_argument('-d', '--device', help='serial device for data (eg /dev/ttyUSB0)', required=True)
args = parser.parse_args()
dev_objects = serial.tools.list_ports.comports()
device_serial_numbers = {}
for d in dev_objects:
device_serial_numbers[d.device] = d.serial_number
mainloop = GLib.MainLoop()
bms_dbus = SmartBMSToDbus(mainloop, args.device, device_serial_numbers[args.device])
time.sleep(8) # Wait until we have received some data and the filters are filled
GLib.timeout_add(1000, lambda: ve_utils.exit_on_error(handle_timer_tick))
mainloop.run()
``` |
{
"source": "123freezebrennan/OpenVINO-Car-Pedestrian-Tracker-and-Counter",
"score": 2
} |
#### File: Main/Files/inference.py
```python
from openvino.inference_engine import IECore, IENetwork
class Network:
    # Constructor to declare variables; if any of these are still 'None' at runtime, an error occurred while initializing it
def __init__(self):
        # NEED TO: put notes down indicating what each does
self.plugin = None
self.network = None
self.input_blob = None
self.output_blob = None
self.exec_network = None
self.infer_request = None
def load_model(self, model, bin, device = "CPU"):
        # Reads the IR model: the .xml topology file and the corresponding .bin weights file (expected in the same folder)
model_xml = model
model_bin = bin
self.plugin = IECore()
self.network = IENetwork(model_xml, weights = model_bin)
self.exec_network = self.plugin.load_network(self.network, device)
self.input_blob = next(iter(self.network.inputs))
self.output_blob = next(iter(self.network.outputs))
return
def get_input_shape(self):
return self.network.inputs[self.input_blob].shape
def async_inference(self, image):
self.exec_network.start_async(request_id = 0, inputs = {self.input_blob: image})
return
def synchronous_inference(self,image):
self.exec_network.infer({self.input_blob: image})
return
def wait(self):
status = self.exec_network.requests[0].wait(-1)
return status
def extract_output(self):
return self.exec_network.requests[0].outputs[self.output_blob]
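    # Hedged usage sketch (added for illustration; file names and the preprocessed frame are hypothetical):
    #   net = Network()
    #   net.load_model("model.xml", "model.bin", device="CPU")
    #   n, c, h, w = net.get_input_shape()
    #   net.async_inference(frame)              # frame already resized/transposed to the input shape
    #   if net.wait() == 0:                     # 0 indicates the request completed successfully
    #       detections = net.extract_output()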
``` |
{
"source": "123hotdog1100/RandomName",
"score": 4
} |
#### File: 123hotdog1100/RandomName/calculator.py
```python
import tkinter# importing tkinter gui module
#Setting up the variables
tk = tkinter
window = tk.Tk()
answer = 0
#setting up the labels
label1 = tk.Label(window, text = "What is the first number? ").grid(row = 0)
e = tk.Entry(window)
e.grid(row=0, column = 1)
label2 = tk.Label(window, text = "What is the second number? ").grid(row = 1)
e2 = tk.Entry(window)
e2.grid(row=1, column = 1)
label3 = tk.Label(window, text = "Your answer is ")
label3.grid(row =5, column = 0)
#Defining all the operations
def equals():
global label3, answer
strans = str(answer)
label3.configure(text = "Your answer is " + strans)
def add():
global answer, inte, inte2
inte = int(e.get())
inte2 = int(e2.get())
answer = inte + inte2
equals()
def minus():
global answer, inte, inte2
inte = int(e.get())
inte2 = int(e2.get())
answer = inte - inte2
equals()
def times():
global answer, inte, inte2
inte = int(e.get())
inte2 = int(e2.get())
answer = inte * inte2
equals()
def divide():
global answer, inte, inte2
inte = int(e.get())
inte2 = int(e2.get())
answer = inte / inte2
equals()
def square():
global label3, answer, inte
inte = int(e.get())
#inte2 = int(e2.get())
answer = inte * inte
equals()
def cube():
global label3, answer, inte
inte = int(e.get())
#inte2 = int(e2.get())
answer = inte * inte * inte
equals()
btn = tk.Button(window, text= "+", command = add, height = 2, width = 4)
btn.grid(row = 3, column = 3)
btn2 = tk.Button(window, text= "-", command = minus, height = 2, width = 4)
btn2.grid(row = 4, column = 3)
btn3 = tk.Button(window, text= "*", command = times, height = 2, width = 4)
btn3.grid(row = 5, column = 3)
btn4 = tk.Button(window, text= "/", command = divide, height = 2, width = 4)
btn4.grid(row = 6, column = 3)
btn5 = tk.Button(window, text= "^2", command = square, height = 2, width = 4)
btn5.grid(row = 7, column = 3)
btn6 = tk.Button(window, text= "^3", command = cube, height = 2, width = 4)
btn6.grid(row = 8, column = 3)
btn7 = tk.Button(window, text= "=", command = equals, height = 2, width = 4)
btn7.grid(row = 9, column = 3)
window.title("Calculator")
window.geometry("360x280")
window.mainloop()
```
#### File: 123hotdog1100/RandomName/Random_Name.py
```python
from random import randint
import tkinter
##gui variables
tk = tkinter
window = tk.Tk()
label = tk.Label(window, text ="Hello, Welcome to the random name generator").pack()
##variables
namelist = []
label2 = tk.Label(window)
label3 = tk.Label(window, text ="Enter the names you want to use and press the button after each one").pack()
labelcheck = tk.Label(window)
e1 = tk.Entry(window)
e1.pack()
def randomdef():
    global label2, e1, namelist, lengthlist, randomnum, labelcheck
    if not namelist:
        labelcheck.config(text="Add at least one name before generating")
        return
    randomnum = randint(0, lengthlist)
    label2.config(text="Your random name is: " + namelist[randomnum])
    labelcheck.config(text="")
def namecheck():
global namelist, e1, randomnum, labelcheck, lengthlist
namelist.append(e1.get())
labelcheck.config(text="Your name has been added and the list is now " + ", ".join(namelist))
lengthlist = len(namelist) - 1
def nameclear():
global namelist, label2
label2.config(text="")
namelist = []
labelcheck.config(text="Your name list has been cleared check to see if it is here(should be blank): " + ", ".join(namelist))
def nameimport():
    global namelist, file, lengthlist
    file = open("namelist.txt","r")
    f1 = file.readlines()
    for x in f1:
        namelist.append(x.strip())
    file.close()
    lengthlist = len(namelist) - 1
def nameexport():
global file, namelist
file = open("namelist.txt","w+")
file.write("\n".join(namelist))
file.close()
window.geometry("620x620")
window.title("Random number generator")
btn1 = tk.Button(window, text = "Add names", command = namecheck).pack()
btn2 = tk.Button(window, text = "Generate", fg = "blue", command = randomdef).pack()
btn3 = tk.Button(window, text = "Clear names",fg = "red", command = nameclear).pack()
btn4 = tk.Button(window, text = "Name import from file",fg = "green", command = nameimport).pack()
btn5 = tk.Button(window, text = "Name export to a file",fg = "red", command = nameexport).pack()
label2.pack()
labelcheck.pack()
window.mainloop()
``` |
{
"source": "123joshuawu/orca",
"score": 3
} |
#### File: orca/tests/test_registrar.py
```python
from api.models import ClassTypeEnum
from api.parser.registrar import Registrar
def test_parse_period_types():
types = Registrar.parse_period_types("202101")
assert types[('40432', 3, '21:05')] == ClassTypeEnum.TEST
assert types[('44040', 4, '12:20')] == ClassTypeEnum.LECTURE
assert types[('41340', 1, '12:20')] == ClassTypeEnum.LECTURE
assert types[('41340', 4, '12:20')] == ClassTypeEnum.LECTURE
assert types[('41536', 1, '09:05')] == ClassTypeEnum.TEST
assert types[('41536', 3, '12:20')] == ClassTypeEnum.LAB
```
#### File: orca/tests/test_utils.py
```python
from api.parser.utils import sanitize
def test_sanitize():
assert sanitize("hello world") == "hello world"
assert sanitize(" hello world ") == "hello world"
assert sanitize(" \thello\nworld ") == "hello world"
``` |
{
"source": "123joshuawu/vfh-python",
"score": 3
} |
#### File: vfh-python/vfhplus/vfh_plus_sim.py
```python
from vfh_plus import *
import sys
import math
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Need to
# - change movement of robot on actual map to decimal accuracy
""" Testing for VFH+ algorithm using map.txt grid from vfh-python"""
# USER-DEFINED VARIABLES--------
# Start/end location for robot in histogram grid
start = (3, 47)
end = (3, 3)
# Dimension of HistogramGrid
hg_dim = (50, 50)
# Number of sectors in PolarHistogram
nsectors = 72
# Window Length of active region
w_s = 21
# Max number of steps for loop (Prevent infinite loops)
MAX_STEPS = 100
# Robot velocity (unit square/interval)
v_r = 1
# CONSTANTS USED IN CALCS-----------------------------
# Feel free to change these to see what happens
# Positive constants for calculating cell magnitude
# Should satisfy mhp_a - mhp_b * sqrt(2) * (w_s - 1)/2 = 0
mhp_a = 28
mhp_b = 2
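# Quick check of the constraint above (added for clarity): with w_s = 21,
# sqrt(2) * (w_s - 1) / 2 ~= 14.14, so mhp_a - mhp_b * 14.14 = 28 - 28.28 ~= 0.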
# Positive constant for smoothing polar histogram
mhp_l = 5
# Positive constants for certainty threshold of polar sector
gbd_t_low = 22
gbd_t_high = 24
# Positive constant for number of consecutive sectors for a wide valley
# Should change in accordance with nsectors
gbd_smax = 18
# Positive constants for calculating cost of candidate angles
# gbd_a is for goal oriented steering, gbd_b and gbd_c are for smooth steering
# Should satisfy gbd_a > gbd_b + gbd_c
gbd_a = 5
gbd_b = 2
gbd_c = 2
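# Quick check of the constraint above (added for clarity): gbd_a = 5 > gbd_b + gbd_c = 2 + 2 = 4.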
# ---------------
def from_map(map_fname):
""" Create grid from text file """
with open(map_fname, 'r') as f:
reader = csv.reader(f, delimiter=" ")
lines = list(reader)
lines = list(map(lambda l: list(map(int, l)), lines))
nrows = len(lines)
ncols = len(lines[0])
return lines
current_loc = start
current_cell = start
target_angle = None
current_angle = wrap_angle(-1 *
math.degrees(math.atan2(end[1] - start[1], end[0] - start[0])))
previous_angle = current_angle
hg = HistogramGrid(hg_dim[0], hg_dim[1])
# hg.grid = from_map("vfh-python/map_no_sides.txt")
hg.grid = from_map("map.txt")
ph = None
steps = []
index = 0
ani = None
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xticks(np.arange(0, len(hg.grid[0]), 5))
ax.set_yticks(np.arange(0, len(hg.grid), 5))
plt.grid()
# plt.gca().invert_yaxis()
for y in xrange(len(hg.grid)):
for x in xrange(len(hg.grid)):
if hg.grid[y][x] == 1:
ax.plot(x, len(hg.grid) - y - 1, 'ks')
ax.plot(end[0], end[1], 'rx')
pts = ax.scatter([start[0]], [start[1]])
# def test():
# ph = VFH.map_active_hg_to_ph(hg, PolarHistogram(nsectors), current, w_s, mhp_a, mhp_b, mhp_l)
# target_angle = wrap_angle(-1 * math.degrees(math.atan2(end[1] - current[1], end[0] - current[0])))
# best_angle = VFH.get_best_direction(ph, target_angle, current_angle, previous_angle, gbd_t_low, gbd_t_high, gbd_smax, gbd_a, gbd_b, gbd_c)
def update(frame):
global index, current_cell, current_loc, current_angle, target_angle, previous_angle, steps, v_r
if hg.out_of_bounds(current_cell[0], current_cell[1]):
ani.event_source.stop()
return
if current_cell == end:
print("Goal reached")
ani.event_source.stop()
return
# try:
if index < MAX_STEPS:
print "STEP", index
#hg.print_hg(list(map(lambda s: s[1], steps)), start, end, current)
ph = VFH.map_active_hg_to_ph(hg, PolarHistogram(
nsectors), current_cell, w_s, mhp_a, mhp_b, mhp_l)
# avg = np.median(ph.polar_histogram)#sum(ph.polar_histogram) / len(ph.polar_histogram)
target_angle = wrap_angle(math.degrees(math.atan2(
end[1] - current_loc[1], end[0] - current_loc[0])))
best_angle = VFH.get_best_direction(
ph, target_angle, current_angle, previous_angle, gbd_t_low, gbd_t_high, gbd_smax, gbd_a, gbd_b, gbd_c)
print "current_cell (%d, %d)" % (current_cell[0], current_cell[1])
print "current_loc (%f, %f)" % (current_loc[0], current_loc[1])
print "best_angle", best_angle
steps.append((index, current_cell, current_loc,
target_angle, wrap_angle(best_angle)))
# check if robot needs to slow down
end_dis = math.hypot(end[1] - current_loc[1], end[0] - current_loc[0])
print "end_dis %d" % end_dis
if end_dis < v_r:
v_r = end_dis
# Compute next adjacent cell robot will be in
dx = v_r * math.cos(math.radians(best_angle))
dy = v_r * math.sin(math.radians(best_angle))
print "dx %f dy %f" % (dx, dy)
next_x = int(math.floor(current_loc[0] + dx))
next_y = int(math.floor(current_loc[1] + dy))
print "next (%d, %d)" % (next_x, next_y)
current_loc = (current_loc[0] + dx, current_loc[1] + dy)
current_cell = (next_x, next_y)
previous_angle = current_angle
current_angle = best_angle
index += 1
print "-" * 16
ofs = pts.get_offsets()
ofs = np.append(ofs, [[current_loc[0], current_loc[1]]], axis=0)
pts.set_offsets(ofs)
# except Exception:
# ani.event_source.stop()
# finally:
# for s in steps:
# print "{0:2}. ({1:2}, {2:<2}) -- ({3:2.1f}, {4:<2.1f}) target_angle: {5:5.1f} best_angle: {6:5.1f} ".format(s[0], s[1][0], s[1][1], s[2][0], s[2][1], s[3], s[4])
# print "COMPLETE"
# return pts
#hg.print_hg(list(map(lambda s: s[1], steps)), start, end, current)
# #if raw_input(): break
# finally:
# i = 0
# for s in steps:
# print "{0:2}. ({1:2}, {2:<2}) target_angle: {3:5.1f} best_angle: {4:5.1f} smoothed_ph cntr: {5:3.1f} avg_obs_dis: {6}".format(s[0], s[1][0], s[1][1], s[2], s[3], s[4], s[5])
# if i < len(steps) - 1 and steps[i + 1][4] - s[4] > gbd_t_high and steps[i + 1][5] > s[5]:
# print "WOWWWW", steps[i + 1][4] - s[4]
# i += 1
# hg.print_hg(list(map(lambda s: s[1], steps)), start, end, current)
try:
ani = animation.FuncAnimation(fig, update)
except Exception:
print "wowww"
ani.event_source.stop()
plt.show()
# try:
# print "VARS INITIALIZED STARTING LOOP"
# while index < MAX_STEPS:
# print "STEP", index
# #hg.print_hg(list(map(lambda s: s[1], steps)), start, end, current)
# if current == end: break
# ph = VFH.map_active_hg_to_ph(hg, PolarHistogram(nsectors), current, w_s, mhp_a, mhp_b, mhp_l)
# avg = np.median(ph.polar_histogram)#sum(ph.polar_histogram) / len(ph.polar_histogram)
# target_angle = wrap_angle(-1 * math.degrees(math.atan2(end[1] - current[1], end[0] - current[0])))
# best_angle = VFH.get_best_direction(ph, target_angle, current_angle, previous_angle, gbd_t_low, gbd_t_high, gbd_smax, gbd_a, gbd_b, gbd_c)
# print "best_angle", best_angle
# steps.append((index, current, target_angle, wrap_angle(best_angle), avg, ph.avg_obs_dis, ph.polar_histogram))
# # Compute next adjacent cell robot will be in
# next_angle = math.floor(wrap_angle(best_angle + 22.5) / 45) * 45
# print "current", current
# print "next_angle", next_angle
# next_x = int(round((math.sqrt(2) if next_angle % 90 != 0 else 1) * math.cos(math.radians(next_angle))) + current[0])
# next_y = int(round((math.sqrt(2) if next_angle % 90 != 0 else 1) * math.sin(math.radians(next_angle))) * -1 + current[1])
# print "next x %d y %d" % (next_x, next_y)
# current = (next_x, next_y)
# previous_angle = current_angle
# current_angle = best_angle
# index += 1
# print "-" * 16
# print "COMPLETE"
# vcp = (int(sys.argv[2]), int(sys.argv[3]))
# print vcp
# hg_dim = (50, 50)
# nsectors = 36
# w_s = 21
# old_hg = old.HistogramGrid.from_map("map.txt", 1)
# hg = HistogramGrid(hg_dim[0], hg_dim[1])
# hg.grid = old_hg.grid
# #print hg
# ph = VFH.map_active_hg_to_ph(hg, PolarHistogram(nsectors), vcp, w_s)
# print ph
# print VFH.get_best_direction(ph.polar_histogram, int(sys.argv[1]), smax=5)
# #ph = PolarHistogram(hg, (17, 12), 5, 16)
# #print ph
# #print ph.get_best_angle(180, 180)
```
#### File: vfh-python/vfhstar/vfh_star.py
```python
import math
import heapq
import numpy as np
from itertools import groupby, count
import operator
from decimal import Decimal, ROUND_UP
import utm
from itertools import starmap, chain
from shapely.geometry.polygon import Polygon
from shapely.geometry import Point
import matplotlib
matplotlib.use('TkAgg')
def get_line(x1, y1, x2, y2):
points = []
issteep = abs(y2-y1) > abs(x2-x1)
if issteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
rev = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
rev = True
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if issteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
# Reverse the list if the coordinates were reversed
if rev:
points.reverse()
return points
def trace_bounds(hg, bounds):
for i in range(len(bounds) - 1):
# print("BOUNDS", bounds[i], bounds[i + 1])
for p in get_line(bounds[i][0], bounds[i][1], bounds[i + 1][0], bounds[i + 1][1]):
hg.set_certainty(p[0], p[1], 15)
def bound(index, minimum, maximum):
if index < minimum:
return 0
elif index >= maximum:
return maximum
else:
return index
class HistogramGrid:
""" Class HistogramGrid defines a nested array ("grid") of certainty values
Coordinate points start from 0
"""
def __init__(self, ncols, nrows, bounds):
self.histogram_grid = [[0 for c in range(ncols)] for r in range(nrows)]
self.poly = Polygon(bounds)
self.bounds = self.poly.bounds
self.dimensions = (nrows, ncols)
trace_bounds(self, bounds)
def out_of_bounds(self, x, y):
""" Returns whether the cell is out of the grid. Used for edge conditions """
return not self.poly.contains(Point(x, y))
# return 0 > y or y >= len(self.histogram_grid) or 0 > x or x >= len(self.histogram_grid[0])
def bound_x(self, x):
return int(bound(x, self.bounds[0], self.bounds[2]))
def bound_y(self, y):
return int(bound(y, self.bounds[1], self.bounds[3]))
def get_certainty(self, x, y):
return self.histogram_grid[len(self.histogram_grid) - y - 1][x]
def set_certainty(self, x, y, certainty):
self.histogram_grid[len(self.histogram_grid) - y - 1][x] = certainty
def get_obstacles(self):
row = len(self.histogram_grid[0])
return list(map(lambda t: ((t[0] // row, t[0] % row), t[1]), filter(lambda c: c[1] > 0, enumerate(chain(*self.histogram_grid)))))
# obs = []
# for y in range(int(self.bounds[3])+1):
# for x in range(int(self.bounds[2])+1):
# c = self.get_certainty(x, y)
# if c > 0:
# obs.append(((x, y), c))
# return obs
def print_hg(self, robot_locations, start, end, current, o_nodes=[], c_nodes=[], e_nodes=[], b_nodes=[], window_radius=None):
""" For testing purposes """
string = ""
if window_radius is None:
window_radius = len(self.histogram_grid)
if current is None:
current = start
for y in reversed(range(self.bound_y(current[1] - window_radius), self.bound_y(current[1] + window_radius) + 1)):
for x in range(self.bound_x(current[0] - window_radius), self.bound_x(current[0] + window_radius) + 1):
if self.get_certainty(x, y) == 1:
string += "1 "
elif (x, y) == current:
string += "C "
elif (x, y) == start:
string += "S "
elif (x, y) == end:
string += "E "
elif (x, y) in b_nodes:
string += "B "
elif (x, y) in e_nodes:
string += ". "
elif (x, y) in c_nodes:
string += "# "
elif (x, y) in o_nodes:
string += "N "
elif (x, y) in robot_locations:
string += "X "
else:
string += "0 "
string += "\n"
#string += "0/1 - Free/Occupied (Certainty values)\nX - Robot locations\nS - Start Position (%d, %d)\nE - End Target (%d, %d)\nC - Current" % (start[0], start[1], end[0], end[1])
return string[:-1]
def print_active_region(self, min_ax, max_ax, min_ay, max_ay, vcp):
ar_string = ""
for y in reversed(range(min_ay, max_ay + 1)):
for x in range(min_ax, max_ax + 1):
ar_string += "{} ".format(self.get_certainty(x, y)) if (x, y) != vcp else "X "
ar_string += "\n"
return ar_string[:-1]
class PolarHistogram:
def __init__(self, nsectors):
self.polar_histogram = [0 for s in range(nsectors)]
self.nsectors = nsectors
self.sector_angle = 360 / nsectors
def add_certainty(self, sector, certainty):
while sector >= len(self.polar_histogram):
sector -= len(self.polar_histogram)
self.polar_histogram[sector] += certainty
def get_sector_certainty(self, sector):
return self.polar_histogram[sector]
def __str__(self):
""" Testing purposes """
string = ""
for tup in enumerate(self.polar_histogram):
if tup[1] != 0:
string += "{:<3} {}\n".format(tup[0] * self.sector_angle, tup[1])
return string
def wrap_angle(angle):
while angle > 360:
angle -= 360
while angle < 0:
angle += 360
return angle
def angle_between(n, a, b):
n = wrap_angle(n)
a = wrap_angle(a)
b = wrap_angle(b)
if (a < b):
return a <= n <= b
else:
return a <= n or n <= b
def small_angle_diff(a1, a2):
""" Helper function for getting smallest angle difference between two angles """
return abs((a1 - a2 + 180) % 360 - 180)
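# Illustrative values (added for clarity, not part of the original source):
#   wrap_angle(-30) == 330
#   small_angle_diff(350, 10) == 20    # abs((340 + 180) % 360 - 180) = abs(160 - 180)
#   small_angle_diff(10, 350) == 20    # symmetric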
#------------------ VFH FUNCTIONS --------------------#
def get_polar_histogram(hg, vcp, w_s, n, rr, ds, t_low, t_high, a, b):
ph = PolarHistogram(n)
robot_x, robot_y = vcp
window_radius = (w_s - 1) // 2
min_active_x = hg.bound_x(robot_x - window_radius)
max_active_x = hg.bound_x(robot_x + window_radius)
min_active_y = hg.bound_y(robot_y - window_radius)
max_active_y = hg.bound_y(robot_y + window_radius)
print("Active Region -- X marks the robot")
print(hg.print_active_region(min_active_x, max_active_x, min_active_y, max_active_y, vcp))
for x in range(min_active_x, max_active_x + 1):
for y in range(min_active_y, max_active_y + 1):
dy = y - robot_y
dx = x - robot_x
cell_certainty = hg.get_certainty(x, y)
if cell_certainty == 0 or (x, y) == vcp: continue
cell_angle = wrap_angle(math.degrees(math.atan2(dy, dx)))
cell_distance = math.hypot(dx, dy)
cell_magnitude = (cell_certainty ** 2) * (a - b * cell_distance)
# cell_magnitude = (cell_certainty ** 2) * (a - b * (cell_distance ** 2))
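            # Note (added for clarity): the magnitude grows with the squared certainty and falls off
            # linearly with distance; the commented line above is an alternative quadratic fall-off.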
if cell_distance < rr + ds:
raise Exception("Robot is too close to obstacle.")
obstacle_enlargement_angle = math.degrees(math.asin((rr + ds) / cell_distance))
# print("enlargement angle", obstacle_enlargement_angle)
min_sector = int(math.floor((cell_angle - obstacle_enlargement_angle) / ph.sector_angle))
max_sector = int((cell_angle + obstacle_enlargement_angle) / ph.sector_angle) + 1
print("({0:<2}, {1:<2}) {5:>3}/{6:<3} = {2:6.1f} deg -- Distance: {3:5.1f} Certainty: {7} Magnitude: {4:.1f}".format(x, y, cell_angle, cell_distance, cell_magnitude, dy, dx, cell_certainty))
for sector in range(min_sector, max_sector):
# print("sector", sector * ph.sector_angle)
ph.add_certainty(sector, cell_magnitude)
print("\nPolarHistogram:\n" + str(ph))
binary_ph = [0] * n
offset = n - ph.polar_histogram.index(max(ph.polar_histogram)) - 1
i = 0
for c in np.roll(ph.polar_histogram, offset):
if c > t_high:
binary_ph[i] = 1
elif c < t_low:
binary_ph[i] = 0
else:
binary_ph[i] = binary_ph[i - 1]
i += 1
ph.polar_histogram = np.roll(binary_ph, -1 * offset)
print("Binary PH\n" + str(ph))
return ph
def get_candidate_angles(ph, smax, tgt_angle):
polar_histogram = ph.polar_histogram
if not 0 in polar_histogram:
raise Exception("All sectors occupied, no possible directions")
if max(polar_histogram) == 0:
return [tgt_angle]
valleys = [list(g) for k, g in groupby(list(enumerate(polar_histogram)), operator.itemgetter(1))]
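    # Illustrative example (added for clarity): for a binary histogram [0, 0, 1, 1, 0] the groupby
    # call yields [[(0, 0), (1, 0)], [(2, 1), (3, 1)], [(4, 0)]] -- runs of equal certainty values.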
print("Valleys", valleys)
if valleys[0][0][1] == valleys[-1][0][1]:
valleys[-1].extend(map(lambda tup: (tup[0] + len(polar_histogram), tup[1]), valleys.pop(0)))
print("Wrapped last valley")
    free_valleys = list(filter(lambda v: v[0][1] == 0, valleys))
print("Free valleys", free_valleys)
print("Generating candidate angles")
print("Target Angle", tgt_angle)
candidate_angles = []
for v in free_valleys:
if v[-1][0] - v[0][0] > smax:
left_candidate = wrap_angle((v[0][0] + smax / 2) * ph.sector_angle)
right_candidate = wrap_angle((v[-1][0] - smax / 2) * ph.sector_angle)
candidate_angles.append(left_candidate)
candidate_angles.append(right_candidate)
print("Wide Valley (%d, %d)... Adding %d and %d" % (v[0][0] * ph.sector_angle, v[-1][0] * ph.sector_angle, candidate_angles[-2], candidate_angles[-1]))
if angle_between(tgt_angle, left_candidate, right_candidate):
print("Target in btwn (%d, %d)... Adding %d" % (left_candidate, right_candidate, tgt_angle))
candidate_angles.append(tgt_angle)
else:
            candidate_angles.append(wrap_angle(((v[0][0] + v[-1][0]) / 2) * ph.sector_angle)) # Add middle of valley to candidate angles
print("Narrow Valley (%d, %d)... Adding %d" % (v[0][0] * ph.sector_angle, v[-1][0] * ph.sector_angle, candidate_angles[-1]))
return candidate_angles
def primary_cost(candidate_angle, cur_angle, prev_angle, tgt_angle, a, b, c):
return a * small_angle_diff(candidate_angle, tgt_angle) \
+ b * small_angle_diff(candidate_angle, cur_angle) \
+ c * small_angle_diff(candidate_angle, prev_angle)
def projected_cost(candidate_angle, effective_direction, cur_angle, prev_angle, tgt_angle, discount_factor, a, b, c):
return discount_factor * (a * max(small_angle_diff(candidate_angle, tgt_angle), \
small_angle_diff(effective_direction, tgt_angle)) \
+ b * small_angle_diff(candidate_angle, cur_angle) \
+ c * small_angle_diff(candidate_angle, prev_angle))
def heuristic(cur_angle, prev_angle, tgt_angle, discount_factor, b, c):
return discount_factor * (b * small_angle_diff(tgt_angle, cur_angle) \
+ c * small_angle_diff(tgt_angle, prev_angle))
# Computationally more expensive but more accurate
# def heuristic(effective_direction, cur_angle, prev_angle, discount_factor, a, b, c):
# return discount_factor * (a * small_angle_diff(effective_direction, tgt_angle) \
# + b * small_angle_diff(tgt_angle, cur_angle) \
# + c * small_angle_diff(tgt_angle, prev_angle))
def get_projected_location(x, y, angle, d_s):
# Disregarding dynamics of robot
if angle % 90 == 0:
projected_x = x + int(round(math.cos(math.radians(angle)) * d_s))
projected_y = y + int(round(math.sin(math.radians(angle)) * d_s))
else:
projected_x = x + round_away_zero(math.cos(math.radians(angle)) * d_s)
projected_y = y + round_away_zero(math.sin(math.radians(angle)) * d_s)
return (projected_x, projected_y)
def get_projected_angle(cur_angle, tgt_angle):
# Disregarding dynamics of robot
return tgt_angle
#--------------------- VFH STAR FUNCTIONS ---------------- #
def a_star(hg, start, start_angle, end, d_t, d_s, w_s, n, rr, ds, t_low, t_high, smax, cm_a, cm_b, a, b, c, mu1, mu2, mu3, discount_factor, steps):
n_g = d_t // d_s
open_nodes = PriorityQueue()
closed_nodes = []
end_nodes = []
previous_locations = set([])
total_costs = []
open_nodes.put(PrimaryNode(start[0], start[1], start_angle, hg, n_g, end, a, b, c), 0)
previous_locations.add(start)
while not open_nodes.empty():
costs = []
current = open_nodes.get()
closed_nodes.append(current)
previous_locations.add(current.location)
if current.location == end:
end_nodes.append(current)
break
if hg.out_of_bounds(current.x, current.y) or small_angle_diff(current.cur_direction, current.get_prev_angle()) > 120:
if current in end_nodes: del end_nodes[-1]
continue
if current.depth == 0:
end_nodes.append(current)
continue
# try:
pph = current.projected_polar_histogram(w_s, n, rr, ds, t_low, t_high, cm_a, cm_b)
candidate_angles = current.projected_candidate_directions(smax)
print("candidate angles", candidate_angles)
# except Exception as e:
# print("PROBALBY COLLISION " + repr(e))
# if current in end_nodes:
# del end_nodes[-1]
# continue
children = [None for i in range(len(candidate_angles))]
i = 0
for ca in candidate_angles:
child_orientation = current.get_projected_angle(ca)
child_x, child_y = current.get_projected_location(ca, d_s, end)
# print("CA %d deg -- Child: (%d, %d)" % (ca, child_x, child_y)
cost = current.projected_cost(ca, discount_factor, mu1, mu2, mu3)
costs.append(cost)
if (child_x, child_y) not in previous_locations or cost < current.cost:
# print("NEW CHILD " + str((child_x, child_y))
child = ProjectedNode(ca, child_x, child_y, child_orientation, current, hg, current.depth - 1, end, cost, cost + current.total_cost)
priority = cost + child.heuristic(discount_factor, mu2, mu3)
open_nodes.put(child, priority)
children[i] = child.location
i += 1
# print(hg.print_hg(list(map(lambda s: s[1], steps)), start, end, current.location, open_nodes.get_locs(), previous_locations, list(map(lambda e: e.location, end_nodes)), [], (w_s - 1) / 2))
# print("Current " + str(current.location))
# print("Candidate angles " + str(candidate_angles))
# print("Costs " + str(costs))
# print("CUR COST: " + str(current.cost))
# print("Children " + str(children))
# print("Prev locs " + str(previous_locations))
# raw_input()
print(closed_nodes)
print(end_nodes)
locs = []
if not end_nodes:
primary_candidate_angle, locs = closed_nodes[0].get_primary_candidate_angle()
else:
primary_candidate_angle, locs = min(end_nodes, key=operator.attrgetter('total_cost')).get_primary_candidate_angle()
# print(hg.print_hg(list(map(lambda s: s[1], steps)), start, end, None, open_nodes.get_locs(), previous_locations, list(map(lambda e: e.location, end_nodes)), locs, (w_s - 1) / 2))
# print("End nodes " + str(list(map(lambda e: e.location, end_nodes))))
# print("Total costs " + str(list(map(lambda e: int(e.total_cost), end_nodes))))
# print("Best nodes " + str(locs))
# print("Best angle " + str(primary_candidate_angle))
# print("Start " + str(start))
# raw_input()
return primary_candidate_angle
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
def get_locs(self):
return list(map(lambda n: n[1].location, self.elements))
def round_away_zero(num):
""" Specially used for projected location, to be removed once robot dynamics added """
return int(Decimal(num).quantize(Decimal('1.'), ROUND_UP))
class Node(object):
def __init__(self, x, y, hg, cur_direction, depth, cost, total_cost):
self.x = x
self.y = y
self.location = (x, y)
self.hg = hg
self.cur_direction = cur_direction
self.depth = depth
self.cost = cost
self.total_cost = total_cost
def get_projected_location(self, angle, d_s, end):
if d_s > math.sqrt(2) and math.hypot(end[1] - self.y, end[0] - self.x) < d_s:
return end
return get_projected_location(self.x, self.y, angle, d_s)
def get_projected_angle(self, tgt_angle):
return get_projected_angle(self.cur_direction, tgt_angle)
def projected_polar_histogram(self, w_s, n, rr, ds, t_low, t_high, a, b):
self.pph = get_polar_histogram(self.hg, self.location, w_s, n, rr, ds, t_low, t_high, a, b)
return self.pph
def projected_candidate_directions(self, smax):
self.pcd = get_candidate_angles(self.pph, smax, self.tgt_angle)
return self.pcd
def get_primary_candidate_angle(self, ca=None, prev_locs=None):
pass
class ProjectedNode(Node):
def __init__(self, candidate_angle, x, y, cur_direction, parent, hg, depth, end, cost, total_cost):
super().__init__(x, y, hg, cur_direction, depth, cost, total_cost)
self.candidate_angle = candidate_angle
self.parent = parent
self.effective_direction = wrap_angle(math.degrees(math.atan2(y - parent.y, x - parent.x)))
self.tgt_angle = wrap_angle(math.degrees(math.atan2(end[1] - y, end[0] - x)))
def projected_cost(self, candidate_angle, discount_factor, mu1, mu2, mu3):
return discount_factor * (mu1 * max(small_angle_diff(candidate_angle, self.tgt_angle), \
small_angle_diff(self.effective_direction, self.tgt_angle)) \
+ mu2 * small_angle_diff(candidate_angle, self.cur_direction) \
+ mu3 * small_angle_diff(candidate_angle, self.get_prev_angle()))
def heuristic(self, discount_factor, mu2, mu3):
return discount_factor * (mu2 * small_angle_diff(self.tgt_angle, self.cur_direction) \
+ mu3 * small_angle_diff(self.tgt_angle, self.get_prev_angle()))
def get_prev_angle(self):
return self.parent.cur_direction
def get_primary_candidate_angle(self, ca=None, prev_locs=None):
if ca is None: ca = -1
if prev_locs is None: prev_locs = list()
prev_locs.append(self.location)
print("PRI_CAND (%d, %d) -- %d deg" % (self.x, self.y, ca))
return self.parent.get_primary_candidate_angle(self.candidate_angle, prev_locs)
class PrimaryNode(Node):
def __init__(self, x, y, cur_direction, hg, depth, end, a, b, c):
super().__init__(x, y, hg, cur_direction, depth, 0, 0)
self.tgt_angle = wrap_angle(math.degrees(math.atan2(end[1] - y, end[0] - x)))
self.best_angle = None
self.a = a
self.b = b
self.c = c
def projected_candidate_directions(self, smax):
cds = get_candidate_angles(self.pph, smax, self.tgt_angle)
if len(cds) == 1:
self.best_angle = cds[0]
return []
else:
return cds
def projected_cost(self, candidate_angle, discount_factor, mu1, mu2, mu3):
return self.a * small_angle_diff(candidate_angle, self.tgt_angle) \
+ self.b * small_angle_diff(candidate_angle, self.cur_direction) \
+ self.c * small_angle_diff(candidate_angle, self.get_prev_angle())
def get_prev_angle(self):
return self.cur_direction
def get_primary_candidate_angle(self, ca=None, prev_locs=None):
print("GOT THE ANGLE ITS " + str(ca))
if prev_locs is None: prev_locs = []
return (ca, prev_locs) if ca else (self.best_angle, prev_locs)
# from mapping import *
# from pyproj import Proj, transform
# import utm
# from itertools import starmap
# from shapely.geometry import Polygon
# p_l = Proj(proj='latlong', datum='WGS84')
# p_m = Proj(proj='utm', zone=10, datum='NAD27')
def get_meter_poly(l_coords):
m_coords = list(starmap(latlon_convert_to_meter, l_coords))
zone = Polygon(m_coords)
return zone
def latlon_convert_to_meter(lat, lng):
return utm.from_latlon(lat, lng)[:2]
# return transform(p_l, p_m, lat, lng)
def add_polar(v1, v2):
r_x = math.cos(math.radians(v1[0])) * v1[1] + math.cos(math.radians(v2[0])) * v2[1]
r_y = math.sin(math.radians(v1[0])) * v1[1] + math.sin(math.radians(v2[0])) * v2[1]
r = math.hypot(r_x, r_y)
a = wrap_angle(math.atan2(r_y, r_x))
return (a, r)
class Zone:
def __init__(self, name, coords, resolution):
self.name = name
self.poly = get_meter_poly(coords)
self.resolution = resolution
bounds = self.poly.bounds
self.origin = (bounds[0], bounds[1])
hg_bounds = list(starmap(self.get_cell_m, list(self.poly.exterior.coords)))
self.hg = HistogramGrid(int((bounds[2] - bounds[0]) / resolution), int((bounds[3] - bounds[1]) / resolution), hg_bounds)
def get_cell_latlng(self, lat, lng):
return self.get_cell_m(*latlon_convert_to_meter(lat, lng))
def get_cell_m(self, m_e, m_n):
return (int((m_e - self.origin[0]) / self.resolution), int((m_n - self.origin[1]) / self.resolution))
def get_points(self):
return list(self.poly.exterior.coords)
class Sensor:
def __init__(self, name, angle, radius, callback):
self.name = name
self.angle = angle
self.radius = radius
self.callback = callback
def get_readings(self, robot_angle):
t_readings = []
for v_i in self.callback():
v_f = add_polar(v_i, (self.angle - robot_angle, self.radius))
x_g = int(math.floor(v_f[1] * math.cos(math.radians(v_f[0]))))
y_g = int(math.floor(v_f[1] * math.sin(math.radians(v_f[0]))))
t_readings.append((x_g, y_g))
return t_readings
class Bot:
def __init__(self, zone):
self.sensors = []
self.zone = zone
def update_location(self, lat, lng, angle):
robot_x, robot_y = self.zone.get_cell_latlng(lat, lng)
self.aggregate_readings(robot_x, robot_y, angle)
def aggregate_readings(self, x, y, angle):
for r in zip(map(lambda s: s.get_readings(angle), self.sensors)):
self.zone.hg.add_certainty(r[0], r[1])
return True
def add_sensor(self, name, angle, radius, callback):
        self.sensors.append(Sensor(name, angle, radius, callback))
return True
``` |
{
"source": "123joshuawu/yacs.n",
"score": 3
} |
#### File: rpi_data/modules/add_school_column.py
```python
import yaml
import pandas as pd
from typing import List, Dict
# File copied from YACS
SCHOOL_DEPARTMENT_MAPPING_YAML_FILENAME = "school-department-mapping.yaml"
class SchoolDepartmentMapping:
def __init__(self, mapping: Dict[str, str], schools: List[str]):
""" :param: mapping - dict that maps department shortname -> school longname
e.g. mapping.get('CSCI') == 'Science'
mapping.get('COGS') == 'Humanities, Arts and Social Sciences'
:param: schools - list of schools
"""
self.mapping = mapping
self.schools = schools
def get(self, key: str, default = 'Other') -> str:
return self.mapping.get(key, default)
@classmethod
def parse_yaml(cls, path: str) -> 'SchoolDepartmentMapping':
data = None
with open(path) as f:
data = yaml.safe_load(f.read())
# data is a dict with the following form
# {
# 'schools': {
# 'longname': str ('Humanities, Arts and Social Sciences'),
# 'subjects': {
# 'shortname': str ('ARTS'),
# 'longname: str ('Arts')
# }[]
# }[]
# }
mapping: Dict[str, str] = {}
for school in data['schools']:
school_longname = school['longname']
for subject in school['subjects']:
subject_shortname = subject['shortname']
mapping[subject_shortname] = school_longname
schools = [school['longname'] for school in data['schools']]
return cls(mapping, schools)
def add_school_column(df: pd.DataFrame, school_department_mapping_path = SCHOOL_DEPARTMENT_MAPPING_YAML_FILENAME) -> pd.DataFrame:
school_department_mapping = SchoolDepartmentMapping.parse_yaml(school_department_mapping_path)
df['school'] = df['course_department'].apply(school_department_mapping.get)
return df
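# Hedged usage sketch (added for illustration; department codes and resulting school names depend on
# the contents of school-department-mapping.yaml):
#   df = pd.DataFrame({'course_department': ['CSCI', 'ARTS', 'XXXX']})
#   df = add_school_column(df)
#   # df['school'] now holds the mapped school longname per row, or 'Other' for unknown departments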
```
#### File: api/db/admin.py
```python
class Admin:
def __init__(self, db_conn):
self.db_conn = db_conn
self.interface_name = 'admin_info'
def get_semester_default(self):
        # NOTE: UNION ALL with LIMIT 1 takes the first available value: the admin_settings override if present, otherwise the first public semester
result, error = self.db_conn.execute("""
SELECT admin.semester FROM admin_settings admin
UNION ALL
SELECT si.semester FROM semester_info si WHERE si.public=true::boolean
LIMIT 1
""", None, True)
default_semester = None
if len(result) == 1:
# parse row
default_semester = result[0]['semester'] ## Only one record in table for admin_settings
if error:
return (None, error)
else:
return (default_semester, error)
def set_semester_default(self, semester):
try:
cmd = """
INSERT INTO admin_settings(semester)
VALUES(%s)
ON CONFLICT (semester) DO UPDATE SET semester = %s
"""
response, error = self.db_conn.execute(cmd, [semester, semester], False)
except Exception as e:
# self.db_conn.rollback()
return (False, e)
if response != None:
return(True, None)
else:
return (False, error)
```
#### File: api/db/userevent.py
```python
from db.model import *
class UserEvent(Model):
def __init__(self):
super().__init__()
def addEvent(self, uid, eventID, data, timestamp):
sql = """INSERT INTO public.userevents ("eventID", "uid", "data", "createdAt") VALUES (%s, %s, %s, %s)"""
args = (eventID, uid, data, timestamp)
return self.db.execute(sql, args, False)[0]
```
#### File: migrations/versions/2020-12-04_init.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4da0df6b49e7'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('admin_settings',
sa.Column('semester', sa.VARCHAR(length=255), nullable=False),
sa.PrimaryKeyConstraint('semester')
)
op.create_table('course',
sa.Column('crn', sa.VARCHAR(length=255), nullable=False),
sa.Column('section', sa.VARCHAR(length=255), nullable=True),
sa.Column('semester', sa.VARCHAR(length=255), nullable=True),
sa.Column('min_credits', sa.INTEGER(), nullable=True),
sa.Column('max_credits', sa.INTEGER(), nullable=True),
sa.Column('date_start', sa.DATE(), nullable=True),
sa.Column('date_end', sa.DATE(), nullable=True),
sa.Column('department', sa.VARCHAR(length=255), nullable=True),
sa.Column('level', sa.INTEGER(), nullable=True),
sa.Column('title', sa.VARCHAR(length=255), nullable=True),
sa.Column('full_title', sa.TEXT(), nullable=True),
sa.Column('description', sa.TEXT(), nullable=True),
sa.Column('raw_precoreqs', sa.TEXT(), nullable=True),
sa.Column('frequency', sa.VARCHAR(length=255), nullable=True),
sa.Column('school', sa.VARCHAR(length=255), nullable=True),
sa.Column('tsv', postgresql.TSVECTOR(), nullable=True),
sa.PrimaryKeyConstraint('crn')
)
op.create_table('course_corequisite',
sa.Column('department', sa.VARCHAR(length=255), nullable=False),
sa.Column('level', sa.INTEGER(), nullable=False),
sa.Column('corequisite', sa.VARCHAR(length=255), nullable=False),
sa.PrimaryKeyConstraint('department', 'level', 'corequisite')
)
op.create_table('course_prerequisite',
sa.Column('department', sa.VARCHAR(length=255), nullable=False),
sa.Column('level', sa.INTEGER(), nullable=False),
sa.Column('prerequisite', sa.VARCHAR(length=255), nullable=False),
sa.PrimaryKeyConstraint('department', 'level', 'prerequisite')
)
op.create_table('course_session',
sa.Column('crn', sa.VARCHAR(length=255), nullable=False),
sa.Column('section', sa.VARCHAR(length=255), nullable=False),
sa.Column('semester', sa.VARCHAR(length=255), nullable=False),
sa.Column('time_start', postgresql.TIME(), nullable=True),
sa.Column('time_end', postgresql.TIME(), nullable=True),
sa.Column('day_of_week', sa.INTEGER(), nullable=False),
sa.Column('location', sa.VARCHAR(length=255), nullable=True),
sa.PrimaryKeyConstraint('crn', 'section', 'semester', 'day_of_week')
)
op.create_table('event',
sa.Column('event_id', sa.INTEGER(), nullable=False),
sa.Column('description', sa.VARCHAR(length=255), nullable=True),
sa.PrimaryKeyConstraint('event_id')
)
op.create_table('semester_date_range',
sa.Column('semester_part_name', sa.VARCHAR(length=255), nullable=True),
sa.Column('date_start', sa.DATE(), nullable=False),
sa.Column('date_end', sa.DATE(), nullable=False),
sa.PrimaryKeyConstraint('date_start', 'date_end')
)
op.create_table('semester_info',
sa.Column('semester', sa.VARCHAR(length=255), nullable=False),
sa.Column('public', sa.BOOLEAN(), nullable=True),
sa.PrimaryKeyConstraint('semester')
)
op.create_table('user_account',
sa.Column('user_id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.TEXT(), nullable=True),
sa.Column('email', sa.TEXT(), nullable=False),
sa.Column('phone', sa.TEXT(), nullable=True),
sa.Column('password', sa.TEXT(), nullable=True),
sa.Column('major', sa.TEXT(), nullable=True),
sa.Column('degree', sa.TEXT(), nullable=True),
sa.Column('enable', sa.BOOLEAN(), nullable=True),
sa.Column('admin', sa.BOOLEAN(), nullable=True),
sa.Column('super_admin', sa.BOOLEAN(), nullable=True),
sa.PrimaryKeyConstraint('user_id'),
sa.UniqueConstraint('email')
)
op.create_table('user_event',
sa.Column('event_id', sa.INTEGER(), nullable=False),
sa.Column('user_id', postgresql.UUID(), nullable=False),
sa.Column('content', sa.VARCHAR(length=255), nullable=True),
sa.Column('created_at', sa.BIGINT(), nullable=True),
sa.PrimaryKeyConstraint('event_id', 'user_id')
)
op.create_table('user_session',
sa.Column('session_id', postgresql.UUID(), nullable=False),
sa.Column('user_id', sa.INTEGER(), nullable=False),
sa.Column('start_time', postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column('end_time', postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.PrimaryKeyConstraint('session_id')
)
op.create_table('student_course_selection',
sa.Column('user_id', sa.INTEGER(), nullable=False),
sa.Column('semester', sa.VARCHAR(length=255), nullable=False),
sa.Column('course_name', sa.VARCHAR(length=255), nullable=False),
sa.Column('crn', sa.VARCHAR(length=255), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user_account.user_id'], ),
sa.PrimaryKeyConstraint('user_id', 'semester', 'course_name', 'crn')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('student_course_selection')
op.drop_table('user_session')
op.drop_table('user_event')
op.drop_table('user_account')
op.drop_table('semester_info')
op.drop_table('semester_date_range')
op.drop_table('event')
op.drop_table('course_session')
op.drop_table('course_prerequisite')
op.drop_table('course_corequisite')
op.drop_table('course')
op.drop_table('admin_settings')
# ### end Alembic commands ###
```
#### File: api/db/test_semester_info.py
```python
def test_semester_info(semester_info, test_data):
for semester in test_data.semesters:
assert semester_info.is_public(semester)
expected_public_semester = next(iter(test_data.semesters))
semester_info.upsert(expected_public_semester, False)
for semester in test_data.semesters:
assert (semester != expected_public_semester) == semester_info.is_public(semester)
semester_info.upsert(expected_public_semester, True)
for semester in test_data.semesters:
assert semester_info.is_public(semester)
```
#### File: yacs.n/tests/mock_cache.py
```python
class MockCache:
"""simple cache mock"""
def __init__(self):
self.__reset()
def clear(self):
self.cache_cleared = True
def __is_cleared(self):
return self.cache_cleared
def __reset(self):
self.cache_cleared = True
``` |
{
"source": "123jrf/StarHash",
"score": 3
} |
#### File: 123jrf/StarHash/packet.py
```python
import asyncio, socket, time
def send_packet(data, sock):
sock.send(data)
sock.send(b"$PACKET_END$")
def recv_packet(sock):
data = b""
while b"$PACKET_END$" not in data:
d = sock.recv(1)
data += d
data = data.replace(b"$PACKET_END$", b"")
return data
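# Framing note (added for clarity): send_packet(b"Hello", sock) puts b"Hello$PACKET_END$" on the
# wire; recv_packet reads one byte at a time until that delimiter appears and then strips it, so
# each call returns exactly one application-level message.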
def host():
sock = socket.socket()
sock.bind(("0.0.0.0", 9000))
sock.listen(5)
client, address = sock.accept()
while True:
time.sleep(1)
data = recv_packet(client).decode()
print(data)
send_packet(b"Hi", client)
def connect():
sock = socket.socket()
sock.connect(('localhost', 9000))
while True:
send_packet(b"Hello", sock)
time.sleep(1)
data = recv_packet(sock).decode()
print(data)
if __name__ == "__main__":
mode = input("Mode: ")
if mode == 'c':
connect()
elif mode == 'h':
host()
``` |
{
"source": "123Lez/gnewsly---news-aggregator",
"score": 3
} |
#### File: gnewsly---news-aggregator/python_services/websocket.py
```python
from autobahn.asyncio.websocket import WebSocketServerProtocol,WebSocketServerFactory
from DB_connection import database_connection
from realtimeupdates.General import General_Methods
import json
class MyServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
            # originally echoed the message back verbatim; now responds with a row fetched from the database
mysql = database_connection('localhost','root','<PASSWORD>','social_news_db')
mysql.get_connection()
# result = mysql.query_data('SELECT * FROM users')
# # print(result)
gm = General_Methods(mysql)
message = gm.fetch_row()
obj = {'name':'Lesego', 'parameter':{'length':5,'module':'General_Methods'}}
msg = bytes(json.dumps(message),'utf8')
# response_bytes = bytes(json.dumps(client_remove_message), 'utf-8')
self.sendMessage(msg, isBinary)
# self.sendMessage(payload, isBinary)
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
# if __name__ == '__main__':
# import asyncio
# factory = WebSocketServerFactory(u"ws://127.0.0.1:9000")
# factory.protocol = MyServerProtocol
# loop = asyncio.get_event_loop()
# coro = loop.create_server(factory, '0.0.0.0', 9000)
# server = loop.run_until_complete(coro)
# try:
# loop.run_forever()
# except KeyboardInterrupt:
# pass
# finally:
# server.close()
# loop.close()
``` |