{
"source": "joshanderson-kw/SMQTK",
"score": 3
}
#### File: algorithms/rank_relevancy/_interface.py
```python
import abc
from typing import Hashable, Sequence, Tuple
from numpy import ndarray
from smqtk.algorithms import SmqtkAlgorithm
class RankRelevancy (SmqtkAlgorithm):
"""
Algorithm that can rank a given pool of descriptors based on positively
and negatively adjudicated descriptors.
"""
@abc.abstractmethod
def rank(
self,
pos: Sequence[ndarray],
neg: Sequence[ndarray],
pool: Sequence[ndarray],
) -> Sequence[float]:
"""
Assign a relevancy score to each input descriptor in `pool` based on
the positively and negatively adjudicated descriptors in `pos` and
`neg` respectively.
:param pos:
Sequence of positively adjudicated descriptor vectors.
:param neg:
Sequence of negatively adjudicated descriptor vectors.
:param pool:
A sequence of descriptor vectors that we want to rank by topical
relevancy relative to the given positive and negative examples.
:return: An ordered sequence of float values denoting the relevancy of
`pool` elements
"""
class RankRelevancyWithFeedback (SmqtkAlgorithm):
"""
Similar to the :class:`RankRelevancy` algorithm but with the added feature
of also returning a sequence of elements from which feedback would be "most
useful".
What "most useful" means may be flexible but generally refers to the
goal of reducing the amount of adjudications required in order to
separate true-positive examples from true-negative examples in provided
pools via the assigned relevancy scores. E.g. other elements may be
adjudicated in some quantity to achieve some level of relevant sample
separation, but if the feedback requests are instead adjudicated, less
elements may need to be adjudicated to achieve and equivalent level of
separation.
    Feedback requests ought to be returned in a form that allows the user to
    convey the necessary information to the adjudicating agent so that
    adjudications can actually be performed. Additionally, we want
to be able to request feedback from elements that may not be present in the
given pool of descriptors.
Towards that end, this algorithm should be given a sequence of UIDs for the
given pool of descriptors. This allows the implementation to potentially
coordinate with an outside source of descriptor references such that the
returned feedback requests may be interpreted uniformly.
"""
@abc.abstractmethod
def _rank_with_feedback(
self,
pos: Sequence[ndarray],
neg: Sequence[ndarray],
pool: Sequence[ndarray],
pool_uids: Sequence[Hashable],
) -> Tuple[Sequence[float], Sequence[Hashable]]:
"""
Implement :meth:`rank_with_feedback`. `pool` and `pool_uids` have
already been checked to be of equal length.
.. seealso:: :meth:`rank_with_feedback`'s doc-string for the meanings
of the parameters and their return values
"""
def rank_with_feedback(
self,
pos: Sequence[ndarray],
neg: Sequence[ndarray],
pool: Sequence[ndarray],
pool_uids: Sequence[Hashable],
) -> Tuple[Sequence[float], Sequence[Hashable]]:
"""
Assign a relevancy score to each input descriptor in `pool` based on
the positively and negatively adjudicated descriptors in `pos` and
`neg` respectively, additionally returning a sequence of UIDs of those
descriptors for which adjudication feedback would be "most useful".
:param pos:
Sequence of positively adjudicated descriptor vectors.
:param neg:
Sequence of negatively adjudicated descriptor vectors.
:param pool:
A sequence of descriptor vectors that we want to rank by topical
relevancy relative to the given positive and negative examples.
:param pool_uids:
A sequence of hashable UID values, parallel in association with
descriptors in `pool`.
:return: Ordered sequence of float values denoting relevancy of `pool`
elements, as well as a sequence of ``Hashable`` values referencing
in-pool or out-of-pool descriptors we recommend for adjudication
feedback. In the latter sequence, descriptors are ordered
by usefulness, most to least.
:raises ValueError: `pool` and `pool_uids` are of different length
.. seealso:: :py:class:`RankRelevancyWithFeedback` class doc-string for
discussion on "most useful" meaning.
"""
if len(pool) != len(pool_uids):
raise ValueError('pool and pool_uids must be equally long but '
f'have length {len(pool)} and {len(pool_uids)}, '
'respectively')
return self._rank_with_feedback(pos, neg, pool, pool_uids)
```
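
Concrete plugins only need to supply `rank`, plus the usual SMQTK plugin and configuration plumbing. The sketch below is illustrative only: it assumes the `RankRelevancy` class above is in scope, ignores the negative exemplars, and uses a mean-positive cosine-similarity heuristic that is not an algorithm shipped with SMQTK.

```python
from typing import Sequence

import numpy as np


class MeanPositiveRankRelevancy (RankRelevancy):
    """
    Toy implementation: score pool descriptors by cosine similarity to the
    mean of the positive exemplars, mapped into [0, 1].
    """

    @classmethod
    def is_usable(cls):
        return True

    def get_config(self):
        return {}

    def rank(
        self,
        pos: Sequence[np.ndarray],
        neg: Sequence[np.ndarray],
        pool: Sequence[np.ndarray],
    ) -> Sequence[float]:
        if len(pool) == 0:
            return []
        if len(pos) == 0:
            # Without positive exemplars everything is equally (ir)relevant.
            return [0.0] * len(pool)
        # ``neg`` is ignored by this heuristic; a real implementation would
        # make use of both adjudication sets.
        center = np.mean(np.stack(pos), axis=0)
        c_norm = np.linalg.norm(center) or 1.0
        scores = []
        for v in pool:
            v_norm = np.linalg.norm(v) or 1.0
            cos = float(np.dot(v, center)) / (v_norm * c_norm)
            scores.append(0.5 * (cos + 1.0))
        return scores
```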
#### File: algorithms/rank_relevancy/wrap_classifier.py
```python
from typing import Sequence
import numpy as np
from smqtk.algorithms import SupervisedClassifier
from smqtk.representation import DescriptorElement
from smqtk.representation.descriptor_element.local_elements import (
DescriptorMemoryElement,
)
from smqtk.utils.configuration import (
from_config_dict,
make_default_config,
cls_conf_to_config_dict,
)
from . import RankRelevancy
class RankRelevancyWithSupervisedClassifier(RankRelevancy):
"""
Relevancy ranking that utilizes a usable supervised classifier for
on-the-fly training and inference.
# Classifier "cloning"
The input supervised classifier instance to the constructor is not directly
used, but its type and configuration are recorded in order to create a new
instance in ``rank`` to train and classify the index.
    The caveat here is that any runtime modifications to the input classifier
    that are not captured by its configuration will not be reflected by the
    classifier used in ``rank``.
Using a copy of the input classifier allows the ``rank`` method to be used
in parallel without blocking other calls to ``rank``.
:param smqtk.algorithms.SupervisedClassifier classifier_inst:
Supervised classifier instance to base the ephemeral ranking classifier
on. The type and configuration of this classifier is used to create a
clone at rank time. The input classifier instance is not modified.
"""
def __init__(self, classifier_inst):
super().__init__()
self._classifier_type = type(classifier_inst)
self._classifier_config = classifier_inst.get_config()
@classmethod
def get_default_config(cls):
c = super().get_default_config()
c['classifier_inst'] = \
make_default_config(SupervisedClassifier.get_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
config_dict = dict(config_dict) # shallow copy to write to input dict
config_dict['classifier_inst'] = \
from_config_dict(config_dict.get('classifier_inst', {}),
SupervisedClassifier.get_impls())
return super().from_config(
config_dict, merge_default=merge_default,
)
def get_config(self):
return {
'classifier_inst':
cls_conf_to_config_dict(self._classifier_type,
self._classifier_config),
}
def rank(
self,
pos: Sequence[np.ndarray],
neg: Sequence[np.ndarray],
pool: Sequence[np.ndarray],
) -> Sequence[float]:
if len(pool) == 0:
return []
# Train supervised classifier with positive/negative examples.
label_pos = 'pos'
label_neg = 'neg'
i = 0
def create_de(v: np.ndarray) -> DescriptorElement:
nonlocal i
# Hopefully type_str doesn't matter
de = DescriptorMemoryElement('', i)
de.set_vector(v)
i += 1
return de
classifier = self._classifier_type.from_config(self._classifier_config)
classifier.train({
label_pos: map(create_de, pos),
label_neg: map(create_de, neg),
})
# Report ``label_pos`` class probabilities as rank score.
scores = classifier.classify_arrays(pool)
return [c_map.get(label_pos, 0.0) for c_map in scores]
```
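
A hedged usage sketch of the wrapper follows. `SomeSupervisedClassifier` is a placeholder for any concrete `smqtk.algorithms.SupervisedClassifier` implementation available in your environment, and the descriptor dimensionality is arbitrary.

```python
import numpy as np

# Placeholder: substitute a real SupervisedClassifier implementation here.
base_classifier = SomeSupervisedClassifier()
ranker = RankRelevancyWithSupervisedClassifier(base_classifier)

pos = [np.random.rand(128) for _ in range(4)]    # adjudicated positives
neg = [np.random.rand(128) for _ in range(4)]    # adjudicated negatives
pool = [np.random.rand(128) for _ in range(10)]  # descriptors to rank

# ``scores`` parallels ``pool``; each value is the ephemeral classifier's
# probability for the positive label.
scores = ranker.rank(pos, neg, pool)
```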
#### File: smqtk/bin/createFileIngest.py
```python
import glob
import logging
import os.path as osp
from smqtk.representation import DataSet
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.utils import cli
from smqtk.utils.configuration import (
from_config_dict,
make_default_config,
)
def default_config():
return {
"data_set": make_default_config(DataSet.get_impls())
}
def cli_parser():
parser = cli.basic_cli_parser(__doc__)
parser.add_argument("input_files", metavar='GLOB', nargs='*')
return parser
def main():
parser = cli_parser()
args = parser.parse_args()
config = cli.utility_main_helper(default_config, args)
log = logging.getLogger(__name__)
log.debug("Script arguments:\n%s" % args)
def iter_input_elements():
for f in args.input_files:
f = osp.expanduser(f)
if osp.isfile(f):
yield DataFileElement(f)
else:
log.debug("Expanding glob: %s" % f)
for g in glob.glob(f):
yield DataFileElement(g)
log.info("Adding elements to data set")
ds = from_config_dict(config['data_set'], DataSet.get_impls())
ds.add_data(*iter_input_elements())
if __name__ == '__main__':
main()
```
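
For reference, the JSON configuration this script consumes has a `data_set` block in the "type plus per-implementation" layout that `make_default_config` produces (the same shape the factory tests later in this dump exercise). The sketch below is only illustrative; `your.module.YourDataSet` is a placeholder key, and the real options can be listed via `DataSet.get_impls()`.

```python
# Hypothetical configuration shape (shown as a Python dict; the script reads
# the equivalent JSON). "your.module.YourDataSet" is a placeholder name.
example_config = {
    "data_set": {
        "type": "your.module.YourDataSet",
        "your.module.YourDataSet": {
            # implementation-specific constructor arguments go here
        },
    }
}
```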
#### File: representation/descriptor_set/__init__.py
```python
import abc
from smqtk.representation import SmqtkRepresentation, DescriptorElement
from smqtk.utils.plugin import Pluggable
class DescriptorSet (SmqtkRepresentation, Pluggable):
"""
Index of descriptors, keyed and query-able by descriptor UUID.
    Note that these indexes do not use the descriptor type strings. Thus, if
    a set of descriptors has multiple elements with the same UUID but
    different type strings, they will collide with each other in these
    indexes. In such a case, when dealing with descriptors from different
    generators, it is advisable to use multiple indices.
"""
def __delitem__(self, uuid):
self.remove_descriptor(uuid)
def __getitem__(self, uuid):
return self.get_descriptor(uuid)
def __iter__(self):
return self.iterdescriptors()
def __len__(self):
return self.count()
def __contains__(self, item):
if isinstance(item, DescriptorElement):
# Testing for UUID inclusion since element hash based on UUID
# value.
return self.has_descriptor(item.uuid())
return False
def get_many_vectors(self, uuids):
"""
Get underlying vectors of descriptors associated with given uuids.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.abc.Iterable[collections.abc.Hashable]
:raises: KeyError: When there is not a descriptor in this set for one
or more input UIDs.
:return: List of vectors for descriptors associated with given uuid
values.
:rtype: list[numpy.ndarray | None]
"""
return DescriptorElement.get_many_vectors(
self.get_many_descriptors(uuids)
)
@abc.abstractmethod
def count(self):
"""
:return: Number of descriptor elements stored in this index.
:rtype: int
"""
@abc.abstractmethod
def clear(self):
"""
Clear this descriptor index's entries.
"""
@abc.abstractmethod
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.abc.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
@abc.abstractmethod
def add_descriptor(self, descriptor):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple copies
of the descriptor in the index (based on UUID). Added descriptors
overwrite indexed descriptors based on UUID.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
"""
@abc.abstractmethod
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
Adding the same descriptor multiple times should not add multiple copies
of the descriptor in the index (based on UUID). Added descriptors
overwrite indexed descriptors based on UUID.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.abc.Iterable[smqtk.representation.DescriptorElement]
"""
@abc.abstractmethod
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.abc.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
@abc.abstractmethod
def get_many_descriptors(self, uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.abc.Iterable[collections.abc.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: collections.abc.Iterable[smqtk.representation.DescriptorElement]
"""
@abc.abstractmethod
def remove_descriptor(self, uuid):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.abc.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
"""
@abc.abstractmethod
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.abc.Iterable[collections.abc.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
@abc.abstractmethod
def iterkeys(self):
"""
Return an iterator over indexed descriptor keys, which are their UUIDs.
:rtype: collections.abc.Iterator[collections.abc.Hashable]
"""
@abc.abstractmethod
def iterdescriptors(self):
"""
Return an iterator over indexed descriptor element instances.
:rtype: collections.abc.Iterator[smqtk.representation.DescriptorElement]
"""
@abc.abstractmethod
def iteritems(self):
"""
Return an iterator over indexed descriptor key and instance pairs.
:rtype: collections.abc.Iterator[(collections.abc.Hashable,
smqtk.representation.DescriptorElement)]
"""
def keys(self):
""" alias for iterkeys """
return self.iterkeys()
def items(self):
""" alias for iteritems """
return self.iteritems()
```
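
Below is a minimal in-memory sketch of the interface contract, assuming the `DescriptorSet` class above is in scope. SMQTK ships its own memory-backed implementation; this one exists only to show how the abstract methods relate to each other and elides caching and serialization concerns.

```python
class DictDescriptorSet (DescriptorSet):
    """
    Illustrative dict-backed descriptor set (not an SMQTK-provided plugin).
    """

    @classmethod
    def is_usable(cls):
        return True

    def get_config(self):
        return {}

    def __init__(self):
        super().__init__()
        #: :type: dict[collections.abc.Hashable, DescriptorElement]
        self._table = {}

    def count(self):
        return len(self._table)

    def clear(self):
        self._table.clear()

    def has_descriptor(self, uuid):
        return uuid in self._table

    def add_descriptor(self, descriptor):
        # Same-UUID additions overwrite the existing entry, per the contract.
        self._table[descriptor.uuid()] = descriptor

    def add_many_descriptors(self, descriptors):
        for d in descriptors:
            self.add_descriptor(d)

    def get_descriptor(self, uuid):
        return self._table[uuid]  # KeyError on unknown UUID, per the contract

    def get_many_descriptors(self, uuids):
        return (self._table[u] for u in uuids)

    def remove_descriptor(self, uuid):
        del self._table[uuid]

    def remove_many_descriptors(self, uuids):
        for u in uuids:
            del self._table[u]

    def iterkeys(self):
        return iter(self._table.keys())

    def iterdescriptors(self):
        return iter(self._table.values())

    def iteritems(self):
        return iter(self._table.items())
```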
#### File: smqtk/utils/bits.py
```python
import math
import numpy
# noinspection PyUnresolvedReferences
from six.moves import range
from .combinatorics import ncr
def next_perm(v: int) -> int:
"""
Compute the lexicographically next bit permutation
Generates next permutation with a given amount of set bits,
given the previous lexicographical value.
Taken from http://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
"""
t = (v | (v - 1)) + 1
w = t | ((((t & -t) // (v & -v)) >> 1) - 1)
return w
def iter_perms(length, n):
"""
    Return a generator over bit combinations of length ``length`` with ``n``
    set bits. If ``n`` <= 0, the generator yields nothing.
    :param length: Total bit length to work with. The ``n`` in the nCr problem.
    :type length: int
    :param n: Number of bits to be set in permutations. The ``r`` in the nCr
        problem.
    :type n: int
    :return: Generator of bit vector permutations of the value ``(1<<n)-1``
        over ``length`` bits.
    :rtype: typing.Generator[int]
    """
    if n <= 0:
        # Ending the generator with ``return`` instead of raising
        # StopIteration keeps this valid under PEP 479 (Python 3.7+).
        return
n = min(length, n)
s = (1 << n) - 1
yield s
for _ in range(ncr(length, n) - 1):
s = next_perm(s)
yield s
def neighbor_codes(b, c, d):
"""
Iterate through integers of bit length ``b``, where ``b`` is the number
of bits, that are ``d`` hamming distance away from query code ``c``.
This will yield a number of elements equal to ``nCr(b, d)``.
We expect ``d`` to be the integer hamming distance,
e.g. h(001101, 100101) == 2, not 0.333.
    :param b: integer bit length
    :type b: int
:param c: Query small-code integer
:type c: int
:param d: Integer hamming distance
:type d: int
"""
if not d:
yield c
else:
for fltr in iter_perms(b, d):
yield c ^ fltr
def bit_vector_to_int_large(v):
"""
Transform a numpy vector representing a sequence of binary bits [0 | >0]
into an integer representation.
This function is the special form that can handle very large integers
(>64bit).
:param v: 1D Vector of bits
:type v: numpy.ndarray
:return: Integer equivalent
:rtype: int
"""
c = 0
for b in v:
c = (c << 1) + int(b)
return c
def int_to_bit_vector_large(integer, bits=0):
"""
Transform integer into a bit vector, optionally of a specific length.
This function is the special form that can handle very large integers
(>64bit).
:raises ValueError: If ``bits`` specified is smaller than the required bits
to represent the given ``integer`` value.
:param integer: integer to convert
:type integer: int
:param bits: Optional fixed number of bits that should be represented by the
vector.
    :type bits: int
:return: Bit vector as numpy array (big endian).
:rtype: numpy.ndarray[bool]
"""
# Can't use math version because floating-point precision runs out after
# about 2^48
# -2 to remove length of '0b' string prefix
size = len(bin(integer)) - 2
if bits and (bits - size) < 0:
raise ValueError("%d bits too small to represent integer value %d."
% (bits, integer))
# Converting integer to array
v = numpy.zeros(bits or size, numpy.bool_)
for i in range(0, size):
v[-(i+1)] = integer & 1
integer >>= 1
return v
def popcount(v: int) -> int:
"""
Count the number of bits set (number of 1-bits, not 0-bits).
Pure python popcount algorithm adapted implementation at
https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel.
    Maximum known stable value that can be passed through this method:
    2**256 - 2. See the ``POPCOUNT_VMAX`` module constant.
    :param v: Non-negative integer to count the set bits of.
:return: Number of set bits in the given integer ``v``.
"""
# TODO: C implementation of this
# since this version, being in python, isn't faster than counting 1's
# in result of ``bin`` function.
# Cannot take the log of 0.
if not v:
return 0
# T is the number of bits used to represent v to the nearest power of 2
ceil, log = math.ceil, math.log
tp = max(8, int(2**ceil(log(v.bit_length()) / log(2))))
t = 2**tp-1
b = tp // 8
# bit-length constrained
h55 = t//3
h33 = t//15*3
h0f = t//255*15
h01 = t//255
# noinspection PyAugmentAssignment
v = v - ((v >> 1) & h55)
v = (v & h33) + ((v >> 2) & h33)
v = (v + (v >> 4)) & h0f
# Need the extra ``& t`` after the multiplication in order to simulate bit
# truncation as if v were only a tp-bit integer
    # Magic 8 represents bits in a byte
return ((v * h01) & t) >> ((b-1) * 8)
# Maximum known stable value that can be passed as ``v``.
POPCOUNT_VMAX = (2**256) - 2
```
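
A few hand-checked sanity values for the helpers above (assuming the functions in this module are in scope):

```python
import numpy

# All 4-bit codes with exactly two set bits, in lexicographic order.
assert list(iter_perms(4, 2)) == [0b0011, 0b0101, 0b0110, 0b1001, 0b1010, 0b1100]
# Codes at Hamming distance 1 from 0b0101 within 4 bits.
assert sorted(neighbor_codes(4, 0b0101, 1)) == [0b0001, 0b0100, 0b0111, 0b1101]
assert popcount(0b1011101) == 5
assert bit_vector_to_int_large(numpy.array([1, 0, 1, 1, 0])) == 0b10110
assert list(int_to_bit_vector_large(6, bits=4)) == [False, True, True, False]
```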
#### File: smqtk/utils/feature_memory.py
```python
import logging
import multiprocessing
import numpy as np
from smqtk.utils import ReadWriteLock
from smqtk.utils import SimpleTimer
from smqtk.utils.distance_kernel import DistanceKernel
class FeatureMemory (object):
"""
Class for encapsulating and managing feature and kernel matrices for
different feature types
"""
@classmethod
def construct_from_files(cls, id_vector_file, bg_flags_file,
feature_mat_file, kernel_mat_file, rw_lock=None):
""" Initialize FeatureMemory object from file sources.
:param id_vector_file: File containing the numpy.savetxt(...) output of
clip ID values in the order in which they associate to the rows of
the kernel matrix.
:type id_vector_file: str
        :param feature_mat_file: File containing the feature matrix as saved by
            numpy.save(...) (saved as an ndarray, converted to matrix on load).
:type feature_mat_file: str
:param kernel_mat_file: File containing the kernel matrix as saved by
numpy.save(...) (saved as an ndarray, converted to matrix on load).
:type kernel_mat_file: str
        :param bg_flags_file: File containing output of
            numpy.savetxt(...) where each index maps a row index of the kernel
            to whether or not the associated clip ID should be considered a
            background video.
:type bg_flags_file: str
:param rw_lock: Optional ReadWriteLock for this instance to use. If not
provided, we will create our own.
:type rw_lock: None or ReadWriteLock
:return: Symmetric FeatureMemory constructed with the data provided in
the provided files.
:rtype: FeatureMemory
"""
clip_ids = np.array(np.load(id_vector_file))
bg_flags = np.array(np.load(bg_flags_file))
# noinspection PyCallingNonCallable
feature_mat = np.matrix(np.load(feature_mat_file))
# noinspection PyCallingNonCallable
kernel_mat = np.matrix(np.load(kernel_mat_file))
bg_clips = set([clip_ids[i]
for i, f in enumerate(bg_flags)
if f])
return FeatureMemory(clip_ids, bg_clips, feature_mat, kernel_mat,
rw_lock=rw_lock)
@property
def _log(self):
return logging.getLogger('.'.join([self.__module__,
self.__class__.__name__]))
def __init__(self, id_vector, bg_clip_ids, feature_mat, kernel_mat,
rw_lock=None):
""" Initialize this FeatureMemory object
This class must be used with numpy ndarray and matrix classes for shared
memory purposes.
NOTE: Arrays and matrices given here must own their data! This is
currently required in order to resize them later when updating with new
    feature vectors. A ValueError will be thrown if a given array/matrix
does not own its data.
TODO: Allow kernel matrix to be optional, causing it to be built from
the provided feature matrix (not a recommended action).
:param id_vector: (numpy) Array of clip IDs. This is used as the map
        from an index position to the clip ID it is associated with in the
kernel and distance kernel matrices.
:type id_vector: ndarray of int
:param bg_clip_ids: Set of clip IDs that are to be treated as background
clip IDs.
:type bg_clip_ids: set[int]
:param feature_mat: (numpy) Matrix of features for clip IDs. Features
should be stored vertically, i.e. Each row is a feature for a
particular clip ID (id_vector being the index-to-clipID map).
:type feature_mat: matrix of double
:param kernel_mat: (numpy) Matrix detailing the distances between
feature vectors. This must be a square, symmetric matrix.
:type kernel_mat: matrix of double
:param rw_lock: Optional ReadWriteLock for this instance to use. If not
provided, we will create our own.
:type rw_lock: None or ReadWriteLock
"""
# assert isinstance(id_vector, (ndarray, ArrayProxy)), \
# "ID vector not given as a numpy.ndarray!"
assert isinstance(bg_clip_ids, (set, frozenset)), \
"Background ID vector not a numpy.ndarray!"
# assert isinstance(feature_mat, (matrix, MatrixProxy)), \
# "Kernel matrix not a numpy.matrix!"
# assert isinstance(kernel_mat, (matrix, MatrixProxy)), \
# "Distance kernel not a numpy.matrix!"
# noinspection PyUnresolvedReferences
# -> base IS a member of the matrix class...
if id_vector.base is not None:
raise ValueError("Given ``id_vector`` does not own its data! It "
"will not be transformable later.")
elif feature_mat.base is not None:
raise ValueError("Given ``feature_mat`` does not own its data! It "
"will not be transformable later.")
elif kernel_mat.base is not None:
raise ValueError("Given ``kernel_mat`` does not own its data! It "
"will not be transformable later.")
# The kernel should be square and should be the same size as the feature
        # matrix's number of rows (unique stored clip features).
if not (kernel_mat.shape[0] == kernel_mat.shape[1] ==
feature_mat.shape[0]):
raise ValueError("The distance kernel matrix provided is either "
"misshapen or conflicts with the dimensions of "
"the provided feature matrix. (kernel matrix "
"shape: %s, num feature vectors: %d"
% (kernel_mat.shape, feature_mat.shape[0]))
self._log.debug("Lock given: %s", rw_lock)
if rw_lock:
assert isinstance(rw_lock, ReadWriteLock), \
"Not given a value ReadWriteLock instance!"
self._rw_lock = rw_lock
else:
self._log.debug("Falling back on bad lock given (given: %s)",
type(rw_lock))
self._rw_lock = ReadWriteLock()
self._id_vector = id_vector
self._bg_clip_ids = set(bg_clip_ids)
self._feature_mat = feature_mat
self._kernel_mat = kernel_mat
# Helper structure mapping clipIDs to their row index
self._cid2idx_map = dict((cid, idx) for idx, cid
in enumerate(self._id_vector))
@staticmethod
def _histogram_intersection_distance(a, b):
"""
Calculates distance between two vectors using histogram intersection.
Non-branching version of the histogram intersection algorithm.
:param a: A vector in array form.
:type a: ndarray
:param b: A vector in array form.
:type b: ndarray
:return: Histogram Intersection (HI) distance scalar
:rtype: double
"""
# noinspection PyUnresolvedReferences
return (a + b - np.abs(a - b)).sum() * 0.5
def get_ids(self):
"""
NOTE: NOT THREAD SAFE. Use the returned structure only in conjunction
with this object's lock when in a parallel environment to prevent
possible memory corruption.
:return: Ordered vector of clip IDs along the row-edge of this object's
feature matrix and along both edges of the kernel matrix.
:rtype: numpy.core.multiarray.ndarray
"""
return self._id_vector
def get_bg_ids(self):
"""
NOTE: NOT THREAD SAFE. Use the returned structure only in conjunction
with this object's lock when in a parallel environment to prevent
possible memory corruption.
        :return: Set of clip IDs that we are treating as background clips.
        :rtype: frozenset[int]
"""
return frozenset(self._bg_clip_ids)
def get_feature_matrix(self):
"""
NOTE: NOT THREAD SAFE. Use the returned structure only in conjunction
with this object's lock when in a parallel environment to prevent
possible memory corruption.
:return: Matrix recording feature vectors for a feature type. See the
id vector for row-wise index-to-clipID association.
:rtype: numpy.matrixlib.defmatrix.matrix
"""
return self._feature_mat
def get_kernel_matrix(self):
"""
NOTE: NOT THREAD SAFE. Use the returned structure only in conjunction
with this object's lock when in a parallel environment to prevent
possible memory corruption.
:return: Symmetric matrix detailing the distances between any two clip
ID features. Distances are computed via histogram intersection.
:rtype: matrix
"""
return self._kernel_mat
def get_lock(self):
"""
:return: a reference to this object's read/write lock.
:rtype: ReadWriteLock
"""
return self._rw_lock
def get_distance_kernel(self):
"""
DistanceKernel object constructed from this feature's current state.
:return: This feature distance kernel.
:rtype: DistanceKernel
"""
with self._rw_lock.read_lock():
return DistanceKernel(self._id_vector, self._id_vector,
self._kernel_mat, self._bg_clip_ids,
self._rw_lock)
def get_feature(self, *clip_id_or_ids):
"""
        Return a matrix where each row is the feature vector for one or more
        clip IDs. The given list of clip IDs acts as the index-to-clipID
map for the returned matrix's rows. If repeat clip IDs are provided in
the input, there will be repeat feature vectors in the returned matrix.
Raises ValueError if the given clip ID is not represented in the current
matrix.
:param clip_id_or_ids: One or more integer clip IDs to retrieve the
feature vectors for.
:type clip_id_or_ids: tuple of int
:return: NxM matrix, where N is the number of clip IDs requested and M
            is the length of a feature vector for this feature type.
:rtype: np.matrix
"""
assert all(isinstance(e, int) for e in clip_id_or_ids), \
"Not given an integer or a valid iterable over integers!"
with self._rw_lock.read_lock():
# rows = num of IDs given, cols = width of feature matrix
with SimpleTimer("Allocating return matrix", self._log.debug):
# noinspection PyUnresolvedReferences
# -> matrix class DOES have ``dtype`` property...
ret_mat = np.ndarray((len(clip_id_or_ids),
self._feature_mat.shape[1]),
self._feature_mat.dtype)
for i, cid in enumerate(clip_id_or_ids):
feature_idx = self._cid2idx_map[cid]
ret_mat[i, :] = self._feature_mat[feature_idx, :]
return ret_mat
# noinspection PyUnresolvedReferences,PyCallingNonCallable
def update(self, clip_id, feature_vec=None, is_background=False,
timeout=None):
"""
Update this feature with a feature vector associated with a clip ID. If
clip ID is already in the feature matrix, we replace the current vector
with the given one.
Either way, the distance kernel is updated with either a new row/column,
or updating relevant slots in the existing distance kernel.
:raise ValueError: if the given feature vector is not compatible with
our feature vector.
:raise RuntimeError: If a timeout is given and the underlying write lock
doesn't acquire in that amount of time.
:param clip_id: The ID of the clip the given ``feature_vec`` represents.
:type clip_id: int
:param feature_vec: Feature vector associated to the given clip ID.
:type feature_vec: ndarray
:param is_background: Flag declaring that this clip ID represents a
background feature.
:type is_background: bool
:param timeout: Timeout seconds for the underlying write lock to acquire
before a RuntimeError is thrown.
:type timeout: None or int or float
"""
with self._rw_lock.write_lock(timeout):
clip_id = int(clip_id)
if feature_vec is not None and \
not (feature_vec.ndim == 1
and len(feature_vec) == self._feature_mat.shape[1]):
raise ValueError("Given feature vector not compatible "
"(dimensionality or length does not match)")
# Update the given feature vector and kernel distances
if self._cid2idx_map.get(clip_id, None) is not None:
# In all cases, update the background status of the clip
if is_background:
self._bg_clip_ids.add(clip_id)
else:
self._bg_clip_ids.discard(clip_id)
# If we were given a new feature vector, update entries
if feature_vec is not None:
idx = self._cid2idx_map[clip_id]
self._feature_mat[idx] = feature_vec
new_dist = np.mat(tuple(
self._histogram_intersection_distance(feature_vec, fv)
for fv in self._feature_mat
))
self._kernel_mat[idx, :] = new_dist
self._kernel_mat[:, idx] = new_dist
# Given a new feature to add.
else:
if feature_vec is None:
raise ValueError("Update given a new clip ID, but no "
"feature vector provided.")
# Update internal feature matrix with added vector
self._cid2idx_map[clip_id] = self._id_vector.size
self._id_vector.resize((self._id_vector.size + 1,),
refcheck=False)
self._id_vector[-1] = clip_id
if is_background:
self._bg_clip_ids.add(clip_id)
# noinspection PyUnresolvedReferences
if self._feature_mat.base is not None:
raise RuntimeError("Feature matrix does not own its data")
# Since we're only adding a new row, this resize does not affect
# the positioning of the existing data.
# noinspection PyUnresolvedReferences
self._feature_mat.resize((self._feature_mat.shape[0] + 1,
self._feature_mat.shape[1]),
refcheck=False
)
self._feature_mat[-1, :] = feature_vec
# Need to add a new row AND column to the distance kernel.
if self._kernel_mat.base is not None:
raise RuntimeError("kernel matrix does not own its data")
assert self._kernel_mat.shape[0] == self._kernel_mat.shape[1], \
"kernel matrix is not symmetric for some reason???"
# noinspection PyPep8Naming
# -> because I like ``N`` better...
N = self._kernel_mat.shape[0]
kernel_copy = np.matrix(self._kernel_mat)
self._kernel_mat.resize((N+1, N+1), refcheck=False)
self._kernel_mat[:N, :N] = kernel_copy
del kernel_copy
# Computing new feature distance (histogram intersection). Only
                # need to compute this once because of HI being
# commutative and the kernel matrix being symmetric.
dist_vec = np.mat(tuple(
self._histogram_intersection_distance(feature_vec, fv)
for fv in self._feature_mat
))
self._kernel_mat[-1, :] = dist_vec
self._kernel_mat[:, -1] = dist_vec.T
class FeatureMemoryMap (object):
""" Map different feature types to their own FeatureMemory object
"""
# Basically a pass-through for all the functions in FeatureMemory, but with
    # an additional leading parameter for the feature type.
def __init__(self):
self._map_lock = multiprocessing.RLock()
#: :type: dict[str, FeatureMemory]
self._feature2memory = {}
def get_feature_types(self):
""" Get available feature types in this map.
        :return: List of string names of all feature types initialized in this
            map.
        :rtype: list[str]
"""
with self._map_lock:
return list(self._feature2memory.keys())
def initialize(self, feature_type, id_vector, bg_clip_ids_vector,
feature_mat, kernel_mat, rw_lock=None):
""" Initialize a feature type within this map
:raise KeyError: When the given feature_type is already present in the
map (requires a removal first).
:raise ValueError: When there is an issue with one or more of the
provided input data elements.
:param feature_type: The name assigned to the feature type
:param id_vector: Numpy array mapping indices to a clip ID.
:param bg_clip_ids_vector: Numpy array of integers detailing the clip
IDs that are to be considered background.
:param feature_mat: Numpy matrix of all clip feature vectors. This
should be an order 'C' matrix with features stacked vertically. The
``id_vector`` maps row indices to the clip ID the feature
represents.
:param kernel_mat: Pre-computed distance kernel
:param rw_lock: Optional read-write lock to manually use with underlying
feature memory construct. Otherwise we create our own.
"""
with self._map_lock:
# KeyError on repeat feature_type key, require removal first
if feature_type in self._feature2memory:
raise KeyError("Key '%s' already present in our mapping. "
"Please remove first before initializing."
% feature_type)
self._feature2memory[feature_type] = \
FeatureMemory(id_vector, bg_clip_ids_vector, feature_mat,
kernel_mat, rw_lock)
def initialize_from_files(self, feature_type, id_vector_file, bg_flags_file,
feature_mat_file, kernel_mat_file, rw_lock=None):
""" Initialize a feature type within this map from file resources.
Files pointed to must be of the following formats:
- id_vector_file:
File resulting from a numpy.savetxt() of a one-dimensional
array, mapping index position with an integer clip ID. This
should correlate to the clip IDs of the row-major features
stored in feature_mat_file.
- bg_flags_file:
File resulting from a numpy.savetxt() of a one-dimensional
array, mapping index position with whether that clip should be
                treated as a background video or not. This should correlate with
                features in the same way as the id_vector_file.
- feature_mat_file:
File resulting from a numpy.save() of an ndarray. This will be
loaded in as a matrix. This should be the initial NxD feature
matrix for this feature type.
- kernel_mat_file:
File resulting from a numpy.save() of an ndarray. This will be
loaded in as a matrix. This should be the computed NxN distance
kernel for this feature type.
:param feature_type: The name assigned to the feature type.
:type id_vector_file: str
:type bg_flags_file: str
:type feature_mat_file: str
:type kernel_mat_file: str
:param rw_lock: Optionally specified ReadWriteLock instance to use with
the underlying FeatureMemory and DistanceKernel objects associated
with this feature type (not recommended)
:type rw_lock: ReadWriteLock
"""
with self._map_lock:
# even though this happens in the initialize() call we make here,
# we would like to short circuit before loading data if we can.
if feature_type in self._feature2memory:
raise KeyError("Key '%s' already present in our mapping. "
"Please remove first before initializing.")
self._feature2memory[feature_type] = \
FeatureMemory.construct_from_files(id_vector_file,
bg_flags_file,
feature_mat_file,
kernel_mat_file,
rw_lock)
def remove(self, feature_type):
""" Removes a feature type from our map, releasing its contents.
:raise KeyError: If the given feature type does not currently map to
anything.
        :param feature_type: The feature type to remove.
:type feature_type: str
"""
with self._map_lock:
del self._feature2memory[feature_type]
def get_feature_memory(self, feature_type):
""" Get the underlying FeatureMemory object for a feature type.
:raise KeyError: If the given feature type does not currently map to
anything.
:param feature_type: The feature type to get the memory object of.
:type feature_type: str
:return: FeatureMemory object associated to the given feature type.
:rtype: FeatureMemory
"""
with self._map_lock:
return self._feature2memory[feature_type]
def get_distance_kernel(self, feature_type):
""" Get the DistanceKernel for a feature type.
:raise KeyError: If the given feature type does not currently map to
anything.
        :param feature_type: The feature type to get the distance kernel of.
        :type feature_type: str
        :return: DistanceKernel for the given feature type.
        :rtype: DistanceKernel
"""
with self._map_lock:
return self._feature2memory[feature_type]\
.get_distance_kernel()
def get_feature(self, feature_type, *clip_id_or_ids):
"""
        With respect to the given feature type, return a matrix where each row
        is the feature vector for one or more clip IDs. The given clip IDs act
as the index-to-clipID map for the returned matrix's rows. If repeat
clip IDs are provided in the input, there will be repeat feature vectors
in the returned matrix.
:raise ValueError: If the given clip ID is not represented in the
feature matrix.
        :param feature_type: Feature type to access.
:type feature_type: str
:param clip_id_or_ids: One or more integer clip IDs to retrieve the
feature vectors for.
:type clip_id_or_ids: int
:return: NxM matrix, where N is the number of clip IDs requested and M
            is the length of a feature vector for this feature type.
:rtype: np.matrix
"""
with self._map_lock:
return self._feature2memory[feature_type]\
.get_feature(*clip_id_or_ids)
def update(self, feature_type, clip_id, feature_vector, is_background=False,
timeout=None):
with self._map_lock:
return self._feature2memory[feature_type]\
.update(clip_id, feature_vector, is_background, timeout)
```
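
One detail worth calling out from the module above: the "non-branching" histogram intersection used throughout `FeatureMemory` is just the sum of element-wise minima, since `(a + b - |a - b|) / 2 == min(a, b)`. A quick check with made-up values:

```python
import numpy as np

a = np.array([0.2, 0.5, 0.3])
b = np.array([0.1, 0.6, 0.3])
hi_branchless = (a + b - np.abs(a - b)).sum() * 0.5
assert np.isclose(hi_branchless, np.minimum(a, b).sum())  # both equal 0.9
```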
#### File: machine_run_scripts/hbase_gpu_machine/transfer_file_descriptors_to_psql.py
```python
import pickle
import logging
import multiprocessing
import os
import re
from smqtk.representation import DescriptorElementFactory
from smqtk.representation.descriptor_element.local_elements import DescriptorFileElement
from smqtk.representation.descriptor_element.postgres import PostgresDescriptorElement
from smqtk.utils import cli
from smqtk.utils import file
ROOT_DIR = "/data/kitware/smqtk/image_cache_cnn_compute/descriptors"
file_element_config = {
'save_dir': ROOT_DIR,
'subdir_split': 10,
}
psql_element_config = {
'db_name': 'smqtk',
'db_host': 'localhost',
'db_port': 6432, # PgBouncer port
'db_user': 'smqtk',
'db_pass': '<PASSWORD>',
}
file_element_factory = DescriptorElementFactory(
DescriptorFileElement,
file_element_config,
)
psql_element_factory = DescriptorElementFactory(
PostgresDescriptorElement,
psql_element_config,
)
fname_re = re.compile(r'(\w+)\.(\w+)\.vector\.npy')
def transfer_vector(type_str, uuid_str):
pd = psql_element_factory(type_str, uuid_str)
if not pd.has_vector():
fd = file_element_factory(type_str, uuid_str)
# removing the "-0" artifacts
pd.set_vector( fd.vector() + 0 )
def proc_transfer(in_queue):
running = True
while running:
packet = in_queue.get()
if packet:
type_str, uuid_str = packet
transfer_vector(type_str, uuid_str)
else:
running = False
def main():
cli.initialize_logging(logging.getLogger(), logging.DEBUG)
log = logging.getLogger(__name__)
# For each file in descriptor vector file tree, load from file
# [type, uuid, vector] and insert into PSQL element.
log.info("Setting up parallel environment")
in_queue = multiprocessing.Queue()
workers = []
    for i in range(multiprocessing.cpu_count()):
p = multiprocessing.Process(
target=proc_transfer,
args=(in_queue,)
)
workers.append(p)
p.start()
try:
log.info("Loading filename list")
with open("descriptor_file_names.5.3mil.pickle") as f:
fname_list = cPickle.load(f)
log.info("Running through filename list")
for n in fname_list:
m = fname_re.match(n)
assert m
type_str = m.group(1)
uuid_str = m.group(2)
#print type_str, uuid_str
#break
in_queue.put( (type_str, uuid_str) )
log.info("Sending worker terminal packets")
for w in workers:
in_queue.put(None)
except:
log.info("Terminating workers")
for w in workers:
w.terminate()
finally:
log.info("Waiting for workers to complete")
for w in workers:
w.join()
log.info("Workers joined")
if __name__ == '__main__':
main()
```
#### File: algorithms/descriptor_generator/test_colordescriptor.py
```python
import unittest
import unittest.mock as mock
import pytest
from smqtk.algorithms.descriptor_generator import DescriptorGenerator
from smqtk.algorithms.descriptor_generator.colordescriptor.colordescriptor \
import ColorDescriptor_Image_csift # arbitrary leaf class
from smqtk.utils.configuration import configuration_test_helper
@pytest.mark.skipif(not ColorDescriptor_Image_csift.is_usable(),
reason="ColorDescriptor generator is not currently usable")
class TestColorDescriptor (unittest.TestCase):
def test_impl_findable(self):
self.assertIn(ColorDescriptor_Image_csift.__name__,
DescriptorGenerator.get_impls())
@mock.patch('smqtk.algorithms.descriptor_generator'
'.colordescriptor.colordescriptor.safe_create_dir')
def test_configuration(self, _mock_scd):
i = ColorDescriptor_Image_csift(
model_directory='test model dir',
work_directory='test work dir',
model_gen_descriptor_limit=123764,
kmeans_k=42, flann_distance_metric='hik',
flann_target_precision=0.92, flann_sample_fraction=0.71,
flann_autotune=True, random_seed=7, use_spatial_pyramid=True,
parallel=3,
)
for inst in configuration_test_helper(i):
assert inst._model_dir == 'test model dir'
assert inst._work_dir == 'test work dir'
assert inst._model_gen_descriptor_limit == 123764
assert inst._kmeans_k == 42
assert inst._flann_distance_metric == 'hik'
assert inst._flann_target_precision == 0.92
assert inst._flann_sample_fraction == 0.71
assert inst._flann_autotune is True
assert inst._rand_seed == 7
assert inst._use_sp is True
assert inst.parallel == 3
```
#### File: algorithms/relevancy_index/test_RI_abstract.py
```python
from __future__ import division, print_function
import unittest
import unittest.mock as mock
from smqtk.algorithms.relevancy_index import RelevancyIndex
class DummyRI (RelevancyIndex):
@classmethod
def is_usable(cls):
return True
def rank(self, pos, neg):
pass
def get_config(self):
pass
def count(self):
return 0
def build_index(self, descriptors):
pass
class TestRelevancyIndexAbstract (unittest.TestCase):
def test_count(self):
index = DummyRI()
self.assertEqual(index.count(), 0)
self.assertEqual(index.count(), len(index))
        # Pretend that there were things in there. Len should pass it through
index.count = mock.Mock()
index.count.return_value = 5
self.assertEqual(len(index), 5)
```
#### File: tests/representation/test_DetectionElementFactory.py
```python
import unittest.mock as mock
from smqtk.representation import DetectionElement
from smqtk.representation.detection_element_factory \
import DetectionElementFactory
def test_get_default_config():
"""
Test that get_default_config method does not error and returns a
dictionary with a ``type`` key.
"""
c = DetectionElementFactory.get_default_config()
assert isinstance(c, dict)
assert 'type' in c
@mock.patch.object(DetectionElementFactory, '__init__')
@mock.patch.object(DetectionElementFactory, 'get_default_config')
@mock.patch.object(DetectionElement, 'get_impls')
def test_from_config_no_merge(m_de_get_impls, m_def_get_default_config,
m_def_init):
"""
Test that ``from_config`` appropriately constructs a factory instance
without merging a default configuration.
"""
# Because __init__ needs to return None
m_def_init.return_value = None
# Mock available implementations of DetectionElement
T1 = mock.MagicMock(spec=DetectionElement)
T1.__name__ = 'T1'
T1.__module__ = __name__
T2 = mock.MagicMock(spec=DetectionElement)
T2.__name__ = 'T2'
T2.__module__ = __name__
expected_impls_set = {T1, T2}
m_de_get_impls.return_value = expected_impls_set
# Mock default configuration return from class method.
expected_default_conf = {
'type': None,
f'{__name__}.T1': {'z': 'z'},
f'{__name__}.T2': {'y': 'y'},
}
m_def_get_default_config.return_value = expected_default_conf
# Test configuration we are passing to ``from_config``.
test_config = {'type': f'{__name__}.T2',
f'{__name__}.T2': {'a': 1, 'b': 'c'}}
# Because we are not merging default config, we expect only the contents
# of the passed config to reach the factory constructor.
expected_type = T2
expected_conf = {'a': 1, 'b': 'c'}
DetectionElementFactory.from_config(test_config, merge_default=False)
m_def_get_default_config.assert_not_called()
m_de_get_impls.assert_called_once()
m_def_init.assert_called_once_with(expected_type, expected_conf)
@mock.patch.object(DetectionElementFactory, '__init__')
@mock.patch.object(DetectionElementFactory, 'get_default_config')
@mock.patch.object(DetectionElement, 'get_impls')
def test_from_config_with_merge(m_de_get_impls, m_def_get_default_config,
m_def_init):
"""
Test that ``from_config`` appropriately constructs a factory instance
after merging the default configuration.
"""
# Because __init__ needs to return None
m_def_init.return_value = None
# Mock available implementations of DetectionElement
# - Overriding class location to be "local" for testing.
T1 = mock.MagicMock(spec=DetectionElement)
T1.__name__ = 'T1'
T1.__module__ = __name__
T2 = mock.MagicMock(spec=DetectionElement)
T2.__name__ = 'T2'
T2.__module__ = __name__
expected_impls_set = {T1, T2}
m_de_get_impls.return_value = expected_impls_set
# Mock default configuration return from class method.
expected_default_conf = {
'type': None,
f'{__name__}.T1': {'z': 'z'},
f'{__name__}.T2': {'y': 'y'},
}
m_def_get_default_config.return_value = expected_default_conf
# Partial configuration to pass to ``from_config``.
test_config = {'type': f'{__name__}.T2',
f'{__name__}.T2': {'a': 1, 'b': 'c'}}
# Expected construction values. Note that conf has default component(s)
# merged into it.
expected_type = T2
expected_conf = {'a': 1, 'b': 'c', 'y': 'y'}
DetectionElementFactory.from_config(test_config, merge_default=True)
m_def_get_default_config.assert_called_once()
m_de_get_impls.assert_called_once()
m_def_init.assert_called_once_with(expected_type, expected_conf)
def test_get_config():
"""
Test that ``get_config`` returns the appropriate configuration dictionary.
"""
test_type = mock.MagicMock(spec=DetectionElement)
test_type.__name__ = 'T1'
test_type.__module__ = __name__
test_conf = {'a': 1, 'b': 'c'}
expected_config = {
"type": f"{__name__}.T1",
f"{__name__}.T1": {'a': 1, 'b': 'c'}
}
# noinspection PyTypeChecker
factory = DetectionElementFactory(test_type, test_conf)
assert factory.get_config() == expected_config
def test_new_detection_function():
"""
Test that the given type and config at construction time is used to
create a new instance via known ``Configurable`` interface methods.
"""
elem_type = mock.MagicMock(spec=DetectionElement)
# store expected function return that should be returned from
# ``new_detection`` call.
expected_from_config_return = elem_type.from_config.return_value
elem_config = {'a': 1, 'b': 'c'}
expected_uuid = 'some uuid'
# noinspection PyTypeChecker
test_factory = DetectionElementFactory(elem_type, elem_config)
assert test_factory.new_detection(expected_uuid) == \
expected_from_config_return
elem_type.from_config.assert_called_once_with(elem_config, expected_uuid)
def test_new_detection_call_hook():
"""
Same as ``test_new_detection_function`` but invoking through __call__ hook.
"""
elem_type = mock.MagicMock(spec=DetectionElement)
# store expected function return that should be returned from
# ``new_detection`` call.
expected_from_config_return = elem_type.from_config.return_value
elem_config = {'a': 1, 'b': 'c'}
expected_uuid = 'some uuid'
# noinspection PyTypeChecker
test_factory = DetectionElementFactory(elem_type, elem_config)
# Point of distinction: use of __call__
# noinspection PyArgumentList
assert test_factory(expected_uuid) == expected_from_config_return
elem_type.from_config.assert_called_once_with(elem_config, expected_uuid)
```
#### File: utils/file_utils/test_safe_create_dir.py
```python
import errno
import unittest.mock as mock
import os
import unittest
from smqtk.utils.file import safe_create_dir
class TestSafeCreateDir (unittest.TestCase):
@mock.patch('smqtk.utils.file.os.makedirs')
def test_noExists(self, mock_os_makedirs):
dir_path = "/some/directory/somewhere"
p = safe_create_dir(dir_path)
self.assertTrue(mock_os_makedirs.called)
self.assertEqual(p, dir_path)
@mock.patch('smqtk.utils.file.os.path.exists')
@mock.patch('smqtk.utils.file.os.makedirs')
def test_existError_alreadyExists(self, mock_os_makedirs, mock_osp_exists):
mock_os_makedirs.side_effect = OSError(errno.EEXIST,
"Existing directory")
mock_osp_exists.return_value = True
dir_path = '/existing/dir'
p = safe_create_dir(dir_path)
self.assertTrue(mock_os_makedirs.called)
self.assertTrue(mock_osp_exists.called)
mock_osp_exists.assert_called_once_with(dir_path)
self.assertEqual(p, dir_path)
@mock.patch('smqtk.utils.file.os.path.exists')
@mock.patch('smqtk.utils.file.os.makedirs')
def test_existError_noExist(self, mock_os_makedirs, mock_osp_exists):
mock_os_makedirs.side_effect = OSError(errno.EEXIST,
"Existing directory")
mock_osp_exists.return_value = False
dir_path = '/some/dir'
self.assertRaises(OSError, safe_create_dir, dir_path)
mock_os_makedirs.assert_called_once_with(dir_path)
mock_osp_exists.assert_called_once_with(dir_path)
@mock.patch('smqtk.utils.file.os.path.exists')
@mock.patch('smqtk.utils.file.os.makedirs')
def test_otherOsError(self, mock_os_makedirs, mock_osp_exists):
mock_os_makedirs.side_effect = OSError(errno.EACCES,
"Permission Denied")
dir_path = '/some/dir'
self.assertRaises(OSError, safe_create_dir, dir_path)
mock_os_makedirs.assert_called_once_with(dir_path)
self.assertFalse(mock_osp_exists.called)
@mock.patch('smqtk.utils.file.os.makedirs')
def test_otherException(self, mock_os_makedirs):
mock_os_makedirs.side_effect = RuntimeError("Some other exception")
dir_path = 'something'
self.assertRaises(RuntimeError, safe_create_dir, dir_path)
mock_os_makedirs.assert_called_once_with(os.path.abspath(dir_path))
```
{
"source": "joshand/merakicli",
"score": 2
}
#### File: joshand/merakicli/cli_command_exec.py
```python
from meraki_sdk.meraki_sdk_client import MerakiSdkClient
from meraki_sdk.exceptions.api_exception import APIException
from operator import itemgetter
import copy
from _config import *
import json
client = MerakiSdkClient(apikey)
def show_enabled(e_stat):
if e_stat is True:
return "Yes"
else:
return "No"
def xstr(s):
return '' if s is None else str(s)
def format_data(srcdata):
odata = ""
widths = [max(map(len, col)) for col in zip(*srcdata)]
for row in srcdata:
odata += " ".join((val.ljust(width) for val, width in zip(row, widths))) + "\n"
return odata
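# Example with hypothetical data: format_data left-justifies every column so
# the rows line up, e.g.
#   format_data([["#", "Name"], ["1", "HQ"]]) == "#  Name\n1  HQ  \n"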
def resolve_arg(arg, datalist):
dodebug = False
if dodebug: print(datalist)
retval = None
for x in datalist:
# print(x)
for y in x:
if y and y.lower() == arg.lower():
retval = x
break
if retval:
break
# if not retval:
# for x in datalist:
# # print(x)
# for y in x:
# # attempt to match "int3" type interfaces
# temp = re.findall(r'\d+', str(y))
# reslist = list(map(int, temp))
# if dodebug: print("match1=", arg, reslist, y)
# if isinstance(reslist, list) and len(reslist) >= 1:
# if dodebug: print("match1.1=", reslist)
# # res = str("".join(reslist))
# res = "".join("{0}".format(n) for n in reslist)
# if dodebug: print("match1.2=")
# argtext = arg.lower().replace(res, "")
# if dodebug: print("match1.3=")
# # see if the text part starts the argument
# if dodebug: print("match2=", arg, argtext, y)
# if arg.lower().find(argtext) == 0:
# argrest = arg.lower().replace(str(argtext.lower()), "")
# if dodebug: print("match3=", arg, argrest, res, y)
# if str(argrest).strip() == res.strip():
# if dodebug: print("match4=", arg, argrest, res, y)
# retval = x
# break
#
# if retval:
# break
if retval is None:
if isinstance(arg, int) and arg < len(datalist):
retval = datalist[arg]
return retval
def exec_quit(data, clitext, contextchain):
return "", []
def get_org_raw():
try:
srcdata = client.organizations.get_organizations()
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
newlist = sorted(srcdata, key=itemgetter('name'))
outdata = [["#", "Organization ID", "Organization Name"]]
ocount = 0
for org in newlist:
ocount += 1
outdata.append([str(ocount), str(org["id"]), org["name"]])
return outdata
def get_net_raw(orgid):
try:
srcdata = client.networks.get_organization_networks({"organization_id": orgid})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
outdata = [["#", "Network ID", "Network Type", "Network Name"]]
if srcdata == {}:
pass
else:
newlist = sorted(srcdata, key=itemgetter('name'))
ocount = 0
for org in newlist:
ocount += 1
outdata.append([str(ocount), str(org["id"]), "/".join(org["productTypes"]), org["name"]])
return outdata
def get_dev_raw(netid):
try:
srcdata = client.devices.get_network_devices(netid)
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
outdata = [["#", "Serial #", "Model", "MAC Address", "WAN 1", "WAN 2", "LAN", "Name"]]
if not srcdata:
devlist = outdata
else:
newlist = sorted(srcdata, key=itemgetter('model'))
ocount = 0
for dev in newlist:
ocount += 1
outdata.append([str(ocount), dev["serial"], dev["model"], dev["mac"], xstr(dev.get("wan1Ip")),
xstr(dev.get("wan2Ip")), xstr(dev.get("lanIp")), xstr(dev.get("name"))])
return outdata
def decode_model(dev_model):
if dev_model.find("MS") >= 0:
return "switch"
elif dev_model.find("MR") >= 0:
return "wireless"
elif dev_model.find("MX") >= 0:
return "appliance"
return "unknown"
def get_int_raw(dev_data, orgid, netid, devid):
outdata = None
devtype = decode_model(dev_data[2])
if devtype == "wireless":
outdata = [["#", "Interface", "IP-Assignment", "Name", "Enabled?", "Auth", "Band"]]
try:
int_data = client.ssids.get_network_ssids(netid)
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
for d in int_data:
outdata.append([str(d["number"]), "SSID" + str(d["number"]), d["ipAssignmentMode"], d["name"], show_enabled(d["enabled"]), d["authMode"], d["bandSelection"]])
elif devtype == "switch":
outdata = [["#", "Interface", "Name", "Enabled?", "Type", "VLAN", "Voice VLAN"]]
try:
int_data = client.switch_ports.get_device_switch_ports(devid)
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
for d in int_data:
pname = d["name"]
if pname is None:
pname = ""
pvoicevlan = d["voiceVlan"]
if pvoicevlan is None:
pvoicevlan = ""
outdata.append([str(d["number"]), "Ethernet" + str(d["number"]), pname, show_enabled(d.get("enabled", "")), d.get("type", ""), str(d.get("vlan", "")), str(pvoicevlan)])
#print(outdata)
elif devtype == "appliance":
outdata = [["#", "Interface", "Enabled?", "Type", "Native", "Allowed", "DropUntag"]]
int_data = client.management_interface_settings.get_network_device_management_interface_settings({"network_id": netid, "serial": devid})
for d in int_data:
if d.lower().find("wan") >= 0:
isenabled = True
vlanid = int_data[d]["vlan"]
if vlanid is None:
vlanid = 1
outdata.append([str(d.lower().replace("wan", "")), d.upper(), str(isenabled), "access", str(vlanid), "N/A", "False"])
int_data = client.mx_vlan_ports.get_network_appliance_ports(netid)
if int_data[0] == 'VLANs are not enabled for this network':
pass
else:
for d in int_data:
outdata.append([str(d["number"]), "LAN" + str(d["number"]), str(d["enabled"]), d["type"], str(d.get("vlan", "N/A")), str(d.get("allowedVlans", "N/A")), str(d.get("dropUntaggedTraffic", "N/A"))])
intlist = outdata
return intlist
def clear_none(instr):
if instr is None:
return ""
else:
return str(instr)
def parse_json_struct(scope, json, interface=None):
ro_fields = {
"organization": ["id", "url"],
"network": ["id", "organizationId"],
"device": ["networkId", "model", "mac"]
}
c = copy.deepcopy(json)
outstr = ""
if interface is not None:
outstr += scope + " " + str(interface) + "\n"
if "number" in c:
c.pop("number", None)
else:
if "name" in c:
outstr += scope + " " + str(c["name"]) + "\n"
c.pop("name", None)
if "id" in c:
outstr += " ! id " + str(c["id"]) + "\n"
c.pop("id", None)
if "productTypes" in c:
c.pop("type", None)
pt = c["productTypes"]
c.pop("productTypes", None)
c["type"] = " ".join(pt)
for k in c:
if scope in ro_fields and k in ro_fields[scope]:
outstr += " ! " + str(k) + " " + clear_none(c[k]) + "\n"
else:
outstr += " " + str(k) + " " + clear_none(c[k]) + "\n"
return outstr
def get_config_data(scope, elemid, context_chain):
if scope == "root":
return "Unable to show configuration of the root level. You must access an organization first."
elif scope == "organization":
try:
data = client.organizations.get_organization(elemid)
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return parse_json_struct(scope, data)
elif scope == "network":
try:
data = client.networks.get_network(elemid)
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return parse_json_struct(scope, data)
elif scope == "device":
try:
data = client.devices.get_network_device({"network_id": context_chain[len(context_chain)-3]["selected"], "serial": elemid})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return parse_json_struct(scope, data)
elif scope == "interface":
netid = context_chain[len(context_chain) - 4]["selected"]
devid = context_chain[len(context_chain) - 3]["selected"]
intname = context_chain[len(context_chain) - 2]["selected"]
if "Ethernet" in intname:
intnum = intname.replace("Ethernet", "")
try:
data = client.switch_ports.get_device_switch_port({"serial": devid, "number": intnum})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return parse_json_struct(scope, data, interface=intname)
elif "SSID" in intname:
intnum = intname.replace("SSID", "")
try:
data = client.ssids.get_network_ssid({"network_id": netid, "number": intnum})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return parse_json_struct(scope, data, interface=intname)
elif "LAN" in intname or "WAN" in intname:
data = None
if "LAN" in intname:
intnum = str(intname.replace("LAN", ""))
itype = "LAN"
else:
intnum = str(intname.replace("WAN", ""))
itype = "WAN"
try:
if itype == "WAN":
tempdata = client.management_interface_settings.get_network_device_management_interface_settings({"network_id": netid, "serial": devid})
for d in tempdata:
if intname.lower() in d.lower():
data = tempdata[d]
break
else:
data = client.mx_vlan_ports.get_network_appliance_port({"network_id": netid, "appliance_port_id": intnum})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return parse_json_struct(scope, data, interface=intname)
else:
return "Unable to show configuration for scope " + scope
def exec_show_parse(data, clitext, contextchain):
# sample data:
# [{'command': 'organizations', 'help': 'show list of organizations'}, '', [], 'exec_show_parse', 'root']
curscope = data["context"]
curcmd = data["command"]["command"]
list_line = clitext.split(" ")[1:]
if curscope == "root" and curcmd == "organizations":
outdata = get_org_raw()
contextchain[len(contextchain)-1]["elements"] = outdata
return format_data(outdata), contextchain
elif curscope == "organization" and curcmd == "networks":
outdata = get_net_raw(contextchain[len(contextchain)-2]["selected"])
return format_data(outdata), contextchain
elif curscope == "network" and curcmd == "devices":
outdata = get_dev_raw(contextchain[len(contextchain)-2]["selected"])
return format_data(outdata), contextchain
elif curscope == "device" and curcmd == "interfaces":
prevdev = contextchain[len(contextchain)-2]["selected"]
r = resolve_arg(prevdev, contextchain[len(contextchain)-2]["elements"])
outdata = get_int_raw(r, contextchain[len(contextchain)-4]["selected"], contextchain[len(contextchain)-3]["selected"], contextchain[len(contextchain)-2]["selected"])
return format_data(outdata), contextchain
elif data["remains"] in "configuration":
return get_config_data(curscope, contextchain[len(contextchain)-2]["selected"], contextchain), contextchain
elif len(list_line) == 2 and list_line[0] in "debug" and list_line[1] in "context":
return str(json.dumps(contextchain)), contextchain
elif clitext.find("?") >= 0:
return None, contextchain
else:
return "Unknown argument: " + clitext, contextchain
def exec_context_org(data, clitext, contextchain):
line = data["remains"]
if contextchain[len(contextchain)-1]["elements"] is None:
e = get_org_raw()
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(line, e)
else:
r = resolve_arg(line, contextchain[len(contextchain)-1]["elements"])
if not r:
orgname = str(line).strip()
print("Unable to locate an Organization with the identifier '" + orgname + "'. Creating one...")
try:
data = client.organizations.create_organization(orgname)
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return "", contextchain
e = get_org_raw()
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(orgname, e)
contextchain[len(contextchain) - 1]["selected_data"] = r
contextchain[len(contextchain) - 1]["selected"] = r[1]
temp_context_desc = "Org-" + r[2] + "#"
temp_context = "organization"
contextchain.append({"prompt": temp_context_desc, "contextname": temp_context, "elements": None, "selected": None, "selected_data": None})
return "", contextchain
def exec_context_net(data, clitext, contextchain):
line = data["remains"]
if contextchain[len(contextchain)-1]["elements"] is None:
e = get_net_raw(contextchain[len(contextchain)-2]["selected"])
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(line, e)
else:
r = resolve_arg(line, contextchain[len(contextchain)-1]["elements"])
if not r:
netname = str(line).strip()
print("Unable to locate a Network with the identifier '" + str(line) + "'. Creating one...")
try:
            data = client.networks.create_organization_network({"organization_id": contextchain[len(contextchain)-2]["selected"], "create_organization_network": {"name": netname, "type": default_net_type, "tags": "", "time_zone": default_time_zone}})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return "", contextchain
        e = get_net_raw(contextchain[len(contextchain)-2]["selected"])
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(netname, e)
contextchain[len(contextchain) - 1]["selected_data"] = r
contextchain[len(contextchain) - 1]["selected"] = r[1]
temp_context_desc = contextchain[len(contextchain)-1]["prompt"][:-1] + "/Net-" + r[3] + "#"
temp_context = "network"
contextchain.append({"prompt": temp_context_desc, "contextname": temp_context, "elements": None, "selected": None, "selected_data": None})
return "", contextchain
def exec_context_dev(data, clitext, contextchain):
line = data["remains"]
if contextchain[len(contextchain)-1]["elements"] is None:
e = get_dev_raw(contextchain[len(contextchain)-2]["selected"])
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(line, e)
else:
r = resolve_arg(line, contextchain[len(contextchain)-1]["elements"])
if not r:
return "Unable to locate a Device with the identifier '" + str(line) + "'. To claim a new Device, use the command 'network claim <device-serial-number>'.", contextchain
contextchain[len(contextchain) - 1]["selected_data"] = r
contextchain[len(contextchain) - 1]["selected"] = r[1]
temp_context_desc = contextchain[len(contextchain)-1]["prompt"][:-1] + "/Dev-" + r[1] + "#"
temp_context = "device"
contextchain.append({"prompt": temp_context_desc, "contextname": temp_context, "elements": None, "selected": None, "selected_data": None})
return "", contextchain
def exec_context_int(data, clitext, contextchain):
line = data["remains"]
if contextchain[len(contextchain)-1]["elements"] is None:
prevdev = contextchain[len(contextchain)-2]["selected"]
r = resolve_arg(prevdev, contextchain[len(contextchain)-2]["elements"])
e = get_int_raw(r, contextchain[len(contextchain)-4]["selected"],
contextchain[len(contextchain)-3]["selected"],
contextchain[len(contextchain)-2]["selected"])
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(line, e)
else:
r = resolve_arg(line, contextchain[len(contextchain)-1]["elements"])
if not r:
return "Unable to locate an Interface with the identifier '" + str(line) + "'.", contextchain
contextchain[len(contextchain) - 1]["selected_data"] = r
contextchain[len(contextchain) - 1]["selected"] = r[1]
temp_context_desc = contextchain[len(contextchain)-1]["prompt"][:-1] + "/Int-" + r[1] + "#"
temp_context = "interface"
contextchain.append({"prompt": temp_context_desc, "contextname": temp_context, "elements": None, "selected": None, "selected_data": None})
return "", contextchain
def exec_up_context(data, clitext, contextchain):
outcx = contextchain[:-1]
outcx[len(outcx)-1]["selected"] = None
outcx[len(outcx)-1]["selected_data"] = None
return "", outcx
def exec_root_context(data, clitext, contextchain):
outcx = contextchain[0]
outcx["selected"] = None
outcx["selected_data"] = None
return "", [outcx]
def exec_handle_disable(data, clitext, contextchain):
handle_port_toggle(data["remains"], contextchain, False)
return "", contextchain
def exec_handle_no(data, clitext, contextchain):
line = data["remains"]
list_line = line.split(" ")
curscope = contextchain[len(contextchain)-1]["contextname"]
if curscope == "interface" and line in "shutdown":
handle_port_toggle(list_line, contextchain, True)
elif curscope == "root" and list_line[0] in "organization":
print("Unable to delete Organization: No API Coverage.")
elif curscope == "organization" and list_line[0] in "network":
if contextchain[len(contextchain)-1]["elements"] is None:
e = get_net_raw(contextchain[len(contextchain) - 2]["selected"])
contextchain[len(contextchain)-1]["elements"] = e
r = resolve_arg(line, e)
else:
r = resolve_arg(line, contextchain[len(contextchain)-1]["elements"])
try:
data = client.networks.delete_network(r[1])
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return "", contextchain
def handle_port_toggle(list_line, context_chain, port_enabled):
prevnet = context_chain[len(context_chain) - 4]["selected"]
prevdev = context_chain[len(context_chain) - 3]["selected"]
prevint = context_chain[len(context_chain) - 2]["selected"]
if "Ethernet" in prevint:
intnum = str(prevint.replace("Ethernet", ""))
try:
data = client.switch_ports.update_device_switch_port({"serial": prevdev, "number": intnum, "update_device_switch_port": {"enabled": port_enabled}})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
elif "SSID" in prevint:
intnum = str(prevint.replace("SSID", ""))
try:
data = client.ssids.update_network_ssid({"network_id": prevnet, "number": intnum, "update_network_ssid": {"enabled": port_enabled}})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
elif "LAN" in prevint or "WAN" in prevint:
if "LAN" in prevint:
intnum = str(prevint.replace("LAN", ""))
itype = "LAN"
else:
intnum = str(prevint.replace("WAN", ""))
itype = "WAN"
try:
if itype == "WAN":
upd_port = "wan" + intnum
if port_enabled:
wan_state = "enabled"
else:
wan_state = "disabled"
data = client.management_interface_settings.update_network_device_management_interface_settings({"network_id": prevnet, "serial": prevdev, "update_network_device_management_interface_settings": {upd_port: {"wanEnabled": wan_state}}})
print("wan toggle", j)
else:
data = client.mx_vlan_ports.update_network_appliance_port({"network_id": prevnet, "appliance_port_id": intnum, "update_network_appliance_port": {"enabled": port_enabled}})
except APIException as e:
print(f'Error {e.response_code} with error message {e.context.response.raw_body}')
return ""
return ""
else:
# print(prevdev, prevint)
pass
return None
def switch_context(data, clitext, contextchain):
list_line = clitext.split(" ")[1:]
out_chain = []
orgid = None
netid = None
devid = None
seldevdata = None
intnum = None
curcontext = "root"
line = " ".join(list_line)
ctxlist = line.split("/")
for ctx in ctxlist:
ctxname = "-".join(ctx.split("-")[1:]).strip()
typectx = ctx.replace(ctxname, "").replace("-", "")
if typectx.lower() == "org":
orgdata = get_org_raw()
r = resolve_arg(ctxname, orgdata)
if r: orgid = str(r[1])
out_chain.append({"prompt": "#", "contextname": curcontext, "elements": orgdata, "selected": orgid, "selected_data": r})
curcontext = "organization"
out_chain.append({"prompt": "Org-" + out_chain[len(out_chain)-1]["selected_data"][2] + "#", "contextname": curcontext, "elements": None, "selected": None, "selected_data": None})
elif typectx.lower() == "net" and orgid is not None:
netdata = get_net_raw(orgid)
r = resolve_arg(ctxname, netdata)
if r: netid = str(r[1])
out_chain[len(out_chain)-1]["elements"] = netdata
out_chain[len(out_chain)-1]["selected"] = netid
out_chain[len(out_chain)-1]["selected_data"] = r
curcontext = "network"
out_chain.append({"prompt": out_chain[len(out_chain)-1]["prompt"][:-1] + "/Net-" + out_chain[len(out_chain)-1]["selected_data"][3] + "#", "contextname": curcontext, "elements": None, "selected": None, "selected_data": None})
elif typectx.lower() == "dev" and netid is not None:
devdata = get_dev_raw(netid)
r = resolve_arg(ctxname, devdata)
if r: devid = str(r[1])
# print(ctx, ctxname, netid, devdata, devid)
out_chain[len(out_chain)-1]["elements"] = devdata
out_chain[len(out_chain)-1]["selected"] = devid
out_chain[len(out_chain)-1]["selected_data"] = r
curcontext = "device"
out_chain.append({"prompt": out_chain[len(out_chain)-1]["prompt"][:-1] + "/Dev-" + out_chain[len(out_chain)-1]["selected_data"][1] + "#", "contextname": curcontext, "elements": None, "selected": None, "selected_data": None})
elif typectx.lower() == "int" and devid is not None:
intdata = get_int_raw(out_chain[len(out_chain)-2]["selected_data"], orgid, netid, devid)
r = resolve_arg(ctxname, intdata)
if r: intnum = str(r[1])
out_chain[len(out_chain)-1]["elements"] = intdata
out_chain[len(out_chain)-1]["selected"] = intnum
out_chain[len(out_chain)-1]["selected_data"] = r
curcontext = "interface"
out_chain.append({"prompt": out_chain[len(out_chain)-1]["prompt"][:-1] + "/Int-" + out_chain[len(out_chain)-1]["selected_data"][1] + "#", "contextname": curcontext, "elements": None, "selected": None, "selected_data": None})
return "", out_chain
``` |
{
"source": "joshand/meraki-integration-tools",
"score": 2
} |
#### File: meraki-integration-tools/scripts/tunnel_client.py
```python
from flask import Flask, request, redirect, session, url_for, render_template, jsonify
import sys
import paramiko
import subprocess
from apscheduler.schedulers.background import BackgroundScheduler
import atexit
import json
import requests
import os
import signal
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
app = Flask(__name__)
# Enable the job scheduler to run for expired lab sessions
cron = BackgroundScheduler()
# Explicitly kick off the background thread
cron.start()
# Shutdown your cron thread if the web process is stopped
atexit.register(lambda: cron.shutdown(wait=False))
ssh = paramiko.SSHClient()
app_version = "0.0.1"
sver = sys.version_info
app_name = "Python" + str(sver[0]) + "." + str(sver[1]) + ":tunnel_client.py"
myuuid = "74886f7a-8247-4cc5-9e46-9cebf3fec1e4"
gateway = "demo.sigraki.com"
controller = "http://" + gateway
internal_port = "8000"
HTTP_METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']
pproxy_pid = 0
def job_function(portnum):
global pproxy_pid
cmd = "pproxy -l tunnel+in://" + gateway + ":" + str(portnum) + " -r tunnel://127.0.0.1:" + internal_port
print(cmd)
p = subprocess.Popen(cmd.split(" "))
pproxy_pid = p.pid
return p
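# Illustrative shape of the command assembled above (the port number is hypothetical; gateway and
# internal_port come from the module-level settings):
#   pproxy -l tunnel+in://demo.sigraki.com:12345 -r tunnel://127.0.0.1:8000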
def start_tunnel(portnum):
job = cron.add_job(job_function, args=[str(portnum)])
return job
def health_check():
global pproxy_pid
req = requests.get(controller + "/api/v0/tunnels/" + myuuid + "/health")
rjson = req.json()
print("health check=", str(rjson))
if rjson.get("status") != "ok":
if pproxy_pid != 0:
os.kill(pproxy_pid, signal.SIGTERM) # or signal.SIGKILL
cron.remove_all_jobs()
do_register()
# handle incoming requests over the tunnel
@app.route('/health', methods=HTTP_METHODS)
def health():
return jsonify({"status": "ok"})
# handle incoming requests over the tunnel
@app.route('/', methods=HTTP_METHODS)
def default_route():
injson = request.get_json(force=True)
# url = injson.get("url", "")
# headers = injson.get("headers", {})
# method = injson.get("method", "")
# auth = injson.get("auth", "")
# auth_un = injson.get("auth_username", "")
# auth_pw = injson.get("auth_password", "")
# https_verify = injson.get("https_verify", "")
# body = injson.get("body", "")
access_method = request.headers.get("X-Local-Access-Method", "api")
if access_method == "api":
url = request.headers.get("X-Local-URL")
headers = json.loads(request.headers.get("X-Local-Headers", "{}"))
insecure = request.headers.get("X-Local-Allow-Insecure", "false").lower()
if insecure == "true":
https_verify = False
else:
https_verify = True
no_body = request.headers.get("X-Empty-Body", "false").lower()
auth = request.headers.get("X-Local-Auth-Basic", None)
method = request.method
if method.upper() == "GET" or method.upper() == "DELETE" or no_body == "true":
body = None
else:
body = request.get_data()
if not url:
return "You must specifiy at a minimum the X-Local-URL with the local URL to call."
if auth:
auth_un, auth_pw = auth.split(":")
print("doing request with basic auth")
try:
if body:
                    r = requests.request(method, url, headers=headers, auth=(auth_un, auth_pw), verify=https_verify,
                                         timeout=5, data=body)
else:
r = requests.request(method, url, headers=headers, auth=(auth_un, auth_pw), verify=https_verify, timeout=5)
resp = str(r.content.decode("UTF-8"))
except Exception as e:
resp = "error:" + str(e)
else:
print("doing request with no auth")
try:
if body:
                    r = requests.request(method, url, headers=headers, verify=https_verify, timeout=5, data=body)
else:
r = requests.request(method, url, headers=headers, verify=https_verify, timeout=5)
resp = str(r.content.decode("UTF-8"))
            except Exception as e:
                resp = "error:" + str(e)
elif access_method == "ssh":
out_data = {}
ip = request.headers.get("X-Local-IP", None)
port = int(request.headers.get("X-Local-Port", "22"))
auth = request.headers.get("X-Local-Auth-Basic", None)
auth_un, auth_pw = auth.split(":")
cmd = json.loads(request.headers.get("X-Local-Command", "[]"))
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port, auth_un, auth_pw)
        # X-Local-Command is parsed as a JSON list; run each command separately
        for single_cmd in cmd:
            stdin, stdout, stderr = ssh.exec_command(single_cmd, timeout=30)
            out_data[single_cmd] = stdout.readlines()
resp = out_data
ssh.close()
else:
resp = ""
return resp
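# Illustrative request the gateway could forward to this client over the tunnel (the header names
# are the ones read above; the URL and values are hypothetical):
#   X-Local-Access-Method: api
#   X-Local-URL: https://192.168.128.1/api/v1/status
#   X-Local-Headers: {"Accept": "application/json"}
#   X-Local-Allow-Insecure: true
#   X-Local-Auth-Basic: admin:password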
def do_register():
reg_url = controller + "/api/v0/tunnels/" + myuuid + "/register"
data = {"app": app_name, "ver": app_version}
r = requests.post(reg_url, json=data)
# print(r.content.decode("utf-8"))
rj = r.json()
if "portnum" in rj:
start_tunnel(rj["portnum"])
return True
else:
return False
def run():
if do_register():
job = cron.add_job(health_check, 'interval', seconds=60)
app.run(host="0.0.0.0", port=8000, debug=False)
else:
print("Error; no port returned")
``` |
{
"source": "josharenberg/materialize",
"score": 2
} |
#### File: ci/test/build.py
```python
import boto3
import os
from pathlib import Path
import humanize
from materialize import errors
from materialize import cargo
from materialize import ci_util
from materialize import deb
from materialize import git
from materialize import mzbuild
from materialize import spawn
from ..deploy.deploy_util import apt_materialized_path, APT_BUCKET
def main() -> None:
repo = mzbuild.Repository(Path("."))
workspace = cargo.Workspace(repo.root)
# Acquire all the mzbuild images in the repository, while pushing any
# images that we build to Docker Hub, where they will be accessible to
# other build agents.
print("--- Acquiring mzbuild images")
commit_tag = f'unstable-{git.rev_parse("HEAD")}'
deps = repo.resolve_dependencies(image for image in repo if image.publish)
deps.acquire()
deps.push()
deps.push_tagged(commit_tag)
print("--- Staging Debian package")
if os.environ["BUILDKITE_BRANCH"] == "main":
stage_deb(repo, "materialized-unstable", deb.unstable_version(workspace))
elif os.environ["BUILDKITE_TAG"]:
version = workspace.crates["materialized"].version
assert (
f"v{version}" == os.environ["BUILDKITE_TAG"]
), f'materialized version {version} does not match tag {os.environ["BUILDKITE_TAG"]}'
stage_deb(repo, "materialized", str(version))
elif os.environ["BUILDKITE_BRANCH"] == "master":
raise errors.MzError(f"Tried to build branch master {git.rev_parse('HEAD')}")
else:
print("Not on main branch or tag; skipping")
def stage_deb(repo: mzbuild.Repository, package: str, version: str) -> None:
"""Stage a Debian package on S3.
Note that this function does not cause anything to become public; a
step to publish the files and add them to the apt packages index
will be run during the deploy job.
"""
print(f"Staging deb {package} {version}")
# Extract the materialized binary from the Docker image. This avoids
# an expensive rebuild if we're using a cached image.
ci_util.acquire_materialized(
repo, repo.rd.xcargo_target_dir() / "release" / "materialized"
)
# Build the Debian package.
deb_path = repo.rd.xcargo_target_dir() / "debian" / f"materialized-{version}.deb"
spawn.runv(
[
repo.rd.xcargo(),
"deb",
f"--variant={package}",
"--no-build",
"--no-strip",
"--deb-version",
version,
"-p",
"materialized",
"-o",
deb_path,
],
cwd=repo.root,
)
deb_size = deb_path.stat().st_size
# Stage the package on S3
boto3.client("s3").upload_file(
str(deb_path),
APT_BUCKET,
apt_materialized_path(version),
)
if __name__ == "__main__":
main()
``` |
{
"source": "josharian/Theano",
"score": 2
} |
#### File: sandbox/cuda/blas.py
```python
from theano import Op, Type, Apply, Variable, Constant
from theano import tensor, scalar
import StringIO, os
import cuda_ndarray.cuda_ndarray as cuda
from theano.sandbox.cuda.type import CudaNdarrayType
class GpuDot22(Op):
def __str__(self):
return 'GpuDot22'
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def make_node(self, x, y):
if x.type.ndim != 2:
raise TypeError(x)
if y.type.ndim != 2:
raise TypeError(y)
otype = CudaNdarrayType(
(x.type.broadcastable[0], y.type.broadcastable[1]))
return Apply(self, [x,y], [otype()])
def c_code_cache_version(self):
return (1,1)
def c_code(self, node, nodename, inputs, outputs, sub):
x, y = inputs
z, = outputs
fail = sub['fail']
return """
if (%(x)s->nd != 2)
{
PyErr_Format(PyExc_TypeError, "rank(x)==%%i must be 2", %(x)s->nd);
%(fail)s;
}
if (%(y)s->nd != 2)
{
PyErr_Format(PyExc_TypeError, "rank(y)==%%i must be 2", %(y)s->nd);
%(fail)s;
}
if ((NULL == %(z)s)
|| (CudaNdarray_HOST_DIMS(%(z)s)[0] != CudaNdarray_HOST_DIMS(%(x)s)[0])
|| (CudaNdarray_HOST_DIMS(%(z)s)[1] != CudaNdarray_HOST_DIMS(%(y)s)[1]))
{
//if (%(z)s) Py_DECREF(%(z)s);
Py_XDECREF(%(z)s);
npy_intp dims[2];
dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0];
dims[1] = CudaNdarray_HOST_DIMS(%(y)s)[1];
%(z)s = (CudaNdarray*)CudaNdarray_New();
if ((NULL == %(z)s) || CudaNdarray_alloc_contiguous(%(z)s, 2, dims))
{
if (%(z)s)
{
Py_DECREF(%(z)s);
%(z)s = NULL;
}
%(fail)s;
}
}
if (CudaNdarray_gemm(1.0f, %(x)s, %(y)s, 0.0f, %(z)s))
{
if (%(z)s)
{
Py_DECREF(%(z)s);
%(z)s = NULL;
}
%(fail)s;
}
""" % locals()
gpu_dot22 = GpuDot22()
class GpuDot22Scalar(Op):
def __str__(self):
return 'GpuDot22Scalar'
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def make_node(self, x, y, a):
if x.type.ndim != 2:
raise TypeError(x)
if y.type.ndim != 2:
raise TypeError(y)
if not tensor.blas._as_scalar(a):
raise TypeError(a)
otype = CudaNdarrayType(
(x.type.broadcastable[0], y.type.broadcastable[1]))
return Apply(self, [x,y,a], [otype()])
def c_code_cache_version(self):
return (1,1)
def c_code(self, node, name, inputs, outputs, sub):
x, y, a = inputs
z, = outputs
fail = sub['fail']
return """
#define REAL float
float %(name)s_a = (%(a)s->descr->type_num == PyArray_FLOAT)
? (REAL)(((float*)%(a)s->data)[0])
: (REAL)(((double*)%(a)s->data)[0]);
#undef REAL
if (%(x)s->nd != 2)
{
PyErr_Format(PyExc_TypeError, "rank(x)==%%i must be 2", %(x)s->nd);
%(fail)s;
}
if (%(y)s->nd != 2)
{
PyErr_Format(PyExc_TypeError, "rank(y)==%%i must be 2", %(y)s->nd);
%(fail)s;
}
if ((NULL == %(z)s)
|| (CudaNdarray_HOST_DIMS(%(z)s)[0] != CudaNdarray_HOST_DIMS(%(x)s)[0])
|| (CudaNdarray_HOST_DIMS(%(z)s)[1] != CudaNdarray_HOST_DIMS(%(y)s)[1]))
{
//if (%(z)s) Py_DECREF(%(z)s);
Py_XDECREF(%(z)s);
npy_intp dims[2];
dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0];
dims[1] = CudaNdarray_HOST_DIMS(%(y)s)[1];
%(z)s = (CudaNdarray*)CudaNdarray_New();
if ((NULL == %(z)s) || CudaNdarray_alloc_contiguous(%(z)s, 2, dims))
{
if (%(z)s)
{
Py_DECREF(%(z)s);
%(z)s = NULL;
}
%(fail)s;
}
}
if (CudaNdarray_gemm(%(name)s_a, %(x)s, %(y)s, 0.0f, %(z)s))
{
if (%(z)s)
{
Py_DECREF(%(z)s);
%(z)s = NULL;
}
%(fail)s;
}
""" % locals()
gpu_dot22scalar = GpuDot22Scalar()
class GpuGemm(Op):
"""
implement the gemm on the gpu.
"""
def __init__(self, inplace):
self.__setstate__({'inplace':inplace})
def __str__(self):
if self.inplace:
return 'GpuGemm{inplace}'
else:
return 'GpuGemm{no_inplace}'
def __eq__(self, other):
return (type(self) == type(other)\
and self.inplace == other.inplace)
def __hash__(self):
return hash(type(self)) ^ hash(self.inplace)
def __setstate__(self, dct):
inplace = dct.get('inplace', True)
if inplace:
self.destroy_map = {0: [0]}
self.inplace = inplace
def __getstate__(self):
return dict(inplace=self.inplace)
def make_node(self, z, a, x, y, b):
# the more complicated error checking performed by tensor.gemm is assumed to already
# have been done
return Apply(self, [z, a, x, y, b], [z.type()])
def c_code_cache_version(self):
return (3,)
def c_code(self, node, name, inputs, outputs, sub):
#z_out = alpha * dot(x,y) + beta * z_in
        #inplace version: set z_out = z_in
#not inplace version, we copy z_in to z_out.
z_in, a, x, y, b = inputs
z_out, = outputs
fail = sub['fail']
sio = StringIO.StringIO()
print >> sio, """
#define REAL float
float %(name)s_a = (%(a)s->descr->type_num == PyArray_FLOAT)
? (REAL)(((float*)%(a)s->data)[0])
: (REAL)(((double*)%(a)s->data)[0]);
float %(name)s_b = (%(b)s->descr->type_num == PyArray_FLOAT) ?
(REAL)(((float*)%(b)s->data)[0])
: (REAL)(((double*)%(b)s->data)[0]);
#undef REAL
"""
if self.inplace:
print >> sio, """
Py_XDECREF(%(z_out)s);
%(z_out)s = %(z_in)s;
Py_INCREF(%(z_out)s);
"""
else:
print >> sio, """
if (!%(z_out)s
|| (%(z_out)s->nd != 2)
|| (CudaNdarray_HOST_DIMS(%(z_out)s)[0] != CudaNdarray_HOST_DIMS(%(z_in)s)[0])
|| (CudaNdarray_HOST_DIMS(%(z_out)s)[1] != CudaNdarray_HOST_DIMS(%(z_in)s)[1])
)
{
Py_XDECREF(%(z_out)s);
%(z_out)s = (CudaNdarray*)CudaNdarray_Copy(%(z_in)s);
if (!%(z_out)s)
{
%(fail)s;
}
}
else
{
if (CudaNdarray_CopyFromCudaNdarray(%(z_out)s, %(z_in)s))
{
%(fail)s;
}
}
"""
print >> sio, """
if (CudaNdarray_gemm(%(name)s_a, %(x)s, %(y)s, %(name)s_b, %(z_out)s))
{
%(fail)s;
}
"""
return sio.getvalue() % locals()
gpu_gemm_no_inplace = GpuGemm(inplace=False)
gpu_gemm_inplace = GpuGemm(inplace=True)
class GpuOuter(Op):
def make_node(self, x, y):
# we suppose type checking has been done, but make sure.
assert (x.type.ndim == 1 and y.type.ndim == 1 and
x.type.dtype == 'float32' and y.type.dtype == 'float32')
bz = [x.type.broadcastable[0], y.type.broadcastable[0]]
outputs = [CudaNdarrayType(dtype='float32', broadcastable=bz)()]
return Apply(self, [x, y], outputs)
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def c_code_cache_version(self):
return (4,)
def c_code(self, node, name, inputs, outputs, sub):
# A = x * y'
x, y = inputs
A, = outputs
fail = sub['fail']
return """
CudaNdarray *%(name)sx = NULL, *%(name)sy = NULL;
int %(name)sres;
if (CudaNdarray_HOST_STRIDES(%(x)s)[0] < 0) {
%(name)sx = (CudaNdarray *)CudaNdarray_Copy(%(x)s);
if (!%(name)sx) {
%(fail)s;
}
} else {
%(name)sx = %(x)s;
Py_INCREF(%(name)sx);
}
if (CudaNdarray_HOST_STRIDES(%(y)s)[0] < 0) {
%(name)sy = (CudaNdarray *)CudaNdarray_Copy(%(y)s);
if (!%(name)sy) {
Py_DECREF(%(name)sx);
%(fail)s;
}
} else {
%(name)sy = %(y)s;
Py_INCREF(%(name)sy);
}
if (!(%(A)s &&
CudaNdarray_HOST_DIMS(%(A)s)[0] == CudaNdarray_HOST_DIMS(%(x)s)[0] &&
CudaNdarray_HOST_DIMS(%(A)s)[1] == CudaNdarray_HOST_DIMS(%(y)s)[0] &&
CudaNdarray_is_c_contiguous(%(A)s))) {
Py_XDECREF(%(A)s);
int dims[2];
dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0];
dims[1] = CudaNdarray_HOST_DIMS(%(y)s)[0];
%(A)s = (CudaNdarray *)CudaNdarray_ZEROS(2, dims);
if (!%(A)s) {
Py_DECREF(%(name)sy);
Py_DECREF(%(name)sx);
%(fail)s;
}
}
else
{
// sger accumulates into A. We need to zero it first.
int total_size = (sizeof(real) *
CudaNdarray_HOST_DIMS(%(A)s)[0] *
CudaNdarray_HOST_DIMS(%(A)s)[1]);
if (cudaSuccess != cudaMemset(%(A)s->devdata, 0, total_size))
{
PyErr_Format(PyExc_MemoryError, "GpuOuter: Error memsetting %%d bytes of device memory.", total_size);
Py_DECREF(%(name)sy);
Py_DECREF(%(name)sx);
%(fail)s;
}
}
%(name)sres = CudaNdarray_sger(1.0, %(name)sx, %(name)sy, %(A)s);
Py_DECREF(%(name)sy);
Py_DECREF(%(name)sx);
if (%(name)sres) {
%(fail)s;
}
"""%dict(x=x,y=y,A=A,fail=fail,name=name)
gpu_outer = GpuOuter()
##
# Not really a BLAS operation, but whatever.
#
class GpuConv(Op):
@staticmethod
def logical_output_shape_2d(imshp, kshp, mode):
if mode == 'valid':
return imshp[0] - kshp[0] + 1, imshp[1] - kshp[1] + 1
if mode == 'full':
return imshp[0] + kshp[0] - 1, imshp[1] + kshp[1] - 1
raise ValueError(mode)
def __init__(self, border_mode,
subsample=(1,1),
logical_img_hw=None,
logical_kern_hw=None,
logical_kern_align_top=True,
version=-1,
verbose=0,
kshp=None,
imshp=None):
"""
        :param version: each version of c_code implements many kernels for the
convolution. By default we try to guess the best one.
You can force one version with this parameter. This
parameter is used by the tests.
        :param verbose: for values of 1, 2 and 3, print more information during
the execution of the convolution. Mostly used for
optimization or debugging.
        :param kshp: The size of the kernel. If provided, can generate
faster code. If the GpuConv op is automatically inserted,
we take its value automatically from the Conv op.
:param imshp: The size of the image. Not used for code generation but
                      allows selecting an experimental new version in another repo.
"""
self.border_mode = border_mode
self.subsample = subsample
if logical_img_hw is not None:
h,w = logical_img_hw
#TODO: reconsider this... since shapes are not given in constructor,
# maybe a multiplier + offset is a more appropriate way of passing this logical
# grid
logical_img_hw = tuple(logical_img_hw)
self.logical_img_hw = logical_img_hw
if logical_kern_hw is not None:
h,w = logical_kern_hw
#TODO: reconsider this... since shapes are not given in constructor,
# maybe a multiplier + offset is a more appropriate way of passing this logical
# grid
logical_kern_hw = tuple(logical_kern_hw)
self.logical_kern_hw = logical_kern_hw
self.logical_kern_align_top = logical_kern_align_top
self.version=version
self.verbose=verbose
self.kshp = kshp
self.imshp = imshp
def __eq__(self, other):
return type(self) == type(other) \
and self.border_mode == other.border_mode \
and self.subsample == other.subsample \
and self.logical_img_hw == other.logical_img_hw \
and self.logical_kern_hw == other.logical_kern_hw \
and self.logical_kern_align_top == other.logical_kern_align_top \
and self.version == other.version \
and self.verbose == other.verbose \
and self.kshp == other.kshp\
and self.imshp == other.imshp
def __setstate__(self, d):
self.__dict__.update(d)
if not hasattr(self,"imshp"):
self.imshp = None
def __hash__(self):
# don't use hash(self.version) as hash(-1)==-2 and hash(-2)==-2 in python!
return hash(type(self)) \
^ hash(self.border_mode) \
^ hash(self.subsample) \
^ hash(self.logical_img_hw) \
^ hash(self.logical_kern_hw) \
^ hash(self.logical_kern_align_top) \
^ self.version \
^ hash(self.verbose) \
^ hash(self.kshp)\
^ hash(self.imshp)
def __str__(self):
return '%s{%s, %s, %s, %s, %s, %s, %s}' %(self.__class__.__name__,
self.border_mode,
str(self.subsample),
str(self.logical_img_hw),
str(self.logical_kern_hw),
str(self.logical_kern_align_top),
str(self.imshp),
str(self.kshp))
def make_node(self, img, kern):
if img.type.ndim != 4:
raise TypeError('img must be 4D tensor')
if kern.type.ndim != 4:
raise TypeError('kern must be 4D tensor')
broadcastable = [img.type.broadcastable[0], kern.type.broadcastable[0], False, False]
return Apply(self, [img, kern], [CudaNdarrayType(broadcastable)()])
def c_compile_args(self):
nb = 0
if self.kshp is not None:
nb = self.kshp[1]
return ['-DTHEANO_KERN_WID='+str(nb)]#,'-g','-G']
def c_headers(self):
return ['cuda_ndarray.cuh','<stdio.h>']
def c_code_cache_version(self):
return (0, 16) # raise this whenever modifying any of the support_code_files
def c_support_code_apply(self, node, nodename):
# REMEMBER TO RAISE c_code_cache_version when changing any of these files
return open(os.path.join(os.path.split(__file__)[0],'conv_kernel.cu')).read()+\
open(os.path.join(os.path.split(__file__)[0],'conv_full_kernel.cu')).read()+\
open(os.path.join(os.path.split(__file__)[0],'conv.cu')).read()
def c_code(self, node, nodename, inp, out_, sub):
img, kern = inp
out, = out_
dx = self.subsample[0]
dy = self.subsample[1]
border_mode = self.border_mode
version = self.version
verbose = self.verbose
sub = sub.copy()
sub.update(locals())
return """
//Mandatory args
const char *mode_str = "%(border_mode)s";
//Optional args
int version = %(version)s;
int verbose = %(verbose)s;
int dx = %(dx)s;
int dy = %(dy)s;
int mode;
if (strcmp(mode_str, "full") == 0)
{
mode = ConvMode_FULL;
}
else if (strcmp(mode_str, "valid") == 0)
{
mode = ConvMode_VALID;
}
else
{
PyErr_SetString(PyExc_ValueError, "mode must be one of 'full' or 'valid'");
return NULL;
}
CudaNdarray * out2 = (CudaNdarray *)CudaNdarray_Conv(%(img)s, %(kern)s, %(out)s,
mode, dx, dy, version, verbose);
Py_XDECREF(%(out)s);
%(out)s = out2;
"""%sub
class GpuDownsampleFactorMax(Op):
def __init__(self, ds, ignore_border=False):
self.ds = tuple(ds)
self.ignore_border = ignore_border
def __eq__(self, other):
return type(self) == type(other) and self.ds == other.ds and self.ignore_border == other.ignore_border
def __hash__(self):
return hash(type(self)) ^ hash(self.ds) ^ hash(self.ignore_border)
def __str__(self):
return '%s{%s,%s}' % (self.__class__.__name__, self.ds, self.ignore_border)
def make_node(self, x):
if not isinstance(x.type, CudaNdarrayType):
raise TypeError()
if not x.type.ndim == 4:
raise TypeError()
return Apply(self, [x], [x.type()])
#def perform(self, node, input_storage, output_storage):
#raise NotImplementedError('only C is implemented')
def c_code_cache_version(self):
        return (3,)
def c_code(self, node, nodename, inp, out, sub):
x, = inp
z, = out
fail = sub['fail']
ds0, ds1 = self.ds
ignore_border = int(self.ignore_border)
return """
int dims[4], xdim2, xdim3;
if (%(x)s->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "rank error");
%(fail)s;
}
xdim2 = CudaNdarray_HOST_DIMS(%(x)s)[2];
xdim3 = CudaNdarray_HOST_DIMS(%(x)s)[3];
dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0];
dims[1] = CudaNdarray_HOST_DIMS(%(x)s)[1];
dims[2] = xdim2 / %(ds0)s;
dims[3] = xdim3 / %(ds1)s;
if (! %(ignore_border)s)
{
dims[2] += (xdim2%%(%(ds0)s)?1:0);
dims[3] += (xdim3%%(%(ds1)s)?1:0);
}
if(dims[3]>512){
PyErr_Format(PyExc_ValueError, "GpuDownsampleFactorMax: last dimention size of %%d is bigger then 512. This case is not implemented.", dims[3]);
%(fail)s;
}
if ((NULL == %(z)s)
|| (CudaNdarray_HOST_DIMS(%(z)s)[0] != dims[0])
|| (CudaNdarray_HOST_DIMS(%(z)s)[1] != dims[1])
|| (CudaNdarray_HOST_DIMS(%(z)s)[2] != dims[2])
|| (CudaNdarray_HOST_DIMS(%(z)s)[3] != dims[3]))
{
Py_XDECREF(%(z)s);
%(z)s = (CudaNdarray*)CudaNdarray_New();
if ((NULL == %(z)s)
|| CudaNdarray_alloc_contiguous(%(z)s, 4, dims))
{
Py_XDECREF(%(z)s);
%(z)s = NULL;
PyErr_SetString(PyExc_ValueError, "Was not able to allocate output!");
%(fail)s;
}
}
{
dim3 grid(dims[0] * dims[1], dims[2]);
//dim3 block(std::min(dims[3], 512)); //TODO: implement this by supporting more
//outputs than threads
dim3 block(dims[3]);
if ((grid.x*grid.y) && dims[3])
kMaxPool_%(nodename)s<%(ds0)s, %(ds1)s> <<<grid, block, xdim3*sizeof(float)>>>(
dims[0], dims[1], dims[2], dims[3], xdim2, xdim3,
CudaNdarray_DEV_DATA(%(x)s),
CudaNdarray_HOST_STRIDES(%(x)s)[0],
CudaNdarray_HOST_STRIDES(%(x)s)[1],
CudaNdarray_HOST_STRIDES(%(x)s)[2],
CudaNdarray_HOST_STRIDES(%(x)s)[3],
CudaNdarray_DEV_DATA(%(z)s));
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s. (grid: %%i x %%i; block: %%i x %%i x %%i)\\n",
"kMaxPool_%(nodename)s",
cudaGetErrorString(err),
grid.x,
grid.y,
block.x,
block.y,
block.z);
%(fail)s;
}
}
""" % locals()
def c_support_code_apply(self, node, nodename):
ignore_border = int(self.ignore_border)
return """
template<int pf2, int pf3>
__global__ void kMaxPool_%(nodename)s(
int D0, int D1, int D2, int D3, int xD2, int xD3,
const float * x, int xS0, int xS1, int xS2, int xS3,
float *z)
{
float cur_max, cur_x;
int i0 = blockIdx.x %% D0;
int i1 = blockIdx.x / D0;
int i2 = blockIdx.y;
extern __shared__ float xbuf[]; //size [xD3]
for (int r2 = 0; (r2 < pf2) && (%(ignore_border)s || (r2 + i2*pf2 < xD2)); ++r2)
{
__syncthreads();
// load the current row of the image into shared memory
for (int j = threadIdx.x; j < xD3; j += blockDim.x)
{
xbuf[j] = x[i0*xS0 + i1*xS1 + (i2*pf2+r2)*xS2 + j*xS3];
}
__syncthreads();
// initialize our max if this is the first row we're loading
cur_max = (r2 == 0) ? xbuf[threadIdx.x*pf3] : cur_max;
// do a mini-reduction over the pf3 relevant elements in the current row
if (%(ignore_border)s)
{
for (int k = 0; k < pf3; ++k)
{
cur_x = xbuf[threadIdx.x*pf3+k];
cur_max = (cur_x > cur_max) ? cur_x : cur_max;
}
}
else
{
for (int k = 0; k < pf3; ++k)
{
if (threadIdx.x*pf3 + k < xD3)
{
cur_x = xbuf[threadIdx.x*pf3+k];
cur_max = (cur_x > cur_max) ? cur_x : cur_max;
}
}
}
}
//store the result to global memory
z[i0 * D1*D2*D3 + i1*D2*D3 + i2*D3 + threadIdx.x] = cur_max;
}
""" % locals()
class GpuDownsampleFactorMaxGrad(Op):
def __init__(self, ds, ignore_border):
self.ds = tuple(ds)
self.ignore_border = ignore_border
def __eq__(self, other):
return type(self) == type(other) and self.ds == other.ds and self.ignore_border == other.ignore_border
def __hash__(self):
return hash(type(self)) ^ hash(self.ds) ^ hash(self.ignore_border)
def __str__(self):
return '%s{%s,%s}' % (self.__class__.__name__, self.ds, self.ignore_border)
def make_node(self, x, z, gz):
return Apply(self, [x, z, gz], [x.type()])
def c_code_cache_version(self):
#return ()
return (5,)
def c_code(self, node, nodename, inp, out, sub):
x, z, gz = inp
gx, = out
fail = sub['fail']
ds0, ds1 = self.ds
ignore_border = int(self.ignore_border)
return """
if (%(x)s->nd != 4
|| %(z)s->nd != 4
|| %(gz)s->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "rank error");
%(fail)s;
}
if ((NULL == %(gx)s)
|| (CudaNdarray_HOST_DIMS(%(gx)s)[0] != CudaNdarray_HOST_DIMS(%(x)s)[0])
|| (CudaNdarray_HOST_DIMS(%(gx)s)[1] != CudaNdarray_HOST_DIMS(%(x)s)[1])
|| (CudaNdarray_HOST_DIMS(%(gx)s)[2] != CudaNdarray_HOST_DIMS(%(x)s)[2])
|| (CudaNdarray_HOST_DIMS(%(gx)s)[3] != CudaNdarray_HOST_DIMS(%(x)s)[3]))
{
Py_XDECREF(%(gx)s);
%(gx)s = (CudaNdarray*)CudaNdarray_New();
if ((NULL == %(gx)s)
|| CudaNdarray_alloc_contiguous(%(gx)s, 4, CudaNdarray_HOST_DIMS(%(x)s)))
{
Py_XDECREF(%(gx)s);
%(gx)s = NULL;
%(fail)s;
}
}
{
//TODO: supporting more output columns than threads
            // make sure we cover every x row when ignore_border is set and there's a border present to be ignored
int needs_extra_z_col = %(ignore_border)s && (CudaNdarray_HOST_DIMS(%(x)s)[2] %% %(ds0)s);
dim3 grid(CudaNdarray_HOST_DIMS(%(z)s)[0],CudaNdarray_HOST_DIMS(%(z)s)[2] + (needs_extra_z_col ? 1 : 0));
dim3 block(std::min(CudaNdarray_HOST_DIMS(%(x)s)[3], 512));
kDownsampleMaxGrad_%(nodename)s<%(ds0)s, %(ds1)s> <<<grid, block>>>(
CudaNdarray_HOST_DIMS(%(z)s)[0],
CudaNdarray_HOST_DIMS(%(z)s)[1],
CudaNdarray_HOST_DIMS(%(z)s)[2],
CudaNdarray_HOST_DIMS(%(z)s)[3],
CudaNdarray_HOST_DIMS(%(x)s)[2],
CudaNdarray_HOST_DIMS(%(x)s)[3],
CudaNdarray_DEV_DATA(%(x)s),
CudaNdarray_HOST_STRIDES(%(x)s)[0],
CudaNdarray_HOST_STRIDES(%(x)s)[1],
CudaNdarray_HOST_STRIDES(%(x)s)[2],
CudaNdarray_HOST_STRIDES(%(x)s)[3],
CudaNdarray_DEV_DATA(%(z)s),
CudaNdarray_HOST_STRIDES(%(z)s)[0],
CudaNdarray_HOST_STRIDES(%(z)s)[1],
CudaNdarray_HOST_STRIDES(%(z)s)[2],
CudaNdarray_HOST_STRIDES(%(z)s)[3],
CudaNdarray_DEV_DATA(%(gz)s),
CudaNdarray_HOST_STRIDES(%(gz)s)[0],
CudaNdarray_HOST_STRIDES(%(gz)s)[1],
CudaNdarray_HOST_STRIDES(%(gz)s)[2],
CudaNdarray_HOST_STRIDES(%(gz)s)[3],
CudaNdarray_DEV_DATA(%(gx)s));
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s. (grid: %%i x %%i; block: %%i x %%i x %%i)\\n",
"kDownsampleMaxGrad_%(nodename)s",
cudaGetErrorString(err),
grid.x,
grid.y,
block.x,
block.y,
block.z);
%(fail)s;
}
}
""" % locals()
def c_support_code_apply(self, node, nodename):
        # This code considers every position in the output z, and then computes the gradient for the
# input pixels that were downsampled to that z-position. It does so by running along every
# z row (sometimes plus one, to make sure every gx row gets totally filled), and by
# running along every x col. This code is not sensitive to the ignore_border flag along
# the row dimension (since it runs for every position in the output z), but it is sensitive
# along the col dimension.
ignore_border = int(self.ignore_border)
return """
template<int ds0, int ds1> // ds0 is the downsampling factor in rows, ds1 in columns
__global__ void kDownsampleMaxGrad_%(nodename)s(
int D0, int D1, int D2, int D3, int xD2, int xD3,
const float * x, int xS0, int xS1, int xS2, int xS3,
const float * z, int zS0, int zS1, int zS2, int zS3,
const float * gz, int gzS0, int gzS1, int gzS2, int gzS3,
float *gx)
{
// D0: number of image rows
// D1: number of image cols
// D2: number of z rows
// D3: number of z cols
// xD2: number of x rows
// xD3: number of x cols
// various .S. variables are strides
float cur_max, cur_x, my_z, my_gz;
int i0 = blockIdx.x; // image row
int i1 = 0; // image col
int i2 = blockIdx.y; // row wrt z and/or gz, ranges from 0 to D2 - 1 OR D2 (as needed to cover all x rows)
int x_col = threadIdx.x; // col wrt x, ranges from 0 to xD3 - 1
int z_col = x_col/ds1; // z_col corresponding to this x_col
//TODO: raise occupancy. Use threadIdx.y to run several iterations of this i1 loop
//in parallel
for (i1 = 0; i1 < D1; ++i1) // loop over images (same for z and x)
{
for(int col_iter = 0; col_iter * blockDim.x <= xD3 ; col_iter++){
                //The if inside avoids doing the division when only 1 col_iter is needed
if(blockDim.x != xD3)
{
x_col = threadIdx.x + col_iter * blockDim.x;
z_col = x_col/ds1;
}
if (%(ignore_border)s && x_col >= ds1 * D3)
{
// This happens only if x_col was ignored (via ignore_border)
// TODO: if ignore_border is False, this is impossible and we don't even
// need to generate this code.
my_gz = 0.0f;
//any fp number suffices for my_z, so we don't even need to set it to
//anything in particular.
}
else
{
// this is effectively:
// my_gz = gz[image_row][image_col][z_row][z_col]
// my_z = z[image_row][image_col][z_row][z_col]
my_gz = gz[i0 * gzS0 + i1 * gzS1 + i2 * gzS2 + z_col*gzS3];
my_z = z[i0 * zS0 + i1 * zS1 + i2 * zS2 + z_col* zS3];
}
if(x_col<xD3){
for (int x_row = i2*ds0; (x_row < i2*ds0+ds0) && (x_row < xD2); ++x_row)
{
// this is effectively:
// gx[image_row][image_col][x_row][x_col]
// = (my_z == x[image_row][image_col][x_row][x_col]) ? my_gz : 0.0f;
gx[i0 * D1*xD2*xD3 + i1*xD2*xD3 + x_row*xD3 + x_col]
= (my_z == x[i0*xS0 + i1*xS1 + x_row*xS2 + x_col*xS3]) ? my_gz : 0.0f;
}
//gx[i0 * D1*xD2*xD3 + i1*xD2*xD3 + x_row*xD3 + x_col] = -999;
}
}
}
}
""" % locals()
``` |
{
"source": "josharnoldjosh/Image-Caption-Joint-Embedding",
"score": 3
} |
#### File: josharnoldjosh/Image-Caption-Joint-Embedding/evaluate.py
```python
import numpy
import torch
def image_to_text(captions, images, npts=None, verbose=False):
"""
Images->Text (Image Annotation)
Images: (5N, K) matrix of images
Captions: (5N, K) matrix of captions
"""
    if npts is None:
npts = images.size()[0] / 5
npts = int(npts)
ranks = numpy.zeros(npts)
for index in range(npts):
# Get query image
im = images[5 * index].unsqueeze(0)
# Compute scores
d = torch.mm(im, captions.t())
d_sorted, inds = torch.sort(d, descending=True)
inds = inds.data.squeeze(0).cpu().numpy()
# Score
rank = 1e20
# find the highest ranking
for i in range(5*index, 5*index + 5, 1):
tmp = numpy.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
# Compute metrics
r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)
medr = numpy.floor(numpy.median(ranks)) + 1
if verbose:
print(" * Image to text scores: R@1: %.1f, R@5: %.1f, R@10: %.1f, Medr: %.1f" % (r1, r5, r10, medr))
return r1+r5+r10, (r1, r5, r10, medr)
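# Minimal usage sketch (illustrative only; shapes and values are hypothetical). Both matrices are
# expected to hold 5 rows per annotated image, with rows 5i..5i+4 belonging to image i:
# _imgs = torch.nn.functional.normalize(torch.randn(50, 1024), dim=1)
# _caps = torch.nn.functional.normalize(torch.randn(50, 1024), dim=1)
# _score, (_r1, _r5, _r10, _medr) = image_to_text(_caps, _imgs, verbose=True)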
def text_to_image(captions, images, npts=None, verbose=False):
    if npts is None:
npts = images.size()[0] / 5
npts = int(npts)
ims = torch.cat([images[i].unsqueeze(0) for i in range(0, len(images), 5)])
ranks = numpy.zeros(5 * npts)
for index in range(npts):
# Get query captions
queries = captions[5*index : 5*index + 5]
# Compute scores
d = torch.mm(queries, ims.t())
for i in range(d.size()[0]):
d_sorted, inds = torch.sort(d[i], descending=True)
inds = inds.data.squeeze(0).cpu().numpy()
ranks[5 * index + i] = numpy.where(inds == index)[0][0]
# Compute metrics
r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)
medr = numpy.floor(numpy.median(ranks)) + 1
if verbose:
print(" * Text to image scores: R@1: %.1f, R@5: %.1f, R@10: %.1f, Medr: %.1f" % (r1, r5, r10, medr))
return r1+r5+r10, (r1, r5, r10, medr)
``` |
{
"source": "josharnoldjosh/image_feature_extractor",
"score": 3
} |
#### File: josharnoldjosh/image_feature_extractor/extract.py
```python
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
import os
from glob import glob
import numpy as np
import pickle
from PIL import Image
import h5py
def LoadImages():
print("Loading image files...")
image_list = []
for filename in glob('images/*.jpg'):
print(filename)
image_list.append(filename)
return image_list
def ExtractImageFeature(img_path):
img = image.load_img(img_path, target_size=(224, 224))
img_data = image.img_to_array(img)
img_data = np.expand_dims(img_data, axis=0)
img_data = preprocess_input(img_data)
vgg16_feature = model.predict(img_data)
return np.array(vgg16_feature).flatten()
extracted_image_features = []
model = VGG16(weights='imagenet', include_top=False)
i = 0
images = LoadImages()
for path in images:
i += 1
feature = ExtractImageFeature(path)
data = (path, feature)
extracted_image_features.append(data)
print(i, "out of", len(images))
import hickle
hickle.dump(extracted_image_features, 'output.hkl', mode='w', compression='gzip')
print("Script done!")
``` |
{
"source": "josharnoldjosh/leetcode",
"score": 4
} |
#### File: josharnoldjosh/leetcode/Invert Binary Tree.py
```python
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
r, l = self.invertTree(root.right), self.invertTree(root.left)
root.right = l
root.left = r
return root
``` |
{
"source": "josharnoldjosh/ParlAI",
"score": 2
} |
#### File: parlai/core/logs.py
```python
import os
class TensorboardLogger(object):
"""Log objects to tensorboard."""
_shared_state = {}
@staticmethod
def add_cmdline_args(argparser):
"""Add tensorboard CLI args."""
logger = argparser.add_argument_group('Tensorboard Arguments')
logger.add_argument(
'-tblog',
'--tensorboard-log',
type='bool',
default=False,
help="Tensorboard logging of metrics, default is %(default)s",
hidden=True,
)
logger.add_argument(
'-tbtag',
'--tensorboard-tag',
type=str,
default=None,
            help='Specify all opt keys which you want to be presented in the TB name',
hidden=True,
)
logger.add_argument(
'-tbmetrics',
'--tensorboard-metrics',
type=str,
default=None,
help='Specify metrics which you want to track, it will be extracted '
'from report dict.',
hidden=True,
)
logger.add_argument(
'-tbcomment',
'--tensorboard-comment',
type=str,
default='',
hidden=True,
help='Add any line here to distinguish your TB event file, optional',
)
def __init__(self, opt):
self.__dict__ = self._shared_state
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please `pip install tensorboardX` for logs with TB.')
if opt['tensorboard_tag'] is None:
tensorboard_tag = opt['starttime']
else:
tensorboard_tag = opt['starttime'] + '__'.join(
[i + '-' + str(opt[i]) for i in opt['tensorboard_tag'].split(',')]
)
if opt['tensorboard_comment']:
tensorboard_tag += '__' + opt['tensorboard_comment']
tbpath = os.path.join(os.path.dirname(opt['model_file']), 'tensorboard')
print('[ Saving tensorboard logs here: {} ]'.format(tbpath))
if not os.path.exists(tbpath):
os.makedirs(tbpath)
self.writer = SummaryWriter(logdir='{}/{}'.format(tbpath, tensorboard_tag))
if opt['tensorboard_metrics'] is None:
self.tbmetrics = ['ppl', 'loss']
else:
self.tbmetrics = opt['tensorboard_metrics'].split(',')
def add_metrics(self, setting, step, report):
"""
Add all metrics from tensorboard_metrics opt key.
        :param setting: whichever setting is used (train, valid, or test); it
            will be used as the title of the graph
:param step: num of parleys (x axis in graph), in train - parleys, in
valid - wall time
:param report: from TrainingLoop
:return:
"""
for met in self.tbmetrics:
if met in report.keys():
self.writer.add_scalar(
"{}/{}".format(setting, met), report[met], global_step=step
)
def add_scalar(self, name, y, step=None):
"""
Add a scalar.
:param str name:
the title of the graph, use / to group like "train/loss/ce" or so
:param y:
value
:param step:
x axis step
"""
self.writer.add_scalar(name, y, step)
def add_histogram(self, name, vector, step=None):
"""Add a histogram."""
self.writer.add_histogram(name, vector, step)
def add_text(self, name, text, step=None):
"""Add text."""
self.writer.add_text(name, text, step)
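# Illustrative usage sketch (values are hypothetical; assumes `opt` is a ParlAI opt dict with
# 'model_file', 'starttime' and the tensorboard_* keys set):
# tb_logger = TensorboardLogger(opt)
# tb_logger.add_metrics(setting='train', step=num_parleys, report={'loss': 2.31, 'ppl': 10.1})
# tb_logger.add_scalar('train/lr', 0.001, step=num_parleys)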
``` |
{
"source": "josharnoldjosh/Simple-Joint-Embedding-Pytorch",
"score": 3
} |
#### File: josharnoldjosh/Simple-Joint-Embedding-Pytorch/time_dist.py
```python
from settings import config
import torch
import torch.nn as nn
import torch.nn.init
class TimeDistributed(nn.Module):
"""
Time Distributed Wrapper Layer similar to keras
Apply a module across each time step
"""
def __init__(self, module, batch_first=False):
super(TimeDistributed, self).__init__()
self.module = module
self.batch_first = batch_first
def forward(self, x):
if len(x.size()) <= 2:
return self.module(x)
# Squash samples and timesteps into a single axis
x_reshape = x.contiguous().view(-1, x.size(-1)) # (samples * timesteps, input_size)
y = self.module(x_reshape)
# We have to reshape Y
if self.batch_first:
y = y.contiguous().view(x.size(0), -1, y.size(-1)) # (samples, timesteps, output_size)
else:
y = y.view(-1, x.size(1), y.size(-1)) # (timesteps, samples, output_size)
return y
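# Illustrative use of the wrapper above (shapes are hypothetical): a Linear layer applied
# independently at every time step of a (batch, time, features) tensor.
# _td = TimeDistributed(nn.Linear(300, 128), batch_first=True)
# _y = _td(torch.randn(4, 10, 300))  # -> shape (4, 10, 128)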
class SentenceEmbedding(nn.Module):
def __init__(self):
super(SentenceEmbedding, self).__init__()
self.time_distributed = TimeDistributed(module = nn.Linear(config['sentence_embedding_size'],
config["linear_hidden"]), batch_first = True)
self.rnn = nn.LSTM(config["linear_hidden"], config['lstm_hidden_size'], config["lstm_depth"], batch_first = True)
self.encoder = nn.Linear(config['lstm_hidden_size'], config["joint_embedding_latent_space_dimension"])
if torch.cuda.is_available():
self.time_distributed.cuda()
self.rnn.cuda()
self.encoder.cuda()
self.init_model_weights()
def init_model_weights(self):
"""
Here we reproduce Keras default initialization weights to initialize Embeddings/LSTM weights
"""
ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
b = (param.data for name, param in self.named_parameters() if 'bias' in name)
for t in ih:
nn.init.xavier_uniform_(t)
for t in hh:
nn.init.orthogonal_(t)
for t in b:
nn.init.constant_(t, 0)
def forward(self, qa_pairs):
qa_pairs = qa_pairs.float()
qa_pairs_emb = self.time_distributed(qa_pairs)
_, (h_qa_t, _) = self.rnn(qa_pairs_emb)
qa_hist_state = h_qa_t[-1]
diag_state = self.encoder(qa_hist_state)
out = self.l2norm(diag_state)
return out
def l2norm(self, X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
X = torch.div(X, norm)
return X
``` |
{
"source": "josharnoldjosh/visdom-plot",
"score": 3
} |
#### File: josharnoldjosh/visdom-plot/visual.py
```python
import visdom
import subprocess
import torch
from data import Epoch
from sklearn.metrics import confusion_matrix
import numpy as np
import calc
LINE_TYPE = "window_type_line"
CM_TYPE = "window_confusion_matrix"
class Plot:
"""
    Use the following methods to initialize a plot:
- line (for line plot)
- cm (for confusion matrix plot)
    Use the following method to update data sources (note: this does not display the changes)
- update
    Use the following method to visually display new results:
- plot
"""
def __init__(self, model_name="Model"):
self.vis = visdom.Visdom() # must run python -m visdom.server
self.model_name = model_name
self.plots = {}
return
def clear(self):
self.vis.close()
def line(self, plot_name, xlabel, ylabel, legend_ext):
window = self.vis.line(
Y=torch.zeros((1)).cpu(),
X=torch.zeros((1)).cpu(),
opts=dict(xlabel=xlabel,ylabel=ylabel,title=self.model_name+' '+plot_name,
legend=['Train '+legend_ext, 'Validation '+legend_ext, 'Test '+legend_ext]))
self.plots[plot_name] = {"type":LINE_TYPE, "window":window, "idx":0, "train":[], "val":[], "test":[], "legend_ext":legend_ext}
def cm(self, plot_name):
window = self.vis.heatmap(
X=[[0, 0], [0, 0]],
opts=dict(
columnnames=['Positive', 'Negative'],
rownames=['True', 'False'],
colormap='Electric', title=self.model_name+' '+plot_name
))
self.plots[plot_name] = {"type":CM_TYPE, "window":window, "data":[]}
def update(self, plot_name, data=[], train=0, val=0, test=0):
"""
Appends data to the plot.
"""
if self.plots[plot_name]["type"] == LINE_TYPE:
if train != 0:
self.plots[plot_name]["train"] += [train]
if val != 0:
self.plots[plot_name]["val"] += [val]
if test != 0:
self.plots[plot_name]["test"] += [test]
if self.plots[plot_name]["type"] == CM_TYPE:
self.plots[plot_name]["data"].append(data)
def plot_line_update(self, plot, key):
window = plot["window"]
x = plot["idx"]
train_y = calc.average_array(plot["train"])
if train_y != 0:
self.vis.line(X=torch.ones((1,1)).cpu()*x,
Y=torch.Tensor([train_y]).unsqueeze(0).cpu(),
win=window, update='append', name='Train '+plot["legend_ext"])
val_y = calc.average_array(plot["val"])
if val_y != 0:
self.vis.line(X=torch.ones((1,1)).cpu()*x,
Y=torch.Tensor([val_y]).unsqueeze(0).cpu(),
win=window, update='append', name='Val '+plot["legend_ext"])
test_y = calc.average_array(plot["test"])
if test_y != 0:
self.vis.line(X=torch.ones((1,1)).cpu()*x,
Y=torch.Tensor([test_y]).unsqueeze(0).cpu(),
win=window, update='append', name='Test '+plot["legend_ext"])
plot["idx"] = x+1
plot["train"] = []
plot["val"] = []
plot["test"] = []
self.plots[key] = plot
return
def plot_cm_update(self, plot, key):
window = plot["window"]
(tp, tn, fp, fn) = calc.average_arrays(plot["data"])
window = self.vis.heatmap(win=window,
X=[[tp, fn],
[fp, tn]],
opts=dict(
columnnames=['Positive', 'Negative'],
rownames=['True', 'False'],
colormap='Electric', title=self.model_name+' '+key, update="replace", name=self.model_name+' '+key
))
def plot(self):
"""
Updates the plot data.
"""
for key in self.plots.keys():
plot = self.plots[key]
if plot["type"] == LINE_TYPE:
self.plot_line_update(plot, key)
if plot["type"] == CM_TYPE:
self.plot_cm_update(plot, key)
return
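# Minimal usage sketch (metric values are hypothetical; requires a running `python -m visdom.server`):
# plot = Plot(model_name="MyModel")
# plot.line("Loss", xlabel="Epoch", ylabel="Loss", legend_ext="loss")
# plot.cm("Confusion Matrix")
# for epoch in range(10):
#     plot.update("Loss", train=0.5, val=0.6)
#     plot.update("Confusion Matrix", data=(40, 45, 10, 5))  # (tp, tn, fp, fn)
#     plot.plot()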
``` |
{
"source": "josharsh/Submitty",
"score": 2
} |
#### File: tests/e2e/test_accessibility.py
```python
from .base_testcase import BaseTestCase
import json
import os
import tempfile
import subprocess
from .test_office_hours_queue import enableQueue
class TestAccessibility(BaseTestCase):
"""
Test cases revolving around the logging in functionality of the site
"""
def __init__(self, testname):
super().__init__(testname, log_in=False)
    # This should contain a URL for every type of page on the website
# please replace the semester and course with '/courses/{}/{}'
# So '/courses/s20/sample/users' becomes '/courses/{}/{}/users'
urls = [
'/home',
'/home/courses/new',
'/courses/{}/{}',
'/courses/{}/{}/gradeable/future_no_tas_homework/update?nav_tab=0',
'/courses/{}/{}/autograding_config?g_id=future_no_tas_homework',
'/courses/{}/{}/gradeable/future_no_tas_lab/grading?view=all',
'/courses/{}/{}/gradeable/future_no_tas_test/grading?view=all',
'/courses/{}/{}/gradeable/open_homework/grading/status',
'/courses/{}/{}/gradeable/open_homework/bulk_stats',
'/courses/{}/{}/gradeable/open_homework/grading/details?view=all',
'/courses/{}/{}/gradeable/open_homework',
'/courses/{}/{}/gradeable/open_team_homework/team',
'/courses/{}/{}/gradeable/grades_released_homework_autota',
'/courses/{}/{}/notifications',
'/courses/{}/{}/notifications/settings',
'/courses/{}/{}/gradeable',
'/courses/{}/{}/config',
'/courses/{}/{}/theme',
'/courses/{}/{}/office_hours_queue',
'/courses/{}/{}/course_materials',
'/courses/{}/{}/forum',
'/courses/{}/{}/forum/threads/new',
'/courses/{}/{}/forum/categories',
'/courses/{}/{}/forum/stats',
'/courses/{}/{}/users',
'/courses/{}/{}/graders',
'/courses/{}/{}/sections',
'/courses/{}/{}/student_photos',
'/courses/{}/{}/late_days',
'/courses/{}/{}/extensions',
'/courses/{}/{}/grade_override',
'/courses/{}/{}/plagiarism',
'/courses/{}/{}/plagiarism/configuration/new',
'/courses/{}/{}/reports',
'/courses/{}/{}/late_table',
'/courses/{}/{}/grades',
'/courses/{}/{}/polls',
'/courses/{}/{}/polls/newPoll',
]
urls_formatted = []
baseline_path = ''
def test_w3_validator(self):
# Uncomment this to generate a new baseline for all pages on the website
# Then run 'python3 -m unittest e2e.test_accessibility' from inside the tests folder
# self.genBaseline()
self.validatePages()
# Any code that should be run before checking for accessibility
def setUp(self):
super().setUp()
self.baseline_path = f'{os.path.dirname(os.path.realpath(__file__))}/accessibility_baseline.json'
self.urls_formatted = [url.format(self.semester, 'sample') for url in self.urls]
# Enables the office hours queue
enableQueue(self)
def validatePages(self):
self.log_out()
self.log_in(user_id='instructor')
self.click_class('sample')
with open(self.baseline_path, encoding="utf8") as f:
baseline = json.load(f)
self.maxDiff = None
for url_index, url in enumerate(self.urls_formatted):
with self.subTest(url=url):
foundErrors = []
foundErrorMessages = []
self.get(url=url)
with tempfile.NamedTemporaryFile(mode='w+', suffix='.html') as tmp:
tmp.write("<!DOCTYPE html>\n")
tmp.write(self.driver.page_source)
error_json = subprocess.check_output(["java", "-jar", "/usr/bin/vnu.jar", "--exit-zero-always", "--format", "json", tmp.name], stderr=subprocess.STDOUT)
for error in json.loads(error_json)['messages']:
                        # These errors are reported here even though they do not appear when the
                        # rendered pages are inspected manually, so the test is set to ignore them.
skip_messages = [
"Start tag seen without seeding a doctype first",
"Possible misuse of “aria-label”",
"The “date” input type is not supported in all browsers."
]
skip_error = False
for skip_msg in skip_messages:
if error['message'].startswith(skip_msg):
skip_error = True
break
if skip_error:
continue
if error['message'] not in baseline[self.urls[url_index]] and error['message'] not in foundErrorMessages:
foundErrorMessages.append(error['message'])
clean_error = {
"error": error['message'].replace('\u201c', "'").replace('\u201d', "'").strip(),
"html extract": error['extract'].strip(),
"type": error['type'].strip()
}
foundErrors.append(clean_error)
msg = f"\n{json.dumps(foundErrors, indent=4, sort_keys=True)}\nMore info can be found by using the w3 html validator. You can read more about it on submitty.org:\nhttps://validator.w3.org/#validate_by_input\nhttps://submitty.org/developer/interface_design_style_guide/web_accessibility#html-css-and-javascript"
self.assertFalse(foundErrors != [], msg=msg)
def genBaseline(self):
self.log_out()
self.log_in(user_id='instructor')
self.click_class('sample')
baseline = {}
for url_index, url in enumerate(self.urls_formatted):
self.get(url=url)
with tempfile.NamedTemporaryFile(mode='w+', suffix='.html') as tmp:
tmp.write("<!DOCTYPE html>\n")
tmp.write(self.driver.page_source)
error_json = subprocess.check_output(["java", "-jar", "/usr/bin/vnu.jar", "--exit-zero-always", "--format", "json", tmp.name], stderr=subprocess.STDOUT)
baseline[self.urls[url_index]] = []
for error in json.loads(error_json)['messages']:
                    # These errors are reported here even though they do not appear when the
                    # rendered pages are inspected manually, so they are excluded from the baseline.
if error['message'].startswith("Start tag seen without seeing a doctype first"):
continue
if error['message'].startswith("Possible misuse of “aria-label”"):
continue
if error['message'] not in baseline[self.urls[url_index]]:
baseline[self.urls[url_index]].append(error['message'])
with open(self.baseline_path, 'w') as file:
json.dump(baseline, file, ensure_ascii=False, indent=4)
``` |
{
"source": "joshas/ExplorerProperties",
"score": 2
} |
#### File: ExplorerProperties/explorer_properties/__init__.py
```python
from fman import DirectoryPaneCommand
import ctypes
import ctypes.wintypes
SEE_MASK_NOCLOSEPROCESS = 0x00000040
SEE_MASK_INVOKEIDLIST = 0x0000000C
class SHELLEXECUTEINFO(ctypes.Structure):
_fields_ = (
("cbSize", ctypes.wintypes.DWORD),
("fMask", ctypes.c_ulong),
("hwnd", ctypes.wintypes.HANDLE),
("lpVerb", ctypes.c_wchar_p),
("lpFile", ctypes.c_wchar_p),
("lpParameters", ctypes.c_char_p),
("lpDirectory", ctypes.c_char_p),
("nShow", ctypes.c_int),
("hInstApp", ctypes.wintypes.HINSTANCE),
("lpIDList", ctypes.c_void_p),
("lpClass", ctypes.c_char_p),
("hKeyClass", ctypes.wintypes.HKEY),
("dwHotKey", ctypes.wintypes.DWORD),
("hIconOrMonitor", ctypes.wintypes.HANDLE),
("hProcess", ctypes.wintypes.HANDLE),
)
class DisplayExplorerProperties(DirectoryPaneCommand):
def __call__(self):
file_name = self.pane.get_file_under_cursor()
if not file_name:
return
shell_execute_ex = ctypes.windll.shell32.ShellExecuteExW
shell_execute_ex.restype = ctypes.wintypes.BOOL
sei = SHELLEXECUTEINFO()
sei.cbSize = ctypes.sizeof(sei)
sei.fMask = SEE_MASK_NOCLOSEPROCESS | SEE_MASK_INVOKEIDLIST
sei.lpVerb = "properties"
sei.lpFile = file_name
sei.nShow = 1
shell_execute_ex(ctypes.byref(sei))
``` |
{
"source": "joshasgard/Disaster-Response-App",
"score": 3
} |
#### File: Disaster-Response-App/app/run.py
```python
import json
import pandas as pd
import joblib
# Import natural language toolkits
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Other imports
from flask import Flask
from flask import render_template, request, jsonify
import plotly
from plotly.graph_objs import Bar, Scatter
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
"""Function converts raw messages into tokens, cleans the tokens and removes
stopwords.
Args:
text(str): raw message data to be classified.
Returns:
clean_tokens(list): cleaned list of tokens(words).
"""
# convert each text input into tokens
tokens = word_tokenize(text)
# initialize lemmatizer for converting tokens to root
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
# remove stopwords
clean_tokens = [x for x in clean_tokens if x not in stopwords.words('english')]
return clean_tokens
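# Hypothetical usage sketch (added for illustration, not part of the original app).
# Requires the NLTK corpora (punkt, wordnet, stopwords) to be downloaded; the
# sample message below is made up.
def _example_tokenize():
    sample = "Water and food are urgently needed in the shelters!"
    # Returns lowercased, lemmatized tokens with English stopwords removed
    return tokenize(sample)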
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse', engine)
metrics = pd.read_csv("../data/model_metrics.csv")
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# Figure 1: data showing Number of Negative-Positive class per category - top 5
genre_per_category = df.iloc[:,3:].groupby('genre').sum().T
top_category = genre_per_category.sum(axis=1).sort_values(ascending=False).reset_index()
top_category.columns = ['categories', 'true_proportion -1']
top_category['false_proportion -0'] = df.shape[0] - top_category['true_proportion -1']
top_category['categories'] = top_category['categories'].apply(lambda x: str(x).replace('_', ' '))
top_classes = top_category.head(5)
# Figure 2: a scatter plot of performance metrics for all categories
# Figure 3: data visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
# Create visuals
graphs = [
{
'data': [
Bar(
name = 'Positive',
y=top_classes['categories'],
x=top_classes['true_proportion -1'],
orientation = 'h',
marker=dict(
color='rgba(246, 78, 139, 0.6)',
line=dict(color='rgba(246, 78, 139, 1.0)', width=3))
),
Bar(
name = 'Negative',
y=top_classes['categories'],
x=top_classes['false_proportion -0'],
orientation = 'h',
marker=dict(
color='rgba(58, 71, 80, 0.6)',
line=dict(color='rgba(58, 71, 80, 1.0)', width=3)
)
)
],
'layout':{
'barmode' : 'stack',
'title': 'Number of Negative-Positive class per category (top 5)',
"xaxis": {
'title': 'Number of messages'
},
"yaxis": {
'title': 'Categories',
'title_standoff' : 40,
'tickangle' : 45
},
}
},
{
'data':[
Scatter(
name = 'Precision',
x = metrics['Target Category'],
y = metrics['PRECISION'],
mode = 'lines'
),
Scatter(
name = 'Recall',
x = metrics['Target Category'],
y = metrics['RECALL'],
mode = 'lines'
),
Scatter(
name = 'F1 Score',
x = metrics['Target Category'],
y = metrics['F1'],
mode = 'lines'
)
],
'layout':{
'title': 'AdaBoost Model Performance Metrics',
"xaxis":{
'title': 'Categories',
'title_standoff': 100,
'tickangle': 45
},
"yaxis":{
'title': ""
}
}
},
{
'data': [
Bar(
x=genre_names,
y=genre_counts,
marker=dict(
color='rgba(174, 132, 255, 0.99)',
line=dict(color='rgba(174, 132, 255, 0.99)', width=3))
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
},
'template': "seaborn"
}
},
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results)
# web page displays training data visualizations in greater detail
@app.route('/databoard')
def databoard():
# Group data by message genres
genre_per_category = df.iloc[:,3:].groupby('genre').sum().T
# Figure 1: Message count in each class per genre - Filters for categories with greater than
# 10% (0.1) true values.
class_per_genre = genre_per_category[genre_per_category.sum(axis=1)/df.shape[0]>0.1].reset_index()
class_per_genre.columns = ['categories', 'direct', 'news', 'social']
# Figure 2: A scatter plot of performance metrics for all categories
# Figure 3: Data showing Number of Negative-Positive class per category - top 5
top_category = genre_per_category.sum(axis=1).sort_values(ascending=False).reset_index()
top_category.columns = ['categories', 'true_proportion -1']
top_category['false_proportion -0'] = df.shape[0] - top_category['true_proportion -1']
top_category['categories'] = top_category['categories'].apply(lambda x: str(x).replace('_', ' '))
top_classes = top_category.head(5)
# Figure 4: Data visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
# Create visuals
graphs = [
{
'data': [
Bar(
name = 'direct',
x=class_per_genre['categories'],
y=class_per_genre['direct'],
marker=dict(
color='rgba(246, 78, 139, 0.6)',
line=dict(color='rgba(246, 78, 139, 1.0)', width=3))
),
Bar(
name = 'news',
x=class_per_genre['categories'],
y=class_per_genre['news'],
marker=dict(
color='rgba(58, 71, 80, 0.6)',
line=dict(color='rgba(58, 71, 80, 1.0)', width=3))
),
Bar(
name = 'social',
x=class_per_genre['categories'],
y=class_per_genre['social'],
marker=dict(
color='rgba(174, 132, 255, 0.99)',
line=dict(color='rgba(0,153,153,0.2)', width=3)
)
)
],
'layout':{
#'barmode' : 'stack',
'title': 'Message count per class per genre',
"yaxis": {
'title': 'Number of messages'
},
"xaxis": {
'title': 'Categories',
'title_standoff' : 40,
'tickangle' : 45
},
}
},
{
'data':[
Scatter(
name = 'Precision',
x = metrics['Target Category'],
y = metrics['PRECISION'],
mode = 'lines'
),
Scatter(
name = 'Recall',
x = metrics['Target Category'],
y = metrics['RECALL'],
mode = 'lines'
),
Scatter(
name = 'F1 Score',
x = metrics['Target Category'],
y = metrics['F1'],
mode = 'lines'
)
],
'layout':{
'title': 'AdaBoost Model Performance Metrics',
"xaxis":{
'title': 'Categories',
'title_standoff': 100,
'tickangle': 45
},
"yaxis":{
'title': ""
}
}
},
{
'data': [
Bar(
name = 'Positive',
y=top_classes['categories'],
x=top_classes['true_proportion -1'],
orientation = 'h',
marker=dict(
color='rgba(246, 78, 139, 0.6)',
line=dict(color='rgba(246, 78, 139, 1.0)', width=3))
),
Bar(
name = 'Negative',
y=top_classes['categories'],
x=top_classes['false_proportion -0'],
orientation = 'h',
marker=dict(
color='rgba(58, 71, 80, 0.6)',
line=dict(color='rgba(58, 71, 80, 1.0)', width=3))
)
],
'layout':{
'barmode' : 'stack',
'title': 'Number of Negative-Positive class per category (top 5)',
"xaxis": {
'title': 'Number of messages'
},
"yaxis": {
'title': 'Categories',
'title_standoff' : 40,
'tickangle' : 45
},
# 'template': "seaborn"
}
},
{
'data': [
Bar(
x=genre_names,
y=genre_counts,
marker=dict(
color='rgba(174, 132, 255, 0.99)',
line=dict(color='rgba(174, 132, 255, 0.99)', width=3))
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
},
'template': "seaborn"
}
},
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('databoard.html', ids=ids, graphJSON=graphJSON )
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
``` |
{
"source": "joshasgard/Diversity_RepresentationInTech",
"score": 4
} |
#### File: joshasgard/Diversity_RepresentationInTech/Diversity_RepresentationInTech.py
```python
import pandas as pd
from collections import defaultdict
import numpy as np
def total_count(df, col1, col2, look_for):
'''
INPUT:
df - the pandas dataframe you want to search
col1 - the column name you want to look through
col2 - the column you want to count values from
look_for - a list of strings you want to search for in each row of df[col]
OUTPUT:
new_df - a dataframe of each look_for with the count of how often it shows up
'''
new_df = defaultdict(int)
    #loop through the list of values to look for
for val in look_for:
#loop through rows
for idx in range(df.shape[0]):
            #if the value appears in the row, add its count
if val in df[col1][idx]:
new_df[val] += int(df[col2][idx])
new_df = pd.DataFrame(pd.Series(new_df)).reset_index()
new_df.columns = [col1, col2]
    new_df.sort_values(col2, ascending=False, inplace=True)
return new_df
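# Hypothetical usage sketch (added for illustration): the dataframe and column
# names below are made up. total_count expects col1 to hold delimited category
# strings and col2 to hold integer counts.
def _example_total_count():
    demo = pd.DataFrame({'Method': ['Books; Online Courses', 'Online Courses'],
                         'count': [10, 5]})
    return total_count(demo, 'Method', 'count', ['Books', 'Online Courses'])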
def clean_race(race_item):
    '''
    INPUT
    race_item - line item of the dataframe column
    OUTPUT
    val - cleaned and streamlined data
    '''
    possible_race = ['White or of European descent', 'South Asian', 'Hispanic or Latino/Latina',
                     'East Asian', 'Middle Eastern', 'Native American, Pacific Islander, or Indigenous Australian',
                     'I prefer not to say', 'Black or of African descent', 'I don\'t know']
race_item = str(race_item)
for val in possible_race:
if val in race_item:
return val
else:
pass
``` |
{
"source": "joshas/PackFiles",
"score": 3
} |
#### File: PackFiles/pack_files/async_tar.py
```python
import os
import tarfile
import threading
class AsyncTar(threading.Thread):
def __init__(self, selected_files, archive_path, mode, callback):
threading.Thread.__init__(self)
self.selected_files = selected_files
self.archive_path = archive_path
self.mode = mode
self.callback = callback
def run(self):
tar = tarfile.open(self.archive_path, self.mode)
for file in self.selected_files:
tar.add(file, os.path.basename(file))
tar.close()
self.callback()
``` |
{
"source": "joshatron/DasherAssistant",
"score": 3
} |
#### File: DasherAssistant/assistant/Dash.py
```python
from assistant.Delivery import Delivery
from datetime import datetime
"""
Contains data for a single dash. Fields included are:
start: timestamp of when it began
end: timestamp of when it ended
region: the region the dash was worked in
total: total pay for the dash
additional: additional pay for the dash
deliveries: array of deliveries made in the dash
"""
class Dash:
    def __init__(self, start, end, region, total, additional, deliveries=None):
        self.start = start
        self.end = end
        self.region = region
        self.total = total
        self.additional = additional
        self.deliveries = deliveries if deliveries is not None else []
def addDelivery(self, restaurant, pay):
self.deliveries.append(Delivery(restaurant, pay))
def print(self):
print("start: " + self.start.strftime("%m/%d/%y %H:%M"))
print("end: " + self.end.strftime("%m/%d/%y %H:%M"))
print("region: " + self.region)
print("total pay: " + "${:,.2f}".format(self.total / 100.))
print("additional pay: " + "${:,.2f}".format(self.additional / 100.))
print("deliveries:")
length = 0
for d in self.deliveries:
if(len(d.restaurant) > length):
length = len(d.restaurant)
for d in self.deliveries:
print(("{:" + str(length + 2) + "}").format(d.restaurant) + "${:,.2f}".format(d.pay / 100.))
```
#### File: DasherAssistant/assistant/Import.py
```python
import json
import readline
from datetime import datetime
from assistant.Dash import Dash
from assistant.Dashes import Dashes
from assistant.Completer import Completer
'''
Imports dash data from a json file
'''
def importJSON(file):
jsonString = open(file).read()
data = json.loads(jsonString)
dashes = Dashes()
for dash in data:
d = Dash(datetime.strptime(dash["start"], "%m/%d/%y %H:%M"), datetime.strptime(dash["end"], "%m/%d/%y %H:%M"), dash["region"], dash["total"], dash["additional"])
for delivery in dash["deliveries"]:
d.addDelivery(delivery["restaurant"], delivery["pay"])
dashes.addDash(d)
return dashes
'''
Imports dash data from user input
'''
def importManual(restaurants, regions):
regionComp = Completer(regions)
restaurantComp = Completer(restaurants)
readline.parse_and_bind("tab: complete")
readline.set_completer_delims('')
start = 0
while True:
startStr = input("What is the start date and time (MM/DD/YY HH:MM)? ")
try:
start = datetime.strptime(startStr, "%m/%d/%y %H:%M")
            break
except:
print("Invalid date. Please use specified format")
end = 0
while True:
endStr = input("What is the end date and time (HH:MM or MM/DD/YY HH:MM)? ")
try:
end = datetime.strptime(str(start.month) + "/" + str(start.day) + "/" + str(start.year) + " " + endStr, "%m/%d/%Y %H:%M")
break
except:
try:
end = datetime.strptime(endStr, "%m/%d/%y %H:%M")
break
except:
print("Invalid date. Please use specified format")
readline.set_completer(regionComp.complete)
region = input("What is the region? ")
readline.set_completer(None)
total = 0
while True:
totalStr = input("What was the total pay? $")
try:
total = int(float(totalStr) * 100)
break
except:
print("Invalid input. Please enter a valid number")
additional = 0
while True:
additionalStr = input("What was the additional pay? $")
try:
additional = int(float(additionalStr) * 100)
break
except:
print("Invalid input. Please enter a valid number")
d = Dash(start, end, region, total, additional)
print("Type the restaurant name and pay for each delivery.")
print("Leave the restaurant blank to exit.")
while True:
readline.set_completer(restaurantComp.complete)
restaurant = input("What was the restaurant name? ")
readline.set_completer(None)
if(restaurant != ""):
pay = 0
while True:
payStr = input("What was the pay? $")
try:
pay = int(float(payStr) * 100)
break
except:
print("Invalid input. Please enter a valid number")
d.addDelivery(restaurant, pay)
else:
break
return d
``` |
{
"source": "joshatron/WiiRemote",
"score": 3
} |
#### File: WiiRemote/remote/StateExecutor.py
```python
from pyautogui import press, hotkey
class StateExecutor:
def buttonAPress(self):
press(' ')
def buttonBPress(self):
press('esc')
def button1Press(self):
hotkey('ctrl', 'alt', 'up')
def button2Press(self):
hotkey('ctrl', 'alt', 'down')
def buttonPlusPress(self):
press('volumeup')
def buttonMinusPress(self):
press('volumedown')
def buttonHomePress(self):
        # Press the shortcut six times in a row
        for _ in range(6):
            hotkey('ctrl', 'alt', 'up')
def buttonUpPress(self):
press('up')
def buttonDownPress(self):
press('down')
def buttonLeftPress(self):
press('left')
def buttonRightPress(self):
press('right')
``` |
{
"source": "joshavenue/data_science",
"score": 4
} |
#### File: joshavenue/data_science/class_3.py
```python
class Dataset:
def __init__(self, data):
self.data = data
def print_data(self, num_rows):
print(self.data[:num_rows])
nfl_dataset = Dataset(nfl_data)
nfl_dataset.print_data(5)
``` |
{
"source": "joshavenue/hardcore_challenge",
"score": 4
} |
#### File: joshavenue/hardcore_challenge/number_game_v0.1.py
```python
import random
def play_game():
try:
x = int(input('Enter your lower number : '))
y = int(input('Enter your upper number : '))
limit = int(input('How many times a player can guess? : '))
    except ValueError:
        print('Error within inputs.')
        return
random_number = random.randint(x,y)
guess_time = []
while len(guess_time) < limit:
try:
guess = int(input('Enter your guess : '))
except ValueError:
            print('That was not a number')
else:
if guess == random_number:
print('CORRECT! The number is indeed {}!'.format(random_number))
break
else:
print('Try again!')
guess_time.append(guess)
else:
print('GG. It was {}'.format(random_number))
try_again = input('Do you wish to try again? Y/N : ')
if try_again.lower() == 'y':
play_game()
elif try_again.lower() == 'n':
print('GG.')
else:
print('Not sure that was a yes or not.')
play_game()
``` |
{
"source": "joshavenue/object-oriented-python-note",
"score": 3
} |
#### File: joshavenue/object-oriented-python-note/class_2.1.py
```python
def praise(self):
return "You inspire me, {}".format(self.name)
def reassurance(self):
return "Chin up, {}. You'll get it next time!".format(self.name)
def feedback(self, grade):
if grade > 50:
return self.praise()
else:
return self.reassurance()
``` |
{
"source": "joshavenue/ongoing_challenge",
"score": 4
} |
#### File: joshavenue/ongoing_challenge/test.py
```python
from datetime import datetime
day_now = datetime.now()
print('Let\'s recruit people by age')
print('Today is {} / {} / {}'.format(day_now.day,day_now.month,day_now.year))
i = 0;
num_data = [1,2,3,4,5,6,7,8,9,0]
alphabet = ['abcdefghijklmnopqrstuvwxyz']
boolean_yes = ['y']
boolean_no = ['n']
name = []
age = []
def get_help():
    print('Type \'DONE\' when you are done.')
    print('Type \'CLEAN\' when you want to choose again.')
print('Type \'HELP\' when you need help.')
print('Type \'SHOW\' when you need to check your list.')
print('Type \'EXIT\' to stop the program.')
def clean_up_name():
del name[:]
def clean_up_age():
del age[:]
def erase_All():
del name[:]
del age[:]
def show_list_END():
print('You have recruited {} people today.'.format(len(name)))
    for names, ages in zip(name, age):
        print(names, ' - ', ages)
def show_list():
    for names, ages in zip(name, age):
        print(names, ' - ', ages)
def warning():
    # Check the most recently added name for digits
    if len(name) > 0 and any(char.isdigit() for char in name[-1]):
        print('Numbers are not allowed in names')
while True:
print('You have {} new recruits.'.format(len(name)))
new_name = input('NAME : ')
new_age = input('AGE :')
if new_name.lower() == 'DONE'.lower() or new_age.lower() == 'DONE'.lower():
break
    elif new_name.lower() == 'CLEAN'.lower() or new_age.lower() == 'CLEAN'.lower():
erase_All()
continue
elif new_name.lower() == "HELP".lower() or new_age.lower() == 'HELP'.lower():
get_help()
continue
elif new_name.lower() == 'EXIT'.lower():
raise SystemExit
elif new_name.lower() == 'SHOW'.lower() or new_age.lower() == 'SHOW'.lower():
show_list()
continue
else:
name.append(new_name)
age.append(new_age)
show_list_END()
``` |
{
"source": "joshavenue/python_notebook",
"score": 3
} |
#### File: joshavenue/python_notebook/global.py
```python
count = 0 # A global count variable
def remember():
global count
count += 1 # Count this invocation
print(str(count))
remember()
remember()
remember()
remember()
remember()
```
#### File: joshavenue/python_notebook/make_upper_case.py
```python
def sillycase(string):
    half = round(len(string)/2)  # Find the half index
    return string[:half].lower() + string[half:].upper()  # If you only want certain letters to be upper case
```
#### File: python_notebook/notebook2/lambda_1.py
```python
def polynomial(x):
return x**2 + 5*x + 4
print(polynomial(-4))
#lambda
print((lambda x: x**2 + 5*x + 4) (-4))
print((lambda y: y*2 + 5) (5))
def linear(y):
return y*2 + 5
print(linear(5))
```
#### File: python_notebook/notebook2/map_lambda.py
```python
def fiveex(x):
return x**x
nums = [1,2,3,5,7,11,13,17]
results = list(map(fiveex,nums))
print(results)
## The function map takes a function and an
## iterable as arguments, and returns a new iterable
## with the function applied to each argument.
result = list(map(lambda x:x**x,nums))
print(result)
## Works for lambda style as well
```
#### File: joshavenue/python_notebook/*variable.py
```python
def num(*nums):  # A single * takes in any number of arguments of one type, in this case ints
    total = 0
    for x in nums:
        total += x
    return total
num(22, 33, 44, 55, 66)  # You can pass as many numbers as you wish
def whatever(**kwargs):  # A double ** takes keyword arguments of mixed types, here str and int
    print(kwargs['first_name'])
    print(kwargs['last_name'])
    print(kwargs['age'])
whatever(first_name='John', last_name='Lee', age=22)  # The keyword arguments arrive as a dictionary
```
#### File: joshavenue/python_notebook/yield_next.py
```python
def gen():
yield 3 # Like return but one by one
yield 'wow'
yield -1
yield 1.2
x = gen()
print(next(x)) # Use next() function to go one by one
print(next(x))
print(next(x))
print(next(x))
```
#### File: joshavenue/python_notebook/zip_method.py
```python
def combo(a, b):
return list(zip(a, b))
``` |
{
"source": "joshavenue/TIL-python",
"score": 4
} |
#### File: joshavenue/TIL-python/def_try_except.py
```python
def add(x, y):  # Define a function with 2 arguments
    try:  # Try to run the block if both inputs can be converted to floats
        a = float(x)
        b = float(y)
        total = a + b  # Add them together
    except ValueError:  # If an input is not a number, return nothing
        return None
    else:
        return total  # If the try block succeeds, return the total
```
#### File: joshavenue/TIL-python/if_A_in_loop_continue.py
```python
def loopy(items):
# Code goes here
for item in items:
if item[0] == "a":
continue
else:
print(item)
```
#### File: joshavenue/TIL-python/random_letter_1.py
```python
import random
def random_item(x):
random_index = random.randint(0, len(x) -1)
random_item = x[random_index]
return random_item
x = 'hello everyone'
print(random_item(x))
``` |
{
"source": "Josh-Ay/tic-tac-toe",
"score": 4
} |
#### File: Josh-Ay/tic-tac-toe/main.py
```python
from random import choice
from brain import Brain
from board import Board
from clear import clear
def start_game():
print("===========================TIC-TAC-TOE GAME====================================")
brain, board = Brain(), Board()
brain.display_instructions()
brain.display_board(board.explanatory_board)
valid_input = False
user_marker, computer_marker, winner = "", "", None
while not valid_input:
user_answer = input("\nX or O? 😈: ")
if user_answer.upper() == "X":
valid_input, user_marker, computer_marker = True, "X", "O"
elif user_answer.upper() == "O":
valid_input, user_marker, computer_marker = True, "O", "X"
else:
print("Invalid input. Please enter 'x' or 'o'.")
def play():
nonlocal winner
accept_input = True
user_placement = 0
while accept_input:
try:
if len(brain.available_options) != 0:
user_placement = int(input("Where would you like to play? "))
else:
accept_input = False
except ValueError:
print("Please enter a number")
else:
if user_placement < 0:
print("Please enter a positive number")
elif user_placement == 0:
print("Please enter a value greater than 0")
elif user_placement > 9:
print("Please enter a number between 1 and 9")
else:
if len(brain.available_options) != 0:
if brain.check_available_options(user_placement):
board.place_marker_in_board(user_placement - 1, user_marker)
brain.remove_option_from_board(user_placement)
if len(brain.available_options) != 0:
computer_placement = choice(brain.available_options)
board.place_marker_in_board(computer_placement - 1, computer_marker)
brain.remove_option_from_board(computer_placement)
clear(), brain.display_board(board.starting_board)
if brain.check_for_3_in_a_row(board.starting_board)[0]:
accept_input = False
winner = brain.check_for_3_in_a_row(board.starting_board)[1]
else:
print("Sorry, that spot is already taken.")
else:
accept_input = False
play(), brain.display_winner(user_marker, winner)
play_again = input("Would you like to play again? (y/n): ")
if play_again.upper() == "Y":
clear(), start_game()
else:
print("\nThank you for playing! ")
start_game()
``` |
{
"source": "joshbaptiste/media_cache_cluster",
"score": 3
} |
#### File: joshbaptiste/media_cache_cluster/mcc.py
```python
import os
import logging
import configparser
import psutil
import scanner
log = logging.getLogger(__name__)
def parseConfig(configFile):
""" Parses config file """
options = []
config = configparser.ConfigParser()
config.read(configFile)
if config.has_section('global'):
sections = config.sections()
log.info("Loading Sections:")
        for section in sections:
            for key, value in config.items(section):
                log.info("Found section {}: {} = {}".format(section, key, value))
                options.append((section, key, value))
else:
        raise configparser.NoSectionError('global')
return options
def checkDiskUsage(path):
try:
du = psutil.disk_usage(path)
return du
except OSError:
log.error("Unable to check disk usage of: " + path)
def checkSymLinks(config):
"""
Scans Local directory which contain symlinks
"""
log.info("Scanning directory {} for symlinks")
scandir = scanner.Scanner()
for file in scandir.scanDirectory(config.LOCAL_DIR):
        stats = os.stat(file, follow_symlinks=False)
``` |
{
"source": "joshbarrass/go-bench-graph",
"score": 3
} |
#### File: joshbarrass/go-bench-graph/graph.py
```python
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
## create regex for getting data
data_regex = re.compile(r"""(\d+)\s*([\d\.]+) ns\/op$""")
ALLOWED_NAME_CHARS = "\w\d_"
ALLOWED_DIGIT_CHARS = "\d"
def format_regex(general_regex):
"""Takes a general regular expression and formats it into the three
required expressions. These are returned in the order: specific_regex,
label_regex, graph_regex
arg general_regex should contain three format tags:
{N} : This should be placed where the value of N can be found
{label} : This should be placed where the line label can be found
{graph} : (optional) This should be placed where the graph name
can be found. If ommitted, the name "default" will be used.
"""
disable_graph = False
if "{graph}" not in general_regex:
disable_graph = True
general_regex += "{graph}"
specific_regex = re.compile(general_regex.format(N="(["+ALLOWED_DIGIT_CHARS+"]+)",
label="["+ALLOWED_NAME_CHARS+"]+",
graph=("["+ALLOWED_NAME_CHARS+"]+") if not disable_graph else ""))
label_regex = re.compile(general_regex.format(N="["+ALLOWED_DIGIT_CHARS+"]+",
label="(["+ALLOWED_NAME_CHARS+"]+)",
graph=("["+ALLOWED_NAME_CHARS+"]+") if not disable_graph else ""))
graph_regex = re.compile(general_regex.format(N="["+ALLOWED_DIGIT_CHARS+"]+",
label="["+ALLOWED_NAME_CHARS+"]+",
graph=("(["+ALLOWED_NAME_CHARS+"]+)"))) if not disable_graph else None
return specific_regex, label_regex, graph_regex
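# Hypothetical usage sketch (added for illustration): for Go benchmark lines named
# like "BenchmarkSum_1000-8   500   2100 ns/op", a general expression of
# "Benchmark{label}_{N}-" yields both N and the line label; graph_regex is None
# here because the pattern contains no {graph} tag.
def _example_format_regex():
    specific, label, graph = format_regex(r"Benchmark{label}_{N}-")
    line = "BenchmarkSum_1000-8   500   2100 ns/op"
    return specific.search(line).group(1), label.search(line).group(1), graph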
if __name__ == "__main__":
from docopt import docopt
args = docopt(__doc__)
# print(args)
regex = args["<regex>"]
specific_regex, label_regex, graph_regex = format_regex(regex)
if args["--output-file"] is not None:
output_file = args["--output-file"]
else:
output_file = "graph.png"
show_only = args["--show-only"]
## data["graph"]["label"] = np.array
data = {}
## read data from stdin (so tests can be piped in)
for line in sys.stdin:
## identify the data
match = data_regex.search(line)
if match is None:
print("Ignoring non-matching line:", line)
continue
groups = match.groups()
to_add = [float(d) for d in list(groups)]
## identify the N value
match = specific_regex.search(line)
if match is None:
print("Ignoring non-matching line:", line)
continue
groups = match.groups()
N = float(groups[0])
to_add = [N]+to_add
## identify the label
match = label_regex.search(line)
if match is None:
print("Ignoring non-matching line:", line)
continue
groups = match.groups()
label = groups[0].replace("_", " ")
## identify the graph name
if graph_regex is not None:
match = graph_regex.search(line)
if match is None:
print("Ignoring non-matching line:", line)
continue
groups = match.groups()
graph = groups[0].replace("_", " ")
else:
graph = ""
## add to the data set
if graph not in data:
data[graph] = {}
if label not in data[graph]:
data[graph][label] = []
data[graph][label].append(to_add)
## make into numpy arrays
for key in data.keys():
for k, val in data[key].items():
data[key][k] = np.array(val)
## data has structure [n, iterations, time (ns)]
###################
# generate a plot #
###################
fig = plt.figure(figsize=(12*len(data), 12))
## construct a gridspec for this data
gs = gridspec.GridSpec(1,len(data))
## do the graphs in graph name order
graph_names = sorted([k for k in data.keys()], key=lambda x:x.lower())
for g in range(len(data)):
## Add next plot
ax = fig.add_subplot(gs[g])
graph_name = graph_names[g]
## Iterate through all the data for this graph
for key in data[graph_name].keys():
ax.plot(data[graph_name][key][:,0], data[graph_name][key][:,2], "o-", label=key)
ax.legend()
ax.set_title(graph_name)
ax.set_xlabel("N")
ax.set_ylabel("Time Taken, $ns$")
ax.set_xscale("log")
if show_only:
plt.show()
else:
plt.savefig(output_file)
``` |
{
"source": "joshbarrass/hptools",
"score": 3
} |
#### File: joshbarrass/hptools/anims.py
```python
import numpy as np
from scipy.spatial.transform import Rotation as Rot
from model import Vertex, Normal, Model
class Animation(object):
def __init__(self, frames, groups):
self.groups = groups
self.frames = frames
class Frame(object):
def __init__(self, index, subframes):
self.index = index
self.subframes = subframes
self.groups = len(subframes)
def apply_to_model(self, model):
"""\
Produces a new model with the rotation and translation
applied to the correct vertex groups.
"""
verts = model.verts
norms = model.norms
# transform all verts and norms
newverts = []
newnorms = []
for i in range(len(verts)):
group = verts[i].group
sf = self.subframes[group]
rot_matrix = sf.rot.as_dcm()
v_rotated = verts[i].r.dot(rot_matrix)
v_rotated += sf.trans
newvert = Vertex(v_rotated[0],
v_rotated[1],
v_rotated[2],
group)
n_rotated = norms[i].r.dot(rot_matrix)
newnorm = Normal(n_rotated[0],
n_rotated[1],
n_rotated[2],
group)
newverts.append(newvert)
newnorms.append(newnorm)
return Model(newverts, newnorms, model.faces[:], model.groups)
class Subframe(object):
def __init__(self, group, rot, trans):
self.group = group # redundant but good to store
self.rot = rot
self.trans = np.array(trans, dtype=np.float64)
class NewSubframe(Subframe):
def __init__(self, group, quat, trans, index):
"""\
Represents a subframe for a new style animation.
Takes the rotation quaternion and translation vector
in tuple or list format and converts them to rot
objects.
"""
# rearrange the quaternion to [x,y,z,w]
w, x, y, z = quat
quat_fixed = np.array([x, y, z, w])
# construct a scipy rotation object
rot = Rot.from_quat(quat_fixed)
super().__init__(group, rot, trans)
self.index = index
class OldSubframe(Subframe):
def __init__(self, group, matrix, trans):
"""\
Represents a subframe for an old style animation.
Takes the rotation matrix and translation vector
in tuple or list format and converts them to rot
objects.
"""
# construct a scipy rotation object
        rot = Rot.from_matrix(matrix)
super().__init__(group, rot, trans)
```
#### File: joshbarrass/hptools/img.py
```python
import struct
import os
from typing import List, Tuple, Union, Optional
import numpy as np
from PIL import Image
# sizes in bytes
FULLSCREEN_SIZE = 262144
STORY_SIZE = 123392
TEXT_SIZE = 72192
# dimensions
FULLSCREEN_DIM = (512, 256)
STORY_DIM = (512, 240)
TEXT_DIM = (448, 160)
def linear_to_image_array(pixels:List[List[int]], size:Tuple[int,int]) -> np.ndarray:
"""\
Converts a linear array ( shape=(width*height, channels) ) into an array
usable by PIL ( shape=(height, width, channels) )."""
a = np.array(pixels, dtype=np.uint8)
    split = np.split(a, [i*size[0] for i in range(1, size[1])])
return np.array(split, dtype=np.uint8)
def bit_selector(number:int, start:int, end:int, normalise:bool=False) -> Union[int, float]:
"""\
Select bits between start and end. Bit 0 is the least significant bit."""
val = (number>>start)&((2**(end-start+1))-1)
if normalise:
return val/((2**(end-start+1))-1)
return val
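# Hypothetical self-check (added for illustration): a 15-bit value with red
# (bits 0-4) and blue (bits 10-14) at full intensity and no green (bits 5-9).
def _example_bit_selector():
    sample = 0b0111110000011111
    red = bit_selector(sample, 0, 4)                        # 31
    green = bit_selector(sample, 5, 9)                      # 0
    red_norm = bit_selector(sample, 0, 4, normalise=True)   # 1.0
    return red, green, red_norm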
def read_IMG(fp:str, filesize:Optional[int]=None) -> bytes:
"""\
Reads in the IMG file at fp, returning the contents as a bytes-type. If
filesize is non-None, a ValueError will be raised if the size in bytes
of the file does not match the value in filesize."""
if filesize is not None:
if os.path.getsize(fp) != filesize:
raise ValueError("filesize does not match")
with open(fp, "rb") as f:
data = f.read()
return data
def convert_IMG(data:bytes, size:Tuple[int,int], alpha:bool=False) -> Image.Image:
"""\
Convert a .IMG file into a PIL Image. The contents of the .IMG file
should be supplied as the bytes-type parameter 'data'. These files should
be 15-bit colour, with the first bit from left to right being the
discarded bit. This bit does change across the image, so it probably has
some purpose, but at the moment I don't know what it's for. As far as I
know, there is no metadata. Image dimensions will need to be determined
manually. The first byte from right to left is red, then green, then blue.
Set alpha=True if you want to use the discard bit as an alpha mask."""
# read in the image file
## with open(fp, "rb") as f:
## data = f.read()
# convert the data into a linear array of pixel values
pixels = [] # type: List[List[int]]
for i in range(0,len(data),2): # select two bytes at a time
number = struct.unpack("H", data[i:i+2])[0]
r = int(round(bit_selector(number, 0, 4, True)*255))
g = int(round(bit_selector(number, 5, 9, True)*255))
b = int(round(bit_selector(number, 10, 14, True)*255))
pixel = [r,g,b]
if alpha:
pixel.append(int(bit_selector(number, 14, 15)*255))
pixels.append(pixel)
a = linear_to_image_array(pixels, size)
img_type = "RGBA" if alpha else "RGB"
return Image.fromarray(a, img_type)
def convert_palette_IMG(data:bytes, size:Tuple[int,int], second_palette:bool=False) -> Image.Image:
"""\
Convert a .IMG file that utilises a palette into a PIL Image. The first
512 bytes are 15-bit colours (the palette). After that, every value is
an 8-bit value that maps to the palette.
TODO: Second palette specifies whether the image has a second palette
available at the end of the file. This is used in things like the sky
images, as an "animation" palette for the lightning strike. Enabling
this will use the second palette instead of the primary palette."""
# read in the palette
palette = [] # type: List[int]
for i in range(0, 512, 2):
number = struct.unpack("H", data[i:i+2])[0]
r = int(round(bit_selector(number, 0, 4, True)*255))
g = int(round(bit_selector(number, 5, 9, True)*255))
b = int(round(bit_selector(number, 10, 14, True)*255))
colour = [r,g,b]
palette += colour # the palette has to be flat, i.e. [r,g,b,r,g,b...]
# read in the pixels
pixels = []
for i in range(512, len(data), 1):
number = struct.unpack("B", data[i:i+1])[0]
pixels.append(number)
a = linear_to_image_array(pixels, size)
img_type = "P"
im = Image.fromarray(a, img_type)
im.putpalette(palette)
return im
def convert_fullscreen(fp:str, alpha:bool=False) -> Image.Image:
"""\
Converts an image that should occupy the whole screen. These have a file size
of 262144 bytes and dimensions of 512x256.
"""
return convert_IMG(read_IMG(fp, FULLSCREEN_SIZE), FULLSCREEN_DIM, alpha)
def convert_LOAD(fp:str, alpha:bool=False) -> Image.Image:
"""\
Converts a loading screen image (LOADxx.IMG). This is an alias for
convert_fullscreen.
"""
return convert_fullscreen(fp, alpha)
def convert_STORY(fp:str) -> Image.Image:
"""\
Converts a story image (STORYxxx.IMG).
"""
return convert_palette_IMG(read_IMG(fp, STORY_SIZE), STORY_DIM)
def convert_TEXT(fp:str) -> Image.Image:
"""\
Converts a title text image (XX_TEXT.IMG).
"""
return convert_palette_IMG(read_IMG(fp, TEXT_SIZE), TEXT_DIM)
def convert_to_IMG(im:Image.Image, fp:str):
"""\
Converts a PIL image into a non-palette IMG file.
"""
# convert the IMG to RGB
im = im.convert("RGB")
# convert to numpy array
a = np.array(im)
# scale each channel to 5-bit
scaled = np.round((a/255)*31).astype(np.uint8)
# convert to raw bytes and write to file
raw_data = b""
for y in range(scaled.shape[0]):
for x in range(scaled.shape[1]):
n = scaled[y,x,2]
n = n << 5
n += scaled[y,x,1]
n = n << 5
n += scaled[y,x,0]
raw_data += struct.pack("H", n)
with open(fp, "wb") as f:
f.write(raw_data)
def convert_to_palette_IMG(im:Image.Image, fp:str):
"""\
Converts a PIL image into a palette IMG file.
"""
# convert the IMG to a palette
im = im.convert("P", palette=Image.ADAPTIVE, colors=255)
palette_bytes = im.getpalette()
palette = [(palette_bytes[i], palette_bytes[i+1], palette_bytes[i+2]) for i in range(0, len(palette_bytes), 3)]
HP_palette = palette[::-1]
# write the palette to the raw_data
raw_data = b""
for colour in HP_palette:
n = int(round((colour[2]/255)*31))
n = n << 5
n += int(round((colour[1]/255)*31))
n = n << 5
n += int(round((colour[0]/255)*31))
raw_data += struct.pack("H", n)
assert len(raw_data) == 512
# write the image to the raw_data
pix = im.load()
for y in range(im.size[1]):
for x in range(im.size[0]):
raw_data += struct.pack("B", HP_palette.index(palette[pix[x,y]]))
with open(fp, "wb") as f:
f.write(raw_data)
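# Hypothetical usage sketch (added for illustration); the file names are made up.
def _example_round_trip():
    im = convert_LOAD("LOAD01.IMG")                # full-screen 512x256 image
    im.save("load01.png")
    convert_to_IMG(Image.open("load01.png"), "LOAD01_NEW.IMG")
    # convert_STORY and convert_TEXT work the same way for the palette formats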
``` |
{
"source": "joshbarrass/IFailedTheClassTest",
"score": 3
} |
#### File: joshbarrass/IFailedTheClassTest/CelledImage.py
```python
from typing import Tuple, Union
from PIL import Image
class CelledImage:
def __init__(
self, fp: Union[str, Image.Image], cells: Tuple[int, int]
):
if isinstance(fp, str):
im = Image.open(fp)
self.im = im.copy()
im.close()
elif isinstance(fp, Image.Image):
self.im = fp.copy()
else:
raise TypeError
self.cells = cells
size = self.im.size
self.w = int(round(size[0] / cells[0]))
self.h = int(round(size[1] / cells[1]))
def _get_box(self, x, y):
if x >= self.cells[0]:
raise IndexError("index out of range")
while x < 0:
x += self.cells[0]
if y >= self.cells[1]:
raise IndexError("index out of range")
while y < 0:
y += self.cells[1]
u = self.w * x
u_prime = min(self.im.size[0] - 1, u + self.w)
v = self.h * y
v_prime = min(self.im.size[1] - 1, v + self.h)
return (u, v, u_prime, v_prime)
def __getitem__(self, xy: Tuple[int, int]):
assert len(xy) == 2
x, y = xy
box = self._get_box(x, y)
return self.im.crop(box)
def __setitem__(self, xy: Tuple[int, int], cell: Image.Image):
assert len(xy) == 2
x, y = xy
box = self._get_box(x, y)
sx = box[2] - box[0]
sy = box[3] - box[1]
self.im.paste(cell.resize((sx, sy), Image.ANTIALIAS), box)
def show(self, title=None, command=None):
self.im.show(title=title, command=command)
def save(self, fp, format=None, **params):
self.im.save(fp=fp, format=format, **params)
def open(
fp: Union[str, Image.Image], cells: Tuple[int, int]
) -> CelledImage:
"""Open an in-memory copy of an image, closing the original"""
return CelledImage(fp, cells)
def new(mode, size, cells, color=0):
newim = Image.new(mode, size, color)
return CelledImage(newim, cells)
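# Hypothetical usage sketch (added for illustration); the file name and grid
# size are made up.
def _example_usage():
    grid = open("letters.png", (13, 2))    # treat the image as a 13x2 grid of cells
    cell = grid[0, 1]                       # extract one cell as a PIL image
    grid[3, 0] = cell                       # paste it (resized) into another cell
    grid.save("letters_edited.png")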
```
#### File: joshbarrass/IFailedTheClassTest/rearranger.py
```python
import copy
import os
import random
import tempfile
import zipfile
import CelledImage
from mutations import MUTATIONS
PATTERN_FILE = "pattern.txt"
IMAGE_NAME = "image"
class NotEnoughLettersError(Exception):
pass
class Rearranger:
"""Class for handling the automated rearranging of letters in an image."""
def __init__(self, fp, pattern=None):
self.im = None
self.pattern = None
_, ext = os.path.splitext(fp)
if ext.lower() in [".zip", ".fuck"]:
# "custom" format that packages everything in one
self.load_zip(fp)
else:
if pattern is None:
raise ValueError(
"Can only use 'None' pattern if file contains a pattern"
)
self.load_other(fp, pattern)
def load_zip(self, fp):
"""Load an image and pattern from a zip file"""
with tempfile.TemporaryDirectory() as tmpdir:
with zipfile.ZipFile(fp, "r") as z:
files = z.namelist()
if not PATTERN_FILE in files:
raise TypeError("file is missing pattern")
image_ext = None
for f in files:
n, x = os.path.splitext(f)
if n == IMAGE_NAME:
image_ext = x
break
if image_ext is None:
raise TypeError("file is missing image")
extracted_pattern = z.extract(PATTERN_FILE, tmpdir)
extracted_image = z.extract(
IMAGE_NAME + image_ext, tmpdir
)
with open(extracted_pattern) as f:
self.pattern = f.read()
self.im = CelledImage.open(
extracted_image, self.pattern_size
)
self.process_pattern()
def load_other(self, fp, pattern):
"""Load and store an image and pattern separately."""
self.pattern = pattern
self.im = CelledImage.open(fp, self.pattern_size)
self.process_pattern()
@property
def pattern_size(self):
lines = self.pattern.splitlines()
return (max([len(line) for line in lines]), len(lines))
def process_pattern(self):
"""Turn the pattern into a letter dictionary. Stored as self.patterndict"""
# ensure lines are all the same length
lines = pad_lines(self.pattern)
self.patterndict = {}
for y in range(len(lines)):
for x in range(len(lines[0])):
char = lines[y][x]
if char not in self.patterndict:
self.patterndict[char] = []
self.patterndict[char].append((x, y))
def save_pattern(self, fp, image_ext=".png"):
"""Save an image together with it's pattern in a zip file"""
name, ext = os.path.splitext(fp)
if ext == "":
ext = ".fuck"
with tempfile.TemporaryDirectory() as tmpdir:
self.im.save(os.path.join(tmpdir, IMAGE_NAME + image_ext))
with open(os.path.join(tmpdir, PATTERN_FILE), "w") as f:
f.write(self.pattern)
with zipfile.ZipFile(name + ext, "w") as z:
z.write(
os.path.join(tmpdir, IMAGE_NAME + image_ext),
IMAGE_NAME + image_ext
)
z.write(
os.path.join(tmpdir, PATTERN_FILE), PATTERN_FILE
)
def rearrange(
self,
target,
allow_mutations=True,
allow_S_to_Z=False,
allow_duplicates=False,
space=None,
unlimited_spaces=True
):
"""Rearrange the letters into the desired pattern.
If allow_mutations is True, letters can be mutated to form missing letters.
If allow_S_to_Z is True, the letter S can be mutated into a Z and vice versa.
If allow_duplicates is True, letters can be reused. Implies unlimited_spaces.
If space is set to a coordinate (x,y), this coordinate will be used for
all spaces. Implies unlimited_spaces.
If unlimited_spaces is True, an unlimited number of spaces can be used.
"""
if isinstance(space, tuple):
unlimited_spaces = True
else:
space = None
if allow_duplicates:
unlimited_spaces = True
patterndict = copy.deepcopy(self.patterndict)
lines = pad_lines(target)
cells = (len(lines[0]), len(lines))
newim = CelledImage.new(
self.im.im.mode,
(self.im.w * cells[0], self.im.h * cells[1]), cells
)
for y in range(len(lines)):
for x in range(len(lines[0])):
char = lines[y][x]
mutator = None
# make sure the character is available
if char not in patterndict or len(
patterndict[char]
) == 0:
# check if the opposite case is available
if char.swapcase() in patterndict and len(
patterndict[char.swapcase()]
) > 0:
char = char.swapcase()
else:
char, mutator = self.mutate_letter(
char,
patterndict,
allow_S_to_Z=allow_S_to_Z,
)
if mutator is None:
raise NotEnoughLettersError(
"No {} remaining.".format(repr(char))
)
if char == " " and space is not None:
index = space
else:
index = random.choice(patterndict[char])
if (char == " " and not unlimited_spaces
) or (char != " " and not allow_duplicates):
patterndict[char].remove(index)
newcell = self.im[index[0], index[1]]
if mutator is not None:
newcell = mutator(newcell)
newim[x, y] = newcell
return newim
def mutate_letter(
self, target, patterndict, allow_S_to_Z=False, _first=True
):
"""If a letter is unavailable, attempt to produce it through a mutation
If a mutation can be done, will return the character and a mutation
function. If no mutation is available, will return (target, None).
Mutation functions will take an image in and return a new image.
"""
if target not in MUTATIONS:
if target.swapcase() in MUTATIONS:
target = target.swapcase()
else:
return (target, None)
for required in MUTATIONS[target].keys():
if (required == "S" or required == "s") and (
target == "Z" or target == "z"
) and not allow_S_to_Z:
continue
if required in patterndict and len(
patterndict[required]
) > 0:
return (required, MUTATIONS[target][required])
if _first:
return self.mutate_letter(
target.swapcase(),
patterndict,
allow_S_to_Z=allow_S_to_Z,
_first=False
)
return (target, None)
def pad_lines(string):
lines = string.splitlines()
required_length = max([len(line) for line in lines])
for i, line in enumerate(lines):
while len(lines[i]) < required_length:
lines[i] += " "
return lines
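# Hypothetical usage sketch (added for illustration); the file names and pattern
# are made up.
def _example_usage():
    r = Rearranger("alphabet.png", pattern="ABCDEFGHIJKLM\nNOPQRSTUVWXYZ")
    out = r.rearrange("HELLO\nWORLD", allow_duplicates=True)
    out.save("hello_world.png")
    r.save_pattern("alphabet.fuck")   # bundles the image and pattern into one zip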
``` |
{
"source": "joshbarrass/projection",
"score": 3
} |
#### File: joshbarrass/projection/screen.py
```python
import math
try:
import numpy as np
have_numpy = True
except ImportError:
have_numpy = False
class Screen(object):
def __init__(self, size, fov, centre=(0,0)):
self.size = size
self.centre = centre
self.set_fov(fov)
def set_fov(self, fov):
self.fov = fov
self.fov_rad = math.radians(fov)
width = self.size[0]
self.z = width/(2*math.tan(self.fov_rad/2))
def transform_coords(self, coords, add=True):
"""coord can be numpy array or any other iterable containing
3D coordinates
z coordinate is fairly meaningless after projecting as it should
always equal Screen.z. If z == 0 after projecting, then the original
z coordinate was behind the screen."""
isarray = isinstance(coords, np.ndarray)
if isarray:
if len(coords.shape) != 2 or coords.shape[1] != 3:
raise TypeError("coords must contain 3D coordinates")
else:
if len(coords[0]) != 3:
raise TypeError("coords must contain 3D coordiantes")
if isarray:
cc = [self.centre[0], self.centre[1], 0] # centre correction
zeros = np.zeros(coords.shape)
coords = coords.astype("float64")
if add:
coords[:,2] += self.z
mask = coords[:,2]>0
trans = coords[mask]
zeros[mask] = (trans-cc)*(self.z/trans[:,2])[:,None]+cc
return zeros
# else:
for i in range(len(coords)):
if add:
coords[i][2] += self.z
if coords[i][2] <= 0:
                coords[i] = [math.inf, math.inf, math.inf]
return [((c[0]-self.centre[0])*(self.z/c[2])+self.centre[0],
(c[1]-self.centre[1])*(self.z/c[2])+self.centre[1], self.z) if c[2] > 0 else (0,0,0) for c in coords]
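# Hypothetical usage sketch (added for illustration): project two 3D points onto
# a 640x480 screen with a 90 degree field of view. Requires numpy for the array
# branch; plain lists of [x, y, z] coordinates also work.
def _example_usage():
    screen = Screen((640, 480), 90, centre=(320, 240))
    pts = np.array([[0.0, 0.0, 10.0], [100.0, 50.0, 10.0]])
    return screen.transform_coords(pts)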
``` |
{
"source": "joshbarrass/spyctrum",
"score": 2
} |
#### File: spyctrum/audio/fixes.py
```python
def fix_RIFF_chunk_sizes(data):
"""Need to manually fix RIFF chunk size when using output piped from
ffmpeg. See https://github.com/kkroening/ffmpeg-python/issues/118
"""
riff_chunk_size = len(data) - 8
quotient = riff_chunk_size
binarray = list()
for _ in range(4):
quotient, remainder = divmod(quotient, 256) # every 8 bits
binarray.append(remainder)
riff = data[:4] + bytes(binarray) + data[8:]
return riff
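# Hypothetical usage sketch (added for illustration): patch WAV data piped out of
# ffmpeg before handing it to scipy. The argument holding the raw bytes is made up.
def _example_usage(raw_ffmpeg_bytes):
    import io
    from scipy.io import wavfile
    return wavfile.read(io.BytesIO(fix_RIFF_chunk_sizes(raw_ffmpeg_bytes)))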
```
#### File: spyctrum/audio/object.py
```python
import os
import numpy as np
from scipy.io import wavfile
from spyctrum.audio.reading import memread, tempread
from spyctrum.audio.fourier import get_chunk, ALIGN_CENTRAL
READ_MEMORY = 0
READ_TEMPFILE = 1
class Audio(object):
def __init__(self, fp, method=READ_MEMORY):
"""wrapper for audio data to simplify storing and tracking
# Args:
- fp: string, path to audio file
- method: int, either READ_MEMORY or READ_TEMPFILE. Specifies how the file
will be read.
"""
self.fp = os.path.abspath(os.path.expanduser(fp))
        if os.path.splitext(self.fp)[1].lower() == ".wav":
# don't need ffmpeg to read .wav, just use scipy directly
self.rate, self.data = wavfile.read(self.fp)
elif method == READ_MEMORY:
self.rate, self.data = memread(self.fp)
elif method == READ_TEMPFILE:
self.rate, self.data = tempread(self.fp)
else:
raise ValueError("'method' should be either READ_MEMORY or READ_TEMPFILE")
# TODO: currently the fourier output is complex, but the input
# signal is real. Need to remove the complex part and the negative
# frequencies
def fourierChunk(self, timestamp, chunk_size, alignment=ALIGN_CENTRAL, mono=True):
"""Uses audio.fourier.get_chunk to get a chunk from the audio data and
perform the Fourier transform of it.
## Args:
- timestamp: float, time from the start of the audio, in seconds, to
the point of about which you wish to sample
- chunk_size: int, number of samples to include in the chunk
- alignment: int, how to align the chunk to the timestamp. Default:
ALIGN_CENTRAL
- mono: bool, whether to make the data mono before performing the FFT
## Returns
- freqdata: array, the FFT of the chunk of data. The negative
frequencies will be removed, but the coefficients will still be
complex. For an array corresponding to the frequencies in this array,
use Audio.fourierFreq
"""
chunk = get_chunk(self.data, timestamp, self.rate, chunk_size, alignment)
# add without averaging/normalising to avoid decreasing
# amplitude of signal
#
# see https://stackoverflow.com/questions/23504901/convert-16-bit-stereo-sound-to-16-bit-mono-sound#comment36048770_23505029
if mono:
chunk = np.sum(chunk, axis=1)
fftdata = np.fft.fft(chunk, axis=0)
# remove negative frequencies since audio is a solely real
# signal
N = len(fftdata)
posfftdata = fftdata[0:int((N + 1) / 2)]
return posfftdata
def fourierFreq(self, chunk_size):
"""Returns the frequency array for a given chunk size. The negative
frequencies will be removed.
## Args:
- chunk_size: int, number of samples in your chunk
## Returns:
- f: array, contains the frequencies in the FFT
"""
A = np.fft.fftfreq(chunk_size, 1.0 / self.rate)
N = len(A)
return A[0:int((N + 1) / 2)]
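# Hypothetical usage sketch (added for illustration); the file name is made up.
def _example_usage():
    audio = Audio("song.mp3")
    spectrum = audio.fourierChunk(timestamp=30.0, chunk_size=4096)
    freqs = audio.fourierFreq(4096)
    return freqs, np.abs(spectrum)   # magnitude at each positive frequency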
```
#### File: spyctrum/test/test_ffmpeg.py
```python
import unittest
import subprocess
import spyctrum.audio.ffmpeg
class FFMPEGTests(unittest.TestCase):
def test_call_normal(self):
p = spyctrum.audio.ffmpeg.call(["-h"])
self.assertTrue(isinstance(p, subprocess.Popen))
def test_call_err(self):
with self.assertRaises(TypeError):
spyctrum.audio.ffmpeg.call("test")
``` |
{
"source": "JoshBarr/python-mammoth",
"score": 3
} |
#### File: python-mammoth/mammoth/document_matchers.py
```python
import collections
def paragraph(style_name=None, numbering=None):
return ParagraphMatcher(style_name, numbering)
ParagraphMatcher = collections.namedtuple("ParagraphMatcher", ["style_name", "numbering"])
ParagraphMatcher.element_type = "paragraph"
def run(style_name=None):
return RunMatcher(style_name)
RunMatcher = collections.namedtuple("RunMatcher", ["style_name"])
RunMatcher.element_type = "run"
```
#### File: python-mammoth/mammoth/__init__.py
```python
from .results import Result
from . import docx, conversion, options
def convert_to_html(fileobj, transform_document=None, **kwargs):
if transform_document is None:
transform_document = lambda x: x
return docx.read(fileobj).map(transform_document).bind(lambda document:
conversion.convert_document_element_to_html(
document,
**options.read_options(kwargs)
)
)
```
#### File: mammoth/style_reader/html_path_reader.py
```python
from parsimonious.grammar import Grammar
from .. import html_paths
def read_html_path(string):
path_node = _grammar.parse(string)
return read_html_path_node(path_node)
def read_html_path_node(path_node):
elements = [
_read_element_node(child)
for child in _repeated_children_with_separator(path_node, has_whitespace=True)
]
return html_paths.path(elements)
def _read_element_node(node):
tag_names = _read_tag_names_node(node.children[0])
class_names = _read_class_names_node(node.children[1])
fresh = _read_fresh_node(node.children[2])
return html_paths.element(tag_names, class_names=class_names, fresh=fresh)
def _read_tag_names_node(node):
return [
child.text
for child in _repeated_children_with_separator(node, has_whitespace=False)
]
def _read_class_names_node(class_names_node):
return [
_read_class_name_node(node)
for node in class_names_node.children
]
def _read_class_name_node(node):
return node.children[1].text
def _read_fresh_node(node):
return len(node.children) > 0
def _repeated_children_with_separator(node, has_whitespace):
yield node.children[0]
if has_whitespace:
sequence_node_index = 3
else:
sequence_node_index = 1
sequence_node = node.children[1]
for child in sequence_node.children:
yield child.children[sequence_node_index]
grammar_text = r"""
html_path = element (whitespace* ">" whitespace* element)*
element = tag_names class_name* fresh?
tag_names = identifier ("|" identifier)*
class_name = "." identifier
fresh = ":fresh"
identifier = ~"[A-Z0-9]*"i
whitespace = ~"\s"*
"""
_grammar = Grammar(grammar_text)
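# Hypothetical usage sketch (added for illustration): parse a style-map HTML path
# of two elements, the first with a class name and the "fresh" modifier.
def _example_usage():
    return read_html_path("p.tip:fresh > strong")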
```
#### File: tests/docx/document_xml_tests.py
```python
import io
from nose.tools import istest, assert_equal
import funk
from mammoth import documents, results
from mammoth.docx.xmlparser import element as xml_element, text as xml_text
from mammoth.docx.document_xml import read_document_xml_element
from mammoth.docx.numbering_xml import Numbering
from mammoth.docx.relationships_xml import Relationships, Relationship
@istest
class ReadXmlElementTests(object):
@istest
def text_from_text_element_is_read(self):
element = _text_element("Hello!")
assert_equal(documents.Text("Hello!"), _read_and_get_document_xml_element(element))
@istest
def can_read_text_within_run(self):
element = _run_element_with_text("Hello!")
assert_equal(
documents.run([documents.Text("Hello!")]),
_read_and_get_document_xml_element(element)
)
@istest
def can_read_text_within_paragraph(self):
element = _paragraph_element_with_text("Hello!")
assert_equal(
documents.paragraph([documents.run([documents.Text("Hello!")])]),
_read_and_get_document_xml_element(element)
)
@istest
def can_read_text_within_document(self):
element = _document_element_with_text("Hello!")
assert_equal(
documents.Document([documents.paragraph([documents.run([documents.Text("Hello!")])])]),
_read_and_get_document_xml_element(element)
)
@istest
def paragraph_has_no_style_if_it_has_no_properties(self):
element = xml_element("w:p")
assert_equal(None, _read_and_get_document_xml_element(element).style_name)
@istest
def paragraph_has_style_name_read_from_paragraph_properties_if_present(self):
style_xml = xml_element("w:pStyle", {"w:val": "Heading1"})
properties_xml = xml_element("w:pPr", {}, [style_xml])
paragraph_xml = xml_element("w:p", {}, [properties_xml])
paragraph = _read_and_get_document_xml_element(paragraph_xml)
assert_equal("Heading1", paragraph.style_name)
@istest
def paragraph_has_no_numbering_if_it_has_no_numbering_properties(self):
element = xml_element("w:p")
assert_equal(None, _read_and_get_document_xml_element(element).numbering)
@istest
def paragraph_has_numbering_properties_from_paragraph_properties_if_present(self):
numbering_properties_xml = xml_element("w:numPr", {}, [
xml_element("w:ilvl", {"w:val": "1"}),
xml_element("w:numId", {"w:val": "42"}),
])
properties_xml = xml_element("w:pPr", {}, [numbering_properties_xml])
paragraph_xml = xml_element("w:p", {}, [properties_xml])
numbering = Numbering({"42": {"1": documents.numbering_level("1", True)}})
paragraph = _read_and_get_document_xml_element(paragraph_xml, numbering=numbering)
assert_equal("1", paragraph.numbering.level_index)
assert_equal(True, paragraph.numbering.is_ordered)
@istest
def run_has_no_style_if_it_has_no_properties(self):
element = xml_element("w:r")
assert_equal(None, _read_and_get_document_xml_element(element).style_name)
@istest
def run_has_style_name_read_from_run_properties_if_present(self):
style_xml = xml_element("w:rStyle", {"w:val": "Emphasis"})
run = self._read_run_with_properties([style_xml])
assert_equal("Emphasis", run.style_name)
@istest
def run_is_not_bold_if_bold_element_is_not_present(self):
run = self._read_run_with_properties([])
assert_equal(False, run.is_bold)
@istest
def run_is_bold_if_bold_element_is_present(self):
run = self._read_run_with_properties([xml_element("w:b")])
assert_equal(True, run.is_bold)
@istest
def run_is_not_italic_if_italic_element_is_not_present(self):
run = self._read_run_with_properties([])
assert_equal(False, run.is_italic)
@istest
def run_is_italic_if_italic_element_is_present(self):
run = self._read_run_with_properties([xml_element("w:i")])
assert_equal(True, run.is_italic)
def _read_run_with_properties(self, properties):
properties_xml = xml_element("w:rPr", {}, properties)
run_xml = xml_element("w:r", {}, [properties_xml])
return _read_and_get_document_xml_element(run_xml)
@istest
def can_read_tab_element(self):
element = xml_element("w:tab")
tab = _read_and_get_document_xml_element(element)
assert_equal(documents.tab(), tab)
@istest
def children_of_w_ins_are_converted_normally(self):
element = xml_element("w:p", {}, [
xml_element("w:ins", {}, [
xml_element("w:r")
])
])
assert_equal(
documents.paragraph([documents.run([])]),
_read_and_get_document_xml_element(element)
)
@istest
def children_of_w_smart_tag_are_converted_normally(self):
element = xml_element("w:p", {}, [
xml_element("w:smartTag", {}, [
xml_element("w:r")
])
])
assert_equal(
documents.paragraph([documents.run([])]),
_read_and_get_document_xml_element(element)
)
@istest
def hyperlink_is_read_if_it_has_a_relationship_id(self):
relationships = Relationships({
"r42": Relationship(target="http://example.com")
})
run_element = xml_element("w:r")
element = xml_element("w:hyperlink", {"r:id": "r42"}, [run_element])
assert_equal(
documents.hyperlink("http://example.com", [documents.run([])]),
_read_and_get_document_xml_element(element, relationships=relationships)
)
@istest
def hyperlink_is_ignored_if_it_does_not_have_a_relationship_id(self):
run_element = xml_element("w:r")
element = xml_element("w:hyperlink", {}, [run_element])
assert_equal(
[documents.run([])],
_read_and_get_document_xml_element(element)
)
@istest
@funk.with_context
def can_read_inline_pictures(self, context):
drawing_element = _create_inline_image(
relationship_id="rId5",
description="It's a hat",
)
image_bytes = b"Not an image at all!"
relationships = Relationships({
"rId5": Relationship(target="media/hat.png")
})
docx_file = context.mock()
funk.allows(docx_file).open("word/media/hat.png").returns(io.BytesIO(image_bytes))
content_types = context.mock()
funk.allows(content_types).find_content_type("word/media/hat.png").returns("image/png")
image = _read_and_get_document_xml_element(
drawing_element,
content_types=content_types,
relationships=relationships,
docx_file=docx_file,
)[0]
assert_equal(documents.Image, type(image))
assert_equal("It's a hat", image.alt_text)
assert_equal("image/png", image.content_type)
with image.open() as image_file:
assert_equal(image_bytes, image_file.read())
@istest
@funk.with_context
def can_read_anchored_pictures(self, context):
drawing_element = _create_anchored_image(
relationship_id="rId5",
description="It's a hat",
)
image_bytes = b"Not an image at all!"
relationships = Relationships({
"rId5": Relationship(target="media/hat.png")
})
docx_file = context.mock()
funk.allows(docx_file).open("word/media/hat.png").returns(io.BytesIO(image_bytes))
content_types = context.mock()
funk.allows(content_types).find_content_type("word/media/hat.png").returns("image/png")
image = _read_and_get_document_xml_element(
drawing_element,
content_types=content_types,
relationships=relationships,
docx_file=docx_file,
)[0]
assert_equal(documents.Image, type(image))
assert_equal("It's a hat", image.alt_text)
assert_equal("image/png", image.content_type)
with image.open() as image_file:
assert_equal(image_bytes, image_file.read())
@istest
def ignored_elements_are_ignored_without_message(self):
element = xml_element("w:bookmarkStart")
result = read_document_xml_element(element)
assert_equal(None, result.value)
assert_equal([], result.messages)
@istest
def unrecognised_elements_emit_warning(self):
element = xml_element("w:huh", {}, [])
result = read_document_xml_element(element)
expected_warning = results.warning("An unrecognised element was ignored: w:huh")
assert_equal([expected_warning], result.messages)
@istest
def unrecognised_elements_are_ignored(self):
element = xml_element("w:huh", {}, [])
assert_equal(None, read_document_xml_element(element).value)
@istest
def unrecognised_children_are_ignored(self):
element = xml_element("w:r", {}, [_text_element("Hello!"), xml_element("w:huh", {}, [])])
assert_equal(
documents.run([documents.Text("Hello!")]),
read_document_xml_element(element).value
)
def _read_and_get_document_xml_element(*args, **kwargs):
result = read_document_xml_element(*args, **kwargs)
assert_equal([], result.messages)
return result.value
def _document_element_with_text(text):
return xml_element("w:document", {}, [
xml_element("w:body", {}, [_paragraph_element_with_text(text)])
])
def _paragraph_element_with_text(text):
return xml_element("w:p", {}, [_run_element_with_text(text)])
def _run_element_with_text(text):
return xml_element("w:r", {}, [_text_element(text)])
def _text_element(value):
return xml_element("w:t", {}, [xml_text(value)])
def _create_inline_image(description, relationship_id):
return xml_element("w:drawing", {}, [
xml_element("wp:inline", {}, _create_image_elements(description, relationship_id))
])
def _create_anchored_image(description, relationship_id):
return xml_element("w:drawing", {}, [
xml_element("wp:anchor", {}, _create_image_elements(description, relationship_id))
])
def _create_image_elements(description, relationship_id):
return [
xml_element("wp:docPr", {"descr": description}),
xml_element("a:graphic", {}, [
xml_element("a:graphicData", {}, [
xml_element("pic:pic", {}, [
xml_element("pic:blipFill", {}, [
xml_element("a:blip", {"r:embed": relationship_id})
])
])
])
])
]
``` |
{
"source": "joshbaskaran/xzceb-flask_eng_fr",
"score": 3
} |
#### File: machinetranslation/tests/test.py
```python
import unittest
from translator import english_to_french, french_to_english
class TestTranslator(unittest.TestCase):
def test_english_to_french_null(self):
self.assertNotEqual(english_to_french(''), None)
def test_english_to_french_equal(self):
self.assertEqual(english_to_french('Hello'), 'Bonjour')
def test_french_to_english_null(self):
self.assertNotEqual(french_to_english(''), None)
def test_french_to_english_equal(self):
self.assertEqual(french_to_english('Bonjour'), 'Hello')
if __name__ == '__main__':
unittest.main()
```
#### File: final_project/machinetranslation/translator.py
```python
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2021-08-16',
authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(english_text):
"""
Translate text from English to French
"""
if english_text == '':
return ''
translation = language_translator.translate(
        text=english_text,
model_id='en-fr').get_result()
return translation.get('translations')[0].get('translation')
def french_to_english(french_text):
"""
Translate text from French to English
"""
if french_text == '':
return ''
translation = language_translator.translate(
        text=french_text,
        model_id='fr-en').get_result()
return translation.get('translations')[0].get('translation')
``` |
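A quick interactive sketch of the two helpers. It assumes a `.env` file (or environment variables) providing the `apikey` and `url` credentials, since the module reads those at import time:

```python
# Usage sketch; requires `apikey` and `url` for the IBM Watson Language
# Translator to be available at import time, as translator.py expects.
from translator import english_to_french, french_to_english

print(english_to_french("Hello"))    # expected "Bonjour"
print(french_to_english("Bonjour"))  # expected "Hello"
print(english_to_french(""))         # empty input short-circuits to ""
```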
{
"source": "joshbatchelor/cartridge-stats",
"score": 2
} |
#### File: cartridge-stats/templatetags/cartridge_charts.py
```python
from __future__ import unicode_literals
import datetime
from django.db import connection
from django.db.models import Sum, Count
from django.utils.timezone import localtime, now
from datetime import timedelta
from mezzanine import template
from mezzanine.conf import settings
from cartridge.shop.models import Order, OrderItem
register = template.Library()
@register.inclusion_tag("admin/includes/sales_today.html", takes_context=True)
def sales_today(context):
"""
    Admin dashboard tag for cartridge showing today's sales total, excluding tax and shipping.
"""
today_min = localtime(now()).replace(hour=0, minute=0, second=0, microsecond=0)
today_max = localtime(now()).replace(hour=23, minute=59, second=59, microsecond=59)
output = Order.objects.filter(time__range=(today_min, today_max),status__in=settings.SHOP_ORDER_STATUS_COMPLETE).aggregate(Sum('item_total'))
context["daily_total"] = output
context["daily_date"] = today_min
return context
@register.inclusion_tag("admin/includes/sales_yesterday.html", takes_context=True)
def sales_yesterday(context):
"""
    Admin dashboard tag for cartridge showing yesterday's sales total, excluding tax and shipping.
"""
yesterday_min = localtime(now() + timedelta(days=-1)).replace(hour=0, minute=0, second=0, microsecond=0)
yesterday_max = localtime(now() + timedelta(days=-1)).replace(hour=23, minute=59, second=59, microsecond=59)
output = Order.objects.filter(time__range=(yesterday_min, yesterday_max),status__in=settings.SHOP_ORDER_STATUS_COMPLETE).aggregate(Sum('item_total'))
context["yesterday_total"] = output
context["yesterday_date"] = yesterday_min
return context
@register.inclusion_tag("admin/includes/sales_past_7_days.html", takes_context=True)
def sales_past_7_days(context):
"""
    Admin dashboard tag for cartridge showing the sales total for the past 7 days, excluding tax and shipping.
"""
week_min = localtime(now() + timedelta(days=-7)).replace(hour=0, minute=0, second=0, microsecond=0)
week_max = localtime(now()).replace(hour=23, minute=59, second=59, microsecond=59)
output = Order.objects.filter(time__range=(week_min, week_max),status__in=settings.SHOP_ORDER_STATUS_COMPLETE).aggregate(Sum('item_total'))
context["week_total"] = output
context["week_min"] = week_min
context["week_max"] = week_max
return context
@register.inclusion_tag("admin/includes/sales_current_month.html", takes_context=True)
def sales_current_month(context):
"""
    Admin dashboard tag for cartridge showing the sales total for the current month, excluding tax and shipping.
"""
today = localtime(now())
output = Order.objects.filter(time__month=today.month,status__in=settings.SHOP_ORDER_STATUS_COMPLETE).aggregate(Sum('item_total'))
context["month_total"] = output
context["today"] = today
return context
@register.inclusion_tag("admin/includes/sales_item_popularity.html", takes_context=True)
def popular_items(context):
items = OrderItem.objects.values('sku','description').filter(order__status__in=settings.SHOP_ORDER_STATUS_COMPLETE).annotate(sku_count=Count('sku')).order_by('-sku_count')
context["popular_items"] = items
return context
@register.inclusion_tag("admin/includes/sales_item_popularity_today.html", takes_context=True)
def popular_items_today(context):
today_min = localtime(now()).replace(hour=0, minute=0, second=0, microsecond=0)
today_max = localtime(now()).replace(hour=23, minute=59, second=59, microsecond=59)
items = OrderItem.objects.values('sku','description').filter(order__time__range=(today_min, today_max),order__status__in=settings.SHOP_ORDER_STATUS_COMPLETE).annotate(sku_count=Count('sku')).order_by('-sku_count')
context["popular_items"] = items
return context
@register.inclusion_tag("admin/includes/sales_by_month.html", takes_context=True)
def sales_by_month(context):
month_min = localtime(now() + timedelta(days=-365)).replace(hour=0, minute=0, second=0, microsecond=0)
truncate_date = connection.ops.date_trunc_sql('month', 'time')
qs = Order.objects.extra({'month':truncate_date}).filter(status__in=[2,4],time__gte=month_min)
reports = qs.values('month').annotate(Sum('item_total'),\
Count('pk')).order_by('month')
context["sales_by_months"] = reports
return context
``` |
{
"source": "JoshBBOXX/py-smsframework",
"score": 3
} |
#### File: smsframework/data/OutgoingMessage.py
```python
from .OutgoingMessageOptions import OutgoingMessageOptions
from ..lib import digits_only
class OutgoingMessage(object):
""" Outgoing Message: Mobile Terminated (MT)
Represents a message that's being sent or was sent to the provider
"""
#: Routing values
routing_values = None
#: Unique message id, populated by the provider on send
msgid = None
#: Provider-dependent message info dict, populated by the provider on send
meta = None
def __init__(self, dst, body, src=None, provider=None):
""" Create a message for sending
:type dst: str | None
:param dst: Destination phone number. Non-digit chars are cut off
:type body: str | unicode
:param body: Message
:type src: str | None
:param src: Source phone number. Non-digit chars are cut off
:type provider: str | None
:param provider: Provider name to use for sending.
If not specified explicitly, the message will be routed using the routing values:
see :meth:`OutgoingMessage.route`
"""
self.src = src
self.dst = digits_only(dst)
self.body = body
self.provider = provider
#: Sending options for the Gateway
self.provider_options = OutgoingMessageOptions()
#: Provider-dependent sending parameters
self.provider_params = {}
def options(self, **kwargs):
""" Specify sending options for the Gateway.
See: :class:`OutgoingMessageOptions`
:param allow_reply: Replies allowed?
:param status_report: Request a status report from the network?
:param expires: Message validity period, minutes
:param senderId: Sender ID to replace the number
        :param escalate: Is this a high-priority message? These are delivered faster but cost more.
:rtype: OutgoingMessage
"""
self.provider_options.__dict__.update(kwargs)
return self
def params(self, **params):
""" Specify provider-specific sending parameters
:rtype: OutgoingMessage
"""
self.provider_params = params
return self
def route(self, *args):
""" Specify arbitrary routing values.
        These are used by the Gateway's routing function to decide which provider to use for the message
        (if no provider was explicitly specified).
        If no routing values are provided at all, the default route is used.
:rtype: OutgoingMessage
"""
self.routing_values = args
return self
def __repr__(self):
return '{cls}({dst!r}, {body!r}, src={src!r}, provider={provider!r}, msgid={msgid!r})'.format(
cls=self.__class__.__name__,
dst=self.dst,
body=self.body,
src=self.src,
provider=self.provider,
msgid=self.msgid
)
```
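The class is designed as a fluent builder. A short usage sketch; the package re-exports `OutgoingMessage` at the top level (see the tests further below), and the `priority` key passed to `params()` is a placeholder rather than anything this repository defines:

```python
# Fluent-API sketch of the class above.
from smsframework import OutgoingMessage

message = (
    OutgoingMessage("+1 (234) 567-8901", "Hello!", src="100")
    .options(senderId="MyApp", expires=60, status_report=True)  # gateway-level options
    .params(priority=2)                                         # provider-dependent parameters (placeholder key)
    .route("notifications", "US")                               # arbitrary routing values
)

print(message.dst)  # "12345678901" - non-digit characters are stripped
print(message)      # repr includes dst, body, src, provider and msgid
```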
#### File: py-smsframework/smsframework/IProvider.py
```python
from abc import abstractmethod
class IProvider(object):
""" SmsFramework provider interface
Implements methods to interact with the :class:`smsframework.Gateway`
"""
def __init__(self, gateway, name, **config):
""" Initialize the provider
:type gateway: Gateway
:param gateway: Parent Gateway
:type name: str
:param name: Provider name. Used to uniquely identify the provider
:param config: Provider-dependent configuration
"""
self.gateway = gateway
self.name = name
def send(self, message):
""" Send a message
Providers are required to:
* Populate `message.msgid` and `message.meta` on completion
* Expect that `message.src` can be empty
* Support both ASCII and Unicode messages
* Use `message.params` for provider-dependent configuration
* Raise exceptions from `exc.py` for errors
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: OutgoingMessage
:returns: The sent message with populated fields
:raises MessageSendError: sending errors
"""
raise NotImplementedError('Provider.send not implemented')
def make_receiver_blueprint(self):
""" Get a Blueprint for the HTTP receiver
:rtype: flask.Blueprint
:returns: configured Flask Blueprint receiver
:raises NotImplementedError: Provider does not support message reception
"""
raise NotImplementedError('Provider does not support message reception')
#region Receiver callbacks
def _receive_message(self, message):
""" Incoming message callback
Calls Gateway.onReceive event hook
Providers are required to:
* Cast phone numbers to digits-only
* Support both ASCII and Unicode messages
* Populate `message.msgid` and `message.meta` fields
* If this method fails with an exception, the provider is required to respond with an error to the service
:type message: IncomingMessage
:param message: The received message
:rtype: IncomingMessage
"""
# Populate fields
message.provider = self.name
# Fire the event hook
self.gateway.onReceive(message)
# Finish
return message
def _receive_status(self, status):
""" Incoming status callback
Calls Gateway.onStatus event hook
Providers are required to:
* Cast phone numbers to digits-only
* Use proper MessageStatus subclasses
* Populate `status.msgid` and `status.meta` fields
* If this method fails with an exception, the provider is required to respond with an error to the service
:type status: MessageStatus
:param status: The received status
:rtype: MessageStatus
"""
# Populate fields
status.provider = self.name
# Fire the event hook
self.gateway.onStatus(status)
# Finish
return status
#endregion
```
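For reference, a minimal provider sketch built against this interface. The in-memory msgid counter is an illustrative assumption, not the behaviour of any bundled provider:

```python
# Minimal provider sketch against the IProvider interface above.
from smsframework.IProvider import IProvider

class DummyProvider(IProvider):
    """Pretends to send messages and keeps them in memory."""

    def __init__(self, gateway, name, **config):
        super(DummyProvider, self).__init__(gateway, name, **config)
        self.sent = []

    def send(self, message):
        message.msgid = str(len(self.sent) + 1)  # providers must populate msgid...
        message.meta = {}                        # ...and meta on completion
        self.sent.append(message)
        return message
```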
#### File: smsframework/lib/__init__.py
```python
import re
def digits_only(num):
""" Remove all non-digit characters from the phone number
:type num: str
:param num: Phone number
:rtype: str
"""
return re.sub(r'[^\d]+', '', num)
```
#### File: smsframework/providers/log.py
```python
import logging
from .null import NullProvider
class LogProvider(NullProvider):
""" Log Provider
Logs the outgoing messages to a python logger provided as the config option.
Configuration: target logger
Sending: does nothing, increments message.msgid, prints the message to the log
Receipt: Not implemented
Status: Not implemented
"""
def __init__(self, gateway, name, logger=None):
""" Configure provider
:type logger: logging.Logger | None
:param logger: The logger to use. Default logger is used if nothing provided
"""
super(LogProvider, self).__init__(gateway, name)
self.logger = logger or logging.getLogger(__name__)
def send(self, message):
# Log
self.logger.info('Sent SMS to {message.dst}: {message.body}'.format(message=message))
# Finish
return super(LogProvider, self).send(message)
```
#### File: py-smsframework/tests/log_test.py
```python
import unittest
from testfixtures import LogCapture
from smsframework import Gateway
from smsframework.providers import LogProvider
from smsframework import OutgoingMessage
class LogProviderTest(unittest.TestCase):
    """ Test LogProvider """
def setUp(self):
self.gw = Gateway()
# Providers
self.gw.add_provider('main', LogProvider)
def test_basic_send(self):
with LogCapture() as l:
msg = self.gw.send(OutgoingMessage('+1234', 'body'))
l.check(
('smsframework.providers.log', 'INFO', 'Sent SMS to {}: {}'.format(msg.dst, msg.body)),
)
``` |
{
"source": "JoshBClemons/gesture_recognition",
"score": 3
} |
#### File: gesture_recognition/gesture_recognition/auth.py
```python
import pdb
from flask import g, jsonify, session
from flask_httpauth import HTTPBasicAuth, HTTPTokenAuth
from . import db
from .models import User
# Authentication objects for username/password auth, token auth, and a token optional auth that is used for open endpoints.
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth('Bearer')
@basic_auth.verify_password
def verify_password(username, password):
"""Password verification callback
Args:
username (str): Client user name
password (str): Client password
Returns:
(boolean): True if successful password verification. False otherwise or if blank credentials entered.
"""
if not username or not password or username == "''" or password == "''":
return False
user = User.query.filter_by(username=username).first()
# if not a user, create the user
if user is None:
user_dict = {'username': username, 'password': password}
user = User.create(user_dict)
elif not user.verify_password(password):
return False
else:
user.new_login()
# mark user as online
user.ping()
db.session.add(user)
db.session.commit()
g.current_user = user
return True
@basic_auth.error_handler
def password_error():
"""Return a 401 error to the client
Returns:
(Response): Serialized error message
"""
return (jsonify({'error': 'authentication required'}), 401,{'WWW-Authenticate': 'Bearer realm="Authentication Required"'})
@token_auth.verify_token
def verify_token(token, add_to_session=False):
"""Token verification callback
Args:
token (str): Client token
add_to_session (boolean): Determines whether to store client information in session
Returns:
(boolean): True if successful token verification. False if user does not exist
"""
if add_to_session:
# clear the session in case auth fails
if 'username' in session:
del session['username']
user = User.query.filter_by(token=token).first()
if user is None:
return False
# mark the user as online
user.ping()
db.session.add(user)
db.session.commit()
g.current_user = user
# store username in client session
if add_to_session:
session['username'] = user.username
return True
@token_auth.error_handler
def token_error():
"""Return a 401 error to the client
Returns:
(Response): Serialized error message
"""
return (jsonify({'error': 'authentication required'}), 401, {'WWW-Authenticate': 'Bearer realm="Authentication Required"'})
```
#### File: gesture_recognition/gesture_recognition/featurizer.py
```python
from os.path import join
import numpy as np
import cv2
import pdb
import os
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
# background parameters
history = 0
bgSubThreshold = 50
learningRate = 0 # applying learning rate partially solves problem of poor background detection
def set_background(frame):
"""Save user background image
Args:
frame (array): Raw image streamed from client
Returns:
bgModel (cv2 Background Subtractor): cv2 object containing the client's background image
"""
bgModel = cv2.createBackgroundSubtractorMOG2(history, bgSubThreshold)
return bgModel
def remove_background(frame, bgModel):
"""Subtract user background image from new image
Args:
frame (array): Raw image streamed from client
bgModel (cv2 Background Subtractor): cv2 object containing the client's background image
Returns:
res (array): Array of original image with background removed
"""
global learningRate
fgmask = bgModel.apply(frame, learningRate=learningRate)
kernel = np.ones((3, 3), np.uint8)
fgmask = cv2.erode(fgmask, kernel, iterations=1)
res = cv2.bitwise_and(frame, frame, mask=fgmask)
return res
def process(frame, bgModel):
"""Process user image by removing background and converting resultant to binary image
Args:
frame (array): Raw image streamed from client
bgModel (cv2 Background Subtractor): cv2 object containing the client's background image
Returns:
frame_processed (array): Array of processed image with shape (144, 256, 3)
frame_prediction (array): Array of processed image with shape (1, 144, 256, 3), which is necessary for prediction algorithm
"""
# subtract background if background object has been saved. Otherwise, just resize image
if bgModel != 0:
frame_nobg = remove_background(frame, bgModel)
# resize image
frame_nobg = cv2.resize(frame_nobg, dsize=(256,144), interpolation=cv2.INTER_LINEAR)
# turn non-black pixels white
frame_processed = np.zeros(frame_nobg.shape, dtype=np.uint8)
frame_processed[frame_nobg>0] = 255
else:
frame_processed = cv2.resize(frame, dsize=(256,144), interpolation=cv2.INTER_LINEAR)
# convert the image into binary image to reduce file size
frame_processed = cv2.cvtColor(frame_processed, cv2.COLOR_BGR2GRAY)
# Expand dimensions since the model expects images to have shape: [1, 144, 256, 3]
    frame_prediction = frame_processed.astype(np.float64)  # float64 matches the removed np.float alias
frame_prediction = np.stack((frame_prediction,)*3, axis=-1)
frame_prediction /= 255
frame_prediction = np.expand_dims(frame_prediction, axis=0)
return [frame_processed, frame_prediction]
def rotate(frame, df_row, df_feats):
"""Generate rotated versions of image.
Args:
frame (array): Array representing original image
df_row (Pandas series): Row from frames dataframe. Series contains information about frame
        df_feats (list): List containing strings "instance," "user_id," and "root_dir," which are keys for df_row
Returns:
rotate_dict (dict): Dictionary with keys "flipped," "mirrored," and "mirrored_flipped." Dictionary values for each key contain respective rotated image path (str) and frame (array)
"""
instance = df_row[df_feats[0]]
user_id = int(df_row[df_feats[1]])
root_dir = df_row[df_feats[2]]
rotated_dir = os.path.join(root_dir, 'rotated')
rotated_dir = os.path.join(rotated_dir, str(user_id))
file_type = '.jpg'
    # rotate the image 180 degrees (two rot90 calls) to create the "flipped" variant
frame_flipped = tf.image.rot90(frame)
frame_flipped = tf.image.rot90(frame_flipped)
frame_flipped = np.array(frame_flipped)
frame_flipped = cv2.cvtColor(frame_flipped, cv2.COLOR_BGR2GRAY)
flipped_path = os.path.join(rotated_dir, instance + '_flipped' + file_type)
datagen = ImageDataGenerator(horizontal_flip=True)
frame_ext = frame.reshape((1,) + frame.shape)
for frame_mirrored in datagen.flow(frame_ext, batch_size=1):
# mirror image about y-axis
frame_mirrored = frame_mirrored.reshape((144,256,3))
# flip and mirror image
frame_mirrored_flipped = tf.image.rot90(frame_mirrored)
frame_mirrored_flipped = tf.image.rot90(frame_mirrored_flipped)
frame_mirrored_flipped = np.array(frame_mirrored_flipped)
frame_mirrored_flipped = cv2.cvtColor(frame_mirrored_flipped, cv2.COLOR_BGR2GRAY)
mirrored_flipped_path = os.path.join(rotated_dir, instance + '_mirrored_flipped' + file_type)
frame_mirrored = cv2.cvtColor(frame_mirrored, cv2.COLOR_BGR2GRAY)
mirrored_path = os.path.join(rotated_dir, instance + '_mirrored' + file_type)
break # break to avoid generating multiple copies of mirrored images
# package results in dictionary
rotate_dict = {}
rotate_dict['flipped'] = {}
rotate_dict['flipped']['path'] = flipped_path
rotate_dict['flipped']['frame'] = frame_flipped
rotate_dict['mirrored'] = {}
rotate_dict['mirrored']['path'] = mirrored_path
rotate_dict['mirrored']['frame'] = frame_mirrored
rotate_dict['mirrored_flipped'] = {}
rotate_dict['mirrored_flipped']['path'] = mirrored_flipped_path
rotate_dict['mirrored_flipped']['frame'] = frame_mirrored_flipped
return rotate_dict
```
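A small sketch of the intended call sequence. The synthetic frames stand in for the webcam images the client streams, and the import path assumes the package layout shown above:

```python
# Call-sequence sketch with synthetic frames; in the app these come from the
# client's webcam stream, and featurizer lives in the gesture_recognition package.
import numpy as np
from gesture_recognition import featurizer

background = np.zeros((480, 640, 3), dtype=np.uint8)      # first streamed frame
bgModel = featurizer.set_background(background)

frame = np.zeros((480, 640, 3), dtype=np.uint8)           # a later streamed frame
frame_processed, frame_prediction = featurizer.process(frame, bgModel)

print(frame_processed.shape)   # (144, 256) binary grayscale image
print(frame_prediction.shape)  # (1, 144, 256, 3) array fed to the model
```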
#### File: JoshBClemons/gesture_recognition/manage.py
```python
import pdb
import os
import subprocess
import sys
import eventlet
eventlet.monkey_patch()
from config import Config
from flask_script import Manager, Command, Server as _Server, Option
from gesture_recognition import create_app, db, socketio
from offline import reset_tables
import shutil
manager = Manager(create_app)
def grab_and_save_model():
"""Grab highest ranking model and corresponding gestures map from database and save them locally."""
import psycopg2
import psycopg2.extras
import json
conn = psycopg2.connect(host=Config.DB_HOST, database=Config.DB_NAME, user=Config.DB_USER, password=Config.DB_PASS)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
table = 'model_scores'
query = f"SELECT model_name FROM {table} WHERE rank = 1;"
cur.execute(query)
conn.commit()
model_name = cur.fetchone()[0]
table = 'models'
query = f"SELECT model, gestures_map FROM {table} WHERE model_name = '{model_name}'"
cur.execute(query)
conn.commit()
data = cur.fetchone()
model = data[0]
gestures_map = data[1]
cur.close()
# save model and gestures map locally
write_model = open(Config.MODEL_PATH, 'wb')
write_model.write(model)
write_model.close()
with open(Config.GESTURES_MAP_PATH, 'w') as fp:
json.dump(gestures_map, fp)
@manager.command
def create_db(drop_first=False):
"""Creates application database tables
Args:
drop_first (bool): Boolean that indicates whether to drop all database tables and directories before creating them
"""
# if drop_first is True, delete database tables first
if drop_first:
db.drop_all()
db.create_all()
print(f'[INFO] Created database tables.')
@manager.command
def reset_dirs():
"""Reset image and figure directories"""
image_dir = Config.IMAGE_DIRECTORY
fig_dir = Config.FIGURE_DIRECTORY
# delete directories
if os.path.isdir(image_dir):
shutil.rmtree(image_dir)
if os.path.isdir(fig_dir):
shutil.rmtree(fig_dir)
# create directories
os.mkdir(image_dir)
orig_dir = os.path.join(image_dir, 'original')
processed_dir = os.path.join(image_dir, 'processed')
os.mkdir(orig_dir)
os.mkdir(processed_dir)
os.mkdir(fig_dir)
print(f'[INFO] Created image and figure directories.')
@manager.command
def reset_offline():
"""Reset database tables used for model storage and generation and analysis."""
reset_tables.reset_tables()
@manager.command
def model_orchestrator():
"""Run model orchestrator to generate new model."""
from offline import orchestrator
orchestrator.orchestrator()
@manager.command
def test():
"""Run unit tests."""
import os
tests = os.system('python app_tests.py')
sys.exit(tests)
class Server(_Server):
help = description = 'Runs the Socket.IO web server'
def get_options(self):
options = (
Option('-h', '--host',
dest='host',
default='0.0.0.0'),
Option('-p', '--port',
dest='port',
type=int,
                   default=self.port),
Option('-o', '--online',
action='store_true',
dest='online',
help='run application in SSL context',
default=False),
Option('-ro', '--reset-online',
action='store_true',
dest='reset_online',
help='reset application database tables and directories before running server',
default=False),
Option('-rof', '--reset-offline',
action='store_true',
dest='reset_offline',
help='reset offline database tables before running server',
default=False),
)
return options
def __call__(self, app, host, port, online, reset_online, reset_offline):
"""Creates all application database tables, image directories, and figure directories and starts server
Args:
app (Flask application): Flask application
host (str): IP address that hosts Flask application
port (int): Port Flask application binds to
online (boolean): Boolean that indicates whether to run application with secure connection
reset_online (boolean): Boolean that indicates whether to reset application data tables and directories before starting server
reset_offline (boolean): Boolean that indicates whether to reset database tables before starting server
"""
# reset database tables and directories
if reset_online:
with app.app_context():
create_db(reset_online)
print('[INFO] Starting server.')
# run server with or without secure connection. Online instances must be ran with secure connection
if online:
basedir = os.path.abspath(os.path.dirname(__file__))
certfile = os.path.join(basedir, 'cert.pem')
keyfile = os.path.join(basedir, 'key.pem')
socketio.run(
app,
host=host,
port=443,
keyfile=keyfile,
certfile=certfile,
use_reloader=False,
)
else:
socketio.run(
app,
host=host,
port=port,
use_reloader=False,
)
manager.add_command("start", Server())
class CeleryWorker(Command):
"""Starts the celery worker."""
name = 'celery'
capture_all_args = True
def run(self, argv):
# create figure and image directories
reset_dirs()
ret = subprocess.call(
['celery', '--app=gesture_recognition.celery', 'worker'] + argv)
sys.exit(ret)
manager.add_command("celery", CeleryWorker())
if __name__ == '__main__':
if sys.argv[1] == 'test':
# ensure that Flask-Script uses the testing configuration
os.environ['FLACK_CONFIG'] = 'testing'
elif "-rof" in sys.argv:
# reset offline database tables. kind of hacky implementation.
reset_tables.reset_tables()
if "celery" not in sys.argv:
# pull best model from database and save locally
grab_and_save_model()
manager.run()
```
#### File: gesture_recognition/offline/reset_tables.py
```python
from config import Config
import psycopg2
from psycopg2.extras import Json, DictCursor
import pdb
import os
import datetime
def reset_tables():
"""Reset offline tables. This is necessary when first running application."""
# select all high-scoring predictions. These will be used to train new models.
conn = psycopg2.connect(host=Config.DB_HOST, database=Config.DB_NAME, user=Config.DB_USER, password=Config.DB_PASS)
cur = conn.cursor(cursor_factory=DictCursor)
# create table for confident predictions
table = 'conf_preds'
query = f'DROP TABLE IF EXISTS {table};'
cur.execute(query)
conn.commit()
query = f'CREATE TABLE {table}(instance text PRIMARY KEY NOT NULL, gesture text, processed_path text, flipped_path text, mirrored_path text, mirrored_flipped_path text);'
cur.execute(query)
conn.commit()
print(f'[INFO] Created table {table}')
# create table for newly trained models
table = 'models'
query = f'DROP TABLE IF EXISTS {table};'
cur.execute(query)
conn.commit()
query = f'CREATE TABLE {table}(model_name text PRIMARY KEY NOT NULL, training_date date, gestures_map jsonb, model bytea, model_path text);'
cur.execute(query)
conn.commit()
print(f'[INFO] Created table {table}')
# create table mapping training data instances with models
table = 'model_train_data_map'
query = f'DROP TABLE IF EXISTS {table};'
cur.execute(query)
conn.commit()
query = f'CREATE TABLE {table}(instance text PRIMARY KEY NOT NULL, model_0 integer)'
cur.execute(query)
conn.commit()
print(f'[INFO] Created table {table}')
# create table for model scores
table = 'model_scores'
query = f'DROP TABLE IF EXISTS {table};'
cur.execute(query)
conn.commit()
    # load the original model so the model_scores table is not empty when the application queries it
from . import load_orig_model
load_orig_model.load_orig_model()
# close db connection
cur.close()
``` |
{
"source": "joshbduncan/quotr",
"score": 2
} |
#### File: quotrapp/api/routes.py
```python
from flask import Blueprint, jsonify
from sqlalchemy import func
from quotrapp.models import Quote
api_bp = Blueprint('api_bp', __name__)
@api_bp.route('/api/quote')
def index():
quote = Quote.query.order_by(func.random()).first()
return jsonify({'Quote': quote.content, 'Author': quote.author.name})
``` |
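A client-side sketch of hitting this endpoint; the host and port assume a default local Flask run and `requests` installed:

```python
# Client sketch, assuming the Flask app is served locally on port 5000.
import requests

response = requests.get("http://127.0.0.1:5000/api/quote")
data = response.json()
print(f"{data['Quote']} - {data['Author']}")
```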
{
"source": "joshbduncan/swatcher",
"score": 2
} |
#### File: examples/flask_app/routes.py
```python
import json
import os
import secrets
import swatcher
from flask import (
abort,
current_app,
flash,
redirect,
render_template,
request,
send_file,
session,
url_for,
)
from . import app
from .forms import UploadImage, ResampleImage
from io import BytesIO
from PIL import Image
def reset_session_vars():
"""
resets all image flask session variables
"""
session.pop("id", None)
session.pop("filename", None)
session.pop("image_path", None)
session.pop("colors", None)
session.pop("palette", None)
session["max_colors"] = 8
session["sensitivity"] = 75
def prepare_pil_image(image: object) -> object:
"""
Temporarily save a PIL Image object so it can be sent to the client
:param `image`: PIL Image object
:returns: temporary file object
"""
img_io = BytesIO()
image.save(img_io, "PNG")
img_io.seek(0)
return img_io
@app.route("/", methods=["GET", "POST"])
def index():
# setup forms objects
upload_form = UploadImage()
resample_form = ResampleImage()
if request.method == "GET":
reset_session_vars()
return render_template("upload.html", upload_form=upload_form)
# if upload_form was submitted
if upload_form.upload.data:
if upload_form.validate_on_submit():
# reset all session vars for new upload
reset_session_vars()
# grab the user uploaded image
submitted_img = upload_form.image.data
# generate a random id for this image
random_hex = secrets.token_hex(8)
# _, f_ext = os.path.splitext(submitted_img.filename)
filename = random_hex + ".jpg"
# process the uploaded image
image = swatcher.Swatcher(submitted_img)
# save it locally in static folder
filepath = os.path.join(current_app.root_path, "static/images", filename)
image.processed_image.save(filepath, "JPEG", quality=100, subsampling=0)
image_path = url_for("static", filename="images/" + filename)
            # reduce the sampled colors using the default settings
image.max_colors = session.get("max_colors")
image.sensitivity = session.get("sensitivity")
colors = image.palette
# setup each sampled color as a dict for use in jinja template
# including the RGB values, Hex code, and CMYK values
colors = swatcher.color.colors_2_dicts(colors)
# set session values
session["id"] = random_hex
session["filename"] = filename
session["image_path"] = image_path
session["colors"] = json.dumps(image._colors)
session["palette"] = json.dumps(colors)
return render_template(
"colors.html",
resample_form=resample_form,
colors=colors,
)
else:
# if validation error start over
reset_session_vars()
flash("Please upload image files only!", "danger")
return render_template("upload.html", upload_form=upload_form)
# if resample_form was submitted instead
elif resample_form.resample.data:
if resample_form.validate_on_submit():
# get update sampling values from user and save them
max_colors = int(resample_form.colors.data)
sensitivity = int(resample_form.sensitivity.data)
# get stored image colors and resample them using new settings
colors = json.loads(session.get("colors"))
colors = swatcher.palette.sample(
colors=colors, max_colors=max_colors, sensitivity=sensitivity
)
# setup each sampled color as a dict for use in jinja template
# including the RGB values, Hex code, and CMYK values
colors = swatcher.color.colors_2_dicts(colors)
# update session values
session["max_colors"] = max_colors
session["sensitivity"] = sensitivity
session["palette"] = json.dumps(colors)
return render_template(
"colors.html",
resample_form=resample_form,
colors=colors,
)
# if session has expired then start over from scratch
if "id" not in session:
reset_session_vars()
flash("Sorry, your session has expired! Reupload.", "warning")
return render_template("upload.html", upload_form=upload_form)
# if any validation errors start over
reset_session_vars()
flash("Whoops, something went wrong! Reupload to try again.", "warning")
return render_template("upload.html", upload_form=upload_form)
@app.route("/palette")
def palette():
if "id" in session:
# grab session information
id = session.get("id")
colors = [tuple(color["rgb"]) for color in json.loads(session.get("palette"))]
# create adobe ase swatch file
file = swatcher.export.write_ase_file(colors)
# return the file as a download
return send_file(file, download_name=f"SWATCHER-{id}.ase", as_attachment=True)
else:
abort(410)
@app.route("/image")
def image():
if "id" in session:
# grab session information
id = session.get("id")
# get all RGB vales from the sampled colors
colors = [tuple(color["rgb"]) for color in json.loads(session.get("palette"))]
# create swatch palette image
file = swatcher.palette.draw_swatches(colors)
# create a temporary file and return it to the user as a download
return send_file(
prepare_pil_image(file),
mimetype="image/jpeg",
download_name=f"SWATCHER-{id}.png",
as_attachment=True,
)
else:
abort(410)
```
#### File: swatcher/swatcher/color.py
```python
from collections import Counter
from math import sqrt
def normalize_rgb_values(color: tuple) -> tuple:
"""
Clean-up any slight color differences in PIL sampling.
:param color: a tuple of RGB color values eg. (255, 255, 255)
:returns: a tuple of RGB color values
"""
return tuple([0 if val <= 3 else 255 if val >= 253 else val for val in color])
def rgb_2_luma(color: tuple) -> int:
"""
Calculate the "brightness" of a color.
...and, yes I know this is a debated subject
    but this way works just fine for my purposes.
:param color: a tuple of RGB color values eg. (255, 255, 255)
:returns: luminance "brightness" value
"""
r, g, b = color[0] / 255, color[1] / 255, color[2] / 255
l = 0.33 * r + 0.5 * g + 0.16 * b
return round(l, 2)
def sort_by_brightness(colors: list) -> list:
"""
    Sort a list of RGB color values by their brightness.
    :param colors: list of RGB color tuples eg. [(255, 255, 255), ...]
    :returns: list of RGB color tuples sorted from brightest to darkest
"""
l = {color: rgb_2_luma(color) for color in colors}
return sorted(l, key=l.get, reverse=True)
def rgb_2_hex(color: tuple) -> str:
"""
    Convert RGB color values to Hex code (eg. #ffffff).
:param color: tuple of RGB values for color eg. (255, 255, 255)
:returns: color Hex code
"""
r, g, b = color
return f"#{r:02x}{g:02x}{b:02x}"
def rgb_2_cmyk(color: tuple) -> tuple:
"""
    Convert RGB color values to CMYK color values.
:param color: tuple of RGB values for color eg. (255, 255, 255)
:returns: CMYK values eg. (C, M, Y, K)
"""
# if RGB color is black return CMYK black
if color == (0, 0, 0):
return (0, 0, 0, 100)
# convert the RGB values
r, g, b = color
k = 1 - max((r, g, b)) / 255
c = int(((1 - (r / 255) - k) / (1 - k)) * 100)
m = int(((1 - (g / 255) - k) / (1 - k)) * 100)
y = int(((1 - (b / 255) - k) / (1 - k)) * 100)
return (c, m, y, int(k * 100))
def color_2_dict(color: tuple) -> dict:
"""
    Convert tuple of RGB color values to HEX and CMYK then
combine into a dictionary in the following format.
{"rgb": (0, 0, 0), "hex": "#000000", "cmyk": (0, 0, 0, 100)}
:param color: tuple of RGB values for color eg. (255, 255, 255)
:returns: RGB, HEX and CMYK values
"""
rgb = color
return {"rgb": rgb, "hex": rgb_2_hex(color), "cmyk": rgb_2_cmyk(color)}
def colors_2_dicts(colors: list) -> list:
"""
    Convert a list of RGB color values to a list of
    dicts with RGB, HEX, and CMYK values.
    :param colors: list of RGB color tuples eg. [(255, 255, 255), ...]
:returns: list of color value dictionaries
"""
return [color_2_dict(color) for color in colors]
def color_distance(color1: tuple, color2: tuple) -> int:
"""
Calculate the Euclidean distance between two colors.
https://en.wikipedia.org/wiki/Color_difference
:param color1: tuple of RGB color values eg. (255, 255, 255)
:param color2: tuple of RGB color values
:returns: Euclidean distance of two colors
"""
r1, g1, b1 = color1
r2, g2, b2 = color2
return int(sqrt(((r2 - r1) ** 2) + ((g2 - g1) ** 2) + ((b2 - b1) ** 2)))
def get_colors(image: object) -> list:
"""
Sample all pixels from an image and sort their RGB values by most common
:param image: PIL Image object
:returns: list of RGB tuples (255, 255, 255)
"""
colors = Counter([pixel for pixel in image.getdata()])
return [color for (color, _) in colors.most_common()]
```
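A quick sketch exercising the conversion helpers above; the values shown in the comments follow directly from the formulas:

```python
# Usage sketch of the conversion helpers above.
from swatcher import color

orange = (255, 128, 0)
print(color.rgb_2_hex(orange))    # "#ff8000"
print(color.rgb_2_cmyk(orange))   # (0, 49, 100, 0)
print(color.rgb_2_luma(orange))   # 0.58
print(color.color_distance((0, 0, 0), (255, 255, 255)))  # 441
print(color.color_2_dict(orange))
# {'rgb': (255, 128, 0), 'hex': '#ff8000', 'cmyk': (0, 49, 100, 0)}
```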
#### File: swatcher/swatcher/image.py
```python
from collections import Counter
from PIL import Image, ImageChops
from .color import normalize_rgb_values
def trim_excess(image: object) -> object:
"""
Trim excess background pixels from around an image.
:param image: PIL Image object
:returns: PIL Image object
"""
w, h = image.size
# get RGB value for each corner of image
corners = [
normalize_rgb_values(image.getpixel((0, 0))),
normalize_rgb_values(image.getpixel((w - 1, 0))),
normalize_rgb_values(image.getpixel((0, h - 1))),
normalize_rgb_values(image.getpixel((w - 1, h - 1))),
]
# count how many times each value is present
color_count = Counter([pixel for pixel in corners]).most_common()
# if multiple corners have the same pixel count don't trim
if len(color_count) > 1 and color_count[0][1] == color_count[1][1]:
return image
else: # set the comparison pixel to the most common value
bg_pixel = color_count[0][0]
# compare the original image to the excess pixels
comp = Image.new("RGB", image.size, bg_pixel)
diff = ImageChops.difference(image, comp)
bbox = diff.getbbox()
# crop the difference
return image.crop(bbox)
def process_image(image: object, max_size: int = 500) -> object:
"""
Process the image for best color sampling results.
:param image: PIL Image object
:param max_size: maximum size of the image for color sampling
:returns: PIL Image object
"""
image = image.convert("RGBA")
# check to make sure image has pixels
w, h = image.size
if w == 0 or h == 0:
raise ValueError("The provided image has no pixels.")
# composite the image on a white background just in case it has transparency
bg = Image.new("RGBA", image.size, (255, 255, 255))
comp = Image.alpha_composite(bg, image)
# convert composite image to RGB since we only need the RGB color values
comp = comp.convert("RGB")
# crop the image if extra surrounding background pixels are found
comp = trim_excess(comp)
# reduce the image down to `max_size` to speed up processing
if comp.width > max_size or comp.height > max_size:
comp.thumbnail((max_size, max_size), resample=0)
return comp
```
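A short sketch that mirrors the test suite below: draw a shape on a transparent canvas with Pillow and run it through `process_image`:

```python
# Sketch mirroring the tests below: a shape on a transparent canvas is
# composited on white, trimmed to its bounding box, and capped at 500px.
from PIL import Image, ImageDraw
from swatcher import image as sw_image

img = Image.new("RGBA", (1000, 800), (0, 0, 0, 0))
draw = ImageDraw.Draw(img)
draw.ellipse((400, 300, 600, 500), fill=(255, 128, 0))

processed = sw_image.process_image(img, max_size=500)
print(processed.mode, processed.size)  # "RGB" (201, 201) - the ellipse's bounding box
```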
#### File: swatcher/tests/test_image.py
```python
import pytest
from PIL import Image, ImageDraw, ImageFilter
from swatcher import image
def test_01(): # one centered pixel
img = Image.new("RGB", (1000, 1000), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point((500, 500), (0, 0, 0))
trimmed = image.trim_excess(img)
assert trimmed.size == (1, 1)
def test_02(): # two inset pixels (top-left, bottom-right)
img = Image.new("RGB", (1000, 1000), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point(((10, 10), (990, 990)), (0, 0, 0))
trimmed = image.trim_excess(img)
assert trimmed.size == (981, 981)
def test_03(): # transparent circle centered
img = Image.new("RGBA", (1000, 1000))
d = ImageDraw.Draw(img)
d.ellipse((400, 400, 600, 600), (255, 128, 0))
trimmed = image.process_image(img)
assert trimmed.size == (201, 201)
def test_04(): # transparent image two objects over 500 px
img = Image.new("RGBA", (1000, 1000))
d = ImageDraw.Draw(img)
d.ellipse((100, 100, 200, 200), (0, 0, 0))
d.ellipse((500, 500, 700, 700), (255, 128, 0))
trimmed = image.process_image(img)
assert trimmed.size == (500, 500)
def test_05(): # aliased pixels on solid background
canvas_w, canvas_h = 1000, 800
shape_w, shape_h = 600, 286
img = Image.new("RGB", (canvas_w, canvas_h), (255, 255, 255))
d = ImageDraw.Draw(img)
p1 = ((canvas_w // 2) - (shape_w // 2), (canvas_h // 2) - (shape_h // 2))
p2 = (p1[0] + shape_w, p1[1] + shape_h)
d.ellipse(xy=(p1, p2), fill=(255, 0, 255))
for _ in range(10):
img = img.filter(ImageFilter.BLUR)
aliased = image.process_image(img)
assert aliased.size == (500, 250)
def test_06(): # aliased pixels on transparent background
canvas_w, canvas_h = 1000, 800
shape_w, shape_h = 600, 286
img = Image.new("RGBA", (canvas_w, canvas_h), (0, 0, 0, 0))
d = ImageDraw.Draw(img)
p1 = ((canvas_w // 2) - (shape_w // 2), (canvas_h // 2) - (shape_h // 2))
p2 = (p1[0] + shape_w, p1[1] + shape_h)
d.ellipse(xy=(p1, p2), fill=(255, 0, 255))
for _ in range(10):
img = img.filter(ImageFilter.BLUR)
aliased = image.process_image(img)
assert aliased.size == (500, 250)
def test_07(): # image has no pixels
with pytest.raises(ValueError):
img = Image.new(size=(2, 0), mode="RGBA")
img = image.process_image(img)
def test_08(): # small image
img = Image.new(size=(50, 50), mode="RGBA")
img = image.process_image(img)
assert img.size == (50, 50)
```
#### File: swatcher/tests/test_sampling.py
```python
from PIL import Image, ImageDraw
from swatcher import color, palette
def test_01(): # sampled_colors from control image
img = Image.new("RGB", (3, 1), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point((0, 0), (255, 0, 0))
d.point((1, 0), (0, 255, 0))
d.point((2, 0), (0, 0, 255))
image_colors = color.get_colors(img)
sampled_colors = palette.sample(image_colors)
assert sampled_colors == [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
def test_02(): # sampled_colors from control image
img = Image.new("RGB", (3, 1), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point((0, 0), (0, 0, 255))
d.point((1, 0), (0, 255, 0))
d.point((2, 0), (255, 0, 0))
image_colors = color.get_colors(img)
sampled_colors = palette.sample(image_colors)
assert sampled_colors == [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
def test_03(): # sampled_colors from control image
img = Image.new("RGB", (4, 1), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point(((0, 0), (1, 0)), (255, 0, 0))
d.point((2, 0), (0, 255, 0))
d.point((3, 0), (0, 0, 255))
image_colors = color.get_colors(img)
sampled_colors = palette.sample(image_colors)
assert sampled_colors == [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
def test_04(): # sampled_colors from control image
img = Image.new("RGB", (4, 1), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point((0, 0), (255, 0, 0))
d.point(((1, 0), (2, 0)), (0, 255, 0))
d.point((3, 0), (0, 0, 255))
image_colors = color.get_colors(img)
sampled_colors = palette.sample(image_colors)
assert sampled_colors == [(0, 255, 0), (255, 0, 0), (0, 0, 255)]
def test_05(): # sampled_colors from control image
img = Image.new("RGB", (4, 1), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point((0, 0), (255, 0, 0))
d.point((1, 0), (0, 255, 0))
d.point(((2, 0), (3, 0)), (0, 0, 255))
image_colors = color.get_colors(img)
sampled_colors = palette.sample(image_colors)
assert sampled_colors == [(0, 0, 255), (255, 0, 0), (0, 255, 0)]
def test_06(): # sampled_colors from control image
img = Image.new("RGB", (4, 4), (255, 255, 255))
d = ImageDraw.Draw(img)
d.point(((0, 0), (1, 0), (0, 1), (1, 1)), (255, 0, 0))
d.point(((2, 0), (3, 0), (2, 1), (3, 1)), (0, 255, 0))
d.point(((0, 2), (1, 2), (0, 3), (1, 3)), (0, 0, 255))
image_colors = color.get_colors(img)
sampled_colors = palette.sample(image_colors)
assert sampled_colors == [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 255),
]
def test_07(): # sample function on sample list
assert palette.sample([(0, 0, 0), (128, 128, 128)]) == [
(0, 0, 0),
(128, 128, 128),
]
def test_08(): # sample function on sample list with increased sensitivity
assert palette.sample([(0, 0, 0), (128, 128, 128)], sensitivity=250) == [(0, 0, 0)]
def test_09(): # sample function on sample list with reduced max_colors
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]
assert palette.sample(colors, max_colors=3) == [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
]
``` |
{
"source": "joshbduncan/word-search-generator",
"score": 3
} |
#### File: word-search-generator/tests/test_cli.py
```python
import os
import pathlib
import tempfile
TEMP_DIR = tempfile.TemporaryDirectory()
def test_entrypoint():
exit_status = os.system("word-search --help")
assert exit_status == 0
def test_no_words_provided():
exit_status = os.system("word-search")
assert os.WEXITSTATUS(exit_status) == 1
def test_just_words():
exit_status = os.system("word-search some test words")
assert exit_status == 0
def test_stdin():
exit_status = os.system("echo computer robot soda | word-search")
assert os.WEXITSTATUS(exit_status) == 0
def test_export_pdf():
temp_path = TEMP_DIR.name + "/test.pdf"
exit_status = os.system(f"word-search some test words -e pdf -o {temp_path}")
assert exit_status == 0 and pathlib.Path(temp_path).exists()
def test_export_csv():
temp_path = TEMP_DIR.name + "/test.csv"
exit_status = os.system(f"word-search some test words -e csv -o {temp_path}")
assert exit_status == 0 and pathlib.Path(temp_path).exists()
def test_invalid_export_location():
exit_status = os.system("word-search some test words -e csv -o ~/RANDOMTESTLOC")
assert os.WEXITSTATUS(exit_status) == 1
``` |
{
"source": "joshbelot/GettingStarted",
"score": 4
} |
#### File: GettingStarted/my_pkg/trial.py
```python
def square(x):
"""Finds the square of the input.
Args:
x (float): The number to be squared.
Returns:
x2 (float): The squared number.
"""
return x**2
def factorial(n):
"""Factorial calculates the factorial of the provided integer.
Args:
n (int): The value that the factorial will be computed from.
Returns:
fact (int): The factorial of n.
Raises:
ValueError: If n is not an integer.
"""
if not isinstance(n,int):
if int(n) == n:
n = int(n)
else:
raise ValueError("The input to factorial must be an integer.")
if n <= 0:
fact = 1
else:
fact = n
for i in range(1,n):
fact = i*fact
return fact
``` |
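A brief usage sketch of the two helpers, assuming the package directory is on the import path:

```python
# Usage sketch of the helpers above.
from my_pkg.trial import square, factorial

print(square(3))       # 9
print(factorial(5))    # 120
print(factorial(5.0))  # 120 - float-valued integers are coerced to int
print(factorial(0))    # 1

try:
    factorial(2.5)
except ValueError as err:
    print(err)         # "The input to factorial must be an integer."
```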
{
"source": "joshbenjamin3/Static_BFP_CNN",
"score": 3
} |
#### File: Static_BFP_CNN/benchmark/r3d.py
```python
import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _triple
from lib import BFPActivation
from lib import BFPFullyConnet
######### Orig Model Define #############
class SpatioTemporalConv(nn.Module):
r"""Applies a factored 3D convolution over an input signal composed of several input
planes with distinct spatial and time axes, by performing a 2D convolution over the
spatial axes to an intermediate subspace, followed by a 1D convolution over the time
axis to produce the final output.
Args:
in_channels (int): Number of channels in the input tensor
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to the sides of the input during their respective convolutions. Default: 0
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False):
super(SpatioTemporalConv, self).__init__()
# if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
self.temporal_spatial_conv = nn.Conv3d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=bias)
#self.bn = nn.BatchNorm3d(out_channels)
#self.relu = nn.ReLU()
def forward(self, x):
x = self.temporal_spatial_conv(x)
#x = self.bn(self.temporal_spatial_conv(x))
#x = self.relu(x)
return x
class SpatioTemporalResBlock(nn.Module):
r"""Single block for the ResNet network. Uses SpatioTemporalConv in
the standard ResNet block layout (conv->batchnorm->ReLU->conv->batchnorm->sum->ReLU)
Args:
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels in the output produced by the block.
kernel_size (int or tuple): Size of the convolving kernels.
downsample (bool, optional): If ``True``, the output size is to be smaller than the input. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, downsample=False):
super(SpatioTemporalResBlock, self).__init__()
# If downsample == True, the first conv of the layer has stride = 2
# to halve the residual output size, and the input x is passed
        # through a separate 1x1x1 conv with stride = 2 to also halve it.
# no pooling layers are used inside ResNet
self.downsample = downsample
# to allow for SAME padding
padding = kernel_size // 2
if self.downsample:
# downsample with stride =2 the input x
self.downsampleconv = SpatioTemporalConv(in_channels, out_channels, 1, stride=2)
self.downsamplebn = nn.BatchNorm3d(out_channels)
            # downsample with stride = 2 when producing the residual
self.conv1 = SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding, stride=2)
else:
self.conv1 = SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding)
self.bn1 = nn.BatchNorm3d(out_channels)
self.relu1 = nn.ReLU()
# standard conv->batchnorm->ReLU
self.conv2 = SpatioTemporalConv(out_channels, out_channels, kernel_size, padding=padding)
self.bn2 = nn.BatchNorm3d(out_channels)
self.outrelu = nn.ReLU()
def forward(self, x):
res = self.relu1(self.bn1(self.conv1(x)))
res = self.bn2(self.conv2(res))
if self.downsample:
x = self.downsamplebn(self.downsampleconv(x))
return self.outrelu(x + res)
class SpatioTemporalResLayer(nn.Module):
r"""Forms a single layer of the ResNet network, with a number of repeating
blocks of same output size stacked on top of each other
Args:
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels in the output produced by the layer.
kernel_size (int or tuple): Size of the convolving kernels.
layer_size (int): Number of blocks to be stacked to form the layer
block_type (Module, optional): Type of block that is to be used to form the layer. Default: SpatioTemporalResBlock.
downsample (bool, optional): If ``True``, the first block in layer will implement downsampling. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, layer_size, block_type=SpatioTemporalResBlock,
downsample=False):
super(SpatioTemporalResLayer, self).__init__()
# implement the first block
self.block1 = block_type(in_channels, out_channels, kernel_size, downsample)
# prepare module list to hold all (layer_size - 1) blocks
self.blocks = nn.ModuleList([])
for i in range(layer_size - 1):
# all these blocks are identical, and have downsample = False by default
self.blocks += [block_type(out_channels, out_channels, kernel_size)]
def forward(self, x):
x = self.block1(x)
for block in self.blocks:
x = block(x)
return x
class R3DNet(nn.Module):
r"""Forms the overall ResNet feature extractor by initializng 5 layers, with the number of blocks in
each layer set by layer_sizes, and by performing a global average pool at the end producing a
512-dimensional vector for each element in the batch.
Args:
layer_sizes (tuple): An iterable containing the number of blocks in each layer
block_type (Module, optional): Type of block that is to be used to form the layers. Default: SpatioTemporalResBlock.
"""
def __init__(self, layer_sizes, block_type=SpatioTemporalResBlock):
super(R3DNet, self).__init__()
# first conv, with stride 1x2x2 and kernel size 3x7x7
self.conv1 = SpatioTemporalConv(3, 64, [3, 7, 7], stride=[1, 2, 2], padding=[1, 3, 3])
self.bn1 = nn.BatchNorm3d(64)
self.relu1 = nn.ReLU()
# output of conv2 is same size as of conv1, no downsampling needed. kernel_size 3x3x3
self.conv2 = SpatioTemporalResLayer(64, 64, 3, layer_sizes[0], block_type=block_type)
# each of the final three layers doubles num_channels, while performing downsampling
# inside the first block
self.conv3 = SpatioTemporalResLayer(64, 128, 3, layer_sizes[1], block_type=block_type, downsample=True)
self.conv4 = SpatioTemporalResLayer(128, 256, 3, layer_sizes[2], block_type=block_type, downsample=True)
self.conv5 = SpatioTemporalResLayer(256, 512, 3, layer_sizes[3], block_type=block_type, downsample=True)
# global average pooling of the output
self.pool = nn.AdaptiveAvgPool3d(1)
def forward(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.pool(x)
return x.view(-1, 512)
class r3d(nn.Module):
r"""Forms a complete ResNet classifier producing vectors of size num_classes, by initializng 5 layers,
with the number of blocks in each layer set by layer_sizes, and by performing a global average pool
at the end producing a 512-dimensional vector for each element in the batch,
and passing them through a Linear layer.
Args:
num_classes(int): Number of classes in the data
layer_sizes (tuple): An iterable containing the number of blocks in each layer
block_type (Module, optional): Type of block that is to be used to form the layers. Default: SpatioTemporalResBlock.
"""
def __init__(self, num_classes, layer_sizes, block_type=SpatioTemporalResBlock, pretrained=False):
super(r3d, self).__init__()
self.res3d = R3DNet(layer_sizes, block_type)
self.linear = nn.Linear(512, num_classes)
self.__init_weight()
if pretrained:
self.__load_pretrained_weights()
def forward(self, x):
x = self.res3d(x)
logits = self.linear(x)
return logits
def __load_pretrained_weights(self):
p_dict = torch.load("/mnt/ccnas2/bdp/hf17/TCAD_3DCNNs/R3D-ucf101_epoch-99.pth.tar")
print ("Loading from pretrained models")
self.load_state_dict(p_dict['state_dict'])
#for name in self.state_dict():
# print (name)
#s_dict = self.state_dict()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
############################################
######### BFP Model Define #############
############################################
class BFP_SpatioTemporalConv(nn.Module):
r"""Applies a factored 3D convolution over an input signal composed of several input
planes with distinct spatial and time axes, by performing a 2D convolution over the
spatial axes to an intermediate subspace, followed by a 1D convolution over the time
axis to produce the final output.
Args:
in_channels (int): Number of channels in the input tensor
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to the sides of the input during their respective convolutions. Default: 0
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True): # bias needs to be True for BFP module
super(BFP_SpatioTemporalConv, self).__init__()
# if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
# self.exp_bit = exp_bit
# self.mantisa_bit = mantisa_bit
# self.opt_exp_act_list = opt_exp_act_list
self.temporal_spatial_conv = nn.Conv3d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=bias)
#self.bn = nn.BatchNorm3d(out_channels)
# self.relu = nn.ReLU()
def forward(self, x):
#x = self.bn(self.temporal_spatial_conv(x))
x = self.temporal_spatial_conv(x)
# x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit,
# self.opt_exp_act_list, is_3d=True)
# x = self.relu(x)
return x
class BFP_SpatioTemporalResBlock(nn.Module):
r"""Single block for the ResNet network. Uses SpatioTemporalConv in
the standard ResNet block layout (conv->batchnorm->ReLU->conv->batchnorm->sum->ReLU)
Args:
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels in the output produced by the block.
kernel_size (int or tuple): Size of the convolving kernels.
downsample (bool, optional): If ``True``, the output size is to be smaller than the input. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, downsample=False, exp_bit=4, mantisa_bit=8, opt_exp_act_list=None):
super(BFP_SpatioTemporalResBlock, self).__init__()
# If downsample == True, the first conv of the layer has stride = 2
# to halve the residual output size, and the input x is passed
# through a separate 1x1x1 conv with stride = 2 to also halve it.
# no pooling layers are used inside ResNet
self.downsample = downsample
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
# to allow for SAME padding
padding = kernel_size // 2
if self.downsample:
# downsample the input x with stride = 2
self.downsampleconv = BFP_SpatioTemporalConv(in_channels, out_channels, 1, stride=2)
#self.downsamplebn = nn.BatchNorm3d(out_channels)
# downsample with stride = 2 when producing the residual
self.conv1 = BFP_SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding, stride=2)
else:
self.conv1 = BFP_SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding)
#self.bn1 = nn.BatchNorm3d(out_channels)
self.relu1 = nn.ReLU()
# standard conv->batchnorm->ReLU
self.conv2 = BFP_SpatioTemporalConv(out_channels, out_channels, kernel_size, padding=padding)
#exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[])
#self.bn2 = nn.BatchNorm3d(out_channels)
self.outrelu = nn.ReLU()
def forward(self, x):
res = self.conv1(x)
res = BFPActivation.transform_activation_online(res, self.exp_bit, self.mantisa_bit, -1, is_3d=True)
res = self.relu1(res)
res = self.conv2(res)
res = BFPActivation.transform_activation_online(res, self.exp_bit, self.mantisa_bit, -1, is_3d=True)
if self.downsample:
x = self.downsampleconv(x)
#x = self.downsamplebn(self.downsampleconv(x))
x = BFPActivation.transform_activation_online(x, self.exp_bit, self.mantisa_bit, -1, is_3d=True)
return self.outrelu(x + res)
class BFP_SpatioTemporalResLayer(nn.Module):
r"""Forms a single layer of the ResNet network, with a number of repeating
blocks of same output size stacked on top of each other
Args:
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels in the output produced by the layer.
kernel_size (int or tuple): Size of the convolving kernels.
layer_size (int): Number of blocks to be stacked to form the layer
block_type (Module, optional): Type of block that is to be used to form the layer. Default: SpatioTemporalResBlock.
downsample (bool, optional): If ``True``, the first block in layer will implement downsampling. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, layer_size, block_type=BFP_SpatioTemporalResBlock,
downsample=False, exp_bit=4, mantisa_bit=8, opt_exp_act_list=None):
super(BFP_SpatioTemporalResLayer, self).__init__()
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
# implement the first block
self.block1 = block_type(in_channels, out_channels, kernel_size, downsample,
exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[0:2])
# prepare module list to hold all (layer_size - 1) blocks
self.blocks = nn.ModuleList([])
cur_indx = 2
for i in range(layer_size - 1):
# all these blocks are identical, and have downsample = False by default
self.blocks += [block_type(out_channels, out_channels, kernel_size, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[cur_indx:cur_indx+2])]
cur_indx +=2
def forward(self, x):
x = self.block1(x)
for block in self.blocks:
x = block(x)
return x
class BFP_R3DNet(nn.Module):
r"""Forms the overall ResNet feature extractor by initializng 5 layers, with the number of blocks in
each layer set by layer_sizes, and by performing a global average pool at the end producing a
512-dimensional vector for each element in the batch.
Args:
layer_sizes (tuple): An iterable containing the number of blocks in each layer
block_type (Module, optional): Type of block that is to be used to form the layers. Default: SpatioTemporalResBlock.
"""
def __init__(self, layer_sizes, block_type=BFP_SpatioTemporalResBlock, exp_bit=4, mantisa_bit=8, opt_exp_act_list=None):
super(BFP_R3DNet, self).__init__()
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
print ("******The length of exp list:", len(self.opt_exp_act_list))
# first conv, with stride 1x2x2 and kernel size 3x7x7
self.conv1 = BFP_SpatioTemporalConv(3, 64, [3, 7, 7], stride=[1, 2, 2], padding=[1, 3, 3])
self.relu1 = nn.ReLU()
cur_indx = 2
next_indx = cur_indx + layer_sizes[0] * 2
# output of conv2 is same size as of conv1, no downsampling needed. kernel_size 3x3x3
self.conv2 = BFP_SpatioTemporalResLayer(64, 64, 3, layer_sizes[0], block_type=block_type,
exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[cur_indx:next_indx])
cur_indx = next_indx
next_indx += layer_sizes[1] * 2
# each of the final three layers doubles num_channels, while performing downsampling
# inside the first block
self.conv3 = BFP_SpatioTemporalResLayer(64, 128, 3, layer_sizes[1], block_type=block_type, downsample=True,
exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[cur_indx:next_indx])
cur_indx = next_indx
next_indx += layer_sizes[2] * 2
self.conv4 = BFP_SpatioTemporalResLayer(128, 256, 3, layer_sizes[2], block_type=block_type, downsample=True,
exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[cur_indx:next_indx])
cur_indx = next_indx
next_indx += layer_sizes[3] * 2
self.conv5 = BFP_SpatioTemporalResLayer(256, 512, 3, layer_sizes[3], block_type=block_type, downsample=True,
exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list[cur_indx:next_indx])
# global average pooling of the output
self.pool = nn.AdaptiveAvgPool3d(1)
def forward(self, x):
x = BFPActivation.transform_activation_online(x, self.exp_bit, self.mantisa_bit, -1, is_3d=True)
x = self.relu1(self.conv1(x))
x = BFPActivation.transform_activation_online(x, self.exp_bit, self.mantisa_bit, -1, is_3d=True)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.pool(x)
return x.view(-1, 512)
class r3d_18_bfp(nn.Module):
r"""Forms a complete ResNet classifier producing vectors of size num_classes, by initializng 5 layers,
with the number of blocks in each layer set by layer_sizes, and by performing a global average pool
at the end producing a 512-dimensional vector for each element in the batch,
and passing them through a Linear layer.
Args:
num_classes(int): Number of classes in the data
layer_sizes (tuple): An iterable containing the number of blocks in each layer
block_type (Module, optional): Type of block that is to be used to form the layers. Default: SpatioTemporalResBlock.
"""
def __init__(self, num_classes, layer_sizes=[2, 2, 2, 2], block_type=BFP_SpatioTemporalResBlock, pretrained=False,
exp_bit=4, mantisa_bit=8, opt_exp_act_list=None):
super(r3d_18_bfp, self).__init__()
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.res3d = BFP_R3DNet(layer_sizes, block_type, exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list)
self.linear = nn.Linear(512, num_classes)
self.__init_weight()
if pretrained:
self.__load_pretrained_weights()
#for name in self.state_dict():
# print (name)
def forward(self, x):
x = self.res3d(x)
logits = self.linear(x)
logits = BFPFullyConnet.transform_fc_online(logits, self.exp_bit, self.mantisa_bit, -1)
return logits
def __load_pretrained_weights(self):
s_dict = self.state_dict()
for name in s_dict:
print(name)
print(s_dict[name].size())
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class r3d_34_bfp(nn.Module):
r"""Forms a complete ResNet classifier producing vectors of size num_classes, by initializng 5 layers,
with the number of blocks in each layer set by layer_sizes, and by performing a global average pool
at the end producing a 512-dimensional vector for each element in the batch,
and passing them through a Linear layer.
Args:
num_classes(int): Number of classes in the data
layer_sizes (tuple): An iterable containing the number of blocks in each layer
block_type (Module, optional): Type of block that is to be used to form the layers. Default: SpatioTemporalResBlock.
"""
def __init__(self, num_classes, layer_sizes=[3, 4, 6, 3], block_type=BFP_SpatioTemporalResBlock, pretrained=False,
exp_bit=4, mantisa_bit=8, opt_exp_act_list=None):
super(r3d_34_bfp, self).__init__()
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.res3d = BFP_R3DNet(layer_sizes, block_type, exp_bit=self.exp_bit, mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list)
self.linear = nn.Linear(512, num_classes)
self.__init_weight()
if pretrained:
self.__load_pretrained_weights()
#for name in self.state_dict():
# print (name)
def forward(self, x):
x = self.res3d(x)
logits = self.linear(x)
logits = BFPFullyConnet.transform_fc_online(logits, self.exp_bit, self.mantisa_bit, -1)
return logits
def __load_pretrained_weights(self):
s_dict = self.state_dict()
for name in s_dict:
print(name)
print(s_dict[name].size())
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_1x_lr_params(model):
"""
This generator returns all the parameters for the conv layer of the net.
"""
b = [model.res3d]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
This generator returns all the parameters for the fc layer of the net.
"""
b = [model.linear]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
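# Usage sketch (assumption, not taken from this repo): the two generators above are
# typically passed to the optimizer as separate parameter groups so the backbone and
# the classifier are trained with different learning rates, e.g.
#   optimizer = torch.optim.SGD(
#       [{'params': get_1x_lr_params(model), 'lr': 1e-3},
#        {'params': get_10x_lr_params(model), 'lr': 1e-2}],
#       momentum=0.9)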
if __name__ == "__main__":
import torch
inputs = torch.rand(1, 3, 16, 112, 112)
net = r3d(101, (2, 2, 2, 2), pretrained=True)
outputs = net.forward(inputs)
print(outputs.size())
```
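A minimal instantiation sketch for the online-quantized benchmark model defined above. The exponent list entries are placeholders, since the online path recomputes shared exponents per tensor (every call above passes `chnl_group=-1`); the `BFPActivation`/`BFPFullyConnet` helpers and their imports are assumed to be available as elsewhere in this repository.
```python
import torch

# Placeholder exponent list: long enough for the per-layer slicing, never read in online mode.
dummy_exp_list = [None] * 32
net = r3d_18_bfp(num_classes=101, pretrained=False,
                 exp_bit=4, mantisa_bit=8, opt_exp_act_list=dummy_exp_list)
net.eval()
clip = torch.rand(1, 3, 16, 112, 112)  # (batch, channels, frames, height, width)
with torch.no_grad():
    logits = net(clip)                 # expected shape: (1, 101)
```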
#### File: Static_BFP_CNN/lib/BFPFullyConnet.py
```python
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Utils import bfp_quantize, to_exponent_mantissa_width
import math
import time
# PyTorch
import torch
def transform_fc_online(tensor, exponent, mantissa, chnl_group):
# Online means the shared exponent is determined on the fly
# from the current tensor rather than being fixed ahead of time.
# Quantize the activation tensor along the channel dimension.
# Here we require the input tensor to have the shape: [batch, channel]
# chnl_group: number of channels sharing one exponent (-1 means the whole channel dimension)
shp = tensor.shape
#print ("shape1:", shp[1], " opt_exp_list:", len(opt_exp_list))
if (chnl_group == -1):
chnl_group = shp[1]
number_of_blocks = math.ceil(shp[1]/chnl_group)
if shp[1] % chnl_group == 0:
# shp[1] is divisible by block size
# Therefore just one tensor will be created
tensor = bfp_quantize(tensor, exponent, mantissa, quant_dim=len(tensor.shape)-1)
else:
raise ValueError("Channel is not divisible by channel group while bfp quantizeing the FC")
return tensor
def transform_fc_offline(tensor, exponent, mantissa, opt_exp_list):
# Offline means the shared exponent is fixed;
# it is determined during the pre-inference
# Quantize the activation tensor along channel dimension
# Here we require the input tensor has the shape: [batch, channel]
# opt_exp_list: the shared exponent list for offline quantization
shp = tensor.shape
#print ("shape1:", shp[1], " opt_exp_list:", len(opt_exp_list))
number_of_blocks = len(opt_exp_list)
block_size = shp[1] // len(opt_exp_list)
opt_exp_list = torch.Tensor(opt_exp_list).cuda()
#print ("shp:", shp)
#print ("opt_exp_list:", len(opt_exp_list))
if shp[1] % block_size == 0:
# shp[1] is divisible by block size
# Therefore just one tensor will be created
tensor = torch.reshape(tensor, (shp[0], number_of_blocks, block_size))
opt_exp_list = opt_exp_list.unsqueeze(0) ##### Need Unit test
tensor = to_exponent_mantissa_width(tensor, opt_exp_list, mantissa, quant_dim=len(tensor.shape)-1)
tensor = torch.reshape(tensor, shp)
else:
raise ValueError("Channel is not divisible by channel group while bfp quantizeing the FC")
return tensor
```
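A quick usage sketch for the online fully-connected transform. With `chnl_group=-1` the whole channel dimension shares one exponent; the behaviour of `bfp_quantize` (imported from `Utils`, not shown here) is assumed to round values to the block-shared exponent while preserving the tensor shape.
```python
import torch
from lib import BFPFullyConnet  # assumed import path, matching the rest of the repository

logits = torch.randn(8, 101)  # [batch, channel]
quantized = BFPFullyConnet.transform_fc_online(logits, exponent=4, mantissa=8, chnl_group=-1)
# quantized is expected to keep the [8, 101] shape, with values rounded to the shared exponent
```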
#### File: Static_BFP_CNN/lib/BFPWeight.py
```python
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import math
import time
# PyTorch
import torch
from Utils import bfp_quantize
def transform_weight(tensor, exponent, mantissa, filter_group):
# Quantize the weight tensor along filter dimension
# Here we require the weight to have the shape: [filter, channel, k, k]
# filter_group: the number of filters in one group, where all filters in a group share the same exponent
shp = tensor.shape
number_of_blocks = math.ceil(shp[0]/filter_group)
if shp[0] % filter_group == 0:
# shp[0] is divisible by the filter group size
# Therefore just one tensor will be created
tensor = torch.reshape(tensor, (number_of_blocks, filter_group*shp[1]*shp[2]*shp[3]))
tensor = bfp_quantize(tensor, exponent, mantissa, quant_dim=len(tensor.shape)-1)
tensor = torch.reshape(tensor, (shp[0], shp[1], shp[2], shp[3]))
return tensor
else:
# shp[0] is not divisible by the filter group
# Therefore two tensors will be created
input('Filter is not divisible by filter group')
if number_of_blocks == 1:
# This means that the depth is less than the block size, so just one tensor will be created
tensor = torch.reshape(tensor, (1, shp[0]*shp[1]*shp[2]*shp[3]))
tensor = bfp_quantize(tensor, exponent, mantissa, quant_dim=len(tensor.shape)-1)
tensor = torch.reshape(tensor, (shp[0], shp[1], shp[2], shp[3]))
return tensor
else:
# Separate two part, tensor1 contain (number_of_blocks-1), tensor2 contain the rest
first_filter = ((number_of_blocks-1)*filter_group)
tensor1 = tensor[0 : first_filter, :, :, :]
t1_shp = tensor1.shape
tensor2 = tensor[first_filter : shp[0], :, :, :]
t2_shp = tensor2.shape
# Perform quantization
tensor1 = torch.reshape(tensor1, (number_of_blocks-1, filter_group*shp[1]*shp[2]*shp[3]))
tensor2 = torch.reshape(tensor2, (1, (shp[0]-first_filter)*shp[1]*shp[2]*shp[3]))
tensor1 = bfp_quantize(tensor1, exponent, mantissa, quant_dim=len(tensor1.shape)-1)
tensor2 = bfp_quantize(tensor2, exponent, mantissa, quant_dim=len(tensor2.shape)-1)
# Reshape and put back to original tensor
tensor1 = torch.reshape(tensor1, t1_shp)
tensor2 = torch.reshape(tensor2, t2_shp)
tensor[0 : first_filter, :, :, :] = tensor1
tensor[first_filter : shp[0], :, :, :] = tensor2
return tensor
return tensor
```
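A usage sketch for the weight transform, assuming the same `bfp_quantize` behaviour as above. `filter_group` must divide the number of output filters to stay on the fast path:
```python
import torch
from lib import BFPWeight  # assumed import path

weight = torch.randn(64, 3, 3, 3)  # [filter, channel, k, k]
# 64 filters grouped 4 at a time -> 16 blocks, each block sharing one exponent
quantized_weight = BFPWeight.transform_weight(weight, exponent=4, mantissa=8, filter_group=4)
# quantized_weight keeps the original [64, 3, 3, 3] shape
```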
#### File: Static_BFP_CNN/models/model_factory_3d.py
```python
from models import c3d
from models import r3d
from models import res3dnet
from lib.BFPConvertor import BFPConvertor_3D
from benchmark import c3d as br_c3d
from benchmark import r3d as br_r3d
models_map = { "c3d" : c3d.c3d,
"lq_c3d" : c3d.c3d_lq,
"c3d_bfp" : c3d.c3d_bfp,
"br_c3d_bfp" : br_c3d.c3d_bfp,
"r3d_18" : r3d.r3d_18,
"r3d_18_bfp" : r3d.r3d_18_bfp,
"br_r3d_18_bfp" : br_r3d.r3d_18_bfp,
"r3d_34" : r3d.r3d_34,
"r3d_34_bfp" : r3d.r3d_34_bfp,
"br_r3d_34_bfp" : br_r3d.r3d_34_bfp
}
def get_network(model_name, pretrained=True, bfp=False, group=1, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None, is_online=False, exp_act='kl'):
if (bfp):
bfp_model_name = model_name + "_bfp"
if is_online:
bfp_model_name = "br_" + bfp_model_name
assert opt_exp_act_list is not None, "When constructing the BFP model, opt_exp_act_list must be non-empty"
golden_model = models_map[model_name](101, pretrained=True)
c3d_converter = BFPConvertor_3D(mantisa_bit, exp_bit)
bfp_model = models_map[bfp_model_name](num_classes=101, pretrained=True, exp_bit=exp_bit, mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list)
conv_isbias = "c3d" in model_name
bfp_model, weight_exp_list = c3d_converter(golden_model, bfp_model, group, conv_isbias=conv_isbias, is_kl=True, exp_act=exp_act)
return bfp_model, weight_exp_list
else:
return models_map[model_name](num_classes=101, pretrained=True), None
```
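For orientation, a sketch of how the factory is typically called. Note that `num_classes` is hard-coded to 101 (UCF101) inside `get_network`, and the constructors load pretrained checkpoints from paths baked into the model files, so those files are assumed to exist locally:
```python
from models import model_factory_3d

# Floating-point baseline
model, _ = model_factory_3d.get_network("r3d_18", bfp=False)

# BFP-quantized variant (requires a precomputed activation-exponent list `exp_list`)
# bfp_model, weight_exp_list = model_factory_3d.get_network(
#     "r3d_18", bfp=True, group=1, mantisa_bit=8, exp_bit=4,
#     opt_exp_act_list=exp_list, is_online=False, exp_act="kl")
```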
#### File: Static_BFP_CNN/models/resnet.py
```python
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
import numpy as np
# Internal
from lib.BFPConvertor import BFPConvertor
from lib import BFPActivation
from lib import BFPFullyConnet
# PyTorch
import torch
import torch.nn as nn
import torchvision
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=True, dilation=dilation) #enable bias for fused BN
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=True)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, exp_bit=8, mantisa_bit=8,
start_exp_ind=0, opt_exp_act_list=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.start_exp_ind = start_exp_ind
def forward(self, x):
residual = x
out = self.conv1(x)
#out = self.bn1(out)
# disable bn for fused BN
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind])
out = self.relu(out)
out = self.conv2(out)
#out = self.bn2(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+1])
if self.downsample is not None:
residual = self.downsample(x)
residual = BFPActivation.transform_activation_offline(residual, self.exp_bit,
self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, exp_bit=8, mantisa_bit=8,
start_exp_ind=0, opt_exp_act_list=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2= conv3x3(planes,planes,stride=stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes*self.expansion)
self.bn3 = nn.BatchNorm2d(planes*self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.start_exp_ind = start_exp_ind
def forward(self, x):
residual = x
out = self.conv1(x)
#out = self.bn1(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind])
out = self.relu(out)
out = self.conv2(out)
#out = self.bn2(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+1])
out = self.relu(out)
out = self.conv3(out)
#out = self.bn3(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
if self.downsample is not None:
# Get a max of two list
#max_exp_act_list = np.maximum.reduce([self.opt_exp_act_list[self.start_exp_ind+2], self.opt_exp_act_list[self.start_exp_ind+3]]).tolist()
residual = self.downsample(x)
# bfp quantize both tensor for shortcut using the max exponent list
# since they have the same exp list, no need for realignment
residual = BFPActivation.transform_activation_offline(residual, self.exp_bit,
self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+3])
#out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, max_exp_act_list)
else:
# bfp quantize both tensor for shortcut using the third exponent list
residual = BFPActivation.transform_activation_offline(residual, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
out+=residual
out = self.relu(out)
return out
class BlockResNet(nn.Module):
def __init__(self, block, layers,num_classes = 1000, exp_bit=8, mantisa_bit=8, opt_exp_act_list=None):
self.inplanes = 64
super(BlockResNet, self).__init__()
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=True)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2)
self.layer2 = self._make_layer(block, 128, layers[1],stride=2, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2 + (layers[0]*3+1))
self.layer3 = self._make_layer(block, 256, layers[2],stride=2, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2 + (layers[0]*3+1) + (layers[1]*3+1))
self.layer4 = self._make_layer(block, 512, layers[3],stride=2, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2 + (layers[0]*3+1) + (layers[1]*3+1) + (layers[2]*3+1))
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512*block.expansion, num_classes)
#print ("fc exponent:", self.opt_exp_act_list[-1])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#nn.init.constant_(m.alpha, 1)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, exp_bit=8, mantisa_bit=8, opt_exp_act_list=None, start_exp_ind=0):
downsample = None
if stride!=1 or self.inplanes !=planes*block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
# Fused BN
#nn.BatchNorm2d(planes*block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, exp_bit=exp_bit,
mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list, start_exp_ind=start_exp_ind))
start_exp_ind = start_exp_ind + 3 + int(downsample is not None)
self.inplanes = planes*block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, exp_bit=exp_bit,
mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list, start_exp_ind=start_exp_ind))
start_exp_ind = start_exp_ind + 3
return nn.Sequential(*layers)
def forward(self, x):
x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[0])
x = self.conv1(x)
#x = self.bn1(x) #Fused BN
x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[1])
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = BFPFullyConnet.transform_fc_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[-1])
return x
# bfp indicates whether to insert BFP quantization during inference
def resnet101(pretrained=False, bit_nmb=8, num_classes=1000, bfp=False, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
"""Constructs a ResNet101 model
"""
if (bfp):
block_model = BlockResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=opt_exp_act_list)
if pretrained==True:
golden_model = torchvision.models.resnet101(pretrained=True)
resnet_converter = BFPConvertor(mantisa_bit, exp_bit)
block_model = resnet_converter(golden_model, block_model)
else:
if pretrained==True:
model = torchvision.models.resnet101(pretrained=True)
else:
model = torchvision.models.resnet101()
block_model = model
return block_model
def resnet50(pretrained=False, num_classes=1000, bfp=False,
group=1, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
""" Constructs a ResNet50 model
"""
weight_exp_list = []
if (bfp):
#print ("Shape of exp list:", np.shape(opt_exp_act_list))
#print (opt_exp_act_list[0])
block_model = BlockResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=opt_exp_act_list)
if pretrained==True:
golden_model = torchvision.models.resnet50(pretrained=True)
resnet_converter = BFPConvertor(mantisa_bit, exp_bit)
block_model, weight_exp_list = resnet_converter(golden_model, block_model, group, is_kl=False)
else:
if pretrained==True:
model = torchvision.models.resnet50(pretrained=True)
else:
model = torchvision.models.resnet50()
block_model = model
#block_model = torch.nn.DataParallel(block_model).cuda()
return block_model, weight_exp_list
def resnet34(pretrained=False, bit_nmb=8, num_classes=1000, bfp=False, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
""" Constructs a ResNet34 model
"""
if (bfp):
print ("Shape of exp list:", np.shape(opt_exp_act_list))
block_model = BlockResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=opt_exp_act_list)
if pretrained==True:
golden_model = torchvision.models.resnet34(pretrained=True)
resnet_converter = BFPConvertor(mantisa_bit, exp_bit)
block_model = resnet_converter(golden_model, block_model)
else:
if pretrained==True:
model = torchvision.models.resnet34(pretrained=True)
else:
model = torchvision.models.resnet34()
block_model = model
return block_model
if __name__ == "__main__":
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
with torch.no_grad():
net, _ = resnet50()
dev = "gpu"
if dev == "cpu":
inputs = torch.rand(1, 3, 224, 224)
net.cpu()
test_iter = 100
else:
inputs = torch.rand(1, 3, 224, 224).cuda()
net.cuda()
test_iter = 10000
net.eval()
start = time.time()
for i in range(test_iter):
outputs = net.forward(inputs)
end = time.time()
avg_time = ((end-start) * 1000) / test_iter
print(avg_time, " ms")
```
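As a cross-check of the `start_exp_ind` bookkeeping in `BlockResNet`, the number of per-layer exponent lists expected for a Bottleneck ResNet can be computed directly. The sketch assumes `opt_exp_act_list` carries one trailing entry for the fully connected layer, as the commented-out `opt_exp_act_list[-1]` print suggests:
```python
layers = [3, 4, 6, 3]        # ResNet-50 Bottleneck counts
entries = 2                  # index 0: quantized input, index 1: after conv1
for n in layers:
    entries += n * 3 + 1     # 3 convs per block, plus 1 for the downsample branch of the first block
print(entries)               # 54 convolution-side exponent lists (indices 0..53)
# plus one trailing entry for the fully connected layer -> 55 in total (assumption)
```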
#### File: Static_BFP_CNN/tests/utils_tests.py
```python
import unittest
from lib import Utils
import torch
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class Test_Utils(unittest.TestCase):
def test_smooth_hist(self):
test_tensor = torch.Tensor([[1, 2, 3, 4]])
test_hist = torch.histc(test_tensor, bins=5, min=0, max=4)
#print("test_hist:", test_hist)
test_hist = Utils.smooth_hist(test_hist, eps=0.5)
golden_hist = torch.Tensor([0.5, 0.875, 0.875, 0.875, 0.875])
is_equal = torch.equal(test_hist, golden_hist)
self.assertEqual(is_equal, True)
def test_find_exp_KL(self):
test_tensor = torch.Tensor([0, 1, 1, 3, 3, 3, 5, 5])
#test_tensor = torch.arange(25)
#test_tensor = torch.reshape(test_tensor, (1, 5, 5))
test_tensor = test_tensor.float()
test_exp = Utils.find_exp_KL(test_tensor, 8, 8, num_bins=6)
golden_exp = torch.Tensor([5])
print(test_exp)
is_equal = torch.equal(test_exp, golden_exp)
self.assertEqual(is_equal, True)
if __name__ == '__main__':
unittest.main()
```
#### File: Static_BFP_CNN/tools/linear_quant_3d.py
```python
from models import model_factory_3d
from lib import Stat_Collector
from lib import Utils
from models import inceptionv4
from lib.dataset import VideoDataset
#from models import bfp_modules
# The Basic Library
import argparse
import os
import logging
import numpy as np
import sys
import copy
import math
import time
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Other Required Library
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from tensorboardX import SummaryWriter
import pretrainedmodels
from torch.utils.data import DataLoader
import random
random.seed(666)
torch.manual_seed(666)
np.random.seed(666)
writer = SummaryWriter("./tensorboard/statistics")
modules_map = { "Linear" : nn.Linear,
"BatchNorm3d" : nn.BatchNorm3d,
"Conv3d" : nn.Conv3d
}
# Perform Block Floating Point (BFP) quantization on the given model
def bfp_quant(model_name, dataset_dir, num_classes, gpus, mantisa_bit, exp_bit, batch_size=1,
num_bins=8001, eps=0.0001, num_workers=2, num_examples=10, std=None, mean=None,
resize=256, crop=224, exp_act=None, bfp_act_chnl=1, bfp_weight_chnl=1, bfp_quant=1,
target_module_list=None, act_bins_factor=3, fc_bins_factor=4, is_online=0):
# Setting up gpu environment
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(gpus)
# Setting up dataload for evaluation
valdir = os.path.join(dataset_dir, 'val')
normalize = transforms.Normalize(mean=mean,
std=std)
train_dataloader = DataLoader(VideoDataset(dataset='ucf101', split='train',clip_len=16, model_name=model_name), batch_size=batch_size, shuffle=True,
num_workers=4)
val_dataloader = DataLoader(VideoDataset(dataset='ucf101', split='val', clip_len=16, model_name=model_name), batch_size=num_examples, num_workers=4)
test_dataloader = DataLoader(VideoDataset(dataset='ucf101', split='test', clip_len=16, model_name=model_name), batch_size=batch_size, num_workers=4)
# # for collect intermediate data use
# collect_loader = torch.utils.data.DataLoader(
# datasets.ImageFolder(valdir, transforms.Compose([
# transforms.Resize(resize),
# transforms.CenterCrop(crop),
# transforms.ToTensor(),
# normalize,
# ])),
# batch_size=num_examples, shuffle=False,
# num_workers=num_workers, pin_memory=True)
# # for validate the bfp model use
# val_loader = torch.utils.data.DataLoader(
# datasets.ImageFolder(valdir, transforms.Compose([
# transforms.Resize(resize),
# transforms.CenterCrop(crop),
# transforms.ToTensor(),
# normalize,
# ])),
# batch_size=batch_size, shuffle=False,
# num_workers=num_workers, pin_memory=True)
exp_act_list = None
lq_model, weight_exp_list = model_factory_3d.get_network(model_name, pretrained=True, bfp=(bfp_quant==1), group=bfp_weight_chnl, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=exp_act_list, is_online=is_online, exp_act=exp_act)
lq_model.eval()
lq_model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
lq_model.fuse_model()
lq_model_prepared = torch.quantization.prepare_qat(lq_model)
#torch.cuda.empty_cache()
inpt_fp32 = torch.randn(1, 3, 16, 112, 112)
lq_model_prepared(inpt_fp32)
lq_model_8bit = torch.quantization.convert(lq_model_prepared)
logging.info("Evaluating linear-quant model....")
correct = 0
total = 0
lq_model_8bit.cuda()
lq_model_8bit.eval()
with torch.no_grad():
for i_batch, (images, lables) in enumerate(test_dataloader):
images = images.cuda()
outputs = lq_model_8bit(images)
#outputs = model(images)
probs = nn.Softmax(dim=1)(outputs)
_, predicted = torch.max(probs, 1)
predicted = predicted.cpu()
total += lables.size(0)
correct += (predicted == lables).sum().item()
logging.info("Current images: %d" % (total))
#if (total > 2000):
# break
logging.info("Total: %d, Accuracy: %f " % (total, float(correct / total)))
logging.info("Floating conv weight and fc(act and weight), act bins_factor is %d,fc bins_factor is %d, exp_opt for act is %s, act group is %d"%(act_bins_factor, fc_bins_factor, exp_act, bfp_act_chnl))
writer.close()
if __name__ == '__main__':
# Parse the command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="resnet34", type=str, required=True,
help="Select the model for bfp quant now only support one of {resnet34,resnet50,resnet101,inceptionv4, mobilenetv2}")
parser.add_argument("--dataset_dir", default="/dataset/", type=str, required=True,
help="Dataset to evaluate the bfp_quantied model. Pls use absolute path point to the dataset")
parser.add_argument("--mantisa_bit", default=8, required=True, type=int, help="The bitwidth of mantissa in block floating point representation")
parser.add_argument("--exp_bit", default=8, required=True, type=int, help="The bitwidth of mantissa in block floating point representation")
parser.add_argument("--batch_size", default=1, type=int, help="Batch size on each gpu when evaluation of bfp_quantied model")
parser.add_argument("--num_bins", default=8001, type=int, help="Used to construct the histogram/distribution for intermidate results")
parser.add_argument("--eps", default=0.0001, type=float, help="Used to smooth the histogram/distribution")
parser.add_argument("--num_workers", default=2, type=int, help="Number of workers in data loader when evaluation of bfp_quantied model")
parser.add_argument("--num_examples", default=10, type=int, help="Number of examples to collect internal outputs for bfp quant")
parser.add_argument("--num_classes", default=1000, type=int, required=True, help="The number of classes when evaluation of bfp_quantied model")
parser.add_argument("--gpus", default="0,1", type=str, required=True, help="GPUs id, separated by comma withougt space, for example: 0,1,2")
parser.add_argument("--std", default="0.229,0.224,0.225", type=str, help="std values for image preprocessing")
parser.add_argument("--mean", default="0.485,0.456,0.406", type=str, help="mean values for image preprocessing")
parser.add_argument("--resize", default=256, type=int, help="The size of resized image, resize shoule be lager than crop")
parser.add_argument("--crop", default=224, type=int, help="The size of cropped image, crop should be less than resize")
parser.add_argument("--exp_act", default="kl", type=str, help="The way to determine the exponents in activation, suppor {max, kl}")
parser.add_argument("--bfp_act_chnl", default=1, type=int, help="Number of channels per block in activation, -1 means whole")
parser.add_argument("--bfp_weight_chnl", default=1, type=int, help="Number of channels per block in weight, -1 means whole")
parser.add_argument("--bfp_quant", default=1, type=int, help="1 indicate using bfp model, 0 indicate using floating-point model")
parser.add_argument("--hooks", default="Conv2d,Linear", type=str, help="The name of hooked nn modules, one of{BatchNorm2d,Linear,Conv2d}")
parser.add_argument("--act_bins_factor", default=3, type=int, help="The bins_factor for constructing act histogram")
parser.add_argument("--fc_bins_factor", default=3, type=int, help="The bins_factor for constructing act histogram")
parser.add_argument("--is_online", default=0, type=int, help="Use online BFP quantization for benchmark")
args = parser.parse_args()
# Split the argument
gpus = args.gpus.split(",")
gpus_num = len(gpus)
logging.info("totally {} gpus are using".format(gpus_num))
# String to float
std_str = args.std.split(",")
std = []
for std_v in std_str:
std.append((float)(std_v))
mean_str = args.mean.split(",")
mean = []
for mean_v in mean_str:
mean.append((float)(mean_v))
target_module_list = []
hooks_str = args.hooks.split(",")
for hook in hooks_str:
target_module_list.append(modules_map[hook])
bfp_quant(model_name = args.model_name, dataset_dir = args.dataset_dir, num_classes = args.num_classes, gpus = gpus,
mantisa_bit = args.mantisa_bit, exp_bit = args.exp_bit, num_bins = args.num_bins, eps = args.eps,
batch_size = args.batch_size, num_workers = args.num_workers, num_examples = args.num_examples, std=std, mean=mean,
resize=args.resize, crop=args.crop, exp_act=args.exp_act, bfp_act_chnl=args.bfp_act_chnl,
bfp_weight_chnl=args.bfp_weight_chnl, bfp_quant=args.bfp_quant, target_module_list=target_module_list,
act_bins_factor=args.act_bins_factor, fc_bins_factor=args.fc_bins_factor, is_online=args.is_online)
``` |
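The script above relies on PyTorch's eager-mode quantization-aware-training API (`prepare_qat` followed by `convert`). A stripped-down sketch of that flow on a toy model, independent of this repository, is shown below; the calibration forward pass stands in for the `inpt_fp32` call in the script:
```python
import torch
import torch.nn as nn

toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 30 * 30, 10))
toy.train()
toy.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
prepared = torch.quantization.prepare_qat(toy)   # inserts fake-quant observers
prepared(torch.randn(1, 3, 32, 32))              # calibration / QAT forward pass
prepared.eval()
quantized = torch.quantization.convert(prepared) # swaps in quantized modules
```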
{
"source": "joshbenner/ansible-role-certbot",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('clients')
def test_certbot_installed(host):
file = host.file('/opt/certbot/certbot-auto')
assert file.exists
assert file.is_file
assert file.user == 'root'
assert file.mode == 0o755
def test_wrapper_installed(host):
assert host.file('/usr/local/sbin/certbot').exists
def test_cron_configured(host):
file = host.file('/etc/cron.d/ansible-certbot')
assert file.exists
assert file.is_file
assert 'certbot-auto' in file.content_string
def test_cert_issued(host):
file = host.file('/etc/certbot/live/mycert/cert.pem')
assert file.exists
def test_wildcard_issued(host):
file = host.file('/etc/certbot/live/wildcard/cert.pem')
assert file.exists
``` |
{
"source": "joshbenner/ansible-role-uchiwa",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_uchiwa_installed(host):
assert host.package('uchiwa').is_installed
def test_uchiwa_service(host):
uchiwa = host.service('uchiwa')
assert uchiwa.is_enabled
assert uchiwa.is_running
def test_uchiwa_listening(host):
assert host.socket('tcp://0.0.0.0:3000').is_listening
def test_uchiwa_config(host):
config = host.file('/etc/sensu/uchiwa.json')
assert config.exists
assert config.is_file
assert config.user == 'uchiwa'
assert config.group == 'uchiwa'
assert config.mode == 0o600
assert config.contains('"name": "Site 1"')
``` |
{
"source": "joshbenner/docker-ansible",
"score": 2
} |
#### File: default/tests/test_docker_ce.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('docker-ce')
def test_docker_engine_installed(host):
assert host.package('docker-ce').is_installed
def test_docker_engine_version(host):
assert host.package('docker-ce').version.startswith('18.06')
``` |
{
"source": "joshbenner/imbi",
"score": 2
} |
#### File: imbi/endpoints/authentication_tokens.py
```python
import re
import uuid
import ulid
from tornado import web
from . import base
class RequestHandler(base.ValidatingRequestHandler):
ENDPOINT = 'authentication-tokens'
CREATE_SQL = re.sub(r'\s+', ' ', """
INSERT INTO v1.authentication_tokens (token, "name", username)
VALUES (%(token)s, %(name)s, %(username)s)
RETURNING username, token, "name",
created_at, expires_at, last_used_at
""")
DELETE_SQL = re.sub(r'\s+', ' ', """
DELETE FROM v1.authentication_tokens
WHERE username = %(username)s
AND token = %(token)s""")
GET_SQL = re.sub(r'\s+', ' ', """
SELECT token, "name", username, created_at, expires_at, last_used_at
FROM v1.authentication_tokens
WHERE username = %(username)s
ORDER BY created_at""")
async def delete(self, token):
result = await self.postgres_execute(self.DELETE_SQL, {
'token': token,
'username': self.current_user.username
}, 'delete-authentication-token')
if not result.row_count:
raise web.HTTPError(404, reason='Item not found')
self.set_status(204, reason='Item Deleted')
async def get(self):
result = await self.postgres_execute(
self.GET_SQL, {'username': self.current_user.username},
'get-authentication-tokens')
self.send_response(result.rows)
async def post(self):
values = self.get_request_body()
values.update({
'token': uuid.UUID(ulid.ULID().hex),
'username': self.current_user.username
})
result = await self.postgres_execute(
self.CREATE_SQL, values, 'create-authentication-token')
self.send_response(result.row)
```
#### File: imbi/endpoints/base.py
```python
import asyncio
import datetime
import logging
import typing
import uuid
from email import utils
import jsonpatch
import problemdetails
import sprockets_postgres as postgres
from openapi_core.deserializing.exceptions import DeserializeError
from openapi_core.schema.media_types.exceptions import InvalidContentType
from openapi_core.templating.paths.exceptions import \
OperationNotFound, PathNotFound
from openapi_core.unmarshalling.schemas.exceptions import ValidateError
from openapi_core.validation.exceptions import InvalidSecurity
from sprockets.http import mixins
from sprockets.mixins import mediatype
from tornado import httputil, web
from imbi import session, user, version
LOGGER = logging.getLogger(__name__)
def require_permission(permission):
"""Decorator function for requiring a permission string for an endpoint
:param str permission: The permission string to require
:raises: problemdetails.Problem
"""
def _require_permission(f):
def wrapped(self, *args, **kwargs):
"""Inner-wrapping of the decorator that performs the logic"""
if not self._current_user or \
not self._current_user.has_permission(permission):
if self._respond_with_html:
return self.render(
'index.html',
javascript_url=self.application.settings.get(
'javascript_url'))
LOGGER.info('%r does not have the "%s" permission',
self._current_user, permission)
raise problemdetails.Problem(
status_code=403, title='Unauthorized')
return f(self, *args, **kwargs)
return wrapped
return _require_permission
class RequestHandler(postgres.RequestHandlerMixin,
mixins.ErrorLogger,
problemdetails.ErrorWriter,
mediatype.ContentMixin,
web.RequestHandler):
"""Base RequestHandler class used for recipients and subscribers."""
APPLICATION_JSON = 'application/json'
TEXT_HTML = 'text/html'
NAME = 'Base'
ITEM_NAME = ''
def __init__(self,
application,
request: httputil.HTTPServerRequest,
**kwargs):
super().__init__(application, request, **kwargs)
self.logger = logging.getLogger(f'imbi.endpoints.{self.NAME}')
self.session: typing.Optional[session.Session] = None
self._current_user: typing.Optional[user.User] = None
self._links = {}
async def prepare(self) -> None:
"""Prepare the request handler for the request. If the application
is not ready return a ``503`` error.
Checks for a session cookie and if present, loads the session into
the current user and authenticates it. If authentication fails,
the current user and cookie is cleared.
"""
if self.application.settings.get('javascript_url'):
self.set_header('Access-Control-Allow-Origin', '*')
if not self.application.ready_to_serve:
return self.send_error(503, reason='Application not ready')
self.session = session.Session(self)
await self.session.initialize()
self._current_user = await self.get_current_user()
future = super().prepare()
if asyncio.isfuture(future):
await future
def on_finish(self) -> None:
"""Invoked after a request has completed"""
super().on_finish()
metric_id = '{}.{}'.format(self.NAME, self.request.method)
self.application.loop.add_callback(
self.application.stats.incr,
'response.{}.{}'.format(metric_id, self.get_status()))
self.application.loop.add_callback(
self.application.stats.add_duration,
'request.{}.{}'.format(metric_id, self.get_status()),
self.request.request_time())
def compute_etag(self) -> None:
"""Override Tornado's built-in ETag generation"""
return None
async def get_current_user(self) -> typing.Optional[user.User]:
"""Used by the system to manage authentication behaviors"""
if self.session and self.session.user:
return self.session.user
token = self.request.headers.get('Private-Token', None)
if token:
current_user = user.User(self.application, token=token)
if await current_user.authenticate():
return current_user
def get_template_namespace(self) -> dict:
"""Returns a dictionary to be used as the default template namespace.
The results of this method will be combined with additional defaults
in the :mod:`tornado.template` module and keyword arguments to
:meth:`~tornado.web.RequestHandler.render`
or :meth:`~tornado.web.RequestHandler.render_string`.
"""
namespace = super(RequestHandler, self).get_template_namespace()
namespace.update({'version': version})
return namespace
def send_response(self, value: typing.Union[dict, list]) -> None:
"""Send the response to the client"""
if 'self' not in self._links:
self._add_self_link(self.request.path)
self._add_link_header()
if hasattr(self, 'TTL') and \
not self.request.headers.get('Pragma') == 'no-cache':
self._add_response_caching_headers(self.TTL)
super().send_response(value)
def set_default_headers(self) -> None:
"""Override the default headers, setting the Server response header"""
super().set_default_headers()
self.set_header('Server', self.settings['server_header'])
def write_error(self, status_code, **kwargs):
if self._respond_with_html:
return self.render(
'error.html',
javascript_url=self.application.settings.get('javascript_url'),
status_code=status_code, **kwargs)
super().write_error(status_code, **kwargs)
def _add_last_modified_header(self, value: datetime.datetime) -> None:
"""Add a RFC-822 formatted timestamp for the Last-Modified HTTP
response header.
"""
if not value:
return
self.set_header('Last-Modified', self._rfc822_date(value))
def _add_link_header(self) -> None:
"""Takes the accumulated links and creates a link header value"""
links = []
for rel, path in self._links.items():
links.append('<{}://{}{}>; rel="{}"'.format(
self.request.protocol, self.request.host, path, rel))
if links:
self.add_header('Link', ','.join(links))
def _add_self_link(self, path: str) -> None:
"""Adds the self Link response header"""
self._links['self'] = path
def _add_response_caching_headers(self, ttl: int) -> None:
"""Adds the cache response headers for the object being returned."""
self.add_header('Cache-Control', 'public, max-age={}'.format(ttl))
def _on_postgres_timing(self,
metric_name: str,
duration: float) -> None:
"""Invoked by sprockets-postgres after each query"""
self.application.loop.add_callback(
self.application.stats.add_duration,
'postgres.{}'.format(metric_name), duration)
@property
def _respond_with_html(self) -> bool:
"""Returns True if the current response should respond with HTML"""
return self.get_response_content_type().startswith(self.TEXT_HTML)
@staticmethod
def _rfc822_date(value: datetime.datetime) -> str:
"""Return an RFC-822 formatted timestamp for the given value"""
return utils.format_datetime(value)
class AuthenticatedRequestHandler(RequestHandler):
"""RequestHandler base class for authenticated requests"""
async def prepare(self) -> None:
await super().prepare()
if not self._current_user:
if self._respond_with_html:
return await self.render(
'index.html',
javascript_url=self.application.settings.get(
'javascript_url'))
self.set_status(401)
await self.finish()
class ValidatingRequestHandler(AuthenticatedRequestHandler):
"""Validates the request against the OpenAPI spec"""
async def prepare(self) -> None:
await super().prepare()
try:
self.application.validate_request(self.request)
except DeserializeError as err:
self.logger.warning('Request failed to deserialize: %s', err)
raise problemdetails.Problem(
status_code=400, title='Bad Request', detail=str(err))
except InvalidSecurity as err:
self.logger.debug('Invalid OpenAPI spec security: %s', err)
raise problemdetails.Problem(
status_code=500, title='OpenAPI security error',
detail=str(err))
except OperationNotFound as err:
raise problemdetails.Problem(
status_code=405, title='Method Not Allowed', detail=str(err))
except InvalidContentType as err:
raise problemdetails.Problem(
status_code=415, title='Unsupported Media Type',
detail=str(err))
except PathNotFound as err:
self.logger.error('OpenAPI Spec Error: %s', err)
raise problemdetails.Problem(
status_code=500, title='OpenAPI Spec Error', detail=str(err))
except ValidateError as err:
self.logger.warning('Request failed to validate: %s', err)
raise problemdetails.Problem(
status_code=400, title='Bad Request',
detail='The request did not validate',
errors=[str(e).split('\n')[0] for e in err.schema_errors])
class CRUDRequestHandler(ValidatingRequestHandler):
"""CRUD request handler to reduce large amounts of duplicated code"""
NAME = 'default'
DEFAULTS = {}
ID_KEY: typing.Union[str, list] = 'id'
IS_COLLECTION = False
FIELDS = None
GET_NAME = None # Used to create link headers for POST requests
TTL = 300
DELETE_SQL: typing.Optional[str] = None
GET_SQL: typing.Optional[str] = None
PATCH_SQL: typing.Optional[str] = None
POST_SQL: typing.Optional[str] = None
async def delete(self, *args, **kwargs):
if self.DELETE_SQL is None:
self.logger.debug('DELETE_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
await self._delete(kwargs)
async def get(self, *args, **kwargs):
if self.GET_SQL is None:
self.logger.debug('GET_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
if self._respond_with_html:
return self.render(
'index.html',
javascript_url=self.application.settings.get('javascript_url'))
await self._get(kwargs)
async def patch(self, *args, **kwargs):
if self.PATCH_SQL is None:
self.logger.debug('PATCH_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
await self._patch(kwargs)
async def post(self, *args, **kwargs):
if self.POST_SQL is None:
self.logger.debug('POST_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
await self._post(kwargs)
def send_response(self, value: typing.Union[dict, list]) -> None:
"""Send the response to the client"""
if isinstance(value, list):
return super().send_response(value)
if not (self.request.method == 'GET' and self.IS_COLLECTION):
self._add_last_modified_header(
value.get('last_modified_at', value.get('created_at')))
for key in {'created_at', 'last_modified_at'}:
if key in value:
del value[key]
if self.ID_KEY:
if isinstance(self.ID_KEY, list):
args = [str(value[k]) for k in self.ID_KEY]
else:
args = [str(value[self.ID_KEY])]
try:
self._add_self_link(
self.reverse_url(self.ITEM_NAME or self.NAME, *args))
except (AssertionError, KeyError):
self.logger.debug('Failed to reverse URL for %s %r',
self.NAME, args)
self._add_link_header()
super().send_response(value)
async def _delete(self, kwargs):
result = await self.postgres_execute(
self.DELETE_SQL, self._get_query_kwargs(kwargs),
'delete-{}'.format(self.NAME))
if not result.row_count:
raise problemdetails.Problem(
status_code=404, title='Item not found')
self.set_status(204, reason='Item Deleted')
async def _get(self, kwargs):
result = await self.postgres_execute(
self.GET_SQL, self._get_query_kwargs(kwargs),
'get-{}'.format(self.NAME))
if not result.row_count or not result.row:
raise problemdetails.Problem(
status_code=404, title='Item not found')
for key, value in result.row.items():
if isinstance(value, uuid.UUID):
result.row[key] = str(value)
self.send_response(result.row)
def _get_query_kwargs(self, kwargs) -> dict:
if isinstance(self.ID_KEY, list):
return {k: kwargs[k] for k in self.ID_KEY}
return {self.ID_KEY: kwargs[self.ID_KEY]}
async def _patch(self, kwargs):
patch_value = self.get_request_body()
result = await self.postgres_execute(
self.GET_SQL, self._get_query_kwargs(kwargs),
'get-{}'.format(self.NAME))
if not result.row_count:
raise problemdetails.Problem(
status_code=404, title='Item not found')
original = dict(result.row)
for key in {'created_at', 'created_by',
'last_modified_at', 'last_modified_by'}:
del original[key]
for key, value in original.items():
if isinstance(value, uuid.UUID):
original[key] = str(value)
# Apply the patch to the current value
patch = jsonpatch.JsonPatch(patch_value)
updated = patch.apply(original)
# Bail early if there are no changes
if not {k: original[k] for k in original
if k in updated and original[k] != updated[k]}:
self._add_self_link(self.request.path)
self._add_link_header()
return self.set_status(304)
if isinstance(self.ID_KEY, list):
for key in self.ID_KEY:
updated['current_{}'.format(key)] = kwargs[key]
else:
updated['current_{}'.format(self.ID_KEY)] = kwargs[self.ID_KEY]
updated['username'] = self._current_user.username
result = await self.postgres_execute(
self.PATCH_SQL, updated,
'patch-{}'.format(self.NAME))
if not result.row_count:
raise problemdetails.Problem(
status_code=500, title='Failed to update record')
# Send the new record as a response
await self._get(self._get_query_kwargs(updated))
async def _post(self, kwargs) -> None:
values = self.get_request_body()
# Handle compound keys for child object CRUD
if isinstance(self.ID_KEY, list):
for key in self.ID_KEY:
if key not in values and key in kwargs:
values[key] = kwargs[key]
elif self.ID_KEY not in values and self.ID_KEY in kwargs:
values[self.ID_KEY] = kwargs[self.ID_KEY]
# Set defaults of None for all fields in insert
for name in self.FIELDS:
if name not in values:
values[name] = self.DEFAULTS.get(name)
values['username'] = self._current_user.username
result = await self.postgres_execute(
self.POST_SQL, values, 'post-{}'.format(self.NAME))
if not result.row_count:
self.logger.debug('No rows returned')
raise problemdetails.Problem(
status_code=500, title='Failed to create record')
# Return the record as if it were a GET
await self._get(self._get_query_kwargs(result.row))
class CollectionRequestHandler(CRUDRequestHandler):
DEFAULTS = {}
ID_KEY: typing.Union[str, list] = 'id'
    IS_COLLECTION = True
FIELDS = None
GET_NAME = None # Used to create link headers for POST requests
COLLECTION_SQL = """SELECT * FROM pg_tables WHERE schemaname = 'v1';"""
TTL = 300
async def get(self, *args, **kwargs):
result = await self.postgres_execute(
self.COLLECTION_SQL, kwargs,
metric_name='get-{}'.format(self.NAME))
self.send_response(result.rows)
class AdminCRUDRequestHandler(CRUDRequestHandler):
@require_permission('admin')
async def delete(self, *args, **kwargs):
await super().delete(*args, **kwargs)
@require_permission('admin')
async def get(self, *args, **kwargs):
await super().get(*args, **kwargs)
@require_permission('admin')
async def patch(self, *args, **kwargs):
await super().patch(*args, **kwargs)
@require_permission('admin')
async def post(self, *args, **kwargs):
await super().post(*args, **kwargs)
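# Illustrative sketch (not part of the original module): a minimal subclass
# showing how the CRUD handlers above are typically wired up. The table,
# columns, and SQL statements below are hypothetical placeholders.
class _ExampleWidgetRequestHandler(AdminCRUDRequestHandler):
    NAME = 'widget'
    ID_KEY = 'id'
    FIELDS = ['id', 'name']
    DEFAULTS = {'name': None}
    GET_SQL = 'SELECT * FROM v1.widgets WHERE id = %(id)s;'
    DELETE_SQL = 'DELETE FROM v1.widgets WHERE id = %(id)s;'
    POST_SQL = ('INSERT INTO v1.widgets (id, name) '
                'VALUES (%(id)s, %(name)s) RETURNING *;')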
```
#### File: imbi/endpoints/metrics.py
```python
import datetime
import isodate
from sprockets.mixins import mediatype
from tornado import web
class RequestHandler(mediatype.ContentMixin,
web.RequestHandler):
"""Returns internal metrics"""
ENDPOINT = 'metrics'
async def get(self):
"""Tornado RequestHandler GET request endpoint for reporting status"""
prune = self.get_argument('flush', 'false') == 'true'
self.send_response({
'counters': await self.application.stats.counters(prune),
'durations': await self.application.stats.durations(prune),
'postgres': await self.application.postgres_status(),
'uptime': isodate.duration_isoformat(
datetime.datetime.now(datetime.timezone.utc) -
self.application.started_at)})
def set_default_headers(self) -> None:
"""Override the default headers, setting the Server response header"""
super().set_default_headers()
self.set_header('Server', self.settings['server_header'])
```
#### File: imbi/endpoints/permissions.py
```python
from . import base
class RequestHandler(base.AuthenticatedRequestHandler):
NAME = 'permissions'
@base.require_permission('admin')
async def get(self):
self.send_response(list(self.settings['permissions']))
```
#### File: imbi/endpoints/ui.py
```python
from imbi.endpoints import base
class IndexRequestHandler(base.RequestHandler):
ENDPOINT = 'ui-index'
def get(self, *args, **kwargs):
if self.request.path == '/':
return self.redirect('/ui/')
self.render(
'index.html',
javascript_url=self.application.settings.get('javascript_url'))
class LoginRequestHandler(base.RequestHandler):
ENDPOINT = 'ui-login'
async def post(self, *args, **kwargs):
body = self.get_request_body()
if not await self.session.authenticate(
body.get('username'), body.get('password')):
self.logger.debug('Session failed to authenticate')
self.set_status(401)
self.send_response({'message': 'Authentication Failure'})
return
await self.session.save()
self.set_status(200)
self.send_response(self.session.user.as_dict())
class LogoutRequestHandler(base.RequestHandler):
ENDPOINT = 'ui-logout'
async def get(self, *args, **kwargs):
await self.session.clear()
self.send_response({'loggedOut': True})
class GroupsRequestHandler(base.CRUDRequestHandler):
ENDPOINT = 'ui-groups'
GET_SQL = 'SELECT name FROM v1.groups ORDER BY name ASC;'
TTL = 300
class UserRequestHandler(base.AuthenticatedRequestHandler):
ENDPOINT = 'ui-user'
def get(self, *args, **kwargs):
user = self.current_user.as_dict()
del user['password']
self.send_response(user)
```
#### File: tests/endpoints/test_status.py
```python
from tests import base
class AsyncHTTPTestCase(base.TestCase):
def test_status_ok(self):
response = self.fetch('/status', headers=self.headers)
self.assertEqual(response.code, 200)
self.validate_response(response)
def test_status_error(self):
self._app._ready_to_serve = False
response = self.fetch('/status', headers=self.headers)
self.assertEqual(response.code, 503)
self.validate_response(response)
``` |
{
"source": "joshbenner/pytagonist",
"score": 2
} |
#### File: joshbenner/pytagonist/setup.py
```python
import os
from setuptools import setup
from setuptools.command.sdist import sdist
from setuptools.command.develop import develop
from distutils.command.build import build
from distutils import log
from subprocess import call
SETUP_PATH = os.path.dirname(os.path.abspath(__file__))
DRAFTER_PATH = os.path.join(SETUP_PATH, 'drafter')
def build_drafter():
log.info('Building drafter')
code = call(['make', 'drafter'], cwd=DRAFTER_PATH)
if code != 0:
raise RuntimeError('Cannot build drafter library')
class Build(build):
"""
Custom builder that runs drafter Makefile instead of build_clib.
This is to avoid having to reproduce all the build configurations of drafter
(and its dependencies).
"""
def run(self):
build_drafter()
build.run(self)
class Develop(develop):
"""
Custom develop-mode builder that runs Makefile.
"""
def run(self):
build_drafter()
develop.run(self)
class SourceDistribution(sdist):
"""
Custom source distribution prep that makes sure we don't mistakenly include
build output in the source distribution.
While MANIFEST.in can do this, keeping the MANIFEST.in up-to-date is much
more difficult than just asking the dependencies to clean themselves first.
"""
def run(self):
log.info('Cleaning drafter')
code = call(['make', 'distclean'], cwd=DRAFTER_PATH)
if code != 0:
raise RuntimeError('Cannot clean drafter library')
sdist.run(self)
setup(
name='pytagonist',
setup_requires=['setuptools_scm', 'cffi>=1.0.0,<2.0.0'],
use_scm_version=True,
cmdclass={
'build': Build,
'develop': Develop,
'sdist': SourceDistribution
},
py_modules=['pytagonist'],
url='https://github.com/joshbenner/pytagonist',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Wrapper for Drafter library, a C parser for APIBlueprint.',
install_requires=[
'cffi>=1.0.0,<2.0.0'
],
cffi_modules=[
'./build_drafter.py:ffibuilder'
],
extras_require={
'test': ['pytest']
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Documentation',
'Topic :: Text Processing :: Markup'
]
)
``` |
{
"source": "joshbenner/python-refract",
"score": 2
} |
#### File: python-refract/tests/test_member.py
```python
import pytest
from refract import MemberElement, Namespace
def test_member_native_value():
me = MemberElement(('foo', 'bar'), namespace=Namespace())
assert me.native_value == {'key': 'foo', 'value': 'bar'}
def test_member_bad_set_content():
with pytest.raises(ValueError):
MemberElement(('foo',))
def test_member_direct_from_refract():
with pytest.raises(NotImplementedError):
MemberElement.from_refract({}, Namespace())
```
#### File: python-refract/tests/test_namespace.py
```python
import pytest
from refract.namespace import Namespace, ElementClassNotFound
from refract.elements import *
def test_namespace_default():
n = Namespace()
assert len(n.element_classes) > 0
assert isinstance(n.element(1), NumberElement)
def test_namespace_register():
n = Namespace()
class FooElement(Element):
element = 'foo'
n.register_element_class(FooElement)
assert n.element_classes['foo'] == FooElement
def test_namespace_register_rename():
n = Namespace()
class FooElement(Element):
element = 'foo'
n.register_element_class(FooElement, name='bar')
assert n.element_classes['bar'] == FooElement
assert 'foo' not in n.element_classes
def test_namespace_detect():
n = Namespace()
class Foo(object):
pass
class FooElement(Element):
element = 'foo'
n.register_element_class(FooElement)
n.add_detection(lambda v: isinstance(v, Foo), FooElement)
assert n.detected_element_class(Foo()) == FooElement
def test_namespace_detect_prepend():
n = Namespace()
class FooElement(Element):
element = 'foo'
n.add_detection(lambda v: v is None, FooElement, prepend=True)
assert n.detected_element_class(None) == FooElement
def test_namespace_unregister():
n = Namespace()
n.unregister_element_class('null')
assert 'null' not in n.element_classes
def test_namespace_element_not_detected():
n = Namespace(no_defaults=True)
with pytest.raises(ElementClassNotFound):
n.detected_element_class('foo')
```
#### File: python-refract/tests/test_string.py
```python
import pytest
from refract import StringElement
def test_string_element_name():
assert StringElement().element == 'string'
def test_string_default_value():
assert StringElement().native_value == ''
def test_string_require_type():
with pytest.raises(ValueError):
StringElement(5)
def test_string_length():
assert StringElement('foo').length == 3
``` |
{
"source": "joshbenner/sensu-ansible-role",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_packages(host):
package = host.package('sensu')
assert package.is_installed
assert '1.7.0' in package.version
def test_dir_ownership(host):
assert host.file('/opt/sensu').group == 'sensu'
def test_main_config(host):
f = host.file('/etc/sensu/config.json')
assert f.exists
assert f.is_file
assert f.user == 'sensu'
assert f.group == 'sensu'
assert f.mode == 0o600
assert f.contains('rabbitmq')
assert f.contains('check-cpu.rb')
assert f.contains('"foo": "bar"')
assert f.contains('example_subscription')
assert f.contains('"zip": "zap"')
assert not f.contains('subscription_to_be_overridden')
def test_server_running(host):
server = host.service('sensu-server')
assert server.is_running
assert server.is_enabled
def test_api_running(host):
api = host.service('sensu-api')
assert api.is_running
assert api.is_enabled
def test_client_running(host):
client = host.service('sensu-client')
assert client.is_running
assert client.is_enabled
def test_api_listening(host):
assert host.socket('tcp://0.0.0.0:4567').is_listening
def test_plugin_installed(host):
assert host.file('/opt/sensu/embedded/bin/check-memory.rb').exists
# Tests extension install/enable
def test_snmp_listening(host):
assert host.socket('udp://0.0.0.0:1062').is_listening
``` |
{
"source": "joshberry911/terraplanfeed",
"score": 3
} |
#### File: terraplanfeed/terraplanfeed/stdout.py
```python
import logging
import sys
logger = logging.getLogger(__name__)
ACTION_SYMBOLS = {
"no-op": "👍",
"create": "✨",
"read": "📖",
"update": "📝",
"replace": "🚧",
"delete": "🛑",
}
ACTION_TEXT = {
"no-op": ".",
"create": "+",
"read": "r",
"update": "U",
"replace": "R",
"delete": "X",
}
HEADER_CHANGES = """
**Terraform Plan changes summary:**
===================================
"""
HEADER_DRIFT = """
**Terraform Plan drift summary:**
===================================
"""
FOOTER = """
"""
def getAction(actions, textonly):
"""
Get action
Args:
actions(list): list of actions
textonly(bool): disable emoji
Returns:
action symbol
"""
logger.debug("get action")
lookup = ACTION_TEXT if textonly else ACTION_SYMBOLS
if "create" in actions and len(actions) > 1:
return lookup["replace"]
else:
return lookup[actions[0]]
def parseChanges(changes, textonly, drift):
"""
Parse changes.
Args:
changes(list): list of resources dict
textonly(bool): disable emoji
Returns:
Multiline string with summary of changes
"""
content = ""
logger.debug("parsing changes...")
for c in changes:
action = getAction(c["actions"], textonly)
message = "({action}): {name} ({address})".format(
action=action, name=c["name"], address=c["address"]
)
content += message + "\n"
return content
def write(content, drift):
"""
Writes summary of changes to stdout.
Args:
content(str): multiline string
drift(bool): flag denoting drift mode
"""
logger.debug("write to stdout")
if drift:
print(HEADER_DRIFT)
else:
print(HEADER_CHANGES)
print(content)
print(FOOTER)
def detexit(content):
"""
Exit with detailed exit codes
Args:
content(str): multiline string
"""
logger.debug("exit")
if content != "No changes":
sys.exit(2)
else:
sys.exit(0)
def generate_stdout(changes, textonly=False, drift=False, detailed_exitcode=False):
"""
Entrypoint for stdout output driver.
Args:
changes(list): list of resources dict
textonly(bool): disable emoji
drift(bool): enable drift mode
detailed_exitcode(bool): enable detailed exit codes
"""
logger.debug("stdout entrypoint")
if not changes:
content = "No changes"
else:
content = parseChanges(changes, textonly, drift)
write(content, drift)
if detailed_exitcode:
detexit(content)
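# Illustrative sketch (not part of the original module): driving the
# entrypoint above with a hand-built change list. The resource names and
# addresses are hypothetical.
def _example():
    changes = [
        {"actions": ["create"], "name": "bucket", "address": "aws_s3_bucket.bucket"},
        {"actions": ["delete", "create"], "name": "vm", "address": "aws_instance.vm"},
    ]
    # Prints the changes header and one line per resource; exits with code 2
    # because changes exist and detailed exit codes are enabled.
    generate_stdout(changes, textonly=True, detailed_exitcode=True)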
``` |
{
"source": "joshbickett/makememe_ai",
"score": 3
} |
#### File: generator/nlp/gpt.py
```python
import requests, json
with open("/etc/make_meme/config.json") as config_file:
config = json.load(config_file)
class GPT:
@staticmethod
def completion_request(prompt, user_id):
d_url = "https://api.openai.com/v1/engines/davinci/completions"
payload = {
"prompt": prompt,
"stop": "###",
"temperature": 0.7,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"best_of": 1,
"max_tokens": 50,
"user": f"user_id",
}
headers = {
"Content-Type": "application/json",
"Authorization": f'Bearer {config["OPEN_AI_KEY"]}',
}
response = requests.post(d_url, data=json.dumps(payload), headers=headers)
response = response.json()
return response
@staticmethod
def search_request(documents, query, user_id):
d_url = "https://api.openai.com/v1/engines/babbage/search"
payload = {
"documents": documents,
"query": query,
"user": f"user_id",
}
headers = {
"Content-Type": "application/json",
"Authorization": f'Bearer {config["OPEN_AI_KEY"]}',
}
response = requests.post(d_url, data=json.dumps(payload), headers=headers)
response = response.json()
return response
    @staticmethod
    def content_filter(prompt, user_id):
wrapped_prompt = "<|endoftext|>" + prompt + "\n--\nLabel:"
# print(f'wrapped_prompt: {wrapped_prompt}')
d_url = "https://api.openai.com/v1/engines/content-filter-alpha-c4/completions"
payload = {
"prompt": wrapped_prompt,
"temperature": 0,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"best_of": 1,
"max_tokens": 1,
"logprobs": 10,
"user": f"user_id",
}
headers = {
"Content-Type": "application/json",
"Authorization": f'Bearer {config["OPEN_AI_KEY"]}',
}
response = requests.post(d_url, data=json.dumps(payload), headers=headers)
response = response.json()
return response
```
#### File: prompts/types/accurate_depiction.py
```python
from makememe.generator.prompts.prompt import Prompt
import datetime
from PIL import Image
from makememe.generator.design.image_manager import Image_Manager
class Accurate_Depiction(Prompt):
name = "Accurate_Depiction"
description = "accurate depiction"
def __init__(self):
self.instruction = """
###
Message:They told me I am too interested in crypto currencies and they couldn't be more right
Meme:{"depiction":"You are too interested in crypto currencies"}
###
Message:I had a fortune cookie tell me I code too much and It is so correct.
Meme:{"depiction":"You code too much"}
###
Message:You want to hear an accurate depiction. I am not running enough.
Meme:{"depiction":"You are not running enough"}
###
Message:They don't go outside enough. They need to get some sunlight. It's the truth
Meme:{"depiction":"They need to go outside more"}
###
Message:Humans making memes ok, AI making memes awesome.
Meme:{"depiction":"You want AI making memes"}
###
Message:Make a meme with strong and weak doggo comparing two types of pots
Meme:{"depiction":"strong and weak doggo comparing two types of pots"}
###
Message:Too much coffee
Meme:{"depiction":"You drink too much coffee"}
###
"""
def create(self, meme_text):
with Image.open(f"makememe/static/meme_pics/{self.name.lower()}.jpg").convert(
"RGBA"
) as base:
overlay_image = Image_Manager.add_text(
base=base,
text=meme_text["depiction"],
position=(275, 760),
font_size=30,
wrapped_width=25,
rotate_degrees=350,
)
watermark = Image_Manager.add_text(
base=base, text="makememe.ai", position=(10, 1150), font_size=20
)
base = Image.alpha_composite(base, watermark)
out = Image.alpha_composite(base, overlay_image)
if out.mode in ("RGBA", "P"):
out = out.convert("RGB")
date = datetime.datetime.now()
image_name = f"{date}.jpg"
file_location = f"makememe/static/creations/{image_name}"
out.save(file_location)
return image_name
```
#### File: prompts/types/waiting.py
```python
from makememe.generator.prompts.prompt import Prompt
import datetime
from PIL import Image
from makememe.generator.design.image_manager import Image_Manager
class Waiting(Prompt):
name = "Waiting"
description = "waiting"
def __init__(self):
self.instruction = """
###
Message:I've been waiting for SpaceX to launch the starship for ever
Meme:{"subject": "SpaceX Startship"}
###
Message:I can't wait for makememe.ai to launch, but it's taking a little while
Meme:{"subject": "makememe.ai"}
###
Message:Drakes new album is going to be fire. Why do I have to wait
Meme:{"subject": "Drakes new album"}
###
Message:I want to create an NFT, but opensea.com is taking a while to load
Meme:{"subject": "opensea.com"}
###
"""
def create(self, meme_text):
with Image.open(f"makememe/static/meme_pics/{self.name.lower()}.jpg").convert(
"RGBA"
) as base:
overlay_image = Image_Manager.add_text(
base=base,
text=meme_text["subject"],
position=(600, 950),
font_size=40,
wrapped_width=20,
)
watermark = Image_Manager.add_text(
base=base, text="makememe.ai", position=(30, 1100), font_size=20
)
base = Image.alpha_composite(base, watermark)
out = Image.alpha_composite(base, overlay_image)
if out.mode in ("RGBA", "P"):
out = out.convert("RGB")
# User.objects.filter()
date = datetime.datetime.now()
image_name = f"{date}.jpg"
file_location = f"makememe/static/creations/{image_name}"
out.save(file_location)
return image_name
```
#### File: makememe_ai/makememe/models.py
```python
from datetime import datetime
from makememe import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return Users.query.get(int(user_id))
class Users(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default="default.jpg")
password = db.Column(db.String(60), nullable=False)
is_beta = db.Column(db.Boolean, default=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
posts = db.relationship("Post", backref="author", lazy=True)
memes = db.relationship("Meme", backref="creator", lazy=True)
feedback = db.relationship("Feedback", backref="author", lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
class Meme(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
text_input = db.Column(db.Text, nullable=False)
nlp_output = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
def __repr__(self):
return f"Meme('{self.title}', '{self.date_created}')"
class Feedback(db.Model):
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.String(1000), nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
def __repr__(self):
return f"Feedback('{self.description}', '{self.date_created}')"
``` |
{
"source": "joshbj360/Softwork",
"score": 2
} |
#### File: apps/business/models.py
```python
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields import NullBooleanField
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from softwork.apps.authme.models import User
class ServiceTag(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
class Service(models.Model):
    # The business profile cannot be deleted unless the business owner is deleted
service_name = models.CharField(max_length=255)
price = models.PositiveIntegerField(_("Price starting at:"), default=0)
price_negotiable = models.BooleanField(_("Price is Negotiable?"), default=False)
    years_of_experience = models.IntegerField(_("Select your years of experience on the job"))
tags = GenericRelation(ServiceTag)
business_services = GenericRelation(
'BusinessService',
'service_object_id',
'service_content_type_id',
related_query_name='services',
)
def __str__(self):
return self.service_name
class BusinessAddress(models.Model):
country = models.CharField(max_length=255, blank=True, null=True)
state = models.CharField(max_length=255, blank=True, null=True)
LGA = models.CharField(max_length=255, blank=True, null=True)
postal_code = models.IntegerField(blank=True, null=True)
street_name = models.CharField(max_length=255, blank=True, null=True)
building_number = models.IntegerField(blank=True, null=True)
address = models.TextField(_("Full Address"))
business_gps_coordinates = models.FloatField(_("Find the exact business location"))
business_is_verified = models.BooleanField(default=False)
def get_full_address(self):
return "No. {0} {1} {2} {3} State".format(self.building_number, self.street, self.LGA, self.state)
class BusinessProfile(BusinessAddress):
owner = models.OneToOneField(
get_user_model(),
primary_key=True,
on_delete=models.CASCADE,
)
business_name = models.CharField(max_length=255, )
business_motto = models.CharField(max_length=255)
business_description = models.TextField(blank=True, null=True)
business_logo = models.ImageField(blank=True, null=True)
business_header_image = models.ImageField(blank=True, null=True)
business_industry_tags = models.CharField(max_length=255, blank=True, null=True)
class Meta:
ordering = ['business_name']
def __str__(self):
return self.business_name
def add_business_service(self, service) -> 'BusinessService':
service_content_type = ContentType.objects.get_for_model(service)
return BusinessService.objects.create(
business=self,
service_content_type=service_content_type,
service_object_id=service.pk,
)
class BusinessService(models.Model):
business = models.ForeignKey(
BusinessProfile,
verbose_name=_("Name of Business"),
on_delete=models.CASCADE,
related_name='services',
)
service_object_id = models.IntegerField()
service_content_type = models.ForeignKey(
ContentType,
on_delete=models.PROTECT,
)
service = GenericForeignKey(
'service_content_type',
'service_object_id',
)
def __str__(self):
return self.service.service_name
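# Illustrative sketch (not part of the original module): linking a generic
# Service to a BusinessProfile. Field values are hypothetical and `owner` is
# assumed to be a saved user instance.
def _example_add_service(owner):
    profile = BusinessProfile.objects.create(
        owner=owner,
        business_name="Acme Plumbing",
        business_motto="We fix leaks",
        address="12 Example Street",
        business_gps_coordinates=0.0,
    )
    service = Service.objects.create(service_name="Pipe repair",
                                     years_of_experience=5)
    # Creates the BusinessService row that links the two via a generic FK.
    return profile.add_business_service(service)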
``` |
{
"source": "JoshBlake/libfreenect",
"score": 2
} |
#### File: c/python/demo_cv_depth_show.py
```python
from freenect import *
import cv
import numpy as np
cv.NamedWindow('Depth')
def display(dev, data, timestamp):
data -= np.min(data.ravel())
data *= 65536 / np.max(data.ravel())
image = cv.CreateImageHeader((data.shape[1], data.shape[0]),
cv.IPL_DEPTH_16U,
1)
cv.SetData(image, data.tostring(),
data.dtype.itemsize * data.shape[1])
cv.ShowImage('Depth', image)
cv.WaitKey(5)
runloop(depth=depth_cb_np_factory(display))
``` |
{
"source": "joshblum/chronology",
"score": 2
} |
#### File: analytics_tools/aggregates/analysis.py
```python
from pykronos.common.event_tools import get_property
from pykronos.client import TIMESTAMP_FIELD
from pykronos.common.time import kronos_time_to_epoch_time
def aggregate_stream(events, aggregator_class, field, bucket_width):
current_bucket, aggregator = None, None
emitted = False
for event in events:
timestamp = kronos_time_to_epoch_time(event[TIMESTAMP_FIELD])
bucket = timestamp - (timestamp % bucket_width)
if bucket != current_bucket:
if current_bucket is not None:
yield current_bucket, aggregator.aggregate()
emitted = True
current_bucket = bucket
aggregator = aggregator_class()
emitted = False
aggregator.update(get_property(event, field))
if not emitted and current_bucket and aggregator:
yield current_bucket, aggregator.aggregate()
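# Illustrative sketch (not part of the original module): a minimal counting
# aggregator fed through aggregate_stream. The event shape and field name are
# hypothetical; events must be ordered by TIMESTAMP_FIELD.
class _CountAggregator(object):
  def __init__(self):
    self.count = 0
  def update(self, value):
    self.count += 1
  def aggregate(self):
    return self.count
def _example(events):
  # One-hour buckets (3600 epoch seconds); yields (bucket_start, count) pairs.
  for bucket_start, count in aggregate_stream(events, _CountAggregator,
                                              'value', 3600):
    print bucket_start, count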
```
#### File: analytics_tools/funnels/analysis.py
```python
import logging
from datetime import timedelta
from pykronos.common.event_tools import get_property
from pykronos.client import TIMESTAMP_FIELD
from pykronos.common.time import datetime_to_kronos_time
from pykronos.common.time import timedelta_to_kronos_time
from pykronos.common.time import EPOCH
log = logging.getLogger(__name__)
EARLIEST_TIME = datetime_to_kronos_time(EPOCH)
class FilterCache(object):
def __init__(self, a_filter):
self._filter = a_filter
self._filter_results = {}
def _caching_filter(self, key):
if self._filter:
result = self._filter_results.get(key, None)
if result is None:
result = self._filter(key)
self._filter_results[key] = result
return result
else:
return True
def get(self, key):
return EARLIEST_TIME if self._caching_filter(key) else None
class IdentityDict(object):
def __getitem__(self, key):
return key
def get(self, key):
return key
class FunnelStep(object):
""" Data structure for funnel step properties. """
def __init__(self, stream_name, event_filter=None, user_field='userId',
output_fields=None, invert=False):
"""
:param stream_name: Kronos stream name
:param event_filter: a function that returns True/False depending on whether
an event on the stream should be considered (for segmentation, for
instance). If the event filter is None, all events will be accepted.
:param user_field: name of the field in an event that returns the user
identifier. Defaults to 'userId'.
:param output_fields: fields in event to output.
:param invert: boolean. If true, include all users from the previous step
that do NOT appear in the current event stream or are not filtered by
`event_filter`. Example: If the stream_name is 'clicked_tab' and the
event_filter is all Essential accounts, if invert is True, users counted
in this step of the funnel are those from the previous step that are not
in 'clicked_tab' stream or are not Essential accounts. The first funnel
step can not have invert=True. If invert=True, an exception is raised.
"""
self.stream_name = stream_name
self.event_filter = event_filter
self.user_field = user_field
self.output_fields = output_fields
self.invert = invert
class FunnelOutput(object):
""" Data structure for storing the output of the funnel. """
def __init__(self):
self.step_output = []
def add(self, step):
self.step_output.append(step)
def stream_sizes(self):
""" Returns size of each funnel step in a list. """
return [len(s['user_action']) for s in self.step_output]
def user_ids(self):
""" Returns list of user ids at each funnel in a list. """
return [s['user_action'].keys() for s in self.step_output]
def stream_data(self):
""" Returns output data of each funnel step in a list.
Each list element is a dictionary with user id as the key. The value for
each key is a dictionary with the requested output properties for that
funnel step as the keys, the values of the properties as the values.
"""
return [s['stream_data'] for s in self.step_output]
def _stream_earliest_action(client, stream, start, end, fuzzy_time,
last_user_action, user_id_mappings):
""" Find users who advance to this step of the funnel.
:returns: dictionary with user_action and stream_data. user_action is a
dictionary of user ids and time of last action. This is for determining
if events in subsequent streams occur after the current stream.
stream_data is a dictionary of user ids and dictionary of output properties
as specified in stream.output_fields.
"""
events = client.get(stream.stream_name, start, end)
user_action = {}
stream_data = {}
for idx, event in enumerate(events):
if idx % 10000 == 0:
      log.debug('...processed %d events', idx)
if stream.event_filter and not stream.event_filter(event):
continue
    try:
      user = user_id_mappings[stream.user_field].get(event[stream.user_field])
    except KeyError:
      log.error('Unable to get field %s on %s from %s',
                stream.user_field, stream.stream_name, event)
      continue
last_time = last_user_action.get(user)
event_time = event[TIMESTAMP_FIELD]
# If we've seen an action from this user in the last stream, and
# if they performed an action on the current stream (fuzzily)
# after their last action, update their current user action time.
if (user is not None and
last_time is not None and
((last_time - fuzzy_time) < event_time)):
user_action[user] = min(user_action.get(user, event_time), event_time)
if stream.output_fields and not stream.invert:
event_fields = {}
for field in stream.output_fields:
try:
event_fields[field] = get_property(event, field)
except KeyError:
log.warn('Field %s does not appear in stream %s',
field, stream.stream_name)
stream_data[user] = event_fields
# If stream results should be inverted, include all users that are NOT in
# user_action, and use their timestamp from the previous step as the timestamp
# of the current step. We can not use the timestamp for this stream, since they
# may not have an event in this stream.
if stream.invert:
inverted_user_action = {}
for user, timestamp in last_user_action.iteritems():
if user not in user_action:
inverted_user_action[user] = timestamp
return {'user_action': inverted_user_action, 'stream_data': {}}
else:
return {'user_action': user_action, 'stream_data': stream_data}
def _load_user_id_mappings(mappings, user_id_mappers, user_ids):
for mapping, getter in user_id_mappers.iteritems():
log.debug('...loading mapping for %s', mapping)
the_dict = mappings[mapping] = {}
for user_id in user_ids:
new_id = getter(user_id)
if new_id:
the_dict[new_id] = user_id
def _sanity_check_args(streams, user_id_mappers):
assert len(streams) > 1 # Must have more than one stream for funnel analysis.
assert streams[0].invert == False # Can't handle invert=True in first stream.
for stream in streams:
if stream.invert: # If invert is true, can not specify output fields.
assert not stream.output_fields
first_stream_user_id = streams[0].user_field
required_mappings = ({stream.user_field for stream in streams} -
{first_stream_user_id})
# Filter down mappings to only the ones we'll need to execute here.
user_id_mappers = {required: user_id_mappers[required]
for required in required_mappings}
return streams, user_id_mappers
def funnel_analyze(client, streams, start, end, end_first_funnel_step,
user_id_mappers, user_filter,
fuzzy_time=timedelta(minutes=5)):
"""
`streams`: a list of FunnelStep objects, each representing a step in the
funnel. The funnel is composed from these objects.
`start`/`end`: the start and end datetimes to analyze.
`end_first_funnel_step`: the end time of the first funnel step. You
sometimes want this to be earlier than the rest of the other steps
so you can study how a cohort takes certain actions down the line.
`user_id_mappers`: a dictionary of the form
{user_id_field: user_id_mapping_function}.
A user_id_field entry should exist for
any user_id fieldname of `streams` subsequent to the first stream
in the funnel. For example, if `streams` is:
[(s1, f1, 'userId'), (s2, f2, 'userId'), (s3, f3, 'username')],
then user_id_mappings should be:
{'username': function_from_userId_to_username(userId)}
`user_filter`: a function that returns True/False depending on
whether an event from a user should be considered (for segmentation,
for instance). If user_filter is None, all users will be accepted.
`fuzzy_time`: a timedelta representing the time that two events in
subsequent streams can be out-of-order with one-another.
"""
assert end >= end_first_funnel_step
streams, user_id_mappers = _sanity_check_args(streams, user_id_mappers)
last_user_action = FilterCache(user_filter)
fuzzy_time = timedelta_to_kronos_time(fuzzy_time)
funnel_output = FunnelOutput()
user_id_mappings = {}
for idx, stream in enumerate(streams):
    log.debug('Processing stream %s', stream.stream_name)
step_end = end
if idx == 0:
user_id_mappings[stream.user_field] = IdentityDict()
step_end = end_first_funnel_step
output = _stream_earliest_action(
client, stream, start, step_end,
fuzzy_time, last_user_action, user_id_mappings)
funnel_output.add(output)
last_user_action = output['user_action']
# For the first stream in the funnel, load the mappings to other
# user_id formats we'll find in subsequent streams.
if idx == 0:
log.debug('Loading user_id mappings')
_load_user_id_mappings(user_id_mappings, user_id_mappers, last_user_action)
return funnel_output
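# Illustrative sketch (not part of the original module): a two-step funnel.
# Stream names, field names, and the id-mapping function are hypothetical.
def _example(client, start, end):
  steps = [
    FunnelStep('signup', user_field='userId'),
    FunnelStep('first_purchase', user_field='username',
               output_fields=['totalPrice']),
  ]
  output = funnel_analyze(
    client, steps, start, end,
    end_first_funnel_step=end,
    # Maps each step-one userId to the username form used in step two.
    user_id_mappers={'username': lambda user_id: 'user-%s' % user_id},
    user_filter=None)
  # Number of users reaching each step, e.g. [1200, 340].
  return output.stream_sizes()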
```
#### File: analytics_tools/scripts/stream_comparator.py
```python
import argparse
import hashlib
import itertools
import json
import logging
import random
import sys
from dateutil.parser import parse
from pykronos import KronosClient
from pykronos import ID_FIELD
from pykronos import TIMESTAMP_FIELD
from pykronos.common.time import epoch_time_to_kronos_time
from pykronos.common.time import datetime_to_kronos_time
log = logging.getLogger(__name__)
def get_next(events):
try:
return events.next()
except StopIteration:
return None
def push_back(event, iterator):
return itertools.chain([event], iterator)
def main(args):
client1 = KronosClient(args.kronos_url1, namespace=args.namespace1)
client2 = KronosClient(args.kronos_url2, namespace=args.namespace2)
if args.streams_file:
streams = map(lambda s: (s, s), # Use same stream name for both.
filter(lambda s: len(s),
open(args.streams_file).read().split('\n')))
else:
streams = [(args.stream1, args.stream2)]
for stream1_name, stream2_name in streams:
if args.num_samples:
samples = []
for _ in xrange(args.num_samples):
start = random.randint(args.start, args.end - args.sample_interval)
samples.append((start, start + args.sample_interval))
else:
samples = [(args.start, args.end)]
total_stream1 = 0
extra_stream1 = 0
total_stream2 = 0
extra_stream2 = 0
for start, end in samples:
stream1 = client1.get(stream1_name, start, end)
stream2 = client2.get(stream2_name, start, end)
# Sorting of events with the same timestamp may vary across backends,
# hence we can't do a simple loop comparison. We need to aggregate all
# events with the same timestamp from both streams and then compare the
# two sets.
stream1_hashes = set()
stream2_hashes = set()
current_timestamp = None
while True:
event1 = get_next(stream1)
event2 = get_next(stream2)
# Are both streams exhausted?
if not (event1 or event2):
break
# Pick the smaller timestamp from the two events.
        min_timestamp = min(
            event1.get(TIMESTAMP_FIELD, sys.maxint) if event1 else sys.maxint,
            event2.get(TIMESTAMP_FIELD, sys.maxint) if event2 else sys.maxint)
if current_timestamp is None:
current_timestamp = min_timestamp
# If min_timestamp is greater than current_timestamp, then aggregate
# stats for current_timestamp and roll over.
if min_timestamp > current_timestamp:
total_stream1 += len(stream1_hashes)
total_stream2 += len(stream2_hashes)
extra_stream1 += len(stream1_hashes - stream2_hashes)
extra_stream2 += len(stream2_hashes - stream1_hashes)
stream1_hashes.clear()
stream2_hashes.clear()
current_timestamp = min_timestamp
if event1:
assert event1[TIMESTAMP_FIELD] >= current_timestamp
if event1[TIMESTAMP_FIELD] == current_timestamp:
del event1[ID_FIELD]
stream1_hashes.add(
hashlib.sha224(json.dumps(event1, sort_keys=True)).hexdigest())
else:
stream1 = push_back(event1, stream1)
if event2:
assert event2[TIMESTAMP_FIELD] >= current_timestamp
if event2[TIMESTAMP_FIELD] == current_timestamp:
del event2[ID_FIELD]
stream2_hashes.add(
hashlib.sha224(json.dumps(event2, sort_keys=True)).hexdigest())
else:
stream2 = push_back(event2, stream2)
print 'Diff: [%s/%s], [%s/%s]' % (args.namespace1, stream1_name,
args.namespace2, stream2_name)
print '< total: %d' % total_stream1
print '> total: %d' % total_stream2
print '< extra: %d' % extra_stream1
print '> extra: %d' % extra_stream2
print
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--kronos-url1',
required=True,
help='The first Kronos server to retrieve data from')
parser.add_argument(
'--kronos-url2',
required=True,
help='The second Kronos server to retrieve data from')
parser.add_argument(
'--namespace1',
help='The namespace to read from the first Kronos server (optional)')
parser.add_argument(
'--namespace2',
help='The namespace to read from the second Kronos server (optional)')
parser.add_argument(
'--stream1',
help='The stream to read from the first Kronos server')
parser.add_argument(
'--stream2',
help='The stream to read from the second Kronos server')
parser.add_argument(
'--streams-file',
help='The name of the file with a stream name per line to copy')
parser.add_argument(
'--start',
required=True,
      help='When to start retrieving? (format: 2003-09-25T10:49:41.5-03:00)')
parser.add_argument(
'--end',
required=True,
      help='When to end retrieving? (format: 2003-09-25T10:49:41.5-03:00)')
group = parser.add_argument_group(title='sampling arguments',
description=('Only compare samples of '
'events.'))
group.add_argument('--num-samples',
type=int,
help=('Number of samples to compare?'))
group.add_argument('--sample-interval',
type=int,
default=3600,
help=('Interval of the sample (in seconds). Defaults to '
'1 hour (3600) (optional)'))
args = parser.parse_args()
if not bool(args.streams_file) ^ bool(args.stream1 and args.stream2):
print 'Please specify either `stream-file` or both `stream1 and stream2`.'
sys.exit(1)
args.start = datetime_to_kronos_time(parse(args.start))
args.end = datetime_to_kronos_time(parse(args.end))
args.sample_interval = epoch_time_to_kronos_time(args.sample_interval)
return args
if __name__ == '__main__':
main(process_args())
```
#### File: common/src/event_tools.py
```python
def get_property(event, name):
"""
Gets a property from `event` using a dot-notation name `name`. It's preferable
that dots are not used in key names but this function tries to do the right
thing by trying all combinations of property names. It prefers property
names with dots over nested dicts. For example if the dict looks like:
{ 'a': { 'b': 1 },
'a.b': 2 }
then this function will return 2 if `name` is 'a.b' rather than 1.
"""
if isinstance(name, list):
parts = name
else:
parts = name.split('.')
for i in xrange(len(parts), 0, -1):
name = '.'.join(parts[:i])
if name not in event:
continue
value = event[name]
if i == len(parts):
return value
elif isinstance(value, dict):
return get_property(value, parts[i:])
raise KeyError
else:
raise KeyError
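# Illustrative sketch (not part of the original module): per the docstring
# above, a literal dotted key wins over a nested dict.
def _example():
  event = {'a': {'b': 1}, 'a.b': 2, 'c': {'d': 3}}
  assert get_property(event, 'a.b') == 2
  assert get_property(event, 'c.d') == 3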
```
#### File: common/src/runner.py
```python
from __future__ import absolute_import
import os
import subprocess
import sys
import time
import urllib2
def _http_probe_wait(url, sleep_time=0.05, num_probes=50):
for i in xrange(num_probes):
try:
urllib2.urlopen(url)
return True
except urllib2.URLError:
time.sleep(sleep_time)
return False
class ProcessRunnerError(Exception):
pass
class ProcessRunner(object):
def __init__(self, args, cwd=None, verbose=False):
'''
Runs a shell command in a separate process.
`args` - list of args that represent the shell command.
`cwd` - working directory to execute the shell command from.
'''
self.args = args
if cwd and not os.path.isabs(cwd):
cwd = os.path.join(os.getcwd(), cwd)
self.cwd = cwd
self.verbose = verbose
self._sub_proc = None
def start(self):
if self._sub_proc:
raise ProcessRunnerError('Process already running!')
if self.verbose:
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = stderr = open(os.devnull, 'w')
self._sub_proc = subprocess.Popen(self.args,
stdout=stdout,
stderr=stderr,
cwd=self.cwd)
self.wait()
def wait(self):
pass
def stop(self):
if not self._sub_proc:
raise ProcessRunnerError('Process not running!')
self._sub_proc.terminate()
self._sub_proc.wait()
def restart(self):
if self._sub_proc:
self.stop()
self.start()
class KronosRunner(ProcessRunner):
def __init__(self, kronos_dir, config=None, port='9191', **kwargs):
self.port = port
args = ['python', 'runserver.py',
'--port', port,
'--debug']
if config:
args.extend(['--config', config])
super(KronosRunner, self).__init__(args, cwd=kronos_dir, **kwargs)
def wait(self):
if not _http_probe_wait('http://localhost:%s/1.0/index' % self.port):
raise ProcessRunnerError('Failed to start KronosRunner.')
class MetisRunner(ProcessRunner):
def __init__(self, metis_dir, config=None, port='9192', **kwargs):
self.port = port
args = ['python', 'runserver.py',
'--port', port,
'--debug']
if config:
args.extend(['--config', config])
super(MetisRunner, self).__init__(args, cwd=metis_dir, **kwargs)
def wait(self):
if not _http_probe_wait('http://localhost:%s/1.0/index' % self.port):
raise ProcessRunnerError('Failed to start MetisRunner.')
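# Illustrative sketch (not part of the original module): spinning up a local
# kronosd for a test run. The directory and config path are hypothetical.
def _example():
  runner = KronosRunner('kronos', config='tests/conf/kronos_settings.py',
                        port='9191')
  runner.start()  # Blocks until http://localhost:9191/1.0/index responds.
  try:
    pass  # ... exercise the server ...
  finally:
    runner.stop()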
```
#### File: jia/jia/decorators.py
```python
import json
import sys
import traceback
from functools import wraps
from flask import Response
from jia import errors
from jia.errors import PyCodeError
def json_endpoint(function):
@wraps(function)
def wrapper(*args, **kwargs):
# Will let abort() exceptions bubble up.
try:
try:
# Don't call flask's `jsonify` because it sometimes
# pretty-prints output, calling indent=2 when dumping json.
# This causes certain datatypes (e.g., `numpy.int64`) to be
# implicitly converted when they shouldn't be.
response = Response(json.dumps(function(*args, **kwargs)),
status=200,
mimetype='application/json')
return response
except PyCodeError:
raise
except:
_, exception, tb = sys.exc_info()
raise PyCodeError(exception, traceback.format_tb(tb))
except errors.JiaError as e:
return e.to_response()
return wrapper
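# Illustrative sketch (not part of the original module): a handler wrapped by
# json_endpoint returns a JSON-serializable value; the decorator renders it as
# an application/json response and converts errors into error responses.
def _example_view():
  return {'ok': True}
_example_endpoint = json_endpoint(_example_view)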
```
#### File: jia/jia/utils.py
```python
import smtplib
from email.mime.text import MIMEText
from flask import current_app
def get_seconds(value, scale):
"""Convert time scale dict to seconds
Given a dictionary with keys for scale and value, convert
value into seconds based on scale.
"""
scales = {
'seconds': lambda x: x,
'minutes': lambda x: x * 60,
'hours': lambda x: x * 60 * 60,
'days': lambda x: x * 60 * 60 * 24,
'weeks': lambda x: x * 60 * 60 * 24 * 7,
'months': lambda x: x * 60 * 60 * 24 * 30,
'years': lambda x: x * 60 * 60 * 24 * 365,
}
return scales[scale](value)
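# Illustrative sketch (not part of the original module).
def _example_get_seconds():
  assert get_seconds(2, 'hours') == 7200
  assert get_seconds(3, 'days') == 259200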
def send_mail(recipients, subject, message, from_addr=None):
if not from_addr:
from_addr = current_app.config['SYSTEM_EMAIL']
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = from_addr
msg['To'] = ','.join(recipients)
s = smtplib.SMTP('localhost')
s.sendmail(from_addr, recipients, msg.as_string())
s.quit()
```
#### File: jia/scheduler/scheduler.py
```python
from __future__ import absolute_import
import atexit
import datetime
import gevent
import gipc
import traceback
import sys
from heapq import heappush, heappop, heapify
from jia.errors import PyCodeError
from jia.utils import send_mail
from scheduler import get_app
from scheduler.common.concurrent import GIPCExecutor
from scheduler.models import Task
class Scheduler(object):
"""Inteval based code execution scheduler"""
def __init__(self):
"""Initialize the queue and spawn the main loop thread
Upon initialization, tasks stored in the database are immediately
scheduled.
_task_queue is a priority queue ordered using Python's heapq functionality.
Elements in _task_queue are tuples of the form (datetime, task) where
datetime is the scheduled run time and task is a dictionary as defined
in the above docstring for the Scheduler class.
For concurrency safety reasons, never write to _task_queue outside the
_loop() thread.
"""
self._task_queue = [] # Never write to this outside the _loop thread
self._pending_cancels = set()
self._executor = GIPCExecutor()
# Load previously scheduled tasks from database
now = datetime.datetime.now()
with get_app().app_context():
saved_schedule = Task.query.filter_by(active=True)
for task in saved_schedule:
new_task = {
'id': task.id,
'interval': task.interval,
'code': task.code
}
# Writing directly to the _task_queue is safe since we haven't started
# the _loop yet
self._task_queue.append((now, new_task))
# Make _task_queue a priority queue
heapify(self._task_queue)
# Spawn main loop and save writer for future communication
(read, write) = gipc.pipe()
self._main_thread = gevent.spawn(self._loop, read)
self._schedule_pipe = write
atexit.register(self._interrupt)
def schedule(self, task):
"""Pass schedule request to the main loop
Tasks should be dictionaries with the following attributes:
task = {
'id': 'a93de0f3',
'code': ..., # string of Python code
'interval': 600, # in seconds
}
An interval of 0 indicates the task should only be run once.
"""
self._schedule_pipe.put(('schedule', task))
def cancel(self, task_id):
"""Pass cancel request to the main loop"""
self._schedule_pipe.put(('cancel', task_id))
def _schedule(self, task, next_run=None):
if not next_run:
next_run = datetime.datetime.now()
heappush(self._task_queue, (next_run, task))
def _cancel(self, task_id):
self._pending_cancels.add(task_id)
def _interrupt(self):
self._main_thread.kill()
#TODO(derek): kill child threads
def _loop(self, reader):
"""Main execution loop of the scheduler.
The loop runs every second. Between iterations, the loop listens for
schedule or cancel requests coming from Flask via over the gipc pipe
(reader) and modifies the queue accordingly.
When a task completes, it is rescheduled
"""
results = set()
while True:
now = datetime.datetime.now()
if self._task_queue and self._task_queue[0][0] <= now:
task = heappop(self._task_queue)[1]
if task['id'] not in self._pending_cancels:
result = self._executor.submit(_execute, task)
results.add(result)
else:
self._pending_cancels.remove(task['id'])
else:
# Check for new tasks coming from HTTP
with gevent.Timeout(0.5, False) as t:
message = reader.get(timeout=t)
if message[0] == 'schedule':
self._schedule(message[1], next_run=now)
elif message[0] == 'cancel':
self._cancel(message[1])
# Reschedule completed tasks
if not results:
gevent.sleep(0.5)
continue
ready = self._executor.wait(results, num=1, timeout=0.5)
for result in ready:
results.remove(result)
if result.value:
task = result.value
interval = int(task['interval'])
if interval:
run_at = now + datetime.timedelta(seconds=int(task['interval']))
self._schedule(task, next_run=run_at)
else:
err_msg = result.exception
sys.stderr.write("ERROR: %s" % err_msg)
email_msg = 'Task %s failed at %s\n\n%s' % (
task['id'],
datetime.datetime.now(),
err_msg
)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure',
email_msg)
def _execute(task):
"""A wrapper around exec
This exists outside the Scheduler class because it is pickled after it is
sent to the executor.
"""
print "[%s] -- %s -- START" % (datetime.datetime.now(), task['id'])
try:
with get_app().app_context():
exec task['code'] in {}, {}
print "[%s] -- %s -- COMPLETE" % (datetime.datetime.now(), task['id'])
except Exception as e:
if isinstance(e, PyCodeError):
err_msg = "%s: %s\n%s" % (e.data['name'], e.data['message'],
''.join(e.data['traceback']))
else:
err_msg = traceback.format_exc()
sys.stderr.write(err_msg)
sys.stderr.write("[%s] -- %s -- FAIL\n" % (datetime.datetime.now(),
task['id']))
email_msg = 'Task %s failed at %s\n\n%s' % (task['id'],
datetime.datetime.now(),
err_msg)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure', email_msg)
finally:
return task
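# Illustrative sketch (not part of the original module): scheduling a task
# from the web layer. The id and code string are hypothetical placeholders.
def _example(scheduler):
  scheduler.schedule({
    'id': 'a93de0f3',
    'code': "print 'hello from the scheduler'",
    'interval': 600,  # Re-run every 10 minutes; 0 means run only once.
  })
  scheduler.cancel('a93de0f3')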
```
#### File: kronos/core/marshal.py
```python
import json as _json
import sys
import ujson
class json(object):
# ujson has superior decoding performance and produces the same output as
# json.loads would. We use it in pykronos to speed up event parsing, use it
# here to speed up request body parsing.
loads = staticmethod(lambda s: ujson.loads(s, precise_float=True))
dumps = staticmethod(_json.dumps)
def get_marshaler(name):
# This is convenient because to support other marshaling libraries, we mostly
# will just have to add an import statement for them at the top. This is
# because most Python marshaling libraries support the loads/dumps calls.
# For example if we want to support msgpack, all we need to do is add an
# `import msgpack` statement at the top and then call
  # `set_marshaler('msgpack')` below.
return globals()[name]
def set_marshaler(name):
marshaler = get_marshaler(name)
setattr(sys.modules[__name__], 'loads', marshaler.loads)
setattr(sys.modules[__name__], 'dumps', marshaler.dumps)
set_marshaler('json')
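# Illustrative sketch (not part of the original module): after set_marshaler,
# the module-level loads/dumps point at the selected implementation.
def _example():
  assert loads(dumps({'a': 1})) == {'a': 1}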
```
#### File: kronos/utils/math.py
```python
def round_down(value, base):
"""
Round `value` down to the nearest multiple of `base`.
Expects `value` and `base` to be non-negative.
"""
return int(value - (value % base))
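# Illustrative sketch (not part of the original module).
def _example_round_down():
  assert round_down(17, 5) == 15
  assert round_down(20, 5) == 20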
```
#### File: kronos/utils/streams.py
```python
import time
from kronos.common.json_schema import get_schema_type
from kronos.common.json_schema import NullType
from kronos.common.time import epoch_time_to_kronos_time
from kronos.conf.constants import ResultOrder
from kronos.core import marshal
from kronos.storage.router import router
def infer_schema(namespace, stream):
now = epoch_time_to_kronos_time(time.time())
backend, configuration = router.backend_to_retrieve(namespace, stream)
events = backend.retrieve(namespace, stream, 0, now, None, configuration,
order=ResultOrder.DESCENDING, limit=100)
schema_type = NullType()
for event in events:
schema_type = schema_type.combine(get_schema_type(marshal.loads(event)))
schema = schema_type.to_dict()
schema['$schema'] = 'http://json-schema.org/draft-04/schema'
return schema
```
#### File: chronology/kronos/runserver.py
```python
import datetime
import gevent.monkey; gevent.monkey.patch_all()
import gevent.pywsgi
import imp
import logging; logging.basicConfig()
import werkzeug.serving
from argparse import ArgumentParser
from kronos.conf import settings
from kronos.conf.constants import ServingMode
def log_info(port):
return """
%(date)s
Starting kronos server at http://0.0.0.0:%(port)s/
Quit the server with CONTROL-C.""" % {
'date': datetime.datetime.now().strftime("%B %d, %Y - %H:%M:%S"),
'port': port,
}
if __name__ == '__main__':
parser = ArgumentParser(description='Kronos HTTP server.')
parser.add_argument('--debug', action='store_true', help='Debug mode?')
parser.add_argument('--reload', action='store_true', help='Auto-reload?')
parser.add_argument('--port', action='store', default='8150',
help='port to listen for incoming requests')
parser.add_argument('--serving-mode',
choices=[ServingMode.ALL, ServingMode.COLLECTOR,
ServingMode.READONLY],
help='which serving mode to run in')
parser.add_argument('--config', action='store',
help='path of config file to use')
parser.add_argument('--profile', action='store_true',
help='Profile each request using cProfile?')
args = parser.parse_args()
settings.clear()
if args.config:
# If a config file path is given, import that as the `settings` module.
settings.update(imp.load_source('kronos.conf.run_settings', args.config))
else:
# Otherwise use default settings. This is to ensure we never try to read
# the settings for the configured kronos service when using this runner
# script.
from kronos.conf import default_settings
settings.update(default_settings)
# Override the `debug` in the settings module and `debug` for
# `args`.
settings.debug = args.debug or settings.debug
settings.serving_mode = args.serving_mode or settings.serving_mode
settings.profile = args.profile or settings.profile
# Only load the application after we've overwritten settings.serving_mode, or
# else the endpoint access control logic will kick in too early.
from kronos.app import application
print log_info(args.port)
if args.reload:
def reload():
print 'Reloading kronosd...'
gevent.pywsgi.WSGIServer(('0.0.0.0', int(args.port)),
application).serve_forever()
werkzeug.serving.run_with_reloader(reload)
else:
gevent.pywsgi.WSGIServer(('0.0.0.0', int(args.port)),
application).serve_forever()
```
#### File: kronos/tests/server.py
```python
import unittest
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from kronos.app import application
from kronos.conf.constants import SUCCESS_FIELD
from kronos.core import marshal
VERSION = 1.0
BASE_PATH = '/%s' % VERSION
EVENT_BASE_PATH = '%s/events' % BASE_PATH
class KronosServerTestCase(unittest.TestCase):
""" Wrapper `TestCase` class which be used by all server tests because it
provides a clean API to Kronos and performs all necessary clean up logic.
"""
def setUp(self):
self.http_client = Client(application, BaseResponse)
self.get_path = '%s/get' % EVENT_BASE_PATH
self.put_path = '%s/put' % EVENT_BASE_PATH
self.delete_path = '%s/delete' % EVENT_BASE_PATH
self.index_path = '%s/index' % BASE_PATH
self.streams_path = '%s/streams' % BASE_PATH
self.infer_schema_path = '%s/streams/infer_schema' % BASE_PATH
def index(self):
response = self.http_client.get(path=self.index_path)
self.assertEqual(response.status_code, 200)
return marshal.loads(response.data)
def put(self, stream_or_mapping, events=None, namespace=None):
data = {}
if isinstance(stream_or_mapping, dict):
data['events'] = stream_or_mapping
else:
self.assertTrue(events is not None)
data['events'] = {stream_or_mapping: events}
if namespace is not None:
data['namespace'] = namespace
response = self.http_client.post(path=self.put_path,
data=marshal.dumps(data),
buffered=True)
self.assertEqual(response.status_code, 200)
response = marshal.loads(response.data)
self.assertTrue(response[SUCCESS_FIELD])
return response
def get(self, stream, start_time, end_time, start_id=None, limit=None,
order=None, namespace=None):
data = {'stream': stream, 'end_time': end_time}
if start_id:
data['start_id'] = start_id
else:
data['start_time'] = start_time
if limit is not None:
data['limit'] = limit
if order is not None:
data['order'] = order
if namespace is not None:
data['namespace'] = namespace
response = self.http_client.post(path=self.get_path,
data=marshal.dumps(data),
buffered=True)
self.assertEqual(response.status_code, 200)
return map(marshal.loads, response.data.splitlines())
def delete(self, stream, start_time, end_time, start_id=None, namespace=None):
data = {'stream': stream, 'end_time': end_time}
if start_id:
data['start_id'] = start_id
else:
data['start_time'] = start_time
if namespace is not None:
data['namespace'] = namespace
response = self.http_client.post(path=self.delete_path,
data=marshal.dumps(data),
buffered=True)
self.assertEqual(response.status_code, 200)
response = marshal.loads(response.data)
self.assertTrue(response[SUCCESS_FIELD])
return response
def get_streams(self, namespace=None):
data = {}
if namespace is not None:
data['namespace'] = namespace
response = self.http_client.post(self.streams_path,
data=marshal.dumps(data),
buffered=True)
self.assertEqual(response.status_code, 200)
return response.data.splitlines()
def infer_schema(self, stream, namespace=None):
response = self.http_client.post(
self.infer_schema_path,
data=marshal.dumps({'stream': stream, 'namespace': namespace}),
buffered=True)
self.assertEqual(response.status_code, 200)
return marshal.loads(response.data)
```
#### File: core/execute/base.py
```python
from metis.core.execute.registry import DataSourceAdapterRegistry
from metis.core.query.operator import DataAccess
from metis.core.query.operator import Aggregate
from metis.core.query.operator import Filter
from metis.core.query.operator import Join
from metis.core.query.operator import Limit
from metis.core.query.operator import OrderBy
from metis.core.query.operator import Project
# XXX: Whenever adding a new `Operator` or a subclass of `ExecutableNode`,
# please add a method for it to the `Executor` class below and create a dispatch
# case inside `Executor.execute`.
class Executor(object):
"""
The Executor interface. ALL methods must be copied over (and implemented)
to an Executor implementation, including the first method below which sets
up the dynamic dispatcher.
"""
def execute(self, node):
if isinstance(node, DataAccess):
return self.execute_data_access(node)
if isinstance(node, Aggregate):
return self.execute_aggregate(node)
if isinstance(node, Filter):
return self.execute_filter(node)
if isinstance(node, Join):
return self.execute_join(node)
if isinstance(node, Limit):
return self.execute_limit(node)
if isinstance(node, OrderBy):
return self.execute_order_by(node)
if isinstance(node, Project):
return self.execute_project(node)
raise NotImplementedError
def finalize(self, result):
return result
def execute_data_access(self, node):
registry = DataSourceAdapterRegistry()
adapter = registry.get(type(self), type(node))()
return adapter.execute(node, self)
def execute_aggregate(self, node):
raise NotImplementedError
def execute_filter(self, node):
raise NotImplementedError
def execute_join(self, node):
raise NotImplementedError
def execute_limit(self, node):
raise NotImplementedError
def execute_order_by(self, node):
raise NotImplementedError
def execute_project(self, node):
raise NotImplementedError
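# Illustrative sketch: per the XXX note above, wiring in a hypothetical new
# `Window` operator would add one dispatch case and one stub method, e.g.
#
# if isinstance(node, Window):  # inside Executor.execute
#     return self.execute_window(node)
#
# def execute_window(self, node):  # stub on the Executor base class
#     raise NotImplementedError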
```
#### File: core/query/adapter.py
```python
class DataSourceAdapter(object):
@classmethod
def executor_source_pair(cls):
raise NotImplementedError
def execute(self, node, executor):
raise NotImplementedError
```
#### File: metis/metis/__init__.py
```python
from gevent import monkey; monkey.patch_all()
import os
VERSION = (0, 1, 'alpha')
def get_version(version=None):
version = version or VERSION
assert(len(version) == 3)
return '%s.%s %s' % version
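# e.g. get_version() == '0.1 alpha' for the default VERSION above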
# The file path will have `metis.zip` in it if it's being run on Spark workers.
# In that case we don't want to run the following initialization code because
# it can (and does) break things.
if 'metis.zip' in str(__file__):
app = None
else:
from flask import Flask
METIS_PATH = os.path.realpath(os.path.dirname(__file__))
app = Flask(__name__)
app.config.from_pyfile('%s/conf/default_settings.py' % METIS_PATH)
app.config['PATH'] = METIS_PATH
import metis.views # noqa
```
#### File: tests/utils/test_compute_cache.py
```python
import functools
import unittest
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
from mock import patch
from pykronos import KronosClient
from pykronos.client import TIMESTAMP_FIELD
from pykronos.utils.cache import QueryCache
from pykronos.common.time import datetime_to_kronos_time
from pykronos.common.time import timedelta_to_kronos_time
class QueryCacheTest(unittest.TestCase):
def setUp(self):
self.client = KronosClient('http://localhost:9191/',
blocking=False,
sleep_block=0.2)
self.total_events = 500
self.computed_namespace = 'computed'
self.increment = timedelta(minutes=1)
self.start_time = datetime(2014, 6, 4, 22)
self.bucket_width = timedelta(minutes=20)
def compute_cache_test(function):
"""A wrapper that sets up a stream with test data.
The stream takes the name of the function being run, and contains
`self.total_events` events. The events are each one
`self.increment` apart.
"""
@functools.wraps(function)
def wrapper(self):
self.stream = 'ComputeCacheTest_%s' % (function.__name__)
for i in xrange(self.total_events):
self.client.put({
self.stream: [{TIMESTAMP_FIELD:
self.start_time + (self.increment * i),
'a': i % 5, 'b': i}]})
self.client.flush()
function(self)
return wrapper
def filter_and_sum(self, start_time, end_time):
"""Bin `self.stream` into buckets, returning the sum of `b` when `a` == 2.
For all events between `start_time` and `end_time`, create an
event for every 20-minute interval of events that contains the sum
of `b` when `a`==2.
"""
events = self.client.get(self.stream, start_time, end_time)
counts = defaultdict(int)
grouping_minutes = timedelta_to_kronos_time(self.bucket_width)
for event in events:
if event['a'] == 2:
counts[event['@time'] -
(event['@time'] % grouping_minutes)] += event['b']
for group_time in sorted(counts.iterkeys()):
yield {'@time': group_time, 'b_sum': counts[group_time]}
def verify_results(self, result_func, cache, expected_results,
expected_computations):
with patch.object(cache, '_compute_bucket',
wraps=cache._compute_bucket) as mock_method:
results = result_func()
self.assertEqual(mock_method.call_count, expected_computations)
self.assertEqual(len(results), expected_results)
result_time = self.start_time
for idx, result in enumerate(results):
self.assertEqual(result[TIMESTAMP_FIELD],
datetime_to_kronos_time(result_time))
self.assertEqual(
result['b_sum'],
sum([2, 7, 12, 17] +
[idx * 4 * (self.bucket_width.total_seconds() / 60)]))
result_time += self.bucket_width
def test_cache_exceptions(self):
# Bucket width shouldn't be more granular than 1 second.
def bad_bucket_width():
return QueryCache(self.client, self.filter_and_sum,
self.bucket_width + timedelta(milliseconds=1),
self.computed_namespace)
self.assertRaises(ValueError, bad_bucket_width)
# start_time and end_time should align to bucket_width boundaries.
cache = QueryCache(self.client, self.filter_and_sum,
self.bucket_width, self.computed_namespace)
start_time = self.start_time - (self.bucket_width * 3)
end_time = self.start_time + (self.total_events * self.increment) + (
self.bucket_width * 3)
def bad_start_boundary():
return list(
cache.retrieve_interval(start_time + timedelta(minutes=1),
end_time))
self.assertRaises(ValueError, bad_start_boundary)
@compute_cache_test
def test_cache_layer(self):
cache = QueryCache(self.client, self.filter_and_sum,
self.bucket_width, self.computed_namespace)
start_time = self.start_time - (self.bucket_width * 3)
end_time = self.start_time + (self.total_events * self.increment) + (
self.bucket_width * 3)
untrusted_time = self.start_time + (
timedelta(minutes=(self.total_events / 2) - 25))
# Verify all results were computed correctly.
self.verify_results(lambda: list(
cache.compute_and_cache_missing_buckets(start_time, end_time,
untrusted_time)),
cache, 25, 31)
# Verify only trusted results are cached.
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 11, 0)
# Running the same operations twice should result in the same
# results as before.
self.verify_results(
lambda: list(cache.compute_and_cache_missing_buckets(start_time, end_time,
untrusted_time)),
cache, 25, 17)
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 11, 0)
# Expanding the time range without caching should also result in the same
# results
self.verify_results(
lambda: list(cache.retrieve_interval(start_time - self.bucket_width,
end_time + self.bucket_width)),
cache, 11, 0)
# But specifying compute_missing should get all results for the timerange
self.verify_results(
lambda: list(cache.retrieve_interval(start_time - self.bucket_width,
end_time + self.bucket_width,
compute_missing=True)),
cache, 25, 19)
# Overlapping time queries should result in the same
# results as before, and benefit from the cache.
self.verify_results(
lambda: list(cache.compute_and_cache_missing_buckets(start_time -
self.bucket_width,
end_time +
self.bucket_width,
untrusted_time)),
cache, 25, 19)
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 11, 0)
# Increasing the trusted time should increase the cached results.
untrusted_time = untrusted_time + timedelta(minutes=40)
self.verify_results(
lambda: list(cache.compute_and_cache_missing_buckets(start_time, end_time,
untrusted_time)),
cache, 25, 17)
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 13, 0)
# Decreasing trusted time shouldn't remove results.
untrusted_time = untrusted_time - timedelta(minutes=40)
self.verify_results(
lambda: list(cache.compute_and_cache_missing_buckets(start_time, end_time,
untrusted_time)),
cache, 25, 15)
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 13, 0)
# If there are two cached entries, that cached time should no
# longer be returned.
results = list(cache.retrieve_interval(start_time, end_time))
duplicate_result = dict(results[10])
duplicate_result['b_sum'] = 0
self.client.put({cache._scratch_stream: [duplicate_result]},
namespace=cache._scratch_namespace)
self.client.flush()
safe_results = list(cache.retrieve_interval(start_time, end_time))
self.assertEqual(results[:10] + results[11:], safe_results)
# Rerunning the cache/computation should re-cache the corrupted
# element.
self.verify_results(
lambda: list(cache.compute_and_cache_missing_buckets(start_time, end_time,
untrusted_time)),
cache, 25, 16)
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 13, 0)
# Forcing computation should generate the same result set.
self.verify_results(
lambda: list(cache.compute_and_cache_missing_buckets(
start_time, end_time, untrusted_time, force_recompute=True)),
cache, 25, 31)
self.verify_results(
lambda: list(cache.retrieve_interval(start_time, end_time)),
cache, 13, 0)
``` |
{
"source": "joshblum/eeg-toolkit",
"score": 2
} |
#### File: joshblum/eeg-toolkit/fabfile.py
```python
from fabric.api import env as fab_env, cd, run
PROJ_NAME = 'eeg-toolkit'
def env():
# Use the aliased import so this task doesn't shadow fabric's `env` object.
fab_env.user = 'ubuntu'
fab_env.hosts = ['172.16.31.10', '192.168.3.11']
fab_env.key_filename = '~/.ssh/id_rsa.pub'
fab_env.server_path = '~/%s' % PROJ_NAME
fab_env.python_path = '~/.virtualenvs/%s' % PROJ_NAME
return
def prod_deploy():
with cd(fab_env.server_path):
run('git pull --rebase origin master')
run('make docker-run')
```
#### File: toolkit/experiments/get_file_sizes.py
```python
import glob
import os
import json
import pprint
from collections import defaultdict
DATADIR = '/home/ubuntu/eeg-data/eeg-data'
FILE_PATTERNS = {
'TileDBBackend': 'tiledb_workspace/*-tiledb',
'HDF5Backend': '.h5',
'BinaryBackend': '.bin',
}
DUMP_FILE = '%s-file-sizes.json'
def get_file_size(path):
# http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
if os.path.isdir(path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
else:
total_size = os.path.getsize(path)
return total_size
def get_file_sizes(backend_name):
file_ext = FILE_PATTERNS[backend_name]
filenames = glob.glob('%s/*%s' % (DATADIR, file_ext))
results = {}
for filename in filenames:
if 'cached' in filename:
continue
key = os.path.basename(filename).replace(file_ext, '').split('-')
if len(key) < 2:
continue
else:
key = key[1] # 005-xgb-backend_name
file_size = get_file_size(filename)
results[key] = file_size
return results
def store_file_sizes(backend_name, file_sizes):
with open(DUMP_FILE % backend_name, 'w') as f:
f.write(json.dumps(file_sizes))
def extract_file_sizes(backend_name):
try:
with open(DUMP_FILE % backend_name) as f:
return json.loads(f.read())
except IOError:
return {}
def extract_all():
return {
backend_name: extract_file_sizes(backend_name)
for backend_name in FILE_PATTERNS
}
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
backend_name = sys.argv[1]
file_sizes = get_file_sizes(backend_name)
store_file_sizes(backend_name, file_sizes)
pprint.pprint(file_sizes)
elif len(sys.argv) == 1:
pprint.pprint(extract_all())
else:
print 'usage: python get_file_sizes.py [BinaryBackend|HDF5Backend|TileDBBackend]'
```
#### File: webapp/webapp/server.py
```python
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
import os
import csv
import simplejson as json
import regression
app = Flask(__name__)
PROFILE_FILENAME = 'profile-dump-{0}.csv'
# TODO: All of these should live in flask config.
# Are we running an experiment?
EXPERIMENT = False
TARGET_LATENCY = 3500
# Regressor tags have 3 parts: 1) which data dump to look at; 2) which profile
# statistic to train on; 3) the value that the model should predict (always
# latency for us).
# TODO: change to named tuple...
DEFAULT_REGRESSOR_TAG = ('chrome-ubuntu', 'bandwidth', 'latency')
REGRESSORS = regression.save_and_load_regressors(*DEFAULT_REGRESSOR_TAG)
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(error):
return render_template('500.html'), 500
@app.route('/cluster-viewer')
def cluster_viewer():
return render_template('cluster-viewer.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/spec-viewer')
@app.route('/')
def index():
# TODO(joshblum): put stuff behind a login?
return render_template('spec-viewer.html')
def dump_profiles(profile_dumps):
# Figure out what label to give this experiment.
next_experiment_num = 0
for filename in os.listdir(os.getcwd()):
split_filename = filename.split('-')
if PROFILE_FILENAME.split('-')[:2] == split_filename[:2]:
try:
experiment_num = int(split_filename[2].split('.')[0])
except:
continue
if experiment_num >= next_experiment_num:
next_experiment_num = experiment_num + 1
next_experiment_num = str(next_experiment_num).zfill(3)
profile_filename = PROFILE_FILENAME.format(next_experiment_num)
# Sort the rows by increasing extent.
profile_dumps.sort(key=lambda profile: profile.get('extent', 1))
with open(profile_filename, 'w') as csvfile:
for profile in profile_dumps:
if 'extent' not in profile:
profile['extent'] = 1
# Column order is: extent, then all others alphabetically.
fieldnames = profile_dumps[0].keys()
fieldnames.remove('extent')
fieldnames.sort()
fieldnames = ['extent'] + fieldnames
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for profile in profile_dumps:
# Set default value of -1 for any missing fields.
for fieldname in fieldnames:
if fieldname not in profile:
profile[fieldname] = -1
writer.writerow(profile)
@app.route('/dump-visgoth', methods=['POST'])
def dump_visgoth():
success = True
message = ''
try:
profile_dumps = json.loads(request.form['profileDumps'])
profile_dumps = profile_dumps.values()
if len(profile_dumps) > 0:
dump_profiles(profile_dumps)
except Exception as e:
success = False
message = e.message
return jsonify({
'success': success,
'message': message,
})
@app.route('/visgoth/get_extent', methods=['POST'])
def get_extent():
success = True
try:
data = request.get_json()
except:
return jsonify({
'success': False,
})
# If we're running an experiment, the client will request the extent.
# Otherwise, use the models to predict the best extent.
if EXPERIMENT:
extent = data['client_profile'].get('extent', 1)
else:
x_key = DEFAULT_REGRESSOR_TAG[1]
x_value = data['client_profile'].get(x_key)
# The client profile features needed weren't included.
if x_value is None:
return jsonify({
'success': False,
})
extent = regression.predict_extent(REGRESSORS, TARGET_LATENCY, x_value)
return jsonify({
'success': success,
'extent': extent,
})
if __name__ == '__main__':
app.debug = True # enable auto reload
app.run(host='0.0.0.0')
``` |
{
"source": "joshblum/keybase-reminder-bot",
"score": 2
} |
#### File: joshblum/keybase-reminder-bot/keybase.py
```python
import json, subprocess, sys, time
from subprocess import PIPE
from user import User
class Message(object):
'''
Example message json: {
u'msg': {
u'unread': True,
u'sent_at': 1522813326,
u'sent_at_ms': 1522813326658,
u'content': {u'text': {u'body': u'Hi'}, u'type': u'text'},
u'prev': [{u'hash': u'DEZRY/2G+NYYgB34g1X9ocuFqfBBDIfEHvT+qOwotqE=', u'id': 1}],
u'id': 2,
u'channel': {
u'members_type': u'impteamnative',
u'topic_type': u'chat',
u'name': u'jessk,reminderbot',
u'public': False
},
u'bot_info': {
u'bot_uid': 'f1f49e2da3db6392b47dc913b4e85519',
u'bot_username': 'reminderbot'
},
u'sender': {
u'username': u'jessk',
u'device_name': u'phone',
u'uid': u'653ba091fa61606e5a3c8fb2086b3419',
u'device_id': u'c4aec52a455b551af3b042c46537fc18'
}
}
}
'''
def __init__(self, conv_id, json, db):
self.text = json["msg"]["content"]["text"]["body"]
self.author = json["msg"]["sender"]["username"]
self.conv_id = conv_id
self.channel_json = json["msg"]["channel"]
self.bot_username = json["msg"].get("bot_info", {}).get("bot_username")
self.json = json
self.db = db
@classmethod
def inject(cls, text, author, conv_id, channel, db):
return Message(conv_id, {"msg": {
"content": {"text": {"body": text}},
"sender": {"username": author},
"channel": {
"name": channel,
"members_type": "impteamnative"}}}, db)
def user(self):
return User.lookup(self.author, self.db)
def is_private_channel(self):
# `jessk,reminderbot` or `jessk` with reminderbot as a bot or
# restricted bot member
return self.channel_json["members_type"] != "team" and self.channel_json["name"].count(',') <= 1
def call(method, params=None, retries=0):
# method: string, params: dict
# return: dict
#print "keybase call " + method
#print "will call keybase " + method
if params is None:
params = {}
query = {"method": method, "params": params}
proc = subprocess.Popen(['keybase','chat','api'], stdin=PIPE, stdout=PIPE)
proc.stdin.write(json.dumps(query) + "\n")
proc.stdin.close()
response = proc.stdout.readline()
try:
j = json.loads(response)
except Exception as e:
if retries < 3:
print "Unable to parse json from:", response
time.sleep(1)
return call(method, params, retries+1)
else:
raise e
if "error" in j:
print "Problem with query:", query
raise Exception(j["error"]["message"])
return j["result"]
def send(conv_id, text):
call("send", {"options": {"conversation_id": conv_id, "message": {"body": text}}})
return True, None
def debug_crash(message, config):
debug(message, config)
if config.autosend_logs:
try:
subprocess.check_call(['keybase', 'log', 'send',
'--feedback', 'reminderbot crash', '--no-confirm'])
except subprocess.CalledProcessError:
print >> sys.stderr, "Error during call to `keybase log send`"
def debug(message, config):
if config.debug_team and config.debug_topic:
call("send", {"options": {"channel": {
"name": config.debug_team,
"members_type": "team",
"topic_name": config.debug_topic},
"message": {"body": message}}})
else:
print >> sys.stderr, "[DEBUG]", message
def _status():
proc = subprocess.Popen(['keybase','status', '-j'], stdout=PIPE)
out, err = proc.communicate()
return json.loads(out)
def setup(config):
status = _status()
logged_in = status["Username"]
if not status["LoggedIn"]:
try:
subprocess.check_call(['keybase', 'login', config.username])
except subprocess.CalledProcessError:
print >> sys.stderr, "FATAL: Error during call to `keybase login " \
+ config.username + "`"
sys.exit(1)
elif not logged_in == config.username:
print >> sys.stderr, "FATAL: Logged in to Keybase as wrong user."
print >> sys.stderr, "Logged in as "+logged_in+" but expected "+config.username+". "
print >> sys.stderr, "Run `keybase logout` to log them out."
sys.exit(1)
# Disable typing notifications
try:
subprocess.check_call(['keybase', 'chat', 'notification-settings', '--disable-typing'])
except subprocess.CalledProcessError as e:
print >> sys.stderr, "Error during disabling typing notifications", e.message
if config.debug_team and config.debug_topic:
try:
call("read", {"options": {"channel": {
"name": config.debug_team,
"members_type": "team",
"topic_name": config.debug_topic}}})
except Exception as e:
print >> sys.stderr, "Can't read from the debug channel:"
print >> sys.stderr, e.message
sys.exit(1)
def advertise_commands():
remind_me_extended = """Set a reminder at a specific time. Examples:
```
!remind me [when] to [what]
!remind me to [what] [when]```"""
delete_extended = """Examples:
```
!delete the reminder to [what]
!delete the [when] reminder
!delete reminder #2```"""
tz_extended = """Set your timezone to [tz]. This changes when any upcoming reminders will happen. Examples:
```
!timezone GMT
!timezone US/Pacific```"""
call("advertisecommands", {"options": {
"alias": "Reminder Bot",
"advertisements": [{
"type": "public",
"commands": [
{
"name": "help",
"description": "See help with available commands.",
},
{
"name": "remind me",
"description": "Set a reminder.",
"extended_description": {
"title": "*!remind me*",
"desktop_body": remind_me_extended,
"mobile_body": remind_me_extended,
}
},
{
"name": "list",
"description": "Show upcoming reminders.",
},
{
"name": "delete",
"description": "Delete a reminder.",
"extended_description": {
"title": "*!delete*",
"desktop_body": delete_extended,
"mobile_body": delete_extended,
}
},
{
"name": "timezone",
"description": "Set your timezone.",
"extended_description": {
"title": "*!timezone*",
"desktop_body": tz_extended,
"mobile_body": tz_extended,
}
},
{
"name": "source",
"description": "Learn about my beginnings.",
},
],
}],
}
})
def clear_command_advertisements():
call("clearcommands")
``` |
{
"source": "joshblum/netflix-rate",
"score": 2
} |
#### File: joshblum/netflix-rate/app.py
```python
from flask import render_template
from flask import request
from flask import jsonify
from flask.ext.assets import Environment, Bundle
from flask.ext.sqlalchemy import SQLAlchemy
from flask_gzip import Gzip
from flask_cors import cross_origin
from sqlalchemy.exc import IntegrityError
from datetime import datetime
from datetime import timedelta
import flask
import requests
import os
EMAIL_URL = 'http://u-mail.herokuapp.com/send?to=<EMAIL>&payload=%s'
USER_COUNT = 10000 # send an email every 10k users
DATE_FMT = '%Y-%m-%d %H:%M:%S.%f'
app = flask.Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
gzip = Gzip(app)
db = SQLAlchemy(app)
assets = Environment(app)
js = Bundle('js/libs/jquery-1.8.2.min.js', 'js/libs/carousel.js',
'js/libs/es5-shim.min.js', 'js/home.js',
output='js/netflix-rate.min.js')
css = Bundle('css/libs/reset.css', 'css/libs/bootstrap.css',
'css/libs/bootstrap-responsive.css',
'css/base.css', output='css/netflix-rate.min.css')
assets.register('js_all', js)
assets.register('css_all', css)
@app.after_request
def after_request(response):
response.direct_passthrough = False
return response
@app.route('/')
def index():
return render_template('home.html', title='downloads')
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html', title=404), 404
@app.errorhandler(500)
def server_error(error):
return render_template('500.html', title=500), 500
@app.route('/track', methods=['POST'])
@app.route('/count', methods=['POST'])
@cross_origin()
def track():
uuid = request.form.get('uuid')
ip_addr = request.remote_addr
src = request.form.get('src')
success = False
user = None
errors = ''
if all([uuid, ip_addr, src]):
try:
user = User(uuid, ip_addr, src)
db.session.add(user)
db.session.commit()
success = True
except IntegrityError:
try:
db.session.rollback()
user = _update_user_timestamp(uuid)
success = True
except Exception as e:
errors = str(e)
user = None
if user is not None:
_send_mail()
_clear_old_users()
user = user.to_dict()
return jsonify(**{
'success': success,
'user': user,
'errors': errors,
})
def _update_user_timestamp(uuid):
"""
Update the timestamp when we last saw the user
"""
user = User.query.filter(User.uuid == uuid).first()
user.created_at = datetime.utcnow()
db.session.add(user)
db.session.commit()
return user
def _send_mail():
user_count = User.query.count()
if user_count > 0 and not user_count % USER_COUNT:
payload = '%d unique users.' % user_count
requests.get(EMAIL_URL % payload)
def _clear_old_users(weeks=4):
"""
Delete any users that have not been updated in the past given number of weeks
"""
current_time = datetime.utcnow()
four_weeks_ago = current_time - timedelta(weeks=weeks)
User.query.filter(User.created_at < four_weeks_ago).delete()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.String(60), unique=True)
created_at = db.Column(db.DateTime, index=True)
ip_addr = db.Column(db.String(40))
src = db.Column(db.String(10))
def __init__(self, uuid, ip_addr, src, created_at=None):
self.uuid = uuid
self.ip_addr = ip_addr
self.src = src # chrome or firefox
if created_at is None:
self.created_at = datetime.utcnow()
else:
try:
self.created_at = datetime.strptime(created_at, DATE_FMT)
except ValueError as e:
print e
self.created_at = datetime.utcnow()
def __repr__(self):
return str(self.__dict__)
def to_dict(self):
return {
'uuid': self.uuid,
'ip_addr': self.ip_addr,
'src': self.src,
'created_at': str(self.created_at)
}
if __name__ == '__main__':
host = '0.0.0.0'
port = int(os.environ.get('PORT', 7070))
app.run(host=host, port=port)
``` |
{
"source": "joshbode/black",
"score": 3
} |
#### File: tests/data/python37.py
```python
def f():
return (i*2 async for i in arange(42))
def g():
return (something_long * something_long async for something_long in async_generator(with_an_argument))
async def func():
if test:
out_batched = [
i
async for i in aitertools._async_map(
self.async_inc, arange(8), batch_size=3
)
]
# output
#!/usr/bin/env python3.7
def f():
return (i * 2 async for i in arange(42))
def g():
return (
something_long * something_long
async for something_long in async_generator(with_an_argument)
)
async def func():
if test:
out_batched = [
i
async for i in aitertools._async_map(
self.async_inc, arange(8), batch_size=3
)
]
``` |
{
"source": "joshbode/jupyter-slack-notify",
"score": 3
} |
#### File: jupyter-slack-notify/jupyter_slack/jupyter_slack.py
```python
import os
import requests
import time
from IPython.core import magic_arguments
from IPython.core.magics import ExecutionMagics
from IPython.core.magic import cell_magic, magics_class
def notify_self(message):
slack_token = os.environ["SLACK_TOKEN"]
slack_id = os.environ["SLACK_ID"]
parameters = {
"token": slack_token,
"channel": "@" + slack_id,
"text": message
}
r = requests.post("https://slack.com/api/chat.postMessage", params=parameters)
return r.text
def construct_time_mess(elapsed):
day = elapsed // (24 * 3600)
elapsed = elapsed % (24 * 3600)
hour = elapsed // 3600
elapsed %= 3600
minutes = elapsed // 60
elapsed %= 60
seconds = round(elapsed, 1)
time_mess = ""
if day > 0:
time_mess += " {} days".format(day)
if hour > 0:
time_mess += " {} hours ".format(hour)
if minutes > 0:
time_mess += " {} minutes".format(minutes)
if seconds > 0:
time_mess += " {} seconds".format(seconds)
return time_mess
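# e.g. construct_time_mess(125) == " 2 minutes 5 seconds" (note the leading space)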
@magics_class
class MessengerMagics(ExecutionMagics):
def __init__(self, shell):
super().__init__(shell)
@cell_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument("message", type=str)
@magic_arguments.argument("--time", "-t", action="store_true")
def notify(self, line="", cell=None):
args = magic_arguments.parse_argstring(self.notify, line)
mess = args.message.replace("\"", "")
start = time.time()
try:
self.shell.ex(cell)
if args.time:
elapsed = time.time() - start
time_mess = construct_time_mess(elapsed)
mess += " in" + time_mess
notify_self("Finished {}".format(mess))
except BaseException as e:
notify_self("Error while {}: {}".format(mess, e.__repr__()))
raise e
``` |
{
"source": "joshbode/larger",
"score": 3
} |
#### File: larger/tests/test_lazy.py
```python
from larger import larger
@larger
def f(x, y=lambda x: x + 1, z=lambda y: y ** 2):
"""test function"""
return x, y, z
def test_lazy_basic():
"""Test lazy invocation with only required arguments."""
assert f(1) == (1, 2, 4)
def test_lazy_with_all_args():
"""Test invocation with all arguments specified."""
assert f(1, 3, 5) == (1, 3, 5)
``` |
{
"source": "joshbode/python-on-whales",
"score": 2
} |
#### File: components/service/cli_wrapper.py
```python
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Union, overload
import python_on_whales.components.task.cli_wrapper
from python_on_whales.client_config import (
ClientConfig,
DockerCLICaller,
ReloadableObjectFromJson,
)
from python_on_whales.components.container.cli_wrapper import to_seconds
from python_on_whales.components.service.models import (
ServiceEndpoint,
ServiceInspectResult,
ServiceSpec,
ServiceUpdateStatus,
ServiceVersion,
)
from python_on_whales.exceptions import NoSuchService
from python_on_whales.utils import (
ValidPath,
format_dict_for_cli,
format_time_arg,
run,
stream_stdout_and_stderr,
to_list,
)
class Service(ReloadableObjectFromJson):
def __init__(
self, client_config: ClientConfig, reference: str, is_immutable_id=False
):
super().__init__(client_config, "id", reference, is_immutable_id)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.remove()
def _fetch_inspect_result_json(self, reference):
return run(self.docker_cmd + ["service", "inspect", reference])
def _parse_json_object(self, json_object: Dict[str, Any]) -> ServiceInspectResult:
return ServiceInspectResult.parse_obj(json_object)
def _get_inspect_result(self) -> ServiceInspectResult:
"""Only there to allow tools to know the return type"""
return super()._get_inspect_result()
@property
def id(self) -> str:
return self._get_immutable_id()
@property
def version(self) -> ServiceVersion:
return self._get_inspect_result().version
@property
def created_at(self) -> datetime:
return self._get_inspect_result().created_at
@property
def updated_at(self) -> datetime:
return self._get_inspect_result().updated_at
@property
def spec(self) -> ServiceSpec:
return self._get_inspect_result().spec
@property
def previous_spec(self) -> Optional[ServiceSpec]:
return self._get_inspect_result().previous_spec
@property
def endpoint(self) -> ServiceEndpoint:
return self._get_inspect_result().endpoint
@property
def update_status(self) -> Optional[ServiceUpdateStatus]:
return self._get_inspect_result().update_status
def ps(self) -> List[python_on_whales.components.task.cli_wrapper.Task]:
"""Returns the list of tasks of this service."""
return ServiceCLI(self.client_config).ps(self)
def remove(self) -> None:
"""Removes this service
It's also possible to use a service as a context manager.
By using a context manager, you ensures that the service will be removed even
if an exception occurs.
```python
from python_on_whales import docker
docker.swarm.init()
with docker.service.create("ubuntu", ["sleep", "infinity"]) as my_service:
print("I'm doing things with the service here")
print(my_service.update_status)
print("I'm out of the context manager, the service has been removed.")
```
"""
ServiceCLI(self.client_config).remove(self)
def scale(self, new_scale: int, detach: bool = False) -> None:
"""Change the scale of a service.
See the [`docker.service.scale`](../sub-commands/service.md#scale) command for
information about the arguments.
"""
ServiceCLI(self.client_config).scale({self: new_scale}, detach=detach)
def update(
self,
detach: bool = False,
force: bool = False,
image: Optional[str] = None,
with_registry_authentication: bool = False,
):
"""Updates a service
See the [`docker.service.update`](../sub-commands/service.md#update) command for
information about the arguments.
"""
ServiceCLI(self.client_config).update(
self, detach, force, image, with_registry_authentication
)
def exists(self) -> bool:
"""Returns `True` if the service is still present in the swarm, `False`
if the service has been removed.
"""
return ServiceCLI(self.client_config).exists(self.id)
ValidService = Union[str, Service]
class ServiceCLI(DockerCLICaller):
def create(
self,
image: str,
command: Union[str, List[str], None],
cap_add: List[str] = [],
cap_drop: List[str] = [],
constraints: List[str] = [],
detach: bool = False,
dns: List[str] = [],
dns_options: List[str] = [],
dns_search: List[str] = [],
endpoint_mode: Optional[str] = None,
entrypoint: Optional[str] = None,
envs: Dict[str, str] = {},
env_files: Union[ValidPath, List[ValidPath]] = [],
generic_resources: List[str] = [],
groups: List[str] = [],
healthcheck: bool = True,
health_cmd: Optional[str] = None,
health_interval: Union[None, int, timedelta] = None,
health_retries: Optional[int] = None,
health_start_period: Union[None, int, timedelta] = None,
health_timeout: Union[None, int, timedelta] = None,
hosts: Dict[str, str] = {},
hostname: Optional[str] = None,
init: bool = False,
isolation: Optional[str] = None,
labels: Dict[str, str] = {},
limit_cpu: Optional[float] = None,
limit_memory: Optional[str] = None,
limit_pids: Optional[int] = None,
log_driver: Optional[str] = None,
):
"""Creates a Docker swarm service.
Consider using 'docker stack deploy' instead as it's idempotent and
easier to read for complex applications.
docker stack deploy is basically docker compose for swarm clusters.
# Arguments:
image: The image to use as the base for the service.
command: The command to execute in the container(s).
"""
full_cmd = self.docker_cmd + ["service", "create", "--quiet"]
full_cmd.add_args_list("--cap-add", cap_add)
full_cmd.add_args_list("--cap-drop", cap_drop)
full_cmd.add_args_list("--constraint", constraints)
full_cmd.add_flag("--detach", detach)
full_cmd.add_args_list("--dns", dns)
full_cmd.add_args_list("--dns-option", dns_options)
full_cmd.add_args_list("--dns-search", dns_search)
full_cmd.add_simple_arg("--endpoint-mode", endpoint_mode)
full_cmd.add_simple_arg("--entrypoint", entrypoint)
full_cmd.add_args_list("--env", format_dict_for_cli(envs))
full_cmd.add_args_list("--env-file", env_files)
full_cmd.add_args_list("--generic-resource", generic_resources)
full_cmd.add_args_list("--group", groups)
full_cmd.add_flag("--no-healthcheck", not healthcheck)
full_cmd.add_simple_arg("--health-cmd", health_cmd)
full_cmd.add_simple_arg("--health-interval", to_seconds(health_interval))
full_cmd.add_simple_arg("--health-retries", health_retries)
full_cmd.add_simple_arg(
"--health-start-period", to_seconds(health_start_period)
)
full_cmd.add_simple_arg("--health-timeout", to_seconds(health_timeout))
for key, value in hosts.items():
full_cmd += ["--host", f"{key}:{value}"]
full_cmd.add_simple_arg("--hostname", hostname)
full_cmd.add_flag("--init", init)
full_cmd.add_simple_arg("--isolation", isolation)
full_cmd.add_args_list("--label", format_dict_for_cli(labels))
full_cmd.add_simple_arg("--limit-cpu", limit_cpu)
full_cmd.add_simple_arg("--limit-memory", limit_memory)
full_cmd.add_simple_arg("--limit-pids", limit_pids)
full_cmd.add_simple_arg("--log-driver", log_driver)
full_cmd.append(image)
if command is not None:
for string in to_list(command):
full_cmd.append(string)
service_id = run(full_cmd)
return Service(self.client_config, service_id, is_immutable_id=True)
@overload
def inspect(self, x: str) -> Service:
pass
@overload
def inspect(self, x: List[str]) -> List[Service]:
...
def inspect(self, x: Union[str, List[str]]) -> Union[Service, List[Service]]:
"""Returns one or a list of `python_on_whales.Service` object(s).
# Raises
`python_on_whales.exceptions.NoSuchService` if one of the services
doesn't exists.
"""
if isinstance(x, str):
return Service(self.client_config, x)
else:
return [Service(self.client_config, a) for a in x]
def exists(self, x: str) -> bool:
"""Verify that a service exists.
It's just calling `docker.service.inspect(...)` and verifying that it doesn't throw
a `python_on_whales.exceptions.NoSuchService`.
# Returns
A `bool`
"""
try:
self.inspect(x)
except NoSuchService:
return False
else:
return True
def logs(
self,
service: ValidService,
details: bool = False,
since: Union[None, datetime, timedelta] = None,
tail: Optional[int] = None,
timestamps: bool = False,
follow: bool = False,
raw: bool = False,
task_ids: bool = True,
resolve: bool = True,
truncate: bool = True,
stream: bool = False,
):
"""Returns the logs of a service as a string or an iterator.
# Arguments
service: The service to get the logs of
details: Show extra details provided to logs
since: Use a datetime or timedelta to specify the lower
date limit for the logs.
tail: Number of lines to show from the end of the logs (default all)
timestamps: Put timestamps next to lines.
follow: If `False` (the default), the logs returned are the logs up to the time
of the function call. If `True`, the logs are followed until the service is
stopped (removed), which is why `follow=True` requires `stream=True`.
Without `stream`, a single `str` could only be returned once the service
is removed (possibly never), so that combination raises an error.
With `stream`, you can read the logs in real time and stop
whenever you need.
stream: Similar to the `stream` argument of `docker.run()`.
This function will then return an iterator that will yield a
tuple `(source, content)` with `source` being `"stderr"` or
`"stdout"`. `content` is the content of the line as bytes.
Take a look at [the user guide](https://gabrieldemarmiesse.github.io/python-on-whales/user_guide/docker_run/#stream-the-output)
to have an example of the output.
# Returns
`str` if `stream=False` (the default), `Iterable[Tuple[str, bytes]]`
if `stream=True`.
# Raises
`python_on_whales.exceptions.NoSuchService` if the service does not exists.
"""
# first we verify that the service exists and raise an exception if not.
self.inspect(str(service))
full_cmd = self.docker_cmd + ["service", "logs"]
full_cmd.add_flag("--details", details)
full_cmd.add_simple_arg("--since", format_time_arg(since))
full_cmd.add_simple_arg("--tail", tail)
full_cmd.add_flag("--timestamps", timestamps)
full_cmd.add_flag("--follow", follow)
full_cmd.add_flag("--raw", raw)
full_cmd.add_flag("--no-task-ids", not task_ids)
full_cmd.add_flag("--no-resolve", not resolve)
full_cmd.add_flag("--no-trunc", not truncate)
full_cmd.append(service)
iterator = stream_stdout_and_stderr(full_cmd)
if stream:
return iterator
else:
return "".join(x[1].decode() for x in iterator)
def list(self) -> List[Service]:
"""Returns the list of services
# Returns
A `List[python_on_whales.Services]`
"""
full_cmd = self.docker_cmd + ["service", "list", "--quiet"]
ids_truncated = run(full_cmd).splitlines()
# the ids are truncated because there is no single docker command that allows us to get them
# untruncated. We must run an inspect command to get all untruncated ids.
if ids_truncated == []:
return []
full_cmd = (
self.docker_cmd
+ ["service", "inspect"]
+ ids_truncated
+ ["--format", "{{.ID}}"]
)
ids_not_truncated = run(full_cmd).splitlines()
return [
Service(self.client_config, x, is_immutable_id=True)
for x in ids_not_truncated
]
def ps(
self, x: Union[ValidService, List[ValidService]]
) -> List[python_on_whales.components.task.cli_wrapper.Task]:
"""Returns the list of swarm tasks associated with this service.
You can pass multiple services at once at this function.
```python
from python_on_whales import docker
tasks = docker.service.ps("my-service-name")
print(tasks[0].desired_state)
# running
```
# Arguments
x: One or more services (can be id, name or `python_on_whales.Service` object.)
# Returns
`List[python_on_whales.Task]`
# Raises
`python_on_whales.exceptions.NoSuchService` if one of the services
doesn't exists.
"""
full_cmd = (
self.docker_cmd + ["service", "ps", "--quiet", "--no-trunc"] + to_list(x)
)
ids = run(full_cmd).splitlines()
return [
python_on_whales.components.task.cli_wrapper.Task(
self.client_config, id_, is_immutable_id=True
)
for id_ in ids
]
def remove(self, services: Union[ValidService, List[ValidService]]) -> None:
"""Removes a service
# Arguments
services: One or a list of services to remove.
# Raises
`python_on_whales.exceptions.NoSuchService` if one of the services
doesn't exists.
"""
full_cmd = self.docker_cmd + ["service", "remove"]
for service in to_list(services):
full_cmd.append(service)
run(full_cmd)
def rollback(self):
"""Not yet implemented"""
raise NotImplementedError
def scale(self, new_scales: Dict[ValidService, int], detach: bool = False) -> None:
"""Scale one or more services.
# Arguments
new_scales: Mapping between services and the desired scales. For example
you can provide `new_scale={"service1": 4, "service2": 8}`
detach: If True, does not wait for the services to converge and return
immediately.
# Raises
`python_on_whales.exceptions.NoSuchService` if one of the services
doesn't exists.
"""
# verify that the services exists
self.inspect(list(new_scales.keys()))
full_cmd = self.docker_cmd + ["service", "scale"]
full_cmd.add_flag("--detach", detach)
for service, new_scale in new_scales.items():
full_cmd.append(f"{str(service)}={new_scale}")
run(full_cmd, capture_stderr=False, capture_stdout=False)
def update(
self,
service: ValidService,
detach: bool = False,
force: bool = False,
image: Optional[str] = None,
with_registry_authentication: bool = False,
):
"""Update a service
More options coming soon
# Arguments
service: The service to update
detach: Exit immediately instead of waiting for the service to converge
force: Force update even if no changes require it
image: Service image tag
with_registry_authentication: Send registry authentication details
to swarm agents
# Raises
`python_on_whales.exceptions.NoSuchService` if the service doesn't exists.
"""
full_cmd = self.docker_cmd + ["service", "update"]
full_cmd.add_flag("--force", force)
full_cmd.add_simple_arg("--image", image)
full_cmd.add_flag("--with-registry-auth", with_registry_authentication)
full_cmd.add_flag("--detach", detach)
full_cmd.append(service)
run(full_cmd, capture_stdout=False)
```
#### File: python-on-whales/python_on_whales/download_binaries.py
```python
import platform
import shutil
import tempfile
import warnings
from pathlib import Path
import requests
from tqdm import tqdm
DOCKER_VERSION = "20.10.5"
BUILDX_VERSION = "0.5.1"
CACHE_DIR = Path.home() / ".cache" / "python-on-whales"
TEMPLATE_CLI = (
"https://download.docker.com/{os}/static/stable/{arch}/docker-{version}.tgz"
)
WINDOWS_CLI_URL = "https://github.com/StefanScherer/docker-cli-builder/releases/download/{version}/docker.exe"
def get_docker_binary_path_in_cache():
return CACHE_DIR / "docker-cli" / DOCKER_VERSION / "docker"
def get_docker_cli_url():
user_os = get_user_os()
if user_os == "windows":
return WINDOWS_CLI_URL.format(version=DOCKER_VERSION)
arch = get_arch_for_docker_cli_url()
return TEMPLATE_CLI.format(os=user_os, arch=arch, version=DOCKER_VERSION)
def download_docker_cli():
file_to_download = get_docker_cli_url()
extension = file_to_download.split(".")[-1]
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
downloaded_file_path = tmp_dir / f"docker.{extension}"
download_from_url(file_to_download, downloaded_file_path)
docker_binary_path = get_docker_binary_path_in_cache()
docker_binary_path.parent.mkdir(exist_ok=True, parents=True)
if extension == "tgz":
extract_dir = tmp_dir / "extracted"
shutil.unpack_archive(str(downloaded_file_path), str(extract_dir))
shutil.move(extract_dir / "docker" / "docker", docker_binary_path)
elif extension == "exe":
shutil.move(downloaded_file_path, docker_binary_path)
warnings.warn(
f"The docker client binary file {DOCKER_VERSION} was downloaded and put "
f"in `{docker_binary_path.absolute()}`. \n"
f"You can feel free to remove it if you wish, Python on whales will download "
f"it again if needed."
)
def download_from_url(url, dst):
try:
_download_from_url(url, dst)
except Exception as e:
raise ConnectionError(f"Error while downloading {url}") from e
def _download_from_url(url, dst):
# Streaming, so we can iterate over the response.
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1024
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(dst, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
raise ConnectionError(
f"Total size should be {total_size_in_bytes}, downloaded {progress_bar.n}"
)
def get_user_os():
user_os = platform.system()
if user_os == "Linux":
return "linux"
elif user_os == "Darwin":
return "mac"
elif user_os == "Windows":
return "windows"
else:
raise NotImplementedError(
f"Unknown OS: {user_os}, cannot determine which Docker CLI binary file to "
f"download. \n"
f"Please open an issue at \n"
f"https://github.com/gabrieldemarmiesse/python-on-whales/issues \n"
f"and in the meantime, install Docker manually to make python-on-whales "
f"work."
)
def get_arch_for_docker_cli_url():
arch = platform.architecture()[0]
# I don't know the exact list of possible architectures,
# so if a user reports a NotImplementedError, we can easily add
# his/her platform here.
arch_mapping = {
"NotImplementedError": "aarch64",
"NotImplementedError2": "armel",
"NotImplementedError3": "armhf",
"NotImplementedError4": "ppc64le",
"NotImplementedError5": "s390x",
"64bit": "x86_64",
}
try:
return arch_mapping[arch]
except KeyError:
raise NotImplementedError(
f"The architecture detected on your system is `{arch}`, the list of "
f"available architectures is {list(arch_mapping.values())}. \n"
f"Please open an issue at \n"
f"https://github.com/gabrieldemarmiesse/python-on-whales/issues "
f"and make sure to copy past this error message. \n"
f"In the meantime, install Docker manually on your system."
)
``` |
{
"source": "joshbohde/functional_python",
"score": 3
} |
#### File: joshbohde/functional_python/combinators.py
```python
from functools import wraps
from itertools import takewhile
class Combinators(object):
def __init__(self, value):
self._value = value
def K(self, f, *args, **kwargs):
"""The Kestrel combinator, invokes a method, and returns the original value"""
if callable(f):
f(self._value, *args, **kwargs)
else:
getattr(self._value, f)(*args, **kwargs)
return self._value
def T(self, function, *args, **kwargs):
"""The Thrush combinator, makes a function call look like a method"""
return function(self._value, *args, **kwargs)
def R(self, function, *args, **kwargs):
"""The Robin combinator, like the thrust, except appends to the end of the argument list"""
return function(*(args + (self._value,)), **kwargs)
def chain(self):
return ChainedCombinators(self._value)
class ChainedCombinators(object):
def __init__(self, value):
self._value = value
def K(self, f, *args, **kwargs):
"""The Kestrel combinator, invokes a method, and returns the original value"""
if callable(f):
f(self._value, *args, **kwargs)
else:
getattr(self._value, f)(*args, **kwargs)
return self
def T(self, function, *args, **kwargs):
"""The Thrush combinator, makes a function call look like a method"""
return self.__class__(function(self._value, *args, **kwargs))
def R(self, function, *args, **kwargs):
"""The Robin combinator, like the thrust, except appends to the end of the argument list."""
return self.__class__(function(*(args + (self._value,)), **kwargs))
def value(self):
return self._value
def bw(value):
return Combinators(value)
def fluent_combinator_style(xs):
return bw(xs).chain()\
.R(takewhile, lambda x: x < 7)\
.R(filter, lambda x: x < 2)\
.R(map, lambda x: 4 * x)\
.value()
assert fluent_combinator_style(range(10)) == [0, 4]
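# Extra illustrative checks for the un-chained combinators: K invokes a side
# effect and returns the original value, T makes a plain function call look
# like a method on the wrapped value.
seen = []
assert bw([1, 2, 3]).K(lambda xs: seen.append(len(xs))) == [1, 2, 3]
assert seen == [3]
assert bw(3).T(lambda x, y: x + y, 4) == 7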
```
#### File: joshbohde/functional_python/pipes.py
```python
def imperative_style(xs):
results = []
for x in xs:
if x >= 7:
break
if x < 2:
result = 4 * x
results.append(result)
return results
# Let's add a sanity check, since we're going
# to refactor this version
assert imperative_style(range(10)) == [0, 4]
# Whenever I see code like the above `imperative_style`,
# I mentally file it as point of possible complexity and bugs,
# especially as requirements change and more logic
# is tacked on.
# As a comparison, let's look at a simple functional version
# First, we're going to need `takewhile` from itertools,
# which will allow us to build something like the `break` statement
from itertools import takewhile
# And now the functions definition, utilized two
# builtins, `map` and `filter`, as well as `takewhile`
# to break the problem down into logically independent parts
# notice that the conditional in the `takewhile` is inverted
def functional_style(xs):
return map(lambda x: 4 * x,
filter(lambda x: x < 2,
takewhile(lambda x: x < 7, xs)))
assert functional_style(range(10)) == [0, 4]
# There are less moving parts here,
# but it seems too much like a Christmas tree for me.
# Can we make it flatter?
# For this, we're going to need more tools for working with
# functions. First is compose, which let's us feed the result
# of one function as the argument of another.
# I'm always surprised that Python doesn't have this builtin.
# In more functional languages this is a basic feature, with
# Haskell making it one character (`.`)
def compose_two(g, f):
"""Function composition for two functions, e.g. compose_two(f, g)(x) == f(g(x))"""
return lambda *args, **kwargs: g(f(*args, **kwargs))
assert compose_two(lambda x: x * 2,
lambda y: y + 4)(1) == 10
def compose(*funcs):
"""Compose an arbitrary number of functions left-to-right passed as args"""
return reduce(compose_two, funcs)
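# Illustrative checks: compose applies right-to-left, so the last function
# passed is the first one applied to the argument.
assert compose(lambda x: x * 2, lambda y: y + 4)(1) == 10
assert compose(str, lambda x: x + 1, lambda x: x * 2)(3) == '7'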
# With compose, we need one more function, `partial`,
# which can be used to provide only some of a function's
# arguments
from functools import partial
composition_style = compose(
partial(map, lambda x: 4 * x),
partial(filter, lambda x: x < 2),
partial(takewhile, lambda x: x < 7))
assert composition_style(range(10)) == [0, 4]
# There's a quite a bit of boilerplate
# in this definition.
# Can we abstract out a reusable pattern?
from itertools import starmap
def composed_partials(*partial_funcs):
return compose(*starmap(partial, partial_funcs))
composed_partials_style = composed_partials(
(map, lambda x: 4 * x),
(filter, lambda x: x < 2),
(takewhile, lambda x: x < 7))
assert composed_partials_style(range(10)) == [0, 4]
# This is less noisy, but it's a bit difficult to
# read the logic in reverse order. Can we change that?
def pipe(*partial_funcs):
return composed_partials(*reversed(partial_funcs))
pipe_style = pipe(
(takewhile, lambda x: x < 7),
(filter, lambda x: x < 2),
(map, lambda x: 4 * x))
assert pipe_style(range(10)) == [0, 4]
# This definition is more dataflow oriented, much
# like using pipes in a shell, or Clojure's `->` macro.
# Looking at the definition for composed_partials and pipe,
# they follow a similar structure.
# Can we extract that out?
def transform_args(func, transformer):
return lambda *args: func(*transformer(args))
composed_partials = transform_args(compose, partial(starmap, partial))
composed_partials_style = composed_partials(
(map, lambda x: 4 * x),
(filter, lambda x: x < 2),
(takewhile, lambda x: x < 7))
assert composed_partials_style(range(10)) == [0, 4]
pipe = transform_args(composed_partials, reversed)
pipe_style = pipe(
(takewhile, lambda x: x < 7),
(filter, lambda x: x < 2),
(map, lambda x: 4 * x))
assert pipe_style(range(10)) == [0, 4]
``` |
{
"source": "joshbohde/redash",
"score": 2
} |
#### File: tests/tasks/test_refresh_queries.py
```python
from mock import patch, call, ANY
from tests import BaseTestCase
from redash.tasks.queries.maintenance import refresh_queries
from redash.models import Query
ENQUEUE_QUERY = "redash.tasks.queries.maintenance.enqueue_query"
class TestRefreshQuery(BaseTestCase):
def test_enqueues_outdated_queries_for_sqlquery(self):
"""
refresh_queries() launches an execution task for each query returned
from Query.outdated_queries().
"""
query1 = self.factory.create_query(options={"apply_auto_limit": True})
query2 = self.factory.create_query(
query_text="select 42;", data_source=self.factory.create_data_source(),
options={"apply_auto_limit": True}
)
oq = staticmethod(lambda: [query1, query2])
with patch(ENQUEUE_QUERY) as add_job_mock, patch.object(
Query, "outdated_queries", oq
):
refresh_queries()
self.assertEqual(add_job_mock.call_count, 2)
add_job_mock.assert_has_calls(
[
call(
query1.query_text + " LIMIT 1000",
query1.data_source,
query1.user_id,
scheduled_query=query1,
metadata=ANY,
),
call(
"select 42 LIMIT 1000",
query2.data_source,
query2.user_id,
scheduled_query=query2,
metadata=ANY,
),
],
any_order=True,
)
def test_enqueues_outdated_queries_for_non_sqlquery(self):
"""
refresh_queries() launches an execution task for each query returned
from Query.outdated_queries().
"""
ds = self.factory.create_data_source(
group=self.factory.org.default_group, type="prometheus"
)
query1 = self.factory.create_query(data_source=ds, options={"apply_auto_limit": True})
query2 = self.factory.create_query(
query_text="select 42;", data_source=ds, options={"apply_auto_limit": True}
)
oq = staticmethod(lambda: [query1, query2])
with patch(ENQUEUE_QUERY) as add_job_mock, patch.object(
Query, "outdated_queries", oq
):
refresh_queries()
self.assertEqual(add_job_mock.call_count, 2)
add_job_mock.assert_has_calls(
[
call(
query1.query_text,
query1.data_source,
query1.user_id,
scheduled_query=query1,
metadata=ANY,
),
call(
query2.query_text,
query2.data_source,
query2.user_id,
scheduled_query=query2,
metadata=ANY,
),
],
any_order=True,
)
def test_doesnt_enqueue_outdated_queries_for_paused_data_source_for_sqlquery(self):
"""
refresh_queries() does not launch execution tasks for queries whose
data source is paused.
"""
query = self.factory.create_query(options={"apply_auto_limit": True})
oq = staticmethod(lambda: [query])
query.data_source.pause()
with patch.object(Query, "outdated_queries", oq):
with patch(ENQUEUE_QUERY) as add_job_mock:
refresh_queries()
add_job_mock.assert_not_called()
query.data_source.resume()
with patch(ENQUEUE_QUERY) as add_job_mock:
refresh_queries()
add_job_mock.assert_called_with(
query.query_text + " LIMIT 1000",
query.data_source,
query.user_id,
scheduled_query=query,
metadata=ANY,
)
def test_doesnt_enqueue_outdated_queries_for_paused_data_source_for_non_sqlquery(self):
"""
refresh_queries() does not launch execution tasks for queries whose
data source is paused.
"""
ds = self.factory.create_data_source(
group=self.factory.org.default_group, type="prometheus"
)
query = self.factory.create_query(data_source=ds, options={"apply_auto_limit": True})
oq = staticmethod(lambda: [query])
query.data_source.pause()
with patch.object(Query, "outdated_queries", oq):
with patch(ENQUEUE_QUERY) as add_job_mock:
refresh_queries()
add_job_mock.assert_not_called()
query.data_source.resume()
with patch(ENQUEUE_QUERY) as add_job_mock:
refresh_queries()
add_job_mock.assert_called_with(
query.query_text,
query.data_source,
query.user_id,
scheduled_query=query,
metadata=ANY,
)
def test_enqueues_parameterized_queries_for_sqlquery(self):
"""
Scheduled queries with parameters use saved values.
"""
query = self.factory.create_query(
query_text="select {{n}}",
options={
"parameters": [
{
"global": False,
"type": "text",
"name": "n",
"value": "42",
"title": "n",
}
],
"apply_auto_limit": True
},
)
oq = staticmethod(lambda: [query])
with patch(ENQUEUE_QUERY) as add_job_mock, patch.object(
Query, "outdated_queries", oq
):
refresh_queries()
add_job_mock.assert_called_with(
"select 42 LIMIT 1000",
query.data_source,
query.user_id,
scheduled_query=query,
metadata=ANY,
)
def test_enqueues_parameterized_queries_for_non_sqlquery(self):
"""
Scheduled queries with parameters use saved values.
"""
ds = self.factory.create_data_source(
group=self.factory.org.default_group, type="prometheus"
)
query = self.factory.create_query(
query_text="select {{n}}",
options={
"parameters": [
{
"global": False,
"type": "text",
"name": "n",
"value": "42",
"title": "n",
}
],
"apply_auto_limit": True
},
data_source=ds,
)
oq = staticmethod(lambda: [query])
with patch(ENQUEUE_QUERY) as add_job_mock, patch.object(
Query, "outdated_queries", oq
):
refresh_queries()
add_job_mock.assert_called_with(
"select 42",
query.data_source,
query.user_id,
scheduled_query=query,
metadata=ANY,
)
def test_doesnt_enqueue_parameterized_queries_with_invalid_parameters(self):
"""
Scheduled queries with invalid parameters are skipped.
"""
query = self.factory.create_query(
query_text="select {{n}}",
options={
"parameters": [
{
"global": False,
"type": "text",
"name": "n",
"value": 42, # <-- should be text!
"title": "n",
}
],
"apply_auto_limit": True
},
)
oq = staticmethod(lambda: [query])
with patch(ENQUEUE_QUERY) as add_job_mock, patch.object(
Query, "outdated_queries", oq
):
refresh_queries()
add_job_mock.assert_not_called()
def test_doesnt_enqueue_parameterized_queries_with_dropdown_queries_that_are_detached_from_data_source(
self
):
"""
Scheduled queries with a dropdown parameter which points to a query that is detached from its data source are skipped.
"""
query = self.factory.create_query(
query_text="select {{n}}",
options={
"parameters": [
{
"global": False,
"type": "query",
"name": "n",
"queryId": 100,
"title": "n",
}
],
"apply_auto_limit": True
},
)
dropdown_query = self.factory.create_query(id=100, data_source=None)
oq = staticmethod(lambda: [query])
with patch(ENQUEUE_QUERY) as add_job_mock, patch.object(
Query, "outdated_queries", oq
):
refresh_queries()
add_job_mock.assert_not_called()
```
#### File: tests/tasks/test_worker.py
```python
from mock import patch, call
from rq import Connection
from rq.job import JobStatus
from redash.tasks import Worker
from tests import BaseTestCase
from redash import rq_redis_connection
from redash.tasks.worker import Queue
from redash.tasks.queries.execution import (
enqueue_query,
)
from redash.worker import job, default_queues
@patch("statsd.StatsClient.incr")
class TestWorkerMetrics(BaseTestCase):
def tearDown(self):
with Connection(rq_redis_connection):
for queue_name in default_queues:
Queue(queue_name).empty()
def test_worker_records_success_metrics(self, incr):
query = self.factory.create_query()
with Connection(rq_redis_connection):
enqueue_query(
query.query_text,
query.data_source,
query.user_id,
False,
None,
{"Username": "Patrick", "Query ID": query.id},
)
Worker(["queries"]).work(max_jobs=1)
calls = [
call("rq.jobs.running.queries"),
call("rq.jobs.started.queries"),
call("rq.jobs.running.queries", -1, 1),
call("rq.jobs.finished.queries")
]
incr.assert_has_calls(calls)
@patch("rq.Worker.execute_job")
def test_worker_records_failure_metrics(self, _, incr):
"""
        Force superclass execute_job to do nothing and set status to JobStatus.FAILED to simulate query failure
"""
query = self.factory.create_query()
with Connection(rq_redis_connection):
job = enqueue_query(
query.query_text,
query.data_source,
query.user_id,
False,
None,
{"Username": "Patrick", "Query ID": query.id},
)
job.set_status(JobStatus.FAILED)
Worker(["queries"]).work(max_jobs=1)
calls = [
call("rq.jobs.running.queries"),
call("rq.jobs.started.queries"),
call("rq.jobs.running.queries", -1, 1),
call("rq.jobs.failed.queries")
]
incr.assert_has_calls(calls)
@patch("statsd.StatsClient.incr")
class TestQueueMetrics(BaseTestCase):
def tearDown(self):
with Connection(rq_redis_connection):
for queue_name in default_queues:
Queue(queue_name).empty()
def test_enqueue_query_records_created_metric(self, incr):
query = self.factory.create_query()
with Connection(rq_redis_connection):
enqueue_query(
query.query_text,
query.data_source,
query.user_id,
False,
None,
{"Username": "Patrick", "Query ID": query.id},
)
incr.assert_called_with("rq.jobs.created.queries")
def test_job_delay_records_created_metric(self, incr):
@job("default", timeout=300)
def foo():
pass
foo.delay()
incr.assert_called_with("rq.jobs.created.default")
``` |
{
"source": "joshbradshaw11/python-sshtail",
"score": 3
} |
#### File: python-sshtail/sshtail/tailers.py
```python
import os
import time
import paramiko
class SSHTailer(object):
"""
Class to handle the tailing of a single file via SSH.
"""
def __init__(self, host, remote_filename, private_key=None, verbose=False):
if '@' in host:
self.username, self.host = tuple(host.split('@'))
else:
self.username, self.host = None, host
self.remote_filename = remote_filename
self.private_key = private_key
self.client = None
self.sftp_client = None
self.remote_file_size = None
self.line_terminators = ['\r', '\n', '\r\n']
self.line_terminators_joined = '\r\n'
self.verbose = verbose
def connect(self):
if self.verbose:
print('Connecting to %s...' % self.host)
# connect to the host
self.client = paramiko.SSHClient()
self.client.load_system_host_keys()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.private_key:
self.client.connect(self.host, username=self.username, pkey=self.private_key)
else:
self.client.connect(self.host, username=self.username)
if self.verbose:
print('Opening remote file %s...' % self.remote_filename)
# open a connection to the remote file via SFTP
self.sftp_client = self.client.open_sftp()
def tail(self):
# make sure there's a connection
if not self.sftp_client:
self.connect()
fstat = self.sftp_client.stat(self.remote_filename)
# check if we have the file size
if self.remote_file_size is not None:
# if the file's grown
if self.remote_file_size < fstat.st_size:
for line in self.get_new_lines():
yield line
self.remote_file_size = fstat.st_size
def get_new_lines(self):
"""
Opens the file and reads any new data from it.
"""
remote_file = self.sftp_client.open(self.remote_filename, 'r')
# seek to the latest read point in the file
remote_file.seek(self.remote_file_size, 0)
# read any new lines from the file
line = remote_file.readline()
while line:
yield line.strip(self.line_terminators_joined)
line = remote_file.readline()
remote_file.close()
def disconnect(self):
if self.sftp_client:
if self.verbose:
print("Closing SFTP connection...")
self.sftp_client.close()
self.sftp_client = None
if self.client:
if self.verbose:
print("Closing SSH connection...")
self.client.close()
self.client = None
class SSHMultiTailer(object):
"""
Class to handle tailing of multiple files.
"""
def __init__(self, host_files, poll_interval=2.0, private_key=None, verbose=False):
"""
host_files is a dictionary whose keys must correspond to unique
remote hosts to which this machine has access (ideally via SSH key).
The values of the host_files dictionary must be arrays of file names
that must be tailed.
"""
self.host_files = host_files
self.poll_interval = poll_interval
self.private_key = private_key
self.tailers = {}
self.verbose = verbose
def connect(self):
"""
Connects to all of the host machines.
"""
if self.verbose:
print "Connecting to multiple hosts..."
for host, files in self.host_files.iteritems():
self.tailers[host] = {}
for f in files:
self.tailers[host][f] = SSHTailer(host, f, private_key=self.private_key, verbose=self.verbose)
def tail(self, report_sleep=False):
"""
Tails all of the requested files across all of the hosts.
"""
# make sure we're connected
if not self.tailers:
self.connect()
try:
# assuming this script is to run until someone kills it (Ctrl+C)
while 1:
lines_read = 0
                for host, tailers in self.tailers.items():
                    for filename, tailer in tailers.items():
# read as much data as we can from the file
for line in tailer.tail():
yield host, filename, line
lines_read += 1
if not lines_read:
if report_sleep:
yield None, None, None
self.sleep()
finally:
self.disconnect()
def sleep(self):
time.sleep(self.poll_interval)
def disconnect(self):
"""
Disconnects all active connections.
"""
        for host, tailers in self.tailers.items():
            for filename, tailer in tailers.items():
tailer.disconnect()
self.tailers = {}
if self.verbose:
print "Disconnected from hosts."
``` |
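For reference, a minimal usage sketch for `SSHMultiTailer`, assuming key-based SSH access to the listed hosts; the hostnames, file paths, and key location below are illustrative only.
```python
import paramiko

from sshtail.tailers import SSHMultiTailer

# Illustrative hosts and log files; replace with real values.
host_files = {
    'deploy@app1.example.com': ['/var/log/syslog', '/var/log/app/app.log'],
    'deploy@app2.example.com': ['/var/log/syslog'],
}

# Optionally load a private key explicitly (otherwise system defaults are used).
key = paramiko.RSAKey.from_private_key_file('/home/me/.ssh/id_rsa')

tailer = SSHMultiTailer(host_files, poll_interval=2.0, private_key=key, verbose=True)
try:
    for host, filename, line in tailer.tail():
        print('%s:%s> %s' % (host, filename, line))
except KeyboardInterrupt:
    pass  # tail() disconnects from all hosts in its finally block
```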
{
"source": "JoshBradshaw/pressure-peak-detect",
"score": 3
} |
#### File: JoshBradshaw/pressure-peak-detect/pressure_peak_detect.py
```python
from __future__ import division
import collections
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.signal import butter, filtfilt
import warnings
"""
Algorithm adapted from:
An Open-source Algorithm to Detect Onset of Arterial Blood Pressure Pulses
by: <NAME>, <NAME>, <NAME>, <NAME>
"""
### USER TUNABLE FILTER SETTINGS
# Hz !!! MUST BE SET CORRECTLY FOR TIME OUTPUT TO BE ACCURATE
# if only sample-index output is needed, an approximate value is sufficient for sample rates >50Hz
SAMPLING_RATE = 561
MAX_EXPECTED_HR = 320 # BPM -- used to set the refractory period
LOWPASS_FILTER_CUTTOFF_FREQUENCY = 16 # Hz
# effective filtering will be 2x this order, because the filter is applied twice
# see http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.signal.filtfilt.html
LOWPASS_FILTER_ORDER = 1
### USER TUNABLE WAVEFORM TRANSFORM SETTINGS
# this value is used to determine the SSF window length. This paramater is not
# very sensitive and does not typically require adjustment
PULSE_RISE_TIME = 0.06 # seconds
# 128 ms window recommended by Zong et al. for adult BP. It is sometimes
# necessary to shorten this window length when dealing with very fast heart rates
SSF_WINDOW_LENGTH = int(math.floor(SAMPLING_RATE * PULSE_RISE_TIME))
### USER TUNABLE PEAK THRESHOLDING SETTINGS
# BPM -- used to calculate the refractory period
# you must use a much higher value if the subject has arrhythmia or skip-beats
REFRACTORY_PERIOD = int(math.floor((60 / MAX_EXPECTED_HR) * SAMPLING_RATE))
# the peak threshold is initialized as ~2-4 times the mean SSF signal value,
# then dynamically updated as peaks are detected
INITIAL_PEAK_THRESHOLD = 2 # arbitrary coefficient -- should be 1-5
# to avoid false positive detections of peaks, only detect peaks
# that exceed this fraction of the magnitude of the last few detected peaks (see PEAK_BUFFER_LEN)
PEAK_THRESHOLD_FRACTION = 0.6 # 0-1
# number of peaks to average the peak threshold value over. A higher number
# will decrease the risk of false positive detections, but increase the chance
# of false negative (misses) detections in the event of a sudden baseline change
PEAK_BUFFER_LEN = 3 # number of peaks
# the stability of the rolling point peak detection algorithm is improved
# marginally if a larger rolling point spacing is used.
ROLLING_POINT_SPACING = 4 # samples
# seek up to 1 second back to find the start of the pressure peak
MAXIMUM_TROUGH_SEEK_SAMPLES = SAMPLING_RATE
DEBUG = True # plot the algorithm output
def butter_lowpass(highcut, fs, order=5):
"""generates the filter coefficients for a butterworth lowpass filter,
see: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html
"""
nyq = 0.5 * fs # nyquist rate in Hz
high = highcut / nyq
b, a = butter(order, high, btype='lowpass')
return b, a
def butter_lowpass_filter(data, highcut, fs, order=5):
"""
    applies the Butterworth lowpass filter forwards and backwards (scipy filtfilt),
    so the output has zero phase (time) delay relative to the input
"""
b, a = butter_lowpass(highcut, fs, order=order)
# using filtfilt makes this a zero phase filter
y = filtfilt(b, a, data)
return y
def ssf(x0, x1):
"""slope sum function"""
if x1 > x0:
return x1 - x0
else:
return 0
def windowed_slope_sum_function(filtered_bp_vector, window_length):
"""see the mathematical description of this algorithm in:
An Open-source Algorithm to Detect Onset of Arterial Blood Pressure Pulses
"""
# this could be replaced with simple pointer manipulation, but this is easier
# init with a monotonically increasing function
window_ring_buffer = collections.deque(np.zeros(window_length), maxlen=window_length)
ssf_vector = []
window_ssf = 0
    for ii, bp_sample in enumerate(filtered_bp_vector):
window_ssf -= ssf(window_ring_buffer[0], window_ring_buffer[1])
window_ssf += ssf(window_ring_buffer[-1], bp_sample)
window_ring_buffer.append(bp_sample)
if ii > window_length:
ssf_vector.append(window_ssf)
else:
ssf_vector.append(0)
return ssf_vector
class PeakThreshold(object):
"""
the peak threshold value is simply a rolling average of the the preceding
peak values.
"""
def __init__(self, ssf_vector, sampling_rate):
# takes the mean of the SSF waveform
# assumes that the SSF waveform is very bottom heavy ie. mean << max
initial_threshold = INITIAL_PEAK_THRESHOLD * np.mean(ssf_vector)
        init_peak_buffer = [initial_threshold for _ in range(PEAK_BUFFER_LEN)]
self.threshold_sum = PEAK_BUFFER_LEN * initial_threshold
self.peak_buffer = collections.deque(init_peak_buffer, maxlen=PEAK_BUFFER_LEN)
def get_threshold(self):
return (self.threshold_sum / PEAK_BUFFER_LEN) * PEAK_THRESHOLD_FRACTION
def update(self, new_peak_value):
self.threshold_sum -= self.peak_buffer[0]
self.peak_buffer.append(new_peak_value)
self.threshold_sum += new_peak_value
return self.get_threshold()
def estimate_all_pressure_peak_locations(bp_vector, sampling_rate, backtrack_to_pulse_onset=False, output_type='sample_idx'):
"""returns a list of the R wave locations, in samples
output_types:
sample_idx -- a list of the bp_vector sample indexes which correspond to R-wave locations
time -- the times of the R wave locations in seconds, relative to the start of bp_vector
"""
detected_locations = []
lp_filtered_vector = butter_lowpass_filter(bp_vector, LOWPASS_FILTER_CUTTOFF_FREQUENCY,
sampling_rate, order=LOWPASS_FILTER_ORDER)
ssf_transformed_vector = windowed_slope_sum_function(lp_filtered_vector, SSF_WINDOW_LENGTH)
peak_thresholder = PeakThreshold(ssf_transformed_vector, sampling_rate)
p_threshold = peak_thresholder.get_threshold()
rolling_point_buffer = collections.deque(np.zeros(ROLLING_POINT_SPACING), maxlen=ROLLING_POINT_SPACING)
r_period_count = REFRACTORY_PERIOD
rising_edge = False
# peak detection state machine
for sample_num, bp_val in enumerate(ssf_transformed_vector):
lrp = rolling_point_buffer[0] # left rolling point
rrp = bp_val # right rolling point
if rising_edge and lrp > rrp:
r_period_count = 0
peak_thresholder.update(lrp)
p_threshold = peak_thresholder.get_threshold()
rising_edge = False
if backtrack_to_pulse_onset:
detection_sample_num = find_onset_of_trough(lp_filtered_vector, sample_num, ssf_transformed_vector, p_threshold, sampling_rate)
else:
detection_sample_num = sample_num - ROLLING_POINT_SPACING
# detection_sample_num is None if find_onset_of_trough backtracking fails
if detection_sample_num is not None:
detected_locations.append(detection_sample_num)
elif not rising_edge and r_period_count > REFRACTORY_PERIOD and rrp > lrp and lrp > p_threshold:
rising_edge = True
else:
pass # state unchanged during this step
r_period_count += 1
rolling_point_buffer.append(bp_val)
# edge case where the recording ends on a rising edge, but cuts off before the peak
if backtrack_to_pulse_onset and rising_edge:
find_onset_of_trough(lp_filtered_vector, sample_num, ssf_transformed_vector, p_threshold, sampling_rate)
if DEBUG:
# display the intermediate filtered and SSF waveforms, for testing
x = np.linspace(0, len(bp_vector), len(bp_vector))
plt.figure(1)
plt.plot(x, bp_vector)
x = np.arange(0, len(bp_vector))
plt.plot(x, lp_filtered_vector)
plt.plot(x[:len(ssf_transformed_vector)], ssf_transformed_vector)
bp_min = min(bp_vector)
bp_max = max(bp_vector)
detections_x = []
detections_y = []
for sample_num in detected_locations:
detections_x.append(sample_num)
detections_y.append(bp_vector[sample_num])
plt.title("BP, FILTERED BP, SSF TRANSFORMED BP, AND DETECTIONS")
plt.ylabel("Voltage, or mmHg, or whatever BP was measured in")
plt.xlabel("Samples")
plt.plot(detections_x, detections_y, 'ro')
if output_type=='sample_idx':
return detected_locations
elif output_type=='time':
return np.array(detected_locations) / sampling_rate
else:
error_msg = "output_type: '{}' is invalid".format(output_type)
raise ValueError(error_msg)
def find_onset_of_trough(lp_filtered_vector, pressure_peak_sample_num, ssf_vector, ssf_threshold, sampling_rate):
"""
given the location of a pressure peak, searches backwards along the rising
edge to find the rightmost edge of the trough between peaks
"""
trough_onset_locations = []
    backwards_seek_range = reversed(range(pressure_peak_sample_num-MAXIMUM_TROUGH_SEEK_SAMPLES, pressure_peak_sample_num))
rolling_point_buffer = collections.deque(np.zeros(ROLLING_POINT_SPACING), maxlen=ROLLING_POINT_SPACING)
rising_edge = False
for ii in backwards_seek_range:
if ii < 0:
msg = "Unable to locate the beginning of the first pressure peak, algorithm reached start of recording"
warnings.warn(msg)
return None
        rrp = rolling_point_buffer[0]  # right rolling point (later sample, from the buffer)
        lrp = lp_filtered_vector[ii]  # left rolling point (current, earlier sample)
rolling_point_buffer.append(lrp)
if rising_edge and lrp > rrp:
pressure_peak_location = ii + ROLLING_POINT_SPACING
trough_onset_locations.append(ii)
return pressure_peak_location
elif not rising_edge and lrp < rrp and ssf_vector[ii] < ssf_threshold:
rising_edge = True
else:
pass
msg = "Unable to find the start of the pressure peak at time: '{}'".format(pressure_peak_sample_num / sampling_rate)
warnings.warn(msg)
return None
# TUNING / TEST METHODS
def filter_frequency_response_tester():
"""convenience method from http://stackoverflow.com/a/12233959/2754647
plots the frequency responses of a scipy filter
"""
from scipy.signal import freqz
# Sample rate and desired cutoff frequencies (in Hz).
fs = 250
highcut = 16
# Plot the frequency response for a few different orders.
plt.figure(2)
plt.clf()
for order in [3, 6, 9]:
b, a = butter_lowpass(highcut, fs, order=order)
w, h = freqz(b, a, worN=2000)
plt.plot((fs * 0.5 / np.pi) * w, abs(h), label="order = %d" % order)
plt.plot([0, 0.5 * fs], [np.sqrt(0.5), np.sqrt(0.5)],
'--', label='sqrt(0.5)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain')
plt.grid(True)
plt.legend(loc='best')
plt.show()
def test():
signal = []
skip_num = 1 #sometimes there is an abrupt jump at the first sample
us_trace = np.load("trace_IMG_20160510_1_12_anon.dcm.npy")
signal = us_trace[0][1][skip_num:]
signal_time = us_trace[0][0][skip_num:]
print "sampling rate is 1 sample per %f seconds = %f samples per second" % (signal_time[1] - signal_time[0], 1/(signal_time[1] - signal_time[0]))
if False:
plt.figure(1)
plt.clf()
plt.plot(signal_time, signal)
L = estimate_all_pressure_peak_locations(signal, SAMPLING_RATE, True, 'time')
print "R-wave time points = %s" % L
plt.show()
if __name__ == '__main__':
test()
``` |
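A minimal sketch of calling the detector on a synthetic pressure-like trace; the waveform below is illustrative only, and the module-level SAMPLING_RATE constant must match the real sampling rate for time output to be meaningful (with DEBUG left True the function also builds a diagnostic matplotlib figure).
```python
import numpy as np

import pressure_peak_detect as ppd

# Build an illustrative quasi-periodic "pressure" trace at the configured sampling rate.
fs = ppd.SAMPLING_RATE
t = np.arange(0, 10, 1.0 / fs)                        # 10 seconds of samples
heart_rate_hz = 1.5                                   # ~90 BPM
signal = 80 + 20 * np.clip(np.sin(2 * np.pi * heart_rate_hz * t), 0, None) ** 3
signal += np.random.normal(0, 0.5, signal.shape)      # a little measurement noise

peak_times = ppd.estimate_all_pressure_peak_locations(
    signal, fs, backtrack_to_pulse_onset=False, output_type='time')
print("Detected %d peaks" % len(peak_times))
```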
{
"source": "joshbressers/blockchain-elasticsearch",
"score": 2
} |
#### File: blockchain-elasticsearch/python/esbtc.py
```python
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import TransportError
import elasticsearch
import elasticsearch.helpers
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import os
class ElasticsearchBTC:
"Class for querying the Elasticsearch BTC instance"
def __init__(self, url=None):
if url is None:
self.url = os.environ['ESURL']
else:
self.url = url
self.es = Elasticsearch([self.url], http_compress = True, timeout=60)
self.size = None
def get_transactions_indices(self):
return self.es.indices.get('btc-transactions-*').keys()
def get_block(self, block=None, height=None):
result = {}
if block:
result = self.es.search(index="btc-blocks-*", body={"query": { "match": { "_id": block }}})
elif height:
result = self.es.search(index="btc-blocks-*", body={"query": { "match": { "height": height }}})
# We're just going to assume neither of these can return
# multiple things
if len(result['hits']['hits']) == 0:
return None
else:
return result['hits']['hits'][0]['_source']
def get_transactions(self, tx):
unsorted = {}
# Use batches of 20
for batch_range in range(0, len(tx), 20):
result = self.es.search(index="btc-transactions-*",
body={"size": 50,
"query": {
"terms": {
"txid": tx[batch_range:batch_range+20]
}
}
}
)
if len(result['hits']['hits']) == 0:
return None
# Collect the results
for i in result['hits']['hits']:
unsorted[i['_source']['txid']] = i['_source']
# Return the results in the right order
output = []
for i in tx:
if i in unsorted:
output.append(unsorted[i])
else:
pass
# Sometimes crazy things happen
#import pdb; pdb.set_trace()
return output
def get_transaction(self, tx):
result = self.es.search(index="btc-transactions-*", body={"query": { "match": { "txid": tx }}})
# We're just going to assume neither of these can return
# multiple things
if len(result['hits']['hits']) == 0:
return None
else:
return result['hits']['hits'][0]['_source']
def get_block_transactions(self, block):
result = self.es.search(index="btc-transactions-*", body={"query": { "match": { "block": block }}})
txs = []
for i in result['hits']['hits']:
txs.append(i['_source'])
return txs
def get_block_transactions_number(self, block):
result = self.es.search(index="btc-transactions-*", body={"query": { "match": { "block": block }}})
return result['hits']['total']
def get_nonstandard_transactions(self):
query = { "_source": ["hash", "vout.scriptPubKey.hex", "vout.scriptPubKey.type"], "query" : { "match": { "vout.scriptPubKey.type": "nonstandard" } } }
return elasticsearch.helpers.scan(self.es, index="btc-transactions-*", query=query, scroll='1m')
def count_nulldata_transactions(self, index):
result = self.es.count(index=index,
body={
"query": {
"term": { "vout.scriptPubKey.type": "nulldata" }
}
}
)
return result
def get_nulldata_transactions(self, index):
# This is a mess. Apologies if you're looking at this
query = { "_source": ["hash",
"height",
"txid",
"vin.txid",
"vout.scriptPubKey.asm",
"vout.scriptPubKey.type",
"vout.n"
],
"query" : {
"bool": {
"must": [
{"term": { "vout.scriptPubKey.type": "nulldata" }}
]
}
}
}
return elasticsearch.helpers.scan(self.es, index=index, query=query, scroll='5m')
def get_opreturn_data(self, bottom = None, top = None):
query = { "_source": ["tx",
"height",
"n",
"txid",
"vin.txid",
],
"query" : {
"match_all" : {}
}
}
if bottom is not None and top is not None:
query['query'] = {"range" : { "height" : { "gte" : bottom, "lte" : top}}}
return elasticsearch.helpers.scan(self.es, index="btc-opreturn", query=query, size=100, scroll='1m')
def get_opreturn_tx(self, tx):
result = self.es.search(index="btc-opreturn", body={"query": {
"match": { "txid": { "query": tx }}}})
# We're just going to assume neither of these can return
# multiple things
if len(result['hits']['hits']) == 0:
return None
else:
return result['hits']['hits']
def set_opreturn_tx_parent(self, tx):
my_id = tx['_id']
data = tx['_source']
data['is_parent'] = True
self.es.update(id=my_id, index="btc-opreturn", doc_type='doc', body={'doc' :data}, request_timeout=10)
def add_opreturn_tx_child(self, parent_txid, child_txid):
tx = self.get_opreturn_tx(parent_txid)
if tx is None:
return None
my_id = tx[0]['_id']
data = tx[0]['_source']
if 'children' in data and child_txid not in data['children']:
data['children'].append(child_txid)
else:
data['children'] = [child_txid]
self.es.update(id=my_id, index="btc-opreturn", doc_type='doc', body={'doc' :data}, request_timeout=10)
def update_opreturns(self, the_iter):
errors = []
for ok, item in elasticsearch.helpers.streaming_bulk(self.es, the_iter, max_retries=2):
if not ok:
errors.append(item)
return errors
def add_opreturn(self, data):
my_id = "%s-%s" % (data['tx'], data['n'])
#self.es.update(id=my_id, index="btc-opreturn", doc_type='doc', body={'doc' :data, 'doc_as_upsert': True}, request_timeout=30)
self.es.update(id=my_id, index="btc-opreturn", body={'doc' :data, 'doc_as_upsert': True}, request_timeout=30)
def add_block(self, block, force_add=False):
"Add a block. Do nothing if the block already exists"
read_index = "btc-blocks-*"
the_index = "btc-blocks"
exists = False
try:
#self.es.get(index=the_index, doc_type="doc", id=block['hash'])
self.es.get(index=read_index, id=block['hash'])
exists = True
except NotFoundError:
# We need to add this block
exists = False
if exists is False or force_add is True:
#self.es.update(id=block['hash'], index=the_index, doc_type='doc', body={'doc' :block, 'doc_as_upsert': True}, request_timeout=30)
self.es.update(id=block['hash'], index=the_index, body={'doc' :block, 'doc_as_upsert': True}, request_timeout=30)
def add_transaction(self, tx):
"Add a transaction. Do nothing if the block already exists"
the_index = "btc-transactions"
try:
#self.es.get(index=the_index, doc_type="doc", id=tx['hash'])
self.es.get(index=the_index, id=tx['hash'])
# It exists if this returns, let's skip it
except NotFoundError:
# We need to add this transaction
#self.es.update(id=tx['hash'], index=the_index, doc_type='doc', body={'doc' :tx, 'doc_as_upsert': True}, request_timeout=30)
self.es.update(id=tx['hash'], index=the_index, body={'doc' :tx, 'doc_as_upsert': True}, request_timeout=30)
def add_price(self, date, price):
"Add the price for a given timestamp"
price_data = { 'time': date, 'price': price }
#self.es.update(id=date, index="btc-price-date", doc_type='_doc', body={'doc' :price_data, 'doc_as_upsert': True}, request_timeout=30)
self.es.update(id=date, index="btc-price-date", body={'doc' :price_data, 'doc_as_upsert': True}, request_timeout=30)
def add_opreturn_files(self, data):
errors = []
for ok, item in elasticsearch.helpers.streaming_bulk(self.es, data, max_retries=2):
if not ok:
errors.append(item)
return errors
def get_max_block(self):
"Get the largest block in the system"
if self.size is None:
query = {'sort': [{'height': 'desc'}], 'size': 1, 'query': {'match_all': {}}, '_source': ['height']}
res = self.es.search(index="btc-blocks-*", body=query)
self.size = res['hits']['hits'][0]['_source']['height']
return self.size
def add_bulk_tx(self, data_iterable):
"Do some sort of bulk thing with an iterable"
errors = []
for ok, item in elasticsearch.helpers.streaming_bulk(self.es, data_iterable, max_retries=2):
if not ok:
errors.append(item)
return errors
class DaemonBTC:
def __init__(self, url, timeout=90):
self.rpc = AuthServiceProxy(url, timeout=timeout)
def get_block(self, i):
block = self.rpc.getblockhash(i)
block_data = self.rpc.getblock(block)
block_data['transactions'] = len(block_data['tx'])
# Elasticsearch struggles with these as integers
#block_data['chainwork_int'] = int(block_data['chainwork'], 16)
block_data['difficulty'] = int(block_data['difficulty'])
del(block_data['tx'])
# Figure out how many coins moved
value = 0
txs = self.get_block_transactions(i)
# This is the data we need for value
# txs[0]['vout'][0]['value']
for tx in txs:
for vout in tx['vout']:
if vout['scriptPubKey']['type'] == 'nonstandard':
pass
else:
value = value + vout['value']
block_data['value'] = value
return block_data
def get_transaction(self, tx):
rtx = self.rpc.getrawtransaction(tx)
dtx = self.rpc.decoderawtransaction(rtx)
return dtx
def get_transactions(self, txs):
rtx = self.rpc.batch_([["getrawtransaction", t] for t in txs])
dtx = self.rpc.batch_([["decoderawtransaction", t] for t in rtx])
return dtx
def get_block_transactions(self, block):
# The genesis block is special
if block == 0:
return []
blockhash = self.rpc.getblockhash(block)
block_data = self.rpc.getblock(blockhash)
transactions = []
rtx = self.rpc.batch_([["getrawtransaction", t] for t in block_data['tx']])
dtx = self.rpc.batch_([["decoderawtransaction", t] for t in rtx])
for tx in dtx:
tx['height'] = block_data['height']
tx['block'] = block_data['hash']
tx['time'] = block_data['time']
# We can't use this data, let's get rid of it
for i in tx['vin']:
if 'scriptSig' in i:
del(i['scriptSig'])
for i in tx['vout']:
if 'hex' in i['scriptPubKey']:
del(i['scriptPubKey']['hex'])
if 'asm' in i['scriptPubKey']:
del(i['scriptPubKey']['asm'])
transactions.append(tx)
return transactions
def get_block_transactions_bulk(self, block):
"Return an iterable object for bulk transactions"
transactions = self.get_block_transactions(block)
tx = Transactions()
for i in transactions:
tx.add_transaction(i)
return tx
def get_blocks_bulk(self, blocks):
rbh = self.rpc.batch_([["getblockhash", t] for t in blocks])
dbh = self.rpc.batch_([["get_block", t] for t in rbh])
output = []
for block_data in dbh:
block_data['transactions'] = len(block_data['tx'])
block_data['chainwork_int'] = int(block_data['chainwork'], 16)
del(block_data['tx'])
output.append(block_data)
return output
def get_max_block(self):
return self.rpc.getblockcount()
class Transactions:
def __init__(self):
self.transactions = []
self.current = -1
def add_transaction(self, tx):
temp = {
#'_type': 'doc',
'_op_type': 'update',
'_index': "btc-transactions",
'_id': tx['hash'],
'doc_as_upsert': True,
'doc': tx
}
self.transactions.append(temp)
def __next__(self):
"handle a call to next()"
self.current = self.current + 1
if self.current >= len(self.transactions):
raise StopIteration
return self.transactions[self.current]
def __iter__(self):
"Just return ourself"
return self
def __len__(self):
return len(self.transactions)
class OP_RETURN:
def __init__(self, es):
self.transactions = []
self.current = -1
self.es_handle = es
def add_transaction(self, tx):
temp = { '_type': 'doc',
'_op_type': 'update',
'_index': "btc-opreturn",
'_id': "%s-%s" % (tx['tx'], tx['n']),
'doc_as_upsert': True,
'doc': tx
}
self.transactions.append(temp)
if len(self.transactions) > 200:
try:
self.es_handle.add_bulk_tx(self)
except TransportError:
                import pdb; pdb.set_trace()
self.transactions = []
self.current = -1
def __next__(self):
"handle a call to next()"
self.current = self.current + 1
if self.current >= len(self.transactions):
raise StopIteration
return self.transactions[self.current]
def __iter__(self):
"Just return ourself"
return self
def __len__(self):
return len(self.transactions)
``` |
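A minimal sketch of indexing one block and its transactions with the two classes above; the RPC credentials and ESURL value are placeholders, and it assumes the btc-blocks index already contains at least one block.
```python
import os

from esbtc import DaemonBTC, ElasticsearchBTC

# Placeholder endpoints; set real bitcoind RPC credentials and an Elasticsearch URL.
os.environ.setdefault('ESURL', 'http://localhost:9200')
daemon = DaemonBTC('http://rpcuser:rpcpassword@127.0.0.1:8332')
es = ElasticsearchBTC()

height = es.get_max_block() + 1          # next block after the highest one indexed
block = daemon.get_block(height)
es.add_block(block)

# Bulk-index that block's transactions via the Transactions iterable helper.
errors = es.add_bulk_tx(daemon.get_block_transactions_bulk(height))
if errors:
    print("Bulk indexing errors:", errors)
```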
{
"source": "joshbressers/covid19-es",
"score": 3
} |
#### File: covid19-es/covid19es/eshelper.py
```python
import os
import json
import elasticsearch
import elasticsearch.helpers
from elasticsearch import Elasticsearch
class ES:
def __init__(self, idx='covid19-country'):
if 'ESURL' not in os.environ:
es_url = "http://localhost:9200"
else:
es_url = os.environ['ESURL']
es = Elasticsearch([es_url])
# Delete the index if it exists
if es.indices.exists(idx) is True:
es.indices.delete(index=idx, ignore=[400, 404])
# We have to create it and add a mapping
fh = open('mapping.json')
mapping = json.load(fh)
es.indices.create(idx, body=mapping)
fh.close()
self.es = es
def add(self, data):
for ok, item in elasticsearch.helpers.streaming_bulk(self.es, data, max_retries=2):
if not ok:
print("ERROR:")
print(item)
``` |
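A short usage sketch for the ES helper; it needs a reachable Elasticsearch instance and a local mapping.json, and note the constructor drops and recreates the index. The document fields below are illustrative, and `add()` expects an iterable of bulk actions.
```python
from covid19es.eshelper import ES

es = ES(idx='covid19-country')

# streaming_bulk consumes bulk actions; these fields are illustrative only.
actions = [
    {
        "_index": "covid19-country",
        "_id": "US-2020-03-01",
        "_source": {"country": "US", "date": "2020-03-01", "confirmed": 30},
    },
    {
        "_index": "covid19-country",
        "_id": "IT-2020-03-01",
        "_source": {"country": "IT", "date": "2020-03-01", "confirmed": 1694},
    },
]
es.add(actions)
```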
{
"source": "joshbressers/dwf-chain",
"score": 3
} |
#### File: dwf-chain/tests/test_DB.py
```python
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
import vuln_chain
import time
import tempfile
class TestDB(unittest.TestCase):
def setUp(self):
the_time = time.time()
self.the_block = vuln_chain.Block('id', 0, None)
self.second_block = self.the_block.add_next('id2', the_time, data = "Secret data2")
self.third_block = self.second_block.add_next('id3', the_time, data = "Secret data3")
self.tempdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tempdir.cleanup()
def test_lookup_id(self):
tempdb = os.path.join(self.tempdir.name, 'test_db')
# Open DB
the_db = vuln_chain.DB(tempdb)
# Write blocks
the_db.add_chain(self.the_block)
# Close the DB
the_db.close()
# Open the DB
the_db = vuln_chain.DB(tempdb)
# Read in a block
a_block = the_db.load_by_id(self.the_block.get_id())
self.assertEqual(a_block, self.the_block)
a_block = the_db.load_by_id(self.second_block.get_id())
self.assertEqual(a_block, self.second_block)
a_block = the_db.load_by_id(self.third_block.get_id())
self.assertEqual(a_block, self.third_block)
def test_write_block(self):
tempdb = os.path.join(self.tempdir.name, 'test_db')
# Open DB
the_db = vuln_chain.DB(tempdb)
# Write blocks
the_db.add_block(self.the_block)
# Close the DB
the_db.close()
# Open the DB
the_db = vuln_chain.DB(tempdb)
# Read in a block
a_block = the_db.load_by_id(self.the_block.get_id())
self.assertEqual(a_block, self.the_block)
with self.assertRaises(Exception) as context:
a_block = the_db.load_by_id(self.second_block.get_id())
def test_lookup_hash(self):
tempdb = os.path.join(self.tempdir.name, 'test_db')
# Open DB
the_db = vuln_chain.DB(tempdb)
# Write blocks
the_db.add_chain(self.the_block)
# Close the DB
the_db.close()
# Open the DB
the_db = vuln_chain.DB(tempdb)
# Read in a block
a_block = the_db.load_by_hash(self.the_block.get_hash())
self.assertEqual(a_block, self.the_block)
def test_child(self):
tempdb = os.path.join(self.tempdir.name, 'test_db')
# Open DB
the_db = vuln_chain.DB(tempdb)
# Write chain
the_db.add_chain(self.the_block)
# add some children
child1 = vuln_chain.Block('child1', 0, None, parent = self.the_block)
child2 = vuln_chain.Block('child2', 0, None, parent = self.the_block)
child3 = vuln_chain.Block('child3', 0, None, parent = self.the_block)
the_db.add_block(child1)
the_db.add_block(child2)
the_db.add_block(child3)
# Close the DB
the_db.close()
# Open the DB
the_db = vuln_chain.DB(tempdb)
# Read in a block
child_blocks = the_db.load_by_parent(self.the_block.get_hash())
print(child_blocks)
self.assertTrue(child1 in child_blocks)
self.assertTrue(child2 in child_blocks)
self.assertTrue(child3 in child_blocks)
if __name__ == '__main__':
unittest.main()
``` |
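A small sketch of the chain-building and persistence calls exercised by the tests above; the IDs and data strings are illustrative, and the vuln_chain API is assumed to behave exactly as the tests use it.
```python
import time

import vuln_chain

# Build a short chain the same way the tests do.
root = vuln_chain.Block('VULN-ROOT', 0, None)
child = root.add_next('VULN-0001', time.time(), data='advisory text')

# Persist the whole chain, then reopen the DB and read a block back by ID.
db = vuln_chain.DB('/tmp/vuln_chain_db')
db.add_chain(root)
db.close()

db = vuln_chain.DB('/tmp/vuln_chain_db')
assert db.load_by_id(child.get_id()) == child
db.close()
```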
{
"source": "joshbressers/dwf-request",
"score": 3
} |
#### File: dwf-bot/DWF/DWFIssue.py
```python
import requests
import os
import json
import re
class Issue:
def __init__(self, details):
self.raw_data = details
self.lines = details['body'].splitlines()
self.comments_url = details['comments_url']
self.events_url = details['events_url']
self.url = details['url']
self.title = details['title']
        self.id = details['number']
self.creator = details['user']['login']
self.creator_id = details['user']['id']
self.auth = (os.environ['GH_USERNAME'], os.environ['GH_TOKEN'])
def get_dwf_id(self):
# We are going to only trust the comment from <username> for this
# ID. It's the most trustworthy ID
comments = self.get_comments()
comments.reverse()
for i in comments:
if i['user']['login'] == os.environ['GH_USERNAME']:
if i['body'].startswith('This issue has been assigned'):
                    match = re.search(r'((CVE|CAN)-\d{4}-\d+)', i['body'])
dwf_id = match.groups()[0]
return dwf_id
return None
def get_events(self):
events = []
page = 0
while(True):
params = {
'per_page': 100,
'page': page
}
resp = requests.get(self.events_url, auth=self.auth, params=params)
resp.raise_for_status()
if len(resp.json()) == 0:
break
else:
page = page + 1
events.extend(resp.json())
return events
def get_comments(self):
comments = []
page = 0
while(True):
params = {
'per_page': 100,
'page': page
}
resp = requests.get(self.comments_url, auth=self.auth, params=params)
resp.raise_for_status()
if len(resp.json()) == 0:
break
else:
page = page + 1
comments.extend(resp.json())
return comments
def who_approved(self):
events = self.get_events()
# We should reverse the list as we want to figure out who gave the last approval
events.reverse()
for i in events:
# I'm pretty sure we need better logic here
if i['event'] == 'labeled' and i['label']['name'] == 'approved':
approver = i['actor']['login']
approver_id = i['actor']['id']
return "%s:%s" % (approver, approver_id)
def get_reporter(self):
data = self.get_dwf_json()
the_reporter = "%s:%s" % (data['reporter'], data['reporter_id'])
return the_reporter
def get_dwf_json(self):
dwf_json = ""
found_json = False
for l in self.lines:
if l == "--- DWF JSON ---":
found_json = not found_json
elif found_json is True:
dwf_json = dwf_json + l
dwf_data = json.loads(dwf_json)
return dwf_data
def add_comment(self, comment):
body = {
"body": comment
}
headers = {
"accept": "application/json"
}
resp = requests.post(self.comments_url, json=body, auth=self.auth, headers=headers)
resp.raise_for_status()
def can_to_dwf(self):
can_id = self.get_dwf_id()
# Make sure the ID starts with CAN
if not can_id.startswith('CAN-'):
return None
# Get the path to the file
year = can_id.split('-')[1]
id_str = can_id.split('-')[2]
dwf_id = "CVE-%s-%s" % (year, id_str)
self.title = self.title.replace(can_id, dwf_id)
body = {
"title": self.title,
"state": "closed"
}
headers = {
"accept": "application/json"
}
resp = requests.post(self.url, json=body, auth=self.auth, headers=headers)
resp.raise_for_status()
def assign_dwf(self, dwf_id, approved_user = False):
# Add a comment to the issue
self.add_comment("This issue has been assigned %s" % dwf_id)
# Modify the title and labels
body = {
"title": "[%s] %s" % (dwf_id, self.title),
"labels": ["assigned"]
}
headers = {
"accept": "application/json"
}
if approved_user:
body["state"] = "closed"
else:
# CAN IDs get the candidate label
body["labels"] = ["assigned", "candidate"]
resp = requests.post(self.url, json=body, auth=self.auth, headers=headers)
resp.raise_for_status()
``` |
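A short sketch of wrapping an issue fetched from the GitHub REST API; OWNER/REPO and the issue number are placeholders, and GH_USERNAME/GH_TOKEN must be set in the environment, as the class itself assumes.
```python
import os

import requests

from DWF.DWFIssue import Issue

auth = (os.environ['GH_USERNAME'], os.environ['GH_TOKEN'])

# Placeholder repository and issue number.
resp = requests.get("https://api.github.com/repos/OWNER/REPO/issues/1", auth=auth)
resp.raise_for_status()

issue = Issue(resp.json())
print(issue.title)
print("Assigned ID:", issue.get_dwf_id())   # None if the bot has not commented yet
```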
{
"source": "joshbressers/salto",
"score": 2
} |
#### File: joshbressers/salto/parse-log.py
```python
import os
import sys
import json
import socket
import glob
import boto3
import time
from s3logparse import s3logparse
import elasticsearch
import elasticsearch.helpers
from elasticsearch import Elasticsearch
import threading, queue
s3_q = queue.Queue(maxsize=10000)
es_q = queue.Queue(maxsize=3000)
def s3_worker():
session = boto3.session.Session()
s3 = session.resource('s3',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
aws_session_token=os.environ['AWS_SESSION_TOKEN'],
)
bucket = 'toolbox-data.anchore.io-logs'
while True:
key = <KEY>()
obj = s3.Object(bucket, key)
body = obj.get()['Body'].read()
line = body.decode('utf-8')
lines = line.split("\n")
if lines[-1] == '':
lines = lines[0:-1]
print(key)
count = 0
for line in s3logparse.parse_log_lines(lines):
count = count + 1
data = {}
data['id'] = obj.key + "-" + str(count)
data['bucket'] = line.bucket
data['timestamp'] = line.timestamp.isoformat()
data['remote_ip'] = line.remote_ip
#try:
# data['dns'] = socket.gethostbyaddr(line.remote_ip)[0]
#except:
# pass
data['operation'] = line.operation
data['s3_key'] = line.s3_key
data['request_uri'] = line.request_uri
if line.request_uri is not None:
uri = line.request_uri.split(' ')
if len(uri) > 2:
data['request_verb'] = uri[0]
data['request_uri'] = uri[1]
data['request_ver'] = uri[2]
data['status_code'] = line.status_code
data['error_code'] = line.error_code
data['bytes_sent'] = line.bytes_sent
data['user_agent'] = line.user_agent
data['total_time'] = line.total_time
data['turn_around_time'] = line.turn_around_time
data['referrer'] = line.referrer
data['version_id'] = line.version_id
aws_bulk = {
"_op_type": "update",
"_index": "aws",
"_id": data['id'],
"doc_as_upsert": True,
"pipeline": "aws",
"doc": data
}
es_q.put(aws_bulk)
s3_q.task_done()
def es_worker():
if 'ESURL' not in os.environ:
es_url = "http://localhost:9200"
else:
es_url = os.environ['ESURL']
es = Elasticsearch([es_url])
# First let's see if the index exists
if es.indices.exists(index='aws') is False:
# We have to create it and add a mapping
fh = open('mapping.json')
mapping = json.load(fh)
es.indices.create(index='aws', body=mapping)
bulk_data = []
last_run = False
while True:
obj = es_q.get()
if obj == "Done":
last_run = True
else:
bulk_data.append(obj)
if last_run or len(bulk_data) >= 2000:
for ok, item in elasticsearch.helpers.streaming_bulk(es, bulk_data, max_retries=2):
if not ok:
print("ERROR:")
print(item)
bulk_data = []
es_q.task_done()
#path = sys.argv[1]
#for file in glob.glob(os.path.join(path, "*")):
# with open(file) as fh:
threading.Thread(target=es_worker, daemon=True).start()
for i in range(0, 20):
threading.Thread(target=s3_worker, daemon=True).start()
session = boto3.Session(
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
aws_session_token=os.environ['AWS_SESSION_TOKEN']
)
s3 = session.resource('s3')
bucket = s3.Bucket('toolbox-data.anchore.io-logs')
skip = 'access_logs/%s' % sys.argv[1]
#for obj in bucket.objects.all():
for obj in bucket.objects.filter(Prefix=skip):
s3_q.put(obj.key)
while not s3_q.empty():
time.sleep(1)
es_q.put("Done")
while not es_q.empty():
time.sleep(1)
``` |
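The script above fans work out with bounded queues, daemon worker threads, and a "Done" sentinel, then polls the queues until they drain. Below is a stripped-down sketch of the same pattern (the work itself is illustrative); using Queue.join() with one sentinel per worker is an alternative to the sleep-based polling.
```python
import queue
import threading

work_q = queue.Queue(maxsize=100)

def worker():
    while True:
        item = work_q.get()
        if item is None:             # sentinel: this worker is done
            work_q.task_done()
            break
        print("processing", item)    # illustrative work
        work_q.task_done()

threads = [threading.Thread(target=worker, daemon=True) for _ in range(4)]
for t in threads:
    t.start()

for i in range(20):
    work_q.put(i)

work_q.join()                        # wait until every queued item is processed
for _ in threads:
    work_q.put(None)                 # one sentinel per worker
for t in threads:
    t.join()
```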
{
"source": "joshbressers/sbom-analysis",
"score": 2
} |
#### File: joshbressers/sbom-analysis/load-vulns.py
```python
import docker
import json
import uuid
import os
import glob
import tempfile
from esbulkstream import Documents
def main():
cwd = os.getcwd()
es = Documents('vulns', mapping='')
docker_client = docker.from_env()
# Pull down a new GrypeDB
grype_env = [
"GRYPE_DB_CACHE_DIR=/grype_cache",
"GRYPE_DB_AUTO_UPDATE=false"
]
temp_dir = tempfile.TemporaryDirectory()
grype_db_location = temp_dir.name
output = docker_client.containers.run("anchore/grype", "db update" ,volumes=[f"{grype_db_location}:/grype_cache"], environment=grype_env)
the_files = glob.glob(f"{cwd}/SBOMs/*.json")
for sbom_file in the_files:
# Get just the container name
sbom_name = os.path.split(sbom_file)[-1]
c = sbom_name[:-5]
print(c)
output = docker_client.containers.run("anchore/grype", \
"-o json sbom:/SBOMs/%s" % sbom_name, volumes=[f"{cwd}/SBOMs:/SBOMs", f"{grype_db_location}:/grype_cache"], environment=grype_env)
json_data = json.loads(output)
for p in json_data['matches']:
the_doc = {}
the_doc['vuln_id'] = p['vulnerability']['id']
the_doc['severity'] = p['vulnerability']['severity']
the_doc['fix'] = p['vulnerability']['fix']
the_doc['package'] = p['artifact']['name']
the_doc['version'] = p['artifact']['version']
the_doc['type'] = p['artifact']['type']
the_doc['container'] = c
the_doc['related'] = p['relatedVulnerabilities']
es.add(the_doc, uuid.uuid4())
# We have to clean up the temporary directory, the DB is owned by root
output = docker_client.containers.run("alpine", "rm -rf /grype_cache/3"
,volumes=[f"{grype_db_location}:/grype_cache"])
temp_dir.cleanup()
es.done()
if __name__ == "__main__":
main()
``` |
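A pared-down sketch of the per-SBOM scan step above, outside the indexing loop; the image name and call form mirror the script, while the SBOM filename and mount path are illustrative.
```python
import json
import os

import docker

client = docker.from_env()
cwd = os.getcwd()

# Scan a single (illustrative) SBOM file with grype and count the matches.
output = client.containers.run(
    "anchore/grype",
    "-o json sbom:/SBOMs/example.json",
    volumes=[f"{cwd}/SBOMs:/SBOMs"],
)
report = json.loads(output)
print(len(report["matches"]), "vulnerability matches")
```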
{
"source": "joshbressers/wifilogger",
"score": 4
} |
#### File: wifilogger/raspberrypi-files/shutdown-button.py
```python
import RPi.GPIO as GPIO
import time
import os
presses = 0
press_time = 0
# Use the Broadcom SOC Pin numbers
# Setup the Pin with Internal pullups enabled and PIN in reading mode.
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# Our function on what to do when the button is pressed
def Shutdown(channel):
global presses
global press_time
print "Button"
# We're going to look for 3 fast presses to shut things down
if (time.time() - press_time > 3):
presses = 0
press_time = time.time()
presses = presses + 1
if presses >= 5:
if (time.time() - press_time) < 3:
print "5"
#os.system("sudo shutdown -h now")
# Add our function to execute when the button pressed event happens
GPIO.add_event_detect(18, GPIO.FALLING, callback = Shutdown, bouncetime = 500)
# Now wait!
while 1:
time.sleep(1)
``` |
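For comparison, a sketch of the same multi-press check using a deque of press timestamps, which replaces the separate counter and window bookkeeping; the pin number, bounce time, and five-presses-in-three-seconds rule are kept from the script above.
```python
import collections
import time

import RPi.GPIO as GPIO

PRESSES_REQUIRED = 5
WINDOW_SECONDS = 3
press_times = collections.deque(maxlen=PRESSES_REQUIRED)

def on_press(channel):
    now = time.time()
    press_times.append(now)
    # Shut down only if the last 5 presses all landed within the 3 second window.
    if len(press_times) == PRESSES_REQUIRED and now - press_times[0] < WINDOW_SECONDS:
        print("5 fast presses detected")
        # os.system("sudo shutdown -h now")

GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(18, GPIO.FALLING, callback=on_press, bouncetime=500)

while True:
    time.sleep(1)
```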
{
"source": "joshbriegal/gacf",
"score": 3
} |
#### File: gacf/gacf/gacf.py
```python
from .datastructure import DataStructure
from .correlator import CorrelationIterator, Correlator
SELECTION_FUNCTIONS = {
"fast": Correlator.fastSelectionFunctionIdx,
"natural": Correlator.naturalSelectionFunctionIdx,
}
WEIGHT_FUNCTIONS = {
"gaussian": Correlator.getGaussianWeights,
"fractional": Correlator.getFractionWeights,
}
GACF_LOG_MESSAGE = (
" ###### ### ###### ######## \n"
"## ## ## ## ## ## ## \n"
"## ## ## ## ## \n"
"## #### ## ## ## ###### \n"
"## ## ######### ## ## \n"
"## ## ## ## ## ## ## \n"
" ###### ## ## ###### ## \n"
"------------------------------\n"
"Number of Data Points: {no_data}\n"
"Number of Lag Timesteps: {no_lag_points}\n"
"Lag Resolution: {lag_resolution}\n"
"------------------------------\n"
)
class GACF:
"""Compute the Generalised Autocorrelation Function (G-ACF)"""
def __init__(self, timeseries=None, values=None, errors=None, filename=None):
if filename:
self.data = DataStructure(filename)
else:
if errors is None:
self.data = DataStructure(timeseries, values)
else:
self.data = DataStructure(timeseries, values, errors)
@staticmethod
def set_up_correlation(
corr, min_lag=None, max_lag=None, lag_resolution=None, alpha=None
):
""" No return type. Applies non-default values to correlator """
if max_lag is not None:
corr.max_lag = max_lag
if min_lag is not None:
corr.min_lag = min_lag
if lag_resolution is not None:
corr.lag_resolution = lag_resolution
if alpha is not None:
corr.alpha = alpha
@staticmethod
def find_correlation(
corr, selection_function="natural", weight_function="fractional"
):
"""If user specifies a different weight or selection function this method is invoked.
Will be considerably slower than the C++ implementation.
Args:
corr (Correlator): Correlator object to iterate over
selection_function (str, optional): Selection Function. Defaults to "natural".
weight_function (str, optional): Weight Function. Defaults to "fractional".
"""
def lag_generator(min_lag, max_lag, lag_resolution):
k = min_lag
is_positive = False
while k <= max_lag:
if k == 0:
is_positive = True
if k > 0 and not is_positive:
is_positive = True
yield 0
yield k
k += lag_resolution
for i, k in enumerate(
lag_generator(corr.min_lag, corr.max_lag, corr.lag_resolution)
):
col_it = CorrelationIterator(k, corr.N_datasets)
SELECTION_FUNCTIONS[selection_function](corr, col_it)
corr.deltaT(col_it)
WEIGHT_FUNCTIONS[weight_function](corr, col_it)
corr.findCorrelation(col_it)
corr.addCorrelationData(col_it, i)
corr.cleanCorrelationData(i + 1)
def autocorrelation(
self,
min_lag=None,
max_lag=None,
lag_resolution=None,
alpha=None,
selection_function="natural",
weight_function="fractional",
return_correlator=False,
):
"""Compute G-ACF
Using a fixed set up of natural selection function and linear weight function
will be faster than the python implementation in general.
        It is recommended to leave selection_function and weight_function as default for speed.
Args:
min_lag (float, optional): min lag in units of time. Defaults to None.
max_lag (float, optional): max lag in units of time. Defaults to None.
lag_resolution (float, optional): lag resolution in units of time. Defaults to None.
alpha (float, optional): weight function characteristic length scale, default is t.median_time. Defaults to None.
selection_function (str, optional): 'fast' or 'natural' - see paper for more details. Defaults to "natural".
weight_function: (str, optional) 'gaussian' or 'fractional' see paper for more details. Defaults to "fractional".
return_correlator (bool, optional): return correlator object. Defaults to False.
Returns:
Tuple: (lag_timeseries, correlations, [Optional: correlator])
"""
corr = Correlator(self.data)
self.set_up_correlation(corr, min_lag, max_lag, lag_resolution, alpha)
if selection_function == "natural" and weight_function == "fractional":
# fast c++ method
corr.calculateStandardCorrelation()
else:
# slow python method with user specified function
self.find_correlation(corr, selection_function, weight_function)
if return_correlator:
return (
corr.lag_timeseries(),
corr.correlations()
if len(corr.correlations()) > 1.0
else corr.correlations()[0],
corr,
)
else:
return (
corr.lag_timeseries(),
corr.correlations()
if len(corr.correlations()) > 1.0
else corr.correlations()[0],
)
``` |
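A minimal sketch of computing the G-ACF for an irregularly sampled sinusoid; the sampling pattern and period are illustrative, and the inputs are passed as plain lists for the compiled data structure.
```python
import numpy as np

from gacf import GACF

# Irregularly sampled sine wave (illustrative data).
rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 30, 400))
y = np.sin(2 * np.pi * t / 5.0) + rng.normal(0, 0.1, t.size)

lag_timeseries, correlations = GACF(list(t), list(y)).autocorrelation(max_lag=15.0)
print("computed %d lags" % len(lag_timeseries))
```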
{
"source": "joshbriegal/roto",
"score": 2
} |
#### File: roto/methods/gacf.py
```python
from typing import Optional, Tuple
import numpy as np
from astropy.convolution import Gaussian1DKernel, convolve
from gacf import GACF
from matplotlib.axes import Axes
from scipy.stats import median_abs_deviation
from roto.methods.fft import FFTPeriodFinder
from roto.methods.periodfinder import PeriodFinder, PeriodResult
class GACFPeriodFinder(PeriodFinder):
"""Generalised Autocorrelation Function (G-ACF) method to find periods.
Conforms to PeriodFinder interface.
"""
def __init__(
self,
timeseries: np.ndarray,
flux: np.ndarray,
flux_errors: Optional[np.ndarray] = None,
min_ratio_of_maximum_peak_size: float = 0.2,
samples_per_peak: int = 3,
time_units: str = "days",
flux_units: str = "relative flux units",
):
"""
Args:
timeseries (np.ndarray): array like time series.
flux (np.ndarray): array like flux values
flux_errors (Optional[np.ndarray], optional): array like errors on flux values. Defaults to None.
"""
super().__init__(
timeseries,
flux,
flux_errors,
min_ratio_of_maximum_peak_size,
samples_per_peak,
time_units,
flux_units,
)
self._gacf = GACF(self.timeseries, self.flux, self.flux_errors)
self.lag_timeseries = None
self.correlations = None
def calculate_periodogram(self, **kwargs) -> None:
"""A "periodogram" does not exist for an ACF
Returns:
None
"""
return None
def calculate_autocorrelation(self, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate G-ACF of data.
It is recommended to leave selection_function and weight_function as default for speed.
Args:
min_lag (float, optional): min lag in time_units. Defaults to None.
max_lag (float, optional): max lag in time_units. Defaults to None.
lag_resolution (float, optional): lag resolution in time_units. Defaults to None.
alpha (float, optional): weight function characteristic length scale, default is t.median_time. Defaults to None.
selection_function (str, optional): 'fast' or 'natural' - see paper for more details. Defaults to "natural".
weight_function: (str, optional) 'gaussian' or 'fractional' see paper for more details. Defaults to "fractional".
Returns:
Tuple[np.ndarray, np.ndarray]: G-ACF lag timeseries and correlations
"""
min_lag = kwargs.get("min_lag", 0)
max_lag = kwargs.get("max_lag", None)
lag_resolution = kwargs.get("lag_resolution", None)
selection_function = kwargs.get("selection_function", "natural")
weight_function = kwargs.get("weight_function", "fractional")
alpha = kwargs.get("alpha", None)
lag_timeseries, correlations = self._gacf.autocorrelation(
min_lag=min_lag,
max_lag=max_lag,
lag_resolution=lag_resolution,
selection_function=selection_function,
weight_function=weight_function,
alpha=alpha,
)
return np.array(lag_timeseries), np.array(correlations)
def __call__(self, gacf_method="fft", **kwargs) -> PeriodResult:
"""Overrides parent call method to allow 2-stage period extraction.
Args:
method (str, optional): Method used to get final period values. Defaults to "fft".
- "fft" will use an FFT on the G-ACF
- "peaks" will find peaks within the G-ACF itself.
Returns:
PeriodResult: [description]
"""
self.lag_timeseries, self.correlations = self.calculate_autocorrelation(
**kwargs
)
if gacf_method == "fft":
fft = FFTPeriodFinder(self.lag_timeseries, self.correlations)
fft_period = fft(**kwargs)
return PeriodResult(
period=fft_period.period,
neg_error=fft_period.neg_error,
pos_error=fft_period.pos_error,
method=self.__class__.__name__,
)
elif gacf_method == "peaks":
return self.find_acf_peaks(self.lag_timeseries, self.correlations)
def find_acf_peaks(
self, lag_timeseries: np.ndarray, correlation: np.ndarray
) -> PeriodResult:
"""Method taken from McQuillan 2013:
Convolve ACF with Gaussian Kernel
Identify peaks in ACF
Select peak associated with mean rotation period
Evaluate uncertainty on error
Args:
lag_timeseries (np.ndarray): Lag time series. Must be positive side only.
correlation (np.ndarray): Correlations
Returns:
PeriodResult: [description]
"""
gaussian_fwhm = lag_timeseries[18] - lag_timeseries[0]
gauss_kernel = Gaussian1DKernel(
gaussian_fwhm, x_size=(np.ceil(gaussian_fwhm * (57 / 18)) // 2 * 2 + 1)
)
smoothed_correlations = convolve(correlation, gauss_kernel)
acf_peak_indexes = self.calculate_peak_indexes(
smoothed_correlations, sort=False
)
# Remove zero point as not a real peak
acf_peak_indexes = np.delete(acf_peak_indexes, 0)
if len(acf_peak_indexes) <= 1:
# just one peak, use width of Gaussian as stdev
# find left min:
central_index = acf_peak_indexes[0]
left_idx = acf_peak_indexes[0]
value = smoothed_correlations[left_idx]
while value > 0.5 * smoothed_correlations[central_index]:
try:
value = smoothed_correlations[left_idx]
left_idx -= 1
except IndexError:
left_idx = None
break
# find right min:
right_idx = acf_peak_indexes[0]
value = smoothed_correlations[right_idx]
while value > 0.5 * smoothed_correlations[central_index]:
try:
value = smoothed_correlations[right_idx]
right_idx += 1
except IndexError:
right_idx = None
break
sigma_p = 0
if left_idx and right_idx:
sigma_p = lag_timeseries[right_idx] - lag_timeseries[left_idx]
return PeriodResult(
period=lag_timeseries[acf_peak_indexes[0]],
neg_error=sigma_p,
pos_error=sigma_p,
method=self.__class__.__name__,
)
peak_lags = lag_timeseries[acf_peak_indexes]
local_heights = np.zeros(len(acf_peak_indexes))
for i, peak_idx in enumerate(acf_peak_indexes):
            left_height = right_height = None
            # find left min:
            left_idx = peak_idx
diff = smoothed_correlations[left_idx] - smoothed_correlations[left_idx - 1]
while diff > 0:
try:
diff = (
smoothed_correlations[left_idx]
- smoothed_correlations[left_idx - 1]
)
left_idx -= 1
except IndexError:
left_idx = None
break
if left_idx:
left_height = correlation[peak_idx] - correlation[left_idx]
# find right min:
right_idx = peak_idx
diff = (
smoothed_correlations[right_idx] - smoothed_correlations[right_idx + 1]
)
while diff > 0:
try:
diff = (
smoothed_correlations[right_idx]
- smoothed_correlations[right_idx + 1]
)
right_idx += 1
except IndexError:
right_idx = None
break
if right_idx:
right_height = correlation[peak_idx] - correlation[right_idx]
if left_height and right_height:
local_heights[i] = (left_height + right_height) / 2
elif right_height:
local_heights[i] = right_height
elif left_height:
local_heights[i] = left_height
else:
local_heights[i] = np.nan
first_lag = peak_lags[0]
second_lag = peak_lags[1]
p_start = 0
if not (2 * first_lag * 0.8) <= second_lag <= (2 * first_lag * 1.2):
if local_heights[1] > local_heights[0]:
p_start = 1
valid_peaks = [peak_lags[p_start]]
valid_peak_indexes = [p_start]
gap = 0
peak_number = 2
for i in range(1, len(peak_lags)):
if (i + p_start) >= len(peak_lags):
break
if len(valid_peaks) >= 10:
break
if i + p_start - 1 >= 0:
gap = peak_lags[i + p_start] - peak_lags[i + p_start - 1]
gap_ratio = gap / peak_lags[p_start]
if (
(peak_lags[p_start] * 0.8 * peak_number)
<= peak_lags[i + p_start]
<= (peak_lags[p_start] * 1.2 * peak_number)
):
if gap_ratio > 0.3:
valid_peaks.append(peak_lags[i + p_start] / peak_number)
valid_peak_indexes.append(i + p_start)
peak_number += 1
# use median / MAD estimate from multiple peaks.
mad = median_abs_deviation(valid_peaks)
sigma_p = 1.483 * mad / np.sqrt(len(valid_peaks) - 1)
med_p = np.median(valid_peaks)
return PeriodResult(
period=med_p,
neg_error=sigma_p,
pos_error=sigma_p,
method=self.__class__.__name__,
)
def plot(
self, ax: Axes, period: PeriodResult, colour: Optional[str] = "orange"
) -> Axes:
"""Given a figure and an axis plot the interesting output of the object.
Args:
ax (Axes): Matplotlib axis
period (PeriodResult): Outputted period to plot around
"""
if (self.lag_timeseries is None) or (self.correlations is None):
self()
ax.scatter(self.lag_timeseries, self.correlations, s=1, color="k")
ax.axvline(period.period, color=colour, lw=1)
ax.axvspan(
period.period - period.neg_error,
period.period + period.pos_error,
color=colour,
alpha=0.5,
)
ax.set_xlim([0, min(5 * period.period, self.lag_timeseries[-1])])
ax.set_xlabel(f"Lag time / {self.time_units}")
ax.set_ylabel("G-ACF Power")
ax.set_title("G-ACF")
return ax
```
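A short sketch of running the finder on an irregularly sampled light curve using the "peaks" extraction path described above; the data are illustrative.
```python
import numpy as np

from roto.methods.gacf import GACFPeriodFinder

rng = np.random.default_rng(1)
timeseries = np.sort(rng.uniform(0, 60, 1000))
flux = 1.0 + 0.02 * np.sin(2 * np.pi * timeseries / 7.5)
flux += rng.normal(0, 0.005, timeseries.size)

finder = GACFPeriodFinder(timeseries, flux)
period_result = finder(gacf_method="peaks")
print(period_result.period, period_result.neg_error, period_result.pos_error)
```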
#### File: roto/methods/lombscargle.py
```python
import logging
from typing import Optional
import numpy as np
import progressbar
from astropy.timeseries import LombScargle
from matplotlib.axes import Axes
from roto.methods.periodfinder import PeriodFinder, Periodogram, PeriodResult
logger = logging.getLogger(__name__)
class LombScarglePeriodFinder(PeriodFinder):
"""LombScargle method to find periods.
Conforms to PeriodFinder interface.
"""
def __init__(
self,
timeseries: np.ndarray,
flux: np.ndarray,
flux_errors: Optional[np.ndarray] = None,
min_ratio_of_maximum_peak_size: float = 0.2,
samples_per_peak: int = 3,
time_units: str = "days",
flux_units: str = "relative flux units",
fit_mean: Optional[bool] = True,
center_data: Optional[bool] = True,
        nterms: Optional[int] = 1,
        normalization: Optional[str] = "standard",
sliding: Optional[bool] = True,
):
"""
Args:
timeseries (np.ndarray): array like time series.
flux (np.ndarray): array like flux values
flux_errors (Optional[np.ndarray], optional): array like errors on flux values. Defaults to None.
fit_mean (Optional[bool]): if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage.
center_data (Optional[bool]): if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False.
            nterms (Optional[int]): number of terms to use in the Fourier fit.
            normalization (Optional[str]): Normalization to use for the periodogram, one of {'standard', 'model', 'log', 'psd'}.
sliding (Optional[bool]): Use a sliding window to generate an error on the period.
"""
super().__init__(
timeseries,
flux,
flux_errors,
min_ratio_of_maximum_peak_size,
samples_per_peak,
time_units,
flux_units,
)
self._lombscargle = LombScargle(
self.timeseries,
self.flux,
dy=self.flux_errors,
fit_mean=fit_mean,
center_data=center_data,
nterms=nterms,
normalization=normalization,
)
self.sliding = sliding
if self.sliding:
self.ls_kwargs = {
"fit_mean": fit_mean,
"center_data": center_data,
"nterms": nterms,
"normalization": normalization,
}
def __call__(self, **kwargs) -> PeriodResult:
"""Call the PeriodFinder object to return a PeriodResult object.
If sliding, will run first run the standard period finder to find a period,
and then generate a set of PeriodResults using a sliding window over periods.
Returns:
PeriodResult: PeriodResult contains period, error and method information.
"""
period_result = super().__call__(**kwargs)
if not self.sliding:
return period_result
return self._sliding_ls_periodogram(period_result, **kwargs)
def _sliding_ls_periodogram(
self,
period_result_estimate: PeriodResult,
n_periods: int = 5,
sliding_aggregation: str = "median",
max_sliding_windows: int = 100,
**autopower_kwargs,
) -> PeriodResult:
"""Generate a set of PeriodResults using a sliding window over n_periods.
Args:
period_result_estimate (PeriodResult): First estimate period result
n_periods (int, optional): Number of complete periods to consider in each window. Defaults to 5.
sliding_aggregation (str, optional): How to aggregate the outputted periods. Defaults to "median". One of ["mean", "median"].
max_sliding_windows (int, optional): Max number of sliding windows to consider. Defaults to 100.
If period is too short, will cap the number of windows at this value.
Raises:
ValueError: If incorrect method given.
Returns:
PeriodResult: Single PeriodResult with errors calculated using spread across window calculations.
"""
methods = ["mean", "median"]
if sliding_aggregation not in methods:
raise ValueError(
f"method must be on of {methods}, not {sliding_aggregation}"
)
period_estimate = period_result_estimate.period
periods = []
epoch = self.timeseries.min()
time_tolerance = np.diff(
self.timeseries
).min() # allow a small tolerance when calculating last window
number_of_windows = (
int(
(
(self.timeseries.max() + time_tolerance)
- (period_estimate * n_periods)
- epoch
)
/ period_estimate
)
+ 1
)
if number_of_windows < 3:
logger.warning(
"Sliding window too large to generate good estimate, returning regular lombscargle"
)
return period_result_estimate
if number_of_windows > max_sliding_windows:
logger.warning(
"Attempting to calculate too many sliding windows, reducing to %d"
% max_sliding_windows
)
number_of_windows = max_sliding_windows
n_periods = ((self.timeseries.max() - epoch) / period_estimate) - (
number_of_windows - 1
)
count = 0
with progressbar.ProgressBar(
max_value=number_of_windows,
widgets=[
"Sliding LombScargle Window: ",
progressbar.Counter(),
" windows (",
progressbar.Timer(),
")",
],
) as bar:
while epoch <= (self.timeseries.max() + time_tolerance) - (
period_estimate * n_periods
):
idxs = np.logical_and(
self.timeseries >= epoch,
self.timeseries < epoch + (period_estimate * n_periods),
)
if len(self.timeseries[idxs]) == 0:
logger.debug("Empty slice %d, continuing" % count)
epoch += period_estimate
count += 1
bar.update(count)
continue
ls_periodfinder = LombScarglePeriodFinder(
self.timeseries[idxs],
self.flux[idxs],
self.flux_errors[idxs] if self.flux_errors is not None else None,
**self.ls_kwargs,
sliding=False,
)
period_result = ls_periodfinder(**autopower_kwargs)
if period_result is not None:
periods.append(period_result.period)
epoch += period_estimate
count += 1
bar.update(count)
logger.info("Calculated %d periods from sliding windows" % len(periods))
logger.debug(periods)
try:
if sliding_aggregation == "median":
percentiles = np.percentile(periods, [10, 50, 90])
ave_period = percentiles[1]
std_period = percentiles[2] - percentiles[0]
elif sliding_aggregation == "mean":
ave_period = np.nanmean(periods)
std_period = np.nanstd(periods)
except (IndexError, ValueError) as err:
logger.error(
"Unable to calculate %s, returning initial guess" % sliding_aggregation
)
logger.error(err, exc_info=True)
return period_result_estimate
return PeriodResult(
ave_period, std_period, std_period, method=self.__class__.__name__
)
def calculate_periodogram(self, **kwargs) -> Periodogram:
"""Calculate LS Periodogram of data
Args:
            method (str, optional): Periodogram implementation passed to autopower. Defaults to "auto".
            method_kwds (dict, optional): Additional keywords passed to the chosen method. Defaults to None.
            normalization (str, optional): Normalization to use for the periodogram. Defaults to None.
            samples_per_peak (int, optional): Approximate number of samples across a typical peak. Defaults to 5.
            nyquist_factor (int, optional): Multiple of the average Nyquist frequency used when no maximum frequency is given. Defaults to 5.
            minimum_frequency (float, optional): Minimum frequency to search. Defaults to None.
            maximum_frequency (float, optional): Maximum frequency to search. Defaults to None (capped below to avoid spuriously short periods).
        Returns:
            Periodogram: The frequency and Lomb-Scargle power
"""
method = kwargs.get("method", "auto")
method_kwds = kwargs.get("method_kwds", None)
normalization = kwargs.get("normalization", None)
samples_per_peak = kwargs.get("samples_per_peak", 5)
nyquist_factor = kwargs.get("nyquist_factor", 5)
minimum_frequency = kwargs.get("minimum_frequency", None)
maximum_frequency = kwargs.get("maximum_frequency", None)
if maximum_frequency is None:
# set max frequency to nyquist limit to prevent small spurious periods.
min_timestamp_diff = np.min(np.diff(self.timeseries))
maximum_frequency = 1.0 / (nyquist_factor * min_timestamp_diff)
return Periodogram(
*self._lombscargle.autopower(
method=method,
method_kwds=method_kwds,
normalization=normalization,
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
)
)
def plot(
self, ax: Axes, period: PeriodResult, colour: Optional[str] = "orange"
) -> Axes:
"""Given a figure and an axis plot the interesting output of the object.
Args:
ax ([type]): Matplotlib axis
period (PeriodResult): Outputted period to plot around
"""
ax = self.plot_periodogram(ax, period, colour=colour)
ax.set_title("Lomb Scargle Periodogram")
return ax
```
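A hedged usage sketch for the sliding-window Lomb-Scargle finder above; the synthetic light curve and keyword values are illustrative only:

```python
# Illustrative usage of LombScarglePeriodFinder with the sliding-window refinement.
import numpy as np

from roto.methods.lombscargle import LombScarglePeriodFinder

rng = np.random.default_rng(42)
timeseries = np.sort(rng.uniform(0, 60, 2000))   # irregularly sampled times, in days
flux = 1.0 + 0.01 * np.sin(2 * np.pi * timeseries / 5.0) + rng.normal(0, 0.002, timeseries.size)

finder = LombScarglePeriodFinder(timeseries, flux, sliding=True)
result = finder()   # first a standard LS fit, then the sliding-window error estimate
print(result.period, result.neg_error, result.pos_error)
```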
#### File: roto/plotting/plotting_tools.py
```python
from math import floor, log10
from typing import Optional
import numpy as np
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.ticker import Formatter, ScalarFormatter
from scipy.stats import binned_statistic
def split_phase(phase, data, timeseries=None, buff=0.9, nperiods=1):
"""
returns list of lists of data & phases for complete periods
:param phase: (requires sorted phased timeseries)
:param data:
:param timeseries:
:param nperiods:
:param buff: require this much phase coverage in first and last segments
"""
phases = []
datas = []
timeseriess = [] if timeseries is not None else None
idx_changes = np.where(np.diff(phase) < 0)[0][::nperiods]
if len(idx_changes) > 0:
        use_first = phase[0] < 1.0 - buff
        use_last = phase[-1] > buff
if use_first:
phases.append(phase[: idx_changes[0]])
datas.append(data[: idx_changes[0]])
if timeseriess is not None:
timeseriess.append(timeseries[: idx_changes[0]])
for i, idx in enumerate(idx_changes[:-1]):
phases.append(phase[idx + 1 : idx_changes[i + 1]])
datas.append(data[idx + 1 : idx_changes[i + 1]])
if timeseriess is not None:
timeseriess.append(timeseries[idx + 1 : idx_changes[i + 1]])
if use_last or np.any(np.diff(phase[idx_changes[-1] + 1 :]) < 0):
phases.append(phase[idx_changes[-1] :])
datas.append(data[idx_changes[-1] :])
if timeseriess is not None:
timeseriess.append(timeseries[idx_changes[-1] :])
if timeseriess is not None:
return phases, datas, timeseriess
else:
return phases, datas
return [phase], [data]
def calculate_phase(timeseries, period, epoch):
"""
Phase fold time series on period and epoch
"""
return np.mod(np.array(timeseries) - epoch, period) / period
def rescale_phase(phase, max_phase=0.2):
"""
Shift phase points if greater than max_phase to negative
"""
return [p - 1 if p > 1 - max_phase else p for p in phase]
def append_to_phase(phase, data, amt=0.05):
"""
Add additional data outside of phase 0-1.
"""
indexes_before = [i for i, p in enumerate(phase) if p > 1 - amt]
indexes_after = [i for i, p in enumerate(phase) if p < amt]
phase_before = [phase[i] - 1 for i in indexes_before]
data_before = [data[i] for i in indexes_before]
phase_after = [phase[i] + 1 for i in indexes_after]
data_after = [data[i] for i in indexes_after]
return (
np.concatenate((phase_before, phase, phase_after)),
np.concatenate((data_before, data, data_after)),
)
def bin_phase_curve(phase, data, statistic="median", bins=20):
"""
Bin phase curve.
"""
bin_medians, bin_edges, _ = binned_statistic(
phase, data, statistic=statistic, bins=bins
)
bin_width = bin_edges[1] - bin_edges[0]
bin_centers = bin_edges[1:] - bin_width / 2
return bin_centers, bin_medians
def create_axis_with_formatter(
fig: Figure, gridspec_position, formatter: Optional[Formatter] = None, **kwargs
) -> Axes:
"""Create subplot figure and apply formatter to x/y axis.
Args:
fig (Figure): Matplotlib Figure
gridspec_position (gridspec slice): gridspec slice / position.
formatter (Optional[Formatter], optional): Matplotlib Ticker Formatter.
Returns:
        Axes: Matplotlib axis with the formatter applied to both x and y axes.
"""
if not formatter:
formatter = ScalarFormatter()
formatter.set_scientific(False)
ax = fig.add_subplot(gridspec_position, **kwargs)
ax.xaxis.set_major_formatter(formatter)
ax.yaxis.set_major_formatter(formatter)
return ax
def rel_flux_to_ppt(
flux_arr: np.ndarray,
normalise: bool = False,
normalisation_value: float = 1.0,
center_around: float = 0.0,
) -> np.ndarray:
"""Convert an array in relative flux into ppt
Args:
        flux_arr (np.ndarray): Array of flux values in relative flux units.
        normalise (bool, optional): If True, normalise by normalisation_value and re-center before converting. Defaults to False.
        normalisation_value (float, optional): Value to divide the flux by when normalising. Defaults to 1.0.
        center_around (float, optional): Value subtracted after normalising. Defaults to 0.0.
    Returns:
        np.ndarray: Flux values in parts per thousand (ppt).
"""
if not normalise:
return flux_arr * 1.0e3
else:
return (flux_arr / normalisation_value - center_around) * 1.0e3
def ppt_to_rel_flux(
flux_arr: np.ndarray,
normalise: bool = False,
normalisation_value: float = 1.0,
center_around: float = 0.0,
) -> np.ndarray:
"""Convert an array in ppt into relative flux
Args:
        flux_arr (np.ndarray): Array of flux values in parts per thousand (ppt).
        normalise (bool, optional): If True, undo the normalisation applied in rel_flux_to_ppt. Defaults to False.
        normalisation_value (float, optional): Value the flux was originally divided by. Defaults to 1.0.
        center_around (float, optional): Value originally subtracted after normalising. Defaults to 0.0.
    Returns:
        np.ndarray: Flux values in relative flux units.
"""
if not normalise:
return flux_arr / 1.0e3
else:
return (flux_arr / 1.0e3 + center_around) * normalisation_value
def round_sig(x, sig=2, return_dp=False):
"""Round a number to a number of significant digits.
Args:
x: number
sig (int, optional): Number of significant figures. Defaults to 2.
return_dp (bool, optional): If true also return dp (argument to round function). Defaults to False.
Returns:
        float or Tuple[float, int]: Number rounded to sig significant figures (and the decimal places used, if return_dp is True).
"""
dp = sig - int(floor(log10(abs(x)))) - 1
if return_dp:
return round(x, dp), dp
else:
return round(x, dp)
```
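A small worked example of the phase-folding helpers above, on synthetic data:

```python
# Worked example of the phase-folding helpers; data and values are illustrative.
import numpy as np

from roto.plotting.plotting_tools import (
    append_to_phase,
    bin_phase_curve,
    calculate_phase,
    round_sig,
)

timeseries = np.linspace(0, 10, 500)
data = np.sin(2 * np.pi * timeseries / 2.5)

phase = calculate_phase(timeseries, period=2.5, epoch=0.0)
phase_ext, data_ext = append_to_phase(phase, data, amt=0.05)   # pad beyond phase 0-1
bin_centers, bin_medians = bin_phase_curve(phase_ext, data_ext, bins=20)

print(round_sig(0.012345, sig=2))   # -> 0.012
```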
#### File: src/roto/roto.py
```python
import logging
from itertools import cycle
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.ticker import ScalarFormatter
from scipy.stats import gaussian_kde, median_abs_deviation
from roto.methods.fft import FFTPeriodFinder
from roto.methods.gacf import GACFPeriodFinder
from roto.methods.gaussianprocess import GPPeriodFinder
from roto.methods.lombscargle import LombScarglePeriodFinder
from roto.methods.periodfinder import PeriodResult
from roto.plotting.plotting_tools import (
calculate_phase,
create_axis_with_formatter,
round_sig,
split_phase,
)
DEFAULT_COLOUR_CYCLE = cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
logger = logging.getLogger(__name__)
class RoTo:
METHODS = {
"lombscargle": LombScarglePeriodFinder,
"fft": FFTPeriodFinder,
"gacf": GACFPeriodFinder,
"gp": GPPeriodFinder, # keep at end of dictionary to allow seed period generation from other methods.
}
METHOD_NAMES = {
"lombscargle": "Lomb-Scargle",
"fft": "Fast Fourier Transform",
"gacf": "G-ACF",
"gp": "Gaussian Process Regression",
}
PLOTTING_COLOURS = {method: next(DEFAULT_COLOUR_CYCLE) for method in METHODS}
def __init__(
self,
timeseries: np.ndarray,
flux: np.ndarray,
flux_errors: Optional[np.ndarray] = None,
methods_parameters: Optional[dict] = None,
name: str = "Unnamed RoTo Object",
time_units: str = "days",
flux_units: str = "relative flux units",
):
self.name = name
self.timeseries = timeseries
self.flux = flux
self.flux_errors = flux_errors
timeseries_diffs = np.diff(self.timeseries)
self.regular_sampling = (timeseries_diffs.max() - timeseries_diffs.min()) < 1e-5
self.time_units = time_units
if self.time_units != "days":
logger.warning(
"GP prior scaled to expect timeseries data in days. Check prior or convert units."
)
self.flux_units = flux_units
if self.flux_units != "relative flux units":
logger.warning(
"GP prior scaled to expect flux data in relative flux units. Check prior or convert units."
)
self.methods = self._parse_constructor_parameters(methods_parameters)
self.periods = {}
def _parse_constructor_parameters(
self,
methods_parameters: Optional[dict] = None,
) -> dict:
if methods_parameters is None:
return {
name: method(
self.timeseries,
self.flux,
self.flux_errors,
time_units=self.time_units,
flux_units=self.flux_units,
)
for name, method in self.METHODS.items()
if (name != "fft") or (self.regular_sampling)
}
methods = {}
if list(methods_parameters.keys()) == ["gp"]:
# if just a GP, use a lomb scargle also to seed GP period.
methods_parameters = {"lombscargle": {}, **methods_parameters}
for method, kwargs in methods_parameters.items():
methods[method] = self.METHODS[method](
self.timeseries,
self.flux,
self.flux_errors,
time_units=self.time_units,
flux_units=self.flux_units,
**kwargs,
)
return methods
def __call__(self, **kwargs):
for name, method in self.methods.items():
if name == "gp":
if "gp_seed_period" not in kwargs:
average_period = np.median(
[
period_result.period
for period_result in self.periods.values()
]
)
kwargs["gp_seed_period"] = average_period
try:
self.periods[name] = method(**kwargs)
except Exception as e:
logger.error("Unable to run method %s" % name)
logger.error(e, exc_info=True)
continue
def periods_to_table(self) -> pd.DataFrame:
"""Convert roto.periods into a DataFrame for display.
Returns:
pd.DataFrame: Dataframe with all outputted periods
"""
columns = {"period": [], "neg_error": [], "pos_error": [], "method": []}
if not self.periods:
return pd.DataFrame()
for period_result in self.periods.values():
columns["period"].append(period_result.period)
columns["neg_error"].append(period_result.neg_error)
columns["pos_error"].append(period_result.pos_error)
columns["method"].append(period_result.method)
period_df = pd.DataFrame.from_dict(columns)
return period_df
def __str__(self):
return self.periods_to_table().to_string(index=False)
def best_period(
self,
method: str = "mean",
include: Optional[List] = None,
exclude: Optional[List] = None,
) -> PeriodResult:
"""Calculate best period based on methods already run. If called before
running the period finding methods, will return None.
Args:
method (str, optional): method should be one of 'mean', 'median' or a period finding method. Defaults to "mean".
include (Optional[List], optional): Method outputs to include. Defaults to [].
exclude (Optional[List], optional): Method outputs to exclude. Defaults to [].
Raises:
ValueError: If method specified incorrect.
Returns:
PeriodResult: CombinedPeriodResult.
"""
if not self.periods:
return None
periods_to_use = self.periods.values()
try:
if include:
include_classes = [
self.METHODS[method_to_include].__name__
for method_to_include in include
]
periods_to_use = [
period_result
for period_result in periods_to_use
if period_result.method in include_classes
]
if exclude:
exclude_classes = [
self.METHODS[method_to_exclude].__name__
for method_to_exclude in exclude
]
periods_to_use = [
period_result
for period_result in periods_to_use
if period_result.method not in exclude_classes
]
if not periods_to_use:
raise ValueError(
"Provided incompatible list of include / exclude values. No best period calculated. \n include: {include} \n exclude: {exclude}"
)
except KeyError:
raise ValueError(
f"Unable to parse include / exclude values given. \n include: {include} \n exclude: {exclude}"
)
if method == "mean":
mean = np.mean([p.period for p in periods_to_use])
std = np.std([p.period for p in periods_to_use]) / np.sqrt(
len(periods_to_use)
)
return PeriodResult(
period=mean, neg_error=std, pos_error=std, method="CombinedPeriodResult"
)
elif method == "median":
median = np.median([p.period for p in periods_to_use])
std = (
1.4826
* median_abs_deviation([p.period for p in periods_to_use])
/ np.sqrt(len(periods_to_use))
)
return PeriodResult(
period=median,
neg_error=std,
pos_error=std,
method="CombinedPeriodResult",
)
elif method in self.periods:
return self.periods[method]
raise ValueError(
f"Parameter 'method' must be one of ['mean', 'median'] or {list(self.METHODS.keys())}]. Did you specify a period extraction method not run?"
)
def plot(
self,
savefig: bool = False,
filename: Optional[str] = None,
include: Optional[List] = None,
exclude: Optional[List] = None,
plot_gp: bool = True,
show: bool = True,
summary: bool = False,
scientific: bool = False,
return_fig_ax: bool = False,
) -> Union[None, Tuple[Figure, Dict]]:
"""Generate summary plot of RoTo object run.
Args:
savefig (bool, optional): Save figure to pdf. Defaults to False.
filename (Optional[str], optional): Name of pdf. Defaults to None.
include (Optional[List], optional): Methods to include. Defaults to None (all methods).
exclude (Optional[List], optional): Methods to exclude. Defaults to None (no methods).
plot_gp (bool, optional): Plot Gaussian Process prediction & residuals. Defaults to True.
show (bool, optional): Show interactive plot. Defaults to True.
summary (bool, optional): Just plot summary, no methods. Defaults to False.
scientific (bool, optional): Scientific formatting of numbers vs linear scale. Defaults to False.
return_fig_ax (bool, optional): Return figure and axis tuples for further processing. Defaults to False.
Returns:
Union[None, Tuple(Figure, Dict)]: None or a tuple (matplotlib figure, dictionary of matplotlib axes)
"""
if savefig and not filename:
filename = f"{self.name}.pdf"
if (not include) or (not self.periods):
include = list(self.periods.keys())
plot_gp = plot_gp and ("gp" in self.periods)
fig, ax_dict = self._setup_figure(
include=include,
exclude=exclude,
summary=summary,
scientific=scientific,
plot_gp=plot_gp,
)
epoch = self.timeseries.min()
self.plot_data(ax_dict["data"])
self.plot_periods(ax_dict["distributions"])
self.plot_phase_folded_data(
ax_dict["phase_fold"], self.best_period().period, epoch=epoch
)
if not summary:
for method_name, method in self.methods.items():
try:
if method_name == "gp":
if plot_gp:
ax_dict["data"].get_xaxis().set_visible(False)
method.plot_gp_predictions(
ax_dict["data"],
colour=self.PLOTTING_COLOURS[method_name],
)
method.plot_gp_residuals(
ax_dict["residuals"],
colour=self.PLOTTING_COLOURS[method_name],
)
ax_dict["residuals"].set_xlim(ax_dict["data"].get_xlim())
method.plot(
ax_dict[method_name]["method"],
self.periods[method_name],
colour=self.PLOTTING_COLOURS[method_name],
)
self.plot_phase_folded_data(
ax_dict[method_name]["phase_fold"],
self.periods[method_name].period,
epoch=epoch,
)
except KeyError as err:
logger.warning(f"Not plotting method {method} as no results found")
continue
if savefig:
fig.savefig(filename, bbox_inches="tight", pad_inches=0.25)
if show:
plt.show()
if return_fig_ax:
return fig, ax_dict
def plot_summary(
self,
savefig: bool = False,
filename: Optional[str] = None,
plot_gp: bool = True,
show: bool = True,
):
"""Helper function to create just summary plots, same as calling self.plot(summary=True)
Args:
savefig (bool, optional): Save figure to pdf. Defaults to False.
filename (Optional[str], optional): Name of pdf. Defaults to None.
plot_gp (bool, optional): Plot Gaussian Process prediction & residuals. Defaults to True.
show (bool, optional): Show interactive plot. Defaults to True.
"""
self.plot(
savefig=savefig, filename=filename, plot_gp=plot_gp, show=show, summary=True
)
def plot_periods(self, ax: Axes) -> Axes:
"""Plot figure comparing outputted periods and errors.
Args:
ax (Axes): Matplotlib axis
Returns:
Axes: Matplotlib axis
"""
for name, period in self.periods.items():
if period.period_distribution is not None:
# plot as distribution
density = gaussian_kde(period.period_distribution)
pmin = max(0, period.period - 5 * period.neg_error)
pmax = period.period + 5 * period.pos_error
xs = np.linspace(pmin, pmax, 100)
kde_plot = density(xs)
kde_plot *= 1.0 / kde_plot.max()
ax.plot(xs, kde_plot, color=self.PLOTTING_COLOURS[name])
ax.axvline(
period.period,
label=self.METHOD_NAMES[name],
color=self.PLOTTING_COLOURS[name],
)
ax.axvspan(
period.period - period.neg_error,
period.period + period.pos_error,
color=self.PLOTTING_COLOURS[name],
alpha=0.2,
)
# plot best period as a single point with error bars
best_period = self.best_period()
ax.errorbar(
best_period.period,
0.5,
xerr=[[best_period.neg_error], [best_period.pos_error]],
ms=10,
marker="s",
c="k",
capsize=10,
)
ax.set_xlim(
[
best_period.period - 5 * best_period.neg_error,
best_period.period + 5 * best_period.pos_error,
]
)
ax.set_ylim([0, 1])
ax.get_yaxis().set_visible(False)
ax.set_xlabel("Period")
two_sided_error = np.average([best_period.neg_error, best_period.pos_error])
error_rounded, error_precision = round_sig(two_sided_error, 2, return_dp=True)
ax.set_title(
f"Adopted Period: {round(best_period.period, error_precision)} ± {error_rounded} {self.time_units}"
)
ax.legend()
return ax
def plot_gp_diagnostics(
self,
show: bool = True,
savefig: bool = False,
filename: str = "",
fileext: str = "pdf",
):
"""Plot Gaussian Process diagnostic outputs figures.
Args:
show (bool, optional): Show interactive plot. Defaults to True.
savefig (bool, optional): Save figure to pdf. Defaults to False.
filename (Optional[str], optional): Name of pdf. Defaults to None.
fileext (str, optional): File extension to save figure. Defaults to "pdf".
Raises:
RuntimeError: If no GP found.
"""
if "gp" not in self.methods:
raise RuntimeError("Cannot plot GP diagnostics, no GP method found.")
if savefig and not filename:
filename = f"{self.name}.pdf"
try:
self.methods["gp"].plot_trace(
show=show, savefig=savefig, filename=filename, fileext=fileext
)
except RuntimeError as trace_err:
logger.error("Unable to plot trace")
logger.error(trace_err, exc_info=True)
try:
self.methods["gp"].plot_distributions(
show=show, savefig=savefig, filename=filename, fileext=fileext
)
except (RuntimeError, ValueError) as dist_err:
logger.error("Unable to plot GP distributions")
logger.error(dist_err, exc_info=True)
def plot_data(self, ax: Axes) -> Axes:
"""Scatter plot of input data.
Args:
ax (Axes): Matplotlib axis
Returns:
Axes: Matplotlib axis
"""
if "gp" in self.methods:
mask = self.methods["gp"].mask
else:
mask = np.ones(len(self.timeseries), dtype=bool)
ax.errorbar(
self.timeseries[mask],
self.flux[mask],
self.flux_errors[mask],
markersize=2,
errorevery=1,
linestyle="none",
marker="o",
color="k",
ecolor="gray",
alpha=0.7,
capsize=0,
elinewidth=1,
mec="none",
)
ax.errorbar(
self.timeseries[~mask],
self.flux[~mask],
self.flux_errors[~mask],
markersize=2,
errorevery=1,
linestyle="none",
marker="o",
color="k",
ecolor="gray",
alpha=0.3,
capsize=0,
elinewidth=1,
mec="none",
)
ax.set_xlabel(f"Time / {self.time_units}")
ax.set_ylabel(f"Flux / {self.flux_units}")
ymin = np.min(self.flux - self.flux_errors)
ymax = np.max(self.flux + self.flux_errors)
yextent = ymax - ymin
ax.set_ylim([ymin - (yextent * 0.01), ymax + (yextent * 0.01)])
return ax
def plot_phase_folded_data(self, ax: Axes, period: float, epoch: float = 0) -> Axes:
"""Plot data phase folded on period and epoch.
Colour scale incremented for each period.
Args:
ax (Axes): Matplotlib axis
period (float): Period on which to phase fold.
epoch (float, optional): Epoch on which to phase fold. Defaults to 0.
Returns:
Axes: Matplotlib axis
"""
phased_timeseries = calculate_phase(self.timeseries, period, epoch)
split_phases, split_flux = split_phase(phased_timeseries, self.flux)
colours = iter(cm.viridis(np.r_[0 : 1 : len(split_phases) * 1j]))
for phase, flux in zip(split_phases, split_flux):
ax.scatter(phase, flux, color=next(colours), s=1)
ax.set_title(f"Period: {period:.4f} {self.time_units}")
ax.set_xlim([0, 1])
ax.set_xlabel("Phase")
ax.set_ylabel(f"Flux / {self.flux_units}")
return ax
def _setup_figure(
self,
include: Optional[List] = [],
exclude: Optional[List] = [],
summary: bool = False,
scientific: bool = False,
plot_gp: bool = False,
):
unit_grid_width = 5
unit_grid_height = 1
data_plot_size = (2, 3) # in units of grid width, height
residuals_plot_size = (2, 1)
distributions_plot_size = (1, 3)
phase_fold_plot_size = (1, 3)
method_plot_size = (1, 3)
spacer_plot_size = (2, 1)
if summary:
# just plot summary stats, no method plots.
methods = {}
else:
methods = {name: method for name, method in self.methods.items()}
if include:
methods = {
name: method for name, method in methods.items() if name in include
}
if exclude:
methods = {
name: method
for name, method in methods.items()
if name not in exclude
}
n_grid_units_width = 2
n_grid_units_height = (
data_plot_size[1]
+ (residuals_plot_size[1] * int(plot_gp))
+ distributions_plot_size[1]
+ method_plot_size[1] * len(methods)
+ spacer_plot_size[1] * (1 + len(methods))
)
figsize = (
unit_grid_width * n_grid_units_width,
unit_grid_height * n_grid_units_height,
)
fig = plt.figure(figsize=figsize)
gridspec = fig.add_gridspec(n_grid_units_height, n_grid_units_width)
plt.subplots_adjust(hspace=0.0, wspace=0.2)
axes = {}
formatter = ScalarFormatter()
formatter.set_scientific(scientific)
height = 0
axes["data"] = create_axis_with_formatter(
fig, gridspec[height : height + data_plot_size[1], :], formatter
)
height += data_plot_size[1]
if plot_gp:
axes["residuals"] = create_axis_with_formatter(
fig,
gridspec[height : height + residuals_plot_size[1], :],
formatter,
sharex=axes["data"],
)
height += residuals_plot_size[1]
height += spacer_plot_size[1]
axes["distributions"] = create_axis_with_formatter(
fig, gridspec[height : height + distributions_plot_size[1], 0], formatter
)
axes["phase_fold"] = create_axis_with_formatter(
fig, gridspec[height : height + phase_fold_plot_size[1], 1], formatter
)
height += phase_fold_plot_size[1]
height += spacer_plot_size[1]
for method in methods:
axes[method] = {
"method": create_axis_with_formatter(
fig, gridspec[height : height + method_plot_size[1], 0], formatter
),
"phase_fold": create_axis_with_formatter(
fig, gridspec[height : height + method_plot_size[1], 1], formatter
),
}
height += method_plot_size[1]
height += spacer_plot_size[1]
axes["data"].set_title(self.name)
return fig, axes
```
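A hedged end-to-end sketch of driving the RoTo class above; restricting methods_parameters keeps the run light by skipping GP regression:

```python
# Illustrative end-to-end run of RoTo on synthetic, regularly sampled data.
import numpy as np

from roto.roto import RoTo

rng = np.random.default_rng(0)
timeseries = np.arange(0, 27.4, 0.02)   # days
flux = 1.0 + 0.005 * np.sin(2 * np.pi * timeseries / 3.3) + rng.normal(0, 0.001, timeseries.size)
flux_errors = np.full_like(flux, 0.001)

roto = RoTo(
    timeseries,
    flux,
    flux_errors,
    methods_parameters={"lombscargle": {}, "gacf": {}},   # skip the slow GP method
    name="synthetic star",
)
roto()                                   # run the configured period finders
print(roto)                              # table of periods per method
best = roto.best_period(method="median")
print(best.period, best.neg_error, best.pos_error)
```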
#### File: roto/test/test_roto.py
```python
import sys
with open("/tmp/python-sys-path.txt", "w") as outfile:
outfile.write(str(sys.path))
from unittest import mock
import pytest
from roto.methods.periodfinder import PeriodFinder, PeriodResult
from roto.roto import RoTo
inverse_methods_dictionary = {method: name for name, method in RoTo.METHODS.items()}
@pytest.mark.parametrize(
"method_parameters",
[
None,
{"lombscargle": {"normalization": True, "fit_mean": False}},
{"gacf": {}},
{"fft": {}},
],
)
def test_create_roto(method_parameters, timeseries, flux, flux_errors):
roto = RoTo(timeseries, flux, flux_errors, method_parameters)
for name, method in roto.methods.items():
assert isinstance(method, PeriodFinder)
assert name in roto.METHODS.keys()
if method_parameters:
assert inverse_methods_dictionary[method.__class__] in method_parameters
def test_create_roto_only_gp(timeseries, flux, flux_errors):
method_parameters = {"gp": {}}
roto = RoTo(timeseries, flux, flux_errors, method_parameters)
for name, method in roto.methods.items():
assert isinstance(method, PeriodFinder)
assert name in roto.METHODS.keys()
assert list(roto.methods.keys()) == ["lombscargle", "gp"]
@mock.patch("roto.roto.LombScarglePeriodFinder", autospec=True)
@mock.patch("roto.roto.FFTPeriodFinder", autospec=True)
@mock.patch("roto.roto.GACFPeriodFinder", autospec=True)
@mock.patch("roto.roto.GPPeriodFinder", autospec=True)
def test_call(mock_gp, mock_gacf, mock_fft, mock_ls, timeseries, flux, flux_errors):
mock_gacf_object = mock.Mock(return_value=PeriodResult(1))
mock_fft_object = mock.Mock(return_value=PeriodResult(420))
mock_ls_object = mock.Mock(return_value=PeriodResult(69))
mock_gp_object = mock.Mock(return_value=PeriodResult(1))
mock_broken_function = mock.MagicMock(side_effect=RuntimeError("this is broken"))
mock_gacf.return_value = mock_gacf_object
mock_fft.return_value = mock_fft_object
mock_ls.return_value = mock_ls_object
mock_gp.return_value = mock_gp_object
mock_broken = mock.MagicMock()
mock_broken.return_value = mock_broken_function
with mock.patch.dict(
RoTo.METHODS,
{
"lombscargle": mock_ls,
"fft": mock_fft,
"gacf": mock_gacf,
"gp": mock_gp,
"broken": mock_broken,
},
) as patched_dict:
roto = RoTo(timeseries, flux, flux_errors)
kwargs = {"some": "random", "keywords": True, "gp_seed_period": 7}
roto(**kwargs)
print(roto)
mock_gacf_object.assert_called_once_with(**kwargs)
mock_fft_object.assert_called_once_with(**kwargs)
mock_ls_object.assert_called_once_with(**kwargs)
mock_gp_object.assert_called_once_with(**kwargs)
mock_broken_function.assert_called_once_with(**kwargs)
# check no extra calls have been made.
# note this may fail if we allow multiple periods per method.
assert len(roto.periods) == len(roto.METHODS) - 1
@pytest.mark.parametrize(
"method, period, error, outputted_method",
[
("mean", 3.0, 0.6324555320336759, "CombinedPeriodResult"),
("median", 3.0, 0.6630388766882376, "CombinedPeriodResult"),
("lombscargle", 1.0, 0.0, "LombScarglePeriodFinder"),
],
)
def test_best_period(
method, period, error, outputted_method, timeseries, flux, flux_errors
):
roto = RoTo(timeseries, flux, flux_errors)
roto.periods = {
"lombscargle": PeriodResult(1.0, 0.0, 0.0, "LombScarglePeriodFinder"),
"fft": PeriodResult(3.0, 0.0, 0.0, "FFTPeriodFinder"),
"fft2": PeriodResult(2.0, 0.0, 0.0, "FFTPeriodFinder"),
"gacf": PeriodResult(4.0, 0.0, 0.0, "GACFPeriodFinder"),
"gp": PeriodResult(5.0, 0.0, 0.0, "GPPeriodFinder"),
}
best_period = roto.best_period(method)
assert best_period == PeriodResult(period, error, error, outputted_method)
@mock.patch("roto.roto.np.mean", autospec=True, return_value=69)
@mock.patch("roto.roto.np.std", autospec=True, return_value=2)
@mock.patch("roto.roto.np.sqrt", autospec=True, return_value=1)
@pytest.mark.parametrize(
"include, periods",
[
([], [1.0, 3.0, 4.0, 5.0]),
(["lombscargle"], [1.0]),
(["gp", "gacf"], [4.0, 5.0]),
],
)
def test_best_period_include(
mock_sqrt, mock_std, mock_mean, include, periods, timeseries, flux, flux_errors
):
roto = RoTo(timeseries, flux, flux_errors)
roto.periods = {
"lombscargle": PeriodResult(1.0, 0.0, 0.0, "LombScarglePeriodFinder"),
"fft": PeriodResult(3.0, 0.0, 0.0, "FFTPeriodFinder"),
"gacf": PeriodResult(4.0, 0.0, 0.0, "GACFPeriodFinder"),
"gp": PeriodResult(5.0, 0.0, 0.0, "GPPeriodFinder"),
}
best_period = roto.best_period("mean", include=include)
mock_mean.assert_called_once_with(periods)
mock_std.assert_called_once_with(periods)
mock_sqrt.assert_called_once_with(len(periods))
assert best_period == PeriodResult(69, 2, 2, "CombinedPeriodResult")
def test_best_period_include_wrong_type(timeseries, flux, flux_errors):
roto = RoTo(timeseries, flux, flux_errors)
roto.periods = {
"lombscargle": PeriodResult(1.0, 0.0, 0.0, "LombScarglePeriodFinder"),
}
with pytest.raises(ValueError) as err:
roto.best_period("mean", include=["non_existent_method"])
@mock.patch("roto.roto.np.mean", autospec=True, return_value=69)
@mock.patch("roto.roto.np.std", autospec=True, return_value=2)
@mock.patch("roto.roto.np.sqrt", autospec=True, return_value=1)
@pytest.mark.parametrize(
"exclude, periods",
[
([], [1.0, 3.0, 4.0, 5.0]),
(["lombscargle"], [3.0, 4.0, 5.0]),
(["gp", "gacf"], [1.0, 3.0]),
],
)
def test_best_period_exclude(
mock_sqrt, mock_std, mock_mean, exclude, periods, timeseries, flux, flux_errors
):
roto = RoTo(timeseries, flux, flux_errors)
roto.periods = {
"lombscargle": PeriodResult(1.0, 0.0, 0.0, "LombScarglePeriodFinder"),
"fft": PeriodResult(3.0, 0.0, 0.0, "FFTPeriodFinder"),
"gacf": PeriodResult(4.0, 0.0, 0.0, "GACFPeriodFinder"),
"gp": PeriodResult(5.0, 0.0, 0.0, "GPPeriodFinder"),
}
best_period = roto.best_period("mean", exclude=exclude)
mock_mean.assert_called_once_with(periods)
mock_std.assert_called_once_with(periods)
mock_sqrt.assert_called_once_with(len(periods))
assert best_period == PeriodResult(69, 2, 2, "CombinedPeriodResult")
def test_best_period_exclude_wrong_type(timeseries, flux, flux_errors):
roto = RoTo(timeseries, flux, flux_errors)
roto.periods = {
"lombscargle": PeriodResult(1.0, 0.0, 0.0, "LombScarglePeriodFinder"),
}
with pytest.raises(ValueError) as err:
roto.best_period("mean", exclude=["non_existent_method"])
def test_best_period_include_exclude_incompatible(timeseries, flux, flux_errors):
roto = RoTo(timeseries, flux, flux_errors)
roto.periods = {
"lombscargle": PeriodResult(1.0, 0.0, 0.0, "LombScarglePeriodFinder"),
}
with pytest.raises(ValueError) as err:
roto.best_period("mean", exclude=["lombscargle"], include=["lombscargle"])
``` |
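The timeseries, flux and flux_errors fixtures used above are not defined in this file and presumably come from a conftest.py; a minimal hypothetical sketch of such fixtures follows (the project's real test data may differ):

```python
# Hypothetical conftest.py sketch -- the actual fixtures in the repository may differ.
import numpy as np
import pytest


@pytest.fixture
def timeseries():
    return np.linspace(0, 10, 100)


@pytest.fixture
def flux(timeseries):
    return 1.0 + 0.01 * np.sin(2 * np.pi * timeseries)


@pytest.fixture
def flux_errors(flux):
    return np.full_like(flux, 0.001)
```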
{
"source": "joshbrooks/django-chartjs",
"score": 2
} |
#### File: demo/demoproject/views.py
```python
from itertools import islice
from random import randint, shuffle
from django.views.generic import TemplateView
from chartjs.colors import COLORS, next_color
from chartjs.util import date_range, value_or_null
from chartjs.views.lines import BaseLineChartView, BaseLineOptionsChartView
from demoproject.models import Meter
class ColorsView(TemplateView):
template_name = "colors.html"
def get_context_data(self, **kwargs):
data = super(ColorsView, self).get_context_data(**kwargs)
data["colors"] = islice(next_color(), 0, 50)
return data
class ChartMixin(object):
def get_providers(self):
"""Return names of datasets."""
return ["Central", "Eastside", "Westside"]
def get_labels(self):
"""Return 7 labels."""
return ["January", "February", "March", "April", "May", "June", "July"]
def get_data(self):
"""Return 3 random dataset to plot."""
def data():
"""Return 7 randint between 0 and 100."""
return [randint(0, 100) for x in range(7)]
return [data() for x in range(3)]
def get_colors(self):
"""Return a new shuffle list of color so we change the color
each time."""
colors = COLORS[:]
shuffle(colors)
return next_color(colors)
class LineChartJSONView(ChartMixin, BaseLineChartView):
pass
class DiscontinuousDatesChartJSONView(ChartMixin, BaseLineChartView):
start_date = "2019-05-26"
end_date = "2019-06-04"
def get_providers(self):
return ["Water", "Gas"]
def get_labels(self):
return [dt for dt in date_range(self.start_date, self.end_date)]
def get_data(self):
result = []
water = Meter.objects.filter(name="water")
data = [item for item in value_or_null(self.start_date, self.end_date, water, "date", "reading")]
result.append(data)
gas = Meter.objects.filter(name="gas")
data = [item for item in value_or_null(self.start_date, self.end_date, gas, "date", "reading")]
result.append(data)
return result
class LineChartWithOptionsJSONView(ChartMixin, BaseLineOptionsChartView):
def get_options(self):
options = {
"title": {"display": True, "text": "Custom Chart Title"},
"elements": {"point": {"pointStyle": "rectRounded", "radius": 10}},
"responsive": False,
}
return options
# Pre-configured views.
colors = ColorsView.as_view()
line_chart = TemplateView.as_view(template_name="line_chart.html")
line_chart_json = LineChartJSONView.as_view()
discontinuous_dates_chart_json = DiscontinuousDatesChartJSONView.as_view()
line_chart_with_options = LineChartWithOptionsJSONView.as_view()
``` |
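A hedged sketch of how these pre-configured views might be routed in the demo project's urls.py; the URL paths and route names here are assumptions:

```python
# Hypothetical urls.py wiring for the demo views above -- paths and names are illustrative.
from django.urls import path

from demoproject import views

urlpatterns = [
    path("colors/", views.colors, name="colors"),
    path("line_chart/", views.line_chart, name="line_chart"),
    path("line_chart/json/", views.line_chart_json, name="line_chart_json"),
    path("discontinuous_dates_chart/json/", views.discontinuous_dates_chart_json,
         name="discontinuous_dates_chart_json"),
    path("line_chart_with_options/json/", views.line_chart_with_options,
         name="line_chart_with_options"),
]
```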
{
"source": "joshbrooks/pgosm-flex",
"score": 2
} |
#### File: pgosm-flex/docker/pgosm_flex.py
```python
import configparser
import logging
import os
from pathlib import Path
import sys
import subprocess
import click
import osm2pgsql_recommendation as rec
import db, geofabrik, helpers
BASE_PATH_DEFAULT = '/app'
"""Default path for pgosm-flex project for Docker.
"""
@click.command()
# Required and most common options first
@click.option('--ram', required=True,
type=float,
help='Amount of RAM in GB available on the machine running this process. Used to determine appropriate osm2pgsql command via osm2pgsql-tuner recommendation engine.')
@click.option('--region', required=False,
help='Region name matching the filename for data sourced from Geofabrik. e.g. north-america/us. Optional when --input-file is specified, otherwise required.')
@click.option('--subregion', required=False,
help='Sub-region name matching the filename for data sourced from Geofabrik. e.g. district-of-columbia')
# Remainder of options in alphabetical order
@click.option('--basepath',
required=False,
default=BASE_PATH_DEFAULT,
help='Debugging option. Used when testing locally and not within Docker')
@click.option('--data-only',
default=False,
is_flag=True,
help="When set, skips running Sqitch and importing QGIS Styles.")
@click.option('--debug', is_flag=True,
help='Enables additional log output')
@click.option('--input-file',
required=False,
default=None,
help='Set explicit filepath to input osm.pbf file. Overrides default file handling, archiving, and MD5 checksum.')
@click.option('--layerset', required=True,
default='default',
help='Layerset to load. Defines name of included layerset unless --layerset-path is defined.')
@click.option('--layerset-path', required=False,
help='Custom path to load layerset INI from. Custom paths should be mounted to Docker via docker run -v ...')
@click.option('--language', default=None,
envvar="PGOSM_LANGUAGE",
help="Set default language in loaded OpenStreetMap data when available. e.g. 'en' or 'kn'.")
@click.option('--pgosm-date', required=False,
default=helpers.get_today(),
envvar="PGOSM_DATE",
help="Date of the data in YYYY-MM-DD format. If today (default), automatically downloads when files not found locally. Set to historic date to load locally archived PBF/MD5 file, will fail if both files do not exist.")
@click.option('--schema-name', required=False,
default='osm',
help="Change the final schema name, defaults to 'osm'.")
@click.option('--skip-dump', default=False, is_flag=True,
help='Skips the final pg_dump at the end. Useful for local testing when not loading into more permanent instance.')
@click.option('--skip-nested',
default=False,
is_flag=True,
help='When set, skips calculating nested admin polygons. Can be time consuming on large regions.')
@click.option('--srid', required=False, default=helpers.DEFAULT_SRID,
envvar="PGOSM_SRID",
help="SRID for data loaded by osm2pgsql to PostGIS. Defaults to 3857")
def run_pgosm_flex(ram, region, subregion, basepath, data_only, debug,
input_file, layerset, layerset_path, language, pgosm_date,
schema_name, skip_dump, skip_nested, srid):
"""Run PgOSM Flex within Docker to automate osm2pgsql flex processing.
"""
paths = get_paths(base_path=basepath)
setup_logger(debug)
logger = logging.getLogger('pgosm-flex')
logger.info('PgOSM Flex starting...')
validate_region_inputs(region, subregion, input_file)
# Ensure always a region name
if region is None and input_file:
region = input_file
helpers.set_env_vars(region, subregion, srid, language, pgosm_date,
layerset, layerset_path)
if input_file is None:
geofabrik.prepare_data(region=region,
subregion=subregion,
pgosm_date=pgosm_date,
out_path=paths['out_path'])
pbf_filename = geofabrik.get_region_filename(region, subregion)
osm2pgsql_command = rec.osm2pgsql_recommendation(ram=ram,
pbf_filename=pbf_filename,
out_path=paths['out_path'])
else:
osm2pgsql_command = rec.osm2pgsql_recommendation(ram=ram,
pbf_filename=input_file,
out_path=paths['out_path'])
db.wait_for_postgres()
db.prepare_pgosm_db(data_only=data_only, db_path=paths['db_path'])
flex_path = paths['flex_path']
run_osm2pgsql(osm2pgsql_command=osm2pgsql_command,
flex_path=flex_path)
if not skip_nested:
# Auto-set skip_nested when place layer not imported
skip_nested = check_layerset_places(layerset_path, layerset, flex_path)
run_post_processing(flex_path=flex_path, skip_nested=skip_nested)
if input_file is None:
geofabrik.remove_latest_files(region, subregion, paths)
export_filename = get_export_filename(region,
subregion,
layerset,
pgosm_date,
input_file)
export_path = get_export_full_path(paths['out_path'], export_filename)
if schema_name != 'osm':
db.rename_schema(schema_name)
if skip_dump:
logger.info('Skipping pg_dump')
else:
db.run_pg_dump(export_path=export_path,
data_only=data_only,
schema_name=schema_name)
logger.info('PgOSM Flex complete!')
def validate_region_inputs(region, subregion, input_file):
"""Ensures the combination of region, subregion and input_file is valid.
No return, raises error when invalid.
Parameters
-----------------------
region : str
subregion : str
input_file : str
"""
if region is None and input_file is None:
raise ValueError('Either --region or --input-file must be provided')
if region is None and subregion is not None:
raise ValueError('Cannot use --subregion without --region')
if region is not None:
if '/' in region and subregion is None:
err_msg = 'Region provided appears to include subregion. '
err_msg += 'The portion after the final "/" in the Geofabrik URL '
err_msg += 'should be the --subregion.'
raise ValueError(err_msg)
def setup_logger(debug):
"""Prepares logging.
Parameters
------------------------------
debug : bool
Enables debug mode when True. INFO when False.
"""
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
log_format = '%(asctime)s:%(levelname)s:%(name)s:%(module)s:%(message)s'
logging.basicConfig(stream=sys.stdout,
level=log_level,
filemode='w',
format=log_format)
# Reduce verbosity of urllib3 logging
logging.getLogger('urllib3').setLevel(logging.INFO)
logger = logging.getLogger('pgosm-flex')
logger.debug('Logger configured')
def get_paths(base_path):
"""Returns dictionary of various paths used.
Ensures `out_path` exists.
Parameters
-------------------
base_path : str
Returns
-------------------
paths : dict
"""
db_path = os.path.join(base_path, 'db')
out_path = os.path.join(base_path, 'output')
flex_path = os.path.join(base_path, 'flex-config')
paths = {'base_path': base_path,
'db_path': db_path,
'out_path': out_path,
'flex_path': flex_path}
Path(out_path).mkdir(parents=True, exist_ok=True)
return paths
def get_export_filename(region, subregion, layerset, pgosm_date, input_file):
"""Returns the .sql filename to use for pg_dump.
Parameters
----------------------
region : str
subregion : str
layerset : str
pgosm_date : str
input_file : str
Returns
----------------------
filename : str
"""
# region is always set internally, even with --input-file and no --region
region = region.replace('/', '-')
if subregion:
subregion = subregion.replace('/', '-')
if input_file:
# Assumes .osm.pbf
base_name = input_file[:-8]
filename = f'{base_name}-{layerset}-{pgosm_date}.sql'
elif subregion is None:
filename = f'{region}-{layerset}-{pgosm_date}.sql'
else:
filename = f'{region}-{subregion}-{layerset}-{pgosm_date}.sql'
return filename
def get_export_full_path(out_path, export_filename):
"""If `export_filename` is an absolute path, `out_path` is not considered.
Parameters
-----------------
out_path : str
export_filename : str
Returns
-----------------
export_path : str
"""
if os.path.isabs(export_filename):
export_path = export_filename
else:
export_path = os.path.join(out_path, export_filename)
return export_path
def run_osm2pgsql(osm2pgsql_command, flex_path):
"""Runs the provided osm2pgsql command.
Parameters
----------------------
osm2pgsql_command : str
flex_path : str
"""
logger = logging.getLogger('pgosm-flex')
logger.info('Running osm2pgsql')
output = subprocess.run(osm2pgsql_command.split(),
text=True,
cwd=flex_path,
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
logger.info(f'osm2pgsql output: \n {output.stdout}\nEND PgOSM Flex output')
if output.returncode != 0:
err_msg = f'Failed to run osm2pgsql. Return code: {output.returncode}'
logger.error(err_msg)
sys.exit(f'{err_msg} - Check the log output for details.')
logger.info('osm2pgsql completed.')
def check_layerset_places(layerset_path, layerset, flex_path):
"""If `place` layer is not included `skip_nested` should be true.
Parameters
------------------------
layerset_path : str
layerset : str
flex_path : str
Returns
------------------------
skip_nested : boolean
"""
logger = logging.getLogger('pgosm-flex')
if layerset_path is None:
layerset_path = os.path.join(flex_path, 'layerset')
logger.info(f'Using default layerset path {layerset_path}')
ini_file = os.path.join(layerset_path, f'{layerset}.ini')
config = configparser.ConfigParser()
config.read(ini_file)
try:
place = config['layerset']['place']
except KeyError:
logger.debug('Place layer not defined, setting skip_nested')
return True
if place:
logger.debug('Place layer is defined as true. Not setting skip_nested')
return False
logger.debug('Place set to false, setting skip_nested')
return True
def run_post_processing(flex_path, skip_nested):
"""Runs steps following osm2pgsql import.
Post-processing SQL scripts and (optionally) calculate nested admin polgyons
Parameters
----------------------
flex_path : str
skip_nested : bool
"""
db.pgosm_after_import(flex_path)
logger = logging.getLogger('pgosm-flex')
if skip_nested:
logger.info('Skipping calculating nested polygons')
else:
logger.info('Calculating nested polygons')
db.pgosm_nested_admin_polygons(flex_path)
if __name__ == "__main__":
logging.getLogger('pgosm-flex').info('Running PgOSM Flex!')
run_pgosm_flex()
``` |
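Because run_pgosm_flex is a click command, it can be exercised programmatically, for example with click's test runner; the region values below are illustrative and a real run still needs the surrounding Docker environment (PostgreSQL, osm2pgsql):

```python
# Illustrative programmatic invocation of the click command defined above.
from click.testing import CliRunner

from pgosm_flex import run_pgosm_flex

runner = CliRunner()
result = runner.invoke(
    run_pgosm_flex,
    [
        "--ram", "8",
        "--region", "north-america/us",
        "--subregion", "district-of-columbia",
        "--layerset", "default",
        "--skip-dump",
    ],
)
print(result.exit_code)
print(result.output)
```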
{
"source": "JoshBroomberg/MovieGenrePrediction",
"score": 3
} |
#### File: predict/api/api_svc.py
```python
import pickle
from src.features.utils import remove_punctuation
with open('models/count_vectorizer.pkl','rb') as f:
count_vectorizer=pickle.load(f)
with open('models/classifier_svc.pkl','rb') as f:
classif_svc=pickle.load(f)
with open('data/processed/genre_id_to_name_dict.pkl','rb') as f:
Genre_ID_to_name=pickle.load(f)
with open('models/tfidf_transformer.pkl','rb') as f:
tfidf_transformer=pickle.load(f)
genre_list=sorted(list(Genre_ID_to_name.keys()))
def svc_predict(input_string):
cleaned_string = remove_punctuation(input_string)
vectorized_doc = count_vectorizer.transform([cleaned_string])
tfidf_doc = tfidf_transformer.transform(vectorized_doc)
pred_array = classif_svc.predict(tfidf_doc)
# pred_prob = classif_svc.predict_proba(tfidf_doc) # trained with probability=False to save time, so not available
pred_genres = []
for i, score in enumerate(pred_array[0]):
if score!=0:
genre=Genre_ID_to_name[genre_list[i]]
pred_genres.append(genre)
return pred_genres #, pred_prob
# print(svc_predict("The boy with long stripped pants jumped over many walls to get to the computer."))
```
#### File: src/scoring/scoring_utils.py
```python
def precision_recall(gt,preds):
TP=0
FP=0
FN=0
for t in gt:
if t in preds:
TP+=1
else:
FN+=1
for p in preds:
if p not in gt:
FP+=1
if TP+FP==0:
precision=0
else:
precision=TP/float(TP+FP)
if TP+FN==0:
recall=0
else:
recall=TP/float(TP+FN)
return precision,recall
def generate_predictions(Genre_ID_to_name, X_test, preds):
genre_list=sorted(list(Genre_ID_to_name.keys()))
predictions=[]
for i in range(X_test.shape[0]):
pred_genres=[]
movie_label_scores=preds[i]
for j in range(len(genre_list)):
#print j
if movie_label_scores[j]!=0:
genre=Genre_ID_to_name[genre_list[j]]
pred_genres.append(genre)
predictions.append(pred_genres)
return predictions
def precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions):
precs=[]
recs=[]
for i in range(len(test_movies)):
if i%1==0:
pos=test_movies[i]
test_movie=movies_with_overviews[pos]
gtids=test_movie['genre_ids']
gt=[]
for g in gtids:
g_name=Genre_ID_to_name[g]
gt.append(g_name)
# print predictions[i],movies_with_overviews[i]['title'],gt
a,b=precision_recall(gt,predictions[i])
precs.append(a)
recs.append(b)
return precs, recs
``` |
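A quick sanity check of precision_recall on toy labels; with one correct, one missed and one spurious genre, precision and recall both come out to 0.5:

```python
# Worked example for precision_recall above; the import path mirrors the file location.
from src.scoring.scoring_utils import precision_recall

gt = ["Drama", "Comedy"]       # ground-truth genres
preds = ["Comedy", "Action"]   # predicted genres

precision, recall = precision_recall(gt, preds)
print(precision, recall)       # 0.5 0.5  (TP=1, FP=1, FN=1)
```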
{
"source": "joshbrunty/SMStoCSV",
"score": 4
} |
#### File: joshbrunty/SMStoCSV/SMSCSV.py
```python
import sqlite3
import csv
def extract_db():
sql_query = """
SELECT
message.ROWID as 'ID',
chat.ROWID as 'Chat ID',
chat.chat_identifier as 'Chat Handle',
message.date as 'Date',
message.text as 'Message Content',
message.is_from_me as 'From Account Owner'
FROM
message
LEFT JOIN chat ON
message.handle_id = chat.ROWID
ORDER BY
message.date ASC
"""
conn = sqlite3.connect('sms.db')
c = conn.cursor()
with open('sms.db extraction.csv', mode = 'w', newline = '', encoding = 'utf-8') as csv_file:
# Write the header row
fieldnames = ['ID', 'Chat ID', 'Chat Handle', 'Date', 'Message Content', 'From Account Owner']
writer = csv.DictWriter(csv_file, fieldnames = fieldnames)
writer.writeheader()
for message_id, chat_id, chat_handle, message_date, message_text, message_from_account in c.execute(sql_query):
# Construct our data
parsed_message = {
'ID': message_id,
'Chat ID': chat_id,
'Chat Handle': chat_handle,
'Date': message_date,
'Message Content': message_text,
'From Account Owner': message_from_account,
}
# Write our data to the file.
writer.writerow(parsed_message)
def main():
extract_db()
if __name__ == '__main__':
main()
``` |
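Once the script has run against an sms.db in the working directory, the generated CSV can be inspected with the standard library, for example:

```python
# Read back the extraction produced by extract_db() above.
import csv

with open('sms.db extraction.csv', newline='', encoding='utf-8') as csv_file:
    reader = csv.DictReader(csv_file)
    for row in reader:
        print(row['Date'], row['Chat Handle'], row['Message Content'])
```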
{
"source": "joshburnett/canaveral",
"score": 2
} |
#### File: canaveral/canaveral/main.py
```python
from __future__ import annotations
import sys
from pathlib import Path
from PySide6 import QtCore, QtGui
from PySide6.QtWidgets import (QApplication, QMainWindow, QLabel, QDialog, QMenu, QSystemTrayIcon)
from PySide6.QtCore import Qt
from PySide6.QtCore import QAbstractNativeEventFilter, QAbstractEventDispatcher
import win32api
import win32gui
from loguru import logger
from appdirs import AppDirs
# Try different ways of importing, so we can run this as an application installed via pip/pipx,
# and also just from the source directory.
try:
from canaveral.qtkeybind import keybinder
from canaveral.basemodels import SearchPathEntry, Catalog, QuerySet
from canaveral.qtmodels import LaunchListModel
from canaveral.widgets import CharLineEdit, CharListWidget
except ImportError:
from .qtkeybind import keybinder
from .basemodels import SearchPathEntry, Catalog, QuerySet
from .qtmodels import LaunchListModel
from .widgets import CharLineEdit, CharListWidget
class WinEventFilter(QAbstractNativeEventFilter):
def __init__(self, keybinder):
self.keybinder = keybinder
super().__init__()
def nativeEventFilter(self, eventType, message):
ret = self.keybinder.handler(eventType, message)
return ret, 0
# Helper object for defining locations for config & log files
DIRS = AppDirs('Canaveral', appauthor='')
sys.path.append(DIRS.user_data_dir) # so we can load the paths file
class CanaveralWindow(QMainWindow):
def __init__(self):
super().__init__()
self.dragging = False
self.drag_start_point = None
try:
from paths import search_path_entries
logger.debug('Loaded search path entries from paths.py.')
except ImportError:
logger.debug('No paths.py present, creating from paths-example.py.')
import shutil
shutil.copy(Path(__file__).parent / 'paths-example.py', Path(DIRS.user_data_dir) / 'paths.py')
from paths import search_path_entries
self.search_path_entries = search_path_entries
self.catalog = Catalog(self.search_path_entries, launch_data_file=Path(DIRS.user_data_dir) / 'launch_data.txt')
self.query_set = QuerySet(catalog=self.catalog)
# self.query_set.create_query('doc')
self.model = LaunchListModel(catalog=self.catalog, query_set=self.query_set, max_launch_list_entries=10)
self.setup()
self.setup_sys_tray_icon()
self.launch_list_view.setModel(self.model)
self.update_launch_list_size()
self.line_input.textEdited.connect(self.update_query)
self.item_refresh_timer = QtCore.QTimer(self)
self.item_refresh_timer.setInterval(5*60*1000) # 5 minutes
self.item_refresh_timer.timeout.connect(self.catalog.refresh_items_list)
self.item_refresh_timer.start()
self.show_main_window_and_focus()
def setup_sys_tray_icon(self):
self.tray = QSystemTrayIcon()
if self.tray.isSystemTrayAvailable():
icon = QtGui.QIcon(str(Path(__file__).parent / 'resources/rocket_with_shadow_blue.png'))
menu = QMenu()
setting_action = menu.addAction('Settings...')
setting_action.triggered.connect(self.setting)
exit_action = menu.addAction('Exit')
exit_action.triggered.connect(sys.exit)
self.tray.setIcon(icon)
self.tray.setContextMenu(menu)
self.tray.show()
self.tray.setToolTip('Canaveral')
self.setWindowFlag(QtCore.Qt.Tool)
self.line_input.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
else:
self.tray = None
def setting(self):
self.dialog = QDialog()
self.dialog.setWindowTitle("Settings Dialog")
self.dialog.show()
def setup(self):
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setAttribute(Qt.WA_AlwaysShowToolTips)
self.setAttribute(Qt.WA_InputMethodEnabled)
# self.setFocusPolicy(Qt.ClickFocus)
self.background = QLabel(parent=self)
bg_pixmap = QtGui.QPixmap(str(Path(__file__).parent / 'resources/light_background_2x.png'))
bg_pixmap.setDevicePixelRatio(2)
# bg_pixmap = QtGui.QPixmap(':/styles/frame')
self.background_size_scaled = bg_pixmap.size() / bg_pixmap.devicePixelRatio()
self.background.setPixmap(bg_pixmap)
self.background.resize(self.background_size_scaled)
self.background.move(0, 0)
self.resize(self.background_size_scaled)
self.search_icon = QLabel(parent=self)
search_pixmap = QtGui.QPixmap(str(Path(__file__).parent / 'resources/search_icon.png'))
search_pixmap.setDevicePixelRatio(2)
# bg_pixmap = QtGui.QPixmap(':/styles/frame')
self.search_icon.setPixmap(search_pixmap)
self.search_icon.resize(search_pixmap.size() / search_pixmap.devicePixelRatio())
self.search_icon.move(550, 12)
self.line_input = CharLineEdit(parent=self)
self.line_input.setObjectName('input')
self.line_input.resize(530, 30)
self.line_input.move(12, 15)
# self.line_input.setAutoFillBackground(True)
self.line_input.setFont(QtGui.QFont('Franklin Gothic', 24))
self.line_input.setStyleSheet('qproperty-frame: false;'
'background-color: rgba(0, 0, 0, 0);')
self.output_icon = QLabel(parent=self)
self.output_icon.setObjectName('outputIcon')
self.output = QLabel(parent=self)
self.output.setObjectName('output')
self.output.setAlignment(Qt.AlignHCenter)
self.launch_list_view = CharListWidget(parent=self)
self.launch_list_view.setAutoFillBackground(True)
self.launch_list_view.setIconSize(QtCore.QSize(32, 32))
self.launch_list_view.setObjectName('alternatives')
def update_launch_list_size(self):
x_margin = 40
y_margin = 0
width = self.width() - 2*x_margin
x = x_margin
y = self.background_size_scaled.height() - y_margin
num_items_in_results = self.model.num_results()
if num_items_in_results > 0:
index = self.launch_list_view.model().index(0, 0)
item_height = self.launch_list_view.model().data(index, Qt.SizeHintRole)
num_display_items = min(self.model.max_launch_list_entries, num_items_in_results)
# I guess we need 4px total for the top & bottom border?
height = num_display_items * item_height.height() + \
(num_display_items-1)*self.launch_list_view.spacing() + 4
self.launch_list_view.setGeometry(x, y, width, height)
self.resize(self.width(), self.background_size_scaled.height()-y_margin+height)
self.launch_list_view.show()
else:
self.hide_launch_list()
self.resize(self.width(), self.background_size_scaled.height())
def update_query(self, query_text):
self.model.set_query(query_text)
self.update_launch_list_size()
def hide_main_window(self):
self.launch_list_view.hide()
self.hide()
def show_main_window_and_focus(self):
frame_geometry = self.frameGeometry()
monitor_center = self.screen().availableGeometry().center()
frame_geometry.moveCenter(monitor_center)
self.move(frame_geometry.topLeft())
self.show()
# self.setFocus()
self.line_input.setFocus()
self.line_input.selectAll()
win32gui.SetForegroundWindow(self.winId())
def show_launch_list(self):
self.launch_list_view.show()
self.launch_list_view.setFocus()
def hide_launch_list(self):
self.launch_list_view.setCurrentIndex(self.launch_list_view.model().index(-1, 0))
self.launch_list_view.repaint()
self.launch_list_view.hide()
self.line_input.setFocus()
def mousePressEvent(self, event: QtGui.QMouseEvent) -> None:
if event.buttons() == Qt.LeftButton:
self.dragging = True
self.drag_start_point = event.position()
self.activateWindow()
self.line_input.setFocus()
def mouseMoveEvent(self, event: QtGui.QMouseEvent) -> None:
if event.buttons() == Qt.LeftButton and self.dragging:
p = event.globalPosition() - self.drag_start_point
self.move(round(p.x()), round(p.y()))
self.line_input.setFocus()
self.launch_list_view.move(round(p.x()), round(p.y()))
def mouseReleaseEvent(self, event: QtGui.QMouseEvent) -> None:
self.dragging = False
self.line_input.setFocus()
def keyPressEvent(self, event: QtGui.QKeyEvent) -> None:
key = event.key()
# logger.debug(f'Key pressed: {event.key()}')
if key == Qt.Key_Escape:
if self.launch_list_view.isVisible():
self.hide_launch_list()
else:
# QApplication.instance().quit()
self.hide_main_window()
elif key in (Qt.Key_Enter, Qt.Key_Return):
logger.debug('Return/Enter key')
if self.launch_list_view.currentIndex().row() == -1:
self.launch_list_view.setCurrentIndex(self.launch_list_view.model().index(0, 0))
item = self.model.data(self.launch_list_view.currentIndex(), role=Qt.UserRole)
self.hide_main_window()
# http://timgolden.me.uk/pywin32-docs/win32api__ShellExecute_meth.html
# win32api.ShellExecute(hwnd, op, file, params, dir, bShow)
logger.debug(f'Executing: {item.full_path}')
win32api.ShellExecute(0, None, str(item.full_path), '', '', 1)
self.catalog.update_launch_data(query_string=self.line_input.text(), launch_choice=item.full_path)
elif key in (Qt.Key_Down, Qt.Key_Up, Qt.Key_PageDown, Qt.Key_PageUp):
if self.launch_list_view.isVisible():
logger.debug(f'spot 1: self.launch_list_view.isActiveWindow(): '
f'{self.launch_list_view.isActiveWindow()}')
logger.debug(f'self.launch_list_view.hasFocus(): {self.launch_list_view.hasFocus()}')
if not self.launch_list_view.hasFocus():
if self.launch_list_view.currentIndex().row() < 0 < self.model.num_results():
logger.debug('spot 2')
self.launch_list_view.setFocus()
self.launch_list_view.setCurrentIndex(self.launch_list_view.model().index(0, 0))
else:
logger.debug('spot 3')
self.launch_list_view.setFocus()
QApplication.sendEvent(self.launch_list_view, event)
elif self.launch_list_view.currentIndex().row() == 0 and key == Qt.Key_Up:
logger.debug('spot 1.5')
self.line_input.setFocus()
elif key in (Qt.Key_Down, Qt.Key_PageDown) and 0 < self.model.num_results():
logger.debug('spot 4')
self.launch_list_view.setCurrentIndex(self.launch_list_view.model().index(0, 0))
self.show_launch_list()
elif self.launch_list_view.hasFocus():
self.launch_list_view.setCurrentIndex(self.launch_list_view.model().index(-1, 0))
self.line_input.setFocus()
self.line_input.keyPressEvent(event)
def changeEvent(self, event):
if event.type() == QtCore.QEvent.ActivationChange and not self.isActiveWindow():
self.hide_main_window()
super().changeEvent(event)
def closeEvent(self, event):
logger.info('closeEvent')
keybinder.unregister_hotkey(self.winId(), "Ctrl+Alt+Space")
event.accept()
sys.exit()
def run():
if Path(sys.executable).stem == 'pythonw':
Path(DIRS.user_log_dir).mkdir(parents=True, exist_ok=True)
sys.stdout = open(Path(DIRS.user_log_dir) / 'canaveral.log', 'w')
logger.remove()
logger.add(sys.stdout)
logger.debug('Starting.')
app = QApplication([])
app.setFont(QtGui.QFont('Franklin Gothic'))
app.setAttribute(Qt.AA_EnableHighDpiScaling)
app.setQuitOnLastWindowClosed(False)
main_window = CanaveralWindow()
# Install a native event filter to receive events from the OS
keybinder.init()
keybinder.register_hotkey(main_window.winId(), "Ctrl+Alt+Space", main_window.show_main_window_and_focus)
win_event_filter = WinEventFilter(keybinder)
event_dispatcher = QAbstractEventDispatcher.instance()
event_dispatcher.installNativeEventFilter(win_event_filter)
app.exec()
sys.stdout.close()
sys.exit()
if __name__ == '__main__':
run()
``` |
{
"source": "joshburnett/msteamsnotifiers",
"score": 2
} |
#### File: joshburnett/msteamsnotifiers/setup.py
```python
import re
from setuptools import setup
with open('README.md') as readme:
long_description = readme.read()
def get_version(filename='msteamsnotifiers.py'):
""" Extract version information stored as a tuple in source code """
version = ''
with open(filename, 'r') as fp:
for line in fp:
m = re.search("__version__ = '(.*)'", line)
if m is not None:
version = m.group(1)
break
return version
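# For example, a line like "__version__ = '1.2.3'" in msteamsnotifiers.py would yield '1.2.3' (version shown is illustrative only).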
# What packages are required for this module to be executed?
REQUIRED = [
'pymsteams',
'friendly_traceback',
]
setup(
name="msteamsnotifiers",
version=get_version(),
py_modules=["msteamsnotifiers"],
install_requires=REQUIRED,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
# metadata for upload to PyPI
author="<NAME>",
author_email="<EMAIL>",
description="Decorators for automatically notifying an MS Teams channel of events",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
keywords="Microsoft Teams msteams channel notify message post",
url="https://github.com/joshburnett/msteamsnotifiers",
)
``` |
{
"source": "joshcai/shengji",
"score": 4
} |
#### File: shengji/shengji/cards.py
```python
import json
import random
class Card(object):
"""Representation of a single card."""
C = 'clubs'
D = 'diamonds'
H = 'hearts'
S = 'spades'
T = 'trump'
SUITS = (C, D, H, S)
NUM = {
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'jack',
12: 'queen',
13: 'king',
14: 'ace',
15: 'trump number',
16: 'trump number trump suit',
17: 'small joker',
18: 'big joker'
}
def __str__(self):
"""Returns string representation."""
a = self.NUM[self.actual_num] + " " + self.actual_suit
if self.num != self.actual_num or self.suit != self.actual_suit:
a +=" (" + self.NUM[self.num] + " " + self.suit + ")"
return a
def __init__(self, num, suit):
self.num = num # used to compare order of cards
self.suit = suit # same as actual_suit unless trump
self.actual_num = num # actual card number
self.actual_suit = suit # actual card suit
def smallerThan(self, suit, other_card):
"""Returns whether other_card is greater.
Args:
suit: String, suit of the current trick
other_card: Card, other card to compare to
Returns:
Boolean, True if other_card is greater
"""
if suit == self.T or self.suit == other_card.suit:
return other_card.num > self.num
if self.suit == self.T and other_card.suit != self.T:
return False
if self.suit != self.T and other_card.suit == self.T:
return True
if self.suit == suit and other_card.suit != suit:
return False
if self.suit != suit and other_card.suit == suit:
return True
raise Exception('Could not compare cards')
def trumpify(self, suit, num):
"""Convert card to trump if it matches trump suit or trump num.
Args:
suit: String, trump suit
num: Integer, trump number
"""
if self.suit == suit: # trump suit
self.suit = self.T
if self.num == num: # trump suit, trump num
self.num = 16
elif self.num == num: # trump num, not trump suit
self.suit = self.T
self.num = 15
def objRepr(self):
"""Return dictionary of attributes for converting to JSON."""
temp_card = {
'suit': self.suit,
'num': self.num,
'actual_suit': self.actual_suit,
'actual_num': self.actual_num
}
return temp_card
def convertToJson(self):
"""Returns card in JSON form."""
return json.dumps(self.objRepr(), separators=(',', ':'))
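# Illustrative example (trump suit/number chosen arbitrarily, not from the original source):
#   card = Card(2, 'spades')
#   card.trumpify('hearts', 2)  # becomes suit 'trump', num 15 (trump number, off-suit)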
class Deck(object):
def __str__(self):
return str(self.cards)
def __init__(self, decks=2):
cards = [Card(x,y) for x in range(2, 15) for y in Card.SUITS]
cards.append(Card(17, Card.T)) # small joker
cards.append(Card(18, Card.T)) # big joker
self.cards = int(decks) * cards
self.size = int(decks) * 54
self.current = 0 # used when dealing cards
def shuffle(self):
"""Shuffle cards, reset position to 0 for dealing."""
random.shuffle(self.cards)
self.current = 0
def getNextCard(self):
"""Gets next card in the deck, return None if no more left."""
if self.current < self.size:
self.current += 1
return self.cards[self.current-1]
return None
class Cards(object):
def __str__(self):
return str([str(x) for x in self.cards])
def __init__(self):
self.cards = []
def __getitem__(self, index):
return self.cards[index]
def append(self, card):
self.cards.append(card)
def convertToJson(self):
"""Returns entire array in JSON form."""
temp_cards = [card.objRepr() for card in self.cards]
return json.dumps(temp_cards, separators=(',', ':'))
def points(self):
"""Counts number of points in the cards."""
p = 0
for card in self.cards:
if card.actual_num == 5:
p += 5
elif card.actual_num == 10 or card.actual_num == 13: # ten or kings
p += 10
return p
def __contains__(self, key):
"""Checks to see if a card is in these cards.
Args:
key: tuple holding num, suit, and number of times to check (default 1)
"""
if len(key) == 3:
num, suit, times = key
elif len(key) == 2:
num, suit = key
times = 1
else:
raise Exception('Wrong number of arguments in tuple')
count = 0
for card in self.cards:
if card.suit == suit and card.num == num:
count += 1
if count == times:
return True
return False
class Hand(Cards):
def __init__(self):
super(Hand, self).__init__()
def removeCard(self, position):
"""Removes and returns card at a certain position.
Args:
position: Integer, position of the card to be removed.
"""
return self.cards.pop(position)
def empty(self):
"""Returns True if hand is empty."""
return len(self.cards) == 0
def trumpify(self, suit, num):
"""Trumpify all cards in the hand.
Args:
suit: String, trump suit
num: Integer, trump num
"""
for card in self.cards:
card.trumpify(suit, num)
def sort(self):
"""Sort cards by suit first, then by number.
Note: should be called after hand has been trumpified.
"""
self.cards = sorted(self.cards, key=lambda x: (x.suit, x.num))
def containsSuit(self, suit):
"""Check if hand has any cards left of a certain suit.
Args:
suit: String, suit to check
"""
for card in self.cards:
if card.suit == suit:
return True
return False
class Trick(Cards):
@property
def suit(self):
"""Suit of the trick (equal to the suit of the first card in the trick)."""
return self.cards[0].suit
def __init__(self):
super(Trick, self).__init__()
def biggest(self):
"""Returns position of the biggest card in the trick."""
biggestCard = self.cards[0]
biggestPosition = 0
for i, card in enumerate(self.cards[1:]):
if biggestCard.smallerThan(self.suit, card):
biggestCard = card
biggestPosition = i + 1
return biggestPosition
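# Sketch of the intended flow (assumed usage, not taken from the original source):
#   deck = Deck(decks=2)
#   deck.shuffle()
#   hand = Hand()
#   hand.append(deck.getNextCard())
#   hand.trumpify('hearts', 2)
#   hand.sort()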
```
#### File: shengji/shengji/players.py
```python
from shengji import cards
class Player(object):
def __init__(self, name="Default", ws=None):
self.name = name
self.ws = ws # WebSockets connection
self.hand = cards.Hand()
def sendMessage(self, message):
"""Send message to specific player.
Args:
message: String, message to send
"""
self.ws.write_message(message)
def fromClient(self):
"""Returns toro queue with client messages.
To get next message, use .get()
"""
return self.ws.clientMessages
class Players(object):
def __init__(self):
self.players = []
def __len__(self):
return len(self.players)
def __getitem__(self, index):
return self.players[index]
def __iter__(self):
self.a = 0
return self
def __next__(self):
if self.a >= len(self.players):
raise StopIteration
result = self.players[self.a]
self.a += 1
return result
def add(self, name, ws):
self.players.append(Player(name, ws))
# TODO: removePlayer function
def sendMessage(self, message):
"""Sends message to all players.
Args:
message: String, message to send.
"""
for player in self.players:
player.sendMessage(message)
```
#### File: shengji/test/test_cards.py
```python
import unittest
from shengji import cards
class TestCards(unittest.TestCase):
def testContainsCard(self):
c = cards.Cards()
c.append(cards.Card(4, 'hearts'))
c.append(cards.Card(8, 'hearts'))
c.append(cards.Card(8, 'spades'))
self.assertTrue((4, 'hearts') in c)
self.assertTrue((8, 'hearts') in c)
self.assertTrue((8, 'spades') in c)
self.assertFalse((8, 'diamonds') in c)
self.assertFalse((5, 'clubs') in c)
def testContainsCardMultiple(self):
c = cards.Cards()
c.append(cards.Card(4, 'hearts'))
c.append(cards.Card(4, 'hearts'))
self.assertTrue((4, 'hearts') in c)
self.assertTrue((4, 'hearts', 1) in c)
self.assertTrue((4, 'hearts', 2) in c)
self.assertFalse((4, 'hearts', 3) in c)
self.assertFalse((4, 'hearts', 4) in c)
def testContainsCardRaiseException(self):
with self.assertRaises(Exception):
c = cards.Cards()
c.append(cards.Card(4, 'hearts'))
c.append(cards.Card(4, 'hearts'))
(4,) in c
def testPoints(self):
c = cards.Cards()
c.append(cards.Card(5, 'hearts'))
c.append(cards.Card(10, 'hearts'))
c.append(cards.Card(13, 'spades'))
c.append(cards.Card(7, 'spades'))
self.assertEqual(25, c.points())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshcai/utdcs",
"score": 2
} |
#### File: processing/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'processing_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=120)),
('content', self.gf('django.db.models.fields.TextField')()),
('author', self.gf('django.db.models.fields.CharField')(max_length=120)),
('date', self.gf('django.db.models.fields.DateTimeField')()),
('date_str', self.gf('django.db.models.fields.CharField')(max_length=120)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'processing', ['Post'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'processing_post')
models = {
u'processing.post': {
'Meta': {'object_name': 'Post'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'content': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'date_str': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '120'})
}
}
complete_apps = ['processing']
``` |
{
"source": "joshcalcino/plonk",
"score": 2
} |
#### File: plonk/analysis/extra.py
```python
from functools import partial
from .._logging import logger
from . import profiles
def extra_profiles(profile, num_separate_dust: int = 0, num_mixture_dust: int = 0):
"""Make extra profiles available.
Parameters
----------
profile
The profile object to add extra profiles to.
num_separate_dust
The number of "separate sets of particles" dust species.
num_mixture_dust
The number of "mixture" dust species.
"""
num_dust_species = num_mixture_dust + num_separate_dust
for prof, ndims in profiles.appropriate_ndim.items():
if profile.ndim in ndims:
if prof not in profiles.dust_profiles:
profile._profile_functions[prof] = getattr(profiles, prof)
else:
if profiles.dust_profiles[prof] == 'both':
for idx in range(num_dust_species):
profile._profile_functions[f'{prof}_{idx+1:03}'] = partial(
getattr(profiles, prof), idx
)
elif profiles.dust_profiles[prof] == 'mixture':
for idx in range(num_mixture_dust):
profile._profile_functions[f'{prof}_{idx+1:03}'] = partial(
getattr(profiles, prof), idx
)
elif profiles.dust_profiles[prof] == 'mixture (gas)':
if num_mixture_dust > 0:
profile._profile_functions[prof] = getattr(profiles, prof)
else:
logger.debug(f'{prof} - cannot add profile')
```
#### File: plonk/analysis/filters.py
```python
from .._units import Quantity
from .._units import units as plonk_units
from ..snap.snap import SnapLike, SubSnap
from .particles import radius_cylindrical, radius_spherical
CENTER = (0, 0, 0) * plonk_units.au
def box(
snap: SnapLike,
xwidth: Quantity,
ywidth: Quantity,
zwidth: Quantity,
center: Quantity = CENTER,
) -> SubSnap:
"""Particles within a box.
Parameters
----------
snap
The Snap object.
xwidth
The x-width of the box.
ywidth
The y-width of the box.
zwidth
The z-width of the box.
center : optional
The center of the box as a Quantity like (x, y, z) * au.
Default is (0, 0, 0).
Returns
-------
SubSnap
The SubSnap with particles in the box.
"""
dx, dy, dz = xwidth / 2, ywidth / 2, zwidth / 2
mask = (
(snap['position_x'] > center[0] - dx)
& (snap['position_x'] < center[0] + dx)
& (snap['position_y'] > center[1] - dy)
& (snap['position_y'] < center[1] + dy)
& (snap['position_z'] > center[2] - dz)
& (snap['position_z'] < center[2] + dz)
)
return snap[mask]
def cylinder(
snap: SnapLike, radius: Quantity, height: Quantity, center: Quantity = CENTER
) -> SubSnap:
"""Particles within a cylinder.
Parameters
----------
snap
The Snap object.
radius
The radius of the cylinder.
height
The height of the cylinder.
center : optional
The center of the cylinder as a Quantity like (x, y, z) * au.
Default is (0, 0, 0).
Returns
-------
SubSnap
The SubSnap with particles in the cylinder.
"""
dh = height / 2
R = radius_cylindrical(snap=snap, origin=center)
mask = (
(R < radius)
& (snap['position_z'] < center[2] + dh)
& (snap['position_z'] > center[2] - dh)
)
return snap[mask]
def annulus(
snap: SnapLike,
radius_min: Quantity,
radius_max: Quantity,
height: Quantity,
center: Quantity = CENTER,
) -> SubSnap:
"""Particles within an annulus.
Parameters
----------
snap
The Snap object.
radius_min
The inner radius of the annulus.
radius_max
The outer radius of the annulus.
height
The height of the annulus.
center : optional
The center of the annulus as a Quantity like (x, y, z) * au.
Default is (0, 0, 0).
Returns
-------
SubSnap
The SubSnap with particles in the annulus.
"""
dh = height / 2
R = radius_cylindrical(snap=snap, origin=center)
mask = (
(R > radius_min)
& (R < radius_max)
& (snap['position_z'] < center[2] + dh)
& (snap['position_z'] > center[2] - dh)
)
return snap[mask]
def sphere(snap: SnapLike, radius: Quantity, center: Quantity = CENTER) -> SubSnap:
"""Particles within a sphere.
Parameters
----------
snap
The Snap object.
radius
The radius of the sphere.
center : optional
The center of the sphere as a Quantity like (x, y, z) * au.
Default is (0, 0, 0).
Returns
-------
SubSnap
The SubSnap with particles in the sphere.
"""
R = radius_spherical(snap=snap, origin=center)
mask = R < radius
return snap[mask]
def shell(
snap: SnapLike,
radius_min: Quantity,
radius_max: Quantity,
center: Quantity = CENTER,
) -> SubSnap:
"""Particles within a spherical shell.
Parameters
----------
snap
The Snap object.
radius_min
The inner radius of the shell.
radius_max
The outer radius of the shell.
center : optional
The center of the shell as a Quantity like (x, y, z) * au.
Default is (0, 0, 0).
Returns
-------
SubSnap
The SubSnap with particles in the shell.
"""
R = radius_spherical(snap=snap, origin=center)
mask = (R > radius_min) & (R < radius_max)
return snap[mask]
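# Minimal usage sketch (assumes an already-loaded Snap named `snap`):
#   inner_disc = sphere(snap, radius=50 * plonk_units.au)
#   ring = annulus(snap, radius_min=50 * plonk_units.au, radius_max=100 * plonk_units.au,
#                  height=10 * plonk_units.au)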
```
#### File: plonk/plonk/_logging.py
```python
import logging
import platform
# Logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler('.plonk.log')
console_handler.setLevel(logging.INFO)
file_handler.setLevel(logging.DEBUG)
console_format = logging.Formatter('%(levelname)s - %(funcName)s - %(message)s')
file_format = logging.Formatter(
'%(asctime)s - %(levelname)s - %(funcName)s - %(message)s'
)
console_handler.setFormatter(console_format)
file_handler.setFormatter(file_format)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
def get_os_info():
"""Get the operating system version for logging."""
system = platform.system()
if system == 'Darwin':
system = 'macOS'
release = platform.release()
return f'{system} version: {release}'
def logger_init(__version__):
"""Log versions and platform."""
logger.debug(f'Plonk v{__version__} on Python {platform.python_version()}')
logger.debug(f'{get_os_info()}, {platform.machine()}')
```
#### File: plonk/simulation/simulation.py
```python
from __future__ import annotations
import warnings
from copy import copy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Union
import numpy as np
from pandas import DataFrame
from .._logging import logger
from .._units import Quantity
from ..snap import load_snap
from ..visualize.simulation import visualize_sim
from .time_series import load_time_series, time_series_units
if TYPE_CHECKING:
from ..snap.snap import Snap
_properties_vary_per_snap = ('time',)
_data_sources = ('Phantom',)
class Simulation:
"""Smoothed particle hydrodynamics simulation object.
This class aggregates snapshot files, global quantity and sink time
series data. Snapshot files contain a complete snapshot of the
simulation at a particular time. Other files contain time series of
global quantities on particles such as energy and momentum, and
time series data for sink particles.
Examples
--------
Reading simulation data into a Simulation object.
>>> sim = plonk.load_simulation('prefix', path_to_directory)
Accessing the snapshots.
>>> sim.snaps
Accessing the properties.
>>> sim.properties
Accessing the global quantity and sink time series data.
>>> sim.time_series['global']
>>> sim.time_series['sinks']
"""
def __init__(self):
self.data_source: str
self.prefix: str
self.paths: Dict[str, Any]
self._snaps: List[Snap] = None
self._properties: Dict[str, Any] = None
self._code_units: Dict[str, Any] = None
self._time_series: Dict[str, Union[DataFrame, List[DataFrame]]] = None
self._snap_file_extension = ''
self._len = -1
def load_simulation(
self,
prefix: str,
directory: Union[str, Path] = None,
data_source: str = 'Phantom',
) -> Simulation:
"""Load Simulation.
Parameters
----------
prefix
Simulation prefix, e.g. 'disc', if files are named like
disc_00000.h5, disc01.ev, discSink0001N01.ev, etc.
directory : optional
Directory containing simulation snapshot files and auxiliary
files. Default is None.
data_source : optional
The SPH code used to produce the simulation data. Default
is 'Phantom'.
"""
if data_source not in _data_sources:
raise ValueError(f'Data source not available: try {_data_sources}')
self.data_source = data_source
if directory is None:
directory = '.'
logger.debug(f'Loading {data_source} simulation: {prefix} at {directory}')
self.prefix = prefix
self.paths = {
'directory': Path(directory).expanduser().resolve(),
}
if not list(self.paths['directory'].glob(self.prefix + '*')):
raise FileNotFoundError(f'No files with prefix: {prefix}')
self._snap_file_extension = self._get_snap_file_extension()
self.paths['snaps'] = self._get_snap_files()
self.paths['time_series_global'] = self._get_global_ts_files()
self.paths['time_series_sinks'] = self._get_sink_ts_files()
#print(self.paths['snaps'], self.paths['time_series_global'], self.paths['time_series_sinks'])
return self
@property
def snaps(self) -> List[Snap]:
"""List of Snap objects associated with the simulation."""
if self._snaps is None:
self._generate_snap_objects()
return self._snaps
@property
def properties(self) -> Dict[str, Any]:
"""Properties associated with the simulation."""
if self._properties is None:
self._generate_properties()
return self._properties
@property
def code_units(self) -> Dict[str, Any]:
"""Units associated with the simulation."""
if self._code_units is None:
self._generate_units()
return self._code_units
@property
def time_series(self) -> DataFrame:
"""Time series data."""
if self._time_series is None:
self._generate_time_series()
return self._time_series
def to_array(self, quantity: str, indices: List[int] = None) -> Quantity:
"""Generate an array of a quantity over all snapshots.
Warning: this can be very memory intensive and slow.
Parameters
----------
quantity
The quantity as a string, e.g. 'position'.
indices
You can select a subset of particles by indices
corresponding to snap['id'].
Returns
-------
An array with units.
Examples
--------
Get the position of every particle during the whole simulation.
>>> pos = sim.to_array(quantity='position')
>>> pos.shape
(31, 1100000, 3)
"""
q = list()
arr: Quantity = self.snaps[0][quantity]
units = arr.units
for snap in self.snaps:
if indices is None:
array: Quantity = snap[quantity]
else:
array = snap[quantity][indices]
q.append(array.magnitude)
q *= units
return q
def _generate_snap_objects(self):
"""Generate Snap objects."""
snaps = list()
fail = 0
for snap in self.paths['snaps']:
try:
snaps.append(load_snap(snap))
except (OSError, RuntimeError):
fail += 1
if fail > 0:
logger.warning(f'Cannot read {fail} snap(s)')
self._snaps = snaps
def _generate_properties(self):
"""Generate sim.properties from snap.properties."""
prop = copy(self.snaps[0].properties)
for key in _properties_vary_per_snap:
prop[key] = list()
for snap in self.snaps:
for key, val in snap.properties.items():
if isinstance(prop[key], list):
prop[key].append(val)
else:
if np.any(prop[key] != val):
prop[key] = '__inconsistent__'
for key, val in prop.items():
if isinstance(val, list):
if isinstance(val[0], Quantity):
prop[key] = np.array([v.m for v in val]) * val[0].u
else:
prop[key] = np.array(val)
self._properties = prop
def _generate_units(self):
"""Generate sim.code_units from snap.code_units."""
u = copy(self.snaps[0].code_units)
for snap in self.snaps:
for key, val in snap.code_units.items():
if u[key] != val:
u[key] = '__inconsistent__'
self._code_units = u
def _generate_time_series(self):
"""Generate time series data."""
self._time_series = dict()
if self.paths['time_series_global']:
self._time_series['global'] = load_time_series(
self.paths['time_series_global']
)
if self.paths['time_series_sinks']:
self._time_series['sinks'] = [
load_time_series(files) for files in self.paths['time_series_sinks']
]
def _get_global_ts_files(self, glob: str = None) -> List[Path]:
"""Get global time series files."""
if glob is None:
# Phantom ev file name format
glob = self.prefix + '[0-9][0-9].ev'
return sorted(list(self.paths['directory'].glob(glob)))
def _get_sink_ts_files(self, glob: str = None) -> List[List[Path]]:
"""Get sink time series files."""
if glob is None:
# Phantom ev file name format
glob = self.prefix + 'Sink[0-9][0-9][0-9][0-9]N[0-9][0-9].ev'
n = len(self.prefix) + len('Sink')
n_sinks = len({p.name[n : n + 4] for p in self.paths['directory'].glob(glob)})
sinks = list()
for idx in range(1, n_sinks + 1):
sinks.append(
sorted(
self.paths['directory'].glob(
self.prefix + f'Sink{idx:04}N[0-9][0-9].ev'
)
)
)
logger.debug(f'Sink time series files: {sinks}')
return sinks
def set_units_on_time_series(self, config: Union[str, Path] = None):
"""Set physical units on time series data.
Parameters
----------
config : optional
The path to a Plonk config.toml file.
"""
units = time_series_units(sim=self, data_source=self.data_source, config=config)
if 'global' in self.time_series:
_apply_units_to_dataframe(self.time_series['global'], units)
if 'sinks' in self.time_series:
for ts in self.time_series['sinks']:
_apply_units_to_dataframe(ts, units)
return self
def unset_units_on_time_series(self, config: Union[str, Path] = None):
"""Un-set physical units on time series data.
Parameters
----------
config : optional
The path to a Plonk config.toml file.
"""
units = time_series_units(sim=self, data_source=self.data_source, config=config)
if 'global' in self.time_series:
_un_apply_units_to_dataframe(self.time_series['global'], units)
if 'sinks' in self.time_series:
for ts in self.time_series['sinks']:
_un_apply_units_to_dataframe(ts, units)
return self
def _get_snap_files(self, glob: str = None) -> List[Path]:
"""Get snapshot files."""
if glob is None:
# Phantom snapshot file name format
glob = (
self.prefix + '_[0-9][0-9][0-9][0-9][0-9].' + self._snap_file_extension
)
return sorted(list(self.paths['directory'].glob(glob)))
def _get_snap_file_extension(self, glob: str = None):
"""Snapshot file extension.
Determine snap file type from extension assuming file names
follow a glob pattern.
"""
if glob is None:
# Phantom HDF5 snapshot file name format
glob = self.prefix + '_[0-9][0-9][0-9][0-9][0-9].h5'
file_types = {f.suffix for f in self.paths['directory'].glob(glob)}
if len(file_types) > 1:
raise ValueError(
'Cannot determine simulation snapshot file type: '
f'is it one of {file_types}?'
)
if len(file_types) == 0:
raise ValueError(
'Cannot determine snapshot file type: '
'no files named like prefix_xxxxx.ext'
)
file_ext = file_types.pop()[1:]
if file_ext not in ('h5',):
raise ValueError('File extension not available; must be ".h5"')
return file_ext
def __len__(self):
"""Length as number of snaps."""
if self._len == -1:
self._len = len(self.snaps)
return self._len
def __repr__(self):
"""Dunder repr method."""
return self.__str__()
def __str__(self):
"""Dunder str method."""
return (
f'<plonk.Simulation: "{self.prefix}", '
f'directory="{self.paths["directory"].name}">'
)
visualize = visualize_sim
def load_sim(
prefix: str,
directory: Union[str, Path] = None,
data_source: str = 'Phantom',
) -> Simulation:
"""Load Simulation.
Parameters
----------
prefix
Simulation prefix, e.g. 'disc', if files are named like
disc_00000.h5, disc01.ev, discSink0001N01.ev, etc.
directory : optional
Directory containing simulation snapshot files and auxiliary
files. Default is None.
data_source : optional
The SPH code used to produce the simulation data. Default
is 'Phantom'.
"""
msg = (
'load_sim is deprecated and will be removed in v0.7.4, '
'please use load_simulation instead'
)
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
return load_simulation(prefix=prefix, directory=directory, data_source=data_source)
def load_simulation(
prefix: str,
directory: Union[str, Path] = None,
data_source: str = 'Phantom',
) -> Simulation:
"""Load Simulation.
Parameters
----------
prefix
Simulation prefix, e.g. 'disc', if files are named like
disc_00000.h5, disc01.ev, discSink0001N01.ev, etc.
directory : optional
Directory containing simulation snapshot files and auxiliary
files. Default is None.
data_source : optional
The SPH code used to produce the simulation data. Default
is 'Phantom'.
"""
return (
Simulation()
.load_simulation(prefix=prefix, directory=directory, data_source=data_source)
.set_units_on_time_series()
)
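# The helpers below attach physical units to time series columns in place, renaming them with a "[unit]" suffix, and undo that renaming.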
def _apply_units_to_dataframe(dataframe, units):
keys = list()
for key, val in units.items():
if key in dataframe:
keys.append(key)
dataframe[key] = (dataframe[key].to_numpy() * units[key]).magnitude
mapper = {key: f'{key} [{units[key].units:~}]' for key in keys}
dataframe.rename(columns=mapper, inplace=True)
return dataframe
def _un_apply_units_to_dataframe(dataframe, units):
keys = list()
for key, val in units.items():
key_unit = f'{key} [{units[key].units:~}]'
if key_unit in dataframe:
keys.append(key)
dataframe[key_unit] = (
dataframe[key_unit].to_numpy() / units[key]
).magnitude
mapper = {f'{key} [{units[key].units:~}]': key for key in keys}
dataframe.rename(columns=mapper, inplace=True)
return dataframe
```
#### File: plonk/utils/snap.py
```python
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, List, Union
from .._config import read_config
from .._units import units as plonk_units
if TYPE_CHECKING:
from ..snap.snap import SnapLike
def gravitational_constant_in_code_units(snap: SnapLike) -> float:
"""Gravitational constant in code units.
Parameters
----------
snap
The Snap object.
Returns
-------
float
The gravitational constant in code units.
"""
G = plonk_units.newtonian_constant_of_gravitation
G_units = (
snap.code_units['length'] ** 3
/ snap.code_units['mass']
/ snap.code_units['time'] ** 2
)
G = (G / G_units).to_base_units().magnitude
return G
def add_aliases(snap: SnapLike, filename: Union[str, Path] = None):
"""Add array aliases to a Snap.
Parameters
----------
snap
The Snap object.
filename : optional
The path to a Plonk config.toml file. If None, use the default
file.
"""
conf = read_config(filename=filename)
for key, val in conf['arrays']['aliases'].items():
snap.add_alias(key, val)
def dust_array_names(
name: str, num_dust_species: int, add_gas: bool = False
) -> List[str]:
"""List dust array names.
Parameters
----------
name
The base array name, e.g. "dust_density" or "stopping_time".
num_dust_species
The number of dust species.
add_gas
If True add the gas version of the dust name.
Returns
-------
List
A list of array names with appropriate suffixes.
Examples
--------
Get the dust density strings.
>>> dust_array_names('dust_density', 5)
['dust_density_001',
'dust_density_002',
'dust_density_003',
'dust_density_004',
'dust_density_005']
Get the dust density strings with gas.
>>> dust_array_names(name='dust_density', num_dust_species=5, add_gas=True)
['gas_density',
'dust_density_001',
'dust_density_002',
'dust_density_003',
'dust_density_004',
'dust_density_005']
"""
names = list()
if add_gas:
names.append(f'{name.replace("dust", "gas")}')
names += [f'{name}_{n+1:03}' for n in range(num_dust_species)]
return names
def vector_array_names(name: str, add_mag: bool = False) -> List[str]:
"""List vector array names.
Parameters
----------
name
The base array name, e.g. "angular_momentum".
add_mag
If True add the magnitude of the array.
Returns
-------
List
A list of array names with appropriate suffixes.
Examples
--------
Get the angular momentum strings.
>>> vector_array_names('angular_momentum')
['angular_momentum_x',
'angular_momentum_y',
'angular_momentum_z']
Get the angular momentum strings with magnitude.
>>> vector_array_names(name='angular_momentum', add_mag=True)
['angular_momentum_x',
'angular_momentum_y',
'angular_momentum_z',
'angular_momentum_mag']
"""
names = [f'{name}_{x}' for x in ['x', 'y', 'z']]
if add_mag:
names.append(f'{name}_mag')
return names
```
#### File: plonk/tests/test_animation.py
```python
from pathlib import Path
import plonk
from plonk import visualize
DIR_PATH = Path(__file__).parent / 'data/phantom'
PREFIX = 'dustseparate'
def test_animate():
"""Test animate."""
sim = plonk.load_simulation(prefix=PREFIX, directory=DIR_PATH)
snaps = [sim.snaps[0], sim.snaps[0], sim.snaps[0]]
filename = Path('animation.mp4')
plonk.animate(
filename=filename,
snaps=snaps,
quantity='density',
units={'position': 'au', 'density': 'g/cm^3'},
adaptive_colorbar=False,
num_pixels=(32, 32),
)
filename.unlink()
def test_animation_images():
"""Test animation of images."""
sim = plonk.load_simulation(prefix=PREFIX, directory=DIR_PATH)
snaps = [sim.snaps[0], sim.snaps[0], sim.snaps[0]]
filename = Path('animation.mp4')
visualize.animation_images(
filename=filename,
snaps=snaps,
quantity='density',
units={'position': 'au', 'density': 'g/cm^3'},
adaptive_colorbar=False,
num_pixels=(32, 32),
)
filename.unlink()
def test_animation_profiles():
"""Test animation of profiles."""
sim = plonk.load_simulation(prefix=PREFIX, directory=DIR_PATH)
snaps = [sim.snaps[0], sim.snaps[0], sim.snaps[0]]
profiles = [plonk.load_profile(snap) for snap in snaps]
filename = Path('animation.mp4')
visualize.animation_profiles(
filename=filename,
profiles=profiles,
x='radius',
y='surface_density',
units={'position': 'au', 'surface_density': 'g/cm^2'},
)
filename.unlink()
def test_animation_particles():
"""Test animation of particle plots."""
sim = plonk.load_simulation(prefix=PREFIX, directory=DIR_PATH)
snaps = [sim.snaps[0], sim.snaps[0], sim.snaps[0]]
filename = Path('animation.mp4')
visualize.animation_particles(
filename=filename,
snaps=snaps,
x='x',
y='density',
units={'position': 'au', 'density': 'g/cm^3'},
adaptive_limits=False,
)
filename.unlink()
```
#### File: plonk/tests/test_interpolation.py
```python
import numpy as np
from plonk.visualize.interpolation import scalar_interpolation, vector_interpolation
from .data.interpolation_arrays import (
scalar_projection,
scalar_slice,
vector_projection,
vector_slice,
)
N = 10
XX = np.ones(N)
YY = np.ones(N)
ZZ = np.ones(N)
HH = np.ones(N)
WW = np.ones(N)
MM = np.ones(N)
S_DATA = np.ones(N)
X_DATA = np.ones(N)
Y_DATA = np.ones(N)
EXTENT = (0, 1, 0, 1)
PIX = (10, 10)
ZSLICE = 0.5
HFACT = 1.0
def test_scalar_interpolation_projection():
"""Test projection interpolation."""
im = scalar_interpolation(
quantity=S_DATA,
x_coordinate=XX,
y_coordinate=YY,
extent=EXTENT,
smoothing_length=HH,
particle_mass=MM,
hfact=HFACT,
num_pixels=PIX,
)
np.testing.assert_allclose(im, scalar_projection, rtol=1e-5)
def test_scalar_interpolation_slice():
"""Test cross section interpolation."""
im = scalar_interpolation(
quantity=S_DATA,
x_coordinate=XX,
y_coordinate=YY,
dist_from_slice=ZZ * ZSLICE,
extent=EXTENT,
smoothing_length=HH,
particle_mass=MM,
hfact=HFACT,
num_pixels=PIX,
)
np.testing.assert_allclose(im, scalar_slice, rtol=1e-5)
def test_vector_interpolation_projection():
"""Test projection interpolation."""
vec = vector_interpolation(
quantity_x=X_DATA,
quantity_y=Y_DATA,
x_coordinate=XX,
y_coordinate=YY,
extent=EXTENT,
smoothing_length=HH,
particle_mass=MM,
hfact=HFACT,
num_pixels=PIX,
)
np.testing.assert_allclose(vec, vector_projection, rtol=1e-5)
def test_vector_interpolation_slice():
"""Test cross section interpolation."""
vec = vector_interpolation(
quantity_x=X_DATA,
quantity_y=Y_DATA,
x_coordinate=XX,
y_coordinate=YY,
dist_from_slice=ZZ * ZSLICE,
extent=EXTENT,
smoothing_length=HH,
particle_mass=MM,
hfact=HFACT,
num_pixels=PIX,
)
np.testing.assert_allclose(vec, vector_slice, rtol=1e-5)
```
#### File: plonk/tests/test_snap.py
```python
from pathlib import Path
import numpy as np
import pytest
from scipy.spatial.transform import Rotation
import plonk
from .data.phantom import adiabatic, dustmixture, dustseparate, mhd
SNAPTYPES = [adiabatic, dustmixture, dustseparate, mhd]
DIR = Path(__file__).parent / 'data/phantom'
RTOL = 1e-6
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_load_phantom_snap(snaptype):
"""Testing reading Phantom HDF5 snapshots."""
# Read from Path
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
snap.close_file()
# Read from str
snap = plonk.load_snap(str(filename))
snap.close_file()
# Not exists
with pytest.raises(FileNotFoundError):
plonk.load_snap('does_not_exist.h5')
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_get_item(snaptype):
"""Testing getting items from Snap."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
position = snap['position']
assert position.shape == snaptype.position_shape
subsnap = snap['gas']
assert type(subsnap) == plonk.snap.snap.SubSnap
subsnap = snap[:10]
assert type(subsnap) == plonk.snap.snap.SubSnap
assert len(subsnap) == 10
subsnap = snap[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
assert type(subsnap) == plonk.snap.snap.SubSnap
assert len(subsnap) == 10
subsnap = snap[(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)]
assert type(subsnap) == plonk.snap.snap.SubSnap
assert len(subsnap) == 10
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_read_particle_arrays_from_phantom(snaptype):
"""Testing reading Phantom HDF5 snapshot particle arrays."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_read_properties_from_phantom(snaptype):
"""Testing reading Phantom HDF5 snapshot properties."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
for key, value in snaptype.properties.items():
if isinstance(snap.properties[key], plonk.units.Quantity):
snap_value = snap.properties[key].magnitude
numpy_array = True
elif isinstance(snap.properties[key], str):
snap_value = snap.properties[key]
numpy_array = False
else:
snap_value = snap.properties[key]
numpy_array = True
if numpy_array:
np.testing.assert_allclose(snap_value, value, rtol=RTOL)
else:
assert snap_value == value
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_available_loaded_arrays(snaptype):
"""Testing seeing available/loaded arrays on Snap."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
assert snap.available_arrays() == snaptype.available_arrays
for arr in [
'position_x',
'position_y',
'position_z',
'position_mag',
'h',
'angular_momentum',
]:
snap[arr]
assert snap.loaded_arrays() == snaptype.loaded_arrays
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_array_code_unit(snaptype):
"""Testing getting array code unit."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
position_unit = snaptype.length_unit * plonk.units('meter')
assert snap.array_code_unit('position') == position_unit
for arr in ['position', 'position_x', 'x']:
snap.array_code_unit(arr)
with pytest.raises(ValueError):
snap.array_code_unit('does_not_exist')
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_rotate_snap(snaptype):
"""Testing rotating Snap."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
snap['position']
snap['radius_cylindrical']
if snap.num_sinks > 0:
snap.sinks['position']
snap.rotate(axis=(1, 2, 3), angle=np.pi)
snap.rotate(axis=(1, 2, 3), angle=-np.pi)
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
snap.rotate(axis=(1, 2, 3), angle=np.pi)
snap.reset()
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
rot = np.array([1, 2, 3])
rot = rot / np.linalg.norm(rot)
rot *= 2 * np.pi
rotation = Rotation.from_rotvec(rot)
snap.rotate(rotation=rotation)
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_translate_snap(snaptype):
"""Testing translating Snap."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
unit = f"{snap.code_units['length'].m} {snap.code_units['length'].u}"
snap['position']
if snap.num_sinks > 0:
snap.sinks['position']
snap.translate(translation=(100, 200, 300), unit=unit)
snap.translate(translation=(-100, -200, -300), unit=unit)
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
snap.translate(translation=(100, 200, 300), unit=unit)
snap.reset()
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
with pytest.raises(ValueError):
snap.translate(translation=(100, 200, 300))
with pytest.raises(ValueError):
snap.translate(translation=(100, 200))
snap.translate(translation=(100, 200, 300) * plonk.units(unit), unit=unit)
snap.translate(translation=(-100, -200, -300) * plonk.units(unit), unit=unit)
_check_arrays(
snap,
snaptype.array_name_map,
snaptype.mean_array_values,
snaptype.std_array_values,
)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_write_to_dataframe(snaptype):
"""Testing writing Snap to DataFrame."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
columns = ['position', 'density', 'smoothing_length']
snap.to_dataframe(columns=columns)
columns = ['position', 'density', 'smoothing_length']
snap.to_dataframe(columns=columns, units=['au', 'g/cm^3', 'au'])
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_subsnap(snaptype):
"""Testing getting SubSnap."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
gas = snap['gas']
assert len(gas) == snaptype.len_gas
subsnap = snap[0:100]
assert len(subsnap) == 100
subsnap = snap[[0, 1, 2]]
assert len(subsnap) == 3
subsnap = snap[(0, 1, 2)]
assert len(subsnap) == 3
subsnap = snap[0]
assert len(subsnap) == 1
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_sinks(snaptype):
"""Testing getting sink particles."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
if snap.num_sinks > 0:
sinks = snap.sinks
assert snap.num_sinks == snaptype.num_sinks
assert len(sinks) == snaptype.num_sinks
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_set_array(snaptype):
"""Testing setting array on particles."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
particle_array = np.arange(len(snap)) * plonk.units('dimensionless')
snap['array'] = particle_array
np.testing.assert_allclose(snap['array'].m, particle_array.m, rtol=RTOL)
sink_array = np.arange(len(snap.sinks)) * plonk.units('dimensionless')
snap.sinks['array'] = sink_array
np.testing.assert_allclose(snap.sinks['array'].m, sink_array.m, rtol=RTOL)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_bulk_load(snaptype):
"""Testing bulk loading arrays."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
snap.bulk_load()
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_read_write_extra(snaptype):
"""Testing read write extra arrays."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
_filename = Path('tmp.h5')
arr = np.arange(len(snap)) * plonk.units('dimensionless')
snap['my_array'] = arr
snap.write_extra_arrays(arrays=['my_array'], filename=_filename)
snap = None
snap = plonk.load_snap(filename)
snap.read_extra_arrays(filename=_filename)
np.testing.assert_allclose(snap['my_array'], arr, rtol=RTOL)
_filename.unlink()
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_plot_as_methods(snaptype):
"""Testing plot methods."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
snap.image('density', num_pixels=(16, 16))
snap.plot()
if snap.num_sinks > 0:
sinks = snap.sinks
sinks.plot()
subsnap = snap['gas']
subsnap.image('density', num_pixels=(16, 16))
subsnap.plot()
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_context(snaptype):
"""Testing cache context manager."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
snap.cache_arrays = True
with snap.context(cache=False):
assert snap.loaded_arrays() == []
with snap.context(cache=True):
snap.image('density', num_pixels=(16, 16))
assert snap.loaded_arrays() == [
'density',
'mass',
'position',
'smoothing_length',
]
assert snap.loaded_arrays() == []
assert snap.loaded_arrays() == []
snap.close_file()
def _check_arrays(snap, array_name_map, mean_array_values, std_array_values):
for array in mean_array_values.keys():
np.testing.assert_allclose(
snap.array_in_code_units(array_name_map[array]).mean(),
mean_array_values[array],
rtol=RTOL,
)
for array in std_array_values.keys():
np.testing.assert_allclose(
snap.array_in_code_units(array_name_map[array]).std(),
std_array_values[array],
rtol=RTOL,
)
```
#### File: plonk/tests/test_visualization.py
```python
from pathlib import Path
import pytest
import plonk
from plonk.utils import visualize
from .data.phantom import adiabatic, dustmixture, dustseparate, mhd
SNAPTYPES = [adiabatic, dustmixture, dustseparate, mhd]
DIR = Path(__file__).parent / 'data/phantom'
AU = plonk.units('au')
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_plot(snaptype):
"""Test particle plot."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
plonk.plot(snap=snap)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_plot_with_kwargs(snaptype):
"""Test particle plot with kwargs."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
plonk.plot(
snap=snap,
x='x',
y='y',
c='density',
units={'position': 'au', 'density': 'g/cm^3', 'projection': 'cm'},
)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_image_projection(snaptype):
"""Test image projection."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
plonk.image(snap=snap, quantity='density', num_pixels=(32, 32))
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_image_projection_with_kwargs(snaptype):
"""Test image projection with kwargs."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
plonk.image(
snap=snap,
quantity='density',
x='x',
y='y',
units={'position': 'au', 'density': 'g/cm^3', 'projection': 'cm'},
extent=(-150, 150, -150, 150) * AU,
norm='linear',
cmap='gist_heat',
num_pixels=(32, 32),
)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_image_on_snap(snaptype):
"""Test image projection as method on Snap."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
snap.image(quantity='density', num_pixels=(32, 32))
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_image_slice(snaptype):
"""Test image slice."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
plonk.image(snap=snap, quantity='density', interp='slice', num_pixels=(32, 32))
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_image_slice_with_kwargs(snaptype):
"""Test image slice with kwargs."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
plonk.image(
snap=snap,
quantity='density',
interp='slice',
x='x',
y='y',
units={'position': 'au', 'density': 'g/cm^3', 'projection': 'cm'},
extent=(-150, 150, -150, 150) * AU,
norm='linear',
cmap='gist_heat',
num_pixels=(32, 32),
)
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_plot_smoothing_length(snaptype):
"""Test plot smoothing length as circle."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
visualize.plot_smoothing_length(snap=snap, indices=[0, 1])
snap.close_file()
@pytest.mark.parametrize('snaptype', SNAPTYPES)
def test_get_extent(snaptype):
"""Test getting extent from percentile."""
filename = DIR / snaptype.filename
snap = plonk.load_snap(filename)
visualize.get_extent_from_percentile(snap=snap, x='x', y='y')
snap.close_file()
``` |
{
"source": "joshcalcino/pymcfost",
"score": 3
} |
#### File: pymcfost/pymcfost/wake.py
```python
import numpy as np
import matplotlib.pyplot as plt
from .utils import rotate_coords,rotate_to_obs_plane
def get_wake_cartesian(rp,phip,npts,rmin,rmax,HonR,q):
'''
planet wake formula from Rafikov (2002)
'''
rplot = np.linspace(rmin,rmax,npts)
rr = rplot/rp
phi = phip + np.sign(rmax-rmin)*(1./HonR)*((rr**(q-0.5))/(q-0.5) \
- (rr**(q+1.))/(q+1.) \
- 3./((2.*q-1.)*(q+1.)))
xx = rplot*np.cos(phi)
yy = rplot*np.sin(phi)
return xx,yy
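# Illustrative call with made-up disc parameters: sample the outer wake of a planet at r=1, phi=0
#   x, y = get_wake_cartesian(rp=1.0, phip=0.0, npts=500, rmin=1.0, rmax=3.0, HonR=0.05, q=0.25)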
def plot_wake(xy_obs,inc,PA,HonR,q,color="black"):
'''
plot planet wake
and rotate to the observational viewpoint
'''
inc = np.deg2rad(inc)
PA = np.deg2rad(PA)
# we give planet location in the observational plane
# bad attempt to provide the location in the rotated plane
# by a simple linear scaling (does not get the angle correct)
x_scale, y_scale, dum = rotate_coords(xy_obs[0],xy_obs[1],0.,inc,PA)
x_p = xy_obs[0]*(xy_obs[0]/x_scale)
y_p = xy_obs[1]*(xy_obs[1]/y_scale)
# planet location in the unrotated plane
rp = np.sqrt(x_p**2 + y_p**2)
phip = np.arctan2(y_p,x_p)
print("rp = ",rp," phi planet = ",phip*180./np.pi)
# radial range over which to plot the wake
rmin = 1.e-3
rmax = 3.*rp
npts = 1000
# outer wake
xx,yy = get_wake_cartesian(rp,phip,npts,rp,rmax,HonR,q)
xp,yp = rotate_to_obs_plane(xx,yy,inc,PA)
plt.plot(xx,yy,color=color)
# inner wake
xx,yy = get_wake_cartesian(rp,phip,npts,rp,rmin,HonR,q)
xp,yp = rotate_to_obs_plane(xx,yy,inc,PA)
plt.plot(xx,yy,color=color)
``` |
{
"source": "joshcamas/screen-lapse",
"score": 4
} |
#### File: screen-lapse/fluid/fluid_progressive_light.py
```python
import tkinter as tk
from tkinter import ttk as ttk
import fluid.fluid_light as ui
class Progress(ui.Frame):
"""
Wrapper for the progressive system. Anything you want to progress goes in here.
"""
def __init__(self,parent):
ui.Frame.__init__(self, parent)
#This will not change. This tells the system the top level parent
self.toplevel = BaseLevel(parent)
#add this top level to the parent
self.toplevel.grid(row=0,column=0)
self.padding = {"x":5,"y":5}
self.sticky = ""
#This is the progressive level. This will change, depending if a level
#has been started or stopped
self.currlevel = self.toplevel
self.currlevel.start(self)
def _startcurrentlevel(self,newlevel):
self.currlevel.add(self,newlevel)
self.currlevel = newlevel
self.currlevel.start(self)
def _stopcurrentlevel(self):
if(self.currlevel != self.toplevel):
self.currlevel.stop(self)
self.currlevel = self.currlevel.parentLevel
def _add(self,widget):
self.currlevel.add(self,widget)
return(widget)
def _gridwidget(self,widget,x,y):
widget.grid(row=y,column=x,padx=self.padding["x"],pady=self.padding["y"],sticky=self.sticky)
def setpadding(self,x=0,y=0):
self.padding["x"] = x
self.padding["y"] = y
def setsticky(self,sticky):
self.sticky = sticky
#============[ Widgets ]============#
#Adds a ui.Button
def addbutton(self,label):
button = ui.Button(self.currlevel,label)
self._add(button)
return(button)
#Adds a ui.Label
def addlabel(self,label,bold=False):
label = ui.Label(self.currlevel,label,bold)
self._add(label)
return(label)
#Adds a ui.OutputBox
def addoutput(self,label,value,vertical=False):
horioutput = ui.OutputBox(self.currlevel,label,value,vertical)
self._add(horioutput)
return(horioutput)
#Adds a ui.InputBox
def addinputbox(self,label,default="",width=10):
horioutput = ui.InputBox(self.currlevel,label,default,width)
self._add(horioutput)
return(horioutput)
#Adds a ui.OutputBox
def addcheckbox(self,label,value):
checkbox = ui.CheckBox(self.currlevel,label,value)
self._add(checkbox)
return(checkbox)
#Adds a ui.OutputBox
def addscale(self,label,start,end):
scale = ui.Scale(self.currlevel,label,start,end)
self._add(scale)
return(scale)
def adddropdown(self,label,options=None):
dd = ui.DropDown(self.currlevel,label,options)
self._add(dd)
return(dd)
#Adds a custom ui widget element.
#Use this for ui Graphs and other things
def addcustom(self,customclass,*args, **kwargs):
if issubclass(customclass, ui.Frame):
custom = customclass(self.currlevel,*args, **kwargs)
else:
custom = customclass(self.currlevel.frame,*args, **kwargs)
self._add(custom)
return(custom)
#============[ Levels ]============#
#Start Horizontal List
def starthorizontal(self):
newlevel = HorizontalList(self.currlevel)
self._startcurrentlevel(newlevel)
return(newlevel)
#Stop Horizontal List
def stophorizontal(self):
self._stopcurrentlevel()
#Start Vertical List
def startvertical(self):
newlevel = VerticalList(self.currlevel)
self._startcurrentlevel(newlevel)
return(newlevel)
#Stop Vertical List
def stopvertical(self):
self._stopcurrentlevel()
#Start a Tab List (And create the first tab, optionally)
def starttabs(self,tabname):
newlevel = TabList(self.currlevel)
self._startcurrentlevel(newlevel)
if(tabname != None):
newlevel.newtab(tabname,self)
#Stop a Tab List
def stoptabs(self):
self._stopcurrentlevel()
#Start a new Tab in a Tab List
def newtab(self,tabname):
self.currlevel.newtab(tabname,self)
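# Usage sketch (assumes `parent` is a Tk container wrapped by fluid_light):
#   app = Progress(parent)
#   app.startvertical()
#   name_box = app.addinputbox('Name:', default='')
#   app.addbutton('Save')
#   app.stopvertical()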
class ProgressLevel(ui.Frame):
"""
Base wrapper for levels. They can be "started" and "stopped",
and anything added to the progressive application afterwards
will be added to this level. (Including other levels)
"""
def __init__(self,parent):
ui.Frame.__init__(self,parent)
self.parentLevel = parent
def setParentLevel(self,level):
self.parentLevel = level
def start(self,progress):
pass
def add(self,progress,widget):
pass
def stop(self,progress):
pass
class BaseLevel(ProgressLevel):
"""
The level each `Progress` system starts at. By default, everything will
be added in a horizontal manner
"""
def start(self,progress):
self.currentx = 0
def add(self,progress,widget):
progress._gridwidget(widget,self.currentx,0)
self.currentx+=1
class HorizontalList(ProgressLevel):
def start(self,progress):
self.currentx = 0
def add(self,progress,widget):
progress._gridwidget(widget,self.currentx,0)
self.currentx+=1
class VerticalList(ProgressLevel):
def start(self,progress):
self.currenty = 0
def add(self,progress,widget):
progress._gridwidget(widget,0,self.currenty)
self.currenty+=1
class TabList(ProgressLevel):
"""
The level each `Progress` system starts at. By default, everything will
be added in a horizontal manner
"""
def start(self,progress,text=None):
self.tabs = ttk.Notebook(progress.currlevel.frame)
self.tabs.grid(row=0,column=0,padx=5,pady=5,sticky=tk.NW)
def newtab(self,text,progress):
tab = Tab(self.parent)
self.tabs.add(tab.frame, text=text)
progress.currlevel = tab
tab.start(progress)
tab.setParentLevel(self.parentLevel)
tab.tabOwner = self
self.currentframe = tab
def add(self,progress,widget):
progress._gridwidget(widget,0,0)
#progress._gridwidget(widget,self.currentx,0)
#self.currentx+=1
class Tab(VerticalList):
def newtab(self,text,progress):
self.tabOwner.newtab(text,progress)
``` |
{
"source": "joshcarp/CodeCamp",
"score": 3
} |
#### File: CodeCamp/unit-test/test_verifier.py
```python
def func(x):
return x + 1
def test_answer():
assert func(3) == 4
def test_answer2():
assert func(4) > 6
``` |
{
"source": "Joshcarp/go-slides",
"score": 3
} |
#### File: go-slides/pingserver/compare.py
```python
import csv
import os
import pandas
import matplotlib.pyplot as plt
responsetime_filename = "responsetime.csv"
address = "localhost"
request_range = [1000, 20000, 1000]
def testport(address, port, numRequests):
temp_filename = "temp.csv"
timeout = 2000
command = f"ab -n {numRequests} -c {numRequests} -s {timeout} -q -r -e {temp_filename} {address}:{port}/ping"
print(command)
os.system(command)
file = open(temp_filename)
os.remove(temp_filename)
return file
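# ab's -e flag writes a CSV of response-time percentiles ("Time in ms" per percentage served); read_file() below collects that column.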
def read_file(file):
reader = pandas.read_csv(file)
time_list = [reader.loc[i]["Time in ms"] for i in range(0, 101, 1)]
return time_list
def compare(responsetime_filename, address, request_range):
file = open(responsetime_filename, "w+")
c = csv.writer(file)
header = ["numRequests"] + ["go_"+ str(i) for i in range(0, 101, 1)] + ["java_"+ str(i) for i in range(0, 101, 1)]
c.writerow(header)
for numRequests in range(request_range[0], request_range[1], request_range[2]):
f = testport(address, 9090, numRequests)
go_list = read_file(f)
f = testport(address, 8080, numRequests)
java_list = read_file(f)
c.writerow([numRequests] + go_list + java_list)
file.close()
def generate_graph(responsetime_filename, request_range):
data = pandas.read_csv(responsetime_filename)
plt.figure(figsize=(10,6))
plt.plot(data.numRequests.tolist(), data.java_50.div(1000).tolist(), "o", label = "Java Median", color='#ff9721')
plt.plot(data.numRequests.tolist(), data.java_95.div(1000).tolist(), "^", label = "Java 95th percentile", color='#ff9721')
plt.plot(data.numRequests.tolist(), data.go_50.div(1000).tolist(), "o", label = "Go Median", color = '#4f92ff')
plt.plot(data.numRequests.tolist(), data.go_95.div(1000).tolist(), "^", label = "Go 95th percentile", color = '#4f92ff')
plt.title('Java vs Go server response time')
plt.xlabel('Number of concurrent requests')
plt.ylabel('Time taken (s)')
plt.legend()
plt.xticks(ticks = [x for x in range(request_range[0], request_range[1], request_range[2]*2)])
    plt.savefig(os.path.abspath('..')+"/content/_img/JavaVsGoLoadTest.png", dpi=300, bbox_inches='tight')
if not os.path.isfile(responsetime_filename):
compare(responsetime_filename, address, request_range)
generate_graph(responsetime_filename, request_range)
``` |
{
"source": "joshcarty/dgl",
"score": 2
} |
#### File: benchmarks/model_speed/bench_gat.py
```python
import time
import dgl
from dgl.nn.pytorch import GATConv
import torch
import torch.nn as nn
import torch.nn.functional as F
from .. import utils
class GAT(nn.Module):
def __init__(self,
num_layers,
in_dim,
num_hidden,
num_classes,
heads,
activation,
feat_drop,
attn_drop,
negative_slope,
residual):
super(GAT, self).__init__()
self.num_layers = num_layers
self.gat_layers = nn.ModuleList()
self.activation = activation
# input projection (no residual)
self.gat_layers.append(GATConv(
in_dim, num_hidden, heads[0],
feat_drop, attn_drop, negative_slope, False, self.activation))
# hidden layers
for l in range(1, num_layers):
# due to multi-head, the in_dim = num_hidden * num_heads
self.gat_layers.append(GATConv(
num_hidden * heads[l-1], num_hidden, heads[l],
feat_drop, attn_drop, negative_slope, residual, self.activation))
# output projection
self.gat_layers.append(GATConv(
num_hidden * heads[-2], num_classes, heads[-1],
feat_drop, attn_drop, negative_slope, residual, None))
def forward(self, g, inputs):
h = inputs
for l in range(self.num_layers):
h = self.gat_layers[l](g, h).flatten(1)
# output projection
logits = self.gat_layers[-1](g, h).mean(1)
return logits
@utils.benchmark('time')
@utils.parametrize('data', ['cora', 'pubmed'])
def track_time(data):
data = utils.process_data(data)
device = utils.get_bench_device()
num_epochs = 200
g = data[0].to(device)
features = g.ndata['feat']
labels = g.ndata['label']
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
test_mask = g.ndata['test_mask']
in_feats = features.shape[1]
n_classes = data.num_labels
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
# create model
model = GAT(1, in_feats, 8, n_classes, [8, 1], F.elu,
0.6, 0.6, 0.2, False)
loss_fcn = torch.nn.CrossEntropyLoss()
model = model.to(device)
model.train()
# optimizer
optimizer = torch.optim.Adam(model.parameters(),
lr=1e-2,
weight_decay=5e-4)
# dry run
for epoch in range(10):
logits = model(g, features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# timing
t0 = time.time()
for epoch in range(num_epochs):
logits = model(g, features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
t1 = time.time()
return (t1 - t0) / num_epochs
```
#### File: pytorch/graphsage/utils.py
```python
import torch.multiprocessing as mp
from _thread import start_new_thread
from functools import wraps
import traceback
def thread_wrapped_func(func):
"""
Wraps a process entry point to make it work with OpenMP.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
queue = mp.Queue()
def _queue_result():
exception, trace, res = None, None, None
try:
res = func(*args, **kwargs)
except Exception as e:
exception = e
trace = traceback.format_exc()
queue.put((res, exception, trace))
start_new_thread(_queue_result, ())
result, exception, trace = queue.get()
if exception is None:
return result
else:
assert isinstance(exception, Exception)
raise exception.__class__(trace)
return decorated_function
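# Illustrative usage; the worker function and its arguments below are
# hypothetical, not part of this module. Wrapping the per-process entry point
# is intended to keep OpenMP-backed ops working when launched via
# torch.multiprocessing:
#
#     @thread_wrapped_func
#     def run_worker(rank, args):
#         train(rank, args)
#
#     procs = [mp.Process(target=run_worker, args=(rank, args))
#              for rank in range(num_workers)]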
```
#### File: tests/compute/test_traversal.py
```python
import random
import sys
import time
import unittest
import dgl
import networkx as nx
import numpy as np
import scipy.sparse as sp
import backend as F
import itertools
from utils import parametrize_dtype
np.random.seed(42)
def toset(x):
# F.zerocopy_to_numpy may return a int
return set(F.zerocopy_to_numpy(x).tolist())
@parametrize_dtype
def test_bfs(idtype, n=100):
def _bfs_nx(g_nx, src):
edges = nx.bfs_edges(g_nx, src)
layers_nx = [set([src])]
edges_nx = []
frontier = set()
edge_frontier = set()
for u, v in edges:
if u in layers_nx[-1]:
frontier.add(v)
edge_frontier.add(g.edge_ids(int(u), int(v)))
else:
layers_nx.append(frontier)
edges_nx.append(edge_frontier)
frontier = set([v])
edge_frontier = set([g.edge_ids(u, v)])
# avoids empty successors
if len(frontier) > 0 and len(edge_frontier) > 0:
layers_nx.append(frontier)
edges_nx.append(edge_frontier)
return layers_nx, edges_nx
a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))
g = dgl.from_scipy(a).astype(idtype)
g_nx = g.to_networkx()
src = random.choice(range(n))
layers_nx, _ = _bfs_nx(g_nx, src)
layers_dgl = dgl.bfs_nodes_generator(g, src)
assert len(layers_dgl) == len(layers_nx)
assert all(toset(x) == y for x, y in zip(layers_dgl, layers_nx))
g_nx = nx.random_tree(n, seed=42)
g = dgl.from_networkx(g_nx).astype(idtype)
src = 0
_, edges_nx = _bfs_nx(g_nx, src)
edges_dgl = dgl.bfs_edges_generator(g, src)
assert len(edges_dgl) == len(edges_nx)
assert all(toset(x) == y for x, y in zip(edges_dgl, edges_nx))
@parametrize_dtype
def test_topological_nodes(idtype, n=100):
a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))
b = sp.tril(a, -1).tocoo()
g = dgl.from_scipy(b).astype(idtype)
layers_dgl = dgl.topological_nodes_generator(g)
adjmat = g.adjacency_matrix(transpose=False)
def tensor_topo_traverse():
n = g.number_of_nodes()
mask = F.copy_to(F.ones((n, 1)), F.cpu())
degree = F.spmm(adjmat, mask)
while F.reduce_sum(mask) != 0.:
v = F.astype((degree == 0.), F.float32)
v = v * mask
mask = mask - v
frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())
yield frontier
degree -= F.spmm(adjmat, v)
layers_spmv = list(tensor_topo_traverse())
assert len(layers_dgl) == len(layers_spmv)
assert all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv))
DFS_LABEL_NAMES = ['forward', 'reverse', 'nontree']
@parametrize_dtype
def test_dfs_labeled_edges(idtype, example=False):
dgl_g = dgl.DGLGraph().astype(idtype)
dgl_g.add_nodes(6)
dgl_g.add_edges([0, 1, 0, 3, 3], [1, 2, 2, 4, 5])
dgl_edges, dgl_labels = dgl.dfs_labeled_edges_generator(
dgl_g, [0, 3], has_reverse_edge=True, has_nontree_edge=True)
dgl_edges = [toset(t) for t in dgl_edges]
dgl_labels = [toset(t) for t in dgl_labels]
g1_solutions = [
# edges labels
[[0, 1, 1, 0, 2], [0, 0, 1, 1, 2]],
[[2, 2, 0, 1, 0], [0, 1, 0, 2, 1]],
]
g2_solutions = [
# edges labels
[[3, 3, 4, 4], [0, 1, 0, 1]],
[[4, 4, 3, 3], [0, 1, 0, 1]],
]
def combine_frontiers(sol):
es, ls = zip(*sol)
es = [set(i for i in t if i is not None)
for t in itertools.zip_longest(*es)]
ls = [set(i for i in t if i is not None)
for t in itertools.zip_longest(*ls)]
return es, ls
for sol_set in itertools.product(g1_solutions, g2_solutions):
es, ls = combine_frontiers(sol_set)
if es == dgl_edges and ls == dgl_labels:
break
else:
assert False
if __name__ == '__main__':
test_bfs(idtype='int32')
test_topological_nodes(idtype='int32')
test_dfs_labeled_edges(idtype='int32')
```
#### File: tests/compute/utils.py
```python
import pytest
import backend as F
if F._default_context_str == 'cpu':
parametrize_dtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
else:
    # both int32 and int64 are exercised on GPU as well.
parametrize_dtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
def check_fail(fn, *args, **kwargs):
try:
fn(*args, **kwargs)
return False
except:
return True
```
#### File: tests/distributed/test_partition.py
```python
import dgl
import sys
import os
import numpy as np
from scipy import sparse as spsp
from numpy.testing import assert_array_equal
from dgl.heterograph_index import create_unitgraph_from_coo
from dgl.distributed import partition_graph, load_partition
from dgl import function as fn
import backend as F
import unittest
import pickle
import random
def create_random_graph(n):
arr = (spsp.random(n, n, density=0.001, format='coo', random_state=100) != 0).astype(np.int64)
return dgl.from_scipy(arr)
def check_partition(g, part_method, reshuffle):
g.ndata['labels'] = F.arange(0, g.number_of_nodes())
g.ndata['feats'] = F.tensor(np.random.randn(g.number_of_nodes(), 10), F.float32)
g.edata['feats'] = F.tensor(np.random.randn(g.number_of_edges(), 10), F.float32)
g.update_all(fn.copy_src('feats', 'msg'), fn.sum('msg', 'h'))
g.update_all(fn.copy_edge('feats', 'msg'), fn.sum('msg', 'eh'))
num_parts = 4
num_hops = 2
partition_graph(g, 'test', num_parts, '/tmp/partition', num_hops=num_hops,
part_method=part_method, reshuffle=reshuffle)
part_sizes = []
for i in range(num_parts):
part_g, node_feats, edge_feats, gpb, _ = load_partition('/tmp/partition/test.json', i)
# Check the metadata
assert gpb._num_nodes() == g.number_of_nodes()
assert gpb._num_edges() == g.number_of_edges()
assert gpb.num_partitions() == num_parts
gpb_meta = gpb.metadata()
assert len(gpb_meta) == num_parts
assert len(gpb.partid2nids(i)) == gpb_meta[i]['num_nodes']
assert len(gpb.partid2eids(i)) == gpb_meta[i]['num_edges']
part_sizes.append((gpb_meta[i]['num_nodes'], gpb_meta[i]['num_edges']))
local_nid = gpb.nid2localnid(F.boolean_mask(part_g.ndata[dgl.NID], part_g.ndata['inner_node']), i)
assert F.dtype(local_nid) in (F.int64, F.int32)
assert np.all(F.asnumpy(local_nid) == np.arange(0, len(local_nid)))
local_eid = gpb.eid2localeid(F.boolean_mask(part_g.edata[dgl.EID], part_g.edata['inner_edge']), i)
assert F.dtype(local_eid) in (F.int64, F.int32)
assert np.all(F.asnumpy(local_eid) == np.arange(0, len(local_eid)))
# Check the node map.
local_nodes = F.boolean_mask(part_g.ndata[dgl.NID], part_g.ndata['inner_node'])
llocal_nodes = F.nonzero_1d(part_g.ndata['inner_node'])
local_nodes1 = gpb.partid2nids(i)
assert F.dtype(local_nodes1) in (F.int32, F.int64)
assert np.all(np.sort(F.asnumpy(local_nodes)) == np.sort(F.asnumpy(local_nodes1)))
# Check the edge map.
local_edges = F.boolean_mask(part_g.edata[dgl.EID], part_g.edata['inner_edge'])
local_edges1 = gpb.partid2eids(i)
assert F.dtype(local_edges1) in (F.int32, F.int64)
assert np.all(np.sort(F.asnumpy(local_edges)) == np.sort(F.asnumpy(local_edges1)))
if reshuffle:
part_g.ndata['feats'] = F.gather_row(g.ndata['feats'], part_g.ndata['orig_id'])
part_g.edata['feats'] = F.gather_row(g.edata['feats'], part_g.edata['orig_id'])
# when we read node data from the original global graph, we should use orig_id.
local_nodes = F.boolean_mask(part_g.ndata['orig_id'], part_g.ndata['inner_node'])
local_edges = F.boolean_mask(part_g.edata['orig_id'], part_g.edata['inner_edge'])
else:
part_g.ndata['feats'] = F.gather_row(g.ndata['feats'], part_g.ndata[dgl.NID])
            part_g.edata['feats'] = F.gather_row(g.edata['feats'], part_g.edata[dgl.EID])
part_g.update_all(fn.copy_src('feats', 'msg'), fn.sum('msg', 'h'))
part_g.update_all(fn.copy_edge('feats', 'msg'), fn.sum('msg', 'eh'))
assert F.allclose(F.gather_row(g.ndata['h'], local_nodes),
F.gather_row(part_g.ndata['h'], llocal_nodes))
assert F.allclose(F.gather_row(g.ndata['eh'], local_nodes),
F.gather_row(part_g.ndata['eh'], llocal_nodes))
for name in ['labels', 'feats']:
assert name in node_feats
assert node_feats[name].shape[0] == len(local_nodes)
assert np.all(F.asnumpy(g.ndata[name])[F.asnumpy(local_nodes)] == F.asnumpy(node_feats[name]))
for name in ['feats']:
assert name in edge_feats
assert edge_feats[name].shape[0] == len(local_edges)
assert np.all(F.asnumpy(g.edata[name])[F.asnumpy(local_edges)] == F.asnumpy(edge_feats[name]))
if reshuffle:
node_map = []
edge_map = []
for i, (num_nodes, num_edges) in enumerate(part_sizes):
node_map.append(np.ones(num_nodes) * i)
edge_map.append(np.ones(num_edges) * i)
node_map = np.concatenate(node_map)
edge_map = np.concatenate(edge_map)
nid2pid = gpb.nid2partid(F.arange(0, len(node_map)))
assert F.dtype(nid2pid) in (F.int32, F.int64)
assert np.all(F.asnumpy(nid2pid) == node_map)
eid2pid = gpb.eid2partid(F.arange(0, len(edge_map)))
assert F.dtype(eid2pid) in (F.int32, F.int64)
assert np.all(F.asnumpy(eid2pid) == edge_map)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_partition():
g = create_random_graph(10000)
check_partition(g, 'metis', True)
check_partition(g, 'metis', False)
check_partition(g, 'random', True)
check_partition(g, 'random', False)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_hetero_partition():
g = create_random_graph(10000)
check_partition(g, 'metis', True)
check_partition(g, 'metis', False)
check_partition(g, 'random', True)
check_partition(g, 'random', False)
if __name__ == '__main__':
os.makedirs('/tmp/partition', exist_ok=True)
test_partition()
test_hetero_partition()
``` |
{
"source": "joshcarty/ra-resale",
"score": 3
} |
#### File: ra-resale/alerts/__init__.py
```python
import datetime
import re
import lxml.etree
import requests
EVENT_ID_PATTERN = re.compile(r"https?:\/\/(?:www\.)?ra\.co\/events\/(\d+)")
USER_AGENT = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
)
def make_request(url):
headers = {'User-Agent': USER_AGENT}
return requests.get(url, timeout=10, headers=headers)
def extract_event_id(url):
return EVENT_ID_PATTERN.search(url)[1]
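# Illustrative example (the URL is hypothetical):
# extract_event_id("https://ra.co/events/1234567") -> "1234567"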
def extract_title(dom):
return dom.xpath('//h1//text()')[0]
def extract_date(dom):
extracted = dom.xpath("//span[text() = 'Date']/../..//a//text()")
extracted = extracted[0].strip()
extracted = extracted.rsplit(', ', maxsplit=1)[-1]
return datetime.datetime.strptime(extracted, '%d %b %Y').date()
def extract_tickets(dom):
tickets = dom.xpath("//li[@id='ticket-types']/ul/li")
for ticket in tickets:
yield {
'title': extract_ticket_title(ticket),
'price': extract_price(ticket),
'available': extract_availability(ticket)
}
def extract_ticket_title(element):
path = [
'.//div[@class="pr8"]/text()',
'.//div[@class="type-title"]/text()'
]
return element.xpath('|'.join(path))[0]
def extract_price(element):
return element.xpath('.//div[@class="type-price"]/text()')[0]
def extract_availability(element):
mapping = {'closed': False, 'onsale but': True}
availability = element.xpath('./@class')[0]
return mapping.get(availability, False)
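# Resale is treated as active once no ticket type is still on sale,
# i.e. the event appears to be sold out.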
def is_resale_active(tickets):
return not any(ticket['available'] for ticket in tickets)
def parse_tickets(html):
try:
dom = lxml.etree.HTML(html)
tickets = list(extract_tickets(dom))
except IndexError:
raise ExtractionError()
return tickets
def parse_event(html):
try:
dom = lxml.etree.HTML(html)
title = extract_title(dom)
date = extract_date(dom)
except IndexError:
raise ExtractionError()
return {
'title': title,
'date': date
}
def get_tickets(url):
event_id = extract_event_id(url)
ticket_url = f"https://ra.co/widget/event/{event_id}/embedtickets"
html = make_request(ticket_url)
return parse_tickets(html.text)
def get_event(url):
html = make_request(url)
return parse_event(html.text)
def get_page(url):
event = get_event(url)
tickets = get_tickets(url)
event['tickets'] = tickets
event['resale_active'] = is_resale_active(tickets)
return event
class ExtractionError(Exception):
pass
class ResaleInactiveError(ExtractionError):
pass
class EventExpiredError(ExtractionError):
pass
``` |
{
"source": "joshcarty/tfgnn-ogb",
"score": 3
} |
#### File: tfgnn-ogb/tfgnn_ogb/data.py
```python
import random
from dataclasses import dataclass
from typing import Any, Generator, Iterator, List, Sequence, Set, Tuple
import networkx as nx
import numpy as np
import tensorflow as tf
import tensorflow_gnn as tfgnn
from ogb.nodeproppred import NodePropPredDataset
from tqdm import tqdm
def ogb_as_networkx_graph(name: str) -> Tuple[nx.Graph, dict, int]:
    """
    Load an Open Graph Benchmark dataset as a NetworkX graph, together with
    its split indices and number of classes.
    """
dataset = NodePropPredDataset(name)
splits = dataset.get_idx_split()
ogb_graph, labels = dataset[0]
num_nodes = ogb_graph["num_nodes"]
ogb_features = ogb_graph["node_feat"]
ogb_edgelist = ogb_graph["edge_index"]
ogb_node_indices = np.arange(num_nodes)
num_classes = labels.max() + 1
graph = nx.from_edgelist(ogb_edgelist.T)
data = zip(ogb_node_indices, ogb_features, labels)
features = {
node: {"features": features, "label": label}
for node, features, label in data
}
nx.set_node_attributes(graph, values=features)
return graph, splits, num_classes
@dataclass
class NodeSampler:
graph: nx.Graph
neighbour_samples: Tuple[int, ...] = (10, 2)
@property
def num_neighbourhoods(self) -> int:
return len(self.neighbour_samples)
def sample(self, seed_node: int) -> nx.Graph:
sampled_nodes = {seed_node}
to_sample = sampled_nodes.copy()
for num_neighbours in self.neighbour_samples:
for node in to_sample:
neighbourhood_nodes = self.gather_neighbourhood(
node, exclude_nodes=sampled_nodes
)
if not neighbourhood_nodes:
continue
sampled_neighbourhood_nodes = self.sample_neighbourhood(
neighbourhood_nodes, num_neighbours
)
sampled_nodes.update(sampled_neighbourhood_nodes)
to_sample = sampled_neighbourhood_nodes.copy()
return self.graph.subgraph(sampled_nodes)
def gather_neighbourhood(
self, node: int, exclude_nodes: Set[int]
) -> Set[int]:
neighbourhood_nodes = set(self.graph.neighbors(node))
neighbourhood_nodes = self.exclude_already_sampled_nodes(
neighbourhood_nodes, exclude_nodes
)
return neighbourhood_nodes
@staticmethod
def exclude_already_sampled_nodes(
neighbourhood_nodes: Set[int], sampled_nodes: Set[int]
    ) -> Set[int]:
return neighbourhood_nodes - sampled_nodes
@staticmethod
def sample_neighbourhood(nodes: Set[int], num_neighbours: int) -> Set[int]:
return set(random.choices(list(nodes), k=num_neighbours))
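# Illustrative usage (the graph and seed node are placeholders):
# sampler = NodeSampler(graph, neighbour_samples=(10, 2))
# subgraph = sampler.sample(seed_node=0)  # sampled 2-hop ego subgraph around node 0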
def _prepare_data_for_node_classification(
graph: nx.Graph, seed_node: int
) -> List[Tuple[Any, Any]]:
"""
Position seed node as the first node in the data.
TensorFlow GNN has a convention whereby the node to be classified, the "seed node",
is positioned first in the component. This is for use with layers such as
`tfgnn.keras.layers.ReadoutFirstNode` which extracts the first node from a component.
"""
seed_data = graph.nodes(data=True)[seed_node]
data = [(seed_data["features"], seed_data["label"])]
data += [
(data["features"], data["label"])
for node, data in graph.nodes(data=True)
if node != seed_node
]
return data
def generate_graph_samples(
graph: nx.Graph, seed_nodes: Sequence[int], sampler: NodeSampler
) -> Iterator[tfgnn.GraphTensor]:
"""
Lazily samples subgraphs from a NetworkX graph and converts them to
GraphTensors.
In practice, this would be a preprocessing step that builds the subgraphs
    using Apache Beam, constructs the GraphTensors, and serialises them as
tf.Examples.
"""
for seed_node in seed_nodes:
subgraph = sampler.sample(seed_node)
subgraph = nx.convert_node_labels_to_integers(
subgraph, label_attribute="graph_index"
)
subgraph_seed_node = next(
node
for node, data in subgraph.nodes(data=True)
if data["graph_index"] == seed_node
)
num_edges = subgraph.number_of_edges()
edge_list = np.asarray(subgraph.edges)
edges = tfgnn.EdgeSet.from_fields(
sizes=[num_edges],
adjacency=tfgnn.Adjacency.from_indices(
source=("paper", edge_list[:, 0]),
target=("paper", edge_list[:, 1]),
),
)
data = _prepare_data_for_node_classification(
subgraph, subgraph_seed_node
)
features, labels = zip(*data)
num_nodes = subgraph.number_of_nodes()
nodes = tfgnn.NodeSet.from_fields(
features={
"hidden_state": np.asarray(features),
"label": np.asarray(labels),
},
sizes=[num_nodes],
)
context = tfgnn.Context.from_fields(features=None)
graph_tensor = tfgnn.GraphTensor.from_pieces(
edge_sets={"cites": edges},
node_sets={"paper": nodes},
context=context,
)
yield graph_tensor
def merge_graph_batches(graph: tfgnn.GraphTensor) -> tfgnn.GraphTensor:
"""TensorFlow GNN expects batches of graphs to be a single component."""
return graph.merge_batch_to_components()
def load_dataset_from_graph(
graph: nx.Graph,
split: Sequence[int],
sampler: NodeSampler,
batch_size: int,
graph_type_spec: tfgnn.GraphTensorSpec,
) -> tf.data.Dataset:
"""
Load a TensorFlow Dataset sampled as ego subgraphs from a NetworkX graph.
Only suitable for small graphs or very low neighbourhood sizes. Since the
Dataset is small, we can cache the data in memory. For larger neighbourhood
sizes, it's preferable to write examples to disk using `main` and loading
with `load_dataset_from_examples`.
"""
def generator() -> Generator[tfgnn.GraphTensor, None, None]:
"""tf.data.Dataset expects a Callable that returns a Generator."""
samples = generate_graph_samples(graph, split, sampler=sampler)
yield from samples
dataset = tf.data.Dataset.from_generator(
generator, output_signature=graph_type_spec
)
return (
dataset.cache()
.repeat()
.batch(batch_size, drop_remainder=True)
.map(merge_graph_batches)
.prefetch(tf.data.AUTOTUNE)
)
def load_dataset_from_examples(
path: str, batch_size: int, type_spec: tfgnn.GraphTensorSpec
) -> tf.data.TFRecordDataset:
return (
tf.data.TFRecordDataset(path)
.batch(batch_size, drop_remainder=True)
.map(lambda example: tfgnn.parse_example(type_spec, example))
.repeat()
.map(merge_graph_batches)
.prefetch(tf.data.AUTOTUNE)
)
def write_graph_tensors_to_examples(
graph_generator: Iterator[tfgnn.GraphTensor], path: str
) -> None:
# TODO: Shard over multiple files.
with tf.io.TFRecordWriter(path) as writer:
for graph in graph_generator:
example = tfgnn.write_example(graph)
writer.write(example.SerializeToString())
def main() -> None:
dataset = "ogbn-arxiv"
neighbour_samples = (10, 2)
graph, splits, _ = ogb_as_networkx_graph(dataset)
train_split, val_split = splits["train"], splits["valid"]
sampler = NodeSampler(graph, neighbour_samples)
for split_name, split in (("train", train_split), ("val", val_split)):
generator = generate_graph_samples(graph, split, sampler)
filename = f"{split_name}_hop_{'-'.join(map(str, neighbour_samples))}.tfrecords"
write_graph_tensors_to_examples(
graph_generator=tqdm(generator, total=len(split)),
path=f"data/{dataset}/{split_name}/{filename}",
)
if __name__ == "__main__":
main()
``` |
{
"source": "joshch1630/movie_data_platform",
"score": 2
} |
#### File: src/aws_lambda/imdb_data_injection_lambda.py
```python
import os
import gzip
import logging
import boto3
import wget
logger = logging.getLogger()
logger.setLevel(logging.INFO)
S3_BUCKET_NAME = "movie-data-platform.mpd"
TEMP_PATH = "/tmp/"
IMDB_PATH = "imdb/"
RAW_ZONE_PATH = "raw_zone/"
IMDB_URL_PREFIX = "https://datasets.imdbws.com/"
IMDB_FILE_NAME_LIST = [
"name.basics.tsv.gz",
"title.akas.tsv.gz",
"title.basics.tsv.gz",
"title.crew.tsv.gz",
"title.episode.tsv.gz",
"title.principals.tsv.gz",
"title.ratings.tsv.gz"
]
def lambda_handler(event, context):
logger.info("===== Start IMDB data injection =====")
for imdb_file_name in IMDB_FILE_NAME_LIST:
zip_file_path = get_file_from_imdb(imdb_file_name)
file_content = unzip_file(zip_file_path)
save_file_into_s3(file_content, imdb_file_name)
os.remove(zip_file_path)
file_content = None
start_glue_workflow()
logger.info("===== End IMDB data injection =====")
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
}
}
def get_file_from_imdb(imdb_file_name):
logger.info("Start get_file_from_imdb")
remote_url = IMDB_URL_PREFIX + imdb_file_name
zip_file_path = TEMP_PATH + imdb_file_name
logger.info('zip_file: {}'.format(zip_file_path))
wget.download(remote_url, zip_file_path)
logger.info('File info: {}'.format(os.stat(zip_file_path)))
return zip_file_path
def unzip_file(zip_file_path):
logger.info("Start unzip_file")
with gzip.open(zip_file_path, 'r') as f:
file_content = f.read()
return file_content
def save_file_into_s3(file_content, file_name):
logger.info("Start save_file_into_s3")
unzip_file_name = file_name.replace(".gz", "")
logger.info('unzip_file: {}'.format(unzip_file_name))
s3_raw_path = RAW_ZONE_PATH + IMDB_PATH + unzip_file_name
s3 = boto3.resource("s3")
s3.Bucket(S3_BUCKET_NAME).put_object(Key=s3_raw_path, Body=file_content)
def start_glue_workflow():
logger.info("Start start_glue_workflow")
glue = boto3.client("glue")
glue.start_workflow_run(
Name = "imdb_data_cleansing_glue_workflow"
)
```
#### File: src/aws_lambda/movie_lens_data_injection_lambda.py
```python
import os
import zipfile
import logging
import boto3
import wget
logger = logging.getLogger()
logger.setLevel(logging.INFO)
S3_BUCKET_NAME = "movie-data-platform.mpd"
TEMP_PATH = "/tmp/"
MOVIE_LENS_PATH = "movie_lens/"
RAW_ZONE_PATH = "raw_zone/"
MOVIE_LENS_URL_PREFIX = "https://files.grouplens.org/datasets/movielens/"
MOVIE_LENS_FILE_NAME = "ml-25m.zip"
MOVIE_LENS_DIR_NAME = "ml-25m/"
def lambda_handler(event, context):
logger.info("===== Start MovieLens data injection =====")
zip_file_path = get_file_from_movie_lens(MOVIE_LENS_URL_PREFIX + MOVIE_LENS_FILE_NAME, MOVIE_LENS_FILE_NAME)
unzip_file_and_save_into_s3(zip_file_path)
start_glue_workflow()
logger.info("===== End MovieLens data injection =====")
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
}
}
def get_file_from_movie_lens(remote_url, file_name):
logger.info("Start get_file_from_movie_lens")
zip_file_path = TEMP_PATH + file_name
logger.info('zip_file_path: {}'.format(zip_file_path))
unzip_file_name = file_name.replace(".zip", "")
logger.info('unzip_file: {}'.format(unzip_file_name))
wget.download(remote_url, zip_file_path)
logger.info('File info: {}'.format(os.stat(zip_file_path)))
return zip_file_path
def unzip_file_and_save_into_s3(zip_file_path):
logger.info("Start unzip_file_and_save_into_s3")
s3 = boto3.resource("s3")
z = zipfile.ZipFile(zip_file_path)
logger.info('file name list: {}'.format(", ".join(str(file_name) for file_name in z.namelist())))
for file_name in z.namelist():
s3_raw_path = RAW_ZONE_PATH + MOVIE_LENS_PATH + file_name.replace(MOVIE_LENS_DIR_NAME, "")
logger.info('s3_raw_path: {}'.format(s3_raw_path))
s3.Bucket(S3_BUCKET_NAME).put_object(Key=s3_raw_path, Body=z.open(file_name))
def start_glue_workflow():
logger.info("Start start_glue_workflow")
glue = boto3.client("glue")
glue.start_workflow_run(
Name = "movie_lens_data_cleansing_glue_workflow"
)
``` |
{
"source": "joshchang1112/gcnn-survey-paper",
"score": 2
} |
#### File: joshchang1112/gcnn-survey-paper/best_model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import scipy.stats as stats
import tensorflow as tf
flags.DEFINE_string('dir', '/tmp/launch', 'path were models are saved.')
flags.DEFINE_string('target', 'node_acc', 'target metric to use.')
flags.DEFINE_string('datasets', 'cora', 'datasets to use.')
flags.DEFINE_string('drop_prop', '0-10-20-30-40-50-60-70-80-90',
'proportion of edges dropped')
flags.DEFINE_string('save_file', 'best_params', 'name of the file to save the '
                    'results to.')
flags.DEFINE_string('models', 'Gcn', 'name of model directories to parse.')
FLAGS = flags.FLAGS
def get_val_test_acc(data):
"""Parses log file to retrieve test and val accuracy."""
data = [x.split() for x in data if len(x.split()) > 1]
val_acc_idx = data[-4].index('val_{}'.format(FLAGS.target))
test_acc_idx = data[-3].index('test_{}'.format(FLAGS.target))
val_acc = data[-4][val_acc_idx + 2]
test_acc = data[-3][test_acc_idx + 2]
return float(val_acc) * 100, float(test_acc) * 100
def main(_):
log_file = tf.gfile.Open(os.path.join(FLAGS.dir, FLAGS.save_file), 'w')
for dataset in FLAGS.datasets.split('-'):
for prop in FLAGS.drop_prop.split('-'):
dir_path = os.path.join(FLAGS.dir, dataset, prop)
if tf.gfile.IsDirectory(dir_path):
print(dir_path)
for model_name in tf.gfile.ListDirectory(dir_path):
if model_name in FLAGS.models.split('-'):
model_dir = os.path.join(dir_path, model_name)
train_log_files = [
filename for filename in tf.gfile.ListDirectory(model_dir)
if 'log' in filename
]
eval_stats = {}
for filename in train_log_files:
data = tf.gfile.Open(os.path.join(model_dir,
filename)).readlines()
nb_lines = len(data)
if nb_lines > 0:
if 'Training done' in data[-1]:
val_acc, test_acc = get_val_test_acc(data)
params = '-'.join(filename.split('-')[:-1])
if params in eval_stats:
eval_stats[params]['val'].append(val_acc)
eval_stats[params]['test'].append(test_acc)
else:
eval_stats[params] = {'val': [val_acc], 'test': [test_acc]}
best_val_metric = -1
best_params = None
for params in eval_stats:
val_metric = np.mean(eval_stats[params]['val'])
if val_metric > best_val_metric:
best_val_metric = val_metric
best_params = params
# print(eval_stats)
log_file.write('\n' + model_dir + '\n')
log_file.write('Best params: {}\n'.format(best_params))
log_file.write('val_{}: {} +- {}\n'.format(
FLAGS.target, round(np.mean(eval_stats[best_params]['val']), 2),
round(stats.sem(eval_stats[best_params]['val']), 2)))
log_file.write('test_{}: {} +- {}\n'.format(
FLAGS.target, round(
np.mean(eval_stats[best_params]['test']), 2),
round(stats.sem(eval_stats[best_params]['test']), 2)))
if __name__ == '__main__':
app.run(main)
```
#### File: gcnn-survey-paper/models/edge_models.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from models.base_models import EdgeModel
import tensorflow as tf
from utils.model_utils import compute_adj
from utils.model_utils import gat_module
from utils.model_utils import gcn_module
from utils.model_utils import mlp_module
class Gae(EdgeModel):
"""Graph Auto-Encoder (GAE) (Kipf & al) for link prediction.
arXiv link: https://arxiv.org/abs/1611.07308
"""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAE model."""
sparse = self.sparse_features
in_dim = self.input_dim
with tf.variable_scope('edge-model'):
h0 = gcn_module(node_features, adj_matrix, self.n_hidden, self.p_drop,
is_training, in_dim, sparse)
adj_matrix_pred = compute_adj(h0, self.att_mechanism, self.p_drop,
is_training)
self.adj_matrix_pred = tf.nn.sigmoid(adj_matrix_pred)
return adj_matrix_pred
class Egat(EdgeModel):
"""Edge-GAT for link prediction."""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAE model."""
sparse = self.sparse_features
in_dim = self.input_dim
with tf.variable_scope('edge-model'):
h0 = gat_module(
node_features,
adj_matrix,
self.n_hidden,
self.n_att,
self.p_drop,
is_training,
in_dim,
sparse,
average_last=True)
adj_matrix_pred = compute_adj(h0, self.att_mechanism, self.p_drop,
is_training)
self.adj_matrix_pred = tf.nn.sigmoid(adj_matrix_pred)
return adj_matrix_pred
class Vgae(EdgeModel):
"""Variational Graph Auto-Encoder (VGAE) (Kipf & al) for link prediction.
arXiv link: https://arxiv.org/abs/1611.07308
"""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAE model."""
sparse = self.sparse_features
in_dim = self.input_dim
with tf.variable_scope('edge-model'):
h0 = gcn_module(node_features, adj_matrix, self.n_hidden[:-1],
self.p_drop, is_training, in_dim, sparse)
# N x F
with tf.variable_scope('mean'):
z_mean = gcn_module(h0, adj_matrix, self.n_hidden[-1:], self.p_drop,
is_training, self.n_hidden[-2], False)
self.z_mean = z_mean
with tf.variable_scope('std'):
# N x F
z_log_std = gcn_module(h0, adj_matrix, self.n_hidden[-1:], self.p_drop,
is_training, self.n_hidden[-2], False)
self.z_log_std = z_log_std
# add noise during training
noise = tf.random_normal([self.nb_nodes, self.n_hidden[-1]
]) * tf.exp(z_log_std)
z = tf.cond(is_training, lambda: tf.add(z_mean, noise),
lambda: z_mean)
# N x N
adj_matrix_pred = compute_adj(z, self.att_mechanism, self.p_drop,
is_training)
self.adj_matrix_pred = tf.nn.sigmoid(adj_matrix_pred)
return adj_matrix_pred
def _compute_edge_loss(self, adj_pred, adj_train):
"""Overrides _compute_edge_loss to add Variational Inference objective."""
log_lik = super(Vgae, self)._compute_edge_loss(adj_pred, adj_train)
norm = self.nb_nodes**2 / float((self.nb_nodes**2 - self.nb_edges) * 2)
kl_mat = 0.5 * tf.reduce_sum(
1 + 2 * self.z_log_std - tf.square(self.z_mean) - tf.square(
tf.exp(self.z_log_std)), 1)
kl = tf.reduce_mean(kl_mat) / self.nb_nodes
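    # `kl` above is the (scaled) negative KL divergence of N(z_mean, exp(z_log_std))
    # from the standard-normal prior, so subtracting it below adds the KL penalty
    # to the reconstruction term (assuming the parent returns a cross-entropy
    # loss), as in the VGAE objective.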
edge_loss = norm * log_lik - kl
return edge_loss
class Emlp(EdgeModel):
"""Simple baseline for link prediction.
Creates a tensorflow graph to train and evaluate EMLP on graph data.
"""
def compute_inference(self, node_features, _, is_training):
"""Forward step for GAE model."""
sparse = self.sparse_features
in_dim = self.input_dim
with tf.variable_scope('edge-model'):
h0 = mlp_module(
node_features,
self.n_hidden,
self.p_drop,
is_training,
in_dim,
sparse,
use_bias=False)
adj_matrix_pred = compute_adj(h0, self.att_mechanism, self.p_drop,
is_training)
self.adj_matrix_pred = tf.nn.sigmoid(adj_matrix_pred)
return adj_matrix_pred
```
#### File: gcnn-survey-paper/models/node_edge_models.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from models.base_models import NodeEdgeModel
from models.edge_models import Gae
from models.node_models import Gat
from models.node_models import Gcn
import tensorflow as tf
from utils.model_utils import compute_adj
from utils.model_utils import gat_module
from utils.model_utils import gcn_module
from utils.model_utils import get_sp_topk
from utils.model_utils import mask_edges
class GaeGat(NodeEdgeModel):
"""GAE for link prediction and GAT for node classification."""
def __init__(self, config):
"""Initializes EGCNGAT model."""
super(GaeGat, self).__init__(config)
self.edge_model = Gae(config)
self.node_model = Gat(config)
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
adj_matrix_pred = self.edge_model.compute_inference(
node_features_in, sp_adj_matrix, is_training)
self.adj_matrix_pred = adj_matrix_pred
adj_mask = get_sp_topk(adj_matrix_pred, sp_adj_matrix, self.nb_nodes,
self.topk)
self.adj_mask = adj_mask
# masked_adj_matrix_pred = tf.multiply(adj_mask,
# tf.nn.sigmoid(adj_matrix_pred))
masked_adj_matrix_pred = mask_edges(tf.nn.sigmoid(adj_matrix_pred),
adj_mask)
sp_adj_pred = tf.contrib.layers.dense_to_sparse(masked_adj_matrix_pred)
logits = self.node_model.compute_inference(node_features_in, sp_adj_pred,
is_training)
return logits, adj_matrix_pred
class GaeGcn(NodeEdgeModel):
"""GAE for link prediction and GCN for node classification."""
def __init__(self, config):
"""Initializes EGCNGCN model."""
super(GaeGcn, self).__init__(config)
self.edge_model = Gae(config)
self.node_model = Gcn(config)
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
adj_matrix_pred = self.edge_model.compute_inference(
node_features_in, sp_adj_matrix, is_training)
self.adj_matrix_pred = adj_matrix_pred
adj_mask = get_sp_topk(adj_matrix_pred, sp_adj_matrix, self.nb_nodes,
self.topk)
sp_adj_pred = tf.contrib.layers.dense_to_sparse(
tf.multiply(adj_mask, tf.nn.leaky_relu(adj_matrix_pred)))
sp_adj_pred = tf.sparse_softmax(sp_adj_pred)
logits = self.node_model.compute_inference(node_features_in, sp_adj_pred,
is_training)
return logits, adj_matrix_pred
############################ EXPERIMENTAL MODELS #############################
class GatGraphite(NodeEdgeModel):
"""Gae for link prediction and GCN for node classification."""
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
with tf.variable_scope('edge-model'):
z_latent = gat_module(
node_features_in,
sp_adj_matrix,
self.n_hidden_edge,
self.n_att_edge,
self.p_drop_edge,
is_training,
self.input_dim,
self.sparse_features,
average_last=False)
adj_matrix_pred = compute_adj(z_latent, self.att_mechanism,
self.p_drop_edge, is_training)
self.adj_matrix_pred = adj_matrix_pred
with tf.variable_scope('node-model'):
concat = True
if concat:
z_latent = tf.sparse_concat(
axis=1,
sp_inputs=[
tf.contrib.layers.dense_to_sparse(z_latent), node_features_in
],
)
sparse_features = True
input_dim = self.n_hidden_edge[-1] * self.n_att_edge[
-1] + self.input_dim
else:
sparse_features = False
input_dim = self.n_hidden_edge[-1] * self.n_att_edge[-1]
logits = gat_module(
z_latent,
sp_adj_matrix,
self.n_hidden_node,
self.n_att_node,
self.p_drop_node,
is_training,
input_dim,
sparse_features=sparse_features,
average_last=False)
return logits, adj_matrix_pred
class GaeGatConcat(NodeEdgeModel):
"""EGCN for link prediction and GCN for node classification."""
def __init__(self, config):
"""Initializes EGCN_GAT model."""
super(GaeGatConcat, self).__init__(config)
self.edge_model = Gae(config)
self.node_model = Gat(config)
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
with tf.variable_scope('edge-model'):
z_latent = gcn_module(node_features_in, sp_adj_matrix, self.n_hidden_edge,
self.p_drop_edge, is_training, self.input_dim,
self.sparse_features)
adj_matrix_pred = compute_adj(z_latent, self.att_mechanism,
self.p_drop_edge, is_training)
self.adj_matrix_pred = adj_matrix_pred
with tf.variable_scope('node-model'):
z_latent = tf.sparse_concat(
axis=1,
sp_inputs=[
tf.contrib.layers.dense_to_sparse(z_latent), node_features_in
])
sparse_features = True
input_dim = self.n_hidden_edge[-1] + self.input_dim
sp_adj_train = tf.SparseTensor(
indices=sp_adj_matrix.indices,
values=tf.ones_like(sp_adj_matrix.values),
dense_shape=sp_adj_matrix.dense_shape)
logits = gat_module(
z_latent,
sp_adj_train,
self.n_hidden_node,
self.n_att_node,
self.p_drop_node,
is_training,
input_dim,
sparse_features=sparse_features,
average_last=True)
return logits, adj_matrix_pred
class GaeGcnConcat(NodeEdgeModel):
"""EGCN for link prediction and GCN for node classification."""
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
with tf.variable_scope('edge-model'):
z_latent = gcn_module(node_features_in, sp_adj_matrix, self.n_hidden_edge,
self.p_drop_edge, is_training, self.input_dim,
self.sparse_features)
adj_matrix_pred = compute_adj(z_latent, self.att_mechanism,
self.p_drop_edge, is_training)
self.adj_matrix_pred = adj_matrix_pred
with tf.variable_scope('node-model'):
z_latent = tf.sparse_concat(
axis=1,
sp_inputs=[
tf.contrib.layers.dense_to_sparse(z_latent), node_features_in
])
sparse_features = True
input_dim = self.n_hidden_edge[-1] + self.input_dim
logits = gcn_module(
z_latent,
sp_adj_matrix,
self.n_hidden_node,
self.p_drop_node,
is_training,
input_dim,
sparse_features=sparse_features)
return logits, adj_matrix_pred
class Gcat(NodeEdgeModel):
"""1 iteration Graph Convolution Attention Model."""
def __init__(self, config):
"""Initializes GCAT model."""
super(Gcat, self).__init__(config)
self.edge_model = Gae(config)
self.node_model = Gcn(config)
def compute_inference(self, node_features_in, sp_adj_matrix, is_training):
"""Forward pass for GAT model."""
adj_matrix_pred = self.edge_model.compute_inference(
node_features_in, sp_adj_matrix, is_training)
sp_adj_mask = tf.SparseTensor(
indices=sp_adj_matrix.indices,
values=tf.ones_like(sp_adj_matrix.values),
dense_shape=sp_adj_matrix.dense_shape)
sp_adj_att = sp_adj_mask * adj_matrix_pred
sp_adj_att = tf.SparseTensor(
indices=sp_adj_att.indices,
values=tf.nn.leaky_relu(sp_adj_att.values),
dense_shape=sp_adj_att.dense_shape)
sp_adj_att = tf.sparse_softmax(sp_adj_att)
logits = self.node_model.compute_inference(node_features_in, sp_adj_att,
is_training)
return logits, adj_matrix_pred
```
#### File: gcnn-survey-paper/models/node_models.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from models.base_models import NodeModel
import tensorflow as tf
from utils.model_utils import cheby_module
from utils.model_utils import compute_adj
from utils.model_utils import gat_module
from utils.model_utils import gcn_module
from utils.model_utils import gcn_pool_layer
from utils.model_utils import mlp_module
from utils.model_utils import sp_gat_layer
from utils.model_utils import sp_gcn_layer
class Gat(NodeModel):
"""Graph Attention (GAT) Model (Velickovic & al).
arXiv link: https://arxiv.org/abs/1710.10903
"""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAT model."""
sparse = self.sparse_features
in_dim = self.input_dim
average_last = True
with tf.variable_scope('node-model'):
logits = gat_module(node_features, adj_matrix, self.n_hidden, self.n_att,
self.p_drop, is_training, in_dim, sparse,
average_last)
return logits
class Gcn(NodeModel):
"""Graph convolution network (Kipf & al).
arXiv link: https://arxiv.org/abs/1609.02907
"""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for graph convolution model."""
with tf.variable_scope('node-model'):
logits = gcn_module(node_features, adj_matrix, self.n_hidden, self.p_drop,
is_training, self.input_dim, self.sparse_features)
return logits
class Mlp(NodeModel):
"""Multi-layer perceptron model."""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for graph convolution model."""
with tf.variable_scope('node-model'):
logits = mlp_module(node_features, self.n_hidden, self.p_drop,
is_training, self.input_dim, self.sparse_features,
use_bias=True)
return logits
class SemiEmb(NodeModel):
"""Deep Learning via Semi-Supervised Embedding (Weston & al).
paper: http://icml2008.cs.helsinki.fi/papers/340.pdf
"""
def __init__(self, config):
super(SemiEmb, self).__init__(config)
self.semi_emb_k = config.semi_emb_k
def compute_inference(self, node_features, adj_matrix, is_training):
with tf.variable_scope('node-model'):
hidden_repr = mlp_module(node_features, self.n_hidden, self.p_drop,
is_training, self.input_dim,
self.sparse_features, use_bias=True,
return_hidden=True)
logits = hidden_repr[-1]
hidden_repr_reg = hidden_repr[self.semi_emb_k]
l2_scores = compute_adj(hidden_repr_reg, self.att_mechanism, self.p_drop,
is_training=False)
self.l2_scores = tf.gather_nd(l2_scores, adj_matrix.indices)
return logits
def _compute_node_loss(self, logits, labels):
supervised_loss = super(SemiEmb, self)._compute_node_loss(logits, labels)
# supervised_loss = tf.nn.softmax_cross_entropy_with_logits(
# labels=labels, logits=logits)
# supervised_loss = tf.reduce_sum(supervised_loss) / self.nb_nodes
reg_loss = tf.reduce_mean(self.l2_scores)
return supervised_loss + self.edge_reg * reg_loss
class Cheby(NodeModel):
"""Chebyshev polynomials for Spectral Graph Convolutions (Defferrard & al).
arXiv link: https://arxiv.org/abs/1606.09375
"""
def __init__(self, config):
super(Cheby, self).__init__(config)
self.cheby_k_loc = config.cheby_k_loc
def compute_inference(self, node_features, normalized_laplacian, is_training):
with tf.variable_scope('node-model'):
dense_normalized_laplacian = tf.sparse_to_dense(
sparse_indices=normalized_laplacian.indices,
output_shape=normalized_laplacian.dense_shape,
sparse_values=normalized_laplacian.values)
cheby_polynomials = [tf.eye(self.nb_nodes), dense_normalized_laplacian]
self.cheby = cheby_polynomials
for _ in range(2, self.cheby_k_loc+1):
cheby_polynomials.append(2 * tf.sparse_tensor_dense_matmul(
normalized_laplacian, cheby_polynomials[-1]) - cheby_polynomials[-2]
)
logits = cheby_module(node_features, cheby_polynomials, self.n_hidden,
self.p_drop, is_training, self.input_dim,
self.sparse_features)
return logits
############################ EXPERIMENTAL MODELS #############################
class Hgat(NodeModel):
"""Hierarchical Graph Attention (GAT) Model."""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAT model."""
in_dim = self.input_dim
att = []
for j in range(4):
with tf.variable_scope('gat-layer1-att{}'.format(j)):
att.append(
sp_gat_layer(node_features, adj_matrix, in_dim, 8, self.p_drop,
is_training, True))
hidden_2 = []
hidden_2.append(tf.nn.elu(tf.concat(att[:2], axis=-1)))
hidden_2.append(tf.nn.elu(tf.concat(att[2:], axis=-1)))
att = []
for j in range(2):
with tf.variable_scope('gat-layer2-att{}'.format(j)):
att.append(
sp_gat_layer(hidden_2[j], adj_matrix, 16, 7, self.p_drop,
is_training, False))
return tf.add_n(att) / 2.
class Pgcn(NodeModel):
"""Pooling Graph Convolution Network."""
def compute_inference(self, node_features, adj_matrix, is_training):
adj_matrix_dense = tf.sparse_to_dense(
sparse_indices=adj_matrix.indices,
output_shape=adj_matrix.dense_shape,
sparse_values=adj_matrix.values,
validate_indices=False)
adj_matrix_dense = tf.cast(tf.greater(adj_matrix_dense, 0), tf.float32)
adj_matrix_dense = tf.expand_dims(adj_matrix_dense, -1) # N x N x 1
in_dim = self.input_dim
sparse = self.sparse_features
for i, out_dim in enumerate(self.n_hidden[:-1]):
if i > 0:
sparse = False
with tf.variable_scope('gcn-pool-{}'.format(i)):
node_features = gcn_pool_layer(
node_features,
adj_matrix_dense,
in_dim=in_dim,
out_dim=out_dim,
sparse=sparse,
is_training=is_training,
p_drop=self.p_drop)
node_features = tf.reshape(node_features, (-1, out_dim))
node_features = tf.contrib.layers.bias_add(node_features)
node_features = tf.nn.elu(node_features)
in_dim = out_dim
with tf.variable_scope('gcn-layer-last'):
logits = sp_gcn_layer(node_features, adj_matrix, in_dim,
self.n_hidden[-1], self.p_drop, is_training, False)
return logits
``` |
{
"source": "joshchang/autoencirt",
"score": 2
} |
#### File: autoencirt/scripts/rwas_test.py
```python
import math
from copy import copy, deepcopy
import numpy as np
import pandas as pd
from autoencirt.irt import GRModel
from autoencirt.data.rwa import item_text, get_data
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def main():
data, num_people = get_data(reorient=False)
item_names = [f"Q{j}" for j in range(1, 23)]
grm = GRModel(
data=data.shuffle(buffer_size=200),
item_keys=item_names,
num_people=num_people,
dim=2,
xi_scale=1e-4,
eta_scale=1e-4,
weight_exponent=1.,
response_cardinality=10,
)
# ds = next(iter(data.batch(121)))
# p = grm.surrogate_distribution.sample(13)
# grm.log_likelihood(**p, responses=ds)
# grm.unormalized_log_prob(**p, data=ds)
losses = grm.calibrate_advi(
num_epochs=5, rel_tol=1e-4,
learning_rate=.01, clip_value=4.,
data_batches=10
)
print(
grm.calibrated_expectations['discriminations'][0, ..., 0]
)
grm.calibrate_mcmc(
num_steps=1000, burnin=500)
print(
grm.calibrated_expectations['discriminations'][0, ..., 0]
)
if __name__ == "__main__":
main()
``` |
{
"source": "joshchea/geo-utils",
"score": 2
} |
#### File: geo-utils/scripts/compute_gcd.py
```python
from math import *
def computeGCD(lat1,lon1,lat2,lon2):
#computes great circle distance from lat/lon
'''lat1/lon1 = lat/lon of first pt
lat2/lon2 = lat/lon of second pt
'''
degRad = pi/180
lat1 = degRad*lat1
lon1 = degRad*lon1
lat2 = degRad*lat2
lon2 = degRad*lon2
dellambda = lon2-lon1
Numerator = sqrt((cos(lat2)*sin(dellambda))**2 + (cos(lat1)*sin(lat2)- sin(lat1)*cos(lat2)*cos(dellambda))**2)
Denominator = sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(dellambda)
delSigma = atan2(Numerator,Denominator)
return 3963.19059*delSigma
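# Illustrative check (coordinates approximate): New York (40.71, -74.01) to
# Los Angeles (34.05, -118.24) comes out to roughly 2,450 miles, since
# 3963.19059 is the Earth's equatorial radius in miles.
# computeGCD(40.71, -74.01, 34.05, -118.24)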
``` |
{
"source": "joshchea/python-tdm",
"score": 2
} |
#### File: python-tdm/scripts/CalcLogitChoice.py
```python
import numpy
import time
import math
#from memory_profiler import profile
def CalcMultinomialChoice(Utils, getLogSumAccess = 0):
'''Utils = Dictionary of utility matrices for each mode
ex. Utils = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
Probs = {}
eU = {}
eU_total = numpy.zeros(Utils[Utils.keys()[0]].shape)
for key in Utils.keys():
eU[key] = numpy.exp(Utils[key])
eU_total+=eU[key]
if getLogSumAccess <> 0:
lnSumAccess = numpy.log(eU_total)
eU_total[eU_total == 0] = 0.0001
for key in eU.keys():
Probs[key] = eU[key]/eU_total
del eU, eU_total, Utils
if getLogSumAccess == 0:
return Probs
else:
return Probs, lnSumAccess
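# Illustrative call with toy 1x1 utility "matrices" (mode names are placeholders):
# Utils = {'auto': numpy.array([[-0.5]]), 'transit': numpy.array([[-1.5]])}
# Probs = CalcMultinomialChoice(Utils)
# Probs['auto'] -> ~0.73, Probs['transit'] -> ~0.27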
def CalcPivotPoint(Utils, Po):
'''
Utils = Updated delta utility matrices in a dictionary i.e delta of Uk (k = mode)
ex. Utils = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
Po = Base probabilities in a dictionary
ex. Po = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
'''
Probs = {}
PeU = {}
PeU_total = numpy.zeros(Utils[Utils.keys()[0]].shape)
for key in Utils.keys():
PeU[key] = Po[key]*numpy.exp(Utils[key])
PeU_total+=PeU[key]
PeU_total[PeU_total == 0] = 0.0001
for key in PeU.keys():
Probs[key] = PeU[key]/PeU_total
del PeU, PeU_total, Utils
return Probs
#@profile
def CalcNestedChoice(TreeDefn, MatRefs, numZn, getLogSumAccess = 0):
'''
#TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR', 'AC']],
# (1,'AU'):[0.992,['CD', 'CP']],
# (1,'TR'):[0.992,['TB', 'TP']],
# (1,'AC'):[0.992,['BK', 'WK']]}
#
#Key-> (Level ID, Level Code): Values-> (LogSum Parameter enters as: 1/lambda, SubLevel IDs)
# ROOT should always be ID = 0 and Code = 'ROOT'
# ROOT
# / | \
# / | \
# / | \
# AU TR AC(logsum parameter)
# /\ /\ /\
# CD CP TB TP BK WK
#
#MatRefs = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0,
# 'CD':Ucd), 'CP':Ucp),
# 'TB':Utb), 'TP':Utp),
# 'BK':Ubk), 'WK':Uwk)} Stores utilities in dict of matrices, base level utilities are pre-specified!!
#
#numZn = number of zones
#
#getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
#ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0, 'CD':0, 'CP':0, 'TB':0, 'TP':0, 'BK':0, 'WK':0} #Stores probabilities at each level
#TripMat = GetMatrixRaw(Visum, tripmatno) #--> Input trip distribution matrix
#numZn = Visum.Net.Zones.Count
ProbMats = dict(zip(MatRefs.keys(), numpy.zeros(len(MatRefs.keys()))))
ProbMats['ROOT'] = 1.0
#Utility calculator going up...
#print 'Getting logsums and utilities...'
for key in sorted(TreeDefn.keys(), reverse= True):
#print key, TreeDefn[key]
sumExp = numpy.zeros((numZn,numZn))
sublevelmat_codes = TreeDefn[key][1] #produces --> ex. ['WB', 'WX', 'DX']
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
MatRefs[code] = MatRefs[code]/TreeDefn[key][0] #---> scale the utility
sumExp+=numpy.exp(MatRefs[code])
lnSum = sumExp.copy() #Maybe there is a better way of doing the next 4 steps in 1 shot
lnSum[sumExp == 0] = 0.000000001
lnSum = numpy.log(lnSum)
lnSum[sumExp == 0] = -999
MatRefs[key[1]] = TreeDefn[key][0]*lnSum #---> Get ln sum of sublevel
#Probability going down...
#print 'Getting probabilities...'
for key in sorted(TreeDefn.keys()):
#print key, TreeDefn[key]
eU_total = numpy.zeros((numZn,numZn))
sublevelmat_codes = TreeDefn[key][1] #1st set--> ROOT : AU, TR
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
eU_total+=numpy.exp(MatRefs[code])
eU_total[eU_total == 0] = 0.0001 #Avoid divide by 0 error
## for code in sublevelmat_codes:
## ProbMats[code] = ProbMats[key[1]]*numpy.exp(MatRefs[code])/eU_total
nSublevels = len(sublevelmat_codes)
cumProb = 0
for i in xrange(nSublevels - 1):
code = sublevelmat_codes[i]
temp = numpy.exp(MatRefs[code])/eU_total
ProbMats[code] = ProbMats[key[1]]*temp
cumProb+=temp
code = sublevelmat_codes[i+1]
ProbMats[code] = ProbMats[key[1]]*(1.0-cumProb)
if getLogSumAccess == 0:
return ProbMats
else:
return ProbMats, MatRefs['ROOT']
def CalcNestedChoiceFlat(TreeDefn, MatRefs, vecLen, getLogSumAccess = 0):
'''
#TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR', 'AC']],
# (1,'AU'):[0.992,['CD', 'CP']],
# (1,'TR'):[0.992,['TB', 'TP']],
# (1,'AC'):[0.992,['BK', 'WK']]}
#
#Key-> (Level ID, Level Code): Values-> (LogSum Parameter enters as: 1/lambda, SubLevel IDs)
# ROOT should always be ID = 0 and Code = 'ROOT'
# ROOT
# / | \
# / | \
# / | \
# AU TR AC(logsum parameter)
# /\ /\ /\
# CD CP TB TP BK WK
#
#MatRefs = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0,
# 'CD':Ucd), 'CP':Ucp),
# 'TB':Utb), 'TP':Utp),
# 'BK':Ubk), 'WK':Uwk)} Stores utilities in dict of vectors, base level utilities are pre-specified!!
#
#vecLen = number of od pairs being evaluated
#
#getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
#ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0, 'CD':0, 'CP':0, 'TB':0, 'TP':0, 'BK':0, 'WK':0} #Stores probabilities at each level
#TripMat = GetMatrixRaw(Visum, tripmatno) #--> Input trip distribution matrix
#numZn = Visum.Net.Zones.Count
ProbMats = dict(zip(MatRefs.keys(), numpy.zeros(len(MatRefs.keys()))))
ProbMats['ROOT'] = 1.0
#Utility calculator going up...
#print 'Getting logsums and utilities...'
for key in sorted(TreeDefn.keys(), reverse= True):
#print key, TreeDefn[key]
sumExp = numpy.zeros(vecLen)
sublevelmat_codes = TreeDefn[key][1] #produces --> ex. ['WB', 'WX', 'DX']
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
MatRefs[code] = MatRefs[code]/TreeDefn[key][0] #---> scale the utility
sumExp+=numpy.exp(MatRefs[code])
lnSum = sumExp.copy() #Maybe there is a better way of doing the next 4 steps in 1 shot
lnSum[sumExp == 0] = 0.000000001
lnSum = numpy.log(lnSum)
lnSum[sumExp == 0] = -999
MatRefs[key[1]] = TreeDefn[key][0]*lnSum #---> Get ln sum of sublevel
#Probability going down...
#print 'Getting probabilities...'
for key in sorted(TreeDefn.keys()):
#print key, TreeDefn[key]
eU_total = numpy.zeros(vecLen)
sublevelmat_codes = TreeDefn[key][1] #1st set--> ROOT : AU, TR
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
eU_total+=numpy.exp(MatRefs[code])
eU_total[eU_total == 0] = 0.0001 #Avoid divide by 0 error
## for code in sublevelmat_codes:
## ProbMats[code] = ProbMats[key[1]]*numpy.exp(MatRefs[code])/eU_total
nSublevels = len(sublevelmat_codes)
cumProb = 0
for i in xrange(nSublevels - 1):
code = sublevelmat_codes[i]
temp = numpy.exp(MatRefs[code])/eU_total
ProbMats[code] = ProbMats[key[1]]*temp
cumProb+=temp
code = sublevelmat_codes[i+1]
ProbMats[code] = ProbMats[key[1]]*(1.0-cumProb)
if getLogSumAccess == 0:
return ProbMats
else:
return ProbMats, MatRefs['ROOT']
#some generic utilities for reading and writing numpy arrays to disk..
def GetMatrix(fn, numZn):
return numpy.fromfile(fn).reshape((numZn, numZn))
def GetMatrixFlat(fn):
return numpy.fromfile(fn)
def PushMatrix(fn, mat):
mat.tofile(fn)
## DEMO---->
##def runNested():
## PMats = CalcNestedChoice(TreeDefn, MatRefs, numZn)
## for key in PMats.keys():
## if key <> 'ROOT':
## mat = PMats[key]
## print key, mat.sum(), mat[3398, 3397]
## PushMatrix(fn+str(key)+".np", mat)
## del PMats
##
###@profile
##def runMultiNomial():
## Utils = {'da':da, 'wb':wb, 'wx':wx}
## PMats = CalcMultinomialChoice(Utils)
## del PMats
##
##
##start = time.time()
##print 'Calculating nested choice...'
##numZn = 3399
##fn = r"C:\DevResearch\Visum_Utils\Test Matrices\\"
##da = GetMatrix(fn+"801.np", numZn)
##wb = GetMatrix(fn+"803.np", numZn)
##wx = GetMatrix(fn+"802.np", numZn)
##
##TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR']], (1,'TR'):[0.75,['WB', 'WX']]}
##MatRefs = {'ROOT': 1.0, 'AU':da , 'TR':0, 'WB':wb, 'WX':wx} #Stores utilities, base level utilities are pre-specified
###Utils = {'da':da, 'wb':wb, 'wx':wx}
###ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'WB':0, 'WX':0} #Stores probabilities at each level
##print 'Matrices loaded and calculation initialized...'
###PMats = CalcMultinomialChoice(Utils)
##runNested()
##print 'Calculation completed.'
##print 'Time taken(secs): ', time.time()-start
```
#### File: python-tdm/scripts/MatEstimateGradient.py
```python
import numpy as np
import scipy.sparse
import VisumPy.helpers as VPH
import csv
import time
#Least squares matrix estimation with gradient method - Chetan Joshi, <NAME>
#------------User input -----------------------------------------------------------------#
matno = 41 # seed mat and also where result is stored
countAttrID = "AddVal1"
wtAttrID = "AddVal2"
wtOD = 42
flowmatfile = r"C:\Projects\KA_Work\LSODME\FlowMatrix.mtx"
#----------------------------------------------------------------------------------------#
#Matrix estimation function: takes flow proportion matrix, OD seed, Count and returns adjusted matrix: <NAME>, updated by <NAME>
def EstimateMatrix(FlowProp, FlowPropT, OD_Flows, Ca, Wt, iter=25):
Va = FlowPropT.dot(OD_Flows)
Visum.WriteToTrace('Length of Va' + str(len(Va)))
Visum.WriteToTrace('Length of Ca' + str(len(Ca)))
Z = sum((Va-Ca)**2)
Visum.WriteToTrace('Starting Z =' + str(Z))
#print 'Starting Z =', Z
for i in range(1, iter):
t1 = time.time()
Grad = FlowProp.dot((Va - Ca)*Wt)
Va_prime = FlowPropT.dot(-OD_Flows * Grad)
lambda_opt = sum((Ca - Va)*Va_prime)/sum(Va_prime*Va_prime)
if Grad.max()> 0:
lambda_opt = min(lambda_opt, 1/Grad.max())
OD_Flows = OD_Flows*(1 - lambda_opt*Grad) #variant - 1.1
OD_Flows[OD_Flows<0]=0 #remove very small -0.0 values from matrix if any...
Va = FlowPropT.dot(OD_Flows)
Z = sum((Va-Ca)**2)
t2 = time.time()
if Z < 1:
break;
else:
Visum.WriteToTrace(str(i) + ': Z =' + str(Z), True)
Visum.WriteToTrace('Final Z =' + str(Z), True)
return OD_Flows
def readFlowMat(nODs, nLinks, filename):
t1 = time.time()
with open(filename, "rb") as f:
reader = csv.reader(f, delimiter='\t')
for i in xrange(0, 12):
reader.next()
i = []
j = []
data = []
for row in reader: #each row = CountIndex \t ODIndex \t proportion
if row[0] == "": continue
if row[0].startswith("*"): continue
if row[0].startswith("0"): continue
i.append(long(row[1])-1)
j.append(long(row[0])-1)
data.append(float(row[2]))
ix = j[-1] + 1
z = max(i)
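    # The loop below appends one extra column per OD pair containing a single 1.0,
    # i.e. an identity block that ties each OD cell to its seed value once Ca and Wt
    # are extended with Sparse_OD / SparseWeightOD further down.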
    for k in range(len(Sparse_OD)):
        # j -> extra od-constraint column
        i.append(int(k)) #index of od pair
        j.append(int(ix)) #index of the od-constraint column
        data.append(1.0)
        ix+=1
FlowProp = scipy.sparse.csr_matrix((data, (i,j)), shape=(len(Sparse_OD), nLinks+nODs), dtype='d')
#Visum.WriteToTrace([[k, z, i[-1],j[-1]], [len(i), len(j)]])
t2 = time.time()
Visum.WriteToTrace("read flow mat: " + str(t2-t1) , True)
return FlowProp
#--------------------------------------------------------------------------------------------------------
Ca = np.array(VPH.GetMulti(Visum.Net.Links, countAttrID, True))
Wt = np.array(VPH.GetMulti(Visum.Net.Links, wtAttrID, True))
AssignedOD = VPH.GetMatrixRaw(Visum, matno).flatten() #Get the flattened seed matrix
WeightOD = VPH.GetMatrixRaw(Visum, wtOD).flatten() #Get the flattened weight matrix
SparseWeightOD = WeightOD.compress(AssignedOD > 0).copy()
Sparse_OD = AssignedOD.compress(AssignedOD > 0).copy() #Get only cells > 0 to reduce array size
nLinks = Visum.Net.Links.CountActive # Get the number of active links based on filter - will be extended to turns if turns are used
#OD_constraint_block = numpy.identity(len(Sparse_OD))
Ca = np.append(Ca, Sparse_OD) #Extend the count array to include delta with the existing OD matrix on the least squares formulation
#Wt = np.append(Wt, np.ones(len(Sparse_OD))) #Extend the weight array for weight matrix using default of 1.0, should be changed to
Wt = np.append(Wt, SparseWeightOD) #Extend the weight array with the OD weight matrix values; higher weights keep the result closer to the seed matrix
FlowProp = readFlowMat(len(Sparse_OD), nLinks, flowmatfile)
FlowPropT = scipy.sparse.csc_matrix(FlowProp.T)
Visum.WriteToTrace(FlowPropT.shape)
Visum.WriteToTrace(FlowProp.shape)
NewODFlows = EstimateMatrix(FlowProp, FlowPropT, Sparse_OD, Ca, Wt, iter=25)
#Set back result to Visum...
AssignedOD[AssignedOD>0] = NewODFlows
nZones = Visum.Net.Zones.Count
VPH.SetMatrixRaw(Visum, matno, AssignedOD.reshape((nZones,nZones)))
Visum.WriteToTrace("results stored" , True)
del FlowProp
del FlowPropT
``` |
{
"source": "JoshChima/Ichimoku",
"score": 3
} |
#### File: JoshChima/Ichimoku/Methods.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly
#import plotly.plotly as py
import plotly.graph_objs as go
import os
from plotly.offline import init_notebook_mode, plot, iplot
def MetaTraderDataConverter(file):
df=pd.read_csv(file, parse_dates=[['Date','Time']], sep='\t')
df['Date'] = df['Date_Time']
df.set_index(df.Date, drop=True, inplace=True)
df = df[['Open', 'High', 'Low','Close']]
return df
def Ichimoku(dataframe):
d = dataframe
nine_period_high = d['High'].rolling(window= 9).max()
nine_period_low = d['Low'].rolling(window= 9).min()
d['tenkan_sen'] = (nine_period_high + nine_period_low) /2
# Kijun-sen (Base Line): (26-period high + 26-period low)/2))
period26_high = d['High'].rolling(window=26).max()
period26_low = d['Low'].rolling(window=26).min()
d['kijun_sen'] = (period26_high + period26_low) / 2
# Senkou Span A (Leading Span A): (Conversion Line + Base Line)/2))
d['senkou_span_a'] = ((d['tenkan_sen'] + d['kijun_sen']) / 2).shift(26)
# Senkou Span B (Leading Span B): (52-period high + 52-period low)/2))
period52_high = d['High'].rolling(window=52).max()
period52_low = d['Low'].rolling(window=52).min()
d['senkou_span_b'] = ((period52_high + period52_low) / 2).shift(52)
# The most current closing price plotted 26 time periods behind (optional)
d['chikou_span'] = d['Close'].shift(-26)
d['pct_change'] = (d['Close']-d['Close'].shift(1))/d['Close'].shift(1)*100
return d
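# Illustrative usage (sketch; the file name here is hypothetical):
#   df = MetaTraderDataConverter("USDJPY_H1.csv")   # tab-separated MetaTrader export
#   df = Ichimoku(df)                               # adds tenkan/kijun/senkou/chikou columns
#   Ichimoku_plot(df)                               # renders the candlestick + cloud chart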
def Ichimoku_plot(d):
# Set colours for up and down candles
INCREASING_COLOR = 'green'
DECREASING_COLOR = 'red'
# create list to hold dictionary with data for our first series to plot
# (which is the candlestick element itself)
data1 = [ dict(
type = 'candlestick',
open = d.Open,
high = d.High,
low = d.Low,
close = d.Close,
x = d.index,
yaxis = 'y2',
name = 'F',
increasing = dict( line = dict( color = INCREASING_COLOR ) ),
decreasing = dict( line = dict( color = DECREASING_COLOR ) ),
) ]
# Create empty dictionary for later use to hold settings and layout options
layout=dict()
# create our main chart "Figure" object which consists of data to plot and layout settings
fig = dict( data=data1, layout=layout )
    # Assign various settings and choices - background colour, range selector etc
fig['layout']['plot_bgcolor'] = 'grey'
fig['layout']['xaxis'] = dict( rangeselector = dict( visible = True ) )
fig['layout']['yaxis'] = dict( domain = [0, 0.2], showticklabels = False )
fig['layout']['yaxis2'] = dict( domain = [0.2, 0.8] )
fig['layout']['legend'] = dict( orientation = 'h', y=0.9, x=0.3, yanchor='bottom' )
fig['layout']['margin'] = dict( t=40, b=40, r=40, l=40 )
# Populate the "rangeselector" object with necessary settings
rangeselector=dict(
visible = True,
x = 0, y = 0.9,
bgcolor = 'rgba(150, 200, 250, 0.4)',
font = dict( size = 13 ),
buttons=list([
dict(count=1,
label='reset',
step='all'),
dict(count=1,
label='1yr',
step='year',
stepmode='backward'),
dict(count=3,
label='3 mo',
step='month',
stepmode='backward'),
dict(count=1,
label='1 mo',
step='month',
stepmode='backward'),
dict(step='all')
]))
fig['layout']['xaxis']['rangeselector'] = rangeselector
# Append the Ichimoku elements to the plot
fig['data'].append( dict( x=d['tenkan_sen'].index, y=d['tenkan_sen'], type='scatter', mode='lines',
line = dict( width = 1 ),
marker = dict( color = '#e7e14f' ),
yaxis = 'y2', name='tenkan_sen' ) )
fig['data'].append( dict( x=d['kijun_sen'].index, y=d['kijun_sen'], type='scatter', mode='lines',
line = dict( width = 1 ),
marker = dict( color = '#20A4F3' ),
yaxis = 'y2', name='kijun_sen' ) )
fig['data'].append( dict( x=d['senkou_span_a'].index, y=d['senkou_span_a'], type='scatter', mode='lines',
line = dict( width = 1 ),
marker = dict( color = '#228B22' ),
yaxis = 'y2', name='senkou_span_a' ) )
fig['data'].append( dict( x=d['senkou_span_b'].index, y=d['senkou_span_b'], type='scatter', mode='lines',
line = dict( width = 1 ),fill='tonexty',
marker = dict( color = '#FF3342' ),
yaxis = 'y2', name='senkou_span_b' ) )
fig['data'].append( dict( x=d['chikou_span'].index, y=d['chikou_span'], type='scatter', mode='lines',
line = dict( width = 1 ),
marker = dict( color = '#D105F5' ),
yaxis = 'y2', name='chikou_span' ) )
# Set colour list for candlesticks
colors = []
for i in range(len(d.Close)):
if i != 0:
if d.Close[i] > d.Close[i-1]:
colors.append(INCREASING_COLOR)
else:
colors.append(DECREASING_COLOR)
else:
colors.append(DECREASING_COLOR)
if not os.path.exists("images"):
os.mkdir("images")
dload = os.path.expanduser('./images')
html_file = 'candlestick-ichimoku.html'
fname = 'candlestick-ichimoku'
iplot(fig, filename="candlestick-ichimoku")
return d
#fig.show(renderer="png")
def u_d(num):
if num < 0:
return -1
else:
return 1
def NeoCandle(OHLC):
df = OHLC
df['Change'] = df['Close']-df['Close'].shift(1)
df['U_D'] = df['Change'].apply(u_d)
df['DHL'] = (df['High'] - df['Low'])
df['DOC'] = abs(df['Open'] - df['Close']) / (df['High'] - df['Low']) #percentage of space taken in DHL by DOC
df_OC = df[['Open','Close']]
# df_OC['Mid'] = df_OC.median(axis=1)
df_OC['Max'] = df_OC.max(axis=1)
# df['PODD'] = df_OC['Mid']
df['PODD'] = ((df_OC['Max'] - (abs(df['Open'] - df['Close'])/2)) - df['Low']) / (df['High'] - df['Low'])
#for i in range(df.shape[0]):
# df['PODD'].iloc[i] = (df_OC['Mid'].iloc[i] / df_OC['Max'].iloc[i] - 0.999) * 1000
return df
def PODD(OPEN, HIGH, LOW, CLOSE):
Max = max([OPEN, CLOSE])
DOChalf = abs(OPEN - CLOSE) / 2
DHLmini = HIGH - LOW
print((Max - DOChalf - LOW) / DHLmini)
#PODD(102.748, 102.839, 102.688, 102.791)
```
#### File: JoshChima/Ichimoku/script1.py
```python
import pandas as pd
from pandas_datareader import data, wb
import matplotlib as mpl
from mpl_finance import candlestick_ohlc
import matplotlib.dates as dates, time
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import quandl
quandl.ApiConfig.api_key = "<KEY>"
d=pd.read_csv("USD_JPY_2017_to_2018.csv")
#reverse dataframe
d = d.iloc[::-1]
#calculate close price
d["Close"] = d.shift(-1)["Open"]
#set date as index
d.set_index(d.Date, drop=True, inplace=True)
#print(d.head())
#print(d.shape)
#print(d.iloc[:5])
def df_slices(dataframe):
sliced_frames = []
for shift in range(60,dataframe.shape[0]):
df_slice = dataframe.iloc[:shift]
sliced_frames.append(df_slice)
return sliced_frames
def Ichimoku(dataframe):
d = dataframe
nine_period_high = d['High'].rolling(window= 9).max()
nine_period_low = d['Low'].rolling(window= 9).min()
d['tenkan_sen'] = (nine_period_high + nine_period_low) /2
# Kijun-sen (Base Line): (26-period high + 26-period low)/2))
period26_high = d['High'].rolling(window=26).max()
period26_low = d['Low'].rolling(window=26).min()
d['kijun_sen'] = (period26_high + period26_low) / 2
# Senkou Span A (Leading Span A): (Conversion Line + Base Line)/2))
d['senkou_span_a'] = ((d['tenkan_sen'] + d['kijun_sen']) / 2).shift(26)
# Senkou Span B (Leading Span B): (52-period high + 52-period low)/2))
period52_high = d['High'].rolling(window=52).max()
period52_low = d['Low'].rolling(window=52).min()
d['senkou_span_b'] = ((period52_high + period52_low) / 2).shift(52)
# The most current closing price plotted 26 time periods behind (optional)
d['chikou_span'] = d['Close'].shift(-26)
return d
sf = df_slices(d)
#print(Ichimoku(sf[300]))
print(Ichimoku(d))
#for s in range(len(sf)):
# print("#%s"%(s))
# print("Start: ",sf[s].iloc[0])
# print("End: ",sf[s].iloc[sf[s].__len__()-1])
#print("###########")
``` |
{
"source": "JoshChima/ScratchDQN",
"score": 2
} |
#### File: JoshChima/ScratchDQN/agent.py
```python
import gym
import numpy as np
import pandas as pd
import wandb
import time
import cv2
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from dataclasses import dataclass
from typing import Any
from collections import deque
from models import Model, ConvModel
import wandb
import argh
import sys
def img_display(img):
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
class FrameStackingAndResizingEnv:
def __init__(self, env, w, h, num_stack=4):
self.env = env
self.n = num_stack
self.w = w
self.h = h
self.buffer = np.zeros((num_stack, h, w), 'uint8')
self.frame = None
    def _preprocess_frame(self, frame):
        image = cv2.resize(frame, (self.w, self.h))
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image
def step(self, action):
im, reward, done, info = self.env.step(action)
self.frame = im.copy()
im = self._preprocess_frame(im)
# 0,1,2 -> 1,2,3
self.buffer[1:self.n, :, :] = self.buffer[0:self.n-1, :, :]
self.buffer[0, :, :] = im
return self.buffer.copy(), reward, done, info
def render(self, mode):
if mode == 'rgb_array':
return self.frame
return super(FrameStackingAndResizingEnv, self).render(mode)
@property
def observation_space(self):
return np.zeros((self.n, self.h, self.w))
@property
def action_space(self):
return self.env.action_space
def reset(self):
im = self.env.reset()
self.frame = im.copy()
im = self._preprocess_frame(im)
self.buffer = np.stack([im]*self.n, 0)
return self.buffer.copy()
# def render(self, mode):
# self.env.render(mode)
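# Illustrative usage (sketch): the wrapper returns observations as a (num_stack, h, w)
# uint8 array of the most recent grayscale frames, newest first, e.g.
#   env = FrameStackingAndResizingEnv(gym.make("Breakout-v0"), 84, 84, 4)
#   obs = env.reset()                                  # shape (4, 84, 84)
#   obs, reward, done, info = env.step(env.action_space.sample())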
@dataclass
class SARS:
state: Any
action: int
reward: float
done: bool
next_state: Any
# class DQNAgent:
# def __init__(self, model):
# self.model = model
# def get_actions(self, observations):
# # obs shape is (N, 4)
# # N is batch size
# q_vals = self.model(observations)
# # q_vals shape (N, 2)
# # .max(-1) the last axis
# return q_vals.max(-1)[1]
class ReplayBuffer():
def __init__(self, buffer_size=100000):
self.buffer_size = buffer_size
        # could possibly improve by making it a deque or a database
# self.buffer = deque(maxlen=buffer_size)
# even better...
self.buffer = [None]*buffer_size
self.idx = 0
def insert(self, sars):
# self.buffer.append(sars)
# self.buffer = self.buffer[-self.buffer_size:]
self.buffer[self.idx % self.buffer_size] = sars
self.idx += 1
def sample(self, num_samples):
# assert num_samples <= len(self.buffer)
assert num_samples <= min(self.idx, self.buffer_size)
if self.idx < self.buffer_size:
return random.sample(self.buffer[:self.idx], num_samples)
return random.sample(self.buffer, num_samples)
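    # The buffer behaves as a fixed-size ring: insert() overwrites the oldest slot once
    # idx wraps past buffer_size, and sample() only draws from slots already filled.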
def update_tgt_model(m, tgt):
tgt.load_state_dict(m.state_dict())
def train_step(model, state_transitions, tgt, num_actions, gamma, device):
cur_states = torch.stack(([torch.Tensor(s.state)
for s in state_transitions])).to(device)
rewards = torch.stack(([torch.Tensor([s.reward])
for s in state_transitions])).to(device)
mask = torch.stack(([torch.Tensor([0]) if s.done else torch.Tensor(
[1]) for s in state_transitions])).to(device)
next_states = torch.stack(
([torch.Tensor(s.next_state) for s in state_transitions])).to(device)
actions = [s.action for s in state_transitions]
with torch.no_grad():
qvals_next = tgt(next_states).max(-1)[0] # (N, num_actions)
model.opt.zero_grad()
qvals = model(cur_states) # (N, num_actions)
one_hot_actions = F.one_hot(
torch.LongTensor(actions), num_actions).to(device)
## MSE Loss ##
# loss = (((rewards + mask[:, 0] * (qvals_next*gamma) -
# torch.sum(qvals*one_hot_actions, -1))**2)).mean().to(device)
## Huber Loss ##
loss_fn = nn.SmoothL1Loss()
loss = loss_fn(torch.sum(qvals*one_hot_actions, -1), rewards.squeeze() + mask[:, 0] * (qvals_next*gamma))
loss.backward()
model.opt.step()
return loss
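# train_step regresses Q(s, a) toward the standard DQN target
#   y = r + gamma * max_a' Q_tgt(s', a') * (0 if done else 1),
# where Q_tgt is the periodically-synced target network; the Huber (SmoothL1) loss
# keeps large TD errors from dominating the gradient.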
def run_test_episode(model, env, device, max_steps=1000):
frames = []
obs = env.reset()
frames.append(env.frame)
idx = 0
done = False
reward = 0
while not done and idx < max_steps:
#TODO make it do non conv actions too
action = model(torch.Tensor(obs).unsqueeze(0).to(device)).max(-1)[-1].item()
obs, r, done, _ = env.step(action)
reward += r
        frames.append(env.frame)
        idx += 1
return reward, np.stack(frames, 0)
hyperparameter_defaults = dict(
run_name=str(random.getrandbits(128)),
env_name='CartPole-v0',
max_reward=500,
max_steps=5_000_000,
memory_size=100_000,
min_rb_size=10000,
sample_size=2500,
env_steps_before_train=100,
tgt_model_update=5000,
reward_scaler=100.0,
eps_min=0.01,
eps_decay=0.999999,
gamma=0.99,
learning_rate=0.0001
)
### Used to solve Cartpole ###
def dqnmain(project_name, do_boltzman_exploration=False, test=False, chkpt=None, hypeparams=hyperparameter_defaults, steps=1000, device='cuda'):
image_arr = []
if (not test):
wdbrun = wandb.init( project=project_name, config=hypeparams, name=hypeparams['run_name'], reinit=True, monitor_gym=False)
# run.save("*.pth")
config = wdbrun.config
max_reward = config.max_reward
max_steps = config.max_steps
memory_size = config.memory_size
min_rb_size = config.min_rb_size
sample_size = config.sample_size
env_steps_before_train = config.env_steps_before_train
tgt_model_update = config.tgt_model_update
reward_scaler = config.reward_scaler
eps_min = config.eps_min
eps_decay = config.eps_decay
gamma = config.gamma
learning_rate = config.learning_rate
else:
max_reward = hypeparams['max_reward']
max_steps = steps
memory_size = hypeparams['memory_size']
min_rb_size = hypeparams['min_rb_size']
sample_size = hypeparams['sample_size']
env_steps_before_train = hypeparams['env_steps_before_train']
tgt_model_update = hypeparams['tgt_model_update']
reward_scaler = hypeparams['reward_scaler']
eps_min = hypeparams['eps_min']
eps_decay = hypeparams['eps_decay']
gamma = hypeparams['gamma']
learning_rate = hypeparams['learning_rate']
env = gym.make(hypeparams['env_name'])
if hypeparams['env_name'] == 'Breakout-v0':
#TODO
env = FrameStackingAndResizingEnv(env, 84, 84, 4) # change stack size here
env._max_episode_steps = 4000
test_env = gym.make(hypeparams['env_name'])
if hypeparams['env_name'] == 'Breakout-v0':
#TODO
test_env = FrameStackingAndResizingEnv(test_env, 84, 84, 4) # change stack size here
test_env._max_episode_steps = 4000
last_observation = env.reset()
if hypeparams['env_name'] == 'Breakout-v0':
m = ConvModel(env.observation_space.shape,
env.action_space.n, learning_rate).to(device)
else:
m = Model(env.observation_space.shape,
env.action_space.n, learning_rate).to(device)
if chkpt is not None:
m.load_state_dict(torch.load(chkpt))
if hypeparams['env_name'] == 'Breakout-v0':
tgt = ConvModel(env.observation_space.shape, env.action_space.n).to(
device)
else:
tgt = Model(env.observation_space.shape, env.action_space.n).to(
device) # target model, gets update fewer times
update_tgt_model(m, tgt)
rb = ReplayBuffer(memory_size)
steps_since_train = 0
epochs_since_tgt = 0
step_num = -1 * min_rb_size
i = 0
episode_rewards = []
rolling_reward = 0
solved = False
try:
while (not solved) and step_num < max_steps:
if test:
screen = env.render('rgb_array')
image_arr.append(screen)
eps = 0
else:
eps = eps_decay**(step_num)
if do_boltzman_exploration:
if hypeparams['env_name'] == 'Breakout-v0':
logits = m(torch.Tensor(last_observation).unsqueeze(0).to(device))[0]
action = torch.distributions.Categorical(logits=logits).sample().item()
else:
logits = m(torch.Tensor(last_observation).to(device))[0]
action = torch.distributions.Categorical(logits=logits).sample().item()
else:
if random.random() < eps:
action = env.action_space.sample()
else:
if hypeparams['env_name'] == 'Breakout-v0':
action = m(torch.Tensor(last_observation).unsqueeze(0).to(device)).max(-1)[-1].item()
else:
action = m(torch.Tensor(last_observation).to(device)).max(-1)[-1].item()
observation, reward, done, info = env.step(action)
rolling_reward += reward
reward = reward/reward_scaler
rb.insert(SARS(last_observation, action,
reward, done, observation))
last_observation = observation
if done:
episode_rewards.append(rolling_reward)
if test:
print(rolling_reward)
rolling_reward = 0
observation = env.reset()
steps_since_train += 1
i += 1
step_num += 1
if (not test) and rb.idx > min_rb_size and steps_since_train > env_steps_before_train:
loss = train_step(m, rb.sample(sample_size),
tgt, env.action_space.n, gamma, device)
ave_reward = np.mean(episode_rewards)
wdbrun.log({'loss': loss.detach().cpu().item(
), 'epsilon': eps, 'avg_reward': ave_reward}, step=step_num)
if ave_reward >= max_reward:
solved = True
episode_rewards = []
epochs_since_tgt += 1
# print(step_num, loss.detach().item())
if epochs_since_tgt > tgt_model_update:
# print('updating target model')
update_tgt_model(m, tgt)
rew, frames = run_test_episode(m, test_env, device)
# frames.shape == (T, H, W, C)
# wandb.log({'test_reward': rew, 'test_video': wandb.Video(frames.transpose(0, 3, 1, 2), str(rew), fps=25, format='mp4')})
wandb.log({'test_reward': rew})
epochs_since_tgt = 0
torch.save(
tgt.state_dict(), f"{wandb.run.dir}/{hypeparams['run_name']}_{step_num}.pth")
steps_since_train = 0
if ave_reward >= max_reward:
solved = True
wandb.join()
env.close()
except KeyboardInterrupt:
sys.exit()
if __name__ == "__main__":
hyperset = dict(
run_name=str(random.getrandbits(128)),
env_name='Breakout-v0',
max_reward=500,
max_steps=2_000_000,
memory_size=50_000,
min_rb_size=20000,
sample_size=128,
env_steps_before_train=16,
tgt_model_update=500,
reward_scaler=1,
eps_min=0.05,
eps_decay=0.999999,
gamma=0.99,
learning_rate=0.0001
)
# # argh.dispatch_command(dqnmain)
# # # dqnmain()
# env = gym.make("Breakout-v0")
# env = FrameStackingAndResizingEnv(env, 480, 640)
# # # print(env.observation_space.shape)
# # # print(env.action_space)
# im = env.reset()
# idx = 0
# ims = []
# print(im.shape)
# # for i in range(im.shape[-1]):
# # ims.append(im[:,:,i])
# # # cv2.imwrite(f"/tmp/{i}.jpg", im[:,:,i])
# # img_display(np.hstack(ims))
# env.step(1)
# for _ in range(10):
# idx += 1
# im, _, _, _ = env.step(random.randint(0, 3))
# for i in range(im.shape[-1]):
# ims.append(im[:, :, i])
# img_display(np.hstack(ims))
# ims = []
    dqnmain('Breakout-Tutorial', do_boltzman_exploration=False, hypeparams=hyperset)
``` |
{
"source": "joshclimacell/rules_python",
"score": 2
} |
#### File: rules_python/rules_python/whl.py
```python
import argparse
import json
import os
import re
import shutil
import textwrap
import zipfile
import pkg_resources
# pylint: disable=R0914
def main():
args = _parse_args()
dependency_list = []
whl_dependency_list = []
extra_list = []
whl_extra_list = []
whl_paths = args.whl_paths
if args.whl is not None:
whl_paths = whl_paths + [args.whl]
# Extract the files into the current directory.
    for wheel_path in whl_paths:
wheel = Wheel(wheel_path)
wheel.expand(args.directory)
copied_whl_path = os.path.join(args.directory,
os.path.basename(wheel_path))
shutil.copy(wheel_path, copied_whl_path)
if args.track_deps:
for dependency in wheel.dependencies():
dependency_list.append('requirement("{}")'.format(dependency))
whl_dependency_list.append(
'pypi_whl_requirement("{}")'.format(dependency))
for extra in args.extras:
extra_list.append(_make_extra(extra, wheel))
whl_extra_list.append(_make_whl_extra(extra, wheel))
# Generate BUILD file.
dependency_join_str = ',\n '
extras_join_str = '\n\n'
dependencies = dependency_join_str.join(dependency_list)
whl_dependencies = dependency_join_str.join(whl_dependency_list)
extras = extras_join_str.join(extra_list)
whl_extras = extras_join_str.join(whl_extra_list)
build_file_content = _make_build_file_content(
requirements_bzl=args.requirements,
dependencies=dependencies,
whl_dependencies=whl_dependencies,
extras=extras,
whl_extras=whl_extras)
with open(os.path.join(args.directory, 'BUILD'), 'w') as file_obj:
file_obj.write(build_file_content)
class Wheel(object):
def __init__(self, path):
self._path = path
def path(self):
return self._path
def basename(self):
return os.path.basename(self.path())
def distribution(self):
# See https://www.python.org/dev/peps/pep-0427/#file-name-convention
parts = self.basename().split('-')
return parts[0]
def version(self):
# See https://www.python.org/dev/peps/pep-0427/#file-name-convention
parts = self.basename().split('-')
return parts[1]
def repository_name(self):
# Returns the canonical name of the Bazel repository for this package.
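        # e.g. google_cloud-0.27.0-py2.py3-none-any.whl -> "pypi__google_cloud_0_27_0"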
canonical = 'pypi__{}_{}'.format(self.distribution(), self.version())
# Escape any illegal characters with underscore.
return re.sub('[-.]', '_', canonical)
def _dist_info(self):
# Return the name of the dist-info directory within the .whl file.
# e.g. google_cloud-0.27.0-py2.py3-none-any.whl ->
# google_cloud-0.27.0.dist-info
return '{}-{}.dist-info'.format(self.distribution(), self.version())
def metadata(self):
# Extract the structured data from metadata.json in the WHL's dist-info
# directory.
with zipfile.ZipFile(self.path(), 'r') as whl:
# first check for metadata.json
try:
with whl.open(
self._dist_info() + '/metadata.json') as file_obj:
return json.loads(file_obj.read().decode("utf-8"))
except KeyError:
pass
# fall back to METADATA file (https://www.python.org/dev/peps/pep-0427/)
with whl.open(self._dist_info() + '/METADATA') as file_obj:
return self._parse_metadata(file_obj.read().decode("utf-8"))
def name(self):
return self.metadata().get('name')
def dependencies(self, extra=None):
"""Access the dependencies of this Wheel.
Args:
extra: if specified, include the additional dependencies of the named
"extra".
Yields:
the names of requirements from the metadata.json
"""
# TODO(mattmoor): Is there a schema to follow for this?
run_requires = self.metadata().get('run_requires', [])
for requirement in run_requires:
if requirement.get('extra') != extra:
# Match the requirements for the extra we're looking for.
continue
marker = requirement.get('environment')
if marker and not pkg_resources.evaluate_marker(marker):
# The current environment does not match the provided PEP 508 marker,
# so ignore this requirement.
continue
requires = requirement.get('requires', [])
for entry in requires:
# Strip off any trailing versioning data.
parts = re.split('[ ><=()]', entry)
yield parts[0]
def extras(self):
return self.metadata().get('extras', [])
def expand(self, directory):
with zipfile.ZipFile(self.path(), 'r') as whl:
whl.extractall(directory)
# _parse_metadata parses METADATA files according to https://www.python.org/dev/peps/pep-0314/
def _parse_metadata(self, content):
# TODO: handle fields other than just name
name_pattern = re.compile('Name: (.*)')
return {'name': name_pattern.search(content).group(1)}
def _parse_args():
parser = argparse.ArgumentParser(
description='Unpack a .whl file as a py_library.')
parser.add_argument(
'--whl_paths',
action='append',
default=[],
help=('The .whl files we are expanding.'))
parser.add_argument(
'--whl',
action='store',
default=None,
help='Deprecated; use --whl_paths')
parser.add_argument('--track_deps', action='store', type=bool)
parser.add_argument(
'--requirements',
action='store',
default=None,
help='The pip_import from which to draw dependencies.')
parser.add_argument(
'--directory',
action='store',
default='.',
help='The directory into which to expand things.')
parser.add_argument(
'--extras',
action='append',
help='The set of extras for which to generate library targets.')
return parser.parse_args()
_EXTRA_TEMPLATE = textwrap.dedent("""\
py_library(
name = "{extra}",
deps = [
":pkg",{deps}
],
)
""")
_WHL_EXTRA_TEMPLATE = textwrap.dedent("""\
filegroup(
name = "{extra}_whl",
srcs = [
":whl",{deps}
],
)
""")
def _make_extra(extra, wheel):
return _EXTRA_TEMPLATE.format(
extra=extra,
deps=','.join(
['requirement("%s")' % dep for dep in wheel.dependencies(extra)]),
)
def _make_whl_extra(extra, wheel):
    return _WHL_EXTRA_TEMPLATE.format(
extra=extra,
deps=','.join([
'pypi_whl_requirement("%s")' % dep
for dep in wheel.dependencies(extra)
]),
)
def _make_build_file_content(requirements_bzl, dependencies, whl_dependencies,
extras, whl_extras):
if requirements_bzl:
template = (
'load("{requirements_bzl}", "requirement", "pypi_whl_requirement")'
)
load_requirements_statement = template.format(
requirements_bzl=requirements_bzl)
else:
load_requirements_statement = ''
return textwrap.dedent("""\
package(default_visibility = ["//visibility:public"])
{load_requirements_statement}
py_library(
name = "pkg",
srcs = glob(["**/*.py"]),
data = glob(["**/*"], exclude=["**/*.py", "**/* *", "BUILD", "WORKSPACE", "**/*.whl"]),
# This makes this directory a top-level in the python import
# search path for anything that depends on this.
imports = ["."],
deps = [{dependencies}],
)
filegroup(
name = "whl",
srcs = glob(["**/*.whl"]) + [{whl_dependencies}],
)
{extras}
{whl_extras}
""").format(
requirements_bzl=requirements_bzl,
dependencies=dependencies,
whl_dependencies=whl_dependencies,
extras=extras,
whl_extras=whl_extras,
load_requirements_statement=load_requirements_statement)
if __name__ == '__main__':
main()
``` |
{
"source": "joshcoales/Dailys-API",
"score": 2
} |
#### File: blueprints/views/chores_board.py
```python
import math
from datetime import date
import flask
import isodate
from blueprints.views.base_view import View
from colour_scale import ColourScale
from models.chores import Chore
class ChoresBoardJsonView(View):
def get_path(self):
return "/chores_board.json"
def call(self, **kwargs):
today = date.today()
chores_static = self.data_source.get_entries_for_stat_on_date("chores", "static")[0]
chores_data = self.data_source.get_entries_for_stat_over_range("chores", "earliest", "latest")
chores = [Chore(x) for x in chores_static['data']['chores']]
for chore_date in chores_data:
for chore in chores:
chore.parse_date_entry(chore_date)
# Sort chores into categories
categorised_chores = dict()
for chore in chores:
if chore.category not in categorised_chores:
categorised_chores[chore.category] = []
categorised_chores[chore.category].append(chore)
# Get layout info
layout = chores_static['data']['layout']
# Return json
return flask.jsonify({
"today": isodate.date_isoformat(today),
"chores": {k: [x.to_json() for x in v] for k, v in categorised_chores.items()},
"layout": layout
})
class ChoresBoardView(ChoresBoardJsonView):
def get_path(self):
return "/chores_board/"
def call(self, **kwargs):
board_name = kwargs.get("board_name")
chores_board = super().call(**kwargs).get_json()
today = isodate.parse_date(chores_board['today'])
categorised_chores = {k: [Chore.from_complete_json(x) for x in v] for k, v in chores_board['chores'].items()}
layout = chores_board['layout']
# Filter out unselected boards
for column in layout["columns"]:
if column.get("board_name") != board_name:
for category in column["categories"]:
del categorised_chores[category]
# Calculate overdue and neglected chores
overdue_chores = []
neglected_chores = []
for chore_list in categorised_chores.values():
for chore in chore_list:
if chore.recommended_period is not None:
if chore.is_overdue():
overdue_chores.append(chore)
else:
neglected_chores.append(chore)
# Sort overdue and neglected chores lists
overdue_chores.sort(key=lambda x: x.days_overdue(), reverse=True)
neglected_chores.sort(key=lambda x: x.days_since_done() or math.inf, reverse=True)
# Colour scales for non-recommended-period chores
start_colouring = today - isodate.parse_duration("P2M")
end_colouring = today - isodate.parse_duration("P1W")
colour_scale = ColourScale(
start_colouring, end_colouring,
ColourScale.RED, ColourScale.WHITE
)
# Render
return flask.render_template(
"chores_board.html",
today=today,
board_name=board_name,
categorised_chores=categorised_chores,
layout=layout,
overdue_chores=overdue_chores,
neglected_chores=neglected_chores,
colour_scale=colour_scale,
)
class ChoresBoardSpecificView(ChoresBoardView):
def get_path(self):
return "/chores_board/<board_name>/"
def call(self, **kwargs):
return super().call(**kwargs)
```
#### File: Dailys-API/models/dream_night.py
```python
from collections import defaultdict
from typing import Dict, List
import flask
from data_source import DailysData
from models.models import Data
class Dream:
def __init__(self, data):
self.data = data
self.text = data["text"]
self.disorientation = data.get("disorientation")
self.lewdness = data.get("lewdness")
self.false_facts = data.get("false_facts")
self.famous_people = data.get("famous_people")
self.known_people = data.get("known_people")
self.tags = data.get("tags")
class DreamNight(Data):
def __init__(self, json_data):
super().__init__(json_data)
self.dreams = [Dream(x) for x in json_data["data"]["dreams"]]
def dream_preview(self, length=50):
if len(self.dreams) == 0:
return ""
first_dream = self.dreams[0]
        if len(first_dream.text) < length:
return first_dream.text
return first_dream.text[:length] + "..."
@property
def dream_count(self):
return len(self.dreams)
@property
def total_dreams_length(self):
return sum(len(dream.text) for dream in self.dreams)
@property
def max_disorientation(self):
dream_values = [dream.disorientation for dream in self.dreams]
if list(filter(None, dream_values)):
return max(filter(None, dream_values))
return "-"
@property
def max_lewdness(self):
dream_values = [dream.lewdness for dream in self.dreams]
if list(filter(None, dream_values)):
return max(filter(None, dream_values))
return "-"
@property
def all_false_facts(self):
false_fact_dreams = [dream for dream in self.dreams if dream.false_facts is not None]
if len(false_fact_dreams) == 0:
return None
return [fact for dream in false_fact_dreams for fact in dream.false_facts]
@property
def all_famous_people(self):
famous_people_dreams = [dream for dream in self.dreams if dream.famous_people is not None]
if len(famous_people_dreams) == 0:
return None
return [person for dream in famous_people_dreams for person in dream.famous_people]
@property
def all_known_people(self):
known_people_dreams = [dream for dream in self.dreams if dream.known_people is not None]
if len(known_people_dreams) == 0:
return None
return [person for dream in known_people_dreams for person in dream.known_people]
@property
def all_tags(self):
tag_dreams = [dream for dream in self.dreams if dream.tags is not None]
if len(tag_dreams) == 0:
return None
return [tag for dream in tag_dreams for tag in dream.tags]
def suggest_enrichments(self) -> Dict[str, List[str]]:
suggestions = defaultdict(lambda: [])
for dream_idx in range(len(self.dreams)):
dream_data = self.dreams[dream_idx].data
sub_target = f"$.data.dreams[{dream_idx}]"
if "disorientation" not in dream_data:
suggestions["could add disorientation rating"].append(sub_target)
if "lewdness" not in dream_data:
suggestions["could add lewdness rating"].append(sub_target)
if "false_facts" not in dream_data:
suggestions["could list false facts"].append(sub_target)
if "famous_people" not in dream_data:
suggestions["could tag famous people."].append(sub_target)
if "known_people" not in dream_data:
suggestions["could tag known people."].append(sub_target)
if "tags" not in dream_data:
suggestions["could add tags."].append(sub_target)
if not suggestions:
return {}
return suggestions
def enriched_data(self, form_data) -> DailysData:
raw_data = self.raw_data["data"]
for dream_idx in range(len(self.dreams)):
if f"disorientation-{dream_idx}" in form_data:
disorientation = int(form_data[f"disorientation-{dream_idx}"])
raw_data["dreams"][dream_idx]["disorientation"] = disorientation
if f"lewdness-{dream_idx}" in form_data:
lewdness = int(form_data[f"lewdness-{dream_idx}"])
raw_data["dreams"][dream_idx]["lewdness"] = lewdness
if f"false_facts-{dream_idx}" in form_data:
false_facts = [fact.strip() for fact in form_data[f"false_facts-{dream_idx}"].split("|") if fact != ""]
raw_data["dreams"][dream_idx]["false_facts"] = false_facts
if f"famous_people-{dream_idx}" in form_data:
famous_people = [person.strip() for person in form_data[f"famous_people-{dream_idx}"].split("|") if person != ""]
raw_data["dreams"][dream_idx]["famous_people"] = famous_people
if f"known_people-{dream_idx}" in form_data:
known_people = [person.strip() for person in form_data[f"known_people-{dream_idx}"].split("|") if person != ""]
raw_data["dreams"][dream_idx]["known_people"] = known_people
if f"tags-{dream_idx}" in form_data:
tags = [tag.strip() for tag in form_data[f"tags-{dream_idx}"].split("|") if tag != ""]
raw_data["dreams"][dream_idx]["tags"] = tags
return raw_data
def enrichment_form(self, data_source):
# Get lists of tags and stuff
all_entries = data_source.get_entries_for_stat_over_range("dreams", "earliest", "latest")
tags = set()
known_people = set()
famous_people = set()
for entry in all_entries:
for dream in entry["data"]["dreams"]:
tags.update(dream.get("tags", []))
known_people.update(dream.get("known_people", []))
famous_people.update(dream.get("famous_people", []))
return flask.render_template(
"enrichment_forms/dreams.html",
dream_night=self,
entry=self.raw_data,
tags=sorted(tags),
known_people=sorted(known_people),
famous_people=sorted(famous_people)
)
``` |
{
"source": "joshcoales/Hallo",
"score": 2
} |
#### File: Hallo/hallo/events.py
```python
import enum
import logging
from abc import ABCMeta
from datetime import datetime
from typing import List, Dict, Any, Union, Optional, TYPE_CHECKING, Type, Tuple
if TYPE_CHECKING:
from hallo.hallo import Hallo
from telegram import Update, Message
from hallo.destination import Destination, User, Channel
from hallo.server import Server
KEY_SERVER_NAME = "server_name"
KEY_CHANNEL_ADDR = "channel_addr"
KEY_USER_ADDR = "user_addr"
KEY_MENU_BUTTONS = "menu_buttons"
KEY_FORMATTING = "formatting"
KEY_PHOTO_ID = "photo_id"
FLAG_MENU_UNCHANGED = object()
def server_from_json(hallo_obj: 'Hallo', data: Dict) -> 'Server':
return hallo_obj.get_server_by_name(data[KEY_SERVER_NAME])
def channel_from_json(server: 'Server', data: Dict) -> Optional['Channel']:
if data[KEY_CHANNEL_ADDR]:
return server.get_channel_by_address(data[KEY_CHANNEL_ADDR])
return None
def user_from_json(server: 'Server', data: Dict) -> Optional['User']:
if data[KEY_USER_ADDR]:
return server.get_user_by_address(data[KEY_USER_ADDR])
return None
def menu_buttons_from_json(data: Dict) -> Optional[List[List['MenuButton']]]:
if KEY_MENU_BUTTONS not in data:
return None
return [
[
MenuButton.from_json(button) for button in row
] for row in data[KEY_MENU_BUTTONS]
]
def event_from_json(hallo_obj: 'Hallo', data: Dict) -> 'ChannelUserTextEvent':
server = server_from_json(hallo_obj, data)
channel = channel_from_json(server, data)
user = user_from_json(server, data)
if KEY_FORMATTING not in data:
return ChannelUserTextEvent(
server,
channel,
user,
data["text"],
data["inbound"]
)
return message_from_json(hallo_obj, data)
def message_from_json(hallo_obj: 'Hallo', data: Dict) -> 'EventMessage':
server = server_from_json(hallo_obj, data)
channel = channel_from_json(server, data)
user = user_from_json(server, data)
text = data["text"]
inbound = data["inbound"]
menu_buttons = menu_buttons_from_json(data)
formatting = EventMessage.Formatting[data[KEY_FORMATTING]]
if KEY_PHOTO_ID in data:
msg = EventMessageWithPhoto(
server,
channel,
user,
text,
data[KEY_PHOTO_ID],
inbound,
menu_buttons=menu_buttons
)
else:
msg = EventMessage(
server,
channel,
user,
text,
inbound,
menu_buttons=menu_buttons
)
msg.formatting = formatting
msg._message_id = data.get("message_id")
return msg
class RawData(metaclass=ABCMeta):
pass
class RawDataIRC(RawData):
def __init__(self, line: str) -> None:
"""
:param line: Line of text direct from the IRC server
"""
self.line = line
class RawDataTelegram(RawData):
def __init__(self, update_obj: 'Update') -> None:
"""
:param update_obj: Update object from telegram server
"""
self.update_obj = update_obj
class RawDataTelegramOutbound(RawData):
def __init__(self, sent_msg_object: 'Message') -> None:
"""
:param sent_msg_object: Sent message object returned when sending message on telegram
:type sent_msg_object: ??
"""
self.sent_msg_object = sent_msg_object
class Event(metaclass=ABCMeta):
def __init__(self, inbound: bool = True) -> None:
self.is_inbound = inbound
self.send_time = datetime.now()
def get_send_time(self) -> datetime:
return self.send_time
def get_log_line(self) -> Optional[str]:
"""
:rtype: Optional[str]
"""
return None
def _get_log_extras(self) -> List[Dict[str, Any]]:
return []
def log(self) -> None:
if self.get_log_line() is None:
return
chat_logger = logging.getLogger("chat")
for extra in self._get_log_extras():
chat_logger.info(self.get_log_line(), extra=extra)
def get_print_line(self) -> Optional[str]:
return None
class EventSecond(Event):
pass
class EventMinute(Event):
pass
class EventHour(Event):
pass
class EventDay(Event):
def get_print_line(self) -> str:
return "Day changed: {}".format(self.send_time.strftime("%Y-%m-%d"))
class ServerEvent(Event, metaclass=ABCMeta):
def __init__(self, server: 'Server', inbound: bool = True):
Event.__init__(self, inbound=inbound)
self.server = server
self.raw_data = None
@property
def server_name(self) -> str:
return self.server.name
def with_raw_data(self, raw_data: RawData):
self.raw_data = raw_data
return self
def get_send_time(self) -> datetime:
if isinstance(self.raw_data, RawDataTelegram):
return self.raw_data.update_obj.message.date
return super().get_send_time()
def _get_log_extras(self) -> List[Dict[str, Any]]:
return [
{
"server": self.server
}
]
def get_print_line(self) -> str:
return "[{}] {}".format(self.server.name, self.get_log_line())
class EventPing(ServerEvent):
def __init__(self, server: 'Server', ping_number: str, inbound: bool = True):
ServerEvent.__init__(self, server, inbound=inbound)
self.ping_number = ping_number
def get_pong(self) -> 'EventPing':
return EventPing(self.server, self.ping_number, inbound=False)
def get_print_line(self) -> str:
return "[{}] {}".format(self.server.name, "PING" if self.is_inbound else "PONG")
class UserEvent(ServerEvent, metaclass=ABCMeta):
def __init__(self, server: 'Server', user: 'User', inbound: bool = True):
ServerEvent.__init__(self, server, inbound=inbound)
self.user = user
@property
def user_addr(self) -> Optional[str]:
return self.user.address if self.user else None
def _get_log_extras(self) -> List[Dict[str, Any]]:
channel_list = (
self.user.get_channel_list()
if self.is_inbound
else self.server.channel_list
)
return [
{
"server": self.server,
"destination": channel
}
for channel in channel_list
]
class EventQuit(UserEvent):
def __init__(self, server: 'Server', user: 'User', message: str, inbound: bool = True):
"""
:param user: User who quit the server, or none if outbound
"""
UserEvent.__init__(self, server, user, inbound=inbound)
self.quit_message = message
def get_log_line(self) -> str:
output = "{} has quit.".format(
self.user.name if self.is_inbound else self.server.get_nick()
)
if self.quit_message is not None and self.quit_message.strip() != "":
output += " ({})".format(self.quit_message)
return output
class EventNameChange(UserEvent):
def __init__(self, server: 'Server', user: 'User', old_name: str, new_name: str, inbound: bool = True) -> None:
"""
:param user: User object who has changed their name, or None if outbound
"""
UserEvent.__init__(self, server, user, inbound=inbound)
self.old_name = old_name
self.new_name = new_name
def get_log_line(self) -> str:
output = "Nick change: {} -> {}".format(self.old_name, self.new_name)
return output
class ChannelEvent(ServerEvent, metaclass=ABCMeta):
def __init__(self, server: 'Server', channel: 'Channel', inbound: bool = True) -> None:
ServerEvent.__init__(self, server, inbound=inbound)
self.channel = channel
@property
def channel_addr(self):
return self.channel.address if self.channel else None
def _get_log_extras(self) -> List[Dict[str, Any]]:
return [
{
"server": self.server,
"destination": self.channel
}
]
class ChannelUserEvent(ChannelEvent, UserEvent, metaclass=ABCMeta):
def __init__(self, server: 'Server', channel: 'Channel', user: 'User', inbound: bool = True) -> None:
ChannelEvent.__init__(self, server, channel, inbound=inbound)
UserEvent.__init__(self, server, user, inbound=inbound)
def _get_log_extras(self) -> List[Dict[str, Any]]:
return [
{
"server": self.server,
"destination": self.destination
}
]
@property
def destination(self) -> 'Destination':
return self.user if self.channel is None else self.channel
@property
def destination_addr(self) -> str:
return self.destination.address
class EventJoin(ChannelUserEvent):
def __init__(
self,
server: 'Server',
channel: 'Channel',
user: 'User',
password: Optional[str] = None,
inbound: bool = True
):
"""
:param user: User who joined the channel, or None if outbound
"""
ChannelUserEvent.__init__(self, server, channel, user, inbound=inbound)
self.password = password
def get_log_line(self) -> str:
output = "{} joined {}".format(
self.user.name if self.is_inbound else self.server.get_nick(),
self.channel.name,
)
return output
class EventLeave(ChannelUserEvent):
def __init__(
self,
server: 'Server',
channel: 'Channel',
user: 'User',
message: Optional[str],
inbound: bool = True
) -> None:
"""
:param user: User who left the channel, or None if outbound
"""
ChannelUserEvent.__init__(self, server, channel, user, inbound=inbound)
self.leave_message = message
def get_log_line(self) -> str:
output = "{} left {}".format(
self.user.name if self.is_inbound else self.server.get_nick(),
self.channel.name,
)
if self.leave_message is not None and self.leave_message.strip() != "":
output += " ({})".format(self.leave_message)
return output
class EventKick(ChannelUserEvent):
def __init__(
self,
server: 'Server',
channel: 'Channel',
kicking_user: Optional['User'],
kicked_user: 'User',
kick_message: Optional[str],
inbound: bool = True
) -> None:
"""
:type server: server.Server
:type channel: destination.Channel
:param kicking_user: User which sent the kick event, or None if outbound
:type kicking_user: destination.User | None
:type kicked_user: destination.User
:type kick_message: str | None
:type inbound: bool
"""
ChannelUserEvent.__init__(self, server, channel, kicking_user, inbound=inbound)
self.kicked_user = kicked_user
""" :type : Destination.User"""
self.kick_message = kick_message
""":type : str | None"""
def get_log_line(self) -> str:
output = "{} was kicked from {} by {}".format(
self.kicked_user.name,
self.channel.name,
self.user if self.is_inbound else self.server.get_nick(),
)
if self.kick_message is not None and self.kick_message.strip() != "":
output += " ({})".format(self.kick_message)
return output
class EventInvite(ChannelUserEvent):
def __init__(
self,
server: 'Server',
channel: 'Channel',
inviting_user: 'User',
invited_user: 'User',
inbound: bool = True
) -> None:
"""
:param inviting_user: User which is doing the inviting, or None if outbound
"""
ChannelUserEvent.__init__(self, server, channel, inviting_user, inbound=inbound)
self.invited_user = invited_user
def get_log_line(self) -> str:
output = "{} was invited to {} by {}".format(
self.invited_user.name,
self.channel.name,
self.user.name if self.is_inbound else self.server.get_nick(),
)
return output
class EventMode(ChannelUserEvent):
def __init__(
self,
server: 'Server',
channel: 'Channel',
user: 'User',
mode_changes: str,
inbound: bool = True
) -> None:
ChannelUserEvent.__init__(self, server, channel, user, inbound=inbound)
self.mode_changes = (
mode_changes # TODO: maybe have flags, arguments/users as separate?
)
def get_log_line(self) -> str:
channel_name = self.channel.name if self.channel is not None else "??"
output = "{} set {} on {}".format(
self.user.name if self.user is not None else self.server.get_nick(),
self.mode_changes,
channel_name,
)
return output
class ChannelUserTextEvent(ChannelUserEvent, metaclass=ABCMeta):
def __init__(
self,
server: 'Server',
channel: Optional['Channel'],
user: 'User',
text: str,
inbound: bool = True
) -> None:
ChannelUserEvent.__init__(self, server, channel, user, inbound=inbound)
self.text = text or ""
def create_response(
self,
text: str,
event_class: Optional[Type['ChannelUserTextEvent']] = None
) -> 'ChannelUserTextEvent':
if event_class is None:
event_class = self.__class__
resp = event_class(self.server, self.channel, self.user, text, inbound=False)
return resp
def reply(self, event: 'ChannelUserTextEvent') -> None:
"""
Shorthand for server.reply(event, event)
"""
self.server.reply(self, event)
def to_json(self) -> Dict:
return {
KEY_SERVER_NAME: self.server_name,
KEY_CHANNEL_ADDR: self.channel_addr,
KEY_USER_ADDR: self.user_addr,
"text": self.text,
"inbound": self.is_inbound
}
class MenuButton:
def __init__(self, text: str, data: str) -> None:
self.text = text
self.data = data
def to_json(self) -> Dict[str, str]:
return {
"text": self.text,
"data": self.data
}
@classmethod
def from_json(cls, data: Dict) -> 'MenuButton':
return MenuButton(
data["text"],
data["data"]
)
class EventMessage(ChannelUserTextEvent):
# Flags, can be passed as a list to function dispatcher, and will change how it operates.
FLAG_HIDE_ERRORS = (
"hide_errors" # Hide all errors that result from running the function.
)
class Formatting(enum.Enum):
PLAIN = 1
MARKDOWN = 2
HTML = 3
def __init__(
self,
server: 'Server',
channel: Optional['Channel'],
user: 'User',
text: str,
inbound: bool = True,
*,
menu_buttons: List[List['MenuButton']] = None
) -> None:
"""
:param user: User who sent the event, or None for outbound to channel
"""
ChannelUserTextEvent.__init__(
self, server, channel, user, text, inbound=inbound
)
self.command_name = None
self.command_args = None
self.is_prefixed, self.command_text = self.check_prefix()
self.formatting = EventMessage.Formatting.PLAIN
self.menu_buttons = menu_buttons
self._message_id = None
@property
def message_id(self) -> Optional[int]:
if isinstance(self.raw_data, RawDataTelegram):
return self.raw_data.update_obj.message.message_id
if isinstance(self.raw_data, RawDataTelegramOutbound):
return self.raw_data.sent_msg_object.message_id
return self._message_id
@property
def has_keyboard(self) -> bool:
return bool(self.menu_buttons)
@property
def has_photo(self) -> bool:
return False
def check_prefix(self) -> Tuple[Union[bool, str], Optional[str]]:
"""
Checks whether prefix was given, and if so, parses it out of command text.
:return: Returns whether prefix is given, and command text
"""
if self.channel is None:
return True, self.text
acting_prefix = self.channel.get_prefix()
if acting_prefix is False:
acting_prefix = self.server.get_nick().lower()
# Check if directly addressed
if any(self.text.lower().startswith(acting_prefix + x) for x in [":", ","]):
return True, self.text[len(acting_prefix) + 1:]
elif self.text.lower().startswith(acting_prefix):
return EventMessage.FLAG_HIDE_ERRORS, self.text[len(acting_prefix):]
else:
return False, None
elif self.text.lower().startswith(acting_prefix):
return True, self.text[len(acting_prefix):]
else:
return False, None
def split_command_text(self, command_name: str, command_args: str):
self.command_name = command_name
self.command_args = command_args
def get_log_line(self) -> str:
output = "<{}> {}".format(
self.user.name if self.is_inbound else self.server.get_nick(), self.text
)
return output
def create_response(
self,
text: str,
event_class: Optional['EventMessage'] = None,
menu_buttons: Optional[List[List[MenuButton]]] = None
) -> 'EventMessage':
if event_class is None:
event_class = self.__class__
resp = event_class(self.server, self.channel, self.user, text, inbound=False, menu_buttons=menu_buttons)
return resp
def create_edit(
self,
text: Optional[str] = None,
menu_buttons: Optional[List[List[MenuButton]]] = FLAG_MENU_UNCHANGED
) -> 'EventMessage':
if text is None:
text = self.text
if menu_buttons == FLAG_MENU_UNCHANGED:
menu_buttons = self.menu_buttons
edit = self.__class__(self.server, self.channel, self.user, text, inbound=False, menu_buttons=menu_buttons)
edit._message_id = self.message_id
return edit
def to_json(self) -> Dict:
data = super().to_json()
data[KEY_FORMATTING] = self.formatting.name
data["message_id"] = self.message_id
if self.menu_buttons:
data["menu_buttons"] = [
[
button.to_json() for button in row
] for row in self.menu_buttons
]
return data
class EventNotice(ChannelUserTextEvent):
def get_log_line(self) -> str:
output = "Notice from {}: {}".format(
self.user.name if self.is_inbound else self.server.get_nick(), self.text
)
return output
class EventCTCP(ChannelUserTextEvent):
def get_log_line(self) -> str:
ctcp_command = self.text.split()[0]
ctcp_arguments = " ".join(self.text.split()[1:])
user_name = self.user.name if self.is_inbound else self.server.get_nick()
if ctcp_command.lower() == "action":
output = "**{} {}**".format(user_name, ctcp_arguments)
else:
output = "<{} (CTCP)> {}".format(user_name, self.text)
return output
class EventMessageWithPhoto(EventMessage):
def __init__(
self,
server: 'Server',
channel: Optional['Channel'],
user: 'User',
text: str,
photo_id: Union[str, List[str]],
inbound: bool = True,
*,
menu_buttons: List[List[MenuButton]] = None
) -> None:
"""
:type server: server.Server
:type channel: destination.Channel | None
:param user: User who sent the event, or None for outbound to channel
:type user: destination.User | None
:type text: str
:type photo_id: Union[str, List[str]]
"""
super().__init__(server, channel, user, text, inbound=inbound, menu_buttons=menu_buttons)
self.photo_id = photo_id
def create_edit(
self,
text: Optional[str] = None,
menu_buttons: Optional[List[List[MenuButton]]] = FLAG_MENU_UNCHANGED
) -> 'EventMessage':
if text is None:
text = self.text
if menu_buttons == FLAG_MENU_UNCHANGED:
menu_buttons = self.menu_buttons
edit = self.__class__(
self.server,
self.channel,
self.user,
text,
self.photo_id,
inbound=False,
menu_buttons=menu_buttons
)
edit._message_id = self.message_id
return edit
    @property
    def has_photo(self) -> bool:
return True
def to_json(self) -> Dict:
data = super().to_json()
data[KEY_PHOTO_ID] = self.photo_id
return data
class EventMenuCallback(ChannelUserEvent):
def __init__(
self,
server: 'Server',
channel: 'Channel',
user: 'User',
message_id: int,
callback_data: str
) -> None:
super().__init__(server, channel, user)
self.message_id = message_id
self.callback_data = callback_data
```
#### File: Hallo/hallo/function.py
```python
from abc import ABC, abstractmethod
from typing import Set, Type, Optional
from hallo.events import (
EventSecond,
EventMinute,
EventHour,
EventDay,
EventPing,
EventMessage,
EventJoin,
EventLeave,
EventQuit,
EventNameChange,
EventKick,
EventInvite,
EventNotice,
EventMode,
EventCTCP, Event, ServerEvent,
)
class Function(ABC):
"""
Generic function object. All functions inherit from this.
"""
# Static constants
EVENT_SECOND = EventSecond # Event which happens every second
EVENT_MINUTE = EventMinute # Event which happens every minute
EVENT_HOUR = EventHour # Event which happens every hour
EVENT_DAY = EventDay # Event which happens every day
EVENT_PING = EventPing # Event constant signifying a server ping has been received
EVENT_MESSAGE = EventMessage # Event constant signifying a standard message
EVENT_JOIN = EventJoin # Event constant signifying someone joined a channel
EVENT_LEAVE = EventLeave # Event constant signifying someone left a channel
EVENT_QUIT = EventQuit # Event constant signifying someone disconnected
EVENT_CHNAME = (
EventNameChange # Event constant signifying someone changed their name
)
EVENT_KICK = EventKick # Event constant signifying someone was forcibly removed from the channel
EVENT_INVITE = (
EventInvite # Event constant signifying someone has invited hallo to a channel
)
EVENT_NOTICE = (
EventNotice # Event constant signifying a notice was received. (IRC only?)
)
EVENT_MODE = (
EventMode # Event constant signifying a channel mode change. (IRC only?)
)
EVENT_CTCP = (
EventCTCP # Event constant signifying a CTCP message received (IRC only)
)
# EVENT_NUMERIC = "numeric" # Event constant signifying a numeric message from a server (IRC only)
# EVENT_RAW = "raw" # Event constant signifying raw data received from server which doesn't fit the above
def __init__(self):
self.help_name = None # Name for use in help listing
self.names: Set[str] = set() # Set of names which can be used to address the function
self.help_docs = (
None # Help documentation, if it's just a single line, can be set here
)
@abstractmethod
def run(self, event: EventMessage) -> EventMessage:
"""Runs the function when it is called directly
:param event: Event which function wants running on, for which, this should be true:
(is_prefixed is not false and command_args is not None)
"""
raise NotImplementedError
@staticmethod
def is_persistent() -> bool:
"""Returns boolean representing whether this function is supposed to be persistent or not"""
return False
@staticmethod
def load_function() -> 'Function':
"""Loads the function, persistent functions only."""
return Function()
def save_function(self) -> None:
"""Saves the function, persistent functions only."""
return None
def get_passive_events(self) -> Set[Type[Event]]:
"""Returns a list of events which this function may want to respond to in a passive way"""
return set()
def passive_run(self, event: Event, hallo_obj) -> Optional[ServerEvent]:
"""Replies to an event not directly addressed to the bot.
:param event: Event which has called the function
:param hallo_obj: Hallo object which fired the event.
"""
pass
def get_help_name(self) -> str:
"""Returns the name to be printed for help documentation"""
if self.help_name is None:
raise NotImplementedError
return self.help_name
def get_help_docs(self) -> str:
"""
Returns the help documentation, specific to given arguments, if supplied
"""
if self.help_docs is None:
raise NotImplementedError
return self.help_docs
def get_names(self) -> Set[str]:
"""Returns the list of names for directly addressing the function"""
self.names.add(self.help_name)
return self.names
```
#### File: hallo/inc/commons.py
```python
import inspect
import datetime
import logging
import re
import json
import random
from datetime import timedelta
from typing import List, Optional, Dict, TypeVar, Union, Callable, Generic, Type
import requests
from publicsuffixlist import PublicSuffixList
logger = logging.getLogger(__name__)
T = TypeVar('T')
S = TypeVar('S')
class Commons(object):
"""
Class of commons methods, useful anywhere, but all static.
"""
@staticmethod
def chunk_string_dot(string: str, length: int) -> List[str]:
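        # Splits a string into chunks no longer than `length`, joined by "..." markers,
        # e.g. chunk_string_dot("abcdefghijkl", 10) -> ["abcdefg...", "...hijkl"]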
if len(string) <= length:
return [string]
else:
list_of_strings = [string[: length - 3] + "..."]
rest_of_string = string[length - 3:]
while len(rest_of_string) > length - 3:
list_of_strings += ["..." + rest_of_string[: length - 6] + "..."]
rest_of_string = rest_of_string[length - 6:]
list_of_strings += ["..." + rest_of_string]
return list_of_strings
@staticmethod
def read_file_to_list(filename: str) -> List[str]:
with open(filename, "r") as f:
file_list = []
raw_line = f.readline()
while raw_line != "":
file_list.append(raw_line.replace("\n", ""))
raw_line = f.readline()
return file_list
@staticmethod
def get_domain_name(url: str) -> str:
"""
Gets the domain name of a URL, removing the TLD
:param url: URL to find domain of
"""
# Sanitise the URL, removing protocol and directories
url = url.split("://")[-1]
url = url.split("/")[0]
url = url.split(":")[0]
# Get the public suffix
public_suffix = PublicSuffixList()
url_tld = public_suffix.publicsuffix(url)
        # Return the last domain segment before the public suffix
return url[: -len(url_tld) - 1].split(".")[-1]
@staticmethod
def string_to_bool(string: str) -> Optional[bool]:
"""
Converts a string to a boolean.
:param string: String to convert to boolean
"""
string = string.lower()
if string in ["1", "true", "t", "yes", "y", "on", "enabled", "enable"]:
return True
if string in ["0", "false", "f", "no", "n", "off", "disabled", "disable"]:
return False
return None
@staticmethod
def is_string_null(string: str) -> bool:
"""
Checks if a string could mean null.
:param string: String to check for meaning null
"""
string = string.lower()
if string in [
"0",
"false",
"off",
"disabled",
"disable",
"",
"nul",
"null",
"none",
"nil",
]:
return True
return False
@staticmethod
def ordinal(number: int) -> str:
"""
Returns the ordinal of a number
:param number: Number to get ordinal string for
        :type number: int
"""
if number % 10 == 1 and number % 100 != 11:
return "{}st".format(number)
elif number % 10 == 2 and number % 100 != 12:
return "{}nd".format(number)
elif number % 10 == 3 and number % 100 != 13:
return "{}rd".format(number)
else:
return "{}th".format(number)
@staticmethod
def format_unix_time(time_stamp: float) -> str:
"""
Returns a string, formatted datetime from a timestamp
:param time_stamp: unix timestamp
"""
return datetime.datetime.utcfromtimestamp(time_stamp).strftime(
"%Y-%m-%d %H:%M:%S"
)
@staticmethod
    def create_headers_dict(headers: Optional[List[List[str]]]) -> Dict[str, str]:
"""
Creates a headers dictionary, for requests, and adds user agent
:param headers: List of HTTP headers to add to request
"""
if headers is None:
headers = []
headers_dict = {"User-Agent": "Hallo IRCBot <EMAIL>"}
for header in headers:
headers_dict[header[0]] = header[1]
return headers_dict
@staticmethod
    def load_url_string(url: str, headers: Optional[List[List[str]]] = None) -> str:
        """
        Downloads the resource at the given URL and returns the response body as a string.
:param url: URL to download
:param headers: List of HTTP headers to add to request
"""
headers_dict = Commons.create_headers_dict(headers)
resp = requests.get(url, headers=headers_dict)
return resp.text
@staticmethod
    def load_url_json(url: str, headers: Optional[List[List[str]]] = None, json_fix: bool = False) -> Dict:
"""
Takes a url to a json resource, pulls it and returns a dictionary.
:param url: URL of json to download
:param headers: List of HTTP headers to add to request
:param json_fix: Whether to "fix" the JSON being returned for parse errors
"""
if headers is None:
headers = []
code = Commons.load_url_string(url, headers)
if json_fix:
code = re.sub(",+", ",", code)
code = code.replace("[,", "[").replace(",]", "]")
try:
output_dict = json.loads(code)
except Exception as e:
logger.error("Failed to parse received JSON: %s", code, exc_info=e)
raise e
return output_dict
@staticmethod
    def put_json_to_url(url: str, data: Dict, headers: Optional[List[List[str]]] = None) -> None:
"""
Converts data to JSON and PUT it to the specified URL
:param url: URL to send PUT request to
:param data: data to send, as JSON
:param headers: List of HTTP headers to add to the request
"""
headers_dict = Commons.create_headers_dict(headers)
requests.put(url, headers=headers_dict, json=data)
@staticmethod
def check_numbers(message: str) -> bool:
"""
Checks that an argument is purely numbers
:param message: String to check for pure number-ness
"""
message = message.lower().replace(" ", "")
if message.count(".") > 1:
return False
if message.replace(".", "").isdigit():
return True
return False
@staticmethod
def check_calculation(message: str) -> bool:
"""
Checks that an argument is purely numbers and calculation characters
:param message: String to be checked to see if it's a calculation
"""
message = message.strip().lower()
valid_chars = [str(x) for x in range(10)]
valid_chars += [".", ")", "^", "*", "x", "/", "%", "+", "-", "pi", "e", " "]
valid_chars += [
"acos(",
"asin(",
"atan(",
"cos(",
"sin(",
"tan(",
"sqrt(",
"log(",
]
valid_chars += [
"acosh(",
"asinh(",
"atanh(",
"cosh(",
"sinh(",
"tanh(",
"gamma(",
"(",
]
for char in valid_chars:
message = message.replace(char, "")
if message == "":
return True
else:
return False
@staticmethod
def is_float_string(float_string: str) -> bool:
"""
Checks whether a string is a valid float
:param float_string: String to check for validity in float conversion
"""
try:
float(float_string)
return True
except ValueError:
return False
@classmethod
def is_int_string(cls, int_string: str) -> bool:
try:
int(int_string)
return True
except ValueError:
return False
@staticmethod
def get_digits_from_start_or_end(string: str) -> Optional[str]:
"""
Gets the longest string of digits from the start or end of a string, or None
:param string: String to find sequence of digits from
"""
start_digits = [
string[:x]
for x in range(1, len(string) + 1)
if Commons.is_float_string(string[:x])
]
if len(start_digits) != 0:
return start_digits[-1]
end_digits = [
string[x:]
for x in range(len(string))
if Commons.is_float_string(string[x:])
]
if len(end_digits) != 0:
return end_digits[0]
return None
@staticmethod
def get_calc_from_start_or_end(string: str) -> Optional[str]:
"""
Gets the longest calculation of digits from the start or end of a string, or None
:param string: String to find calculation from
"""
start_digits = [
string[:x]
for x in range(1, len(string) + 1)
if Commons.check_calculation(string[:x])
]
if len(start_digits) != 0:
return start_digits[-1]
end_digits = [
string[x:]
for x in range(len(string))
if Commons.check_calculation(string[x:])
]
if len(end_digits) != 0:
return end_digits[0]
return None
@staticmethod
def list_greater(list_one: List[float], list_two: List[float]) -> Optional[bool]:
"""
Checks whether listOne is "greater" than listTwo.
Checks if an earlier element of listOne is greater than the equally placed element in listTwo
:param list_one: List of elements, for checking against listTwo
:param list_two: List of elements, to check against listOne
"""
if len(list_one) != len(list_two):
raise ValueError("Lists must be the same length.")
for index in range(len(list_one)):
if list_one[index] == list_two[index]:
continue
if list_one[index] > list_two[index]:
return True
return False
return None
@staticmethod
def get_random_int(min_int: int, max_int: int, count: int = 1) -> List[int]:
"""
Returns a list of random integers in a given range
:param min_int: Minimum integer to return
:param max_int: Maximum integer to return
:param count: Number of random integers to return
"""
# If there's no range, just return a list.
if min_int == max_int:
return [min_int] * count
output_list = []
for _ in range(count):
output_list.append(random.randint(min_int, max_int))
return output_list
@staticmethod
def get_random_choice(choice_list: List[T], count: int = 1) -> List[T]:
"""
Replacement for random.choice
:param choice_list: List of choices to choose from
:param count: Number of choices to pick
"""
rand_int = Commons.get_random_int(0, len(choice_list) - 1, count)
output_list = []
for x in range(count):
output_list.append(choice_list[rand_int[x]])
return output_list
@staticmethod
def upper(data: str) -> str:
"""
        Converts a string to upper case, leaving any URLs it contains unchanged
"""
# Find any URLs, convert line to uppercase, then convert URLs back to original
urls = re.findall(
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
data,
)
data = data.upper()
for url in urls:
data = data.replace(url.upper(), url)
return data
@staticmethod
def find_parameter(param_name: str, line: str) -> Optional[str]:
"""
Finds a parameter value in a line, if the format parameter=value exists in the line
"""
param_value = None
param_regex = re.compile(
r"(^|\s){}=([^\s]+)(\s|$)".format(param_name), re.IGNORECASE
)
param_search = param_regex.search(line)
if param_search is not None:
param_value = param_search.group(2)
return param_value
@staticmethod
def find_any_parameter(param_list: List[str], line: str) -> Union[str, bool]:
"""
Finds one of any parameter in a line.
"""
for param_name in param_list:
find = Commons.find_parameter(param_name, line)
if find is not None:
return find
return False
@staticmethod
def html_escape(string: str) -> str:
"""
Escapes a string to ensure it can be used in html without issues
"""
return string.replace("&", "&").replace("<", "<").replace(">", ">")
class CachedObject(Generic[S]):
def __init__(self, setter: Callable[[], S], cache_expiry: Optional[timedelta] = None) -> None:
"""
:type setter: Callable
:type cache_expiry: timedelta
"""
self.setter: Callable[[], S] = setter
self.cache_expiry: timedelta = (
cache_expiry if cache_expiry is not None else timedelta(minutes=5)
)
self.cache_time: Optional[datetime.datetime] = None
self.value: Optional[S] = None
def get(self) -> S:
if (
self.cache_time is None
or (self.cache_time + self.cache_expiry) < datetime.datetime.now()
):
self.value = self.setter()
self.cache_time = datetime.datetime.now()
return self.value
def inherits_from(child: Type, parent_name: str) -> bool:
if inspect.isclass(child):
if parent_name in [c.__name__ for c in inspect.getmro(child)[1:]]:
return True
return False
```
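A short usage sketch of two of the utilities above, `Commons.find_parameter` and `CachedObject`. The import path follows the file header, and `fetch_rates` is a hypothetical stand-in for a slow network call:

```python
from datetime import timedelta

from hallo.inc.commons import CachedObject, Commons

# find_parameter picks key=value tokens out of a command line.
line = "convert 5 unit=mile type=length"
assert Commons.find_parameter("unit", line) == "mile"
assert Commons.find_parameter("missing", line) is None

# CachedObject memoises an expensive call and refreshes it once the expiry window passes.
def fetch_rates():
    # Hypothetical stand-in for a slow call such as Commons.load_url_json(...)
    return {"EUR": 1.0, "USD": 1.08}

rates = CachedObject(fetch_rates, cache_expiry=timedelta(minutes=10))
print(rates.get())  # first call runs fetch_rates and caches the result
print(rates.get())  # calls within the next 10 minutes reuse the cached value
```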
#### File: modules/channel_control/invite.py
```python
from hallo.events import EventInvite
from hallo.function import Function
import hallo.modules.channel_control.channel_control
from hallo.server import Server
class Invite(Function):
"""
IRC only, invites users to a given channel.
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "invite"
# Names which can be used to address the function
self.names = {"invite"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = "Invite someone to a channel"
def run(self, event):
# Get server object
server_obj = event.server
# If server isn't IRC type, we can't invite people
if server_obj.type != Server.TYPE_IRC:
return event.create_response(
"Error, this function is only available for IRC servers."
)
# If 0 arguments, ask for clarification
line_split = event.command_args.split()
if len(line_split) == 0:
return event.create_response(
"Error, please specify a user to invite and/or a channel to invite to."
)
# If 1 argument, see if it's a channel or a user.
if len(line_split) == 1:
# If message was sent in private message, it's referring to a channel
if event.channel is None:
channel = server_obj.get_channel_by_name(event.command_args)
if channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(
event.command_args, server_obj.name
)
)
return event.create_response(self.send_invite(channel, event.user))
# See if it's a channel that hallo is in
test_channel = server_obj.get_channel_by_name(event.command_args)
if test_channel is not None and test_channel.in_channel:
return event.create_response(self.send_invite(test_channel, event.user))
# Argument must be a user?
target_user = server_obj.get_user_by_name(event.command_args)
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
event.command_args, server_obj.name
)
)
return event.create_response(self.send_invite(event.channel, target_user))
# If 2 arguments, try with first argument as channel
target_channel = server_obj.get_channel_by_name(line_split[0])
if target_channel is not None and target_channel.in_channel:
target_user = server_obj.get_user_by_name(line_split[1])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[1], server_obj.name
)
)
return event.create_response(self.send_invite(target_channel, target_user))
# 2 args, try with second argument as channel
target_user = server_obj.get_user_by_name(line_split[0])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(line_split[0], server_obj.name)
)
target_channel = server_obj.get_channel_by_name(line_split[1])
if target_channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(line_split[1], server_obj.name)
)
return event.create_response(self.send_invite(target_channel, target_user))
def send_invite(self, channel, user):
"""
Sends an invite to a specified user to join a given channel.
:param channel: Channel to invite target to
:type channel: destination.Channel
:param user: User to invite to channel
:type user: destination.User
:return: Response to send to requester
:rtype: str
"""
# Check if in channel
if not channel.in_channel:
return "Error, I'm not in that channel."
# Check if user is in channel
if user in channel.get_user_list():
return "Error, {} is already in {}".format(user.name, channel.name)
# Check if hallo has op in channel
if not hallo.modules.channel_control.channel_control.hallo_has_op(channel):
return "Error, I don't have power to invite users in {}.".format(
channel.name
)
# Send invite
invite_evt = EventInvite(channel.server, channel, None, user, inbound=False)
channel.server.send(invite_evt)
return "Invite sent."
```
#### File: modules/channel_control/kick.py
```python
from hallo.events import EventKick
from hallo.function import Function
import hallo.modules.channel_control.channel_control
from hallo.server import Server
class Kick(Function):
"""
Kicks a specified user from a specified channel. IRC Only.
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "kick"
# Names which can be used to address the function
self.names = {"kick"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Kick given user in given channel, or current channel if no channel given."
)
def run(self, event):
# Get server object
server_obj = event.server
        # If server isn't IRC type, we can't kick people
if server_obj.type != Server.TYPE_IRC:
return event.create_response(
"Error, this function is only available for IRC servers."
)
# If 0 arguments, ask for clarification
line_split = event.command_args.split()
if len(line_split) == 0:
return event.create_response(
"Error, please specify a user to kick and/or a channel to kick from."
)
# If 1 argument, see if it's a channel or a user.
if len(line_split) == 1:
# If message was sent in private message, it's referring to a channel
if event.channel is None:
channel = server_obj.get_channel_by_name(event.command_args)
if channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(
event.command_args, server_obj.name
)
)
return event.create_response(self.send_kick(channel, event.user))
# See if it's a channel that hallo is in
test_channel = server_obj.get_channel_by_name(event.command_args)
if test_channel is not None and test_channel.in_channel:
return event.create_response(self.send_kick(test_channel, event.user))
# Argument must be a user?
target_user = server_obj.get_user_by_name(event.command_args)
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
event.command_args, server_obj.name
)
)
return event.create_response(self.send_kick(event.channel, target_user))
if len(line_split) == 2:
# If message was in private message, it's either channel and user, user and channel or channel and message
if event.channel is None:
target_channel = server_obj.get_channel_by_name(line_split[0])
if target_channel is not None:
if target_channel.in_channel:
target_user = server_obj.get_user_by_name(line_split[1])
if (
target_user is not None
and target_channel.is_user_in_channel(target_user)
):
return event.create_response(
self.send_kick(target_channel, target_user)
)
return event.create_response(
self.send_kick(target_channel, event.user, line_split[1])
)
return event.create_response("Error, I am not in that channel.")
target_user = server_obj.get_user_by_name(line_split[0])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[0], server_obj.name
)
)
target_channel = server_obj.get_channel_by_name(line_split[1])
if target_channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[1], server_obj.name
)
)
return event.create_response(
self.send_kick(target_channel, target_user)
)
# If 2 arguments, try with first argument as channel
target_channel = server_obj.get_channel_by_name(line_split[0])
if target_channel is not None and target_channel.in_channel:
target_user = server_obj.get_user_by_name(line_split[1])
if target_user is not None and target_channel.is_user_in_channel(
target_user
):
return event.create_response(
self.send_kick(target_channel, target_user)
)
return event.create_response(
self.send_kick(target_channel, event.user, line_split[1])
)
# 2 args, try with second argument as channel
target_user = server_obj.get_user_by_name(line_split[0])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[0], server_obj.name
)
)
target_channel = server_obj.get_channel_by_name(line_split[1])
if target_channel is not None and target_channel.in_channel:
return event.create_response(
self.send_kick(target_channel, target_user)
)
return event.create_response(
self.send_kick(event.channel, target_user, line_split[1])
)
# If message was in private message, it's either channel, user and message or user, channel and message or
# channel and message
if event.channel is None:
target_channel = server_obj.get_channel_by_name(line_split[0])
if target_channel is not None:
if target_channel.in_channel:
target_user = server_obj.get_user_by_name(line_split[1])
if target_user is not None and target_channel.is_user_in_channel(
target_user
):
return event.create_response(
self.send_kick(
target_channel, target_user, " ".join(line_split[2:])
)
)
return event.create_response(
self.send_kick(
target_channel, event.user, " ".join(line_split[1:])
)
)
return event.create_response("Error, I am not in that channel.")
target_user = server_obj.get_user_by_name(line_split[0])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[0], server_obj.name
)
)
target_channel = server_obj.get_channel_by_name(line_split[1])
if target_channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[1], server_obj.name
)
)
return event.create_response(
self.send_kick(target_channel, target_user, " ".join(line_split[2:]))
)
# If more than 2 arguments, determine which of the first 2 is channel/user, the rest is a message.
target_channel = server_obj.get_channel_by_name(line_split[0])
if target_channel is not None and target_channel.in_channel:
target_user = server_obj.get_user_by_name(line_split[1])
if target_user is not None and target_channel.is_user_in_channel(
target_user
):
return event.create_response(
self.send_kick(
target_channel, target_user, " ".join(line_split[2:])
)
)
return event.create_response(
self.send_kick(target_channel, event.user, " ".join(line_split[1:]))
)
# 2 args, try with second argument as channel
target_user = server_obj.get_user_by_name(line_split[0])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(line_split[0], server_obj.name)
)
target_channel = server_obj.get_channel_by_name(line_split[1])
if target_channel is not None and target_channel.in_channel:
return event.create_response(
self.send_kick(target_channel, target_user, " ".join(line_split[2:]))
)
return event.create_response(
self.send_kick(event.channel, target_user, " ".join(line_split[1:]))
)
def send_kick(self, channel, user, message=""):
"""
        Kicks a specified user from a given channel.
        :param channel: Channel to kick the user from
        :type channel: destination.Channel
        :param user: User to kick from the channel
        :type user: destination.User
:param message: Kick message to send
:type message: str
:return: Response to send to requester
:rtype: str
"""
# Check if in channel
if not channel.in_channel:
return "Error, I'm not in that channel."
# Check if user is in channel
if user not in channel.get_user_list():
return "Error, {} is not in {}.".format(user.name, channel.name)
# Check if hallo has op in channel
if not hallo.modules.channel_control.channel_control.hallo_has_op(channel):
return "Error, I don't have power to kick users from {}.".format(
channel.name
)
        # Send kick
kick_evt = EventKick(
channel.server, channel, None, user, message, inbound=False
)
channel.server.send(kick_evt)
return "Kicked {} from {}.".format(user.name, channel.name)
```
#### File: modules/convert/convert_unit_remove_name.py
```python
from hallo.function import Function
from hallo.inc.input_parser import InputParser
class ConvertUnitRemoveName(Function):
"""
Removes a name or abbreviation from a unit, unless it's the last name.
"""
NAMES_UNIT = ["unit", "u"]
NAMES_TYPE = ["type", "t"]
NAMES_DEL = ["delete name", "remove name", "del", "delete", "remove"]
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "convert unit remove name"
# Names which can be used to address the Function
self.names = {
"convert unit remove name",
"convert unit delete name",
"convert unit remove abbreviation",
"convert unit delete abbreviation",
"convert unit remove abbr",
"convert unit delete abbr",
"convert remove unit name",
"convert delete unit name",
"convert remove unit abbreviation",
"convert delete unit abbreviation",
"convert remove unit abbr",
"convert delete unit abbr",
}
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Removes a name or abbreviation from a unit, unless it's the last name."
)
def run(self, event):
# Load repo, clean line
function_dispatcher = event.server.hallo.function_dispatcher
convert_function = function_dispatcher.get_function_by_name("convert")
convert_function_obj = function_dispatcher.get_function_object(
convert_function
) # type: Convert
repo = convert_function_obj.convert_repo
# Parse input
parsed = InputParser(event.command_args)
# Check if unit is defined
unit_name = parsed.get_arg_by_names(self.NAMES_UNIT)
# Check if type is defined
type_obj = None
type_name = parsed.get_arg_by_names(self.NAMES_TYPE)
if type_name is not None:
type_obj = repo.get_type_by_name(type_name)
if type_obj is None:
return event.create_response("Invalid type specified.")
# Check if delete name is specified
del_name = parsed.get_arg_by_names(self.NAMES_DEL)
if del_name is None:
del_name = parsed.remaining_text
# Check if description is sufficient to narrow it to 1 and only 1 unit
unit_list = (
repo.get_full_unit_list()
if type_obj is None
else type_obj.get_full_unit_list()
)
user_unit_options = []
for unit_obj in unit_list:
# if unit name is defined and not a valid name for the unit, skip it.
if unit_name is not None and not unit_obj.has_name(unit_name):
continue
# If input_name is not a valid name for the unit, skip it.
if not unit_obj.has_name(del_name):
continue
# Otherwise it's the one, add it to the list
user_unit_options.append(unit_obj)
# Check if that narrowed it down correctly.
if len(user_unit_options) == 0:
return event.create_response(
"There are no units matching that description."
)
if len(user_unit_options) >= 2:
return event.create_response("It is ambiguous which unit you refer to.")
# Check this unit has other names.
user_unit = user_unit_options[0]
if len(user_unit.name_list) == 1:
return event.create_response(
"This unit only has 1 name, you cannot remove its last name."
)
# Remove name
user_unit.remove_name(del_name)
# Save repo
repo.save_json()
# Output
return event.create_response(
'Removed name "{}" from "{}" unit.'.format(del_name, user_unit.name_list[0])
)
```
#### File: modules/convert/update_currencies.py
```python
import logging
import time
from xml.dom import minidom
from hallo.events import EventHour
from hallo.function import Function
from hallo.inc.commons import Commons
logger = logging.getLogger(__name__)
class UpdateCurrencies(Function):
"""
Updates all currencies in the ConvertRepo
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "update currencies"
# Names which can be used to address the Function
self.names = {
"update currencies",
"convert update currencies",
"currency update",
"update currency",
"currencies update",
}
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Update currency conversion figures, using data from the money converter, the European "
"central bank, forex and preev."
)
def run(self, event):
# Get convert repo
function_dispatcher = event.server.hallo.function_dispatcher
convert_function = function_dispatcher.get_function_by_name("convert")
convert_function_obj = function_dispatcher.get_function_object(
convert_function
) # type: Convert
repo = convert_function_obj.convert_repo
# Update all sources
output_lines = self.update_all(repo)
# Return output
return event.create_response("\n".join(output_lines))
def get_passive_events(self):
return {EventHour}
def passive_run(self, event, hallo_obj):
# Get convert repo
function_dispatcher = hallo_obj.function_dispatcher
convert_function = function_dispatcher.get_function_by_name("convert")
convert_function_obj = function_dispatcher.get_function_object(
convert_function
) # type: Convert
repo = convert_function_obj.convert_repo
# Update all sources
output_lines = self.update_all(repo)
for line in output_lines:
logger.info(line)
return None
def update_all(self, repo):
output_lines = []
# Update with the European Bank
try:
output_lines.append(
self.update_from_european_bank_data(repo)
or "Updated currency data from the European Central Bank."
)
except Exception as e:
output_lines.append(
"Failed to update European Central Bank data. {}".format(e)
)
# Update with Forex
try:
output_lines.append(
self.update_from_forex_data(repo) or "Updated currency data from Forex."
)
except Exception as e:
output_lines.append("Failed to update Forex data. {}".format(e))
        # Update with Cryptonator
try:
output_lines.append(
self.update_from_cryptonator_data(repo)
or "Updated currency data from Cryptonator."
)
except Exception as e:
output_lines.append("Failed to update Cryptonator data. {}".format(e))
# Save repo
repo.save_json()
return output_lines
def update_from_european_bank_data(self, repo):
"""
        Updates the value of conversion currency units using European Central Bank data.
:type repo: ConvertRepo
"""
# Get currency ConvertType
currency_type = repo.get_type_by_name("currency")
# Pull xml data from european bank website
url = "https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml"
xml_string = Commons.load_url_string(url)
# Parse data
doc = minidom.parseString(xml_string)
root = doc.getElementsByTagName("gesmes:Envelope")[0]
cube_one_elem = root.getElementsByTagName("Cube")[0]
cube_two_elem = cube_one_elem.getElementsByTagName("Cube")[0]
for cube_three_elem in cube_two_elem.getElementsByTagName("Cube"):
# Get currency code from currency Attribute
currency_code = cube_three_elem.getAttributeNode("currency").nodeValue
# Get value from rate attribute and get reciprocal.
currency_value = 1 / float(
cube_three_elem.getAttributeNode("rate").nodeValue
)
# Get currency unit
currency_unit = currency_type.get_unit_by_name(currency_code)
# If unrecognised currency, SKIP
if currency_unit is None:
continue
# Set Value
currency_unit.update_value(currency_value)
def update_from_forex_data(self, repo):
"""
Updates the value of conversion currency units using Forex data.
:type repo: ConvertRepo
"""
# Get currency ConvertType
currency_type = repo.get_type_by_name("currency")
# Pull xml data from forex website
url = "https://rates.fxcm.com/RatesXML3"
xml_string = Commons.load_url_string(url)
# Parse data
doc = minidom.parseString(xml_string)
rates_elem = doc.getElementsByTagName("Rates")[0]
for rate_elem in rates_elem.getElementsByTagName("Rate"):
# Get data from element
symbol_data = rate_elem.getElementsByTagName("Symbol")[0].firstChild.data
if not symbol_data.startswith("EUR"):
continue
bid_data = float(rate_elem.getElementsByTagName("Bid")[0].firstChild.data)
ask_data = float(rate_elem.getElementsByTagName("Ask")[0].firstChild.data)
# Get currency code and value from data
currency_code = symbol_data[3:]
currency_value = 1 / (0.5 * (bid_data + ask_data))
# Get currency unit
currency_unit = currency_type.get_unit_by_name(currency_code)
# If unrecognised code, skip
if currency_unit is None:
continue
# Set Value
currency_unit.update_value(currency_value)
def update_from_cryptonator_data(self, repo):
"""
Updates the value of conversion cryptocurrencies using cryptonator data.
:type repo: ConvertRepo
"""
# Get currency ConvertType
currency_type = repo.get_type_by_name("currency")
        # Pull JSON data from the Cryptonator API, one currency code at a time
currency_codes = ["LTC", "BTC", "BCH", "DOGE", "XMR", "ETH", "ETC", "DASH"]
for code in currency_codes:
# Get data
try:
data = Commons.load_url_json(
"https://api.cryptonator.com/api/ticker/{}-eur".format(code)
)
except Exception as e:
                # If it failed to parse the JSON, give it another go
# Cryptonator API returns HTML sometimes. I don't know why.
if "Expecting value:" in str(e):
time.sleep(5)
data = Commons.load_url_json(
"https://api.cryptonator.com/api/ticker/{}-eur".format(code)
)
else:
raise e
# Get the ConvertUnit object for the currency reference
currency_unit = currency_type.get_unit_by_name(code)
if currency_unit is None:
continue
# Update the value
currency_unit.update_value(data["ticker"]["price"])
```
#### File: modules/dailys/dailys.py
```python
import hallo.modules.dailys.dailys_field_factory
from hallo.events import EventMessage
from hallo.function import Function
import hallo.modules.dailys.dailys_field
import hallo.modules.dailys.dailys_repo
class Dailys(Function):
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "dailys"
# Names which can be used to address the function
self.names = {"dailys"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Core dailys method, does all the dailys processing passively."
" Doesn't do anything (currently) when called actively."
)
self.dailys_repo = None
""" :type : DailysRepo | None"""
def get_dailys_repo(self, hallo_obj):
if self.dailys_repo is None:
self.dailys_repo = hallo.modules.dailys.dailys_repo.DailysRepo.load_json(hallo_obj)
return self.dailys_repo
@staticmethod
def is_persistent():
return True
@staticmethod
def load_function():
"""Loads the function, persistent functions only."""
return Dailys()
def save_function(self):
"""Saves the function, persistent functions only."""
if self.dailys_repo is not None:
self.dailys_repo.save_json()
def get_passive_events(self):
"""Returns a list of events which this function may want to respond to in a passive way"""
return set(
[
event
for field in hallo.modules.dailys.dailys_field_factory.DailysFieldFactory.fields
for event in field.passive_events()
]
)
def run(self, event):
if event.text.strip().lower() in ["reload", "redeploy", "refresh"]:
self.dailys_repo.save_json()
self.dailys_repo = None
self.get_dailys_repo(event.server.hallo)
return event.reply(event.create_response("Dailys repository reloaded."))
return event.reply(event.create_response("Dailys system does not understand this command."))
def passive_run(self, event, hallo_obj):
repo = self.get_dailys_repo(hallo_obj)
spreadsheets = repo.spreadsheets
if isinstance(event, EventMessage):
msg_spreadsheet = repo.get_by_location(event)
if msg_spreadsheet is None:
return
spreadsheets = [msg_spreadsheet]
for spreadsheet in spreadsheets:
for field in spreadsheet.fields_list:
if event.__class__ in field.passive_events():
try:
field.passive_trigger(event)
except Exception as e:
hallo.modules.dailys.dailys_field.logger.error("Dailys failure: ", exc_info=e)
```
#### File: modules/dailys/field_sleep.py
```python
from datetime import timedelta
from hallo.events import EventMessage
import hallo.modules.dailys.dailys_field
class DailysSleepField(hallo.modules.dailys.dailys_field.DailysField):
# Does sleep and wake times, sleep notes, dream logs, shower?
type_name = "sleep"
WAKE_WORDS = ["morning", "wake", "woke"]
SLEEP_WORDS = ["goodnight", "sleep", "nini", "night"]
json_key_wake_time = "wake_time"
json_key_sleep_time = "sleep_time"
json_key_interruptions = "interruptions"
@staticmethod
def create_from_input(event, spreadsheet):
return DailysSleepField(spreadsheet)
@staticmethod
def passive_events():
return [EventMessage]
def passive_trigger(self, evt):
"""
:type evt: EventMessage
:rtype: None
"""
input_clean = evt.text.strip().lower()
now = evt.get_send_time()
time_str = now.isoformat()
sleep_date = evt.get_send_time().date()
current_data = self.load_data(sleep_date)
if current_data is None:
current_data = dict()
yesterday_date = sleep_date - timedelta(1)
yesterday_data = self.load_data(yesterday_date)
if yesterday_data is None:
yesterday_data = dict()
# If user is waking up
if input_clean in DailysSleepField.WAKE_WORDS:
# If today's data is blank, write in yesterday's sleep data
if len(current_data) == 0:
current_data = yesterday_data
sleep_date = yesterday_date
# If you already woke in this data, why are you waking again?
if self.json_key_wake_time in current_data:
self.message_channel("Didn't you already wake up?")
return
# If not, add a wake time to sleep data
else:
current_data[self.json_key_wake_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Good morning!")
return
# If user is going to sleep
if input_clean in DailysSleepField.SLEEP_WORDS:
            # If it's earlier than 5pm, it's probably yesterday's sleep.
if now.hour <= 16:
current_data = yesterday_data
sleep_date = yesterday_date
# Did they already go to sleep?
if self.json_key_sleep_time in current_data:
# Did they already wake? If not, they're updating their sleep time.
if self.json_key_wake_time not in current_data:
current_data[self.json_key_sleep_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Good night again!")
return
# Move the last wake time to interruptions
interruption = dict()
interruption[self.json_key_wake_time] = current_data.pop(
self.json_key_wake_time
)
interruption[self.json_key_sleep_time] = time_str
if self.json_key_interruptions not in current_data:
current_data[self.json_key_interruptions] = []
current_data[self.json_key_interruptions].append(interruption)
self.save_data(current_data, sleep_date)
self.message_channel("Oh, going back to sleep? Sleep well!")
return
# Otherwise they're headed to sleep
else:
current_data[self.json_key_sleep_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Goodnight!")
return
def to_json(self):
json_obj = dict()
json_obj["type_name"] = self.type_name
return json_obj
@staticmethod
def from_json(json_obj, spreadsheet):
return DailysSleepField(spreadsheet)
```
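For reference, this is the shape of one day's saved record that `DailysSleepField.passive_trigger` above assembles after a night containing one interruption; the timestamps are illustrative ISO strings of the kind produced by `evt.get_send_time().isoformat()`:

```python
# Illustrative shape only: one day's sleep data after "goodnight", a night-time wake,
# going back to sleep, and a final "morning" message.
sleep_record = {
    "sleep_time": "2023-05-01T23:10:00",          # first "goodnight"
    "interruptions": [
        {
            "wake_time": "2023-05-02T03:05:00",   # woke during the night...
            "sleep_time": "2023-05-02T03:20:00",  # ...then went back to sleep
        }
    ],
    "wake_time": "2023-05-02T08:30:00",           # final "morning"
}
```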
#### File: modules/furry/e621.py
```python
from hallo.function import Function
from hallo.inc.commons import Commons
class E621(Function):
"""
Returns a random image from e621
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "e621"
# Names which can be used to address the function
self.names = {"e621"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = "Returns a random e621 result using the search you specify. Format: e621 <tags>"
def run(self, event):
search_result = self.get_random_link_result(event.command_args)
if search_result is None:
return event.create_response("No results.")
else:
link = "https://e621.net/posts/{}".format(search_result["id"])
if search_result["post"]["rating"] == "e":
rating = "(Explicit)"
elif search_result["post"]["rating"] == "q":
rating = "(Questionable)"
elif search_result["post"]["rating"] == "s":
rating = "(Safe)"
else:
rating = "(Unknown)"
line_response = event.command_args.strip()
return event.create_response(
'e621 search for "{}" returned: {} {}'.format(
line_response, link, rating
)
)
def get_random_link_result(self, search):
"""Gets a random link from the e621 api."""
line_clean = search.replace(" ", "%20")
url = "https://e621.net/posts.json?tags=order:random%20score:%3E0%20{}%20&limit=1".format(
line_clean
)
return_list = Commons.load_url_json(url)
if len(return_list["posts"]) == 0:
return None
else:
result = return_list["posts"][0]
return result
```
#### File: modules/furry/random_porn.py
```python
from hallo.function import Function
class RandomPorn(Function):
"""
Returns a random explicit image from e621
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "random porn"
# Names which can be used to address the function
self.names = {"random porn", "randomporn"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Returns a random explicit e621 result using the search you specify. "
"Format: random porn <tags>"
)
def run(self, event):
line_unclean = "{} -rating:s".format(event.command_args.strip())
function_dispatcher = event.server.hallo.function_dispatcher
e621_class = function_dispatcher.get_function_by_name("e621")
e621_obj = function_dispatcher.get_function_object(e621_class) # type: E621
search_result = e621_obj.get_random_link_result(line_unclean)
if search_result is None:
return event.create_response("No results.")
else:
link = "https://e621.net/posts/{}".format(search_result["id"])
if search_result["post"]["rating"] == "e":
rating = "(Explicit)"
elif search_result["post"]["rating"] == "q":
rating = "(Questionable)"
elif search_result["post"]["rating"] == "s":
rating = "(Safe)"
else:
rating = "(Unknown)"
line_response = event.command_args.strip()
return event.create_response(
'e621 search for "{}" returned: {} {}'.format(
line_response, link, rating
)
)
```
#### File: modules/random/chosen_one.py
```python
from hallo.function import Function
from hallo.inc.commons import Commons
class ChosenOne(Function):
"""
Selects a random user from a channel
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "chosen one"
# Names which can be used to address the function
self.names = {"chosen one", "chosenone", "random user"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = "Specifies who the chosen one is. Format: chosen one"
def run(self, event):
# If this command is run in privmsg, it won't work
if event.channel is None:
return event.create_response("This function can only be used in a channel")
# Get the user list
user_set = event.channel.get_user_list()
# Get list of users' names
names_list = [user_obj.name for user_obj in user_set]
rand_name = Commons.get_random_choice(names_list)[0]
return event.create_response(
"It should be obvious by now that {} is the chosen one.".format(rand_name)
)
```
#### File: modules/random/ouija.py
```python
from hallo.function import Function
from hallo.inc.commons import Commons
class Ouija(Function):
"""
Ouija board function. "Ouija board" is copyright Hasbro.
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "ouija"
# Names which can be used to address the function
self.names = {
"ouija",
"ouija board",
"random words",
"message from the other side",
}
# Help documentation, if it's just a single line, can be set here
self.help_docs = "Ouija board function. Format: ouija <message>"
def run(self, event):
word_list = Commons.read_file_to_list("store/ouija_wordlist.txt")
num_words = Commons.get_random_int(1, 3)[0]
rand_words = Commons.get_random_choice(word_list, num_words)
output_string = "I'm getting a message from the other side... {}.".format(
" ".join(rand_words)
)
return event.create_response(output_string)
```
#### File: modules/subscriptions/source_e621_tagging.py
```python
from typing import List, Optional, Dict, TYPE_CHECKING
from yippi import Post, Rating, YippiClient
import hallo.modules.subscriptions.common_e6_key
import hallo.modules.subscriptions.source_e621
import hallo.modules.subscriptions.stream_source
import hallo.modules.subscriptions.subscription_exception
from hallo.destination import Channel, User, Destination
from hallo.events import EventMessage, EventMessageWithPhoto, EventMenuCallback, MenuButton
from hallo.inc.input_parser import InputParser
from hallo.inc.menus import Menu
from hallo.server import Server
if TYPE_CHECKING:
from hallo.hallo import Hallo
import hallo.modules.subscriptions.subscription_check
import hallo.modules.subscriptions.subscription_repo
def buttons_for_submission(tag_results: Dict[str, bool], page: int = 1) -> List[List[MenuButton]]:
columns = 2
rows = 6
per_page = columns * rows
tag_names = sorted(tag_results.keys())
pages = ((len(tag_results) - 1) // per_page) + 1
tag_names_on_page = tag_names[(page - 1) * per_page: page * per_page]
buttons = []
button_row = []
for tag_name in tag_names_on_page:
button_emoji = "\u2714" if tag_results[tag_name] else "\u274C"
button_row.append(MenuButton(f"{button_emoji} {tag_name}", f"tag:{tag_name}"))
if len(button_row) >= columns:
buttons.append(button_row)
button_row = []
if button_row:
buttons.append(button_row)
# Bottom row
bottom_row = []
if page > 1:
bottom_row.append(MenuButton("\u23EE️Back", f"page:{page-1}"))
bottom_row.append(MenuButton("\U0001F504Refresh", "refresh"))
bottom_row.append(MenuButton("\U0001F4BESubmit", "submit"))
if page < pages:
bottom_row.append(MenuButton("\u23EDNext", f"page:{page+1}"))
buttons.append(bottom_row)
return buttons
def text_for_post(item: 'Post', *, prefix: str = None, suffix: str = None) -> str:
link = f"https://e621.net/posts/{item.id}"
# Create rating string
rating_dict = {Rating.EXPLICIT: "(Explicit)", Rating.QUESTIONABLE: "(Questionable)", Rating.SAFE: "(Safe)"}
rating = rating_dict.get(item.rating, "(Unknown)")
# Construct output
output = f'{link} {rating}.'
if prefix is not None:
output = prefix + "\n" + output
if suffix is not None:
output += "\n" + suffix
return output
class E621TaggingMenu(Menu):
type = "e621_tagging"
def __init__(
self,
msg: 'EventMessage',
user: 'User',
e6_client: YippiClient,
post_id: int,
search: str,
tag_results: Dict[str, bool],
page: int = 1
) -> None:
super().__init__(msg)
self.user = user
self.e6_client = e6_client
self.post_id = post_id
self.search = search
self.tag_results = tag_results
self.page = page
self.clicked = False
def text_for_post(self, item: 'Post', suffix: str = None) -> str:
prefix = f'Update on "{self.search}" tagging e621 search.'
return text_for_post(item, prefix=prefix, suffix=suffix)
def handle_callback(self, event: 'EventMenuCallback') -> None:
if self.clicked:
return
self.clicked = True
if event.callback_data.startswith("page:"):
self.page = int(event.callback_data.split(":")[1])
return self.update_tag_menu()
if event.callback_data.startswith("tag:"):
tag = event.callback_data.split(":", 1)[1]
self.tag_results[tag] = not self.tag_results[tag]
return self.update_tag_menu()
if event.callback_data == "refresh":
post = self.e6_client.post(self.post_id)
post_tags = [tag for tag_list in post.tags.values() for tag in tag_list]
old_tag_results = self.tag_results
self.tag_results = {tag: tag in post_tags for tag in self.tag_results.keys()}
if old_tag_results != self.tag_results:
return self.update_tag_menu()
else:
self.clicked = False
return
if event.callback_data == "submit":
post = self.e6_client.post(self.post_id)
negative_tags = set(tag for tag in self.tag_results.keys() if self.tag_results[tag] is False)
positive_tags = set(tag for tag in self.tag_results.keys() if self.tag_results[tag] is True)
current_tags = set(tag for tag_list in post.tags.values() for tag in tag_list)
new_tags = positive_tags - current_tags
del_tags = negative_tags.intersection(current_tags)
menu_buttons = [
[MenuButton("Save", "save")],
[MenuButton("Cancel", "cancel")]
]
if not new_tags and not del_tags:
text = self.text_for_post(post, "This will not make any changes, are you sure?")
return self.update(text, menu_buttons)
suffix = ["This will make these changes"]
if new_tags:
suffix.append("Add tags: " + ", ".join(new_tags))
if del_tags:
suffix.append("Remove tags: " + ", ".join(del_tags))
text = self.text_for_post(post, "\n".join(suffix))
return self.update(text, menu_buttons)
if event.callback_data == "cancel":
post = self.e6_client.post(self.post_id)
text = self.text_for_post(post)
menu_buttons = buttons_for_submission(self.tag_results, self.page)
return self.update(text, menu_buttons)
if event.callback_data == "save":
post = self.e6_client.post(self.post_id)
negative_tags = set(tag for tag in self.tag_results.keys() if self.tag_results[tag] is False)
positive_tags = set(tag for tag in self.tag_results.keys() if self.tag_results[tag] is True)
current_tags = set(tag for tag_list in post.tags.values() for tag in tag_list)
new_tags = positive_tags - current_tags
del_tags = negative_tags.intersection(current_tags)
text = self.text_for_post(post)
if not new_tags and not del_tags:
return self.update(text, None)
new_tag_dict = {
tag_key: [tag for tag in tag_list if tag not in negative_tags]
for tag_key, tag_list in post.tags.items()
}
new_tag_dict["general"].extend(positive_tags)
post.tags = new_tag_dict
has_notes = post._original_data["has_notes"]
post.update(has_notes=has_notes, reason="Tag change via Hallo bot")
return self.update(text, None)
def update_tag_menu(self) -> None:
buttons = buttons_for_submission(self.tag_results, self.page)
self.update(None, buttons)
def update(self, text: Optional[str], menu_buttons: Optional[List[List[MenuButton]]]) -> None:
new_event = self.msg.create_edit(text=text, menu_buttons=menu_buttons)
self.msg.server.edit(self.msg, new_event)
self.msg = new_event
self.clicked = False
@classmethod
def from_json(cls, hallo_obj: 'Hallo', msg: 'EventMessage', data: Dict) -> 'Menu':
server = hallo_obj.get_server_by_name(msg.server_name)
user = server.get_user_by_address(data["user_addr"])
if user is None:
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
"Could not find user matching address `{}`".format(data["user_addr"])
)
function_dispatcher = hallo_obj.function_dispatcher
sub_check_class = function_dispatcher.get_function_by_name("check subscription")
sub_check_obj: hallo.modules.subscriptions.subscription_check.SubscriptionCheck = function_dispatcher.get_function_object(
sub_check_class
)
sub_repo = sub_check_obj.get_sub_repo(hallo_obj)
e6_keys = sub_repo.get_common_config_by_type(hallo.modules.subscriptions.common_e6_key.E6KeysCommon)
e6_client = e6_keys.get_client_by_user(user)
return cls(
msg,
user,
e6_client,
data["post_id"],
data["search"],
data["tag_results"],
data["page"]
)
def to_json(self) -> Dict:
return {
"user_addr": self.user.address,
"post_id": self.post_id,
"search": self.search,
"tag_results": self.tag_results,
"page": self.page
}
class E621TaggingSource(hallo.modules.subscriptions.source_e621.E621Source):
type_name = "e621_tagging"
type_names: List[str] = ["e621 tagging", "e621 tagging search", "tagging e621"]
def __init__(
self,
search: str,
e6_client: YippiClient,
sub_repo: 'hallo.modules.subscriptions.subscription_repo.SubscriptionRepo',
owner: User,
tags: List[str],
last_keys: Optional[List[hallo.modules.subscriptions.stream_source.Key]] = None
):
super().__init__(search, e6_client, owner, last_keys)
self.sub_repo = sub_repo
self.owner = owner
self.tags: List[str] = tags
@classmethod
def from_input(cls, argument: str, user: User, sub_repo) -> 'E621TaggingSource':
parsed = InputParser(argument)
tags_arg = parsed.get_arg_by_names(
["tags", "watched_tags", "to_tag", "watched tags", "to tag", "watch"]
)
search_arg = parsed.get_arg_by_names(
[
"search",
"query",
"search_query",
"search query",
"subscription",
"sub",
"search_term",
"search term",
]
)
if tags_arg is not None:
tags = tags_arg.split()
if search_arg is not None:
search = search_arg
else:
search = parsed.remaining_text
else:
if search_arg is not None:
search = search_arg
tags = parsed.remaining_text.split()
else:
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
'You need to specify a search term with search="search term" and '
'tags to watch with tags="tags to watch"'
)
e6_keys = sub_repo.get_common_config_by_type(hallo.modules.subscriptions.common_e6_key.E6KeysCommon)
# Make sure you're not using the default user here
e6_client = e6_keys.get_client_by_user(user, allow_default=False)
return cls(search, e6_client, sub_repo, user, tags)
@property
def title(self) -> str:
return f'search for "{self.search}" to apply tags {self.tags}'
def item_text_prefix(self) -> str:
return f'Update on "{self.search}" tagging e621 search.'
def item_to_event(
self,
server: Server,
channel: Optional[Channel],
user: Optional[User],
item: Post
) -> EventMessage:
# Check tags
post_tags = [tag for tag_list in item.tags.values() for tag in tag_list]
tag_results = {tag: tag in post_tags for tag in self.tags}
# Construct output
output = text_for_post(item, prefix=self.item_text_prefix())
image_url = item.file["url"]
menu_buttons = buttons_for_submission(tag_results)
if item.file["ext"] in ["swf", "webm"] or image_url is None:
msg = EventMessage(server, channel, user, output, inbound=False, menu_buttons=menu_buttons)
else:
msg = EventMessageWithPhoto(
server, channel, user, output, image_url, inbound=False, menu_buttons=menu_buttons
)
menu = E621TaggingMenu(msg, self.owner, self.e6_client, item.id, self.search, tag_results)
self.sub_repo.menu_cache.add_menu(menu)
return msg
@classmethod
def from_json(cls, json_data: Dict, destination: Destination, sub_repo) -> 'E621TaggingSource':
user_addr = json_data["e621_user_address"]
user = destination.server.get_user_by_address(user_addr)
e6_client = hallo.modules.subscriptions.source_e621.e6_client_from_input(user, sub_repo)
return E621TaggingSource(
json_data["search"],
e6_client,
sub_repo,
user,
json_data["tags"],
json_data["last_keys"]
)
def to_json(self) -> Dict:
return {
"type": self.type_name,
"last_keys": self.last_keys,
"search": self.search,
"tags": self.tags,
"e621_user_address": self.owner.address
}
```
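The `buttons_for_submission` helper above lays tags out in a 2-column, 6-row grid per page and appends a navigation row. A hedged illustration of the paging arithmetic, assuming the module is importable under the path implied by the file header (the tag names here are made up):

```python
from hallo.modules.subscriptions.source_e621_tagging import buttons_for_submission

# 13 watched tags at 12 buttons per page (2 columns x 6 rows) span 2 pages.
tag_results = {f"tag_{i:02d}": (i % 2 == 0) for i in range(13)}
page_one = buttons_for_submission(tag_results, page=1)

# Page 1: six rows of two tag buttons plus one navigation row.
assert len(page_one) == 7
# The navigation row on page 1 holds Refresh, Submit and Next ("Back" only appears from page 2).
assert len(page_one[-1]) == 3
```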
#### File: modules/subscriptions/source_fa_notif_comments.py
```python
from typing import Dict, Optional, List
from urllib.error import HTTPError
from hallo.destination import Destination, User, Channel
from hallo.events import EventMessage
import hallo.modules.subscriptions.source_fa_favs
import hallo.modules.subscriptions.stream_source
import hallo.modules.subscriptions.common_fa_key
import hallo.modules.subscriptions.source
from hallo.server import Server
class FASubmissionCommentSource(
hallo.modules.subscriptions.stream_source.StreamSource[
hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentSubmission
]
):
type_name = "fa_submission_comments"
type_names = ["fa submission comments"]
def __init__(
self,
fa_key: hallo.modules.subscriptions.common_fa_key.FAKey,
last_keys: Optional[List[hallo.modules.subscriptions.stream_source.Key]] = None
):
super().__init__(last_keys)
self.fa_key = fa_key
def current_state(
self
) -> List[hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentSubmission]:
notif_page = self.fa_key.get_fa_reader().get_notification_page()
return notif_page.submission_comments
def item_to_key(
self, item: hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentSubmission
) -> hallo.modules.subscriptions.stream_source.Key:
return item.comment_id
def item_to_event(
self, server: Server, channel: Optional[Channel], user: Optional[User],
item: hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentSubmission
) -> EventMessage:
fa_reader = self.fa_key.get_fa_reader()
response_str = "in response to your comment " if item.comment_on else ""
owner_str = "your" if item.submission_yours else "their"
try:
submission_page = fa_reader.get_submission_page(item.submission_id)
comment = submission_page.comments_section.get_comment_by_id(
item.comment_id
)
return EventMessage(
server, channel, user,
"You have a submission comment notification. "
f'{item.name} has made a new comment {response_str}on {owner_str} submission '
f'"{item.submission_name}" {item.submission_link} : \n\n{comment.text}',
inbound=False
)
except HTTPError:
return EventMessage(
server, channel, user,
"You have a submission comment notification. "
f'{item.name} has made a new comment {response_str}on {owner_str} submission '
f'"{item.submission_name}" {item.submission_link} : but I can\'t find the comment.',
inbound=False
)
def matches_name(self, name_clean: str) -> bool:
return name_clean in [s.lower().strip() for s in self.type_names + ["submission comments"]]
@property
def title(self) -> str:
return f"FA submission comments for {self.fa_key.user.name}"
@classmethod
def from_input(cls, argument: str, user: User, sub_repo) -> 'FASubmissionCommentSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_input(user, sub_repo)
return FASubmissionCommentSource(fa_key)
@classmethod
def from_json(cls, json_data: Dict, destination: Destination, sub_repo) -> 'FASubmissionCommentSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_json(
json_data["fa_key_user_address"], destination.server, sub_repo
)
return FASubmissionCommentSource(fa_key, json_data["last_keys"])
def to_json(self) -> Dict:
return {
"type": self.type_name,
"fa_key_user_address": self.fa_key.user.address,
"last_keys": self.last_keys
}
class FAJournalCommentSource(
hallo.modules.subscriptions.stream_source.StreamSource[
hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentJournal
]
):
type_name = "fa_journal_comments"
type_names = ["fa journal comments"]
def __init__(
self,
fa_key: hallo.modules.subscriptions.common_fa_key.FAKey,
last_keys: Optional[List[hallo.modules.subscriptions.stream_source.Key]] = None
):
super().__init__(last_keys)
self.fa_key = fa_key
def current_state(
self
) -> List[hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentJournal]:
notif_page = self.fa_key.get_fa_reader().get_notification_page()
return notif_page.journal_comments
def item_to_key(
self,
item: hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentJournal
) -> hallo.modules.subscriptions.stream_source.Key:
return item.comment_id
def item_to_event(
self, server: Server, channel: Optional[Channel], user: Optional[User],
item: hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationCommentJournal
) -> EventMessage:
fa_reader = self.fa_key.get_fa_reader()
response_str = "in response to your comment " if item.comment_on else ""
owner_str = "your" if item.journal_yours else "their"
try:
journal_page = fa_reader.get_journal_page(item.journal_id)
comment = journal_page.comments_section.get_comment_by_id(
item.comment_id
)
return EventMessage(
server, channel, user,
f"You have a journal comment notification. {item.name} has made a new comment "
f"{response_str}on {owner_str} journal "
f'"{item.journal_name}" {item.journal_link} : \n\n{comment.text}',
inbound=False
)
except HTTPError:
return EventMessage(
server, channel, user,
f"You have a journal comment notification. {item.name} has made a new comment "
f"{response_str}on {owner_str} journal "
f'"{item.journal_name}" {item.journal_link} but I can\'t find the comment.',
inbound=False
)
def matches_name(self, name_clean: str) -> bool:
return name_clean in [s.lower().strip() for s in self.type_names + ["journal comments"]]
@property
def title(self) -> str:
return f"FA journal comments for {self.fa_key.user.name}"
@classmethod
def from_input(cls, argument: str, user: User, sub_repo) -> 'FAJournalCommentSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_input(user, sub_repo)
return FAJournalCommentSource(fa_key)
@classmethod
def from_json(cls, json_data: Dict, destination: Destination, sub_repo) -> 'FAJournalCommentSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_json(
json_data["fa_key_user_address"], destination.server, sub_repo
)
return FAJournalCommentSource(fa_key, json_data["last_keys"])
def to_json(self) -> Dict:
return {
"type": self.type_name,
"fa_key_user_address": self.fa_key.user.address,
"last_keys": self.last_keys
}
class FAShoutSource(
hallo.modules.subscriptions.stream_source.StreamSource[
hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationShout
]
):
type_name = "fa_shouts"
type_names = ["fa shouts"]
def __init__(
self, fa_key: hallo.modules.subscriptions.common_fa_key.FAKey,
last_keys: Optional[List[hallo.modules.subscriptions.stream_source.Key]] = None
):
super().__init__(last_keys)
self.fa_key = fa_key
def current_state(self) -> List[hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationShout]:
notif_page = self.fa_key.get_fa_reader().get_notification_page()
return notif_page.shouts
def item_to_key(
self,
item: hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationShout
) -> hallo.modules.subscriptions.stream_source.Key:
return item.shout_id
def item_to_event(
self, server: Server, channel: Optional[Channel], user: Optional[User],
item: hallo.modules.subscriptions.common_fa_key.FAKey.FAReader.FANotificationShout
) -> EventMessage:
fa_reader = self.fa_key.get_fa_reader()
try:
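            # Look the shout up on the page it was posted to, so the full shout text can be quoted.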
user_page_shouts = fa_reader.get_user_page(item.page_username).shouts
shout = [
shout
for shout in user_page_shouts
if shout.shout_id == item.shout_id
]
return EventMessage(
server, channel, user,
f"You have a new shout, from {item.name} ( http://furaffinity.net/user/{item.username}/ ) "
f"has left a shout saying: \n\n{shout[0].text}",
inbound=False
)
except HTTPError:
return EventMessage(
server, channel, user,
f"You have a new shout, from {item.name} ( http://furaffinity.net/user/{item.username}/ ) "
"has left a shout but I can't find it on your user page: \n"
f"https://furaffinity.net/user/{item.page_username}/",
inbound=False
)
def matches_name(self, name_clean: str) -> bool:
return name_clean in [s.lower().strip() for s in self.type_names + ["shouts"]]
@property
def title(self) -> str:
return f"FA shouts for {self.fa_key.user.name}"
@classmethod
def from_input(cls, argument: str, user: User, sub_repo) -> 'FAShoutSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_input(user, sub_repo)
return FAShoutSource(fa_key)
@classmethod
def from_json(cls, json_data: Dict, destination: Destination, sub_repo) -> 'FAShoutSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_json(
json_data["fa_key_user_address"],
destination.server, sub_repo
)
return FAShoutSource(fa_key, json_data["last_keys"])
def to_json(self) -> Dict:
return {
"type": self.type_name,
"fa_key_user_address": self.fa_key.user.address,
"last_keys": self.last_keys
}
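# Aggregates the three comment-type notification sources (submission comments,
# journal comments, and shouts) behind a single subscription source, so one
# "fa comments" subscription covers all of them.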
class FACommentNotificationsSource(hallo.modules.subscriptions.source.Source[Dict, Dict]):
type_name: str = "fa_notif_comments"
type_names: List[str] = [
"{}{}{}".format(fa, comments, notifications)
for fa in ["fa ", "furaffinity "]
for comments in ["comments", "comment", "shouts", "shout"]
for notifications in ["", " notifications"]
]
def __init__(
self,
fa_key: hallo.modules.subscriptions.common_fa_key.FAKey,
submission_source: FASubmissionCommentSource,
journal_source: FAJournalCommentSource,
shout_source: FAShoutSource,
):
super().__init__()
self.fa_key = fa_key
self.submission_source = submission_source
self.journal_source = journal_source
self.shout_source = shout_source
def matches_name(self, name_clean: str) -> bool:
return name_clean in [s.lower().strip() for s in self.type_names + ["comments"]]
@property
def title(self) -> str:
return f"FA comments for {self.fa_key.user.name}"
@classmethod
def from_input(cls, argument: str, user: User, sub_repo) -> 'FACommentNotificationsSource':
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_input(user, sub_repo)
submission_source = FASubmissionCommentSource(fa_key)
journal_source = FAJournalCommentSource(fa_key)
shout_source = FAShoutSource(fa_key)
return FACommentNotificationsSource(fa_key, submission_source, journal_source, shout_source)
def current_state(self) -> Dict:
return {
"submissions": self.submission_source.current_state(),
"journals": self.journal_source.current_state(),
"shouts": self.shout_source.current_state()
}
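    # Delegate the diff to each wrapped source; if none of them report a change,
    # report no change for the combined source either.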
def state_change(self, state: Dict) -> Optional[Dict]:
submission_update = self.submission_source.state_change(state["submissions"])
journal_update = self.journal_source.state_change(state["journals"])
shout_update = self.shout_source.state_change(state["shouts"])
if not submission_update and not journal_update and not shout_update:
return None
return {
"submissions": submission_update,
"journals": journal_update,
"shouts": shout_update
}
def save_state(self, state: Dict) -> None:
self.submission_source.save_state(state["submissions"])
self.journal_source.save_state(state["journals"])
self.shout_source.save_state(state["shouts"])
def events(
self, server: Server, channel: Optional[Channel], user: Optional[User], update: Dict
) -> List[EventMessage]:
return (
self.submission_source.events(server, channel, user, update["submissions"])
+ self.journal_source.events(server, channel, user, update["journals"])
+ self.shout_source.events(server, channel, user, update["shouts"])
)
@classmethod
def from_json(cls, json_data: Dict, destination: Destination, sub_repo) -> 'FACommentNotificationsSource':
user_addr = json_data["fa_key_user_address"]
fa_key = hallo.modules.subscriptions.source_fa_favs.fa_key_from_json(
user_addr, destination.server,
sub_repo
)
submission_source = FASubmissionCommentSource.from_json(json_data["submissions"], destination, sub_repo)
journal_source = FAJournalCommentSource.from_json(json_data["journals"], destination, sub_repo)
shout_source = FAShoutSource.from_json(json_data["shouts"], destination, sub_repo)
return FACommentNotificationsSource(fa_key, submission_source, journal_source, shout_source)
def to_json(self) -> Dict:
json_data = {
"type": self.type_name,
"fa_key_user_address": self.fa_key.user.address,
"submissions": self.submission_source.to_json(),
"journals": self.journal_source.to_json(),
"shouts": self.shout_source.to_json()
}
return json_data
```
#### File: modules/subscriptions/subscription_factory.py
```python
from typing import List, Type, Dict, TYPE_CHECKING
import hallo.modules.subscriptions.subscription_exception
from hallo.destination import Destination
import hallo.modules.subscriptions.common_fa_key
import hallo.modules.subscriptions.subscription_common
import hallo.modules.subscriptions.subscription
import hallo.modules.subscriptions.source_reddit
import hallo.modules.subscriptions.source_fa_watchers
import hallo.modules.subscriptions.source_fa_favs
import hallo.modules.subscriptions.source_fa_notif_comments
import hallo.modules.subscriptions.source_fa_notif_favs
import hallo.modules.subscriptions.source_fa_notes
import hallo.modules.subscriptions.source_e621_tagging
import hallo.modules.subscriptions.source_e621_backlog
import hallo.modules.subscriptions.source_e621
import hallo.modules.subscriptions.source_rss
import hallo.modules.subscriptions.source_twitter
import hallo.modules.subscriptions.source
if TYPE_CHECKING:
from hallo.hallo import Hallo
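# Maps subscription source type names (and the "type" values stored in JSON)
# to the Source subclasses that implement them.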
class SubscriptionFactory:
    sub_sources: List[Type['hallo.modules.subscriptions.source.Source']] = [
hallo.modules.subscriptions.source_e621.E621Source,
hallo.modules.subscriptions.source_e621_tagging.E621TaggingSource,
hallo.modules.subscriptions.source_e621_backlog.E621BacklogTaggingSource,
hallo.modules.subscriptions.source_fa_favs.FAFavsSource,
hallo.modules.subscriptions.source_fa_notes.FANotesSource,
hallo.modules.subscriptions.source_fa_notif_comments.FACommentNotificationsSource,
hallo.modules.subscriptions.source_fa_notif_favs.FAFavNotificationsSource,
hallo.modules.subscriptions.source_fa_watchers.FAWatchersSource,
hallo.modules.subscriptions.source_fa_watchers.FAUserWatchersSource,
hallo.modules.subscriptions.source_reddit.RedditSource,
hallo.modules.subscriptions.source_rss.RssSource,
]
common_classes: List[Type[hallo.modules.subscriptions.subscription_common.SubscriptionCommon]] = [
hallo.modules.subscriptions.common_fa_key.FAKeysCommon
]
@staticmethod
def get_source_names() -> List[str]:
return [
name
for sub_class in SubscriptionFactory.sub_sources
for name in sub_class.type_names
]
@staticmethod
def get_source_class_by_name(name: str) -> Type[hallo.modules.subscriptions.source.Source]:
classes = [
sub_class
for sub_class in SubscriptionFactory.sub_sources
if name in sub_class.type_names
]
if len(classes) != 1:
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
"Failed to find a subscription type matching the name {}".format(name)
)
return classes[0]
@staticmethod
def source_from_json(
json_data: Dict,
destination: Destination,
sub_repo
    ) -> 'hallo.modules.subscriptions.source.Source':
name = json_data["type"]
classes = [
sub_class
for sub_class in SubscriptionFactory.sub_sources
if name == sub_class.type_name
]
if len(classes) != 1:
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
f"Failed to find a subscription source type matching the name {name}"
)
return classes[0].from_json(json_data, destination, sub_repo)
@staticmethod
def common_from_json(
common_json: Dict,
hallo_obj: 'Hallo'
) -> hallo.modules.subscriptions.subscription_common.SubscriptionCommon:
common_type_name = common_json["common_type"]
for common_class in SubscriptionFactory.common_classes:
if common_class.type_name == common_type_name:
return common_class.from_json(common_json, hallo_obj)
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
f"Could not load common configuration of type {common_type_name}"
)
```
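A minimal usage sketch for the factory above, assuming the `hallo` package is importable; the `destination` and `sub_repo` objects that `source_from_json` needs are only hinted at here and are hypothetical.

```python
# Hypothetical sketch: resolve a source class from one of its user-facing names.
# "fa comments" is one of FACommentNotificationsSource's generated type_names.
from hallo.modules.subscriptions.subscription_factory import SubscriptionFactory

source_class = SubscriptionFactory.get_source_class_by_name("fa comments")
print(source_class.type_name)  # should print "fa_notif_comments"

# Rebuilding a source from serialised JSON additionally needs a Destination and
# a subscription repository object:
# source = SubscriptionFactory.source_from_json(json_data, destination, sub_repo)
```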
#### File: hallo/modules/user_data.py
```python
import re
from abc import ABCMeta
from typing import Dict, TypeVar, Type, Optional, List
from hallo.destination import User
from hallo.function import Function
class UserDataException(Exception):
pass
class UserDatum(metaclass=ABCMeta):
type_name = ""
names = []
@staticmethod
def create_from_input(event):
raise NotImplementedError()
def get_name(self, event):
raise NotImplementedError()
def to_json(self):
"""
:rtype: dict
"""
raise NotImplementedError()
@staticmethod
def from_json(json_dict):
"""
:type json_dict: dict
"""
raise NotImplementedError()
T = TypeVar("T", bound=UserDatum)
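# UserDataParser persists UserDatum objects in a user's extra_data_dict, keyed by
# each datum's type_name, and rebuilds them via UserDataFactory on the way out.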
class UserDataParser:
def __init__(self):
pass
def get_data_by_user(self, user: User) -> Dict[str, 'UserDatum']:
user_data_dict = user.extra_data_dict
user_data = dict()
for key in user_data_dict:
user_data[key] = UserDataFactory.from_dict(key, user_data_dict[key])
return user_data
def get_data_by_user_and_type(self, user: User, data_class: Type[T]) -> Optional[T]:
type_name = data_class.type_name
user_data_dict = user.extra_data_dict
if type_name in user_data_dict:
return UserDataFactory.from_dict(type_name, user_data_dict[type_name])
return None
def set_user_data(self, user: User, data: 'UserDatum'):
user.extra_data_dict[data.type_name] = data.to_json()
user.server.hallo.save_json()
def remove_data_by_user_and_type(self, user: User, data_class: Type[T]):
type_name = data_class.type_name
if type_name in user.extra_data_dict:
            del user.extra_data_dict[type_name]
class FAKeyData(UserDatum):
type_name = "fa_key"
names = [
"furaffinity key",
"fa key",
"fa cookies",
"furaffinity cookies",
"fa",
"furaffinity",
]
def __init__(self, cookie_a, cookie_b):
self.cookie_a = cookie_a
self.cookie_b = cookie_b
@staticmethod
def create_from_input(event):
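        # Expect cookies in the form "a=<value>;b=<value>"; the two parts may be
        # supplied in either order.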
input_clean = event.command_args.strip().lower().replace(";", " ").split()
if len(input_clean) != 2:
raise UserDataException(
"Input must include cookie a and cookie b, in the format a=value;b=value"
)
if input_clean[0].startswith("b="):
input_clean = list(reversed(input_clean))
cookie_a = input_clean[0][2:]
cookie_b = input_clean[1][2:]
new_data = FAKeyData(cookie_a, cookie_b)
return new_data
def get_name(self, event):
return event.user.name + " FA login"
def to_json(self):
json_obj = dict()
json_obj["cookie_a"] = self.cookie_a
json_obj["cookie_b"] = self.cookie_b
return json_obj
@staticmethod
def from_json(json_dict):
cookie_a = json_dict["cookie_a"]
cookie_b = json_dict["cookie_b"]
return FAKeyData(cookie_a, cookie_b)
class E6KeyData(UserDatum):
type_name = "e6_key"
names = [
"e621 key",
"e6 key",
"e6 api key",
"e621 api key",
"e6",
"e621",
]
def __init__(self, username: str, api_key: str):
self.username = username
self.api_key = api_key
@staticmethod
def create_from_input(event):
input_clean = event.command_args.strip().split()
if len(input_clean) != 2:
raise UserDataException(
"Input must include username,then apikey, separated by a space"
)
new_data = E6KeyData(input_clean[0], input_clean[1])
return new_data
def get_name(self, event):
return event.user.name + " E621 api key"
def to_json(self):
json_obj = dict()
json_obj["username"] = self.username
json_obj["api_key"] = self.api_key
return json_obj
@staticmethod
def from_json(json_dict):
return E6KeyData(
json_dict["username"],
json_dict["api_key"]
)
class WeatherLocationData(UserDatum):
type_name = "weather_location"
names = ["weather location"]
TYPE_CITY = "city"
TYPE_COORDS = "coords"
TYPE_ZIP = "zip"
def __init__(self, location: 'Location'):
self.location = location
self.country_code = None
""" :type : str"""
@staticmethod
def create_from_input(event):
input_line = event.command_args
# Check if zip code is given
if re.match(r"^\d{5}(?:[-\s]\d{4})?$", input_line):
return WeatherLocationData(WeatherLocationData.ZipLocation(input_line))
# Check if coordinates are given
coord_match = re.match(r"^(-?\d+(\.\d+)?)[ ,]*(-?\d+(\.\d+)?)$", input_line)
if coord_match:
new_lat = coord_match.group(1)
new_long = coord_match.group(3)
return WeatherLocationData(
WeatherLocationData.CoordLocation(new_lat, new_long)
)
# Otherwise, assume it's a city
new_city = input_line
return WeatherLocationData(WeatherLocationData.CityLocation(new_city))
def get_name(self, event):
return event.user.name + " weather location"
def to_json(self):
return self.location.to_json()
@staticmethod
def from_json(json_dict):
if json_dict["type"] == WeatherLocationData.TYPE_CITY:
return WeatherLocationData(
WeatherLocationData.CityLocation.from_json(json_dict)
)
if json_dict["type"] == WeatherLocationData.TYPE_COORDS:
return WeatherLocationData(
WeatherLocationData.CoordLocation.from_json(json_dict)
)
if json_dict["type"] == WeatherLocationData.TYPE_ZIP:
return WeatherLocationData(
WeatherLocationData.ZipLocation.from_json(json_dict)
)
raise UserDataException("Unrecognised weather location type.")
class Location(metaclass=ABCMeta):
def to_json(self):
raise NotImplementedError()
@staticmethod
def from_json(json_obj):
raise NotImplementedError()
class CityLocation(Location):
def __init__(self, city):
self.city = city
def to_json(self):
return {"type": WeatherLocationData.TYPE_CITY, "city": self.city}
@staticmethod
def from_json(json_obj):
return WeatherLocationData.CityLocation(json_obj["city"])
class ZipLocation(Location):
def __init__(self, zip_code):
self.zip_code = zip_code
def to_json(self):
return {"type": WeatherLocationData.TYPE_ZIP, "zip_code": self.zip_code}
@staticmethod
def from_json(json_obj):
return WeatherLocationData.ZipLocation(json_obj["zip_code"])
class CoordLocation(Location):
def __init__(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
def to_json(self):
return {
"type": WeatherLocationData.TYPE_COORDS,
"latitude": self.latitude,
"longitude": self.longitude,
}
@staticmethod
def from_json(json_obj):
return WeatherLocationData.CoordLocation(
json_obj["coord_x"], json_obj["coord_y"]
)
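# UserDataFactory maps user-facing names and stored type_name values to the
# UserDatum subclasses registered in data_classes.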
class UserDataFactory:
data_classes = [FAKeyData, E6KeyData]
@staticmethod
def get_data_type_names() -> List[str]:
return [
name
for common_class in UserDataFactory.data_classes
for name in common_class.names
]
@staticmethod
def get_data_class_by_name(name: str) -> Type[UserDatum]:
classes = [
data_class
for data_class in UserDataFactory.data_classes
if name in data_class.names
]
if len(classes) != 1:
raise UserDataException(
"Failed to find a common configuration type matching the name {}".format(
name
)
)
return classes[0]
@staticmethod
def from_dict(type_name, data_dict):
for data_class in UserDataFactory.data_classes:
if data_class.type_name == type_name:
return data_class.from_json(data_dict)
raise UserDataException("Could not load user data of type {}".format(type_name))
class UserDataSetup(Function):
"""
    Sets up a user's user data of a given type, such as login keys or a weather location
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "setup user data"
# Names which can be used to address the function
name_templates = {
"setup {} user data",
"setup user data {}",
"setup user data for {}",
"{} user data setup",
}
self.names = set(
[
template.format(name)
for name in UserDataFactory.get_data_type_names()
for template in name_templates
]
)
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Sets up user data which other functions may require. "
"Format: setup user data <type> <parameters>"
)
self.user_data_parser = UserDataParser()
""" :type : UserDataParser"""
def run(self, event):
# Construct type name
data_type_name = " ".join(
[
w
for w in event.command_name.lower().split()
if w not in ["setup", "user", "data", "for"]
]
).strip()
        # Get class from type name (None if no matching type is found)
        try:
            data_class = UserDataFactory.get_data_class_by_name(data_type_name)
        except UserDataException:
            data_class = None
if data_class is None:
return event.create_response(
"Could not find a user data type called {}. "
"Available types are: {}".format(
data_type_name,
", ".join(
[
data_class.names[0]
for data_class in UserDataFactory.data_classes
]
),
)
)
# Create user data object
data_obj = data_class.create_from_input(event)
# Save user data
self.user_data_parser.set_user_data(event.user, data_obj)
# Send response
return event.create_response(
"Set up a new user data for {}".format(data_obj.get_name(event))
)
class UserDataTeardown(Function):
"""
Tears down a user's user data of a given type
"""
tear_down_words = ["tear down", "teardown", "remove"]
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "tear down user data"
# Names which can be used to address the function
name_templates = {
"{1} {0} user data",
"{1} user data {0}",
"{1} user data for {0}",
"{0} user data {1}",
}
self.names = set(
[
template.format(name, tearDown)
for name in UserDataFactory.get_data_type_names()
for template in name_templates
for tearDown in self.tear_down_words
]
)
# Help documentation, if it's just a single line, can be set here
self.help_docs = (
"Removes user data of a specified type. "
"Format: tear down user data <type> <parameters>"
)
def run(self, event):
# Construct type name
data_type_name = " ".join(
[
w
for w in event.command_name.split()
if w not in ["user", "data", "for", "teardown", "tear", "down"]
]
).strip()
# Get class from type name
data_class = UserDataFactory.get_data_class_by_name(data_type_name)
# Get a user data parser
user_data_parser = UserDataParser()
# Remove user data
common_obj = user_data_parser.get_data_by_user_and_type(event.user, data_class)
user_data_parser.remove_data_by_user_and_type(event.user, data_class)
# Send response
return event.create_response(
"Removed user data for {}".format(common_obj.get_name(event))
)
```
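A minimal sketch of using `UserDataParser` from the module above, assuming the `hallo` package is importable and that `user` is a hallo `User` attached to a server (so `set_user_data` can persist via `save_json`); the credential values are placeholders.

```python
# Hypothetical example: store and read back a user's e621 credentials.
from hallo.modules.user_data import UserDataParser, E6KeyData

parser = UserDataParser()
parser.set_user_data(user, E6KeyData("example_name", "example_api_key"))

key_data = parser.get_data_by_user_and_type(user, E6KeyData)
if key_data is not None:
    print(key_data.username, key_data.api_key)
```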
#### File: test/inc/test_input_parser.py
```python
from hallo.inc.input_parser import InputParser
def test_no_args():
p = InputParser("blah blah")
assert p.remaining_text == "blah blah"
assert len(p.args_dict) == 0
def test_multiple_simple_args():
p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
assert p.remaining_text == "blah blah"
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
assert p.args_dict["arg3"] == "val3"
def test_quoted_args_quoted_values():
p = InputParser("yo 'base unit'=\"hello world\"")
assert p.remaining_text == "yo"
assert p.args_dict["base unit"] == "hello world"
def test_quoted_args_unquoted_values():
p = InputParser("yo 'base unit'=hello world")
assert p.remaining_text == "yo world"
assert p.args_dict["base unit"] == "hello"
def test_unquoted_args_quoted_values():
p = InputParser('yo base unit="hello world"')
assert p.remaining_text == "yo base"
assert p.args_dict["unit"] == "hello world"
def test_unquoted_args_unquoted_values():
p = InputParser("yo base unit=hello world")
assert p.remaining_text == "yo base world"
assert p.args_dict["unit"] == "hello"
def test_mismatched_quotes():
p = InputParser('yo \'base unit"="hello world"')
assert p.remaining_text == "yo 'base"
assert p.args_dict['unit"'] == "hello world"
p = InputParser("yo 'base unit'=\"hello's world\"")
assert p.remaining_text == "yo"
assert p.args_dict["base unit"] == "hello's world"
def test_all_types():
p = InputParser(
"yo 'base unit'=\"hello world\" arg1='value 1' 'arg 2'=val2 arg3=val3"
)
assert p.remaining_text == "yo"
assert p.args_dict["base unit"] == "hello world"
assert p.args_dict["arg1"] == "value 1"
assert p.args_dict["arg 2"] == "val2"
assert p.args_dict["arg3"] == "val3"
def test_remaining_text_start_and_end():
p = InputParser("blah blah arg1=val1 arg2=val2 hey")
assert p.remaining_text == "blah blah hey"
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
def test_unstripped_input():
p = InputParser(" blah blah ")
assert p.remaining_text == "blah blah"
def test_get_arg_by_names():
p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
assert p.remaining_text == "blah blah"
assert p.get_arg_by_names(["arg2"]) == "val2"
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
assert p.args_dict["arg3"] == "val3"
def test_get_arg_by_names_no_match():
p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
assert p.remaining_text == "blah blah"
assert p.get_arg_by_names(["arg4"]) is None
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
assert p.args_dict["arg3"] == "val3"
def test_get_arg_by_names_one_match():
p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
assert p.remaining_text == "blah blah"
assert p.get_arg_by_names(["arg4", "arg5", "arg3"]) == "val3"
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
assert p.args_dict["arg3"] == "val3"
def test_get_arg_by_names_first_match():
p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
assert p.remaining_text == "blah blah"
assert p.get_arg_by_names(["arg1", "arg2"]) == "val1"
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
assert p.args_dict["arg3"] == "val3"
def test_parse_string_no_numbers():
p = InputParser("blah bloo blee")
assert p.remaining_text == "blah bloo blee"
assert len(p.args_dict) == 0
assert len(p.string_words) == 3
assert len(p.number_words) == 0
assert p.string_words == ["blah", "bloo", "blee"]
def test_parse_string_all_numbers():
p = InputParser("5 421 8916 34.5 -3")
assert p.remaining_text == "5 421 8916 34.5 -3"
assert len(p.args_dict) == 0
assert len(p.string_words) == 0
assert len(p.number_words) == 5
assert p.number_words == [5, 421, 8916, 34.5, -3]
def test_parse_string_mix_of_numbers_and_args():
p = InputParser("blah blah arg1=val1 arg2=val2 5")
assert p.remaining_text == "blah blah 5"
assert p.args_dict["arg1"] == "val1"
assert p.args_dict["arg2"] == "val2"
assert p.string_words == ["blah", "blah"]
assert p.number_words == [5]
```
#### File: modules/channel_control/test_de_operator.py
```python
from hallo.events import EventMessage, EventMode
from hallo.server import Server
from hallo.test.server_mock import ServerMock
def test_deop_not_irc(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = "NOT_IRC"
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "deop"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "only available for irc" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_0_privmsg(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1", "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, None, user1, "deop"))
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "in a private message" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_0_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1", "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "deop"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_0_not_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1", "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "deop"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_0(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1", "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "deop"))
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-o " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1priv_channel_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "deop other_channel")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "other_channel is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1priv_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "deop test_chan2")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1priv_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "deop test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1priv_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "deop test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1priv_not_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "deop test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1priv(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "deop test_chan1")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].user == user1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-o " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user2)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_chan_not_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = True
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-o " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_user_not_here(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user2 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_user_not_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_1_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-o " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_chan_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3".lower(), "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_chan_not_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_chan2 test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-o " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_user_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = False
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "i'm not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_user_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_user_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3".lower(), "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_user_not_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_deop_2_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "deop test_user2 test_chan2")
)
data = serv1.get_send_data(2)
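# Two events expected: the -o mode change sent to test_chan2, then a confirmation message back in test_chan1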
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-o " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
```
#### File: modules/channel_control/test_operator.py
```python
from hallo.events import EventMessage, EventMode
from hallo.server import Server
from hallo.test.server_mock import ServerMock
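# Tests for the channel_control "op" function. The digit in each test name is the number
# of arguments given to the command, "priv" marks a command sent by private message, and
# the suffix names the expected outcome or error.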
def test_op_not_irc(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = "NOT_IRC"
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "op"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "only available for irc" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_0_privmsg(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, None, user1, "op"))
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "in a private message" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_0_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "op"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_0_is_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "op"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "already has op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_0(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "op"))
data = serv1.get_send_data(2)
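# Two events expected: the +o mode change in test_chan1, then a confirmation message in the same channel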
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert data[0].mode_changes == "+o {}".format(user1.address)
assert "status given" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1priv_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "op other_channel")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "other_channel is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1priv_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
serv1.get_channel_by_address("other_channel", "other_channel")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "op other_channel")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1priv_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "op test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1priv_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "op test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1priv_is_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "op test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "already has op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1priv(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "op test_chan1")
)
data = serv1.get_send_data(2)
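# Command arrived by private message: the mode change goes to test_chan1, the confirmation goes back to the user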
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].user == user1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert data[0].mode_changes == "+o {}".format(user1.address)
assert "status given" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user2)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_chan_is_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = True
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "already has op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert data[0].mode_changes == "+o {}".format(user1.address)
assert "status given" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_user_not_here(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user2 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_user_is_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "already has op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_1_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert data[0].mode_changes == "+o {}".format(user2.address)
assert "status given" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_chan_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3", "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_chan_is_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "already has op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_chan2 test_user2")
)
data = serv1.get_send_data(2)
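# Mode change is sent to the named channel (test_chan2); the confirmation goes back to the channel the command came from (test_chan1)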
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert data[0].mode_changes == "+o {}".format(user2.address)
assert "status given" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_user_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = False
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "i'm not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_user_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_user_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3", "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_user_is_op(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "already has op" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_op_2_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "op test_user2 test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert data[0].mode_changes == "+o {}".format(user2.address)
assert "status given" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
```
#### File: modules/convert/test_base.py
```python
from threading import Thread
import gc
from hallo.function_dispatcher import FunctionDispatcher
from hallo.hallo import Hallo
import unittest
from hallo.test.server_mock import ServerMock
import time
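# Base class for module tests that need a live Hallo instance: setUp starts Hallo on a
# background thread with a mock server, one test user and one test channel; tearDown
# shuts it down again after each test.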
class TestBase(unittest.TestCase):
def setUp(self):
# Create a Hallo
self.hallo = Hallo()
# Only the required modules, only 1 (mock) server
# Todo: specify modules by test?
self.function_dispatcher = FunctionDispatcher(
{"convert", "random", "server_control", "subscriptions"}, self.hallo
)
self.hallo.function_dispatcher = self.function_dispatcher
self.server = ServerMock(self.hallo)
self.server.name = "mock-server"
self.hallo.add_server(self.server)
# Start hallo thread
self.hallo_thread = Thread(target=self.hallo.start)
self.hallo_thread.start()
# Create test users and channel, and configure them
self.hallo_user = self.server.get_user_by_address(
self.server.get_nick().lower(), self.server.get_nick()
)
self.test_user = self.server.get_user_by_address("test", "test")
self.test_user.online = True
self.test_chan = self.server.get_channel_by_address("#test", "#test")
self.test_chan.in_channel = True
self.test_chan.add_user(self.hallo_user)
self.test_chan.add_user(self.test_user)
# Wait until hallo is open
count = 0
while not self.hallo.open:
time.sleep(0.01)
count += 1
assert count < 1000, "Hallo took too long to start."
# Clear any data in the server
self.server.get_send_data()
def tearDown(self):
self.hallo.close()
self.hallo_thread.join()
@classmethod
def tearDownClass(cls):
del cls
gc.collect()
```
#### File: dailys/fields/test_dailys_mood_field.py
```python
from datetime import time, date, datetime, timedelta
import pytest
import hallo.modules.dailys.dailys_field
from hallo.events import EventMessage, RawDataTelegram, EventMinute
from hallo.modules.dailys.field_mood import DailysMoodField, MoodTime
from hallo.test.modules.dailys.dailys_spreadsheet_mock import DailysSpreadsheetMock
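# Obj and the get_telegram_time* helpers build minimal fakes of Telegram update objects,
# letting tests control the message timestamp (and reply-to message id) carried by
# RawDataTelegram.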
class Obj:
pass
def get_telegram_time(date_time_val):
fake_telegram_obj = Obj()
fake_telegram_obj.message = Obj()
fake_telegram_obj.message.date = date_time_val
fake_telegram_obj.message.reply_to_message = None
return fake_telegram_obj
def get_telegram_time_reply(date_time_val, message_id):
fake_telegram_obj = Obj()
fake_telegram_obj.message = Obj()
fake_telegram_obj.message.date = date_time_val
fake_telegram_obj.message.reply_to_message = Obj()
fake_telegram_obj.message.reply_to_message.message_id = message_id
return fake_telegram_obj
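# The first two tests mock the dailys "/stats/mood/static/" endpoint via requests_mock to
# exercise DailysMoodField.create_from_input; the remaining tests build the field directly
# and drive its passive triggers (wake, sleep, timed queries), checking what gets logged to
# the mock spreadsheet and what is sent back to the test channel.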
def test_create_from_input(hallo_getter, requests_mock):
dailys_times = ["WakeUpTime", "12:00:00", "SleepTime"]
dailys_moods = ["happiness", "anger", "tiredness", "boisterousness"]
# Setup stuff
command_name = "setup dailys field"
command_args = "mood"
test_hallo = hallo_getter({"dailys"})
evt = EventMessage(
test_hallo.test_server,
test_hallo.test_chan,
test_hallo.test_user,
"{} {}".format(command_name, command_args),
)
evt.split_command_text(command_name, command_args)
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
requests_mock.get(
"{}/stats/mood/static/".format(spreadsheet.dailys_url),
json=[
{
"date": "static",
"source": "Mock test data",
"stat_name": "mood",
"data": {
"moods": dailys_moods,
"times": dailys_times
}
}
]
)
# Try and create dailys field
field = DailysMoodField.create_from_input(evt, spreadsheet)
assert field.spreadsheet == spreadsheet
assert isinstance(field.times, list)
assert len(field.times) == 3
assert MoodTime(MoodTime.WAKE) in field.times
assert MoodTime(MoodTime.SLEEP) in field.times
assert MoodTime(time(12, 0, 0)) in field.times
assert isinstance(field.moods, list)
assert len(field.moods) == 4
assert field.moods == dailys_moods
def test_create_from_input__no_static_data(hallo_getter, requests_mock):
# Setup stuff
command_name = "setup dailys field"
command_args = "mood"
test_hallo = hallo_getter({"dailys"})
evt = EventMessage(
test_hallo.test_server,
test_hallo.test_chan,
test_hallo.test_user,
"{} {}".format(command_name, command_args),
)
evt.split_command_text(command_name, command_args)
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
requests_mock.get(
"{}/stats/mood/static/".format(spreadsheet.dailys_url),
json=[]
)
# Try and create dailys field
with pytest.raises(hallo.modules.dailys.dailys_field.DailysException) as e:
DailysMoodField.create_from_input(evt, spreadsheet)
assert "mood field static data has not been set up on dailys system" in str(e.value).lower()
def test_trigger_morning_query(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_wake = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "morning")
field.passive_trigger(evt_wake)
# Check mood query is sent
notif_dict = spreadsheet.saved_data["mood"][evt_wake.get_send_time().date()]
assert MoodTime.WAKE in notif_dict
assert "message_id" in notif_dict[MoodTime.WAKE]
# Check query is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "how are you feeling" in data_wake[0].text.lower()
assert MoodTime.WAKE in data_wake[0].text
assert all([mood in data_wake[0].text for mood in moods])
def test_trigger_sleep_query(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
moods = ["Happiness", "Anger", "Tiredness"]
evt_sleep = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
saved_data = dict()
saved_data[MoodTime.WAKE] = dict()
saved_data[MoodTime.WAKE]["message_id"] = 1232
saved_data[str(time(14, 0, 0))] = dict()
saved_data[str(time(14, 0, 0))]["message_id"] = 1234
for mood in moods:
saved_data[MoodTime.WAKE][mood] = 3
saved_data[str(time(14, 0, 0))][mood] = 2
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user,
test_hallo.test_chan,
saved_data={"mood": {evt_sleep.get_send_time().date(): saved_data}},
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
field.passive_trigger(evt_sleep)
# Check mood query is sent
notif_dict = spreadsheet.saved_data["mood"][evt_sleep.get_send_time().date()]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
# Check query is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "how are you feeling" in data_wake[0].text.lower()
assert MoodTime.SLEEP in data_wake[0].text
assert all([mood in data_wake[0].text for mood in moods])
def test_trigger_morning_no_query_if_not_in_times(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {}}
)
# Setup field
times = [MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_wake = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "morning")
field.passive_trigger(evt_wake)
# Check mood query is not sent or added to saved data
assert evt_wake.get_send_time().date() not in spreadsheet.saved_data["mood"]
test_hallo.test_server.get_send_data(0)
def test_trigger_sleep_no_query_if_not_in_times(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0))]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_sleep = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
field.passive_trigger(evt_sleep)
# Check mood query is not sent or added to saved data
assert evt_sleep.get_send_time().date() not in spreadsheet.saved_data["mood"]
test_hallo.test_server.get_send_data(0)
def test_trigger_sleep_no_query_if_already_given(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
moods = ["Happiness", "Anger", "Tiredness"]
evt_sleep1 = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
saved_data = dict()
saved_data[MoodTime.WAKE] = dict()
saved_data[MoodTime.WAKE]["message_id"] = 1232
saved_data[str(time(14, 0, 0))] = dict()
saved_data[str(time(14, 0, 0))]["message_id"] = 1234
for mood in moods:
saved_data[MoodTime.WAKE][mood] = 3
saved_data[str(time(14, 0, 0))][mood] = 2
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user,
test_hallo.test_chan,
saved_data={"mood": {evt_sleep1.get_send_time().date(): saved_data}},
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_sleep1 = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
field.passive_trigger(evt_sleep1)
# Check mood query is sent
notif_dict = spreadsheet.saved_data["mood"][evt_sleep1.get_send_time().date()]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
# Check query is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "how are you feeling" in data_wake[0].text.lower()
assert MoodTime.SLEEP in data_wake[0].text
assert all([mood in data_wake[0].text for mood in moods])
# Set message ID to something
msg_id = "test_message_id"
notif_dict[MoodTime.SLEEP]["message_id"] = msg_id
spreadsheet.saved_data["mood"][evt_sleep1.get_send_time().date()] = notif_dict
# Send second sleep query
evt_sleep2 = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
field.passive_trigger(evt_sleep2)
# Check no mood query is sent
notif_dict = spreadsheet.saved_data["mood"][evt_sleep1.get_send_time().date()]
assert notif_dict[MoodTime.SLEEP]["message_id"] == msg_id
test_hallo.test_server.get_send_data(0)
def test_trigger_sleep_after_midnight(hallo_getter):
test_hallo = hallo_getter({"dailys"})
mood_date = date(2019, 1, 15)
sleep_time = datetime(2019, 1, 16, 0, 34, 15)
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)]
moods = ["Happiness", "Anger", "Tiredness"]
# Setup
saved_data = dict()
saved_data[MoodTime.WAKE] = dict()
saved_data[MoodTime.WAKE]["message_id"] = 1232
saved_data[str(time(14, 0, 0))] = dict()
saved_data[str(time(14, 0, 0))]["message_id"] = 1234
for mood in moods:
saved_data[MoodTime.WAKE][mood] = 3
saved_data[str(time(14, 0, 0))][mood] = 2
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: saved_data}}
)
# Setup field
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_sleep = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night"
).with_raw_data(RawDataTelegram(get_telegram_time(sleep_time)))
field.passive_trigger(evt_sleep)
# Check mood query is sent for previous day
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
# Check query is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "how are you feeling" in data_wake[0].text.lower()
assert MoodTime.SLEEP in data_wake[0].text
assert all([mood in data_wake[0].text for mood in moods])
def test_trigger_time_exactly_once(hallo_getter):
test_hallo = hallo_getter({"dailys"})
mood_date = date(2019, 1, 18)
# Setup
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0)), MoodTime(MoodTime.SLEEP)]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Prepare events
evt1 = EventMinute()
evt1.send_time = datetime(2019, 1, 18, 13, 59, 11)
evt2 = EventMinute()
evt2.send_time = datetime(2019, 1, 18, 14, 0, 11)
evt3 = EventMinute()
evt3.send_time = datetime(2019, 1, 18, 14, 1, 11)
# Send time before trigger time
field.passive_trigger(evt1)
# Check mood data not updated and query not sent
assert mood_date not in spreadsheet.saved_data["mood"]
test_hallo.test_server.get_send_data(0)
# Send time after trigger time
field.passive_trigger(evt2)
# Check mood query is sent
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert str(time(14, 0, 0)) in notif_dict
assert "message_id" in notif_dict[str(time(14, 0, 0))]
# Check query is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "how are you feeling" in data_wake[0].text.lower()
assert str(time(14, 0, 0)) in data_wake[0].text
assert all([mood in data_wake[0].text for mood in moods])
# Set message ID to something
msg_id = "test_message_id"
notif_dict[str(time(14, 0, 0))]["message_id"] = msg_id
spreadsheet.saved_data["mood"][mood_date] = notif_dict
# Send another time after trigger time
field.passive_trigger(evt3)
# Check mood data not updated and query not sent
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert notif_dict[str(time(14, 0, 0))]["message_id"] == msg_id
test_hallo.test_server.get_send_data(0)
def test_process_reply_to_query(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(8, 13, 6))
msg_id = 41212
mood_data = dict()
mood_data[MoodTime.WAKE] = dict()
mood_data[MoodTime.WAKE]["message_id"] = msg_id
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
).with_raw_data(
RawDataTelegram(get_telegram_time_reply(mood_datetime, msg_id))
)
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert "message_id" in notif_dict[MoodTime.WAKE]
assert notif_dict[MoodTime.WAKE]["message_id"] == msg_id
assert notif_dict[MoodTime.WAKE]["Happiness"] == 4
assert notif_dict[MoodTime.WAKE]["Anger"] == 1
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
# Check response is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_wake[0].text.lower()
assert MoodTime.WAKE in data_wake[0].text
assert mood_date.isoformat() in data_wake[0].text
assert "413" in data_wake[0].text
def test_process_most_recent_query(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(8, 13, 6))
msg_id = 41212
mood_data = dict()
mood_data[MoodTime.WAKE] = dict()
mood_data[MoodTime.WAKE]["message_id"] = msg_id
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert "message_id" in notif_dict[MoodTime.WAKE]
assert notif_dict[MoodTime.WAKE]["message_id"] == msg_id
assert notif_dict[MoodTime.WAKE]["Happiness"] == 4
assert notif_dict[MoodTime.WAKE]["Anger"] == 1
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
# Check response is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_wake[0].text.lower()
assert MoodTime.WAKE in data_wake[0].text
assert mood_date.isoformat() in data_wake[0].text
assert "413" in data_wake[0].text
def test_process_most_recent_sleep_query_after_midnight(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
sleep_datetime = datetime.combine(mood_date, time(23, 55, 56))
mood_datetime = datetime.combine(mood_date + timedelta(days=1), time(0, 3, 2))
msg_id = 41212
mood_data = dict()
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.SLEEP)]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send sleep message, check response
evt_sleep = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "sleep"
).with_raw_data(RawDataTelegram(get_telegram_time(sleep_datetime)))
field.passive_trigger(evt_sleep)
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
notif_dict[MoodTime.SLEEP]["message_id"] = msg_id
spreadsheet.saved_data["mood"][mood_date] = notif_dict
test_hallo.test_server.get_send_data()
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
assert notif_dict[MoodTime.SLEEP]["message_id"] == msg_id
assert notif_dict[MoodTime.SLEEP]["Happiness"] == 4
assert notif_dict[MoodTime.SLEEP]["Anger"] == 1
assert notif_dict[MoodTime.SLEEP]["Tiredness"] == 3
# Check response is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_wake[0].text.lower()
assert MoodTime.SLEEP in data_wake[0].text
assert mood_date.isoformat() in data_wake[0].text
assert "413" in data_wake[0].text
def test_process_no_mood_query(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(13, 13, 6))
moods = ["Happiness", "Anger", "Tiredness"]
msg_id = 41212
mood_data = dict()
mood_data[MoodTime.WAKE] = dict()
mood_data[MoodTime.WAKE]["message_id"] = msg_id
for mood in moods:
mood_data[MoodTime.WAKE][mood] = 3
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is not logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert notif_dict[MoodTime.WAKE]["message_id"] == msg_id
assert notif_dict[MoodTime.WAKE]["Happiness"] == 3
assert notif_dict[MoodTime.WAKE]["Anger"] == 3
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
assert str(time(14, 0, 0)) not in notif_dict
# Check error response is given
data_err = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "is this a mood measurement, because i can't find a mood query" in data_err[0].text.lower()
def test_process_time_specified(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(13, 13, 6))
moods = ["Happiness", "Anger", "Tiredness"]
msg_id = 41212
mood_data = dict()
mood_data[MoodTime.WAKE] = dict()
mood_data[MoodTime.WAKE]["message_id"] = msg_id
for mood in moods:
mood_data[MoodTime.WAKE][mood] = 3
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "HAT 1400 413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert notif_dict[MoodTime.WAKE]["message_id"] == msg_id
assert notif_dict[MoodTime.WAKE]["Happiness"] == 3
assert notif_dict[MoodTime.WAKE]["Anger"] == 3
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
assert str(time(14, 0, 0)) in notif_dict
assert "message_id" not in notif_dict[str(time(14, 0, 0))]
assert notif_dict[str(time(14, 0, 0))]["Happiness"] == 4
assert notif_dict[str(time(14, 0, 0))]["Anger"] == 1
assert notif_dict[str(time(14, 0, 0))]["Tiredness"] == 3
# Check response is given
data_1400 = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_1400[0].text.lower()
assert str(time(14, 0, 0)) in data_1400[0].text
assert mood_date.isoformat() in data_1400[0].text
assert "413" in data_1400[0].text
def test_process_wake_specified(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(13, 13, 6))
moods = ["Happiness", "Anger", "Tiredness"]
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "HAT wake 413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert "message_id" not in notif_dict[MoodTime.WAKE]
assert notif_dict[MoodTime.WAKE]["Happiness"] == 4
assert notif_dict[MoodTime.WAKE]["Anger"] == 1
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
# Check response is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_wake[0].text.lower()
assert MoodTime.WAKE in data_wake[0].text
assert mood_date.isoformat() in data_wake[0].text
assert "413" in data_wake[0].text
def test_process_sleep_specified(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(13, 13, 6))
moods = ["Happiness", "Anger", "Tiredness"]
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0)), MoodTime(MoodTime.SLEEP)]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "HAT sleep 413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.SLEEP in notif_dict
assert "message_id" not in notif_dict[MoodTime.SLEEP]
assert notif_dict[MoodTime.SLEEP]["Happiness"] == 4
assert notif_dict[MoodTime.SLEEP]["Anger"] == 1
assert notif_dict[MoodTime.SLEEP]["Tiredness"] == 3
# Check response is given
data_sleep = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_sleep[0].text.lower()
assert MoodTime.SLEEP in data_sleep[0].text
assert mood_date.isoformat() in data_sleep[0].text
assert "413" in data_sleep[0].text
def test_no_trigger_after_processed(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(13, 13, 6))
moods = ["Happiness", "Anger", "Tiredness"]
msg_id = 41212
mood_data = dict()
mood_data[MoodTime.WAKE] = dict()
mood_data[MoodTime.WAKE]["message_id"] = msg_id
for mood in moods:
mood_data[MoodTime.WAKE][mood] = 3
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "HAT 1400 413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert notif_dict[MoodTime.WAKE]["message_id"] == msg_id
assert notif_dict[MoodTime.WAKE]["Happiness"] == 3
assert notif_dict[MoodTime.WAKE]["Anger"] == 3
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
assert str(time(14, 0, 0)) in notif_dict
assert "message_id" not in notif_dict[str(time(14, 0, 0))]
assert notif_dict[str(time(14, 0, 0))]["Happiness"] == 4
assert notif_dict[str(time(14, 0, 0))]["Anger"] == 1
assert notif_dict[str(time(14, 0, 0))]["Tiredness"] == 3
# Check response is given
data_1400 = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_1400[0].text.lower()
assert str(time(14, 0, 0)) in data_1400[0].text
assert mood_date.isoformat() in data_1400[0].text
assert "413" in data_1400[0].text
# Check that when the time happens, a query isn't sent
evt_time = EventMinute()
evt_time.send_time = datetime.combine(mood_date, time(14, 3, 10))
field.passive_trigger(evt_time)
# Check data isn't added
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert str(time(14, 0, 0)) in notif_dict
assert "message_id" not in notif_dict[str(time(14, 0, 0))]
assert notif_dict[str(time(14, 0, 0))]["Happiness"] == 4
assert notif_dict[str(time(14, 0, 0))]["Anger"] == 1
assert notif_dict[str(time(14, 0, 0))]["Tiredness"] == 3
# Check query isn't sent
test_hallo.test_server.get_send_data(0)
def test_no_trigger_wake_after_processed(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(13, 13, 6))
wake_datetime = datetime.combine(mood_date, time(13, 15, 7))
moods = ["Happiness", "Anger", "Tiredness"]
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "HAT wake 413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert "message_id" not in notif_dict[MoodTime.WAKE]
assert notif_dict[MoodTime.WAKE]["Happiness"] == 4
assert notif_dict[MoodTime.WAKE]["Anger"] == 1
assert notif_dict[MoodTime.WAKE]["Tiredness"] == 3
# Check response is given
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_wake[0].text.lower()
assert MoodTime.WAKE in data_wake[0].text
assert mood_date.isoformat() in data_wake[0].text
assert "413" in data_wake[0].text
# Send wake message, ensure no response
evt_wake = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "morning"
).with_raw_data(RawDataTelegram(get_telegram_time(wake_datetime)))
field.passive_trigger(evt_wake)
# Check query isn't logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
assert "message_id" not in notif_dict[MoodTime.WAKE]
# Check response wasn't given
test_hallo.test_server.get_send_data(0)
def test_no_trigger_sleep_after_processed_sleep_and_midnight(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
sleep_datetime = datetime.combine(mood_date, time(23, 13, 6))
mood_datetime = datetime.combine(mood_date, time(23, 15, 7))
sleep2_datetime = datetime.combine(
mood_date + timedelta(days=1), time(0, 3, 15)
)
msg_id = 123123
moods = ["Happiness", "Anger", "Tiredness"]
saved_data = dict()
saved_data[MoodTime.WAKE] = dict()
saved_data[MoodTime.WAKE]["message_id"] = 1232
saved_data[str(time(14, 0, 0))] = dict()
saved_data[str(time(14, 0, 0))]["message_id"] = 1234
for mood in moods:
saved_data[MoodTime.WAKE][mood] = 3
saved_data[str(time(14, 0, 0))][mood] = 2
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: saved_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0)), MoodTime(MoodTime.SLEEP)]
field = DailysMoodField(spreadsheet, times, moods)
# Send sleep query
evt_sleep1 = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "sleep"
).with_raw_data(RawDataTelegram(get_telegram_time(sleep_datetime)))
field.passive_trigger(evt_sleep1)
# Check the sleep mood query is sent and logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
notif_dict[MoodTime.SLEEP]["message_id"] = msg_id
spreadsheet.saved_data["mood"][mood_date] = notif_dict
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "how are you feeling" in data_wake[0].text.lower()
assert MoodTime.SLEEP in data_wake[0].text
assert all([mood in data_wake[0].text for mood in moods])
# Then mood response
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
).with_raw_data(
RawDataTelegram(get_telegram_time_reply(mood_datetime, msg_id))
)
field.passive_trigger(evt_mood)
# Check mood is recorded and response given
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.SLEEP in notif_dict
assert "message_id" in notif_dict[MoodTime.SLEEP]
assert notif_dict[MoodTime.SLEEP]["message_id"] == msg_id
assert notif_dict[MoodTime.SLEEP]["Happiness"] == 4
assert notif_dict[MoodTime.SLEEP]["Anger"] == 1
assert notif_dict[MoodTime.SLEEP]["Tiredness"] == 3
data_wake = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
assert "added" in data_wake[0].text.lower()
assert MoodTime.SLEEP in data_wake[0].text
assert mood_date.isoformat() in data_wake[0].text
assert "413" in data_wake[0].text
# Then midnight
# Another sleep query
evt_sleep1 = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "sleep"
).with_raw_data(RawDataTelegram(get_telegram_time(sleep2_datetime)))
field.passive_trigger(evt_sleep1)
# Check there's no response
test_hallo.test_server.get_send_data(0)
```
#### File: modules/dailys/test_all_field_types.py
```python
import importlib
import inspect
import os
from datetime import time
import pytest
from hallo.events import EventMessage
from hallo.inc.commons import inherits_from
from hallo.modules.dailys.dailys_field_factory import DailysFieldFactory
from hallo.modules.dailys.field_dream import DailysDreamField
from hallo.modules.dailys.field_duolingo import DailysDuolingoField
from hallo.modules.dailys.field_fa import DailysFAField
from hallo.modules.dailys.field_mood import DailysMoodField, MoodTime
from hallo.modules.dailys.field_sleep import DailysSleepField
from hallo.modules.dailys.field_question import QuestionsField, Question, RepeatingInterval
from hallo.test.modules.dailys.dailys_spreadsheet_mock import DailysSpreadsheetMock
def get_field_objects(test_user, test_chan):
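"""
Build one instance of each implemented dailys field type against a mock spreadsheet.
The parametrised tests below use this list to check factory coverage and serialisation.
"""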
spreadsheet = DailysSpreadsheetMock(test_user, test_chan)
field_obs = list()
field_obs.append(
DailysDuolingoField(spreadsheet, "cabinet", os.getenv("test_duo_password"))
)
field_obs.append(
DailysMoodField(
spreadsheet,
[MoodTime(MoodTime.SLEEP), MoodTime(time(14, 00)), MoodTime(time(22, 00))],
["Happiness", "Anger", "Sleepiness"],
)
)
field_obs.append(DailysFAField(spreadsheet))
field_obs.append(DailysSleepField(spreadsheet))
field_obs.append(DailysDreamField(spreadsheet))
field_obs.append(QuestionsField(
spreadsheet,
[Question(
"test_question",
"Is this question a test?",
RepeatingInterval("R/2021-05-03T21:00:00/P1D")
)]
))
return field_obs
def test_field_type_name_doesnt_overlap():
"""
Test that field classes don't have type_name values which overlap each other
"""
all_type_names = []
for field_class in DailysFieldFactory.fields:
assert field_class.type_name not in all_type_names
all_type_names.append(field_class.type_name)
def test_field_classes_added_to_factory(hallo_getter):
"""
Test that all field classes which are implemented are added to DailysFieldFactory
"""
test_hallo = hallo_getter({"dailys"})
module_obj = importlib.import_module("hallo.modules.dailys")
# Loop through module, searching for DailysField subclasses.
for function_tuple in inspect.getmembers(module_obj, inspect.isclass):
function_class = function_tuple[1]
# Only look at subclasses of DailysField
if not inherits_from(function_class, "DailysField"):
continue
# Only look at implemented classes.
spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
# noinspection PyBroadException
try:
function_class.create_from_input(
EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "hello"),
spreadsheet,
)
except NotImplementedError:
continue
except Exception:
pass
# Check it's in DailysFieldFactory
assert function_class.__name__ in [
sub_class.__name__ for sub_class in DailysFieldFactory.fields
]
@pytest.mark.parametrize(
"field_class", DailysFieldFactory.fields
)
def test_all_field_classes_in_field_objs(field_class, hallo_getter):
"""
Tests that every field class has a corresponding object returned by get_field_objects above.
"""
test_hallo = hallo_getter({"dailys"})
assert field_class in [
field_obj.__class__ for field_obj in get_field_objects(test_hallo.test_user, test_hallo.test_chan)
]
@pytest.mark.parametrize(
"field_class", DailysFieldFactory.fields
)
def test_sub_class_has_type_name(field_class):
"""
Test that the type_name value has been set for each field class, and that it is lower case
"""
assert len(field_class.type_name) != 0
assert field_class.type_name == field_class.type_name.lower()
@pytest.mark.parametrize("field_object", get_field_objects(None, None))
def test_to_json_contains_field_type(field_object):
"""
Test that to_json() for each field type remembers to set type_name in the json dict
"""
json_obj = field_object.to_json()
assert "type_name" in json_obj
```
#### File: modules/hallo_control/test_active_threads.py
```python
from threading import Thread
import time
from hallo.events import EventMessage
def test_threads_simple(hallo_getter):
test_hallo = hallo_getter({"hallo_control"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "active threads")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert "i have" in data[0].text.lower()
assert "active threads" in data[0].text.lower()
def test_threads_increase(hallo_getter):
test_hallo = hallo_getter({"hallo_control"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "active threads")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
first_threads = int(
data[0].text.lower().split("active")[0].split("have")[1].strip()
)
# Launch 10 threads
for _ in range(10):
Thread(target=time.sleep, args=(10,)).start()
# Run function again
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "active threads")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
second_threads = int(
data[0].text.lower().split("active")[0].split("have")[1].strip()
)
assert second_threads > first_threads, "Thread count should have increased"
```
#### File: modules/math/test_average.py
```python
from hallo.events import EventMessage
def test_avg_simple(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "average 2 4")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert float(data[0].text.split()[-1][:-1]) == 3, "average of 2 and 4 should be 3"
def test_avg_same(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "average 2 2 2 2 2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
float(data[0].text.split()[-1][:-1]) == 2
), "average of the same number should be the same number"
def test_avg_many(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(
test_hallo.test_server, None, test_hallo.test_user, "average 2 7 4 6 32 4 1 17 12 12 100"
)
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
data[0].text.split()[-1][:-1][:6] == "17.909"
), "average of many numbers calculated incorrectly"
def test_avg_floats(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "average 2.4 3.2 6.6 1.2")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
round(float(data[0].text.split()[-1][:-1]), 2) == 3.35
), "average of floats incorrect"
def test_avg_negative(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "average -5 5 -10 10 -14 -16 13 17")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
float(data[0].text.split()[-1][:-1]) == 0
), "average including negatives was incorrect"
def test_avg_fail(hallo_getter):
test_hallo = hallo_getter({"math"})
# Test that words fail
test_hallo.function_dispatcher.dispatch(
EventMessage(
test_hallo.test_server, None, test_hallo.test_user, "average -5 5 hello 10 -14 -16 13 17"
)
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert "error" in data[0].text.lower(), "average of words should throw error"
# Test that invalid numbers fail
test_hallo.function_dispatcher.dispatch(
EventMessage(
test_hallo.test_server, None, test_hallo.test_user, "average -5 5 -10 10.0.0 -14 -16 13 17"
)
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert "error" in data[0].text.lower(), "Invalid numbers did not return error"
```
#### File: modules/math/test_change_options.py
```python
from hallo.events import EventMessage
def test_change_options_simple(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "change options 5")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert "[5]" in data[0].text, "Option missing from results."
assert "[2,2,1]" in data[0].text, "Option missing from results."
assert "[2,1,1,1]" in data[0].text, "Option missing from results."
assert "[1,1,1,1,1]" in data[0].text, "Option missing from results."
def test_change_options_over_limit(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "change options 21")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Change options size limit has not been enforced."
def test_change_options_negative(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "change options -5")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Change options should fail with negative input."
def test_change_options_float(hallo_getter):
test_hallo = hallo_getter({"math"})
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "change option 2.3")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert (
"error" in data[0].text.lower()
), "Change options should fail with non-integer input."
```
#### File: modules/random/mock_chooser.py
```python
class MockChooser:
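"""
Deterministic stand-in for the random module's chooser.
Tests set `choice` to pick which option is returned, and inspect `last_choices`
and `last_count` to verify what the function under test asked for.
"""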
def __init__(self):
self.choice = 0
self.last_choices = None
self.last_count = None
def choose(self, choices, count=1):
self.last_choices = choices
self.last_count = count
return [choices[self.choice]] * count
```
#### File: modules/random/test_eight_ball.py
```python
from hallo.events import EventMessage
from hallo.modules.random.eight_ball import EightBall
def test_eightball(hallo_getter):
test_hallo = hallo_getter({"random"})
all_responses = (
EightBall.RESPONSES_YES_TOTALLY
+ EightBall.RESPONSES_YES_PROBABLY
+ EightBall.RESPONSES_MAYBE
+ EightBall.RESPONSES_NO
)
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "eight ball")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert data[0].text.lower() in [
"{}.".format(x.lower()) for x in all_responses
], "Response isn't valid."
def test_eightball_with_message(hallo_getter):
test_hallo = hallo_getter({"random"})
all_responses = (
EightBall.RESPONSES_YES_TOTALLY
+ EightBall.RESPONSES_YES_PROBABLY
+ EightBall.RESPONSES_MAYBE
+ EightBall.RESPONSES_NO
)
test_hallo.function_dispatcher.dispatch(
EventMessage(
test_hallo.test_server,
None,
test_hallo.test_user,
"magic eightball will this test pass?",
)
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert data[0].text.lower() in [
"{}.".format(x.lower()) for x in all_responses
], "Response isn't valid."
def test_all_responses(mock_chooser, hallo_getter):
test_hallo = hallo_getter({"random"})
all_responses = (
EightBall.RESPONSES_YES_TOTALLY
+ EightBall.RESPONSES_YES_PROBABLY
+ EightBall.RESPONSES_MAYBE
+ EightBall.RESPONSES_NO
)
responses = []
for x in range(len(all_responses)):
# Set RNG
mock_chooser.choice = x
# Shake magic eight ball
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "magic8-ball")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
responses.append(data[0].text.lower()[:-1])
assert data[0].text.lower() in [
"{}.".format(x.lower()) for x in all_responses
], "Response isn't valid."
# Check all responses given
assert len(responses) == len(
all_responses
), "Not the same number of responses as possible responses"
assert set(responses) == set(
[x.lower() for x in all_responses]
), "Not all responses are given"
```
#### File: modules/random/test_night_vale_proverb.py
```python
from hallo.events import EventMessage
from hallo.modules.random.night_vale_proverb import NightValeProverb
def test_proverb(mock_chooser, hallo_getter):
test_hallo = hallo_getter({"random"})
# Get proverb list
n = NightValeProverb()
proverb_list = n.proverb_list
response_list = []
# Check all proverbs are given
for x in range(len(proverb_list)):
# Set RNG
mock_chooser.choice = x
# Check function
test_hallo.function_dispatcher.dispatch(
EventMessage(test_hallo.test_server, None, test_hallo.test_user, "nightvale proverb")
)
data = test_hallo.test_server.get_send_data(1, test_hallo.test_user, EventMessage)
assert data[0].text in proverb_list, "Proverb isn't from list"
response_list.append(data[0].text)
assert len(response_list) == len(
proverb_list
), "Not all proverbs options given?"
assert set(response_list) == set(proverb_list), "Proverb options didn't match"
```
#### File: modules/subscriptions/test_source_e621.py
```python
import pytest
from yippi import YippiClient
from hallo.events import EventMessage, EventMessageWithPhoto
from hallo.modules.subscriptions.source_e621 import E621Source
from hallo.modules.subscriptions.subscription_repo import SubscriptionRepo
def test_init(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("cabinet", e6_client, test_hallo.test_user)
keys = [
"search",
"last_keys"
]
for key in keys:
assert key in rf.__dict__, "Key is missing from E621Source object: " + key
def test_matches_name(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("Cabinet ", e6_client, test_hallo.test_user)
assert rf.matches_name("cabinet")
def test_title(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("cabinet", e6_client, test_hallo.test_user)
assert "\"cabinet\"" in rf.title
def test_from_input(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
sub_repo = SubscriptionRepo(test_hallo)
rf = E621Source.from_input("cabinet", test_hallo.test_user, sub_repo)
assert rf.search == "cabinet"
@pytest.mark.external_integration
def test_current_state(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("cabinet", e6_client, test_hallo.test_user)
state = rf.current_state()
assert isinstance(state, list)
assert len(state) > 0
last_id = None
for post in state:
# Check tag is in tags
assert "cabinet" in [x for v in post.tags.values() for x in v]
# Check id is decreasing
if last_id is None:
last_id = post.id
continue
assert last_id > post.id
last_id = post.id
@pytest.mark.external_integration
def test_item_to_key(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("cabinet", e6_client, test_hallo.test_user)
item = e6_client.post(1092773)
assert rf.item_to_key(item) == 1092773
@pytest.mark.external_integration
def test_item_to_event(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("chital", e6_client, test_hallo.test_user)
item = e6_client.post(1092773)
event = rf.item_to_event(test_hallo.test_server, test_hallo.test_chan, None, item)
assert isinstance(event, EventMessageWithPhoto)
assert event.photo_id == "https://static1.e621.net/data/02/7e/027e18f9db1fd4906d98b987b202066e.png"
assert event.server == test_hallo.test_server
assert event.channel == test_hallo.test_chan
assert event.user is None
assert "\"chital\"" in event.text
assert "1092773" in event.text
assert "Safe" in event.text
@pytest.mark.external_integration
def test_item_to_event__no_embed(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
rf = E621Source("acrobatics", e6_client, test_hallo.test_user)
item = e6_client.post(257069)
event = rf.item_to_event(test_hallo.test_server, test_hallo.test_chan, None, item)
assert isinstance(event, EventMessage)
assert not isinstance(event, EventMessageWithPhoto)
assert event.server == test_hallo.test_server
assert event.channel == test_hallo.test_chan
assert event.user is None
assert "\"acrobatics\"" in event.text
assert "257069" in event.text
assert "Safe" in event.text
def test_json(hallo_getter):
test_hallo = hallo_getter({"subscriptions"})
e6_client = YippiClient("hallo_test", "0.1.0", "dr-spangle")
sub_repo = SubscriptionRepo(test_hallo)
test_e621_search = "cabinet"
# Create example source
rf = E621Source(test_e621_search, e6_client, test_hallo.test_user)
rf.last_keys = [1234, 2345, 3456]
# Save to json and load up a new E621Source
rf_json = rf.to_json()
assert "type" in rf_json
assert "last_keys" in rf_json
assert "search" in rf_json
assert rf_json["type"] == E621Source.type_name
rf2 = E621Source.from_json(rf_json, test_hallo.test_chan, sub_repo)
assert rf2.search == test_e621_search
assert rf2.last_keys == rf.last_keys
assert rf2.owner == rf.owner
``` |
{
"source": "joshcoales/pyparsing",
"score": 3
} |
#### File: pyparsing/tests/test_diagram.py
```python
import unittest
from examples.jsonParser import jsonObject
from examples.simpleBool import boolExpr
from examples.simpleSQL import simpleSQL
from examples.mozillaCalendarParser import calendars
from pyparsing.diagram import to_railroad, railroad_to_html
from pyparsing import Or
import tempfile
import os
class TestRailroadDiagrams(unittest.TestCase):
def railroad_debug(self) -> bool:
"""
Returns a truthy value if the RAILROAD_DEBUG environment variable is set
"""
return os.environ.get("RAILROAD_DEBUG", False)
def get_temp(self):
"""
Returns an appropriate temporary file for writing a railroad diagram
"""
return tempfile.NamedTemporaryFile(
delete=not self.railroad_debug(), mode="w", encoding="utf-8", suffix=".html"
)
def test_bool_expr(self):
with self.get_temp() as temp:
railroad = to_railroad(boolExpr)
assert len(railroad) == 3
temp.write(railroad_to_html(railroad))
if self.railroad_debug():
print("bool expr:" + temp.name)
def test_json(self):
with self.get_temp() as temp:
railroad = to_railroad(jsonObject)
assert len(railroad) == 4
temp.write(railroad_to_html(railroad))
if self.railroad_debug():
print("json: " + temp.name)
def test_sql(self):
with self.get_temp() as temp:
railroad = to_railroad(simpleSQL)
assert len(railroad) == 7
temp.write(railroad_to_html(railroad))
if self.railroad_debug():
print("sql: " + temp.name)
def test_calendars(self):
with self.get_temp() as temp:
railroad = to_railroad(calendars)
temp.write(railroad_to_html(railroad))
if self.railroad_debug():
print("calendar: " + temp.name)
def test_none_name(self):
grammar = Or(["foo", "bar"])
railroad = to_railroad(grammar)
assert len(railroad) == 1
assert railroad[0].name is not None
```
#### File: pyparsing/tests/test_unit.py
```python
import contextlib
import datetime
import sys
from io import StringIO
from unittest import TestCase
import pyparsing as pp
from examples.jsonParser import jsonObject
from pyparsing import ParseException
from pyparsing import ParserElement
from tests.json_parser_tests import test1, test2, test3, test4, test5
import platform
ppc = pp.pyparsing_common
ppt = pp.pyparsing_test
# see which Python implementation we are running
python_impl = platform.python_implementation()
CPYTHON_ENV = python_impl == "CPython"
IRON_PYTHON_ENV = python_impl == "IronPython"
JYTHON_ENV = python_impl == "Jython"
PYPY_ENV = python_impl == "PyPy"
# simple utility for flattening nested lists
def flatten(L):
if type(L) is not list:
return [L]
if L == []:
return L
return flatten(L[0]) + flatten(L[1:])
class resetting:
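"""
Context manager that snapshots the named attributes of an object when constructed
and restores them on exit, so a test can mutate shared parser state without
leaking the change into later tests.
"""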
def __init__(self, *args):
ob = args[0]
attrnames = args[1:]
self.ob = ob
self.save_attrs = attrnames
self.save_values = [getattr(ob, attrname) for attrname in attrnames]
def __enter__(self):
pass
def __exit__(self, *args):
for attr, value in zip(self.save_attrs, self.save_values):
setattr(self.ob, attr, value)
class Test1_PyparsingTestInit(TestCase):
def runTest(self):
from pyparsing import (
__version__ as pyparsingVersion,
__versionTime__ as pyparsingVersionTime,
)
print(
"Beginning test of pyparsing, version",
pyparsingVersion,
pyparsingVersionTime,
)
print("Python version", sys.version)
class Test2_WithoutPackrat(ppt.TestParseResultsAsserts, TestCase):
suite_context = None
save_suite_context = None
def setUp(self):
self.suite_context.restore()
@contextlib.contextmanager
def assertRaises(self, expected_exception_type, msg=None):
"""
Simple wrapper to print out the exceptions raised after assertRaises
"""
try:
with super().assertRaises(expected_exception_type, msg=msg) as ar:
yield
finally:
if getattr(ar, "exception", None) is not None:
print(
"Raised expected exception: {}: {}".format(
type(ar.exception).__name__, str(ar.exception)
)
)
else:
print(
"Expected {} exception not raised".format(
expected_exception_type.__name__
)
)
return ar
def test000_assert_packrat_status(self):
print("Packrat enabled:", ParserElement._packratEnabled)
self.assertFalse(ParserElement._packratEnabled, "packrat enabled")
def testScanStringWithOverlap(self):
parser = pp.Word(pp.alphas, exact=3)
without_overlaps = sum(t for t, s, e in parser.scanString("ABCDEFGHI")).asList()
self.assertEqual(
["ABC", "DEF", "GHI"],
without_overlaps,
msg="scanString without overlaps failed",
)
with_overlaps = sum(
t for t, s, e in parser.scanString("ABCDEFGHI", overlap=True)
).asList()
self.assertEqual(
["ABC", "BCD", "CDE", "DEF", "EFG", "FGH", "GHI"],
with_overlaps,
msg="scanString with overlaps failed",
)
def testTransformString(self):
make_int_with_commas = ppc.integer().addParseAction(
lambda t: "{:,}".format(t[0])
)
lower_case_words = pp.Word(pp.alphas.lower(), asKeyword=True) + pp.Optional(
pp.White()
)
nested_list = pp.nestedExpr().addParseAction(pp.ParseResults.asList)
transformer = make_int_with_commas | nested_list | lower_case_words.suppress()
in_string = (
"I wish to buy 12345 shares of Acme Industries (as a gift to my (ex)wife)"
)
print(in_string)
out_string = transformer.transformString(in_string)
print(out_string)
self.assertEqual(
"I 12,345 Acme Industries asagifttomyexwife",
out_string,
msg="failure in transformString",
)
def testUpdateDefaultWhitespace(self):
prev_default_whitespace_chars = pp.ParserElement.DEFAULT_WHITE_CHARS
try:
pp.dblQuotedString.copyDefaultWhiteChars = False
pp.ParserElement.setDefaultWhitespaceChars(" \t")
self.assertEqual(
set(" \t"),
set(pp.sglQuotedString.whiteChars),
"setDefaultWhitespaceChars did not update sglQuotedString",
)
self.assertEqual(
set(prev_default_whitespace_chars),
set(pp.dblQuotedString.whiteChars),
"setDefaultWhitespaceChars updated dblQuotedString but should not",
)
finally:
pp.dblQuotedString.copyDefaultWhiteChars = True
pp.ParserElement.setDefaultWhitespaceChars(prev_default_whitespace_chars)
self.assertEqual(
set(prev_default_whitespace_chars),
set(pp.dblQuotedString.whiteChars),
"setDefaultWhitespaceChars updated dblQuotedString",
)
with ppt.reset_pyparsing_context():
pp.ParserElement.setDefaultWhitespaceChars(" \t")
self.assertNotEqual(
set(prev_default_whitespace_chars),
set(pp.dblQuotedString.whiteChars),
"setDefaultWhitespaceChars updated dblQuotedString but should not",
)
EOL = pp.LineEnd().suppress().setName("EOL")
# Identifiers is a string + optional $
identifier = pp.Combine(pp.Word(pp.alphas) + pp.Optional("$"))
# Literals (number or double quoted string)
literal = ppc.number | pp.dblQuotedString
expression = literal | identifier
# expression.setName("expression").setDebug()
# ppc.number.setDebug()
# ppc.integer.setDebug()
line_number = ppc.integer
# Keywords
PRINT = pp.CaselessKeyword("print")
print_stmt = PRINT - pp.ZeroOrMore(expression | ";")
statement = print_stmt
code_line = pp.Group(line_number + statement + EOL)
program = pp.ZeroOrMore(code_line)
test = """\
10 print 123;
20 print 234; 567;
30 print 890
"""
parsed_program = program.parseString(test)
print(parsed_program.dump())
self.assertEqual(
3,
len(parsed_program),
"failed to apply new whitespace chars to existing builtins",
)
def testUpdateDefaultWhitespace2(self):
with ppt.reset_pyparsing_context():
expr_tests = [
(pp.dblQuotedString, '"abc"'),
(pp.sglQuotedString, "'def'"),
(ppc.integer, "123"),
(ppc.number, "4.56"),
(ppc.identifier, "a_bc"),
]
NL = pp.LineEnd()
for expr, test_str in expr_tests:
parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
test_string = "\n".join([test_str] * 3)
result = parser.parseString(test_string, parseAll=True)
print(result.dump())
self.assertEqual(1, len(result), "failed {!r}".format(test_string))
pp.ParserElement.setDefaultWhitespaceChars(" \t")
for expr, test_str in expr_tests:
parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
test_string = "\n".join([test_str] * 3)
result = parser.parseString(test_string, parseAll=True)
print(result.dump())
self.assertEqual(3, len(result), "failed {!r}".format(test_string))
pp.ParserElement.setDefaultWhitespaceChars(" \n\t")
for expr, test_str in expr_tests:
parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
test_string = "\n".join([test_str] * 3)
result = parser.parseString(test_string, parseAll=True)
print(result.dump())
self.assertEqual(1, len(result), "failed {!r}".format(test_string))
def testParseFourFn(self):
import examples.fourFn as fourFn
import math
def test(s, ans):
fourFn.exprStack[:] = []
results = fourFn.BNF().parseString(s)
try:
resultValue = fourFn.evaluate_stack(fourFn.exprStack)
except Exception:
self.assertIsNone(ans, "exception raised for expression {!r}".format(s))
else:
self.assertEqual(
ans,
resultValue,
"failed to evaluate {}, got {:f}".format(s, resultValue),
)
print(s, "->", resultValue)
test("9", 9)
test("-9", -9)
test("--9", 9)
test("-E", -math.e)
test("9 + 3 + 6", 9 + 3 + 6)
test("9 + 3 / 11", 9 + 3.0 / 11)
test("(9 + 3)", (9 + 3))
test("(9+3) / 11", (9 + 3.0) / 11)
test("9 - 12 - 6", 9 - 12 - 6)
test("9 - (12 - 6)", 9 - (12 - 6))
test("2*3.14159", 2 * 3.14159)
test("3.1415926535*3.1415926535 / 10", 3.1415926535 * 3.1415926535 / 10)
test("PI * PI / 10", math.pi * math.pi / 10)
test("PI*PI/10", math.pi * math.pi / 10)
test("PI^2", math.pi ** 2)
test("round(PI^2)", round(math.pi ** 2))
test("6.02E23 * 8.048", 6.02e23 * 8.048)
test("e / 3", math.e / 3)
test("sin(PI/2)", math.sin(math.pi / 2))
test("10+sin(PI/4)^2", 10 + math.sin(math.pi / 4) ** 2)
test("trunc(E)", int(math.e))
test("trunc(-E)", int(-math.e))
test("round(E)", round(math.e))
test("round(-E)", round(-math.e))
test("E^PI", math.e ** math.pi)
test("exp(0)", 1)
test("exp(1)", math.e)
test("2^3^2", 2 ** 3 ** 2)
test("(2^3)^2", (2 ** 3) ** 2)
test("2^3+2", 2 ** 3 + 2)
test("2^3+5", 2 ** 3 + 5)
test("2^9", 2 ** 9)
test("sgn(-2)", -1)
test("sgn(0)", 0)
test("sgn(0.1)", 1)
test("foo(0.1)", None)
test("round(E, 3)", round(math.e, 3))
test("round(PI^2, 3)", round(math.pi ** 2, 3))
test("sgn(cos(PI/4))", 1)
test("sgn(cos(PI/2))", 0)
test("sgn(cos(PI*3/4))", -1)
test("+(sgn(cos(PI/4)))", 1)
test("-(sgn(cos(PI/4)))", -1)
def testParseSQL(self):
import examples.simpleSQL as simpleSQL
def test(s, num_expected_toks, expected_errloc=-1):
try:
sqlToks = flatten(simpleSQL.simpleSQL.parseString(s).asList())
print(s, sqlToks, len(sqlToks))
self.assertEqual(
num_expected_toks,
len(sqlToks),
"invalid parsed tokens, expected {}, found {} ({})".format(
num_expected_toks, len(sqlToks), sqlToks
),
)
except ParseException as e:
if expected_errloc >= 0:
self.assertEqual(
expected_errloc,
e.loc,
"expected error at {}, found at {}".format(
expected_errloc, e.loc
),
)
test("SELECT * from XYZZY, ABC", 6)
test("select * from SYS.XYZZY", 5)
test("Select A from Sys.dual", 5)
test("Select A,B,C from Sys.dual", 7)
test("Select A, B, C from Sys.dual", 7)
test("Select A, B, C from Sys.dual, Table2 ", 8)
test("Xelect A, B, C from Sys.dual", 0, 0)
test("Select A, B, C frox Sys.dual", 0, 15)
test("Select", 0, 6)
test("Select &&& frox Sys.dual", 0, 7)
test("Select A from Sys.dual where a in ('RED','GREEN','BLUE')", 12)
test(
"Select A from Sys.dual where a in ('RED','GREEN','BLUE') and b in (10,20,30)",
20,
)
test(
"Select A,b from table1,table2 where table1.id eq table2.id -- test out comparison operators",
10,
)
def testParseConfigFile(self):
from examples import configParse
def test(fnam, num_expected_toks, resCheckList):
print("Parsing", fnam, "...", end=" ")
with open(fnam) as infile:
iniFileLines = "\n".join(infile.read().splitlines())
iniData = configParse.inifile_BNF().parseString(iniFileLines)
print(len(flatten(iniData.asList())))
print(list(iniData.keys()))
self.assertEqual(
num_expected_toks,
len(flatten(iniData.asList())),
"file %s not parsed correctly" % fnam,
)
for chkkey, chkexpect in resCheckList:
var = iniData
for attr in chkkey.split("."):
var = getattr(var, attr)
print(chkkey, var, chkexpect)
self.assertEqual(
chkexpect,
var,
"ParseConfigFileTest: failed to parse ini {!r} as expected {!r}, found {}".format(
chkkey, chkexpect, var
),
)
print("OK")
test(
"tests/karthik.ini",
23,
[("users.K", "8"), ("users.mod_scheme", "'QPSK'"), ("users.Na", "K+2")],
)
test(
"examples/Setup.ini",
125,
[
("Startup.audioinf", "M3i"),
("Languages.key1", "0x0003"),
("test.foo", "bar"),
],
)
def testParseJSONData(self):
expected = [
[
[
"glossary",
[
["title", "example glossary"],
[
"GlossDiv",
[
["title", "S"],
[
"GlossList",
[
[
["ID", "SGML"],
["SortAs", "SGML"],
[
"GlossTerm",
"Standard Generalized Markup Language",
],
["Acronym", "SGML"],
["LargestPrimeLessThan100", 97],
["AvogadroNumber", 6.02e23],
["EvenPrimesGreaterThan2", None],
["PrimesLessThan10", [2, 3, 5, 7]],
["WMDsFound", False],
["IraqAlQaedaConnections", None],
["Abbrev", "ISO 8879:1986"],
[
"GlossDef",
"A meta-markup language, used to create markup languages such as "
"DocBook.",
],
["GlossSeeAlso", ["GML", "XML", "markup"]],
["EmptyDict", []],
["EmptyList", [[]]],
]
],
],
],
],
],
]
],
[
[
"menu",
[
["id", "file"],
["value", "File:"],
[
"popup",
[
[
"menuitem",
[
[
["value", "New"],
["onclick", "CreateNewDoc()"],
],
[["value", "Open"], ["onclick", "OpenDoc()"]],
[["value", "Close"], ["onclick", "CloseDoc()"]],
],
]
],
],
],
]
],
[
[
"widget",
[
["debug", "on"],
[
"window",
[
["title", "Sample Konfabulator Widget"],
["name", "main_window"],
["width", 500],
["height", 500],
],
],
[
"image",
[
["src", "Images/Sun.png"],
["name", "sun1"],
["hOffset", 250],
["vOffset", 250],
["alignment", "center"],
],
],
[
"text",
[
["data", "Click Here"],
["size", 36],
["style", "bold"],
["name", "text1"],
["hOffset", 250],
["vOffset", 100],
["alignment", "center"],
[
"onMouseUp",
"sun1.opacity = (sun1.opacity / 100) * 90;",
],
],
],
],
]
],
[
[
"web-app",
[
[
"servlet",
[
[
["servlet-name", "cofaxCDS"],
["servlet-class", "org.cofax.cds.CDSServlet"],
[
"init-param",
[
[
"configGlossary:installationAt",
"Philadelphia, PA",
],
[
"configGlossary:adminEmail",
"<EMAIL>",
],
["configGlossary:poweredBy", "Cofax"],
[
"configGlossary:poweredByIcon",
"/images/cofax.gif",
],
[
"configGlossary:staticPath",
"/content/static",
],
[
"templateProcessorClass",
"org.cofax.WysiwygTemplate",
],
[
"templateLoaderClass",
"org.cofax.FilesTemplateLoader",
],
["templatePath", "templates"],
["templateOverridePath", ""],
["defaultListTemplate", "listTemplate.htm"],
[
"defaultFileTemplate",
"articleTemplate.htm",
],
["useJSP", False],
["jspListTemplate", "listTemplate.jsp"],
["jspFileTemplate", "articleTemplate.jsp"],
["cachePackageTagsTrack", 200],
["cachePackageTagsStore", 200],
["cachePackageTagsRefresh", 60],
["cacheTemplatesTrack", 100],
["cacheTemplatesStore", 50],
["cacheTemplatesRefresh", 15],
["cachePagesTrack", 200],
["cachePagesStore", 100],
["cachePagesRefresh", 10],
["cachePagesDirtyRead", 10],
[
"searchEngineListTemplate",
"forSearchEnginesList.htm",
],
[
"searchEngineFileTemplate",
"forSearchEngines.htm",
],
[
"searchEngineRobotsDb",
"WEB-INF/robots.db",
],
["useDataStore", True],
[
"dataStoreClass",
"org.cofax.SqlDataStore",
],
[
"redirectionClass",
"org.cofax.SqlRedirection",
],
["dataStoreName", "cofax"],
[
"dataStoreDriver",
"com.microsoft.jdbc.sqlserver.SQLServerDriver",
],
[
"dataStoreUrl",
"jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon",
],
["dataStoreUser", "sa"],
["dataStorePassword", "<PASSWORD>"],
[
"dataStoreTestQuery",
"SET NOCOUNT ON;select test='test';",
],
[
"dataStoreLogFile",
"/usr/local/tomcat/logs/datastore.log",
],
["dataStoreInitConns", 10],
["dataStoreMaxConns", 100],
["dataStoreConnUsageLimit", 100],
["dataStoreLogLevel", "debug"],
["maxUrlLength", 500],
],
],
],
[
["servlet-name", "cofaxEmail"],
["servlet-class", "org.cofax.cds.EmailServlet"],
[
"init-param",
[
["mailHost", "mail1"],
["mailHostOverride", "mail2"],
],
],
],
[
["servlet-name", "cofaxAdmin"],
["servlet-class", "org.cofax.cds.AdminServlet"],
],
[
["servlet-name", "fileServlet"],
["servlet-class", "org.cofax.cds.FileServlet"],
],
[
["servlet-name", "cofaxTools"],
[
"servlet-class",
"org.cofax.cms.CofaxToolsServlet",
],
[
"init-param",
[
["templatePath", "toolstemplates/"],
["log", 1],
[
"logLocation",
"/usr/local/tomcat/logs/CofaxTools.log",
],
["logMaxSize", ""],
["dataLog", 1],
[
"dataLogLocation",
"/usr/local/tomcat/logs/dataLog.log",
],
["dataLogMaxSize", ""],
[
"removePageCache",
"/content/admin/remove?cache=pages&id=",
],
[
"removeTemplateCache",
"/content/admin/remove?cache=templates&id=",
],
[
"fileTransferFolder",
"/usr/local/tomcat/webapps/content/fileTransferFolder",
],
["lookInContext", 1],
["adminGroupID", 4],
["betaServer", True],
],
],
],
],
],
[
"servlet-mapping",
[
["cofaxCDS", "/"],
["cofaxEmail", "/cofaxutil/aemail/*"],
["cofaxAdmin", "/admin/*"],
["fileServlet", "/static/*"],
["cofaxTools", "/tools/*"],
],
],
[
"taglib",
[
["taglib-uri", "cofax.tld"],
["taglib-location", "/WEB-INF/tlds/cofax.tld"],
],
],
],
]
],
[
[
"menu",
[
["header", "SVG Viewer"],
[
"items",
[
[["id", "Open"]],
[["id", "OpenNew"], ["label", "Open New"]],
None,
[["id", "ZoomIn"], ["label", "Zoom In"]],
[["id", "ZoomOut"], ["label", "Zoom Out"]],
[["id", "OriginalView"], ["label", "Original View"]],
None,
[["id", "Quality"]],
[["id", "Pause"]],
[["id", "Mute"]],
None,
[["id", "Find"], ["label", "Find..."]],
[["id", "FindAgain"], ["label", "Find Again"]],
[["id", "Copy"]],
[["id", "CopyAgain"], ["label", "Copy Again"]],
[["id", "CopySVG"], ["label", "Copy SVG"]],
[["id", "ViewSVG"], ["label", "View SVG"]],
[["id", "ViewSource"], ["label", "View Source"]],
[["id", "SaveAs"], ["label", "Save As"]],
None,
[["id", "Help"]],
[
["id", "About"],
["label", "About Adobe CVG Viewer..."],
],
],
],
],
]
],
]
for t, exp in zip((test1, test2, test3, test4, test5), expected):
self.assertParseAndCheckList(jsonObject, t, exp, verbose=True)
def testParseCommaSeparatedValues(self):
testData = [
"a,b,c,100.2,,3",
"d, e, j k , m ",
"'Hello, World', f, g , , 5.1,x",
"<NAME>, 123 Main St., Cleveland, Ohio",
"<NAME>, 456 St. James St., Los Angeles , California ",
"",
]
testVals = [
[(3, "100.2"), (4, ""), (5, "3")],
[(2, "j k"), (3, "m")],
[(0, "'Hello, World'"), (2, "g"), (3, "")],
[(0, "<NAME>"), (1, "123 Main St."), (2, "Cleveland"), (3, "Ohio")],
[
(0, "<NAME>"),
(1, "456 St. James St."),
(2, "Los Angeles"),
(3, "California"),
],
]
for line, tests in zip(testData, testVals):
print("Parsing: %r ->" % line, end=" ")
results = ppc.comma_separated_list.parseString(line)
print(results)
for t in tests:
if not (len(results) > t[0] and results[t[0]] == t[1]):
print("$$$", results.dump())
print("$$$", results[0])
self.assertTrue(
len(results) > t[0] and results[t[0]] == t[1],
"failed on %s, item %d s/b '%s', got '%s'"
% (line, t[0], t[1], str(results.asList())),
)
def testParseEBNF(self):
from examples import ebnf
print("Constructing EBNF parser with pyparsing...")
grammar = """
syntax = (syntax_rule), {(syntax_rule)};
syntax_rule = meta_identifier, '=', definitions_list, ';';
definitions_list = single_definition, {'|', single_definition};
single_definition = syntactic_term, {',', syntactic_term};
syntactic_term = syntactic_factor,['-', syntactic_factor];
syntactic_factor = [integer, '*'], syntactic_primary;
syntactic_primary = optional_sequence | repeated_sequence |
grouped_sequence | meta_identifier | terminal_string;
optional_sequence = '[', definitions_list, ']';
repeated_sequence = '{', definitions_list, '}';
grouped_sequence = '(', definitions_list, ')';
(*
terminal_string = "'", character - "'", {character - "'"}, "'" |
'"', character - '"', {character - '"'}, '"';
meta_identifier = letter, {letter | digit};
integer = digit, {digit};
*)
"""
table = {}
table["terminal_string"] = pp.quotedString
table["meta_identifier"] = pp.Word(pp.alphas + "_", pp.alphas + "_" + pp.nums)
table["integer"] = pp.Word(pp.nums)
print("Parsing EBNF grammar with EBNF parser...")
parsers = ebnf.parse(grammar, table)
ebnf_parser = parsers["syntax"]
print("-", "\n- ".join(parsers.keys()))
self.assertEqual(
13, len(list(parsers.keys())), "failed to construct syntax grammar"
)
print("Parsing EBNF grammar with generated EBNF parser...")
parsed_chars = ebnf_parser.parseString(grammar)
parsed_char_len = len(parsed_chars)
print("],\n".join(str(parsed_chars.asList()).split("],")))
self.assertEqual(
98,
len(flatten(parsed_chars.asList())),
"failed to tokenize grammar correctly",
)
def testParseIDL(self):
from examples import idlParse
def test(strng, numToks, expectedErrloc=0):
print(strng)
try:
bnf = idlParse.CORBA_IDL_BNF()
tokens = bnf.parseString(strng)
print("tokens = ")
tokens.pprint()
tokens = flatten(tokens.asList())
print(len(tokens))
self.assertEqual(
numToks,
len(tokens),
"error matching IDL string, {} -> {}".format(strng, str(tokens)),
)
except ParseException as err:
print(err.line)
print(" " * (err.column - 1) + "^")
print(err)
self.assertEqual(
0,
numToks,
"unexpected ParseException while parsing {}, {}".format(
strng, str(err)
),
)
self.assertEqual(
expectedErrloc,
err.loc,
"expected ParseException at %d, found exception at %d"
% (expectedErrloc, err.loc),
)
test(
"""
/*
* a block comment *
*/
typedef string[10] tenStrings;
typedef sequence<string> stringSeq;
typedef sequence< sequence<string> > stringSeqSeq;
interface QoSAdmin {
stringSeq method1(in string arg1, inout long arg2);
stringSeqSeq method2(in string arg1, inout long arg2, inout long arg3);
string method3();
};
""",
59,
)
test(
"""
/*
* a block comment *
*/
typedef string[10] tenStrings;
typedef
/** ** *** **** *
* a block comment *
*/
sequence<string> /*comment inside an And */ stringSeq;
/* */ /**/ /***/ /****/
typedef sequence< sequence<string> > stringSeqSeq;
interface QoSAdmin {
stringSeq method1(in string arg1, inout long arg2);
stringSeqSeq method2(in string arg1, inout long arg2, inout long arg3);
string method3();
};
""",
59,
)
test(
r"""
const string test="Test String\n";
const long a = 0;
const long b = -100;
const float c = 3.14159;
const long d = 0x007f7f7f;
exception TestException
{
string msg;
sequence<string> dataStrings;
};
interface TestInterface
{
void method1(in string arg1, inout long arg2);
};
""",
60,
)
test(
"""
module Test1
{
exception TestException
{
string msg;
];
interface TestInterface
{
void method1(in string arg1, inout long arg2)
raises (TestException);
};
};
""",
0,
56,
)
test(
"""
module Test1
{
exception TestException
{
string msg;
};
};
""",
13,
)
def testParseVerilog(self):
pass
def testScanString(self):
from pyparsing import Word, Combine, Suppress, CharsNotIn, nums, StringEnd
testdata = """
<table border="0" cellpadding="3" cellspacing="3" frame="" width="90%">
<tr align="left" valign="top">
<td><b>Name</b></td>
<td><b>IP Address</b></td>
<td><b>Location</b></td>
</tr>
<tr align="left" valign="top" bgcolor="#c7efce">
<td>time-a.nist.gov</td>
<td>172.16.58.3</td>
<td>NIST, Gaithersburg, Maryland</td>
</tr>
<tr align="left" valign="top">
<td>time-b.nist.gov</td>
<td>192.168.127.12</td>
<td>NIST, Gaithersburg, Maryland</td>
</tr>
<tr align="left" valign="top" bgcolor="#c7efce">
<td>time-a.timefreq.bldrdoc.gov</td>
<td>192.168.3.11</td>
<td>NIST, Boulder, Colorado</td>
</tr>
<tr align="left" valign="top">
<td>time-b.timefreq.bldrdoc.gov</td>
<td>172.16.58.3</td>
<td>NIST, Boulder, Colorado</td>
</tr>
<tr align="left" valign="top" bgcolor="#c7efce">
<td>time-c.timefreq.bldrdoc.gov</td>
<td>172.16.17.32</td>
<td>NIST, Boulder, Colorado</td>
</tr>
</table>
"""
integer = Word(nums)
ipAddress = Combine(integer + "." + integer + "." + integer + "." + integer)
tdStart = Suppress("<td>")
tdEnd = Suppress("</td>")
timeServerPattern = (
tdStart
+ ipAddress("ipAddr")
+ tdEnd
+ tdStart
+ CharsNotIn("<")("loc")
+ tdEnd
)
servers = [
srvr.ipAddr
for srvr, startloc, endloc in timeServerPattern.scanString(testdata)
]
print(servers)
self.assertEqual(
[
"172.16.58.3",
"192.168.127.12",
"192.168.3.11",
"172.16.58.3",
"172.16.17.32",
],
servers,
"failed scanString()",
)
# test for stringEnd detection in scanString
foundStringEnds = [r for r in StringEnd().scanString("xyzzy")]
print(foundStringEnds)
self.assertTrue(foundStringEnds, "Failed to find StringEnd in scanString")
def testQuotedStrings(self):
from pyparsing import (
sglQuotedString,
dblQuotedString,
quotedString,
QuotedString,
)
testData = """
'a valid single quoted string'
'an invalid single quoted string
because it spans lines'
"a valid double quoted string"
"an invalid double quoted string
because it spans lines"
"""
print(testData)
sglStrings = [
(t[0], b, e) for (t, b, e) in sglQuotedString.scanString(testData)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 47),
"single quoted string failure",
)
dblStrings = [
(t[0], b, e) for (t, b, e) in dblQuotedString.scanString(testData)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 154 and dblStrings[0][2] == 184),
"double quoted string failure",
)
allStrings = [(t[0], b, e) for (t, b, e) in quotedString.scanString(testData)]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (allStrings[0][1] == 17 and allStrings[0][2] == 47)
and (allStrings[1][1] == 154 and allStrings[1][2] == 184),
"quoted string failure",
)
escapedQuoteTest = r"""
'This string has an escaped (\') quote character'
"This string has an escaped (\") quote character"
"""
sglStrings = [
(t[0], b, e) for (t, b, e) in sglQuotedString.scanString(escapedQuoteTest)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 66),
"single quoted string escaped quote failure (%s)" % str(sglStrings[0]),
)
dblStrings = [
(t[0], b, e) for (t, b, e) in dblQuotedString.scanString(escapedQuoteTest)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 83 and dblStrings[0][2] == 132),
"double quoted string escaped quote failure (%s)" % str(dblStrings[0]),
)
allStrings = [
(t[0], b, e) for (t, b, e) in quotedString.scanString(escapedQuoteTest)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (
allStrings[0][1] == 17
and allStrings[0][2] == 66
and allStrings[1][1] == 83
and allStrings[1][2] == 132
),
"quoted string escaped quote failure (%s)"
% ([str(s[0]) for s in allStrings]),
)
dblQuoteTest = r"""
'This string has an doubled ('') quote character'
"This string has an doubled ("") quote character"
"""
sglStrings = [
(t[0], b, e) for (t, b, e) in sglQuotedString.scanString(dblQuoteTest)
]
print(sglStrings)
self.assertTrue(
len(sglStrings) == 1
and (sglStrings[0][1] == 17 and sglStrings[0][2] == 66),
"single quoted string escaped quote failure (%s)" % str(sglStrings[0]),
)
dblStrings = [
(t[0], b, e) for (t, b, e) in dblQuotedString.scanString(dblQuoteTest)
]
print(dblStrings)
self.assertTrue(
len(dblStrings) == 1
and (dblStrings[0][1] == 83 and dblStrings[0][2] == 132),
"double quoted string escaped quote failure (%s)" % str(dblStrings[0]),
)
allStrings = [
(t[0], b, e) for (t, b, e) in quotedString.scanString(dblQuoteTest)
]
print(allStrings)
self.assertTrue(
len(allStrings) == 2
and (
allStrings[0][1] == 17
and allStrings[0][2] == 66
and allStrings[1][1] == 83
and allStrings[1][2] == 132
),
"quoted string escaped quote failure (%s)"
% ([str(s[0]) for s in allStrings]),
)
print(
"testing catastrophic RE backtracking in implementation of dblQuotedString"
)
for expr, test_string in [
(dblQuotedString, '"' + "\\xff" * 500),
(sglQuotedString, "'" + "\\xff" * 500),
(quotedString, '"' + "\\xff" * 500),
(quotedString, "'" + "\\xff" * 500),
(QuotedString('"'), '"' + "\\xff" * 500),
(QuotedString("'"), "'" + "\\xff" * 500),
]:
expr.parseString(test_string + test_string[0])
try:
expr.parseString(test_string)
except Exception:
continue
# test invalid endQuoteChar
with self.assertRaises(
SyntaxError, msg="issue raising error for invalid endQuoteChar"
):
expr = pp.QuotedString('"', endQuoteChar=" ")
def testCaselessOneOf(self):
caseless1 = pp.oneOf("d a b c aA B A C", caseless=True)
caseless1str = str(caseless1)
print(caseless1str)
caseless2 = pp.oneOf("d a b c Aa B A C", caseless=True)
caseless2str = str(caseless2)
print(caseless2str)
self.assertEqual(
caseless1str.upper(),
caseless2str.upper(),
"oneOf not handling caseless option properly",
)
self.assertNotEqual(
caseless1str, caseless2str, "Caseless option properly sorted"
)
res = caseless1[...].parseString("AAaaAaaA")
print(res)
self.assertEqual(4, len(res), "caseless1 oneOf failed")
self.assertEqual(
"aA" * 4, "".join(res), "caseless1 CaselessLiteral return failed"
)
res = caseless2[...].parseString("AAaaAaaA")
print(res)
self.assertEqual(4, len(res), "caseless2 oneOf failed")
self.assertEqual(
"Aa" * 4, "".join(res), "caseless1 CaselessLiteral return failed"
)
def testCommentParser(self):
print("verify processing of C and HTML comments")
testdata = """
/* */
/** **/
/**/
/***/
/****/
/* /*/
/** /*/
/*** /*/
/*
ablsjdflj
*/
"""
foundLines = [
pp.lineno(s, testdata) for t, s, e in pp.cStyleComment.scanString(testdata)
]
self.assertEqual(
list(range(11))[2:],
foundLines,
"only found C comments on lines " + str(foundLines),
)
testdata = """
<!-- -->
<!--- --->
<!---->
<!----->
<!------>
<!-- /-->
<!--- /-->
<!---- /-->
<!---- /- ->
<!---- / -- >
<!--
ablsjdflj
-->
"""
foundLines = [
pp.lineno(s, testdata) for t, s, e in pp.htmlComment.scanString(testdata)
]
self.assertEqual(
list(range(11))[2:],
foundLines,
"only found HTML comments on lines " + str(foundLines),
)
# test C++ single line comments that have line terminated with '\' (should continue comment to following line)
testSource = r"""
// comment1
// comment2 \
still comment 2
// comment 3
"""
self.assertEqual(
41,
len(pp.cppStyleComment.searchString(testSource)[1][0]),
r"failed to match single-line comment with '\' at EOL",
)
def testParseExpressionResults(self):
from pyparsing import Word, alphas, OneOrMore, Optional, Group
a = Word("a", alphas).setName("A")
b = Word("b", alphas).setName("B")
c = Word("c", alphas).setName("C")
ab = (a + b).setName("AB")
abc = (ab + c).setName("ABC")
word = Word(alphas).setName("word")
words = Group(OneOrMore(~a + word)).setName("words")
phrase = (
words("Head") + Group(a + Optional(b + Optional(c)))("ABC") + words("Tail")
)
results = phrase.parseString("xavier yeti alpha beta charlie will beaver")
print(results, results.Head, results.ABC, results.Tail)
for key, ln in [("Head", 2), ("ABC", 3), ("Tail", 2)]:
self.assertEqual(
ln,
len(results[key]),
"expected %d elements in %s, found %s" % (ln, key, str(results[key])),
)
def testParseKeyword(self):
kw = pp.Keyword("if")
lit = pp.Literal("if")
def test(s, litShouldPass, kwShouldPass):
print("Test", s)
print("Match Literal", end=" ")
try:
print(lit.parseString(s))
except Exception:
print("failed")
if litShouldPass:
self.fail("Literal failed to match %s, should have" % s)
else:
if not litShouldPass:
self.fail("Literal matched %s, should not have" % s)
print("Match Keyword", end=" ")
try:
print(kw.parseString(s))
except Exception:
print("failed")
if kwShouldPass:
self.fail("Keyword failed to match %s, should have" % s)
else:
if not kwShouldPass:
self.fail("Keyword matched %s, should not have" % s)
test("ifOnlyIfOnly", True, False)
test("if(OnlyIfOnly)", True, True)
test("if (OnlyIf Only)", True, True)
kw = pp.Keyword("if", caseless=True)
test("IFOnlyIfOnly", False, False)
test("If(OnlyIfOnly)", False, True)
test("iF (OnlyIf Only)", False, True)
with self.assertWarns(
SyntaxWarning, msg="failed to warn empty string passed to Keyword"
):
kw = pp.Keyword("")
def testParseExpressionResultsAccumulate(self):
from pyparsing import Word, delimitedList, Combine, alphas, nums
num = Word(nums).setName("num")("base10*")
hexnum = Combine("0x" + Word(nums)).setName("hexnum")("hex*")
name = Word(alphas).setName("word")("word*")
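# a trailing '*' on a results name (e.g. "base10*") sets listAllMatches=True,
# so repeated matches accumulate under that name instead of overwriting it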
list_of_num = delimitedList(hexnum | num | name, ",")
tokens = list_of_num.parseString("1, 0x2, 3, 0x4, aaa")
print(tokens.dump())
self.assertParseResultsEquals(
tokens,
expected_list=["1", "0x2", "3", "0x4", "aaa"],
expected_dict={
"base10": ["1", "3"],
"hex": ["0x2", "0x4"],
"word": ["aaa"],
},
)
from pyparsing import (
Literal,
Word,
nums,
Group,
Dict,
alphas,
quotedString,
oneOf,
delimitedList,
removeQuotes,
alphanums,
)
lbrack = Literal("(").suppress()
rbrack = Literal(")").suppress()
integer = Word(nums).setName("int")
variable = Word(alphas, max=1).setName("variable")
relation_body_item = (
variable | integer | quotedString.copy().setParseAction(removeQuotes)
)
relation_name = Word(alphas + "_", alphanums + "_")
relation_body = lbrack + Group(delimitedList(relation_body_item)) + rbrack
Goal = Dict(Group(relation_name + relation_body))
Comparison_Predicate = Group(variable + oneOf("< >") + integer)("pred*")
Query = Goal("head") + ":-" + delimitedList(Goal | Comparison_Predicate)
test = """Q(x,y,z):-Bloo(x,"Mitsis",y),Foo(y,z,1243),y>28,x<12,x>3"""
queryRes = Query.parseString(test)
print(queryRes.dump())
self.assertParseResultsEquals(
queryRes.pred,
expected_list=[["y", ">", "28"], ["x", "<", "12"], ["x", ">", "3"]],
msg="Incorrect list for attribute pred, %s" % str(queryRes.pred.asList()),
)
def testReStringRange(self):
testCases = (
(r"[A-Z]"),
(r"[A-A]"),
(r"[A-Za-z]"),
(r"[A-z]"),
(r"[\ -\~]"),
(r"[\0x20-0]"),
(r"[\0x21-\0x7E]"),
(r"[\0xa1-\0xfe]"),
(r"[\040-0]"),
(r"[A-Za-z0-9]"),
(r"[A-Za-z0-9_]"),
(r"[A-Za-z0-9_$]"),
(r"[A-Za-z0-9_$\-]"),
(r"[^0-9\\]"),
(r"[a-zA-Z]"),
(r"[/\^~]"),
(r"[=\+\-!]"),
(r"[A-]"),
(r"[-A]"),
(r"[\x21]"),
(r"[а-яА-ЯёЁA-Z$_\041α-ω]"),
)
expectedResults = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"A",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz",
" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~",
" !\"#$%&'()*+,-./0",
"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~",
"¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþ",
" !\"#$%&'()*+,-./0",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$-",
"0123456789\\",
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
"/^~",
"=+-!",
"A-",
"-A",
"!",
"абвгдежзийклмнопрстуфхцчшщъыьэюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯёЁABCDEFGHIJKLMNOPQRSTUVWXYZ$_!αβγδεζηθικλμνξοπρςστυφχψω",
)
for test in zip(testCases, expectedResults):
t, exp = test
res = pp.srange(t)
# print(t, "->", res)
self.assertEqual(
exp,
res,
"srange error, srange({!r})->'{!r}', expected '{!r}'".format(
t, res, exp
),
)
def testSkipToParserTests(self):
from pyparsing import Literal, SkipTo, cStyleComment
thingToFind = Literal("working")
testExpr = (
SkipTo(Literal(";"), include=True, ignore=cStyleComment) + thingToFind
)
def test_parse(someText):
print(testExpr.parseString(someText))
# This first test works, as the SkipTo expression is immediately following the ignore expression (cStyleComment)
test_parse("some text /* comment with ; in */; working")
# This second test previously failed, as there is text following the ignore expression, and before the SkipTo expression.
test_parse("some text /* comment with ; in */some other stuff; working")
# tests for optional failOn argument
testExpr = (
SkipTo(Literal(";"), include=True, ignore=cStyleComment, failOn="other")
+ thingToFind
)
test_parse("some text /* comment with ; in */; working")
with self.assertRaisesParseException():
test_parse("some text /* comment with ; in */some other stuff; working")
# test that we correctly create named results
text = "prefixDATAsuffix"
data = Literal("DATA")
suffix = Literal("suffix")
expr = SkipTo(data + suffix)("prefix") + data + suffix
result = expr.parseString(text)
self.assertTrue(
isinstance(result.prefix, str),
"SkipTo created with wrong saveAsList attribute",
)
from pyparsing import Literal, And, Word, alphas, nums
alpha_word = (~Literal("end") + Word(alphas, asKeyword=True)).setName("alpha")
num_word = Word(nums, asKeyword=True).setName("int")
def test(expr, test_string, expected_list, expected_dict):
if (expected_list, expected_dict) == (None, None):
with self.assertRaises(
Exception, msg="{} failed to parse {!r}".format(expr, test_string)
):
expr.parseString(test_string)
else:
result = expr.parseString(test_string)
self.assertParseResultsEquals(
result, expected_list=expected_list, expected_dict=expected_dict
)
# ellipses for SkipTo
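# within a sequence, ... is shorthand for SkipTo(<following expression>);
# any skipped text is reported under the "_skipped" results name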
e = ... + Literal("end")
test(e, "start 123 end", ["start 123 ", "end"], {"_skipped": ["start 123 "]})
e = Literal("start") + ... + Literal("end")
test(e, "start 123 end", ["start", "123 ", "end"], {"_skipped": ["123 "]})
e = Literal("start") + ...
test(e, "start 123 end", None, None)
e = And(["start", ..., "end"])
test(e, "start 123 end", ["start", "123 ", "end"], {"_skipped": ["123 "]})
e = And([..., "end"])
test(e, "start 123 end", ["start 123 ", "end"], {"_skipped": ["start 123 "]})
e = "start" + (num_word | ...) + "end"
test(e, "start 456 end", ["start", "456", "end"], {})
test(
e,
"start 123 456 end",
["start", "123", "456 ", "end"],
{"_skipped": ["456 "]},
)
test(e, "start end", ["start", "", "end"], {"_skipped": ["missing <int>"]})
# e = define_expr('"start" + (num_word | ...)("inner") + "end"')
# test(e, "start 456 end", ['start', '456', 'end'], {'inner': '456'})
e = "start" + (alpha_word[...] & num_word[...] | ...) + "end"
test(e, "start 456 red end", ["start", "456", "red", "end"], {})
test(e, "start red 456 end", ["start", "red", "456", "end"], {})
test(
e,
"start 456 red + end",
["start", "456", "red", "+ ", "end"],
{"_skipped": ["+ "]},
)
test(e, "start red end", ["start", "red", "end"], {})
test(e, "start 456 end", ["start", "456", "end"], {})
test(e, "start end", ["start", "end"], {})
test(e, "start 456 + end", ["start", "456", "+ ", "end"], {"_skipped": ["+ "]})
e = "start" + (alpha_word[1, ...] & num_word[1, ...] | ...) + "end"
test(e, "start 456 red end", ["start", "456", "red", "end"], {})
test(e, "start red 456 end", ["start", "red", "456", "end"], {})
test(
e,
"start 456 red + end",
["start", "456", "red", "+ ", "end"],
{"_skipped": ["+ "]},
)
test(e, "start red end", ["start", "red ", "end"], {"_skipped": ["red "]})
test(e, "start 456 end", ["start", "456 ", "end"], {"_skipped": ["456 "]})
test(
e,
"start end",
["start", "", "end"],
{"_skipped": ["missing <{{alpha}... & {int}...}>"]},
)
test(e, "start 456 + end", ["start", "456 + ", "end"], {"_skipped": ["456 + "]})
e = "start" + (alpha_word | ...) + (num_word | ...) + "end"
test(e, "start red 456 end", ["start", "red", "456", "end"], {})
test(
e,
"start red end",
["start", "red", "", "end"],
{"_skipped": ["missing <int>"]},
)
test(
e,
"start end",
["start", "", "", "end"],
{"_skipped": ["missing <alpha>", "missing <int>"]},
)
e = Literal("start") + ... + "+" + ... + "end"
test(
e,
"start red + 456 end",
["start", "red ", "+", "456 ", "end"],
{"_skipped": ["red ", "456 "]},
)
def testEllipsisRepetition(self):
import re
word = pp.Word(pp.alphas).setName("word")
num = pp.Word(pp.nums).setName("num")
exprs = [
word[...] + num,
word[0, ...] + num,
word[1, ...] + num,
word[2, ...] + num,
word[..., 3] + num,
word[2] + num,
]
expected_res = [
r"([abcd]+ )*\d+",
r"([abcd]+ )*\d+",
r"([abcd]+ )+\d+",
r"([abcd]+ ){2,}\d+",
r"([abcd]+ ){0,3}\d+",
r"([abcd]+ ){2}\d+",
]
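# each slice form maps to a regex-style quantifier:
# expr[...] and expr[0, ...] -> *, expr[1, ...] -> +, expr[2, ...] -> {2,},
# expr[..., 3] -> {0,3}, expr[2] -> {2}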
tests = ["aa bb cc dd 123", "bb cc dd 123", "cc dd 123", "dd 123", "123"]
all_success = True
for expr, expected_re in zip(exprs, expected_res):
successful_tests = [t for t in tests if re.match(expected_re, t)]
failure_tests = [t for t in tests if not re.match(expected_re, t)]
success1, _ = expr.runTests(successful_tests)
success2, _ = expr.runTests(failure_tests, failureTests=True)
all_success = all_success and success1 and success2
if not all_success:
print("Failed expression:", expr)
break
self.assertTrue(all_success, "failed getItem_ellipsis test")
def testEllipsisRepetitionWithResultsNames(self):
label = pp.Word(pp.alphas)
val = ppc.integer()
parser = label("label") + pp.ZeroOrMore(val)("values")
_, results = parser.runTests(
"""
a 1
b 1 2 3
c
"""
)
expected = [
(["a", 1], {"label": "a", "values": [1]}),
(["b", 1, 2, 3], {"label": "b", "values": [1, 2, 3]}),
(["c"], {"label": "c", "values": []}),
]
for obs, exp in zip(results, expected):
test, result = obs
exp_list, exp_dict = exp
self.assertParseResultsEquals(
result, expected_list=exp_list, expected_dict=exp_dict
)
parser = label("label") + val[...]("values")
_, results = parser.runTests(
"""
a 1
b 1 2 3
c
"""
)
expected = [
(["a", 1], {"label": "a", "values": [1]}),
(["b", 1, 2, 3], {"label": "b", "values": [1, 2, 3]}),
(["c"], {"label": "c", "values": []}),
]
for obs, exp in zip(results, expected):
test, result = obs
exp_list, exp_dict = exp
self.assertParseResultsEquals(
result, expected_list=exp_list, expected_dict=exp_dict
)
pt = pp.Group(val("x") + pp.Suppress(",") + val("y"))
parser = label("label") + pt[...]("points")
_, results = parser.runTests(
"""
a 1,1
b 1,1 2,2 3,3
c
"""
)
expected = [
(["a", [1, 1]], {"label": "a", "points": [{"x": 1, "y": 1}]}),
(
["b", [1, 1], [2, 2], [3, 3]],
{
"label": "b",
"points": [{"x": 1, "y": 1}, {"x": 2, "y": 2}, {"x": 3, "y": 3}],
},
),
(["c"], {"label": "c", "points": []}),
]
for obs, exp in zip(results, expected):
test, result = obs
exp_list, exp_dict = exp
self.assertParseResultsEquals(
result, expected_list=exp_list, expected_dict=exp_dict
)
def testCustomQuotes(self):
from pyparsing import QuotedString
testString = r"""
sdlfjs :sdf\:jls::djf: sl:kfsjf
sdlfjs -sdf\:jls::--djf: sl-kfsjf
sdlfjs -sdf\:::jls::--djf: sl:::-kfsjf
sdlfjs ^sdf\:jls^^--djf^ sl-kfsjf
sdlfjs ^^^==sdf\:j=lz::--djf: sl=^^=kfsjf
sdlfjs ==sdf\:j=ls::--djf: sl==kfsjf^^^
"""
colonQuotes = QuotedString(":", "\\", "::")
dashQuotes = QuotedString("-", "\\", "--")
hatQuotes = QuotedString("^", "\\")
hatQuotes1 = QuotedString("^", "\\", "^^")
dblEqQuotes = QuotedString("==", "\\")
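# positional args to QuotedString here are (quoteChar, escChar, escQuote):
# escChar escapes a single following character, escQuote is a whole-sequence
# escape for an embedded quote (e.g. a doubled quote character)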
def test(quoteExpr, expected):
print(quoteExpr.pattern)
print(quoteExpr.searchString(testString))
print(quoteExpr.searchString(testString)[0][0])
print(expected)
self.assertEqual(
expected,
quoteExpr.searchString(testString)[0][0],
"failed to match {}, expected '{}', got '{}'".format(
quoteExpr, expected, quoteExpr.searchString(testString)[0]
),
)
print()
test(colonQuotes, r"sdf:jls:djf")
test(dashQuotes, r"sdf:jls::-djf: sl")
test(hatQuotes, r"sdf:jls")
test(hatQuotes1, r"sdf:jls^--djf")
test(dblEqQuotes, r"sdf:j=ls::--djf: sl")
test(QuotedString(":::"), "jls::--djf: sl")
test(QuotedString("==", endQuoteChar="--"), r"sdf\:j=lz::")
test(
QuotedString("^^^", multiline=True),
r"""==sdf\:j=lz::--djf: sl=^^=kfsjf
sdlfjs ==sdf\:j=ls::--djf: sl==kfsjf""",
)
with self.assertRaises(SyntaxError):
QuotedString("", "\\")
def testRepeater(self):
from pyparsing import (
matchPreviousLiteral,
matchPreviousExpr,
Word,
nums,
ParserElement,
)
if ParserElement._packratEnabled:
print("skipping this test, not compatible with packratting")
return
first = Word("abcdef").setName("word1")
bridge = Word(nums).setName("number")
second = matchPreviousLiteral(first).setName("repeat(word1Literal)")
seq = first + bridge + second
tests = [
("abc12abc", True),
("abc12aabc", False),
("abc12cba", True),
("abc12bca", True),
]
for tst, expected in tests:
found = False
for tokens, start, end in seq.scanString(tst):
f, b, s = tokens
print(f, b, s)
found = True
if not found:
print("No literal match in", tst)
self.assertEqual(
expected,
found,
"Failed repeater for test: {}, matching {}".format(tst, str(seq)),
)
print()
# retest using matchPreviousExpr instead of matchPreviousLiteral
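# matchPreviousLiteral re-matches the literal text of the previous match, while
# matchPreviousExpr re-applies the same expression and requires its results to
# equal the previous results exactly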
second = matchPreviousExpr(first).setName("repeat(word1expr)")
seq = first + bridge + second
tests = [("abc12abc", True), ("abc12cba", False), ("abc12abcdef", False)]
for tst, expected in tests:
found = False
for tokens, start, end in seq.scanString(tst):
print(tokens)
found = True
if not found:
print("No expression match in", tst)
self.assertEqual(
expected,
found,
"Failed repeater for test: {}, matching {}".format(tst, str(seq)),
)
print()
first = Word("abcdef").setName("word1")
bridge = Word(nums).setName("number")
second = matchPreviousExpr(first).setName("repeat(word1)")
seq = first + bridge + second
csFirst = seq.setName("word-num-word")
csSecond = matchPreviousExpr(csFirst)
compoundSeq = csFirst + ":" + csSecond
compoundSeq.streamline()
print(compoundSeq)
tests = [
("abc12abc:abc12abc", True),
("abc12cba:abc12abc", False),
("abc12abc:abc12abcdef", False),
]
for tst, expected in tests:
found = False
for tokens, start, end in compoundSeq.scanString(tst):
print("match:", tokens)
found = True
break
if not found:
print("No expression match in", tst)
self.assertEqual(
expected,
found,
"Failed repeater for test: {}, matching {}".format(tst, str(seq)),
)
print()
eFirst = Word(nums)
eSecond = matchPreviousExpr(eFirst)
eSeq = eFirst + ":" + eSecond
tests = [("1:1A", True), ("1:10", False)]
for tst, expected in tests:
found = False
for tokens, start, end in eSeq.scanString(tst):
print(tokens)
found = True
if not found:
print("No match in", tst)
self.assertEqual(
expected,
found,
"Failed repeater for test: {}, matching {}".format(tst, str(seq)),
)
def testRepeater2(self):
"""test matchPreviousLiteral with empty repeater"""
if ParserElement._packratEnabled:
print("skipping this test, not compatible with packratting")
return
first = pp.Optional(pp.Word("abcdef").setName("words1"))
bridge = pp.Word(pp.nums).setName("number")
second = pp.matchPreviousLiteral(first).setName("repeat(word1Literal)")
seq = first + bridge + second
tst = "12"
expected = ["12"]
result = seq.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater3(self):
"""test matchPreviousLiteral with multiple repeater tokens"""
if ParserElement._packratEnabled:
print("skipping this test, not compatible with packratting")
return
first = pp.Word("a") + pp.Word("d")
bridge = pp.Word(pp.nums).setName("number")
second = pp.matchPreviousLiteral(first) # ("second")
seq = first + bridge + second
tst = "aaaddd12aaaddd"
expected = ["aaa", "ddd", "12", "aaa", "ddd"]
result = seq.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater4(self):
"""test matchPreviousExpr with multiple repeater tokens"""
if ParserElement._packratEnabled:
print("skipping this test, not compatible with packratting")
return
first = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.alphas))
bridge = pp.Word(pp.nums)
# no matching is used - this is just here for a sanity check
# second = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.alphas))("second")
# second = pp.Group(pp.Word(pp.alphas) + pp.Word(pp.alphas)).setResultsName("second")
# ISSUE: when matchPreviousExpr returns multiple tokens the matching tokens are nested an extra level deep.
# This behavior is not seen with a single return token (see testRepeater5 directly below.)
second = pp.matchPreviousExpr(first)
expr = first + bridge.suppress() + second
tst = "aaa ddd 12 aaa ddd"
expected = [["aaa", "ddd"], ["aaa", "ddd"]]
result = expr.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRepeater5(self):
"""a simplified testRepeater4 to examine matchPreviousExpr with a single repeater token"""
if ParserElement._packratEnabled:
print("skipping this test, not compatible with packratting")
return
first = pp.Word(pp.alphas)
bridge = pp.Word(pp.nums)
second = pp.matchPreviousExpr(first)
expr = first + bridge.suppress() + second
tst = "aaa 12 aaa"
expected = tst.replace("12", "").split()
result = expr.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected)
def testRecursiveCombine(self):
from pyparsing import Forward, Word, alphas, nums, Optional, Combine
testInput = "myc(114)r(11)dd"
stream = Forward()
stream <<= Optional(Word(alphas)) + Optional("(" + Word(nums) + ")" + stream)
expected = ["".join(stream.parseString(testInput))]
print(expected)
stream = Forward()
stream << Combine(
Optional(Word(alphas)) + Optional("(" + Word(nums) + ")" + stream)
)
testVal = stream.parseString(testInput)
print(testVal)
self.assertParseResultsEquals(testVal, expected_list=expected)
def testInfixNotationBasicArithEval(self):
from pyparsing import Word, nums, alphas, Literal, oneOf, infixNotation, opAssoc
import ast
integer = Word(nums).setParseAction(lambda t: int(t[0]))
variable = Word(alphas, exact=1)
operand = integer | variable
expop = Literal("^")
signop = oneOf("+ -")
multop = oneOf("* /")
plusop = oneOf("+ -")
factop = Literal("!")
expr = infixNotation(
operand,
[
(factop, 1, opAssoc.LEFT),
(expop, 2, opAssoc.RIGHT),
(signop, 1, opAssoc.RIGHT),
(multop, 2, opAssoc.LEFT),
(plusop, 2, opAssoc.LEFT),
],
)
test = [
"9 + 2 + 3",
"9 + 2 * 3",
"(9 + 2) * 3",
"(9 + -2) * 3",
"(9 + --2) * 3",
"(9 + -2) * 3^2^2",
"(9! + -2) * 3^2^2",
"M*X + B",
"M*(X + B)",
"1+2*-3^4*5+-+-6",
"3!!",
]
expected = """[[9, '+', 2, '+', 3]]
[[9, '+', [2, '*', 3]]]
[[[9, '+', 2], '*', 3]]
[[[9, '+', ['-', 2]], '*', 3]]
[[[9, '+', ['-', ['-', 2]]], '*', 3]]
[[[9, '+', ['-', 2]], '*', [3, '^', [2, '^', 2]]]]
[[[[9, '!'], '+', ['-', 2]], '*', [3, '^', [2, '^', 2]]]]
[[['M', '*', 'X'], '+', 'B']]
[['M', '*', ['X', '+', 'B']]]
[[1, '+', [2, '*', ['-', [3, '^', 4]], '*', 5], '+', ['-', ['+', ['-', 6]]]]]
[[3, '!', '!']]""".split(
"\n"
)
expected = [ast.literal_eval(x.strip()) for x in expected]
for test_str, exp_list in zip(test, expected):
self.assertParseAndCheckList(expr, test_str, exp_list, verbose=True)
def testInfixNotationEvalBoolExprUsingAstClasses(self):
from pyparsing import infixNotation, Word, alphas, oneOf, opAssoc
boolVars = {"True": True, "False": False}
class BoolOperand:
reprsymbol = ""
def __init__(self, t):
self.args = t[0][0::2]
def __str__(self):
sep = " %s " % self.reprsymbol
return "(" + sep.join(map(str, self.args)) + ")"
class BoolAnd(BoolOperand):
reprsymbol = "&"
def __bool__(self):
for a in self.args:
if isinstance(a, str):
v = boolVars[a]
else:
v = bool(a)
if not v:
return False
return True
class BoolOr(BoolOperand):
reprsymbol = "|"
def __bool__(self):
for a in self.args:
if isinstance(a, str):
v = boolVars[a]
else:
v = bool(a)
if v:
return True
return False
class BoolNot(BoolOperand):
def __init__(self, t):
self.arg = t[0][1]
def __str__(self):
return "~" + str(self.arg)
def __bool__(self):
if isinstance(self.arg, str):
v = boolVars[self.arg]
else:
v = bool(self.arg)
return not v
boolOperand = Word(alphas, max=1) | oneOf("True False")
boolExpr = infixNotation(
boolOperand,
[
("not", 1, opAssoc.RIGHT, BoolNot),
("and", 2, opAssoc.LEFT, BoolAnd),
("or", 2, opAssoc.LEFT, BoolOr),
],
)
test = [
"p and not q",
"not not p",
"not(p and q)",
"q or not p and r",
"q or not p or not r",
"q or not (p and r)",
"p or q or r",
"p or q or r and False",
"(p or q or r) and False",
]
boolVars["p"] = True
boolVars["q"] = False
boolVars["r"] = True
print("p =", boolVars["p"])
print("q =", boolVars["q"])
print("r =", boolVars["r"])
print()
for t in test:
res = boolExpr.parseString(t)
print(t, "\n", res[0], "=", bool(res[0]), "\n")
expected = eval(t, {}, boolVars)
self.assertEqual(
expected, bool(res[0]), "failed boolean eval test {}".format(t)
)
def testInfixNotationMinimalParseActionCalls(self):
from pyparsing import infixNotation, Word, alphas, oneOf, opAssoc, nums, Literal
global count
count = 0
def evaluate_int(t):
global count
value = int(t[0])
print("evaluate_int", value)
count += 1
return value
integer = Word(nums).setParseAction(evaluate_int)
variable = Word(alphas, exact=1)
operand = integer | variable
expop = Literal("^")
signop = oneOf("+ -")
multop = oneOf("* /")
plusop = oneOf("+ -")
factop = Literal("!")
expr = infixNotation(
operand,
[
("!", 1, opAssoc.LEFT),
("^", 2, opAssoc.LEFT),
(signop, 1, opAssoc.RIGHT),
(multop, 2, opAssoc.LEFT),
(plusop, 2, opAssoc.LEFT),
],
)
test = ["9"]
for t in test:
count = 0
print("%r => %s (count=%d)" % (t, expr.parseString(t), count))
self.assertEqual(1, count, "count evaluated too many times!")
def testInfixNotationWithParseActions(self):
word = pp.Word(pp.alphas)
def supLiteral(s):
"""Returns the suppressed literal s"""
return pp.Literal(s).suppress()
def booleanExpr(atom):
ops = [
(supLiteral("!"), 1, pp.opAssoc.RIGHT, lambda s, l, t: ["!", t[0][0]]),
(pp.oneOf("= !="), 2, pp.opAssoc.LEFT),
(supLiteral("&"), 2, pp.opAssoc.LEFT, lambda s, l, t: ["&", t[0]]),
(supLiteral("|"), 2, pp.opAssoc.LEFT, lambda s, l, t: ["|", t[0]]),
]
return pp.infixNotation(atom, ops)
f = booleanExpr(word) + pp.StringEnd()
tests = [
("bar = foo", [["bar", "=", "foo"]]),
(
"bar = foo & baz = fee",
["&", [["bar", "=", "foo"], ["baz", "=", "fee"]]],
),
]
for test, expected in tests:
print(test)
results = f.parseString(test)
print(results)
self.assertParseResultsEquals(results, expected_list=expected)
print()
def testInfixNotationGrammarTest5(self):
expop = pp.Literal("**")
signop = pp.oneOf("+ -")
multop = pp.oneOf("* /")
plusop = pp.oneOf("+ -")
class ExprNode:
def __init__(self, tokens):
self.tokens = tokens[0]
def eval(self):
return None
class NumberNode(ExprNode):
def eval(self):
return self.tokens
class SignOp(ExprNode):
def eval(self):
mult = {"+": 1, "-": -1}[self.tokens[0]]
return mult * self.tokens[1].eval()
class BinOp(ExprNode):
def eval(self):
ret = self.tokens[0].eval()
for op, operand in zip(self.tokens[1::2], self.tokens[2::2]):
ret = self.opn_map[op](ret, operand.eval())
return ret
class ExpOp(BinOp):
opn_map = {"**": lambda a, b: b ** a}
class MultOp(BinOp):
import operator
opn_map = {"*": operator.mul, "/": operator.truediv}
class AddOp(BinOp):
import operator
opn_map = {"+": operator.add, "-": operator.sub}
operand = ppc.number().setParseAction(NumberNode)
expr = pp.infixNotation(
operand,
[
(expop, 2, pp.opAssoc.LEFT, (lambda pr: [pr[0][::-1]], ExpOp)),
(signop, 1, pp.opAssoc.RIGHT, SignOp),
(multop, 2, pp.opAssoc.LEFT, MultOp),
(plusop, 2, pp.opAssoc.LEFT, AddOp),
],
)
tests = """\
2+7
2**3
2**3**2
3**9
3**3**2
"""
for t in tests.splitlines():
t = t.strip()
if not t:
continue
parsed = expr.parseString(t)
eval_value = parsed[0].eval()
self.assertEqual(
eval(t),
eval_value,
"Error evaluating {!r}, expected {!r}, got {!r}".format(
t, eval(t), eval_value
),
)
def testInfixNotationExceptions(self):
num = pp.Word(pp.nums)
# arity 3 with None opExpr - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [(None, 3, pp.opAssoc.LEFT),])
# arity 3 with invalid tuple - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [(("+", "-", "*"), 3, pp.opAssoc.LEFT)])
# left arity > 3 - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [("*", 4, pp.opAssoc.LEFT)])
# right arity > 3 - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [("*", 4, pp.opAssoc.RIGHT)])
# assoc not from opAssoc - should raise ValueError
with self.assertRaises(ValueError):
expr = pp.infixNotation(num, [("*", 2, "LEFT")])
def testInfixNotationWithNonOperators(self):
# left arity 2 with None expr
# right arity 2 with None expr
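# a None opExpr with arity 2 means two operands are simply juxtaposed with no
# operator token, e.g. "3x" groups as [3, "x"]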
num = pp.Word(pp.nums).addParseAction(pp.tokenMap(int))
ident = ppc.identifier()
for assoc in (pp.opAssoc.LEFT, pp.opAssoc.RIGHT):
expr = pp.infixNotation(
num | ident, [(None, 2, assoc), ("+", 2, pp.opAssoc.LEFT)]
)
self.assertParseAndCheckList(expr, "3x+2", [[[3, "x"], "+", 2]])
def testInfixNotationTernaryOperator(self):
# left arity 3
# right arity 3
num = pp.Word(pp.nums).addParseAction(pp.tokenMap(int))
for assoc in (pp.opAssoc.LEFT, pp.opAssoc.RIGHT):
expr = pp.infixNotation(
num, [("+", 2, pp.opAssoc.LEFT), (("?", ":"), 3, assoc),]
)
self.assertParseAndCheckList(
expr, "3 + 2? 12: 13", [[[3, "+", 2], "?", 12, ":", 13]]
)
def testParseResultsPickle(self):
import pickle
# test 1
body = pp.makeHTMLTags("BODY")[0]
result = body.parseString("<BODY BGCOLOR='#00FFBB' FGCOLOR=black>")
print(result.dump())
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
print("Test pickle dump protocol", protocol)
try:
pickleString = pickle.dumps(result, protocol)
except Exception as e:
print("dumps exception:", e)
newresult = pp.ParseResults()
else:
newresult = pickle.loads(pickleString)
print(newresult.dump())
self.assertEqual(
result.dump(),
newresult.dump(),
"Error pickling ParseResults object (protocol=%d)" % protocol,
)
def testParseResultsPickle2(self):
import pickle
word = pp.Word(pp.alphas + "'.")
salutation = pp.OneOrMore(word)
comma = pp.Literal(",")
greetee = pp.OneOrMore(word)
endpunc = pp.oneOf("! ?")
greeting = (
salutation("greeting")
+ pp.Suppress(comma)
+ greetee("greetee")
+ endpunc("punc*")[1, ...]
)
string = "Good morning, <NAME>!"
result = greeting.parseString(string)
self.assertParseResultsEquals(
result,
["Good", "morning", "Miss", "Crabtree", "!"],
{
"greeting": ["Good", "morning"],
"greetee": ["Miss", "Crabtree"],
"punc": ["!"],
},
)
print(result.dump())
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
print("Test pickle dump protocol", protocol)
try:
pickleString = pickle.dumps(result, protocol)
except Exception as e:
print("dumps exception:", e)
newresult = pp.ParseResults()
else:
newresult = pickle.loads(pickleString)
print(newresult.dump())
self.assertEqual(
newresult.dump(),
result.dump(),
"failed to pickle/unpickle ParseResults: expected {!r}, got {!r}".format(
result, newresult
),
)
def testParseResultsPickle3(self):
import pickle
# result with aslist=False
res_not_as_list = pp.Word("ABC").parseString("BABBAB")
# result with aslist=True
res_as_list = pp.Group(pp.Word("ABC")).parseString("BABBAB")
# result with modal=True
res_modal = pp.Word("ABC")("name").parseString("BABBAB")
# self.assertTrue(res_modal._modal)
# result with modal=False
res_not_modal = pp.Word("ABC")("name*").parseString("BABBAB")
# self.assertFalse(res_not_modal._modal)
for result in (res_as_list, res_not_as_list, res_modal, res_not_modal):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
print("Test pickle dump protocol", protocol)
try:
pickleString = pickle.dumps(result, protocol)
except Exception as e:
print("dumps exception:", e)
newresult = pp.ParseResults()
else:
newresult = pickle.loads(pickleString)
print(newresult.dump())
self.assertEqual(
newresult.dump(),
result.dump(),
"failed to pickle/unpickle ParseResults: expected {!r}, got {!r}".format(
result, newresult
),
)
def testParseResultsInsertWithResultsNames(self):
test_string = "1 2 3 dice rolled first try"
wd = pp.Word(pp.alphas)
num = ppc.number
expr = (
pp.Group(num[1, ...])("nums")
+ wd("label")
+ pp.Group(wd[...])("additional")
)
result = expr.parseString(test_string)
print("Pre-insert")
print(result.dump())
result.insert(1, sum(result.nums))
print("\nPost-insert")
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=[[1, 2, 3], 6, "dice", ["rolled", "first", "try"]],
expected_dict={
"additional": ["rolled", "first", "try"],
"label": "dice",
"nums": [1, 2, 3],
},
)
def testParseResultsStringListUsingCombine(self):
test_string = "1 2 3 dice rolled first try"
wd = pp.Word(pp.alphas)
num = ppc.number
expr = pp.Combine(
pp.Group(num[1, ...])("nums")
+ wd("label")
+ pp.Group(wd[...])("additional"),
joinString="/",
adjacent=False,
)
self.assertEqual("123/dice/rolledfirsttry", expr.parseString(test_string)[0])
def testMatchOnlyAtCol(self):
"""successfully use matchOnlyAtCol helper function"""
expr = pp.Word(pp.nums)
expr.setParseAction(pp.matchOnlyAtCol(5))
largerExpr = pp.ZeroOrMore(pp.Word("A")) + expr + pp.ZeroOrMore(pp.Word("A"))
res = largerExpr.parseString("A A 3 A")
print(res.dump())
def testMatchOnlyAtColErr(self):
"""raise a ParseException in matchOnlyAtCol with incorrect col"""
expr = pp.Word(pp.nums)
expr.setParseAction(pp.matchOnlyAtCol(1))
largerExpr = pp.ZeroOrMore(pp.Word("A")) + expr + pp.ZeroOrMore(pp.Word("A"))
with self.assertRaisesParseException():
largerExpr.parseString("A A 3 A")
def testParseResultsWithNamedTuple(self):
from pyparsing import Literal, replaceWith
expr = Literal("A")("Achar")
expr.setParseAction(replaceWith(tuple(["A", "Z"])))
res = expr.parseString("A")
print(repr(res))
print(res.Achar)
self.assertParseResultsEquals(
res,
expected_dict={"Achar": ("A", "Z")},
msg="Failed accessing named results containing a tuple, "
"got {!r}".format(res.Achar),
)
def testParserElementAddOperatorWithOtherTypes(self):
"""test the overridden "+" operator with other data types"""
# ParserElement + str
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") + "suf"
result = expr.parseString("spam eggs suf")
print(result)
expected_l = ["spam", "eggs", "suf"]
self.assertParseResultsEquals(
result, expected_l, msg="issue with ParserElement + str",
)
# str + ParserElement
expr = "pre" + pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parseString("pre spam eggs")
print(result)
expected_l = ["pre", "spam", "eggs"]
self.assertParseResultsEquals(
result, expected_l, msg="issue with str + ParserElement",
)
# ParserElement + int
with self.assertWarns(SyntaxWarning, msg="failed to warn ParserElement + int"):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") + 12
self.assertEqual(expr, None)
# int + ParserElement
with self.assertWarns(SyntaxWarning, msg="failed to warn int + ParserElement"):
expr = 12 + pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
self.assertEqual(expr, None)
def testParserElementSubOperatorWithOtherTypes(self):
"""test the overridden "-" operator with other data types"""
# ParserElement - str
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") - "suf"
result = expr.parseString("spam eggs suf")
print(result)
expected = ["spam", "eggs", "suf"]
self.assertParseResultsEquals(
result, expected, msg="issue with ParserElement - str"
)
# str - ParserElement
expr = "pre" - pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parseString("pre spam eggs")
print(result)
expected = ["pre", "spam", "eggs"]
self.assertParseResultsEquals(
result, expected, msg="issue with str - ParserElement"
)
# ParserElement - int
with self.assertWarns(SyntaxWarning, msg="failed to warn ParserElement - int"):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second") - 12
self.assertEqual(expr, None)
# int - ParserElement
with self.assertWarns(SyntaxWarning, msg="failed to warn int - ParserElement"):
expr = 12 - pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
self.assertEqual(expr, None)
def testParserElementMulOperatorWithTuples(self):
"""test ParserElement "*" with various tuples"""
# ParserElement * (0, 0)
with self.assertRaises(
ValueError, msg="ParserElement * (0,0) should raise error"
):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (0, 0)
# ParserElement * (None, n)
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (None, 3)
results1 = expr.parseString("spam")
print(results1.dump())
expected = ["spam"]
self.assertParseResultsEquals(
results1, expected, msg="issue with ParserElement * w/ optional matches"
)
results2 = expr.parseString("spam 12 23 34")
print(results2.dump())
expected = ["spam", "12", "23", "34"]
self.assertParseResultsEquals(
results2, expected, msg="issue with ParserElement * w/ optional matches"
)
# ParserElement * (1, 1)
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (1, 1)
results = expr.parseString("spam 45")
print(results.dump())
expected = ["spam", "45"]
self.assertParseResultsEquals(
results, expected, msg="issue with ParserElement * (1, 1)"
)
# ParserElement * (1, 1+n)
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (1, 3)
results1 = expr.parseString("spam 100")
print(results1.dump())
expected = ["spam", "100"]
self.assertParseResultsEquals(
results1, expected, msg="issue with ParserElement * (1, 1+n)"
)
results2 = expr.parseString("spam 100 200 300")
print(results2.dump())
expected = ["spam", "100", "200", "300"]
self.assertParseResultsEquals(
results2, expected, msg="issue with ParserElement * (1, 1+n)"
)
# ParserElement * (lesser, greater)
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * (2, 3)
results1 = expr.parseString("spam 1 2")
print(results1.dump())
expected = ["spam", "1", "2"]
self.assertParseResultsEquals(
results1, expected, msg="issue with ParserElement * (lesser, greater)"
)
results2 = expr.parseString("spam 1 2 3")
print(results2.dump())
expected = ["spam", "1", "2", "3"]
self.assertParseResultsEquals(
results2, expected, msg="issue with ParserElement * (lesser, greater)"
)
# ParserElement * (greater, lesser)
with self.assertRaises(
ValueError, msg="ParserElement * (greater, lesser) should raise error"
):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second") * (3, 2)
# ParserElement * (str, str)
with self.assertRaises(
TypeError, msg="ParserElement * (str, str) should raise error"
):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second") * ("2", "3")
def testParserElementMulOperatorWithOtherTypes(self):
"""test the overridden "*" operator with other data types"""
# ParserElement * str
with self.assertRaises(TypeError, msg="ParserElement * str should raise error"):
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second") * "3"
# str * ParserElement
with self.assertRaises(TypeError, msg="str * ParserElement should raise error"):
expr = pp.Word(pp.alphas)("first") + "3" * pp.Word(pp.nums)("second")
# ParserElement * int
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.nums)("second*") * 2
results = expr.parseString("spam 11 22")
print(results.dump())
expected = ["spam", "11", "22"]
self.assertParseResultsEquals(
results, expected, msg="issue with ParserElement * int"
)
# int * ParserElement
expr = pp.Word(pp.alphas)("first") + 2 * pp.Word(pp.nums)("second*")
results = expr.parseString("spam 111 222")
print(results.dump())
expected = ["spam", "111", "222"]
self.assertParseResultsEquals(
results, expected, msg="issue with int * ParserElement"
)
def testParserElementMatchFirstOperatorWithOtherTypes(self):
"""test the overridden "|" operator with other data types"""
# ParserElement | int
with self.assertWarns(SyntaxWarning, msg="failed to warn ParserElement | int"):
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas)("second") | 12)
self.assertEqual(expr, None)
# int | ParserElement
with self.assertWarns(SyntaxWarning, msg="failed to warn int | ParserElement"):
expr = pp.Word(pp.alphas)("first") + (12 | pp.Word(pp.alphas)("second"))
self.assertEqual(expr, None)
def testParserElementMatchLongestWithOtherTypes(self):
"""test the overridden "^" operator with other data types"""
# ParserElement ^ str
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.nums)("second") ^ "eggs")
result = expr.parseString("spam eggs")
print(result)
expected = ["spam", "eggs"]
self.assertParseResultsEquals(
result, expected, msg="issue with ParserElement ^ str"
)
# str ^ ParserElement
expr = ("pre" ^ pp.Word("pr")("first")) + pp.Word(pp.alphas)("second")
result = expr.parseString("pre eggs")
print(result)
expected = ["pre", "eggs"]
self.assertParseResultsEquals(
result, expected, msg="issue with str ^ ParserElement",
)
# ParserElement ^ int
with self.assertWarns(SyntaxWarning, msg="failed to warn ParserElement ^ int"):
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas)("second") ^ 54)
self.assertEqual(expr, None)
# int ^ ParserElement
with self.assertWarns(SyntaxWarning, msg="failed to warn int ^ ParserElement"):
expr = pp.Word(pp.alphas)("first") + (65 ^ pp.Word(pp.alphas)("second"))
self.assertEqual(expr, None)
def testParserElementEachOperatorWithOtherTypes(self):
"""test the overridden "&" operator with other data types"""
# ParserElement & str
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas)("second") & "and")
with self.assertRaisesParseException(msg="issue with ParserElement & str"):
result = expr.parseString("spam and eggs")
# str & ParserElement
expr = pp.Word(pp.alphas)("first") + ("and" & pp.Word(pp.alphas)("second"))
result = expr.parseString("spam and eggs")
print(result.dump())
expected_l = ["spam", "and", "eggs"]
expected_d = {"first": "spam", "second": "eggs"}
self.assertParseResultsEquals(
result,
expected_list=expected_l,
expected_dict=expected_d,
msg="issue with str & ParserElement",
)
# ParserElement & int
with self.assertWarns(SyntaxWarning, msg="failed to warn ParserElement & int"):
expr = pp.Word(pp.alphas)("first") + (pp.Word(pp.alphas) & 78)
self.assertEqual(expr, None)
# int & ParserElement
with self.assertWarns(SyntaxWarning, msg="failed to warn int & ParserElement"):
expr = pp.Word(pp.alphas)("first") + (89 & pp.Word(pp.alphas))
self.assertEqual(expr, None)
def testParserElementPassedThreeArgsToMultiplierShorthand(self):
"""test the ParserElement form expr[m,n,o]"""
with self.assertWarns(
UserWarning, msg="failed to warn three index arguments to expr[m, n, o]"
):
expr = pp.Word(pp.alphas)[2, 3, 4]
result = expr.parseString("spam eggs grail")
print(result)
expected = ["spam", "eggs", "grail"]
self.assertParseResultsEquals(result, expected)
result2 = expr.parseString("spam eggs holy grail")
print(result2)
expected2 = ["spam", "eggs", "holy"]
self.assertParseResultsEquals(result2, expected2)
def testParserElementPassedStrToMultiplierShorthand(self):
"""test the ParserElement form expr[str]"""
with self.assertRaises(
TypeError, msg="failed to raise expected error using string multiplier"
):
expr2 = pp.Word(pp.alphas)["2"]
def testParseResultsNewEdgeCases(self):
"""test less common paths of ParseResults.__new__()"""
# create new ParseResults w/ None
result1 = pp.ParseResults(None)
print(result1.dump())
self.assertParseResultsEquals(
result1, [], msg="ParseResults(None) should return empty ParseResults"
)
# create new ParseResults w/ integer name
result2 = pp.ParseResults(name=12)
print(result2.dump())
self.assertEqual(
"12",
result2.getName(),
"ParseResults int name should be accepted and converted to str",
)
# create new ParseResults w/ generator type
gen = (a for a in range(1, 6))
result3 = pp.ParseResults(gen)
print(result3.dump())
expected3 = [1, 2, 3, 4, 5]
self.assertParseResultsEquals(
result3, expected3, msg="issue initializing ParseResults w/ gen type"
)
def testParseResultsReversed(self):
"""test simple case of reversed(ParseResults)"""
tst = "1 2 3 4 5"
expr = pp.OneOrMore(pp.Word(pp.nums))
result = expr.parseString(tst)
reversed_list = [ii for ii in reversed(result)]
print(reversed_list)
expected = ["5", "4", "3", "2", "1"]
self.assertEqual(
expected, reversed_list, msg="issue calling reversed(ParseResults)"
)
def testParseResultsValues(self):
"""test simple case of ParseResults.values()"""
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parseString("spam eggs")
values_set = set(result.values())
print(values_set)
expected = {"spam", "eggs"}
self.assertEqual(
expected, values_set, msg="issue calling ParseResults.values()"
)
def testParseResultsAppend(self):
"""test simple case of ParseResults.append()"""
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
expr = pp.OneOrMore(pp.Word(pp.nums)).addParseAction(append_sum)
result = expr.parseString("0 123 321")
expected = ["0", "123", "321", 444]
print(result.dump())
self.assertParseResultsEquals(
result, expected, msg="issue with ParseResults.append()"
)
def testParseResultsClear(self):
"""test simple case of ParseResults.clear()"""
tst = "spam eggs"
expr = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
result = expr.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(
result, ["spam", "eggs"], msg="issue with ParseResults before clear()"
)
result.clear()
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=[],
expected_dict={},
msg="issue with ParseResults.clear()",
)
def testParseResultsExtendWithString(self):
"""test ParseResults.extend() with input of type str"""
# use a parse action to append the reverse of the matched strings to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
tst = "abc def ghi"
expr = pp.OneOrMore(pp.Word(pp.alphas))
result = expr.addParseAction(make_palindrome).parseString(tst)
print(result.dump())
expected = ["abc", "def", "ghi", "ihg", "fed", "cba"]
self.assertParseResultsEquals(
result, expected, msg="issue with ParseResults.extend(str)"
)
def testParseResultsExtendWithParseResults(self):
"""test ParseResults.extend() with input of type ParseResults"""
expr = pp.OneOrMore(pp.Word(pp.alphas))
result1 = expr.parseString("spam eggs")
result2 = expr.parseString("foo bar")
result1.extend(result2)
print(result1.dump())
expected = ["spam", "eggs", "foo", "bar"]
self.assertParseResultsEquals(
result1, expected, msg="issue with ParseResults.extend(ParseResults)"
)
def testParseResultsFromDict(self):
"""test helper classmethod ParseResults.from_dict()"""
dict = {
"first": "123",
"second": 456,
"third": {"threeStr": "789", "threeInt": 789},
}
name = "trios"
result = pp.ParseResults.from_dict(dict, name=name)
print(result.dump())
expected = {name: dict}
self.assertParseResultsEquals(
result,
expected_dict=expected,
msg="issue creating ParseResults.from _dict()",
)
def testParseResultsDir(self):
"""test dir(ParseResults)"""
dict = {"first": "123", "second": "456", "third": "789"}
name = "trios"
result = pp.ParseResults.from_dict(dict, name=name)
dir_result = dir(result)
print(dir_result)
self.assertIn(
name, dir_result, msg="name value wasn't returned by dir(ParseResults)"
)
self.assertIn(
"asList", dir_result, msg="asList was not returned by dir(ParseResults)"
)
def testParseResultsInsert(self):
"""test ParseResults.insert() with named tokens"""
from random import randint
result = pp.Word(pp.alphas)[...].parseString("A B C D E F G H I J")
compare_list = result.asList()
print(result)
print(compare_list)
for s in "abcdefghij":
index = randint(-5, 5)
result.insert(index, s)
compare_list.insert(index, s)
print(result)
print(compare_list)
self.assertParseResultsEquals(
result, compare_list, msg="issue with ParseResults.insert()"
)
def testIgnoreString(self):
"""test ParserElement.ignore() passed a string arg"""
tst = "I like totally like love pickles"
expr = pp.Word(pp.alphas)[...].ignore("like")
result = expr.parseString(tst)
print(result)
expected = ["I", "totally", "love", "pickles"]
self.assertParseResultsEquals(result, expected, msg="issue with ignore(string)")
def testParseHTMLTags(self):
test = """
<BODY>
<BODY BGCOLOR="#00FFCC">
<BODY BGCOLOR="#00FFAA"/>
<BODY BGCOLOR='#00FFBB' FGCOLOR=black>
<BODY/>
</BODY>
"""
results = [
("startBody", False, "", ""),
("startBody", False, "#00FFCC", ""),
("startBody", True, "#00FFAA", ""),
("startBody", False, "#00FFBB", "black"),
("startBody", True, "", ""),
("endBody", False, "", ""),
]
bodyStart, bodyEnd = pp.makeHTMLTags("BODY")
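# makeHTMLTags returns (openTag, closeTag) expressions; attribute values are
# exposed as lowercased results names (bgcolor, fgcolor), and "empty" is set
# when the tag is self-closed with "/>"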
resIter = iter(results)
for t, s, e in (bodyStart | bodyEnd).scanString(test):
print(test[s:e], "->", t)
(expectedType, expectedEmpty, expectedBG, expectedFG) = next(resIter)
print(t.dump())
if "startBody" in t:
self.assertEqual(
expectedEmpty,
bool(t.empty),
"expected {} token, got {}".format(
expectedEmpty and "empty" or "not empty",
t.empty and "empty" or "not empty",
),
)
self.assertEqual(
expectedBG,
t.bgcolor,
"failed to match BGCOLOR, expected {}, got {}".format(
expectedBG, t.bgcolor
),
)
self.assertEqual(
expectedFG,
t.fgcolor,
"failed to match FGCOLOR, expected {}, got {}".format(
expectedFG, t.fgcolor
),
)
elif "endBody" in t:
print("end tag")
pass
else:
print("BAD!!!")
def testSetParseActionUncallableErr(self):
"""raise a TypeError in setParseAction() by adding uncallable arg"""
expr = pp.Literal("A")("Achar")
uncallable = 12
with self.assertRaises(TypeError):
expr.setParseAction(uncallable)
res = expr.parseString("A")
print(res.dump())
def testMulWithNegativeNumber(self):
"""raise a ValueError in __mul__ by multiplying a negative number"""
with self.assertRaises(ValueError):
pp.Literal("A")("Achar") * (-1)
def testMulWithEllipsis(self):
"""multiply an expression with Ellipsis as ``expr * ...`` to match ZeroOrMore"""
expr = pp.Literal("A")("Achar") * ...
res = expr.parseString("A")
self.assertEqual(["A"], res.asList(), "expected expr * ... to match ZeroOrMore")
print(res.dump())
def testUpcaseDowncaseUnicode(self):
from pyparsing import pyparsing_unicode as ppu
import sys
a = "\u00bfC\u00f3mo esta usted?"
if not JYTHON_ENV:
ualphas = ppu.alphas
else:
ualphas = "".join(
chr(i)
for i in list(range(0xD800)) + list(range(0xE000, sys.maxunicode))
if chr(i).isalpha()
)
uword = pp.Word(ualphas).setParseAction(ppc.upcaseTokens)
print = lambda *args: None
print(uword.searchString(a))
uword = pp.Word(ualphas).setParseAction(ppc.downcaseTokens)
print(uword.searchString(a))
kw = pp.Keyword("mykey", caseless=True).setParseAction(ppc.upcaseTokens)(
"rname"
)
ret = kw.parseString("mykey")
print(ret.rname)
self.assertEqual(
"MYKEY", ret.rname, "failed to upcase with named result (pyparsing_common)"
)
kw = pp.Keyword("MYKEY", caseless=True).setParseAction(ppc.downcaseTokens)(
"rname"
)
ret = kw.parseString("mykey")
print(ret.rname)
self.assertEqual("mykey", ret.rname, "failed to upcase with named result")
if not IRON_PYTHON_ENV:
# test html data
html = "<TR class=maintxt bgColor=#ffffff> \
<TD vAlign=top>Производитель, модель</TD> \
<TD vAlign=top><STRONG>BenQ-Siemens CF61</STRONG></TD> \
" # .decode('utf-8')
# 'Manufacturer, model
text_manuf = "Производитель, модель"
manufacturer = pp.Literal(text_manuf)
td_start, td_end = pp.makeHTMLTags("td")
manuf_body = (
td_start.suppress()
+ manufacturer
+ pp.SkipTo(td_end)("cells*")
+ td_end.suppress()
)
def testParseUsingRegex(self):
import re
signedInt = pp.Regex(r"[-+][0-9]+")
unsignedInt = pp.Regex(r"[0-9]+")
simpleString = pp.Regex(r'("[^\"]*")|(\'[^\']*\')')
namedGrouping = pp.Regex(r'("(?P<content>[^\"]*)")')
compiledRE = pp.Regex(re.compile(r"[A-Z]+"))
def testMatch(expression, instring, shouldPass, expectedString=None):
if shouldPass:
try:
result = expression.parseString(instring)
print(
"{} correctly matched {}".format(
repr(expression), repr(instring)
)
)
if expectedString != result[0]:
print("\tbut failed to match the pattern as expected:")
print(
"\tproduced %s instead of %s"
% (repr(result[0]), repr(expectedString))
)
return True
except pp.ParseException:
print(
"%s incorrectly failed to match %s"
% (repr(expression), repr(instring))
)
else:
try:
result = expression.parseString(instring)
print(
"{} incorrectly matched {}".format(
repr(expression), repr(instring)
)
)
print("\tproduced %s as a result" % repr(result[0]))
except pp.ParseException:
print(
"%s correctly failed to match %s"
% (repr(expression), repr(instring))
)
return True
return False
# These should fail
self.assertTrue(
testMatch(signedInt, "1234 foo", False), "Re: (1) passed, expected fail"
)
self.assertTrue(
testMatch(signedInt, " +foo", False), "Re: (2) passed, expected fail"
)
self.assertTrue(
testMatch(unsignedInt, "abc", False), "Re: (3) passed, expected fail"
)
self.assertTrue(
testMatch(unsignedInt, "+123 foo", False), "Re: (4) passed, expected fail"
)
self.assertTrue(
testMatch(simpleString, "foo", False), "Re: (5) passed, expected fail"
)
self.assertTrue(
testMatch(simpleString, "\"foo bar'", False),
"Re: (6) passed, expected fail",
)
self.assertTrue(
testMatch(simpleString, "'foo bar\"", False),
"Re: (7) passed, expected fail",
)
# These should pass
self.assertTrue(
testMatch(signedInt, " +123", True, "+123"),
"Re: (8) failed, expected pass",
)
self.assertTrue(
testMatch(signedInt, "+123", True, "+123"), "Re: (9) failed, expected pass"
)
self.assertTrue(
testMatch(signedInt, "+123 foo", True, "+123"),
"Re: (10) failed, expected pass",
)
self.assertTrue(
testMatch(signedInt, "-0 foo", True, "-0"), "Re: (11) failed, expected pass"
)
self.assertTrue(
testMatch(unsignedInt, "123 foo", True, "123"),
"Re: (12) failed, expected pass",
)
self.assertTrue(
testMatch(unsignedInt, "0 foo", True, "0"), "Re: (13) failed, expected pass"
)
self.assertTrue(
testMatch(simpleString, '"foo"', True, '"foo"'),
"Re: (14) failed, expected pass",
)
self.assertTrue(
testMatch(simpleString, "'foo bar' baz", True, "'foo bar'"),
"Re: (15) failed, expected pass",
)
self.assertTrue(
testMatch(compiledRE, "blah", False), "Re: (16) passed, expected fail"
)
self.assertTrue(
testMatch(compiledRE, "BLAH", True, "BLAH"),
"Re: (17) failed, expected pass",
)
self.assertTrue(
testMatch(namedGrouping, '"foo bar" baz', True, '"foo bar"'),
"Re: (16) failed, expected pass",
)
ret = namedGrouping.parseString('"zork" blah')
print(ret)
print(list(ret.items()))
print(ret.content)
self.assertEqual("zork", ret.content, "named group lookup failed")
self.assertEqual(
simpleString.parseString('"zork" blah')[0],
ret[0],
"Regex not properly returning ParseResults for named vs. unnamed groups",
)
try:
print("lets try an invalid RE")
invRe = pp.Regex("(\"[^\"]*\")|('[^']*'")
except Exception as e:
print("successfully rejected an invalid RE:", end=" ")
print(e)
else:
self.fail("failed to reject invalid RE")
with self.assertWarns(
SyntaxWarning, msg="failed to warn empty string passed to Regex"
):
invRe = pp.Regex("")
def testRegexAsType(self):
test_str = "sldkjfj 123 456 lsdfkj"
print("return as list of match groups")
expr = pp.Regex(r"\w+ (\d+) (\d+) (\w+)", asGroupList=True)
expected_group_list = [tuple(test_str.split()[1:])]
result = expr.parseString(test_str)
print(result.dump())
print(expected_group_list)
self.assertParseResultsEquals(
result,
expected_list=expected_group_list,
msg="incorrect group list returned by Regex)",
)
print("return as re.match instance")
expr = pp.Regex(
r"\w+ (?P<num1>\d+) (?P<num2>\d+) (?P<last_word>\w+)", asMatch=True
)
result = expr.parseString(test_str)
print(result.dump())
print(result[0].groups())
print(expected_group_list)
self.assertEqual(
{"num1": "123", "num2": "456", "last_word": "lsdfkj"},
result[0].groupdict(),
"invalid group dict from Regex(asMatch=True)",
)
self.assertEqual(
expected_group_list[0],
result[0].groups(),
"incorrect group list returned by Regex(asMatch)",
)
def testRegexSub(self):
print("test sub with string")
expr = pp.Regex(r"<title>").sub("'<NAME>'")
result = expr.transformString("This is the title: <title>")
print(result)
self.assertEqual(
"This is the title: '<NAME>'",
result,
"incorrect Regex.sub result with simple string",
)
print("test sub with re string")
expr = pp.Regex(r"([Hh]\d):\s*(.*)").sub(r"<\1>\2</\1>")
result = expr.transformString(
"h1: This is the main heading\nh2: This is the sub-heading"
)
print(result)
self.assertEqual(
"<h1>This is the main heading</h1>\n<h2>This is the sub-heading</h2>",
result,
"incorrect Regex.sub result with re string",
)
print("test sub with re string (Regex returns re.match)")
expr = pp.Regex(r"([Hh]\d):\s*(.*)", asMatch=True).sub(r"<\1>\2</\1>")
result = expr.transformString(
"h1: This is the main heading\nh2: This is the sub-heading"
)
print(result)
self.assertEqual(
"<h1>This is the main heading</h1>\n<h2>This is the sub-heading</h2>",
result,
"incorrect Regex.sub result with re string",
)
print("test sub with callable that return str")
expr = pp.Regex(r"<(.*?)>").sub(lambda m: m.group(1).upper())
result = expr.transformString("I want this in upcase: <what? what?>")
print(result)
self.assertEqual(
"I want this in upcase: WHAT? WHAT?",
result,
"incorrect Regex.sub result with callable",
)
with self.assertRaises(SyntaxError):
pp.Regex(r"<(.*?)>", asMatch=True).sub(lambda m: m.group(1).upper())
with self.assertRaises(SyntaxError):
pp.Regex(r"<(.*?)>", asGroupList=True).sub(lambda m: m.group(1).upper())
with self.assertRaises(SyntaxError):
pp.Regex(r"<(.*?)>", asGroupList=True).sub("")
def testRegexInvalidType(self):
"""test Regex of an invalid type"""
with self.assertRaisesParseException(
TypeError, msg="issue with Regex of type int"
):
expr = pp.Regex(12)
def testPrecededBy(self):
num = pp.Word(pp.nums).setParseAction(lambda t: int(t[0]))
interesting_num = pp.PrecededBy(pp.Char("abc")("prefix*")) + num
semi_interesting_num = pp.PrecededBy("_") + num
crazy_num = pp.PrecededBy(pp.Word("^", "$%^")("prefix*"), 10) + num
boring_num = ~pp.PrecededBy(pp.Char("abc_$%^" + pp.nums)) + num
very_boring_num = pp.PrecededBy(pp.WordStart()) + num
finicky_num = pp.PrecededBy(pp.Word("^", "$%^"), retreat=3) + num
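# PrecededBy performs a lookbehind from the current parse position; for
# variable-length expressions, retreat= bounds how many characters it will
# scan backwards before giving up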
s = "c384 b8324 _9293874 _293 404 $%^$^%$2939"
print(s)
for expr, expected_list, expected_dict in [
(interesting_num, [384, 8324], {"prefix": ["c", "b"]}),
(semi_interesting_num, [9293874, 293], {}),
(boring_num, [404], {}),
(crazy_num, [2939], {"prefix": ["^%$"]}),
(finicky_num, [2939], {}),
(very_boring_num, [404], {}),
]:
# print(expr.searchString(s))
result = sum(expr.searchString(s))
print(result.dump())
self.assertParseResultsEquals(result, expected_list, expected_dict)
# infinite loop test - from Issue #127
string_test = "notworking"
# negs = pp.Or(['not', 'un'])('negs')
negs_pb = pp.PrecededBy("not", retreat=100)("negs_lb")
# negs_pb = pp.PrecededBy(negs, retreat=100)('negs_lb')
pattern = (negs_pb + pp.Literal("working"))("main")
results = pattern.searchString(string_test)
try:
print(results.dump())
except RecursionError:
self.fail("got maximum excursion limit exception")
else:
print("got maximum excursion limit exception")
def testCountedArray(self):
from pyparsing import Word, nums, OneOrMore, Group, countedArray
testString = "2 5 7 6 0 1 2 3 4 5 0 3 5 4 3"
integer = Word(nums).setParseAction(lambda t: int(t[0]))
countedField = countedArray(integer)
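# countedArray parses a leading integer count followed by exactly that many
# elements; a count of 0 yields an empty group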
r = OneOrMore(Group(countedField)).parseString(testString)
print(testString)
print(r)
self.assertParseResultsEquals(
r, expected_list=[[5, 7], [0, 1, 2, 3, 4, 5], [], [5, 4, 3]]
)
# addresses bug raised by <NAME>
def testCountedArrayTest2(self):
from pyparsing import Word, nums, OneOrMore, Group, countedArray
testString = "2 5 7 6 0 1 2 3 4 5 0 3 5 4 3"
integer = Word(nums).setParseAction(lambda t: int(t[0]))
countedField = countedArray(integer)
dummy = Word("A")
r = OneOrMore(Group(dummy ^ countedField)).parseString(testString)
print(testString)
print(r)
self.assertParseResultsEquals(
r, expected_list=[[5, 7], [0, 1, 2, 3, 4, 5], [], [5, 4, 3]]
)
def testCountedArrayTest3(self):
from pyparsing import Word, nums, OneOrMore, Group, countedArray, alphas
int_chars = "_" + alphas
array_counter = Word(int_chars).setParseAction(lambda t: int_chars.index(t[0]))
# 123456789012345678901234567890
testString = "B 5 7 F 0 1 2 3 4 5 _ C 5 4 3"
integer = Word(nums).setParseAction(lambda t: int(t[0]))
countedField = countedArray(integer, intExpr=array_counter)
r = OneOrMore(Group(countedField)).parseString(testString)
print(testString)
print(r)
self.assertParseResultsEquals(
r, expected_list=[[5, 7], [0, 1, 2, 3, 4, 5], [], [5, 4, 3]]
)
def testCountedArrayTest4(self):
import pyparsing as pp
ppc = pp.pyparsing_common
# array counter contains several fields - first field *must* be the number of
# items in the array
# - number of elements
# - type of elements
# - source of elements
counter_with_metadata = (
ppc.integer("count") + ppc.identifier("type") + ppc.identifier("source")
)
countedField = pp.countedArray(
pp.Word(pp.alphanums), intExpr=counter_with_metadata
)
testString = (
"5 string input item1 item2 item3 item4 item5 0 int user 2 int file 3 8"
)
r = pp.Group(countedField("items"))[...].parseString(testString, parseAll=True)
print(testString)
print(r.dump())
print("type = {!r}".format(r.type))
print("source = {!r}".format(r.source))
self.assertParseResultsEquals(
r,
expected_list=[
["item1", "item2", "item3", "item4", "item5"],
[],
["3", "8"],
],
)
self.assertParseResultsEquals(
r[0],
expected_dict={
"count": 5,
"source": "input",
"type": "string",
"items": ["item1", "item2", "item3", "item4", "item5"],
},
)
# parse with additional fields between the count and the actual list items
count_with_metadata = ppc.integer + pp.Word(pp.alphas)("type")
typed_array = pp.countedArray(
pp.Word(pp.alphanums), intExpr=count_with_metadata
)("items")
result = typed_array.parseString("3 bool True True False")
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=["True", "True", "False"],
expected_dict={"type": "bool", "items": ["True", "True", "False"]},
)
def testLineStart(self):
pass_tests = [
"""\
AAA
BBB
""",
"""\
AAA...
BBB
""",
]
fail_tests = [
"""\
AAA...
...BBB
""",
"""\
AAA BBB
""",
]
# cleanup test strings
pass_tests = [
"\n".join(s.lstrip() for s in t.splitlines()).replace(".", " ")
for t in pass_tests
]
fail_tests = [
"\n".join(s.lstrip() for s in t.splitlines()).replace(".", " ")
for t in fail_tests
]
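# LineStart is a zero-width match at column 1; with '-' the rest of the pattern is
# mandatory, so the B-word must begin a new line for the pass_tests to succeed.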
test_patt = pp.Word("A") - pp.LineStart() + pp.Word("B")
print(test_patt.streamline())
success = test_patt.runTests(pass_tests)[0]
self.assertTrue(success, "failed LineStart passing tests (1)")
success = test_patt.runTests(fail_tests, failureTests=True)[0]
self.assertTrue(success, "failed LineStart failure mode tests (1)")
with ppt.reset_pyparsing_context():
print(r"no \n in default whitespace chars")
pp.ParserElement.setDefaultWhitespaceChars(" ")
test_patt = pp.Word("A") - pp.LineStart() + pp.Word("B")
print(test_patt.streamline())
# should fail the pass tests too, since \n is no longer valid whitespace and we aren't parsing for it
success = test_patt.runTests(pass_tests, failureTests=True)[0]
self.assertTrue(success, "failed LineStart passing tests (2)")
success = test_patt.runTests(fail_tests, failureTests=True)[0]
self.assertTrue(success, "failed LineStart failure mode tests (2)")
test_patt = (
pp.Word("A")
- pp.LineEnd().suppress()
+ pp.LineStart()
+ pp.Word("B")
+ pp.LineEnd().suppress()
)
print(test_patt.streamline())
success = test_patt.runTests(pass_tests)[0]
self.assertTrue(success, "failed LineStart passing tests (3)")
success = test_patt.runTests(fail_tests, failureTests=True)[0]
self.assertTrue(success, "failed LineStart failure mode tests (3)")
test = """\
AAA 1
AAA 2
AAA
B AAA
"""
from textwrap import dedent
test = dedent(test)
print(test)
for t, s, e in (pp.LineStart() + "AAA").scanString(test):
print(s, e, pp.lineno(s, test), pp.line(s, test), ord(test[s]))
print()
self.assertEqual(
"A", test[s], "failed LineStart with insignificant newlines"
)
with ppt.reset_pyparsing_context():
pp.ParserElement.setDefaultWhitespaceChars(" ")
for t, s, e in (pp.LineStart() + "AAA").scanString(test):
print(s, e, pp.lineno(s, test), pp.line(s, test), ord(test[s]))
print()
self.assertEqual(
"A", test[s], "failed LineStart with insignificant newlines"
)
def testLineAndStringEnd(self):
NLs = pp.OneOrMore(pp.lineEnd)
bnf1 = pp.delimitedList(pp.Word(pp.alphanums).leaveWhitespace(), NLs)
bnf2 = pp.Word(pp.alphanums) + pp.stringEnd
bnf3 = pp.Word(pp.alphanums) + pp.SkipTo(pp.stringEnd)
tests = [
("testA\ntestB\ntestC\n", ["testA", "testB", "testC"]),
("testD\ntestE\ntestF", ["testD", "testE", "testF"]),
("a", ["a"]),
]
for test, expected in tests:
res1 = bnf1.parseString(test)
print(res1, "=?", expected)
self.assertParseResultsEquals(
res1,
expected_list=expected,
msg="Failed lineEnd/stringEnd test (1): "
+ repr(test)
+ " -> "
+ str(res1),
)
res2 = bnf2.searchString(test)[0]
print(res2, "=?", expected[-1:])
self.assertParseResultsEquals(
res2,
expected_list=expected[-1:],
msg="Failed lineEnd/stringEnd test (2): "
+ repr(test)
+ " -> "
+ str(res2),
)
res3 = bnf3.parseString(test)
first = res3[0]
rest = res3[1]
# ~ print res3.dump()
print(repr(rest), "=?", repr(test[len(first) + 1 :]))
self.assertEqual(
rest,
test[len(first) + 1 :],
"Failed lineEnd/stringEnd test (3): "
+ repr(test)
+ " -> "
+ str(res3.asList()),
)
print()
from pyparsing import Regex
import re
k = Regex(r"a+", flags=re.S + re.M)
k = k.parseWithTabs()
k = k.leaveWhitespace()
tests = [
(r"aaa", ["aaa"]),
(r"\naaa", None),
(r"a\naa", None),
(r"aaa\n", None),
]
for i, (src, expected) in enumerate(tests):
print(i, repr(src).replace("\\\\", "\\"), end=" ")
if expected is None:
with self.assertRaisesParseException():
k.parseString(src, parseAll=True)
else:
res = k.parseString(src, parseAll=True)
self.assertParseResultsEquals(
res, expected, msg="Failed on parseAll=True test %d" % i
)
def testVariableParseActionArgs(self):
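# pyparsing adapts parse actions to their arity: callables may accept
# (), (toks), (loc, toks), or (s, loc, toks); plain functions, lambdas, methods,
# instances with __call__, and even classes used as constructors all work.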
pa3 = lambda s, l, t: t
pa2 = lambda l, t: t
pa1 = lambda t: t
pa0 = lambda: None
class Callable3:
def __call__(self, s, l, t):
return t
class Callable2:
def __call__(self, l, t):
return t
class Callable1:
def __call__(self, t):
return t
class Callable0:
def __call__(self):
return
class CallableS3:
@staticmethod
def __call__(s, l, t):
return t
class CallableS2:
@staticmethod
def __call__(l, t):
return t
class CallableS1:
@staticmethod
def __call__(t):
return t
class CallableS0:
@staticmethod
def __call__():
return
class CallableC3:
@classmethod
def __call__(cls, s, l, t):
return t
class CallableC2:
@classmethod
def __call__(cls, l, t):
return t
class CallableC1:
@classmethod
def __call__(cls, t):
return t
class CallableC0:
@classmethod
def __call__(cls):
return
class parseActionHolder:
@staticmethod
def pa3(s, l, t):
return t
@staticmethod
def pa2(l, t):
return t
@staticmethod
def pa1(t):
return t
@staticmethod
def pa0():
return
def paArgs(*args):
print(args)
return args[2]
class ClassAsPA0:
def __init__(self):
pass
def __str__(self):
return "A"
class ClassAsPA1:
def __init__(self, t):
print("making a ClassAsPA1")
self.t = t
def __str__(self):
return self.t[0]
class ClassAsPA2:
def __init__(self, l, t):
self.t = t
def __str__(self):
return self.t[0]
class ClassAsPA3:
def __init__(self, s, l, t):
self.t = t
def __str__(self):
return self.t[0]
class ClassAsPAStarNew(tuple):
def __new__(cls, *args):
print("make a ClassAsPAStarNew", args)
return tuple.__new__(cls, *args[2].asList())
def __str__(self):
return "".join(self)
from pyparsing import Literal, OneOrMore
A = Literal("A").setParseAction(pa0)
B = Literal("B").setParseAction(pa1)
C = Literal("C").setParseAction(pa2)
D = Literal("D").setParseAction(pa3)
E = Literal("E").setParseAction(Callable0())
F = Literal("F").setParseAction(Callable1())
G = Literal("G").setParseAction(Callable2())
H = Literal("H").setParseAction(Callable3())
I = Literal("I").setParseAction(CallableS0())
J = Literal("J").setParseAction(CallableS1())
K = Literal("K").setParseAction(CallableS2())
L = Literal("L").setParseAction(CallableS3())
M = Literal("M").setParseAction(CallableC0())
N = Literal("N").setParseAction(CallableC1())
O = Literal("O").setParseAction(CallableC2())
P = Literal("P").setParseAction(CallableC3())
Q = Literal("Q").setParseAction(paArgs)
R = Literal("R").setParseAction(parseActionHolder.pa3)
S = Literal("S").setParseAction(parseActionHolder.pa2)
T = Literal("T").setParseAction(parseActionHolder.pa1)
U = Literal("U").setParseAction(parseActionHolder.pa0)
V = Literal("V")
gg = OneOrMore(
A
| C
| D
| E
| F
| G
| H
| I
| J
| K
| L
| M
| N
| O
| P
| Q
| R
| S
| U
| V
| B
| T
)
testString = "VUTSRQPONMLKJIHGFEDCBA"
res = gg.parseString(testString)
print(res)
self.assertParseResultsEquals(
res,
expected_list=list(testString),
msg="Failed to parse using variable length parse actions",
)
A = Literal("A").setParseAction(ClassAsPA0)
B = Literal("B").setParseAction(ClassAsPA1)
C = Literal("C").setParseAction(ClassAsPA2)
D = Literal("D").setParseAction(ClassAsPA3)
E = Literal("E").setParseAction(ClassAsPAStarNew)
gg = OneOrMore(
A
| B
| C
| D
| E
| F
| G
| H
| I
| J
| K
| L
| M
| N
| O
| P
| Q
| R
| S
| T
| U
| V
)
testString = "VUTSRQPONMLKJIHGFEDCBA"
res = gg.parseString(testString)
print(list(map(str, res)))
self.assertEqual(
list(testString),
list(map(str, res)),
"Failed to parse using variable length parse actions "
"using class constructors as parse actions",
)
def testSingleArgException(self):
testMessage = "just one arg"
try:
raise pp.ParseFatalException(testMessage)
except pp.ParseBaseException as pbe:
print("Received expected exception:", pbe)
raisedMsg = pbe.msg
self.assertEqual(
testMessage, raisedMsg, "Failed to get correct exception message"
)
def testOriginalTextFor(self):
def rfn(t):
return "%s:%d" % (t.src, len("".join(t)))
makeHTMLStartTag = lambda tag: pp.originalTextFor(
pp.makeHTMLTags(tag)[0], asString=False
)
# use the lambda, Luke
start = makeHTMLStartTag("IMG")
# don't replace our fancy parse action with rfn,
# append rfn to the list of parse actions
start.addParseAction(rfn)
text = """_<img src="images/cal.png"
alt="cal image" width="16" height="15">_"""
s = start.transformString(text)
print(s)
self.assertTrue(
s.startswith("_images/cal.png:"), "failed to preserve input s properly"
)
self.assertTrue(
s.endswith("77_"), "failed to return full original text properly"
)
tag_fields = makeHTMLStartTag("IMG").searchString(text)[0]
print(sorted(tag_fields.keys()))
self.assertEqual(
["alt", "empty", "height", "src", "startImg", "tag", "width"],
sorted(tag_fields.keys()),
"failed to preserve results names in originalTextFor",
)
def testPackratParsingCacheCopy(self):
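# regression test for packrat parsing: reusing sub-expressions in a grammar must still
# yield the expected flat token list (guards against cached results being reused badly)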
integer = pp.Word(pp.nums).setName("integer")
id = pp.Word(pp.alphas + "_", pp.alphanums + "_")
simpleType = pp.Literal("int")
arrayType = simpleType + ("[" + pp.delimitedList(integer) + "]")[...]
varType = arrayType | simpleType
varDec = varType + pp.delimitedList(id + pp.Optional("=" + integer)) + ";"
codeBlock = pp.Literal("{}")
funcDef = (
pp.Optional(varType | "void")
+ id
+ "("
+ (pp.delimitedList(varType + id) | "void" | pp.empty)
+ ")"
+ codeBlock
)
program = varDec | funcDef
input = "int f(){}"
self.assertParseAndCheckList(
program,
input,
["int", "f", "(", ")", "{}"],
msg="Error in packrat parsing",
verbose=True,
)
def testPackratParsingCacheCopyTest2(self):
DO, AA = list(map(pp.Keyword, "DO AA".split()))
LPAR, RPAR = list(map(pp.Suppress, "()"))
identifier = ~AA + pp.Word("Z")
function_name = identifier.copy()
# ~ function_name = ~AA + Word("Z") #identifier.copy()
expr = pp.Forward().setName("expr")
expr <<= pp.Group(
function_name + LPAR + pp.Optional(pp.delimitedList(expr)) + RPAR
).setName("functionCall") | identifier.setName(
"ident"
) # .setDebug()#.setBreak()
stmt = DO + pp.Group(pp.delimitedList(identifier + ".*" | expr))
result = stmt.parseString("DO Z")
print(result.asList())
self.assertEqual(
1, len(result[1]), "packrat parsing is duplicating And term exprs"
)
def testParseResultsDel(self):
from pyparsing import OneOrMore, Word, alphas, nums
grammar = OneOrMore(Word(nums))("ints") + OneOrMore(Word(alphas))("words")
res = grammar.parseString("123 456 ABC DEF")
print(res.dump())
origInts = res.ints.asList()
origWords = res.words.asList()
del res[1]
del res["words"]
print(res.dump())
self.assertEqual("ABC", res[1], "failed to delete 0'th element correctly")
self.assertEqual(
origInts,
res.ints.asList(),
"updated named attributes, should have updated list only",
)
self.assertEqual("", res.words, "failed to update named attribute correctly")
self.assertEqual(
"DEF", res[-1], "updated list, should have updated named attributes only"
)
def testWithAttributeParseAction(self):
"""
This unit test checks withAttribute in these ways:
* Argument forms as keywords and tuples
* Selecting matching tags by attribute
* Case-insensitive attribute matching
* Correctly matching tags having the attribute, and rejecting tags not having the attribute
(Unit test written by voigts as part of the Google Highly Open Participation Contest)
"""
from pyparsing import makeHTMLTags, Word, withAttribute, withClass, nums
data = """
<a>1</a>
<a b="x">2</a>
<a B="x">3</a>
<a b="X">4</a>
<a b="y">5</a>
<a class="boo">8</ a>
"""
tagStart, tagEnd = makeHTMLTags("a")
expr = tagStart + Word(nums)("value") + tagEnd
expected = (
[
["a", ["b", "x"], False, "2", "</a>"],
["a", ["b", "x"], False, "3", "</a>"],
],
[
["a", ["b", "x"], False, "2", "</a>"],
["a", ["b", "x"], False, "3", "</a>"],
],
[["a", ["class", "boo"], False, "8", "</a>"]],
)
for attrib, exp in zip(
[
withAttribute(b="x"),
# withAttribute(B="x"),
withAttribute(("b", "x")),
# withAttribute(("B", "x")),
withClass("boo"),
],
expected,
):
tagStart.setParseAction(attrib)
result = expr.searchString(data)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=exp,
msg="Failed test, expected {}, got {}".format(
expected, result.asList()
),
)
def testNestedExpressions(self):
"""
This unit test checks nestedExpr in these ways:
- use of default arguments
- use of non-default arguments (such as a pyparsing-defined comment
expression in place of quotedString)
- use of a custom content expression
- use of a pyparsing expression for opener and closer is *OPTIONAL*
- use of input data containing nesting delimiters
- correct grouping of parsed tokens according to nesting of opening
and closing delimiters in the input string
(Unit test written by christoph... as part of the Google Highly Open Participation Contest)
"""
from pyparsing import nestedExpr, Literal, Regex, restOfLine, quotedString
# All defaults. Straight out of the example script. Also, qualifies for
# the bonus: note the fact that (Z | (E^F) & D) is not parsed :-).
# Tests for bug fixed in 1.4.10
print("Test defaults:")
teststring = "((ax + by)*C) (Z | (E^F) & D)"
expr = nestedExpr()
expected = [[["ax", "+", "by"], "*C"]]
result = expr.parseString(teststring)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_list=expected,
msg="Defaults didn't work. That's a bad sign. Expected: {}, got: {}".format(
expected, result
),
)
# Going through non-defaults, one by one; trying to think of anything
# odd that might not be properly handled.
# Change opener
print("\nNon-default opener")
teststring = "[[ ax + by)*C)"
expected = [[["ax", "+", "by"], "*C"]]
expr = nestedExpr("[")
self.assertParseAndCheckList(
expr,
teststring,
expected,
"Non-default opener didn't work. Expected: {}, got: {}".format(
expected, result
),
verbose=True,
)
# Change closer
print("\nNon-default closer")
teststring = "((ax + by]*C]"
expected = [[["ax", "+", "by"], "*C"]]
expr = nestedExpr(closer="]")
self.assertParseAndCheckList(
expr,
teststring,
expected,
"Non-default closer didn't work. Expected: {}, got: {}".format(
expected, result
),
verbose=True,
)
# #Multicharacter opener, closer
# opener = "bar"
# closer = "baz"
print("\nLiteral expressions for opener and closer")
opener, closer = list(map(Literal, "bar baz".split()))
expr = nestedExpr(opener, closer, content=Regex(r"([^b ]|b(?!a)|ba(?![rz]))+"))
teststring = "barbar ax + bybaz*Cbaz"
expected = [[["ax", "+", "by"], "*C"]]
self.assertParseAndCheckList(
expr,
teststring,
expected,
"Multicharacter opener and closer didn't work. Expected: {}, got: {}".format(
expected, result
),
verbose=True,
)
# Lisp-ish comments
print("\nUse ignore expression (1)")
comment = Regex(r";;.*")
teststring = """
(let ((greeting "Hello, world!")) ;;(foo bar
(display greeting))
"""
expected = [
[
"let",
[["greeting", '"Hello,', 'world!"']],
";;(foo bar",
["display", "greeting"],
]
]
expr = nestedExpr(ignoreExpr=comment)
self.assertParseAndCheckList(
expr,
teststring,
expected,
'Lisp-ish comments (";; <...> $") didn\'t work. Expected: {}, got: {}'.format(
expected, result
),
verbose=True,
)
# Lisp-ish comments, using a standard bit of pyparsing, and an Or.
print("\nUse ignore expression (2)")
comment = ";;" + restOfLine
teststring = """
(let ((greeting "Hello, )world!")) ;;(foo bar
(display greeting))
"""
expected = [
[
"let",
[["greeting", '"Hello, )world!"']],
";;",
"(foo bar",
["display", "greeting"],
]
]
expr = nestedExpr(ignoreExpr=(comment ^ quotedString))
self.assertParseAndCheckList(
expr,
teststring,
expected,
'Lisp-ish comments (";; <...> $") and quoted strings didn\'t work. Expected: {}, got: {}'.format(
expected, result
),
verbose=True,
)
def testNestedExpressions2(self):
"""test nestedExpr with conditions that explore other paths
identical opener and closer
opener and/or closer of type other than string or iterable
multi-character opener and/or closer
single character opener and closer with ignoreExpr=None
multi-character opener and/or closer with ignoreExpr=None
"""
name = pp.Word(pp.alphanums + "_")
# identical opener and closer
with self.assertRaises(
ValueError, msg="matching opener and closer should raise error"
):
expr = name + pp.nestedExpr(opener="{", closer="{")
# opener and/or closer of type other than string or iterable
with self.assertRaises(
ValueError, msg="opener and closer as ints should raise error"
):
expr = name + pp.nestedExpr(opener=12, closer=18)
# multi-character opener and/or closer
tstMulti = "aName {{ outer {{ 'inner with opener {{ and closer }} in quoted string' }} }}"
expr = name + pp.nestedExpr(opener="{{", closer="}}")
result = expr.parseString(tstMulti)
expected = [
"aName",
["outer", ["'inner with opener {{ and closer }} in quoted string'"]],
]
print(result.dump())
self.assertParseResultsEquals(
result, expected, msg="issue with multi-character opener and closer"
)
# single character opener and closer with ignoreExpr=None
tst = (
"aName { outer { 'inner with opener { and closer } in quoted string' }} }}"
)
expr = name + pp.nestedExpr(opener="{", closer="}", ignoreExpr=None)
singleCharResult = expr.parseString(tst)
print(singleCharResult.dump())
# multi-character opener and/or closer with ignoreExpr=None
expr = name + pp.nestedExpr(opener="{{", closer="}}", ignoreExpr=None)
multiCharResult = expr.parseString(tstMulti)
print(multiCharResult.dump())
self.assertParseResultsEquals(
singleCharResult,
multiCharResult.asList(),
msg="using different openers and closers shouldn't affect resulting ParseResults",
)
def testWordExclude(self):
allButPunc = pp.Word(pp.printables, excludeChars=".,:;-_!?")
test = "Hello, Mr. Ed, it's Wilbur!"
result = allButPunc.searchString(test).asList()
print(result)
self.assertEqual(
[["Hello"], ["Mr"], ["Ed"], ["it's"], ["Wilbur"]],
result,
"failed WordExcludeTest",
)
def testWordMinOfZero(self):
"""test a Word with min=0"""
with self.assertRaises(ValueError, msg="expected min 0 to error"):
expr = pp.Word(pp.nums, min=0, max=10)
def testCharAsKeyword(self):
"""test a Char with asKeyword=True"""
grade = pp.OneOrMore(pp.Char("ABCDF", asKeyword=True))
# all single char words
result = grade.parseString("B B C A D")
print(result)
expected = ["B", "B", "C", "A", "D"]
self.assertParseResultsEquals(
result, expected, msg="issue with Char asKeyword=True"
)
# NOT all single char words
test2 = "B BB C A D"
result2 = grade.parseString(test2)
print(result2)
expected2 = ["B"]
self.assertParseResultsEquals(
result2, expected2, msg="issue with Char asKeyword=True parsing 2 chars"
)
def testCharsNotIn(self):
"""test CharsNotIn initialized with various arguments"""
vowels = "AEIOU"
tst = "bcdfghjklmnpqrstvwxyz"
# default args
consonants = pp.CharsNotIn(vowels)
result = consonants.parseString(tst)
print(result)
self.assertParseResultsEquals(
result, [tst], msg="issue with CharsNotIn w/ default args"
)
# min = 0
with self.assertRaises(ValueError, msg="issue with CharsNotIn w/ min=0"):
consonants = pp.CharsNotIn(vowels, min=0)
# max > 0
consonants = pp.CharsNotIn(vowels, max=5)
result = consonants.parseString(tst)
print(result)
self.assertParseResultsEquals(
result, [tst[:5]], msg="issue with CharsNotIn w/ max > 0"
)
# exact > 0
consonants = pp.CharsNotIn(vowels, exact=10)
result = consonants.parseString(tst[:10])
print(result)
self.assertParseResultsEquals(
result, [tst[:10]], msg="issue with CharsNotIn w/ exact > 0"
)
# min > length
consonants = pp.CharsNotIn(vowels, min=25)
with self.assertRaisesParseException(msg="issue with CharsNotIn min > tokens"):
result = consonants.parseString(tst)
def testParseAll(self):
from pyparsing import Word, cppStyleComment
testExpr = Word("A")
tests = [
("AAAAA", False, True),
("AAAAA", True, True),
("AAABB", False, True),
("AAABB", True, False),
]
for s, parseAllFlag, shouldSucceed in tests:
try:
print(
"'{}' parseAll={} (shouldSucceed={})".format(
s, parseAllFlag, shouldSucceed
)
)
testExpr.parseString(s, parseAll=parseAllFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
# add test for trailing comments
testExpr.ignore(cppStyleComment)
tests = [
("AAAAA //blah", False, True),
("AAAAA //blah", True, True),
("AAABB //blah", False, True),
("AAABB //blah", True, False),
]
for s, parseAllFlag, shouldSucceed in tests:
try:
print(
"'{}' parseAll={} (shouldSucceed={})".format(
s, parseAllFlag, shouldSucceed
)
)
testExpr.parseString(s, parseAll=parseAllFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
# add test with very long expression string
# testExpr = pp.MatchFirst([pp.Literal(c) for c in pp.printables if c != 'B'])[1, ...]
anything_but_an_f = pp.OneOrMore(
pp.MatchFirst([pp.Literal(c) for c in pp.printables if c != "f"])
)
testExpr = pp.Word("012") + anything_but_an_f
tests = [
("00aab", False, True),
("00aab", True, True),
("00aaf", False, True),
("00aaf", True, False),
]
for s, parseAllFlag, shouldSucceed in tests:
try:
print(
"'{}' parseAll={} (shouldSucceed={})".format(
s, parseAllFlag, shouldSucceed
)
)
testExpr.parseString(s, parseAll=parseAllFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
def testGreedyQuotedStrings(self):
from pyparsing import (
QuotedString,
sglQuotedString,
dblQuotedString,
quotedString,
delimitedList,
)
src = """\
"string1", "strin""g2"
'string1', 'string2'
^string1^, ^string2^
<string1>, <string2>"""
testExprs = (
sglQuotedString,
dblQuotedString,
quotedString,
QuotedString('"', escQuote='""'),
QuotedString("'", escQuote="''"),
QuotedString("^"),
QuotedString("<", endQuoteChar=">"),
)
for expr in testExprs:
strs = delimitedList(expr).searchString(src)
print(strs)
self.assertTrue(
bool(strs), "no matches found for test expression '%s'" % expr
)
for lst in strs:
self.assertEqual(
2, len(lst), "invalid match found for test expression '%s'" % expr
)
from pyparsing import alphas, nums, Word
src = """'ms1',1,0,'2009-12-22','2009-12-22 10:41:22') ON DUPLICATE KEY UPDATE sent_count = sent_count + 1, mtime = '2009-12-22 10:41:22';"""
tok_sql_quoted_value = QuotedString(
"'", "\\", "''", True, False
) ^ QuotedString('"', "\\", '""', True, False)
tok_sql_computed_value = Word(nums)
tok_sql_identifier = Word(alphas)
val = tok_sql_quoted_value | tok_sql_computed_value | tok_sql_identifier
vals = delimitedList(val)
print(vals.parseString(src))
self.assertEqual(
5, len(vals.parseString(src)), "error in greedy quote escaping"
)
def testWordBoundaryExpressions(self):
from pyparsing import WordEnd, WordStart, oneOf
ws = WordStart()
we = WordEnd()
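# WordStart/WordEnd are zero-width assertions that match at word boundaries
# without consuming any characters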
vowel = oneOf(list("AEIOUY"))
consonant = oneOf(list("BCDFGHJKLMNPQRSTVWXZ"))
leadingVowel = ws + vowel
trailingVowel = vowel + we
leadingConsonant = ws + consonant
trailingConsonant = consonant + we
internalVowel = ~ws + vowel + ~we
bnf = leadingVowel | trailingVowel
tests = """\
ABC DEF GHI
JKL MNO PQR
STU VWX YZ """.splitlines()
tests.append("\n".join(tests))
expectedResult = [
[["D", "G"], ["A"], ["C", "F"], ["I"], ["E"], ["A", "I"]],
[["J", "M", "P"], [], ["L", "R"], ["O"], [], ["O"]],
[["S", "V"], ["Y"], ["X", "Z"], ["U"], [], ["U", "Y"]],
[
["D", "G", "J", "M", "P", "S", "V"],
["A", "Y"],
["C", "F", "L", "R", "X", "Z"],
["I", "O", "U"],
["E"],
["A", "I", "O", "U", "Y"],
],
]
for t, expected in zip(tests, expectedResult):
print(t)
results = [
flatten(e.searchString(t).asList())
for e in [
leadingConsonant,
leadingVowel,
trailingConsonant,
trailingVowel,
internalVowel,
bnf,
]
]
print(results)
print()
self.assertEqual(
expected,
results,
"Failed WordBoundaryTest, expected {}, got {}".format(
expected, results
),
)
def testRequiredEach(self):
from pyparsing import Keyword
parser = Keyword("bam") & Keyword("boo")
try:
res1 = parser.parseString("bam boo")
print(res1.asList())
res2 = parser.parseString("boo bam")
print(res2.asList())
except ParseException:
failed = True
else:
failed = False
self.assertFalse(failed, "invalid logic in Each")
self.assertEqual(
set(res1),
set(res2),
"Failed RequiredEachTest, expected "
+ str(res1.asList())
+ " and "
+ str(res2.asList())
+ "to contain same words in any order",
)
def testOptionalEachTest1(self):
from pyparsing import Optional, Keyword
for the_input in [
"Tal Weiss Major",
"Tal Major",
"Weiss Major",
"Major",
"Major Tal",
"Major Weiss",
"Major Tal Weiss",
]:
print(the_input)
parser1 = (Optional("Tal") + Optional("Weiss")) & Keyword("Major")
parser2 = Optional(Optional("Tal") + Optional("Weiss")) & Keyword("Major")
p1res = parser1.parseString(the_input)
p2res = parser2.parseString(the_input)
self.assertEqual(
p1res.asList(),
p2res.asList(),
"Each failed to match with nested Optionals, "
+ str(p1res.asList())
+ " should match "
+ str(p2res.asList()),
)
def testOptionalEachTest2(self):
from pyparsing import Word, alphanums, OneOrMore, Group, Regex, Optional
word = Word(alphanums + "_").setName("word")
with_stmt = "with" + OneOrMore(Group(word("key") + "=" + word("value")))(
"overrides"
)
using_stmt = "using" + Regex("id-[0-9a-f]{8}")("id")
modifiers = Optional(with_stmt("with_stmt")) & Optional(
using_stmt("using_stmt")
)
self.assertEqual("with foo=bar bing=baz using id-deadbeef", modifiers)
self.assertNotEqual(
"with foo=bar bing=baz using id-deadbeef using id-feedfeed", modifiers
)
def testOptionalEachTest3(self):
from pyparsing import Literal, Suppress
foo = Literal("foo")
bar = Literal("bar")
openBrace = Suppress(Literal("{"))
closeBrace = Suppress(Literal("}"))
exp = openBrace + (foo[1, ...]("foo") & bar[...]("bar")) + closeBrace
tests = """\
{foo}
{bar foo bar foo bar foo}
""".splitlines()
for test in tests:
test = test.strip()
if not test:
continue
self.assertParseAndCheckList(
exp,
test,
test.strip("{}").split(),
"failed to parse Each expression {!r}".format(test),
verbose=True,
)
with self.assertRaisesParseException():
exp.parseString("{bar}")
def testOptionalEachTest4(self):
from pyparsing import Group
expr = (~ppc.iso8601_date + ppc.integer("id")) & (
Group(ppc.iso8601_date)("date*")[...]
)
expr.runTests(
"""
1999-12-31 100 2001-01-01
42
"""
)
def testEachWithParseFatalException(self):
option_expr = pp.Keyword("options") - "(" + ppc.integer + ")"
step_expr1 = pp.Keyword("step") - "(" + ppc.integer + ")"
step_expr2 = pp.Keyword("step") - "(" + ppc.integer + "Z" + ")"
step_expr = step_expr1 ^ step_expr2
parser = option_expr & step_expr[...]
tests = [
(
"options(100) step(A)",
"Expected integer, found 'A' (at char 18), (line:1, col:19)",
),
(
"step(A) options(100)",
"Expected integer, found 'A' (at char 5), (line:1, col:6)",
),
(
"options(100) step(100A)",
"""Expected 'Z', found 'A' (at char 21), (line:1, col:22)""",
),
(
"options(100) step(22) step(100ZA)",
"""Expected ')', found 'A' (at char 31), (line:1, col:32)""",
),
]
test_lookup = dict(tests)
success, output = parser.runTests((t[0] for t in tests), failureTests=True)
for test_str, result in output:
self.assertEqual(
test_lookup[test_str],
str(result),
"incorrect exception raised for test string {!r}".format(test_str),
)
def testSumParseResults(self):
samplestr1 = "garbage;DOB 10-10-2010;more garbage\nID PARI12345678;more garbage"
samplestr2 = "garbage;ID PARI12345678;more garbage\nDOB 10-10-2010;more garbage"
samplestr3 = "garbage;DOB 10-10-2010"
samplestr4 = "garbage;ID PARI12345678;more garbage- I am cool"
res1 = "ID:PARI12345678 DOB:10-10-2010 INFO:"
res2 = "ID:PARI12345678 DOB:10-10-2010 INFO:"
res3 = "ID: DOB:10-10-2010 INFO:"
res4 = "ID:PARI12345678 DOB: INFO: I am cool"
from pyparsing import Regex, Word, alphanums, restOfLine
dob_ref = "DOB" + Regex(r"\d{2}-\d{2}-\d{4}")("dob")
id_ref = "ID" + Word(alphanums, exact=12)("id")
info_ref = "-" + restOfLine("info")
person_data = dob_ref | id_ref | info_ref
tests = (samplestr1, samplestr2, samplestr3, samplestr4)
results = (res1, res2, res3, res4)
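# ParseResults supports sum(): 0 + ParseResults is handled by __radd__ and '+' merges
# token lists and named fields, so fields missing from a record come back empty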
for test, expected in zip(tests, results):
person = sum(person_data.searchString(test))
result = "ID:{} DOB:{} INFO:{}".format(person.id, person.dob, person.info)
print(test)
print(expected)
print(result)
for pd in person_data.searchString(test):
print(pd.dump())
print()
self.assertEqual(
expected,
result,
"Failed to parse '{}' correctly, \nexpected '{}', got '{}'".format(
test, expected, result
),
)
def testMarkInputLine(self):
samplestr1 = "DOB 100-10-2010;more garbage\nID PARI12345678;more garbage"
from pyparsing import Regex
dob_ref = "DOB" + Regex(r"\d{2}-\d{2}-\d{4}")("dob")
try:
res = dob_ref.parseString(samplestr1)
except ParseException as pe:
outstr = pe.markInputline()
print(outstr)
self.assertEqual(
"DOB >!<100-10-2010;more garbage",
outstr,
"did not properly create marked input line",
)
else:
self.fail("test construction failed - should have raised an exception")
def testLocatedExpr(self):
# 012345678901234567890123456789012345678901234567890
samplestr1 = "DOB 10-10-2010;more garbage;ID PARI12345678 ;more garbage"
from pyparsing import Word, alphanums, locatedExpr
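# locatedExpr(expr) groups the match and adds locn_start/locn_end results names,
# giving the slice of the original string covered by expr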
id_ref = locatedExpr("ID" + Word(alphanums, exact=12)("id"))
res = id_ref.searchString(samplestr1)[0][0]
print(res.dump())
self.assertEqual(
"ID PARI12345678",
samplestr1[res.locn_start : res.locn_end],
"incorrect location calculation",
)
def testPop(self):
from pyparsing import Word, alphas, nums
source = "AAA 123 456 789 234"
patt = Word(alphas)("name") + Word(nums) * (1,)
result = patt.parseString(source)
tests = [
(0, "AAA", ["123", "456", "789", "234"]),
(None, "234", ["123", "456", "789"]),
("name", "AAA", ["123", "456", "789"]),
(-1, "789", ["123", "456"]),
]
for test in tests:
idx, val, remaining = test
if idx is not None:
ret = result.pop(idx)
else:
ret = result.pop()
print("EXP:", val, remaining)
print("GOT:", ret, result.asList())
print(ret, result.asList())
self.assertEqual(
val,
ret,
"wrong value returned, got {!r}, expected {!r}".format(ret, val),
)
self.assertEqual(
remaining,
result.asList(),
"list is in wrong state after pop, got {!r}, expected {!r}".format(
result.asList(), remaining
),
)
print()
prevlist = result.asList()
ret = result.pop("name", default="noname")
print(ret)
print(result.asList())
self.assertEqual(
"noname",
ret,
"default value not successfully returned, got {!r}, expected {!r}".format(
ret, "noname"
),
)
self.assertEqual(
prevlist,
result.asList(),
"list is in wrong state after pop, got {!r}, expected {!r}".format(
result.asList(), remaining
),
)
def testPopKwargsErr(self):
"""raise a TypeError in pop by adding invalid named args"""
source = "AAA 123 456 789 234"
patt = pp.Word(pp.alphas)("name") + pp.Word(pp.nums) * (1,)
result = patt.parseString(source)
print(result.dump())
with self.assertRaises(TypeError):
result.pop(notDefault="foo")
def testAddCondition(self):
from pyparsing import Word, nums, Suppress, ParseFatalException
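# addCondition attaches a predicate that is evaluated like a parse action;
# a falsy return fails the match with a ParseException (or ParseFatalException
# when fatal=True, aborting the parse immediately)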
numParser = Word(nums)
numParser.addParseAction(lambda s, l, t: int(t[0]))
numParser.addCondition(lambda s, l, t: t[0] % 2)
numParser.addCondition(lambda s, l, t: t[0] >= 7)
result = numParser.searchString("1 2 3 4 5 6 7 8 9 10")
print(result.asList())
self.assertEqual(
[[7], [9]], result.asList(), "failed to properly process conditions"
)
numParser = Word(nums)
numParser.addParseAction(lambda s, l, t: int(t[0]))
rangeParser = numParser("from_") + Suppress("-") + numParser("to")
result = rangeParser.searchString("1-4 2-4 4-3 5 6 7 8 9 10")
print(result.asList())
self.assertEqual(
[[1, 4], [2, 4], [4, 3]],
result.asList(),
"failed to properly process conditions",
)
rangeParser.addCondition(
lambda t: t.to > t.from_, message="from must be <= to", fatal=False
)
result = rangeParser.searchString("1-4 2-4 4-3 5 6 7 8 9 10")
print(result.asList())
self.assertEqual(
[[1, 4], [2, 4]], result.asList(), "failed to properly process conditions"
)
rangeParser = numParser("from_") + Suppress("-") + numParser("to")
rangeParser.addCondition(
lambda t: t.to > t.from_, message="from must be <= to", fatal=True
)
try:
result = rangeParser.searchString("1-4 2-4 4-3 5 6 7 8 9 10")
self.fail("failed to interrupt parsing on fatal condition failure")
except ParseFatalException:
print("detected fatal condition")
def testPatientOr(self):
# Two expressions and an input string which could - syntactically - be matched against
# both expressions. The "Literal" expression is considered invalid though, so this PE
# should always detect the "Word" expression.
def validate(token):
if token[0] == "def":
raise pp.ParseException("signalling invalid token")
return token
a = pp.Word("de").setName("Word") # .setDebug()
b = pp.Literal("def").setName("Literal").setParseAction(validate) # .setDebug()
c = pp.Literal("d").setName("d") # .setDebug()
# The "Literal" expressions's ParseAction is not executed directly after syntactically
# detecting the "Literal" Expression but only after the Or-decision has been made
# (which is too late)...
try:
result = (a ^ b ^ c).parseString("def")
self.assertEqual(
["de"],
result.asList(),
"failed to select longest match, chose %s" % result,
)
except ParseException:
failed = True
else:
failed = False
if failed:
self.fail(
"invalid logic in Or, fails on longest match with exception in parse action"
)
# from issue #93
word = pp.Word(pp.alphas).setName("word")
word_1 = (
pp.Word(pp.alphas).setName("word_1").addCondition(lambda t: len(t[0]) == 1)
)
a = word + (word_1 + word ^ word)
b = word * 3
c = a ^ b
c.streamline()
print(c)
test_string = "foo bar temp"
result = c.parseString(test_string)
print(test_string, "->", result.asList())
self.assertEqual(
test_string.split(), result.asList(), "failed to match longest choice"
)
def testEachWithOptionalWithResultsName(self):
from pyparsing import Optional
result = (Optional("foo")("one") & Optional("bar")("two")).parseString(
"bar foo"
)
print(result.dump())
self.assertEqual(sorted(["one", "two"]), sorted(result.keys()))
def testUnicodeExpression(self):
from pyparsing import Literal, ParseException
z = "a" | Literal("\u1111")
z.streamline()
try:
z.parseString("b")
except ParseException as pe:
self.assertEqual(
r"""Expected {'a' | 'ᄑ'}""",
pe.msg,
"Invalid error message raised, got %r" % pe.msg,
)
def testSetName(self):
a = pp.oneOf("a b c")
b = pp.oneOf("d e f")
arith_expr = pp.infixNotation(
pp.Word(pp.nums),
[
(pp.oneOf("* /"), 2, pp.opAssoc.LEFT),
(pp.oneOf("+ -"), 2, pp.opAssoc.LEFT),
],
)
arith_expr2 = pp.infixNotation(
pp.Word(pp.nums), [(("?", ":"), 3, pp.opAssoc.LEFT)]
)
recursive = pp.Forward()
recursive <<= a + (b + recursive)[...]
tests = [
a,
b,
(a | b),
arith_expr,
arith_expr.expr,
arith_expr2,
arith_expr2.expr,
recursive,
pp.delimitedList(pp.Word(pp.nums).setName("int")),
pp.countedArray(pp.Word(pp.nums).setName("int")),
pp.nestedExpr(),
pp.makeHTMLTags("Z"),
(pp.anyOpenTag, pp.anyCloseTag),
pp.commonHTMLEntity,
pp.commonHTMLEntity.setParseAction(pp.replaceHTMLEntity).transformString(
"lsdjkf <lsdjkf>&'"&xyzzy;"
),
]
expected = map(
str.strip,
"""\
a | b | c
d | e | f
{a | b | c | d | e | f}
Forward: + | - term
+ | - term
Forward: ?: term
?: term
Forward: {a | b | c [{d | e | f : ...}]...}
int [, int]...
(len) int...
nested () expression
(<Z>, </Z>)
(<any tag>, </any tag>)
common HTML entity
lsdjkf <lsdjkf>&'"&xyzzy;""".splitlines(),
)
for t, e in zip(tests, expected):
tname = str(t)
print(tname)
self.assertEqual(
e,
tname,
"expression name mismatch, expected {} got {}".format(e, tname),
)
def testTrimArityExceptionMasking(self):
from pyparsing import Word
invalid_message = "<lambda>() missing 1 required positional argument: 't'"
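# the parse action itself raises a TypeError ("aaa"[0] + 1); pyparsing's arity-trimming
# wrapper must let that real error propagate rather than re-reporting it as a
# missing-argument error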
try:
Word("a").setParseAction(lambda t: t[0] + 1).parseString("aaa")
except Exception as e:
exc_msg = str(e)
self.assertNotEqual(
exc_msg,
invalid_message,
"failed to catch TypeError thrown in _trim_arity",
)
def testTrimArityExceptionMaskingTest2(self):
# construct deep call tree
def A():
import traceback
traceback.print_stack(limit=2)
from pyparsing import Word
invalid_message = "<lambda>() missing 1 required positional argument: 't'"
try:
Word("a").setParseAction(lambda t: t[0] + 1).parseString("aaa")
except Exception as e:
exc_msg = str(e)
self.assertNotEqual(
exc_msg,
invalid_message,
"failed to catch TypeError thrown in _trim_arity",
)
def B():
A()
def C():
B()
def D():
C()
def E():
D()
def F():
E()
def G():
F()
def H():
G()
def J():
H()
def K():
J()
K()
def testClearParseActions(self):
realnum = ppc.real()
self.assertEqual(
3.14159,
realnum.parseString("3.14159")[0],
"failed basic real number parsing",
)
# clear parse action that converts to float
realnum.setParseAction(None)
self.assertEqual(
"3.14159", realnum.parseString("3.14159")[0], "failed clearing parse action"
)
# add a new parse action that tests if a '.' is present
realnum.addParseAction(lambda t: "." in t[0])
self.assertEqual(
True,
realnum.parseString("3.14159")[0],
"failed setting new parse action after clearing parse action",
)
def testOneOrMoreStop(self):
test = "BEGIN aaa bbb ccc END"
BEGIN, END = map(pp.Keyword, "BEGIN,END".split(","))
body_word = pp.Word(pp.alphas).setName("word")
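# stopOn ends the repetition as soon as the ender expression matches, leaving it
# unconsumed so the trailing END can still match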
for ender in (END, "END", pp.CaselessKeyword("END")):
expr = BEGIN + pp.OneOrMore(body_word, stopOn=ender) + END
self.assertEqual(
expr, test, "Did not successfully stop on ending expression %r" % ender
)
expr = BEGIN + body_word[...].stopOn(ender) + END
self.assertEqual(
expr, test, "Did not successfully stop on ending expression %r" % ender
)
number = pp.Word(pp.nums + ",.()").setName("number with optional commas")
parser = pp.OneOrMore(pp.Word(pp.alphanums + "-/."), stopOn=number)(
"id"
).setParseAction(" ".join) + number("data")
self.assertParseAndCheckList(
parser,
" XXX Y/123 1,234.567890",
["XXX Y/123", "1,234.567890"],
"Did not successfully stop on ending expression %r" % number,
verbose=True,
)
def testZeroOrMoreStop(self):
from pyparsing import Word, ZeroOrMore, alphas, Keyword, CaselessKeyword
test = "BEGIN END"
BEGIN, END = map(Keyword, "BEGIN,END".split(","))
body_word = Word(alphas).setName("word")
for ender in (END, "END", CaselessKeyword("END")):
expr = BEGIN + ZeroOrMore(body_word, stopOn=ender) + END
self.assertEqual(
expr, test, "Did not successfully stop on ending expression %r" % ender
)
expr = BEGIN + body_word[0, ...].stopOn(ender) + END
self.assertEqual(
expr, test, "Did not successfully stop on ending expression %r" % ender
)
def testNestedAsDict(self):
equals = pp.Literal("=").suppress()
lbracket = pp.Literal("[").suppress()
rbracket = pp.Literal("]").suppress()
lbrace = pp.Literal("{").suppress()
rbrace = pp.Literal("}").suppress()
value_dict = pp.Forward()
value_list = pp.Forward()
value_string = pp.Word(pp.alphanums + "@. ")
value = value_list ^ value_dict ^ value_string
values = pp.Group(pp.delimitedList(value, ","))
# ~ values = delimitedList(value, ",").setParseAction(lambda toks: [toks.asList()])
value_list <<= lbracket + values + rbracket
identifier = pp.Word(pp.alphanums + "_.")
assignment = pp.Group(identifier + equals + pp.Optional(value))
assignments = pp.Dict(pp.delimitedList(assignment, ";"))
value_dict <<= lbrace + assignments + rbrace
response = assignments
rsp = (
"username=goat; errors={username=[already taken, too short]}; empty_field="
)
result_dict = response.parseString(rsp).asDict()
print(result_dict)
self.assertEqual(
"goat",
result_dict["username"],
"failed to process string in ParseResults correctly",
)
self.assertEqual(
["already taken", "too short"],
result_dict["errors"]["username"],
"failed to process nested ParseResults correctly",
)
def testTraceParseActionDecorator(self):
from pyparsing import traceParseAction, Word, nums
@traceParseAction
def convert_to_int(t):
return int(t[0])
class Z:
def __call__(self, other):
return other[0] * 1000
integer = Word(nums).addParseAction(convert_to_int)
integer.addParseAction(traceParseAction(lambda t: t[0] * 10))
integer.addParseAction(traceParseAction(Z()))
integer.parseString("132")
def testRunTests(self):
from pyparsing import Word, nums, delimitedList
integer = Word(nums).setParseAction(lambda t: int(t[0]))
intrange = integer("start") + "-" + integer("end")
intrange.addCondition(
lambda t: t.end > t.start,
message="invalid range, start must be <= end",
fatal=True,
)
intrange.addParseAction(lambda t: list(range(t.start, t.end + 1)))
indices = delimitedList(intrange | integer)
indices.addParseAction(lambda t: sorted(set(t)))
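# runTests returns (success, [(test_string, result), ...]); lines starting with '#'
# in the tests string are treated as comments and skipped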
tests = """\
# normal data
1-3,2-4,6,8-10,16
# lone integer
11"""
results = indices.runTests(tests, printResults=False)[1]
expectedResults = [[1, 2, 3, 4, 6, 8, 9, 10, 16], [11]]
for res, expected in zip(results, expectedResults):
print(res[1].asList())
print(expected)
self.assertEqual(expected, res[1].asList(), "failed test: " + str(expected))
tests = """\
# invalid range
1-2, 3-1, 4-6, 7, 12
"""
success = indices.runTests(tests, printResults=False, failureTests=True)[0]
self.assertTrue(success, "failed to raise exception on improper range test")
def testRunTestsPostParse(self):
integer = ppc.integer
fraction = integer("numerator") + "/" + integer("denominator")
accum = []
def eval_fraction(test, result):
accum.append((test, result.asList()))
return "eval: {}".format(result.numerator / result.denominator)
success = fraction.runTests(
"""\
1/2
1/0
""",
postParse=eval_fraction,
)[0]
print(success)
self.assertTrue(success, "failed to parse fractions in RunTestsPostParse")
expected_accum = [("1/2", [1, "/", 2]), ("1/0", [1, "/", 0])]
self.assertEqual(
expected_accum, accum, "failed to call postParse method during runTests"
)
def testConvertToDateErr(self):
"""raise a ParseException in convertToDate with incompatible date str"""
expr = pp.Word(pp.alphanums + "-")
expr.addParseAction(ppc.convertToDate())
with self.assertRaisesParseException():
expr.parseString("1997-07-error")
def testConvertToDatetimeErr(self):
"""raise a ParseException in convertToDatetime with incompatible datetime str"""
expr = pp.Word(pp.alphanums + "-")
expr.addParseAction(ppc.convertToDatetime())
with self.assertRaisesParseException():
expr.parseString("1997-07-error")
def testCommonExpressions(self):
import ast
success = ppc.mac_address.runTests(
"""
AA:BB:CC:DD:EE:FF
AA.BB.CC.DD.EE.FF
AA-BB-CC-DD-EE-FF
"""
)[0]
self.assertTrue(success, "error in parsing valid MAC address")
success = ppc.mac_address.runTests(
"""
# mixed delimiters
AA.BB:CC:DD:EE:FF
""",
failureTests=True,
)[0]
self.assertTrue(success, "error in detecting invalid mac address")
success = ppc.ipv4_address.runTests(
"""
0.0.0.0
1.1.1.1
127.0.0.1
172.16.58.3
255.255.255.255
"""
)[0]
self.assertTrue(success, "error in parsing valid IPv4 address")
success = ppc.ipv4_address.runTests(
"""
# out of range value
256.255.255.255
""",
failureTests=True,
)[0]
self.assertTrue(success, "error in detecting invalid IPv4 address")
success = ppc.ipv6_address.runTests(
"""
2001:0db8:85a3:0000:0000:8a2e:0370:7334
fc00:e968:6179::de52:7100:4567:2468:1236:2444:2106
fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:200C:417A
fc00:e968:6179::de52:7100:1
# loopback address
::1
# the null address
::
# ipv4 compatibility form
fc00:db20:35b:7399::5:192.168.0.1
"""
)[0]
self.assertTrue(success, "error in parsing valid IPv6 address")
success = ppc.ipv6_address.runTests(
"""
# too few values
1080:0:0:0:8:800:200C
# too many ::'s, only 1 allowed
fc00:e968:6179::de52:7100:fc00:db20:35b:7399::5:2106
""",
failureTests=True,
)[0]
self.assertTrue(success, "error in detecting invalid IPv6 address")
success = ppc.number.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)[0]
self.assertTrue(success, "error in parsing valid numerics")
success = ppc.sci_real.runTests(
"""
1e12
-1e12
3.14159
6.02e23
"""
)[0]
self.assertTrue(success, "error in parsing valid scientific notation reals")
# any int or real number, returned as float
success = ppc.fnumber.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)[0]
self.assertTrue(success, "error in parsing valid numerics")
success, results = ppc.iso8601_date.runTests(
"""
1997
1997-07
1997-07-16
"""
)
self.assertTrue(success, "error in parsing valid iso8601_date")
expected = [("1997", None, None), ("1997", "07", None), ("1997", "07", "16")]
for r, exp in zip(results, expected):
self.assertEqual(
exp,
(r[1].year, r[1].month, r[1].day),
"failed to parse date into fields",
)
success, results = (
ppc.iso8601_date()
.addParseAction(ppc.convertToDate())
.runTests(
"""
1997-07-16
"""
)
)
self.assertTrue(
success, "error in parsing valid iso8601_date with parse action"
)
self.assertEqual(
datetime.date(1997, 7, 16),
results[0][1][0],
"error in parsing valid iso8601_date with parse action - incorrect value",
)
success, results = ppc.iso8601_datetime.runTests(
"""
1997-07-16T19:20+01:00
1997-07-16T19:20:30+01:00
1997-07-16T19:20:30.45Z
1997-07-16 19:20:30.45
"""
)
self.assertTrue(success, "error in parsing valid iso8601_datetime")
success, results = (
ppc.iso8601_datetime()
.addParseAction(ppc.convertToDatetime())
.runTests(
"""
1997-07-16T19:20:30.45
"""
)
)
self.assertTrue(success, "error in parsing valid iso8601_datetime")
self.assertEqual(
datetime.datetime(1997, 7, 16, 19, 20, 30, 450000),
results[0][1][0],
"error in parsing valid iso8601_datetime - incorrect value",
)
success = ppc.uuid.runTests(
"""
123e4567-e89b-12d3-a456-426655440000
"""
)[0]
self.assertTrue(success, "failed to parse valid uuid")
success = ppc.fraction.runTests(
"""
1/2
-15/16
-3/-4
"""
)[0]
self.assertTrue(success, "failed to parse valid fraction")
success = ppc.mixed_integer.runTests(
"""
1/2
-15/16
-3/-4
1 1/2
2 -15/16
0 -3/-4
12
"""
)[0]
self.assertTrue(success, "failed to parse valid mixed integer")
success, results = ppc.number.runTests(
"""
100
-3
1.732
-3.14159
6.02e23"""
)
self.assertTrue(success, "failed to parse numerics")
for test, result in results:
expected = ast.literal_eval(test)
self.assertEqual(
expected,
result[0],
"numeric parse failed (wrong value) ({} should be {})".format(
result[0], expected
),
)
self.assertEqual(
type(expected),
type(result[0]),
"numeric parse failed (wrong type) ({} should be {})".format(
type(result[0]), type(expected)
),
)
def testNumericExpressions(self):
# disable parse actions that do type conversion so we don't accidentally trigger
# conversion exceptions when what we want to check is the parsing expression
real = ppc.real().setParseAction(None)
sci_real = ppc.sci_real().setParseAction(None)
signed_integer = ppc.signed_integer().setParseAction(None)
from itertools import product
def make_tests():
leading_sign = ["+", "-", ""]
leading_digit = ["0", ""]
dot = [".", ""]
decimal_digit = ["1", ""]
e = ["e", "E", ""]
e_sign = ["+", "-", ""]
e_int = ["22", ""]
stray = ["9", ".", ""]
seen = set()
seen.add("")
for parts in product(
leading_sign,
stray,
leading_digit,
dot,
decimal_digit,
stray,
e,
e_sign,
e_int,
stray,
):
parts_str = "".join(parts).strip()
if parts_str in seen:
continue
seen.add(parts_str)
yield parts_str
print(len(seen) - 1, "tests produced")
# collect tests into valid/invalid sets, depending on whether they evaluate to valid Python floats or ints
valid_ints = set()
valid_reals = set()
valid_sci_reals = set()
invalid_ints = set()
invalid_reals = set()
invalid_sci_reals = set()
# check which strings parse as valid floats or ints, and store in related valid or invalid test sets
for test_str in make_tests():
if "." in test_str or "e" in test_str.lower():
try:
float(test_str)
except ValueError:
invalid_sci_reals.add(test_str)
if "e" not in test_str.lower():
invalid_reals.add(test_str)
else:
valid_sci_reals.add(test_str)
if "e" not in test_str.lower():
valid_reals.add(test_str)
try:
int(test_str)
except ValueError:
invalid_ints.add(test_str)
else:
valid_ints.add(test_str)
# now try all the test sets against their respective expressions
all_pass = True
suppress_results = {"printResults": False}
for expr, tests, is_fail, fn in zip(
[real, sci_real, signed_integer] * 2,
[
valid_reals,
valid_sci_reals,
valid_ints,
invalid_reals,
invalid_sci_reals,
invalid_ints,
],
[False, False, False, True, True, True],
[float, float, int] * 2,
):
#
# success, test_results = expr.runTests(sorted(tests, key=len), failureTests=is_fail, **suppress_results)
# filter_result_fn = (lambda r: isinstance(r, Exception),
# lambda r: not isinstance(r, Exception))[is_fail]
# print(expr, ('FAIL', 'PASS')[success], "{}valid tests ({})".format(len(tests),
# 'in' if is_fail else ''))
# if not success:
# all_pass = False
# for test_string, result in test_results:
# if filter_result_fn(result):
# try:
# test_value = fn(test_string)
# except ValueError as ve:
# test_value = str(ve)
# print("{!r}: {} {} {}".format(test_string, result,
# expr.matches(test_string, parseAll=True), test_value))
success = True
for t in tests:
if expr.matches(t, parseAll=True):
if is_fail:
print(t, "should fail but did not")
success = False
else:
if not is_fail:
print(t, "should not fail but did")
success = False
print(
expr,
("FAIL", "PASS")[success],
"{}valid tests ({})".format("in" if is_fail else "", len(tests)),
)
all_pass = all_pass and success
self.assertTrue(all_pass, "failed one or more numeric tests")
def testTokenMap(self):
from pyparsing import tokenMap, Word, hexnums, OneOrMore
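# tokenMap(fn, *args) builds a parse action that applies fn(tok, *args) to every
# matched token - here int(tok, 16) converts each hex string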
parser = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
success, report = parser.runTests(
"""
00 11 22 aa FF 0a 0d 1a
"""
)
self.assertRunTestResults(
(success, report),
[([0, 17, 34, 170, 255, 10, 13, 26], "tokenMap parse action failed")],
msg="failed to parse hex integers",
)
def testParseFile(self):
from pyparsing import OneOrMore
s = """
123 456 789
"""
input_file = StringIO(s)
integer = ppc.integer
results = OneOrMore(integer).parseFile(input_file)
print(results)
results = OneOrMore(integer).parseFile("tests/parsefiletest_input_file.txt")
print(results)
def testHTMLStripper(self):
sample = """
<html>
Here is some sample <i>HTML</i> text.
</html>
"""
read_everything = pp.originalTextFor(pp.OneOrMore(pp.Word(pp.printables)))
read_everything.addParseAction(ppc.stripHTMLTags)
result = read_everything.parseString(sample)
self.assertEqual("Here is some sample HTML text.", result[0].strip())
def testExprSplitter(self):
from pyparsing import Literal, quotedString, pythonStyleComment, Empty
expr = Literal(";") + Empty()
expr.ignore(quotedString)
expr.ignore(pythonStyleComment)
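# ParserElement.split(line) yields the pieces of the line between matches of the
# expression; the ignore expressions keep semicolons inside quotes and comments
# from acting as split points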
sample = """
def main():
this_semi_does_nothing();
neither_does_this_but_there_are_spaces_afterward();
a = "a;b"; return a # this is a comment; it has a semicolon!
def b():
if False:
z=1000;b("; in quotes"); c=200;return z
return ';'
class Foo(object):
def bar(self):
'''a docstring; with a semicolon'''
a = 10; b = 11; c = 12
# this comment; has several; semicolons
if self.spam:
x = 12; return x # so; does; this; one
x = 15;;; y += x; return y
def baz(self):
return self.bar
"""
expected = [
[" this_semi_does_nothing()", ""],
[" neither_does_this_but_there_are_spaces_afterward()", ""],
[
' a = "a;b"',
"return a # this is a comment; it has a semicolon!",
],
[" z=1000", 'b("; in quotes")', "c=200", "return z"],
[" return ';'"],
[" '''a docstring; with a semicolon'''"],
[" a = 10", "b = 11", "c = 12"],
[" # this comment; has several; semicolons"],
[" x = 12", "return x # so; does; this; one"],
[" x = 15", "", "", "y += x", "return y"],
]
exp_iter = iter(expected)
for line in filter(lambda ll: ";" in ll, sample.splitlines()):
print(str(list(expr.split(line))) + ",")
self.assertEqual(
next(exp_iter), list(expr.split(line)), "invalid split on expression"
)
print()
expected = [
[" this_semi_does_nothing()", ";", ""],
[" neither_does_this_but_there_are_spaces_afterward()", ";", ""],
[
' a = "a;b"',
";",
"return a # this is a comment; it has a semicolon!",
],
[
" z=1000",
";",
'b("; in quotes")',
";",
"c=200",
";",
"return z",
],
[" return ';'"],
[" '''a docstring; with a semicolon'''"],
[" a = 10", ";", "b = 11", ";", "c = 12"],
[" # this comment; has several; semicolons"],
[" x = 12", ";", "return x # so; does; this; one"],
[
" x = 15",
";",
"",
";",
"",
";",
"y += x",
";",
"return y",
],
]
exp_iter = iter(expected)
for line in filter(lambda ll: ";" in ll, sample.splitlines()):
print(str(list(expr.split(line, includeSeparators=True))) + ",")
self.assertEqual(
next(exp_iter),
list(expr.split(line, includeSeparators=True)),
"invalid split on expression",
)
print()
expected = [
[" this_semi_does_nothing()", ""],
[" neither_does_this_but_there_are_spaces_afterward()", ""],
[
' a = "a;b"',
"return a # this is a comment; it has a semicolon!",
],
[" z=1000", 'b("; in quotes"); c=200;return z'],
[" a = 10", "b = 11; c = 12"],
[" x = 12", "return x # so; does; this; one"],
[" x = 15", ";; y += x; return y"],
]
exp_iter = iter(expected)
for line in sample.splitlines():
pieces = list(expr.split(line, maxsplit=1))
print(str(pieces) + ",")
if len(pieces) == 2:
exp = next(exp_iter)
self.assertEqual(
exp, pieces, "invalid split on expression with maxSplits=1"
)
elif len(pieces) == 1:
self.assertEqual(
0,
len(expr.searchString(line)),
"invalid split with maxSplits=1 when expr not present",
)
else:
print("\n>>> " + line)
self.fail("invalid split on expression with maxSplits=1, corner case")
def testParseFatalException(self):
from pyparsing import Word, nums, ParseFatalException
with self.assertRaisesParseException(
exc_type=ParseFatalException, msg="failed to raise ErrorStop exception"
):
expr = "ZZZ" - Word(nums)
expr.parseString("ZZZ bad")
# WAS:
# success = False
# try:
# expr = "ZZZ" - Word(nums)
# expr.parseString("ZZZ bad")
# except ParseFatalException as pfe:
# print('ParseFatalException raised correctly')
# success = True
# except Exception as e:
# print(type(e))
# print(e)
#
# self.assertTrue(success, "bad handling of syntax error")
def testInlineLiteralsUsing(self):
wd = pp.Word(pp.alphas)
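# inlineLiteralsUsing(cls) sets the class used to wrap bare string literals in
# expressions: Suppress drops them from the results, Literal keeps them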
pp.ParserElement.inlineLiteralsUsing(pp.Suppress)
result = (wd + "," + wd + pp.oneOf("! . ?")).parseString("Hello, World!")
self.assertEqual(3, len(result), "inlineLiteralsUsing(Suppress) failed!")
pp.ParserElement.inlineLiteralsUsing(pp.Literal)
result = (wd + "," + wd + pp.oneOf("! . ?")).parseString("Hello, World!")
self.assertEqual(4, len(result), "inlineLiteralsUsing(Literal) failed!")
pp.ParserElement.inlineLiteralsUsing(pp.CaselessKeyword)
self.assertParseAndCheckList(
"SELECT" + wd + "FROM" + wd,
"select color from colors",
expected_list=["SELECT", "color", "FROM", "colors"],
msg="inlineLiteralsUsing(CaselessKeyword) failed!",
verbose=True,
)
pp.ParserElement.inlineLiteralsUsing(pp.CaselessLiteral)
self.assertParseAndCheckList(
"SELECT" + wd + "FROM" + wd,
"select color from colors",
expected_list=["SELECT", "color", "FROM", "colors"],
msg="inlineLiteralsUsing(CaselessLiteral) failed!",
verbose=True,
)
integer = pp.Word(pp.nums)
pp.ParserElement.inlineLiteralsUsing(pp.Literal)
date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
self.assertParseAndCheckList(
date_str,
"1999/12/31",
expected_list=["1999", "/", "12", "/", "31"],
msg="inlineLiteralsUsing(example 1) failed!",
verbose=True,
)
# change to Suppress
pp.ParserElement.inlineLiteralsUsing(pp.Suppress)
date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
self.assertParseAndCheckList(
date_str,
"1999/12/31",
expected_list=["1999", "12", "31"],
msg="inlineLiteralsUsing(example 2) failed!",
verbose=True,
)
def testCloseMatch(self):
searchseq = pp.CloseMatch("ATCATCGAATGGA", 2)
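# CloseMatch(target, maxMismatches) matches len(target) characters differing from
# target in at most maxMismatches positions; the result's 'mismatches' attribute
# lists the differing indices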
_, results = searchseq.runTests(
"""
ATCATCGAATGGA
XTCATCGAATGGX
ATCATCGAAXGGA
ATCAXXGAATGGA
ATCAXXGAATGXA
ATCAXXGAATGG
"""
)
expected = ([], [0, 12], [9], [4, 5], None, None)
for r, exp in zip(results, expected):
if exp is not None:
self.assertEqual(
exp,
r[1].mismatches,
"fail CloseMatch between {!r} and {!r}".format(
searchseq.match_string, r[0]
),
)
print(
r[0],
"exc: %s" % r[1]
if exp is None and isinstance(r[1], Exception)
else ("no match", "match")[r[1].mismatches == exp],
)
def testDefaultKeywordChars(self):
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
pp.Keyword("start").parseString("start1000")
try:
pp.Keyword("start", identChars=pp.alphas).parseString("start1000")
except pp.ParseException:
self.fail("failed to match keyword using updated keyword chars")
with ppt.reset_pyparsing_context():
pp.Keyword.setDefaultKeywordChars(pp.alphas)
try:
pp.Keyword("start").parseString("start1000")
except pp.ParseException:
self.fail("failed to match keyword using updated keyword chars")
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
pp.CaselessKeyword("START").parseString("start1000")
try:
pp.CaselessKeyword("START", identChars=pp.alphas).parseString("start1000")
except pp.ParseException:
self.fail("failed to match keyword using updated keyword chars")
with ppt.reset_pyparsing_context():
pp.Keyword.setDefaultKeywordChars(pp.alphas)
try:
pp.CaselessKeyword("START").parseString("start1000")
except pp.ParseException:
self.assertTrue(
False, "failed to match keyword using updated keyword chars"
)
def testLiteralVsKeyword(self):
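        # Literal matches its characters wherever they occur; Keyword and
        # CaselessKeyword additionally require that the match not be immediately
        # followed by an identifier character, as does Word(..., asKeyword=True).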
integer = ppc.integer
literal_expr = integer + pp.Literal("start") + integer
keyword_expr = integer + pp.Keyword("start") + integer
caseless_keyword_expr = integer + pp.CaselessKeyword("START") + integer
word_keyword_expr = (
integer + pp.Word(pp.alphas, asKeyword=True).setName("word") + integer
)
print()
test_string = "1 start 2"
print(test_string)
print(literal_expr, literal_expr.parseString(test_string, parseAll=True))
print(keyword_expr, keyword_expr.parseString(test_string, parseAll=True))
print(
caseless_keyword_expr,
caseless_keyword_expr.parseString(test_string, parseAll=True),
)
print(
word_keyword_expr, word_keyword_expr.parseString(test_string, parseAll=True)
)
print()
test_string = "3 start4"
print(test_string)
print(literal_expr, literal_expr.parseString(test_string, parseAll=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(keyword_expr.parseString(test_string, parseAll=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(caseless_keyword_expr.parseString(test_string, parseAll=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(word_keyword_expr.parseString(test_string, parseAll=True))
print()
test_string = "5start 6"
print(test_string)
print(literal_expr.parseString(test_string, parseAll=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(keyword_expr.parseString(test_string, parseAll=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(caseless_keyword_expr.parseString(test_string, parseAll=True))
with self.assertRaisesParseException(
msg="failed to fail matching keyword using updated keyword chars"
):
print(word_keyword_expr.parseString(test_string, parseAll=True))
def testCol(self):
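        # pp.col(loc, string) reports the 1-based column of loc within its line; the
        # only characters at column 1 in the test string are the four leading '*'s.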
test = "*\n* \n* ALF\n*\n"
initials = [c for i, c in enumerate(test) if pp.col(i, test) == 1]
print(initials)
self.assertTrue(
len(initials) == 4 and all(c == "*" for c in initials), "fail col test"
)
def testLiteralException(self):
for cls in (
pp.Literal,
pp.CaselessLiteral,
pp.Keyword,
pp.CaselessKeyword,
pp.Word,
pp.Regex,
):
expr = cls("xyz") # .setName('{}_expr'.format(cls.__name__.lower()))
try:
expr.parseString(" ")
except Exception as e:
print(cls.__name__, str(e))
self.assertTrue(
isinstance(e, pp.ParseBaseException),
"class {} raised wrong exception type {}".format(
cls.__name__, type(e).__name__
),
)
def testParseActionException(self):
import traceback
number = pp.Word(pp.nums)
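        # The parse action below deliberately raises IndexError; the test verifies
        # that the original error is preserved as __cause__ on the exception that
        # escapes parseString().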
def number_action():
raise IndexError # this is the important line!
number.setParseAction(number_action)
symbol = pp.Word("abcd", max=1)
expr = number | symbol
try:
expr.parseString("1 + 2")
except Exception as e:
print_traceback = True
try:
self.assertTrue(
hasattr(e, "__cause__"),
"no __cause__ attribute in the raised exception",
)
self.assertTrue(
e.__cause__ is not None,
"__cause__ not propagated to outer exception",
)
self.assertEqual(
IndexError,
type(e.__cause__),
"__cause__ references wrong exception",
)
print_traceback = False
finally:
if print_traceback:
traceback.print_exc()
else:
self.fail("Expected ParseException not raised")
# tests Issue #22
def testParseActionNesting(self):
vals = pp.OneOrMore(ppc.integer)("int_values")
def add_total(tokens):
tokens["total"] = sum(tokens)
return tokens
vals.addParseAction(add_total)
results = vals.parseString("244 23 13 2343")
print(results.dump())
self.assertParseResultsEquals(
results,
expected_dict={"int_values": [244, 23, 13, 2343], "total": 2623},
msg="noop parse action changed ParseResults structure",
)
name = pp.Word(pp.alphas)("name")
score = pp.Word(pp.nums + ".")("score")
nameScore = pp.Group(name + score)
line1 = nameScore("Rider")
result1 = line1.parseString("Mauney 46.5")
print("### before parse action is added ###")
print("result1.dump():\n" + result1.dump() + "\n")
before_pa_dict = result1.asDict()
line1.setParseAction(lambda t: t)
result1 = line1.parseString("Mauney 46.5")
after_pa_dict = result1.asDict()
print("### after parse action was added ###")
print("result1.dump():\n" + result1.dump() + "\n")
self.assertEqual(
before_pa_dict,
after_pa_dict,
"noop parse action changed ParseResults structure",
)
def testParseResultsNameBelowUngroupedName(self):
rule_num = pp.Regex("[0-9]+")("LIT_NUM*")
list_num = pp.Group(
pp.Literal("[")("START_LIST")
+ pp.delimitedList(rule_num)("LIST_VALUES")
+ pp.Literal("]")("END_LIST")
)("LIST")
test_string = "[ 1,2,3,4,5,6 ]"
list_num.runTests(test_string)
U = list_num.parseString(test_string)
self.assertTrue(
"LIT_NUM" not in U.LIST.LIST_VALUES,
"results name retained as sub in ungrouped named result",
)
def testParseResultsNamesInGroupWithDict(self):
key = ppc.identifier()
value = ppc.integer()
lat = ppc.real()
long = ppc.real()
EQ = pp.Suppress("=")
data = (
lat("lat")
+ long("long")
+ pp.Dict(pp.OneOrMore(pp.Group(key + EQ + value)))
)
site = pp.QuotedString('"')("name") + pp.Group(data)("data")
test_string = '"Golden Gate Bridge" 37.819722 -122.478611 height=746 span=4200'
site.runTests(test_string)
a, aEnd = pp.makeHTMLTags("a")
attrs = a.parseString("<a href='blah'>")
print(attrs.dump())
self.assertParseResultsEquals(
attrs,
expected_dict={
"startA": {"href": "blah", "tag": "a", "empty": False},
"href": "blah",
"tag": "a",
"empty": False,
},
)
def testMakeXMLTags(self):
"""test helper function makeXMLTags in simple use case"""
body, bodyEnd = pp.makeXMLTags("body")
tst = "<body>Hello</body>"
expr = body + pp.Word(pp.alphas)("contents") + bodyEnd
result = expr.parseString(tst)
print(result.dump())
self.assertParseResultsEquals(
result, ["body", False, "Hello", "</body>"], msg="issue using makeXMLTags"
)
def testFollowedBy(self):
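        # FollowedBy is a zero-width lookahead, so the integer is not consumed, but
        # the "qty" results name defined inside it should still appear in the results.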
expr = pp.Word(pp.alphas)("item") + pp.FollowedBy(ppc.integer("qty"))
result = expr.parseString("balloon 99", parseAll=False)
print(result.dump())
self.assertTrue("qty" in result, "failed to capture results name in FollowedBy")
self.assertEqual(
{"item": "balloon", "qty": 99},
result.asDict(),
"invalid results name structure from FollowedBy",
)
def testSetBreak(self):
"""
Test behavior of ParserElement.setBreak(), to invoke the debugger before parsing that element is attempted.
Temporarily monkeypatches pdb.set_trace.
"""
was_called = False
def mock_set_trace():
nonlocal was_called
was_called = True
wd = pp.Word(pp.alphas)
wd.setBreak()
print("Before parsing with setBreak:", was_called)
import pdb
with ppt.reset_pyparsing_context():
pdb.set_trace = mock_set_trace
wd.parseString("ABC")
print("After parsing with setBreak:", was_called)
self.assertTrue(was_called, "set_trace wasn't called by setBreak")
def testUnicodeTests(self):
ppu = pp.pyparsing_unicode
# verify proper merging of ranges by addition
kanji_printables = ppu.Japanese.Kanji.printables
katakana_printables = ppu.Japanese.Katakana.printables
hiragana_printables = ppu.Japanese.Hiragana.printables
japanese_printables = ppu.Japanese.printables
self.assertEqual(
set(kanji_printables + katakana_printables + hiragana_printables),
set(japanese_printables),
"failed to construct ranges by merging Japanese types",
)
# verify proper merging of ranges using multiple inheritance
cjk_printables = ppu.CJK.printables
self.assertEqual(
len(set(cjk_printables)),
len(cjk_printables),
"CJK contains duplicate characters - all should be unique",
)
chinese_printables = ppu.Chinese.printables
korean_printables = ppu.Korean.printables
print(
len(set(chinese_printables + korean_printables + japanese_printables)),
len(cjk_printables),
)
self.assertEqual(
len(set(chinese_printables + korean_printables + japanese_printables)),
len(cjk_printables),
"failed to construct ranges by merging Chinese, Japanese and Korean",
)
alphas = ppu.Greek.alphas
greet = pp.Word(alphas) + "," + pp.Word(alphas) + "!"
# input string
hello = "Καλημέρα, κόσμε!"
result = greet.parseString(hello)
print(result)
self.assertParseResultsEquals(
result,
expected_list=["Καλημέρα", ",", "κόσμε", "!"],
msg="Failed to parse Greek 'Hello, World!' using "
"pyparsing_unicode.Greek.alphas",
)
# define a custom unicode range using multiple inheritance
class Turkish_set(ppu.Latin1, ppu.LatinA):
pass
self.assertEqual(
set(ppu.Latin1.printables + ppu.LatinA.printables),
set(Turkish_set.printables),
"failed to construct ranges by merging Latin1 and LatinA (printables)",
)
self.assertEqual(
set(ppu.Latin1.alphas + ppu.LatinA.alphas),
set(Turkish_set.alphas),
"failed to construct ranges by merging Latin1 and LatinA (alphas)",
)
self.assertEqual(
set(ppu.Latin1.nums + ppu.LatinA.nums),
set(Turkish_set.nums),
"failed to construct ranges by merging Latin1 and LatinA (nums)",
)
key = pp.Word(Turkish_set.alphas)
value = ppc.integer | pp.Word(Turkish_set.alphas, Turkish_set.alphanums)
EQ = pp.Suppress("=")
key_value = key + EQ + value
sample = """\
şehir=İzmir
ülke=Türkiye
nüfus=4279677"""
result = pp.Dict(pp.OneOrMore(pp.Group(key_value))).parseString(sample)
print(result.dump())
self.assertParseResultsEquals(
result,
expected_dict={"şehir": "İzmir", "ülke": "Türkiye", "nüfus": 4279677},
msg="Failed to parse Turkish key-value pairs",
)
# Make sure example in indentedBlock docstring actually works!
def testIndentedBlockExample(self):
from textwrap import dedent
data = dedent(
"""
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
"""
)
indentStack = [1]
stmt = pp.Forward()
identifier = pp.Word(pp.alphas, pp.alphanums)
funcDecl = (
"def"
+ identifier
+ pp.Group("(" + pp.Optional(pp.delimitedList(identifier)) + ")")
+ ":"
)
func_body = pp.indentedBlock(stmt, indentStack)
funcDef = pp.Group(funcDecl + func_body)
rvalue = pp.Forward()
funcCall = pp.Group(
identifier + "(" + pp.Optional(pp.delimitedList(rvalue)) + ")"
)
rvalue << (funcCall | identifier | pp.Word(pp.nums))
assignment = pp.Group(identifier + "=" + rvalue)
stmt <<= funcDef | assignment | identifier
module_body = pp.OneOrMore(stmt)
self.assertParseAndCheckList(
module_body,
data,
[
[
"def",
"A",
["(", "z", ")"],
":",
[["A1"], [["B", "=", "100"]], [["G", "=", "A2"]], ["A2"], ["A3"]],
],
"B",
[
"def",
"BB",
["(", "a", "b", "c", ")"],
":",
[
["BB1"],
[
[
"def",
"BBA",
["(", ")"],
":",
[["bba1"], ["bba2"], ["bba3"]],
]
],
],
],
"C",
"D",
[
"def",
"spam",
["(", "x", "y", ")"],
":",
[[["def", "eggs", ["(", "z", ")"], ":", [["pass"]]]]],
],
],
"Failed indentedBlock example",
verbose=True,
)
def testIndentedBlock(self):
# parse pseudo-yaml indented text
import textwrap
EQ = pp.Suppress("=")
stack = [1]
key = ppc.identifier
value = pp.Forward()
key_value = key + EQ + value
compound_value = pp.Dict(pp.ungroup(pp.indentedBlock(key_value, stack)))
value <<= ppc.integer | pp.QuotedString("'") | compound_value
parser = pp.Dict(pp.OneOrMore(pp.Group(key_value)))
text = """
a = 100
b = 101
c =
c1 = 200
c2 =
c21 = 999
c3 = 'A horse, a horse, my kingdom for a horse'
d = 505
"""
text = textwrap.dedent(text)
print(text)
result = parser.parseString(text)
print(result.dump())
self.assertEqual(100, result.a, "invalid indented block result")
self.assertEqual(200, result.c.c1, "invalid indented block result")
self.assertEqual(999, result.c.c2.c21, "invalid indented block result")
# exercise indentedBlock with example posted in issue #87
def testIndentedBlockTest2(self):
from textwrap import dedent
indent_stack = [1]
key = pp.Word(pp.alphas, pp.alphanums) + pp.Suppress(":")
stmt = pp.Forward()
suite = pp.indentedBlock(stmt, indent_stack)
body = key + suite
pattern = (
pp.Word(pp.alphas)
+ pp.Suppress("(")
+ pp.Word(pp.alphas)
+ pp.Suppress(")")
)
stmt <<= pattern
def key_parse_action(toks):
print("Parsing '%s'..." % toks[0])
key.setParseAction(key_parse_action)
header = pp.Suppress("[") + pp.Literal("test") + pp.Suppress("]")
content = header - pp.OneOrMore(pp.indentedBlock(body, indent_stack, False))
contents = pp.Forward()
suites = pp.indentedBlock(content, indent_stack)
extra = pp.Literal("extra") + pp.Suppress(":") - suites
contents <<= content | extra
parser = pp.OneOrMore(contents)
sample = dedent(
"""
extra:
[test]
one0:
two (three)
four0:
five (seven)
extra:
[test]
one1:
two (three)
four1:
five (seven)
"""
)
success, _ = parser.runTests([sample])
self.assertTrue(success, "Failed indentedBlock test for issue #87")
sample2 = dedent(
"""
extra:
[test]
one:
two (three)
four:
five (seven)
extra:
[test]
one:
two (three)
four:
five (seven)
[test]
one:
two (three)
four:
five (seven)
[test]
eight:
nine (ten)
eleven:
twelve (thirteen)
fourteen:
fifteen (sixteen)
seventeen:
eighteen (nineteen)
"""
)
del indent_stack[1:]
success, _ = parser.runTests([sample2])
self.assertTrue(success, "Failed indentedBlock multi-block test for issue #87")
def testIndentedBlockScan(self):
def get_parser():
"""
A valid statement is the word "block:", followed by an indent, followed by the letter A only, or another block
"""
stack = [1]
block = pp.Forward()
body = pp.indentedBlock(
pp.Literal("A") ^ block, indentStack=stack, indent=True
)
block <<= pp.Literal("block:") + body
return block
from textwrap import dedent
# This input string is a perfect match for the parser, so a single match is found
p1 = get_parser()
r1 = list(
p1.scanString(
dedent(
"""\
block:
A
"""
)
)
)
self.assertEqual(1, len(r1))
# This input string is a perfect match for the parser, except for the letter B instead of A, so this will fail (and should)
p2 = get_parser()
r2 = list(
p2.scanString(
dedent(
"""\
block:
B
"""
)
)
)
self.assertEqual(0, len(r2))
# This input string contains both string A and string B, and it finds one match (as it should)
p3 = get_parser()
r3 = list(
p3.scanString(
dedent(
"""\
block:
A
block:
B
"""
)
)
)
self.assertEqual(1, len(r3))
# This input string contains both string A and string B, but in a different order.
p4 = get_parser()
r4 = list(
p4.scanString(
dedent(
"""\
block:
B
block:
A
"""
)
)
)
self.assertEqual(1, len(r4))
# This is the same as case 3, but with nesting
p5 = get_parser()
r5 = list(
p5.scanString(
dedent(
"""\
block:
block:
A
block:
block:
B
"""
)
)
)
self.assertEqual(1, len(r5))
# This is the same as case 4, but with nesting
p6 = get_parser()
r6 = list(
p6.scanString(
dedent(
"""\
block:
block:
B
block:
block:
A
"""
)
)
)
self.assertEqual(1, len(r6))
def testInvalidDiagSetting(self):
with self.assertRaises(
ValueError,
msg="failed to raise exception when setting non-existent __diag__",
):
pp.__diag__.enable("xyzzy")
with self.assertWarns(
            UserWarning, msg="failed to warn when disabling 'collect_all_And_tokens'"
):
pp.__compat__.disable("collect_all_And_tokens")
def testParseResultsWithNameMatchFirst(self):
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
expr = (expr_a | expr_b)("rexp")
success, report = expr.runTests(
"""\
not the bird
the bird
"""
)
results = [rpt[1] for rpt in report]
self.assertParseResultsEquals(
results[0], ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
self.assertParseResultsEquals(
results[1], ["the", "bird"], {"rexp": ["the", "bird"]}
)
# test compatibility mode, no longer restoring pre-2.3.1 behavior
with ppt.reset_pyparsing_context():
pp.__compat__.collect_all_And_tokens = False
pp.__diag__.enable("warn_multiple_tokens_in_named_alternation")
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
with self.assertWarns(
UserWarning, msg="failed to warn of And within alternation"
):
expr = (expr_a | expr_b)("rexp")
success, report = expr.runTests(
"""
not the bird
the bird
"""
)
results = [rpt[1] for rpt in report]
self.assertParseResultsEquals(
results[0], ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
self.assertParseResultsEquals(
results[1], ["the", "bird"], {"rexp": ["the", "bird"]}
)
def testParseResultsWithNameOr(self):
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
expr = (expr_a ^ expr_b)("rexp")
expr.runTests(
"""\
not the bird
the bird
"""
)
result = expr.parseString("not the bird")
self.assertParseResultsEquals(
result, ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
result = expr.parseString("the bird")
self.assertParseResultsEquals(
result, ["the", "bird"], {"rexp": ["the", "bird"]}
)
expr = (expr_a | expr_b)("rexp")
expr.runTests(
"""\
not the bird
the bird
"""
)
result = expr.parseString("not the bird")
self.assertParseResultsEquals(
result, ["not", "the", "bird"], {"rexp": ["not", "the", "bird"]}
)
result = expr.parseString("the bird")
self.assertParseResultsEquals(
result, ["the", "bird"], {"rexp": ["the", "bird"]}
)
# test compatibility mode, no longer restoring pre-2.3.1 behavior
with ppt.reset_pyparsing_context():
pp.__compat__.collect_all_And_tokens = False
pp.__diag__.enable("warn_multiple_tokens_in_named_alternation")
expr_a = pp.Literal("not") + pp.Literal("the") + pp.Literal("bird")
expr_b = pp.Literal("the") + pp.Literal("bird")
with self.assertWarns(
UserWarning, msg="failed to warn of And within alternation"
):
expr = (expr_a ^ expr_b)("rexp")
expr.runTests(
"""\
not the bird
the bird
"""
)
self.assertEqual(
"not the bird".split(), list(expr.parseString("not the bird")["rexp"])
)
self.assertEqual(
"the bird".split(), list(expr.parseString("the bird")["rexp"])
)
def testEmptyDictDoesNotRaiseException(self):
key = pp.Word(pp.alphas)
value = pp.Word(pp.nums)
EQ = pp.Suppress("=")
key_value_dict = pp.dictOf(key, EQ + value)
print(
key_value_dict.parseString(
"""\
a = 10
b = 20
"""
).dump()
)
try:
print(key_value_dict.parseString("").dump())
except pp.ParseException as pe:
print(pp.ParseException.explain(pe))
else:
self.fail("failed to raise exception when matching empty string")
def testExplainException(self):
expr = pp.Word(pp.nums).setName("int") + pp.Word(pp.alphas).setName("word")
try:
expr.parseString("123 355")
except pp.ParseException as pe:
print(pe.explain(depth=0))
expr = pp.Word(pp.nums).setName("int") - pp.Word(pp.alphas).setName("word")
try:
expr.parseString("123 355 (test using ErrorStop)")
except pp.ParseSyntaxException as pe:
print(pe.explain())
integer = pp.Word(pp.nums).setName("int").addParseAction(lambda t: int(t[0]))
expr = integer + integer
def divide_args(t):
integer.parseString("A")
return t[0] / t[1]
expr.addParseAction(divide_args)
pp.ParserElement.enablePackrat()
print()
try:
expr.parseString("123 0")
except pp.ParseException as pe:
print(pe.explain())
except Exception as exc:
print(pp.ParseBaseException.explain_exception(exc))
raise
def testCaselessKeywordVsKeywordCaseless(self):
frule = pp.Keyword("t", caseless=True) + pp.Keyword("yes", caseless=True)
crule = pp.CaselessKeyword("t") + pp.CaselessKeyword("yes")
flist = frule.searchString("not yes").asList()
print(flist)
clist = crule.searchString("not yes").asList()
print(clist)
self.assertEqual(
flist,
clist,
"CaselessKeyword not working the same as Keyword(caseless=True)",
)
def testOneOfKeywords(self):
literal_expr = pp.oneOf("a b c")
success, _ = literal_expr[...].runTests(
"""
# literal oneOf tests
a b c
a a a
abc
"""
)
self.assertTrue(success, "failed literal oneOf matching")
keyword_expr = pp.oneOf("a b c", asKeyword=True)
success, _ = keyword_expr[...].runTests(
"""
# keyword oneOf tests
a b c
a a a
"""
)
self.assertTrue(success, "failed keyword oneOf matching")
success, _ = keyword_expr[...].runTests(
"""
# keyword oneOf failure tests
abc
""",
failureTests=True,
)
self.assertTrue(success, "failed keyword oneOf failure tests")
def testWarnUngroupedNamedTokens(self):
"""
- warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names (default=True)
"""
with ppt.reset_pyparsing_context():
pp.__diag__.enable("warn_ungrouped_named_tokens_in_collection")
COMMA = pp.Suppress(",").setName("comma")
coord = ppc.integer("x") + COMMA + ppc.integer("y")
# this should emit a warning
with self.assertWarns(
UserWarning,
msg="failed to warn with named repetition of"
" ungrouped named expressions",
):
path = coord[...].setResultsName("path")
def testWarnNameSetOnEmptyForward(self):
"""
        - warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined
with a results name, but has no contents defined (default=False)
"""
with ppt.reset_pyparsing_context():
pp.__diag__.enable("warn_name_set_on_empty_Forward")
base = pp.Forward()
with self.assertWarns(
UserWarning,
msg="failed to warn when naming an empty Forward expression",
):
base("x")
def testWarnParsingEmptyForward(self):
"""
        - warn_on_parse_using_empty_Forward - flag to enable warnings when a Forward
has no contents defined (default=False)
"""
with ppt.reset_pyparsing_context():
pp.__diag__.enable("warn_on_parse_using_empty_Forward")
base = pp.Forward()
with self.assertWarns(
UserWarning,
msg="failed to warn when naming an empty Forward expression",
):
try:
print(base.parseString("x"))
except ParseException as pe:
pass
def testWarnIncorrectAssignmentToForward(self):
"""
        - warn_on_assignment_to_Forward - flag to enable warnings when a Forward
        is assigned to with '=' instead of '<<=' (default=False)
"""
if PYPY_ENV:
print("warn_on_assignment_to_Forward not supported on PyPy")
return
with ppt.reset_pyparsing_context():
pp.__diag__.enable("warn_on_assignment_to_Forward")
def a_method():
base = pp.Forward()
base = pp.Word(pp.alphas)[...] | "(" + base + ")"
with self.assertWarns(
SyntaxWarning,
msg="failed to warn when using '=' to assign expression to a Forward",
):
a_method()
def testWarnOnMultipleStringArgsToOneOf(self):
"""
        - warn_on_multiple_string_args_to_oneof - flag to enable warnings when oneOf is
incorrectly called with multiple str arguments (default=True)
"""
with ppt.reset_pyparsing_context():
pp.__diag__.enable("warn_on_multiple_string_args_to_oneof")
with self.assertWarns(
UserWarning,
msg="failed to warn when incorrectly calling oneOf(string, string)",
):
a = pp.oneOf("A", "B")
def testEnableDebugOnNamedExpressions(self):
"""
- enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
calls to ParserElement.setName() (default=False)
"""
import textwrap
with ppt.reset_pyparsing_context():
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
pp.__diag__.enable("enable_debug_on_named_expressions")
integer = pp.Word(pp.nums).setName("integer")
integer[...].parseString("1 2 3")
expected_debug_output = textwrap.dedent(
"""\
Match integer at loc 0(1,1)
Matched integer -> ['1']
Match integer at loc 1(1,2)
Matched integer -> ['2']
Match integer at loc 3(1,4)
Matched integer -> ['3']
Match integer at loc 5(1,6)
Exception raised:Expected integer, found end of text (at char 5), (line:1, col:6)
"""
)
output = test_stdout.getvalue()
print(output)
self.assertEqual(
expected_debug_output,
output,
"failed to auto-enable debug on named expressions "
"using enable_debug_on_named_expressions",
)
def testEnableDebugOnExpressionWithParseAction(self):
import textwrap
test_stdout = StringIO()
with resetting(sys, "stdout", "stderr"):
sys.stdout = test_stdout
sys.stderr = test_stdout
parser = (ppc.integer().setDebug() | pp.Word(pp.alphanums).setDebug())[...]
parser.setDebug()
parser.parseString("123 A100")
# now turn off debug - should only get output for components, not overall parser
print()
parser.setDebug(False)
parser.parseString("123 A100")
expected_debug_output = textwrap.dedent(
"""\
Match [{integer | W:(0-9A-Za-z)}]... at loc 0(1,1)
Match integer at loc 0(1,1)
Matched integer -> [123]
Match integer at loc 3(1,4)
Exception raised:Expected integer, found 'A' (at char 4), (line:1, col:5)
Match W:(0-9A-Za-z) at loc 3(1,4)
Matched W:(0-9A-Za-z) -> ['A100']
Match integer at loc 8(1,9)
Exception raised:Expected integer, found end of text (at char 8), (line:1, col:9)
Match W:(0-9A-Za-z) at loc 8(1,9)
Exception raised:Expected W:(0-9A-Za-z), found end of text (at char 8), (line:1, col:9)
Matched [{integer | W:(0-9A-Za-z)}]... -> [123, 'A100']
Match integer at loc 0(1,1)
Matched integer -> [123]
Match integer at loc 3(1,4)
Exception raised:Expected integer, found 'A' (at char 4), (line:1, col:5)
Match W:(0-9A-Za-z) at loc 3(1,4)
Matched W:(0-9A-Za-z) -> ['A100']
Match integer at loc 8(1,9)
Exception raised:Expected integer, found end of text (at char 8), (line:1, col:9)
Match W:(0-9A-Za-z) at loc 8(1,9)
Exception raised:Expected W:(0-9A-Za-z), found end of text (at char 8), (line:1, col:9)
"""
)
output = test_stdout.getvalue()
print(output)
self.assertEqual(
expected_debug_output,
output,
"invalid debug output when using parse action",
)
def testUndesirableButCommonPractices(self):
        # While these are valid constructs, they are not encouraged;
# there is apparently a lot of code out there using these
# coding styles.
#
# Even though they are not encouraged, we shouldn't break them.
# Create an And using a list of expressions instead of using '+' operator
expr = pp.And([pp.Word("abc"), pp.Word("123")])
expr.runTests(
"""
aaa 333
b 1
ababab 32123
"""
)
# Passing a single expression to a ParseExpression, when it really wants a sequence
expr = pp.Or(pp.Or(ppc.integer))
expr.runTests(
"""
123
456
abc
"""
)
def testEnableWarnDiags(self):
import pprint
def filtered_vars(var_dict):
dunders = [nm for nm in var_dict if nm.startswith("__")]
return {
k: v
for k, v in var_dict.items()
if isinstance(v, bool) and k not in dunders
}
pprint.pprint(filtered_vars(vars(pp.__diag__)), width=30)
warn_names = pp.__diag__._warning_names
other_names = pp.__diag__._debug_names
# make sure they are off by default
for diag_name in warn_names:
self.assertFalse(
getattr(pp.__diag__, diag_name),
"__diag__.{} not set to True".format(diag_name),
)
with ppt.reset_pyparsing_context():
# enable all warn_* diag_names
pp.__diag__.enable_all_warnings()
pprint.pprint(filtered_vars(vars(pp.__diag__)), width=30)
# make sure they are on after being enabled
for diag_name in warn_names:
self.assertTrue(
getattr(pp.__diag__, diag_name),
"__diag__.{} not set to True".format(diag_name),
)
# non-warn diag_names must be enabled individually
for diag_name in other_names:
self.assertFalse(
getattr(pp.__diag__, diag_name),
"__diag__.{} not set to True".format(diag_name),
)
# make sure they are off after AutoReset
for diag_name in warn_names:
self.assertFalse(
getattr(pp.__diag__, diag_name),
"__diag__.{} not set to True".format(diag_name),
)
def testWordInternalReRanges(self):
import random
import re
self.assertEqual(
"[!-~]+",
pp.Word(pp.printables).reString,
"failed to generate correct internal re",
)
self.assertEqual(
"[0-9A-Za-z]+",
pp.Word(pp.alphanums).reString,
"failed to generate correct internal re",
)
self.assertEqual(
"[!-~¡-ÿ]+",
pp.Word(pp.pyparsing_unicode.Latin1.printables).reString,
"failed to generate correct internal re",
)
self.assertEqual(
"[À-ÖØ-öø-ÿ]+",
pp.Word(pp.alphas8bit).reString,
"failed to generate correct internal re",
)
esc_chars = r"\^-]["
esc_chars2 = r"*+.?"
for esc_char in esc_chars + esc_chars2:
# test escape char as first character in range
next_char = chr(ord(esc_char) + 1)
prev_char = chr(ord(esc_char) - 1)
esc_word = pp.Word(esc_char + next_char)
expected = r"[{}{}-{}{}]+".format(
"\\" if esc_char in esc_chars else "",
esc_char,
"\\" if next_char in esc_chars else "",
next_char,
)
print(
"Testing escape char: {} -> {} re: '{}')".format(
esc_char, esc_word, esc_word.reString
)
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = "".join(
random.choice([esc_char, next_char]) for __ in range(16)
)
print(
"Match '{}' -> {}".format(
test_string, test_string == esc_word.parseString(test_string)[0]
)
)
self.assertEqual(
test_string,
esc_word.parseString(test_string)[0],
"Word using escaped range char failed to parse",
)
# test escape char as last character in range
esc_word = pp.Word(prev_char + esc_char)
expected = r"[{}{}-{}{}]+".format(
"\\" if prev_char in esc_chars else "",
prev_char,
"\\" if esc_char in esc_chars else "",
esc_char,
)
print(
"Testing escape char: {} -> {} re: '{}')".format(
esc_char, esc_word, esc_word.reString
)
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = "".join(
random.choice([esc_char, prev_char]) for __ in range(16)
)
print(
"Match '{}' -> {}".format(
test_string, test_string == esc_word.parseString(test_string)[0]
)
)
self.assertEqual(
test_string,
esc_word.parseString(test_string)[0],
"Word using escaped range char failed to parse",
)
# test escape char as only character in range
esc_word = pp.Word(esc_char + esc_char, pp.alphas.upper())
expected = r"[{}{}][A-Z]*".format(
"\\" if esc_char in esc_chars else "", esc_char
)
print(
"Testing escape char: {} -> {} re: '{}')".format(
esc_char, esc_word, esc_word.reString
)
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = esc_char + "".join(
random.choice(pp.alphas.upper()) for __ in range(16)
)
print(
"Match '{}' -> {}".format(
test_string, test_string == esc_word.parseString(test_string)[0]
)
)
self.assertEqual(
test_string,
esc_word.parseString(test_string)[0],
"Word using escaped range char failed to parse",
)
# test escape char as only character
esc_word = pp.Word(esc_char, pp.alphas.upper())
expected = r"{}[A-Z]*".format(re.escape(esc_char))
print(
"Testing escape char: {} -> {} re: '{}')".format(
esc_char, esc_word, esc_word.reString
)
)
self.assertEqual(
expected, esc_word.reString, "failed to generate correct internal re"
)
test_string = esc_char + "".join(
random.choice(pp.alphas.upper()) for __ in range(16)
)
print(
"Match '{}' -> {}".format(
test_string, test_string == esc_word.parseString(test_string)[0]
)
)
self.assertEqual(
test_string,
esc_word.parseString(test_string)[0],
"Word using escaped range char failed to parse",
)
print()
def testChainedTernaryOperator(self):
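        # With LEFT associativity the chained ?: operators collapse into a single
        # flat group; with RIGHT associativity the trailing ?: nests inside the first.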
TERNARY_INFIX = pp.infixNotation(
ppc.integer, [(("?", ":"), 3, pp.opAssoc.LEFT)]
)
self.assertParseAndCheckList(
TERNARY_INFIX, "1?1:0?1:0", [[1, "?", 1, ":", 0, "?", 1, ":", 0]]
)
TERNARY_INFIX = pp.infixNotation(
ppc.integer, [(("?", ":"), 3, pp.opAssoc.RIGHT)]
)
self.assertParseAndCheckList(
TERNARY_INFIX, "1?1:0?1:0", [[1, "?", 1, ":", [0, "?", 1, ":", 0]]]
)
def testOneOfWithDuplicateSymbols(self):
# test making oneOf with duplicate symbols
print("verify oneOf handles duplicate symbols")
try:
test1 = pp.oneOf("a b c d a")
except RuntimeError:
self.fail(
"still have infinite loop in oneOf with duplicate symbols (string input)"
)
print("verify oneOf handles generator input")
try:
test1 = pp.oneOf(c for c in "a b c d a" if not c.isspace())
except RuntimeError:
self.fail(
"still have infinite loop in oneOf with duplicate symbols (generator input)"
)
print("verify oneOf handles list input")
try:
test1 = pp.oneOf("a b c d a".split())
except RuntimeError:
self.fail(
"still have infinite loop in oneOf with duplicate symbols (list input)"
)
print("verify oneOf handles set input")
try:
test1 = pp.oneOf(set("a b c d a"))
except RuntimeError:
self.fail(
"still have infinite loop in oneOf with duplicate symbols (set input)"
)
def testOneOfWithEmptyList(self):
"""test oneOf helper function with an empty list as input"""
tst = []
result = pp.oneOf(tst)
expected = True
found = isinstance(result, pp.NoMatch)
self.assertEqual(expected, found)
def testOneOfWithUnexpectedInput(self):
"""test oneOf with an input that isn't a string or iterable"""
with self.assertWarns(
SyntaxWarning, msg="failed to warn use of integer for oneOf"
):
expr = pp.oneOf(6)
def testMatchFirstIteratesOverAllChoices(self):
# test MatchFirst bugfix
print("verify MatchFirst iterates properly")
results = pp.quotedString.parseString("'this is a single quoted string'")
self.assertTrue(
len(results) > 0, "MatchFirst error - not iterating over all choices"
)
def testStreamlineOfSubexpressions(self):
# verify streamline of subexpressions
print("verify proper streamline logic")
compound = pp.Literal("A") + "B" + "C" + "D"
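        # Repeated '+' builds nested And expressions, so compound initially reports
        # only 2 sub-expressions; streamline() flattens the nesting so that all four
        # literals become direct sub-expressions.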
self.assertEqual(2, len(compound.exprs), "bad test setup")
print(compound)
compound.streamline()
print(compound)
self.assertEqual(4, len(compound.exprs), "streamline not working")
def testOptionalWithResultsNameAndNoMatch(self):
# test for Optional with results name and no match
print("verify Optional's do not cause match failure if have results name")
testGrammar = pp.Literal("A") + pp.Optional("B")("gotB") + pp.Literal("C")
try:
testGrammar.parseString("ABC")
testGrammar.parseString("AC")
except pp.ParseException as pe:
print(pe.pstr, "->", pe)
self.fail("error in Optional matching of string %s" % pe.pstr)
def testReturnOfFurthestException(self):
# test return of furthest exception
testGrammar = (
pp.Literal("A") | (pp.Optional("B") + pp.Literal("C")) | pp.Literal("D")
)
try:
testGrammar.parseString("BC")
testGrammar.parseString("BD")
except pp.ParseException as pe:
print(pe.pstr, "->", pe)
self.assertEqual("BD", pe.pstr, "wrong test string failed to parse")
self.assertEqual(
1, pe.loc, "error in Optional matching, pe.loc=" + str(pe.loc)
)
def testValidateCorrectlyDetectsInvalidLeftRecursion(self):
# test validate
print("verify behavior of validate()")
if IRON_PYTHON_ENV:
print("disable this test under IronPython")
return
def testValidation(grmr, gnam, isValid):
try:
grmr.streamline()
grmr.validate()
self.assertTrue(isValid, "validate() accepted invalid grammar " + gnam)
except pp.RecursiveGrammarException as rge:
print(grmr)
print(rge)
self.assertFalse(isValid, "validate() rejected valid grammar " + gnam)
fwd = pp.Forward()
g1 = pp.OneOrMore((pp.Literal("A") + "B" + "C") | fwd)
g2 = ("C" + g1)[...]
fwd <<= pp.Group(g2)
testValidation(fwd, "fwd", isValid=True)
fwd2 = pp.Forward()
fwd2 <<= pp.Group("A" | fwd2)
testValidation(fwd2, "fwd2", isValid=False)
fwd3 = pp.Forward()
fwd3 <<= pp.Optional("A") + fwd3
testValidation(fwd3, "fwd3", isValid=False)
def testGetNameBehavior(self):
# test getName
print("verify behavior of getName()")
aaa = pp.Group(pp.Word("a")("A"))
bbb = pp.Group(pp.Word("b")("B"))
ccc = pp.Group(":" + pp.Word("c")("C"))
g1 = "XXX" + (aaa | bbb | ccc)[...]
teststring = "XXX b bb a bbb bbbb aa bbbbb :c bbbbbb aaa"
names = []
print(g1.parseString(teststring).dump())
for t in g1.parseString(teststring):
print(t, repr(t))
try:
names.append(t[0].getName())
except Exception:
try:
names.append(t.getName())
except Exception:
names.append(None)
print(teststring)
print(names)
self.assertEqual(
[None, "B", "B", "A", "B", "B", "A", "B", None, "B", "A"],
names,
"failure in getting names for tokens",
)
from pyparsing import Keyword, Word, alphas, OneOrMore
IF, AND, BUT = map(Keyword, "if and but".split())
ident = ~(IF | AND | BUT) + Word(alphas)("non-key")
scanner = OneOrMore(IF | AND | BUT | ident)
def getNameTester(s, l, t):
print(t, t.getName())
ident.addParseAction(getNameTester)
scanner.parseString("lsjd sldkjf IF Saslkj AND lsdjf")
# test ParseResults.get() method
print("verify behavior of ParseResults.get()")
# use sum() to merge separate groups into single ParseResults
res = sum(g1.parseString(teststring)[1:])
print(res.dump())
print(res.get("A", "A not found"))
print(res.get("D", "!D"))
self.assertEqual(
"aaa", res.get("A", "A not found"), "get on existing key failed"
)
self.assertEqual("!D", res.get("D", "!D"), "get on missing key failed")
def testOptionalBeyondEndOfString(self):
print("verify handling of Optional's beyond the end of string")
testGrammar = "A" + pp.Optional("B") + pp.Optional("C") + pp.Optional("D")
testGrammar.parseString("A")
testGrammar.parseString("AB")
def testCreateLiteralWithEmptyString(self):
# test creating Literal with empty string
print('verify non-fatal usage of Literal("")')
with self.assertWarns(
SyntaxWarning, msg="failed to warn use of empty string for Literal"
):
e = pp.Literal("")
try:
e.parseString("SLJFD")
except Exception as e:
self.fail("Failed to handle empty Literal")
def testLineMethodSpecialCaseAtStart(self):
# test line() behavior when starting at 0 and the opening line is an \n
print("verify correct line() behavior when first line is empty string")
self.assertEqual(
"",
pp.line(0, "\nabc\ndef\n"),
"Error in line() with empty first line in text",
)
txt = "\nabc\ndef\n"
results = [pp.line(i, txt) for i in range(len(txt))]
self.assertEqual(
["", "abc", "abc", "abc", "abc", "def", "def", "def", "def"],
results,
"Error in line() with empty first line in text",
)
txt = "abc\ndef\n"
results = [pp.line(i, txt) for i in range(len(txt))]
self.assertEqual(
["abc", "abc", "abc", "abc", "def", "def", "def", "def"],
results,
"Error in line() with non-empty first line in text",
)
def testRepeatedTokensWhenPackratting(self):
# test bugfix with repeated tokens when packrat parsing enabled
print("verify behavior with repeated tokens when packrat parsing is enabled")
a = pp.Literal("a")
b = pp.Literal("b")
c = pp.Literal("c")
abb = a + b + b
abc = a + b + c
aba = a + b + a
grammar = abb | abc | aba
self.assertEqual(
"aba", "".join(grammar.parseString("aba")), "Packrat ABA failure!"
)
def testSetResultsNameWithOneOrMoreAndZeroOrMore(self):
print("verify behavior of setResultsName with OneOrMore and ZeroOrMore")
stmt = pp.Keyword("test")
print(stmt[...]("tests").parseString("test test").tests)
print(stmt[1, ...]("tests").parseString("test test").tests)
print(pp.Optional(stmt[1, ...]("tests")).parseString("test test").tests)
print(pp.Optional(stmt[1, ...])("tests").parseString("test test").tests)
print(
pp.Optional(pp.delimitedList(stmt))("tests").parseString("test,test").tests
)
self.assertEqual(
2,
len(stmt[...]("tests").parseString("test test").tests),
"ZeroOrMore failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[1, ...]("tests").parseString("test test").tests),
"OneOrMore failure with setResultsName",
)
self.assertEqual(
2,
len(pp.Optional(stmt[1, ...]("tests")).parseString("test test").tests),
"OneOrMore failure with setResultsName",
)
self.assertEqual(
2,
len(
pp.Optional(pp.delimitedList(stmt))("tests")
.parseString("test,test")
.tests
),
"delimitedList failure with setResultsName",
)
self.assertEqual(
2,
len((stmt * 2)("tests").parseString("test test").tests),
"multiplied(1) failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[..., 2]("tests").parseString("test test").tests),
"multiplied(2) failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[1, ...]("tests").parseString("test test").tests),
"multiplied(3) failure with setResultsName",
)
self.assertEqual(
2,
len(stmt[2, ...]("tests").parseString("test test").tests),
"multiplied(3) failure with setResultsName",
)
def testParseResultsReprWithResultsNames(self):
word = pp.Word(pp.printables)("word")
res = word[...].parseString("test blub")
print(repr(res))
print(res["word"])
print(res.asDict())
self.assertEqual(
"(['test', 'blub'], {'word': 'blub'})",
repr(res),
"incorrect repr for ParseResults with listAllMatches=False",
)
word = pp.Word(pp.printables)("word*")
res = word[...].parseString("test blub")
print(repr(res))
print(res["word"])
print(res.asDict())
self.assertEqual(
"(['test', 'blub'], {'word': ['test', 'blub']})",
repr(res),
"incorrect repr for ParseResults with listAllMatches=True",
)
def testWarnUsingLshiftForward(self):
import warnings
print(
"verify that using '<<' operator with a Forward raises a warning if there is a dangling '|' operator"
)
fwd = pp.Forward()
print("unsafe << and |, but diag not enabled, should not warn")
fwd << pp.Word("a") | pp.Word("b")
pp.__diag__.enable("warn_on_match_first_with_lshift_operator")
with self.assertWarns(
SyntaxWarning, msg="failed to warn of using << and | operators"
):
fwd = pp.Forward()
print("unsafe << and |, should warn")
fwd << pp.Word("a") | pp.Word("b")
with self.assertWarns(
SyntaxWarning,
msg="failed to warn of using << and | operators (within lambda)",
):
fwd = pp.Forward()
print("unsafe << and |, should warn")
fwd_fn = lambda expr1, expr2: fwd << expr1 | expr2
fwd_fn(pp.Word("a"), pp.Word("b"))
fwd = pp.Forward()
print("safe <<= and |, should not warn")
fwd <<= pp.Word("a") | pp.Word("b")
c = fwd | pp.Word("c")
print("safe << and (|), should not warn")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
fwd = pp.Forward()
fwd << (pp.Word("a") | pp.Word("b"))
try:
c = fwd | pp.Word("c")
except Exception as e:
self.fail("raised warning when it should not have")
def testParseExpressionsWithRegex(self):
from itertools import product
match_empty_regex = pp.Regex(r"[a-z]*")
match_nonempty_regex = pp.Regex(r"[a-z]+")
parser_classes = pp.ParseExpression.__subclasses__()
test_string = "abc def"
expected = ["abc"]
for expr, cls in product(
(match_nonempty_regex, match_empty_regex), parser_classes
):
print(expr, cls)
parser = cls([expr])
parsed_result = parser.parseString(test_string)
print(parsed_result.dump())
self.assertParseResultsEquals(parsed_result, expected)
for expr, cls in product(
(match_nonempty_regex, match_empty_regex), (pp.MatchFirst, pp.Or)
):
parser = cls([expr, expr])
print(parser)
parsed_result = parser.parseString(test_string)
print(parsed_result.dump())
self.assertParseResultsEquals(parsed_result, expected)
def testAssertParseAndCheckDict(self):
"""test assertParseAndCheckDict in test framework"""
expr = pp.Word(pp.alphas)("item") + pp.Word(pp.nums)("qty")
self.assertParseAndCheckDict(
expr, "balloon 25", {"item": "balloon", "qty": "25"}
)
exprWithInt = pp.Word(pp.alphas)("item") + ppc.integer("qty")
self.assertParseAndCheckDict(
exprWithInt, "rucksack 49", {"item": "rucksack", "qty": 49}
)
def testOnlyOnce(self):
"""test class OnlyOnce and its reset method"""
# use a parse action to compute the sum of the parsed integers,
# and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
pa = pp.OnlyOnce(append_sum)
expr = pp.OneOrMore(pp.Word(pp.nums)).addParseAction(pa)
result = expr.parseString("0 123 321")
print(result.dump())
expected = ["0", "123", "321", 444]
self.assertParseResultsEquals(
result, expected, msg="issue with OnlyOnce first call"
)
with self.assertRaisesParseException(
msg="failed to raise exception calling OnlyOnce more than once"
):
result2 = expr.parseString("1 2 3 4 5")
pa.reset()
result = expr.parseString("100 200 300")
print(result.dump())
expected = ["100", "200", "300", 600]
self.assertParseResultsEquals(
result, expected, msg="issue with OnlyOnce after reset"
)
def testGoToColumn(self):
"""tests for GoToColumn class"""
dateExpr = pp.Regex(r"\d\d(\.\d\d){2}")("date")
numExpr = ppc.number("num")
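        # GoToColumn(70) jumps the parse position ahead to a fixed column where the
        # numeric value is expected to start; GoToColumn(30) below points at a column
        # with no number, so those parses are expected to fail.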
sample = """\
date Not Important value NotImportant2
11.11.13 | useless . useless,21 useless 2 | 14.21 | asmdakldm
21.12.12 | fmpaosmfpoamsp 4 | 41 | ajfa9si90""".splitlines()
# Column number finds match
patt = dateExpr + pp.GoToColumn(70).ignore("|") + numExpr + pp.restOfLine
infile = iter(sample)
next(infile)
expecteds = [["11.11.13", 14.21], ["21.12.12", 41]]
for line, expected in zip(infile, expecteds):
result = patt.parseString(line)
print(result)
self.assertEqual(
expected, [result.date, result.num], msg="issue with GoToColumn"
)
# Column number does NOT match
patt = dateExpr("date") + pp.GoToColumn(30) + numExpr + pp.restOfLine
infile = iter(sample)
next(infile)
for line in infile:
with self.assertRaisesParseException(
msg="issue with GoToColumn not finding match"
):
result = patt.parseString(line)
def testExceptionExplainVariations(self):
class Modifier:
def modify_upper(self, tokens):
tokens[:] = map(str.upper, tokens)
modder = Modifier()
# force an exception in the attached parse action
# integer has a parse action to convert to an int;
# this parse action should fail with a TypeError, since
# str.upper expects a str argument, not an int
grammar = ppc.integer().addParseAction(modder.modify_upper)
self_testcase_name = "tests.test_unit." + type(self).__name__
try:
grammar.parseString("1000")
except Exception as e:
# extract the exception explanation
explain_str = ParseException.explain_exception(e)
print(explain_str)
explain_str_lines = explain_str.splitlines()
expected = [
self_testcase_name,
"pyparsing.core._WordRegex - integer",
"tests.test_unit.Modifier",
"pyparsing.results.ParseResults",
]
# verify the list of names shown in the explain "stack"
self.assertEqual(
expected,
explain_str_lines[-len(expected) :],
msg="invalid explain str",
)
# check type of raised exception matches explain output
# (actual exception text varies by Python version, and even
# by how the exception is raised, so we can only check the
# type name)
exception_line = explain_str_lines[-(len(expected) + 1)]
self.assertTrue(
exception_line.startswith("TypeError:"),
msg="unexpected exception line ({!r})".format(exception_line),
)
def testMiscellaneousExceptionBits(self):
pp.ParserElement.verbose_stacktrace = True
self_testcase_name = "tests.test_unit." + type(self).__name__
# force a parsing exception - match an integer against "ABC"
try:
pp.Word(pp.nums).parseString("ABC")
except pp.ParseException as pe:
with self.assertRaises(AttributeError):
print(pe.nonexistent_attribute)
expected_str = "Expected W:(0-9), found 'A' (at char 0), (line:1, col:1)"
self.assertEqual(expected_str, str(pe), "invalid ParseException str")
self.assertEqual(expected_str, repr(pe), "invalid ParseException repr")
expected_dir = [
"args",
"col",
"explain",
"explain_exception",
"line",
"lineno",
"markInputline",
"with_traceback",
]
observed_dir = [attr for attr in dir(pe) if not attr.startswith("_")]
print(observed_dir)
self.assertEqual(expected_dir, observed_dir, "invalid dir(ParseException)")
self.assertEqual(
">!<ABC", pe.markInputline(), "invalid default mark input line"
)
self.assertEqual(
"ABC", pe.markInputline(""), "invalid mark input line with '' marker"
)
# test explain using depth=None, 0, 1
depth_none_explain_str = pe.explain(depth=None)
depth_0_explain_str = pe.explain(depth=0)
depth_1_explain_str = pe.explain(depth=1)
print(depth_none_explain_str)
expr_name = "pyparsing.core._WordRegex - W:(0-9)"
for expected_function in [
self_testcase_name,
expr_name,
]:
self.assertTrue(
expected_function in depth_none_explain_str,
"{!r} not found in ParseException.explain()".format(
expected_function
),
)
self.assertFalse(
expected_function in depth_0_explain_str,
"{!r} found in ParseException.explain(depth=0)".format(
expected_function
),
)
self.assertTrue(
expr_name in depth_1_explain_str,
"{!r} not found in ParseException.explain()".format(expected_function),
)
self.assertFalse(
self_testcase_name in depth_1_explain_str,
"{!r} not found in ParseException.explain()".format(expected_function),
)
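# The remaining TestCase classes re-run the Test2 suite under different packrat
# configurations: each "Enable..." class switches the packrat setting and saves a
# fresh suite context for the "With..." classes that follow it.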
class Test3_EnablePackratParsing(TestCase):
def runTest(self):
Test2_WithoutPackrat.suite_context.restore()
ParserElement.enablePackrat()
# SAVE A NEW SUITE CONTEXT
Test2_WithoutPackrat.suite_context = ppt.reset_pyparsing_context().save()
class Test4_WithPackrat(Test2_WithoutPackrat):
"""
rerun Test2 tests, now that packrat is enabled
"""
def test000_assert_packrat_status(self):
print("Packrat enabled:", ParserElement._packratEnabled)
print(
"Packrat cache:",
type(ParserElement.packrat_cache).__name__,
getattr(ParserElement.packrat_cache, "size", "- no size attribute -"),
)
self.assertTrue(ParserElement._packratEnabled, "packrat not enabled")
self.assertEqual(
"_FifoCache",
type(ParserElement.packrat_cache).__name__,
msg="incorrect cache type",
)
class Test5_EnableBoundedPackratParsing(TestCase):
def runTest(self):
Test2_WithoutPackrat.suite_context = Test2_WithoutPackrat.save_suite_context
Test2_WithoutPackrat.suite_context.restore()
ParserElement.enablePackrat(cache_size_limit=16)
# SAVE A NEW SUITE CONTEXT
Test2_WithoutPackrat.suite_context = ppt.reset_pyparsing_context().save()
class Test6_WithBoundedPackrat(Test2_WithoutPackrat):
"""
rerun Test2 tests, now with bounded packrat cache
"""
def test000_assert_packrat_status(self):
print("Packrat enabled:", ParserElement._packratEnabled)
print(
"Packrat cache:",
type(ParserElement.packrat_cache).__name__,
getattr(ParserElement.packrat_cache, "size", "- no size attribute -"),
)
self.assertTrue(ParserElement._packratEnabled, "packrat not enabled")
self.assertEqual(
"_FifoCache",
type(ParserElement.packrat_cache).__name__,
msg="incorrect cache type",
)
class Test7_EnableUnboundedPackratParsing(TestCase):
def runTest(self):
Test2_WithoutPackrat.suite_context = Test2_WithoutPackrat.save_suite_context
Test2_WithoutPackrat.suite_context.restore()
ParserElement.enablePackrat(cache_size_limit=None)
# SAVE A NEW SUITE CONTEXT
Test2_WithoutPackrat.suite_context = ppt.reset_pyparsing_context().save()
class Test8_WithUnboundedPackrat(Test2_WithoutPackrat):
"""
rerun Test2 tests, now with unbounded packrat cache
"""
def test000_assert_packrat_status(self):
print("Packrat enabled:", ParserElement._packratEnabled)
print(
"Packrat cache:",
type(ParserElement.packrat_cache).__name__,
getattr(ParserElement.packrat_cache, "size", "- no size attribute -"),
)
self.assertTrue(ParserElement._packratEnabled, "packrat not enabled")
self.assertEqual(
"_UnboundedCache",
type(ParserElement.packrat_cache).__name__,
msg="incorrect cache type",
)
# force clear of packrat parsing flags before saving contexts
pp.ParserElement._packratEnabled = False
pp.ParserElement._parse = pp.ParserElement._parseNoCache
Test2_WithoutPackrat.suite_context = ppt.reset_pyparsing_context().save()
Test2_WithoutPackrat.save_suite_context = ppt.reset_pyparsing_context().save()
``` |